Diffstat (limited to 'chromium/v8/src')
-rw-r--r--chromium/v8/src/api/api-natives.cc4
-rw-r--r--chromium/v8/src/api/api.cc104
-rw-r--r--chromium/v8/src/asmjs/asm-scanner.cc8
-rw-r--r--chromium/v8/src/asmjs/asm-scanner.h2
-rw-r--r--chromium/v8/src/ast/ast.cc1
-rw-r--r--chromium/v8/src/ast/ast.h1
-rw-r--r--chromium/v8/src/ast/scopes.h8
-rw-r--r--chromium/v8/src/base/bit-field.h4
-rw-r--r--chromium/v8/src/base/bits.h29
-rw-r--r--chromium/v8/src/base/bounds.h8
-rw-r--r--chromium/v8/src/base/cpu.cc67
-rw-r--r--chromium/v8/src/base/cpu.h2
-rw-r--r--chromium/v8/src/base/enum-set.h4
-rw-r--r--chromium/v8/src/base/iterator.h3
-rw-r--r--chromium/v8/src/base/logging.h6
-rw-r--r--chromium/v8/src/base/macros.h14
-rw-r--r--chromium/v8/src/base/optional.h24
-rw-r--r--chromium/v8/src/base/platform/platform-posix.cc8
-rw-r--r--chromium/v8/src/base/platform/platform-solaris.cc18
-rw-r--r--chromium/v8/src/base/platform/time.h14
-rw-r--r--chromium/v8/src/base/template-utils.h14
-rw-r--r--chromium/v8/src/builtins/aggregate-error.tq49
-rw-r--r--chromium/v8/src/builtins/array-join.tq4
-rw-r--r--chromium/v8/src/builtins/array-slice.tq12
-rw-r--r--chromium/v8/src/builtins/base.tq132
-rw-r--r--chromium/v8/src/builtins/bigint.tq1
-rw-r--r--chromium/v8/src/builtins/builtins-array-gen.cc44
-rw-r--r--chromium/v8/src/builtins/builtins-array-gen.h12
-rw-r--r--chromium/v8/src/builtins/builtins-async-iterator-gen.cc94
-rw-r--r--chromium/v8/src/builtins/builtins-call-gen.cc73
-rw-r--r--chromium/v8/src/builtins/builtins-collections-gen.cc2
-rw-r--r--chromium/v8/src/builtins/builtins-constructor-gen.cc79
-rw-r--r--chromium/v8/src/builtins/builtins-conversion-gen.cc386
-rw-r--r--chromium/v8/src/builtins/builtins-date-gen.cc2
-rw-r--r--chromium/v8/src/builtins/builtins-definitions.h115
-rw-r--r--chromium/v8/src/builtins/builtins-descriptors.h28
-rw-r--r--chromium/v8/src/builtins/builtins-function-gen.cc202
-rw-r--r--chromium/v8/src/builtins/builtins-handler-gen.cc39
-rw-r--r--chromium/v8/src/builtins/builtins-internal-gen.cc20
-rw-r--r--chromium/v8/src/builtins/builtins-microtask-queue-gen.cc7
-rw-r--r--chromium/v8/src/builtins/builtins-number-gen.cc1060
-rw-r--r--chromium/v8/src/builtins/builtins-object-gen.cc7
-rw-r--r--chromium/v8/src/builtins/builtins-promise.h4
-rw-r--r--chromium/v8/src/builtins/builtins-regexp-gen.cc10
-rw-r--r--chromium/v8/src/builtins/builtins-string-gen.cc16
-rw-r--r--chromium/v8/src/builtins/builtins-string.cc12
-rw-r--r--chromium/v8/src/builtins/builtins-string.tq37
-rw-r--r--chromium/v8/src/builtins/builtins-typed-array-gen.cc44
-rw-r--r--chromium/v8/src/builtins/builtins-typed-array-gen.h5
-rw-r--r--chromium/v8/src/builtins/builtins-wasm-gen.cc153
-rw-r--r--chromium/v8/src/builtins/builtins-wasm-gen.h4
-rw-r--r--chromium/v8/src/builtins/cast.tq313
-rw-r--r--chromium/v8/src/builtins/constants-table-builder.cc37
-rw-r--r--chromium/v8/src/builtins/constants-table-builder.h5
-rw-r--r--chromium/v8/src/builtins/conversion.tq232
-rw-r--r--chromium/v8/src/builtins/convert.tq15
-rw-r--r--chromium/v8/src/builtins/function.tq109
-rw-r--r--chromium/v8/src/builtins/growable-fixed-array.tq3
-rw-r--r--chromium/v8/src/builtins/internal.tq35
-rw-r--r--chromium/v8/src/builtins/math.tq6
-rw-r--r--chromium/v8/src/builtins/number.tq678
-rw-r--r--chromium/v8/src/builtins/promise-abstract-operations.tq45
-rw-r--r--chromium/v8/src/builtins/promise-all-element-closure.tq99
-rw-r--r--chromium/v8/src/builtins/promise-all.tq301
-rw-r--r--chromium/v8/src/builtins/promise-any.tq130
-rw-r--r--chromium/v8/src/builtins/promise-race.tq56
-rw-r--r--chromium/v8/src/builtins/regexp.tq3
-rw-r--r--chromium/v8/src/builtins/setup-builtins-internal.cc5
-rw-r--r--chromium/v8/src/builtins/torque-internal.tq28
-rw-r--r--chromium/v8/src/builtins/typed-array-entries.tq27
-rw-r--r--chromium/v8/src/builtins/typed-array-keys.tq27
-rw-r--r--chromium/v8/src/builtins/typed-array-values.tq27
-rw-r--r--chromium/v8/src/builtins/wasm.tq129
-rw-r--r--chromium/v8/src/codegen/arm/assembler-arm.cc35
-rw-r--r--chromium/v8/src/codegen/arm/assembler-arm.h11
-rw-r--r--chromium/v8/src/codegen/arm/interface-descriptors-arm.cc29
-rw-r--r--chromium/v8/src/codegen/arm/macro-assembler-arm.cc2
-rw-r--r--chromium/v8/src/codegen/arm64/assembler-arm64.cc72
-rw-r--r--chromium/v8/src/codegen/arm64/assembler-arm64.h19
-rw-r--r--chromium/v8/src/codegen/arm64/constants-arm64.h17
-rw-r--r--chromium/v8/src/codegen/arm64/decoder-arm64-inl.h1
-rw-r--r--chromium/v8/src/codegen/arm64/interface-descriptors-arm64.cc29
-rw-r--r--chromium/v8/src/codegen/arm64/macro-assembler-arm64-inl.h18
-rw-r--r--chromium/v8/src/codegen/arm64/macro-assembler-arm64.cc17
-rw-r--r--chromium/v8/src/codegen/arm64/macro-assembler-arm64.h26
-rw-r--r--chromium/v8/src/codegen/arm64/register-arm64.h8
-rw-r--r--chromium/v8/src/codegen/assembler.cc2
-rw-r--r--chromium/v8/src/codegen/assembler.h19
-rw-r--r--chromium/v8/src/codegen/code-factory.cc17
-rw-r--r--chromium/v8/src/codegen/code-factory.h1
-rw-r--r--chromium/v8/src/codegen/code-stub-assembler.cc604
-rw-r--r--chromium/v8/src/codegen/code-stub-assembler.h350
-rw-r--r--chromium/v8/src/codegen/compiler.cc21
-rw-r--r--chromium/v8/src/codegen/cpu-features.h5
-rw-r--r--chromium/v8/src/codegen/external-reference.cc19
-rw-r--r--chromium/v8/src/codegen/external-reference.h13
-rw-r--r--chromium/v8/src/codegen/ia32/assembler-ia32.cc56
-rw-r--r--chromium/v8/src/codegen/ia32/assembler-ia32.h10
-rw-r--r--chromium/v8/src/codegen/ia32/interface-descriptors-ia32.cc30
-rw-r--r--chromium/v8/src/codegen/ia32/macro-assembler-ia32.cc26
-rw-r--r--chromium/v8/src/codegen/ia32/macro-assembler-ia32.h34
-rw-r--r--chromium/v8/src/codegen/ia32/sse-instr.h1
-rw-r--r--chromium/v8/src/codegen/interface-descriptors.cc32
-rw-r--r--chromium/v8/src/codegen/interface-descriptors.h427
-rw-r--r--chromium/v8/src/codegen/machine-type.h40
-rw-r--r--chromium/v8/src/codegen/mips/assembler-mips.cc16
-rw-r--r--chromium/v8/src/codegen/mips/assembler-mips.h36
-rw-r--r--chromium/v8/src/codegen/mips/interface-descriptors-mips.cc46
-rw-r--r--chromium/v8/src/codegen/mips/macro-assembler-mips.cc4
-rw-r--r--chromium/v8/src/codegen/mips64/assembler-mips64.cc15
-rw-r--r--chromium/v8/src/codegen/mips64/assembler-mips64.h36
-rw-r--r--chromium/v8/src/codegen/mips64/interface-descriptors-mips64.cc46
-rw-r--r--chromium/v8/src/codegen/mips64/macro-assembler-mips64.cc3
-rw-r--r--chromium/v8/src/codegen/optimized-compilation-info.cc72
-rw-r--r--chromium/v8/src/codegen/optimized-compilation-info.h189
-rw-r--r--chromium/v8/src/codegen/ppc/assembler-ppc.cc16
-rw-r--r--chromium/v8/src/codegen/ppc/assembler-ppc.h62
-rw-r--r--chromium/v8/src/codegen/ppc/constants-ppc.h231
-rw-r--r--chromium/v8/src/codegen/ppc/interface-descriptors-ppc.cc29
-rw-r--r--chromium/v8/src/codegen/ppc/macro-assembler-ppc.cc2
-rw-r--r--chromium/v8/src/codegen/register.h10
-rw-r--r--chromium/v8/src/codegen/reloc-info.h9
-rw-r--r--chromium/v8/src/codegen/s390/constants-s390.h11
-rw-r--r--chromium/v8/src/codegen/s390/interface-descriptors-s390.cc29
-rw-r--r--chromium/v8/src/codegen/s390/macro-assembler-s390.cc4
-rw-r--r--chromium/v8/src/codegen/safepoint-table.cc2
-rw-r--r--chromium/v8/src/codegen/source-position-table.cc18
-rw-r--r--chromium/v8/src/codegen/source-position-table.h6
-rw-r--r--chromium/v8/src/codegen/x64/assembler-x64.cc217
-rw-r--r--chromium/v8/src/codegen/x64/assembler-x64.h110
-rw-r--r--chromium/v8/src/codegen/x64/interface-descriptors-x64.cc49
-rw-r--r--chromium/v8/src/codegen/x64/macro-assembler-x64.cc4
-rw-r--r--chromium/v8/src/codegen/x64/macro-assembler-x64.h9
-rw-r--r--chromium/v8/src/codegen/x64/sse-instr.h1
-rw-r--r--chromium/v8/src/common/checks.h2
-rw-r--r--chromium/v8/src/common/globals.h76
-rw-r--r--chromium/v8/src/common/message-template.h6
-rw-r--r--chromium/v8/src/compiler/access-builder.cc38
-rw-r--r--chromium/v8/src/compiler/access-builder.h12
-rw-r--r--chromium/v8/src/compiler/access-info.cc8
-rw-r--r--chromium/v8/src/compiler/allocation-builder-inl.h13
-rw-r--r--chromium/v8/src/compiler/allocation-builder.h5
-rw-r--r--chromium/v8/src/compiler/backend/arm/code-generator-arm.cc150
-rw-r--r--chromium/v8/src/compiler/backend/arm/instruction-codes-arm.h16
-rw-r--r--chromium/v8/src/compiler/backend/arm/instruction-scheduler-arm.cc16
-rw-r--r--chromium/v8/src/compiler/backend/arm/instruction-selector-arm.cc53
-rw-r--r--chromium/v8/src/compiler/backend/arm64/code-generator-arm64.cc123
-rw-r--r--chromium/v8/src/compiler/backend/arm64/instruction-codes-arm64.h29
-rw-r--r--chromium/v8/src/compiler/backend/arm64/instruction-scheduler-arm64.cc29
-rw-r--r--chromium/v8/src/compiler/backend/arm64/instruction-selector-arm64.cc86
-rw-r--r--chromium/v8/src/compiler/backend/code-generator.cc47
-rw-r--r--chromium/v8/src/compiler/backend/ia32/code-generator-ia32.cc31
-rw-r--r--chromium/v8/src/compiler/backend/ia32/instruction-codes-ia32.h15
-rw-r--r--chromium/v8/src/compiler/backend/ia32/instruction-scheduler-ia32.cc15
-rw-r--r--chromium/v8/src/compiler/backend/ia32/instruction-selector-ia32.cc68
-rw-r--r--chromium/v8/src/compiler/backend/instruction-selector-impl.h2
-rw-r--r--chromium/v8/src/compiler/backend/instruction-selector.cc97
-rw-r--r--chromium/v8/src/compiler/backend/instruction.h2
-rw-r--r--chromium/v8/src/compiler/backend/mips/code-generator-mips.cc102
-rw-r--r--chromium/v8/src/compiler/backend/mips/instruction-codes-mips.h19
-rw-r--r--chromium/v8/src/compiler/backend/mips/instruction-scheduler-mips.cc19
-rw-r--r--chromium/v8/src/compiler/backend/mips/instruction-selector-mips.cc39
-rw-r--r--chromium/v8/src/compiler/backend/mips64/code-generator-mips64.cc102
-rw-r--r--chromium/v8/src/compiler/backend/mips64/instruction-codes-mips64.h19
-rw-r--r--chromium/v8/src/compiler/backend/mips64/instruction-scheduler-mips64.cc19
-rw-r--r--chromium/v8/src/compiler/backend/mips64/instruction-selector-mips64.cc39
-rw-r--r--chromium/v8/src/compiler/backend/ppc/code-generator-ppc.cc683
-rw-r--r--chromium/v8/src/compiler/backend/ppc/instruction-codes-ppc.h93
-rw-r--r--chromium/v8/src/compiler/backend/ppc/instruction-scheduler-ppc.cc93
-rw-r--r--chromium/v8/src/compiler/backend/ppc/instruction-selector-ppc.cc307
-rw-r--r--chromium/v8/src/compiler/backend/register-allocator.cc47
-rw-r--r--chromium/v8/src/compiler/backend/register-allocator.h11
-rw-r--r--chromium/v8/src/compiler/backend/s390/code-generator-s390.cc133
-rw-r--r--chromium/v8/src/compiler/backend/s390/instruction-codes-s390.h31
-rw-r--r--chromium/v8/src/compiler/backend/s390/instruction-scheduler-s390.cc31
-rw-r--r--chromium/v8/src/compiler/backend/s390/instruction-selector-s390.cc47
-rw-r--r--chromium/v8/src/compiler/backend/x64/code-generator-x64.cc194
-rw-r--r--chromium/v8/src/compiler/backend/x64/instruction-codes-x64.h19
-rw-r--r--chromium/v8/src/compiler/backend/x64/instruction-scheduler-x64.cc19
-rw-r--r--chromium/v8/src/compiler/backend/x64/instruction-selector-x64.cc53
-rw-r--r--chromium/v8/src/compiler/basic-block-instrumentor.cc74
-rw-r--r--chromium/v8/src/compiler/basic-block-instrumentor.h6
-rw-r--r--chromium/v8/src/compiler/bytecode-graph-builder.cc319
-rw-r--r--chromium/v8/src/compiler/bytecode-graph-builder.h1
-rw-r--r--chromium/v8/src/compiler/code-assembler.cc5
-rw-r--r--chromium/v8/src/compiler/code-assembler.h3
-rw-r--r--chromium/v8/src/compiler/effect-control-linearizer.cc16
-rw-r--r--chromium/v8/src/compiler/globals.h13
-rw-r--r--chromium/v8/src/compiler/graph-assembler.cc106
-rw-r--r--chromium/v8/src/compiler/graph-assembler.h12
-rw-r--r--chromium/v8/src/compiler/graph-visualizer.cc121
-rw-r--r--chromium/v8/src/compiler/graph-visualizer.h26
-rw-r--r--chromium/v8/src/compiler/js-call-reducer.cc200
-rw-r--r--chromium/v8/src/compiler/js-create-lowering.cc31
-rw-r--r--chromium/v8/src/compiler/js-generic-lowering.cc739
-rw-r--r--chromium/v8/src/compiler/js-generic-lowering.h18
-rw-r--r--chromium/v8/src/compiler/js-heap-broker.cc12
-rw-r--r--chromium/v8/src/compiler/js-heap-broker.h29
-rw-r--r--chromium/v8/src/compiler/js-heap-copy-reducer.cc44
-rw-r--r--chromium/v8/src/compiler/js-inlining.cc7
-rw-r--r--chromium/v8/src/compiler/js-native-context-specialization.cc17
-rw-r--r--chromium/v8/src/compiler/js-operator.cc206
-rw-r--r--chromium/v8/src/compiler/js-operator.h169
-rw-r--r--chromium/v8/src/compiler/js-type-hint-lowering.cc27
-rw-r--r--chromium/v8/src/compiler/js-type-hint-lowering.h1
-rw-r--r--chromium/v8/src/compiler/js-typed-lowering.cc125
-rw-r--r--chromium/v8/src/compiler/linkage.cc17
-rw-r--r--chromium/v8/src/compiler/linkage.h21
-rw-r--r--chromium/v8/src/compiler/load-elimination.h2
-rw-r--r--chromium/v8/src/compiler/machine-graph-verifier.cc46
-rw-r--r--chromium/v8/src/compiler/machine-graph.cc5
-rw-r--r--chromium/v8/src/compiler/machine-graph.h1
-rw-r--r--chromium/v8/src/compiler/machine-operator-reducer.cc173
-rw-r--r--chromium/v8/src/compiler/machine-operator-reducer.h1
-rw-r--r--chromium/v8/src/compiler/machine-operator.cc25
-rw-r--r--chromium/v8/src/compiler/machine-operator.h25
-rw-r--r--chromium/v8/src/compiler/memory-lowering.h1
-rw-r--r--chromium/v8/src/compiler/node-matchers.h2
-rw-r--r--chromium/v8/src/compiler/node.h10
-rw-r--r--chromium/v8/src/compiler/opcodes.cc2
-rw-r--r--chromium/v8/src/compiler/opcodes.h119
-rw-r--r--chromium/v8/src/compiler/operator-properties.cc2
-rw-r--r--chromium/v8/src/compiler/pipeline-statistics.cc4
-rw-r--r--chromium/v8/src/compiler/pipeline.cc365
-rw-r--r--chromium/v8/src/compiler/representation-change.cc68
-rw-r--r--chromium/v8/src/compiler/representation-change.h9
-rw-r--r--chromium/v8/src/compiler/schedule.cc2
-rw-r--r--chromium/v8/src/compiler/scheduler.cc4
-rw-r--r--chromium/v8/src/compiler/simd-scalar-lowering.cc95
-rw-r--r--chromium/v8/src/compiler/simd-scalar-lowering.h1
-rw-r--r--chromium/v8/src/compiler/simplified-lowering.cc428
-rw-r--r--chromium/v8/src/compiler/simplified-operator.cc44
-rw-r--r--chromium/v8/src/compiler/simplified-operator.h2
-rw-r--r--chromium/v8/src/compiler/typed-optimization.cc2
-rw-r--r--chromium/v8/src/compiler/typer.cc81
-rw-r--r--chromium/v8/src/compiler/types.cc4
-rw-r--r--chromium/v8/src/compiler/wasm-compiler.cc456
-rw-r--r--chromium/v8/src/compiler/wasm-compiler.h24
-rw-r--r--chromium/v8/src/d8/cov.cc74
-rw-r--r--chromium/v8/src/d8/cov.h15
-rw-r--r--chromium/v8/src/d8/d8-platforms.cc2
-rw-r--r--chromium/v8/src/d8/d8-posix.cc16
-rw-r--r--chromium/v8/src/d8/d8.cc579
-rw-r--r--chromium/v8/src/d8/d8.h25
-rw-r--r--chromium/v8/src/debug/debug-coverage.cc4
-rw-r--r--chromium/v8/src/debug/debug-evaluate.cc35
-rw-r--r--chromium/v8/src/debug/debug-evaluate.h2
-rw-r--r--chromium/v8/src/debug/debug-frames.cc1
-rw-r--r--chromium/v8/src/debug/debug-interface.h10
-rw-r--r--chromium/v8/src/debug/debug-scopes.cc96
-rw-r--r--chromium/v8/src/debug/debug-scopes.h12
-rw-r--r--chromium/v8/src/debug/debug.cc7
-rw-r--r--chromium/v8/src/debug/liveedit.cc20
-rw-r--r--chromium/v8/src/debug/wasm/gdb-server/wasm-module-debug.cc3
-rw-r--r--chromium/v8/src/deoptimizer/OWNERS7
-rw-r--r--chromium/v8/src/deoptimizer/arm64/deoptimizer-arm64.cc2
-rw-r--r--chromium/v8/src/deoptimizer/deoptimize-reason.h2
-rw-r--r--chromium/v8/src/deoptimizer/deoptimizer.cc25
-rw-r--r--chromium/v8/src/diagnostics/arm/disasm-arm.cc65
-rw-r--r--chromium/v8/src/diagnostics/arm64/disasm-arm64.cc12
-rw-r--r--chromium/v8/src/diagnostics/basic-block-profiler.cc142
-rw-r--r--chromium/v8/src/diagnostics/basic-block-profiler.h85
-rw-r--r--chromium/v8/src/diagnostics/code-tracer.h24
-rw-r--r--chromium/v8/src/diagnostics/ia32/disasm-ia32.cc39
-rw-r--r--chromium/v8/src/diagnostics/objects-debug.cc78
-rw-r--r--chromium/v8/src/diagnostics/objects-printer.cc134
-rw-r--r--chromium/v8/src/diagnostics/ppc/disasm-ppc.cc81
-rw-r--r--chromium/v8/src/diagnostics/x64/disasm-x64.cc24
-rw-r--r--chromium/v8/src/execution/arm/simulator-arm.cc48
-rw-r--r--chromium/v8/src/execution/arm64/pointer-auth-arm64.cc4
-rw-r--r--chromium/v8/src/execution/arm64/pointer-authentication-arm64.h33
-rw-r--r--chromium/v8/src/execution/arm64/simulator-arm64.cc11
-rw-r--r--chromium/v8/src/execution/arm64/simulator-arm64.h9
-rw-r--r--chromium/v8/src/execution/arm64/simulator-logic-arm64.cc59
-rw-r--r--chromium/v8/src/execution/frames-inl.h4
-rw-r--r--chromium/v8/src/execution/frames.cc12
-rw-r--r--chromium/v8/src/execution/frames.h6
-rw-r--r--chromium/v8/src/execution/futex-emulation.cc32
-rw-r--r--chromium/v8/src/execution/futex-emulation.h21
-rw-r--r--chromium/v8/src/execution/isolate.cc315
-rw-r--r--chromium/v8/src/execution/isolate.h7
-rw-r--r--chromium/v8/src/execution/local-isolate-wrapper-inl.h148
-rw-r--r--chromium/v8/src/execution/local-isolate-wrapper.h85
-rw-r--r--chromium/v8/src/execution/messages.cc45
-rw-r--r--chromium/v8/src/execution/messages.h42
-rw-r--r--chromium/v8/src/execution/microtask-queue.h2
-rw-r--r--chromium/v8/src/execution/off-thread-isolate-inl.h8
-rw-r--r--chromium/v8/src/execution/off-thread-isolate.cc107
-rw-r--r--chromium/v8/src/execution/off-thread-isolate.h11
-rw-r--r--chromium/v8/src/execution/s390/simulator-s390.cc92
-rw-r--r--chromium/v8/src/execution/simulator.h4
-rw-r--r--chromium/v8/src/execution/stack-guard.cc7
-rw-r--r--chromium/v8/src/extensions/cputracemark-extension.cc4
-rw-r--r--chromium/v8/src/flags/flag-definitions.h86
-rw-r--r--chromium/v8/src/handles/global-handles.cc2
-rw-r--r--chromium/v8/src/handles/handles-inl.h17
-rw-r--r--chromium/v8/src/handles/handles.h13
-rw-r--r--chromium/v8/src/handles/off-thread-transfer-handle-storage-inl.h77
-rw-r--r--chromium/v8/src/handles/off-thread-transfer-handle-storage.h47
-rw-r--r--chromium/v8/src/heap/allocation-stats.h117
-rw-r--r--chromium/v8/src/heap/base-space.cc33
-rw-r--r--chromium/v8/src/heap/base-space.h81
-rw-r--r--chromium/v8/src/heap/base/asm/arm/push_registers_asm.cc (renamed from chromium/v8/src/heap/cppgc/asm/arm/push_registers_asm.cc)0
-rw-r--r--chromium/v8/src/heap/base/asm/arm64/push_registers_asm.cc (renamed from chromium/v8/src/heap/cppgc/asm/arm64/push_registers_asm.cc)0
-rw-r--r--chromium/v8/src/heap/base/asm/arm64/push_registers_masm.S (renamed from chromium/v8/src/heap/cppgc/asm/arm64/push_registers_masm.S)0
-rw-r--r--chromium/v8/src/heap/base/asm/ia32/push_registers_asm.cc (renamed from chromium/v8/src/heap/cppgc/asm/ia32/push_registers_asm.cc)0
-rw-r--r--chromium/v8/src/heap/base/asm/ia32/push_registers_masm.S (renamed from chromium/v8/src/heap/cppgc/asm/ia32/push_registers_masm.S)0
-rw-r--r--chromium/v8/src/heap/base/asm/mips/push_registers_asm.cc (renamed from chromium/v8/src/heap/cppgc/asm/mips/push_registers_asm.cc)0
-rw-r--r--chromium/v8/src/heap/base/asm/mips64/push_registers_asm.cc (renamed from chromium/v8/src/heap/cppgc/asm/mips64/push_registers_asm.cc)0
-rw-r--r--chromium/v8/src/heap/base/asm/ppc/push_registers_asm.cc (renamed from chromium/v8/src/heap/cppgc/asm/ppc/push_registers_asm.cc)0
-rw-r--r--chromium/v8/src/heap/base/asm/s390/push_registers_asm.cc (renamed from chromium/v8/src/heap/cppgc/asm/s390/push_registers_asm.cc)0
-rw-r--r--chromium/v8/src/heap/base/asm/x64/push_registers_asm.cc (renamed from chromium/v8/src/heap/cppgc/asm/x64/push_registers_asm.cc)0
-rw-r--r--chromium/v8/src/heap/base/asm/x64/push_registers_masm.S (renamed from chromium/v8/src/heap/cppgc/asm/x64/push_registers_masm.S)0
-rw-r--r--chromium/v8/src/heap/base/stack.cc (renamed from chromium/v8/src/heap/cppgc/stack.cc)10
-rw-r--r--chromium/v8/src/heap/base/stack.h (renamed from chromium/v8/src/heap/cppgc/stack.h)14
-rw-r--r--chromium/v8/src/heap/basic-memory-chunk.cc38
-rw-r--r--chromium/v8/src/heap/basic-memory-chunk.h231
-rw-r--r--chromium/v8/src/heap/code-object-registry.cc75
-rw-r--r--chromium/v8/src/heap/code-object-registry.h38
-rw-r--r--chromium/v8/src/heap/code-stats.cc2
-rw-r--r--chromium/v8/src/heap/combined-heap.cc3
-rw-r--r--chromium/v8/src/heap/combined-heap.h2
-rw-r--r--chromium/v8/src/heap/concurrent-allocator-inl.h18
-rw-r--r--chromium/v8/src/heap/concurrent-allocator.cc78
-rw-r--r--chromium/v8/src/heap/concurrent-allocator.h20
-rw-r--r--chromium/v8/src/heap/concurrent-marking.cc6
-rw-r--r--chromium/v8/src/heap/cppgc-js/cpp-heap.cc141
-rw-r--r--chromium/v8/src/heap/cppgc-js/cpp-heap.h42
-rw-r--r--chromium/v8/src/heap/cppgc/allocation.cc16
-rw-r--r--chromium/v8/src/heap/cppgc/caged-heap-local-data.cc36
-rw-r--r--chromium/v8/src/heap/cppgc/caged-heap.cc85
-rw-r--r--chromium/v8/src/heap/cppgc/caged-heap.h53
-rw-r--r--chromium/v8/src/heap/cppgc/free-list.cc9
-rw-r--r--chromium/v8/src/heap/cppgc/garbage-collector.h56
-rw-r--r--chromium/v8/src/heap/cppgc/gc-info-table.cc5
-rw-r--r--chromium/v8/src/heap/cppgc/gc-info-table.h3
-rw-r--r--chromium/v8/src/heap/cppgc/gc-info.cc5
-rw-r--r--chromium/v8/src/heap/cppgc/gc-invoker.cc105
-rw-r--r--chromium/v8/src/heap/cppgc/gc-invoker.h47
-rw-r--r--chromium/v8/src/heap/cppgc/globals.h7
-rw-r--r--chromium/v8/src/heap/cppgc/heap-base.cc88
-rw-r--r--chromium/v8/src/heap/cppgc/heap-base.h151
-rw-r--r--chromium/v8/src/heap/cppgc/heap-growing.cc99
-rw-r--r--chromium/v8/src/heap/cppgc/heap-growing.h53
-rw-r--r--chromium/v8/src/heap/cppgc/heap-inl.h33
-rw-r--r--chromium/v8/src/heap/cppgc/heap-object-header-inl.h5
-rw-r--r--chromium/v8/src/heap/cppgc/heap-object-header.h3
-rw-r--r--chromium/v8/src/heap/cppgc/heap-page-inl.h30
-rw-r--r--chromium/v8/src/heap/cppgc/heap-page.cc141
-rw-r--r--chromium/v8/src/heap/cppgc/heap-page.h56
-rw-r--r--chromium/v8/src/heap/cppgc/heap-space.cc20
-rw-r--r--chromium/v8/src/heap/cppgc/heap-space.h5
-rw-r--r--chromium/v8/src/heap/cppgc/heap.cc110
-rw-r--r--chromium/v8/src/heap/cppgc/heap.h134
-rw-r--r--chromium/v8/src/heap/cppgc/marker.cc140
-rw-r--r--chromium/v8/src/heap/cppgc/marker.h90
-rw-r--r--chromium/v8/src/heap/cppgc/marking-visitor.cc67
-rw-r--r--chromium/v8/src/heap/cppgc/marking-visitor.h22
-rw-r--r--chromium/v8/src/heap/cppgc/object-allocator-inl.h4
-rw-r--r--chromium/v8/src/heap/cppgc/object-allocator.cc145
-rw-r--r--chromium/v8/src/heap/cppgc/object-allocator.h41
-rw-r--r--chromium/v8/src/heap/cppgc/object-start-bitmap-inl.h1
-rw-r--r--chromium/v8/src/heap/cppgc/page-memory-inl.h10
-rw-r--r--chromium/v8/src/heap/cppgc/page-memory.h16
-rw-r--r--chromium/v8/src/heap/cppgc/persistent-node.cc12
-rw-r--r--chromium/v8/src/heap/cppgc/platform.cc10
-rw-r--r--chromium/v8/src/heap/cppgc/pointer-policies.cc4
-rw-r--r--chromium/v8/src/heap/cppgc/prefinalizer-handler.cc19
-rw-r--r--chromium/v8/src/heap/cppgc/prefinalizer-handler.h2
-rw-r--r--chromium/v8/src/heap/cppgc/process-heap.cc13
-rw-r--r--chromium/v8/src/heap/cppgc/raw-heap.cc2
-rw-r--r--chromium/v8/src/heap/cppgc/raw-heap.h10
-rw-r--r--chromium/v8/src/heap/cppgc/stats-collector.cc114
-rw-r--r--chromium/v8/src/heap/cppgc/stats-collector.h130
-rw-r--r--chromium/v8/src/heap/cppgc/sweeper.cc479
-rw-r--r--chromium/v8/src/heap/cppgc/sweeper.h7
-rw-r--r--chromium/v8/src/heap/cppgc/task-handle.h47
-rw-r--r--chromium/v8/src/heap/cppgc/virtual-memory.cc56
-rw-r--r--chromium/v8/src/heap/cppgc/virtual-memory.h60
-rw-r--r--chromium/v8/src/heap/cppgc/visitor.cc76
-rw-r--r--chromium/v8/src/heap/cppgc/visitor.h34
-rw-r--r--chromium/v8/src/heap/cppgc/write-barrier.cc84
-rw-r--r--chromium/v8/src/heap/factory-base.cc2
-rw-r--r--chromium/v8/src/heap/factory.cc81
-rw-r--r--chromium/v8/src/heap/factory.h13
-rw-r--r--chromium/v8/src/heap/finalization-registry-cleanup-task.h3
-rw-r--r--chromium/v8/src/heap/free-list-inl.h36
-rw-r--r--chromium/v8/src/heap/free-list.cc596
-rw-r--r--chromium/v8/src/heap/free-list.h520
-rw-r--r--chromium/v8/src/heap/heap-inl.h30
-rw-r--r--chromium/v8/src/heap/heap.cc274
-rw-r--r--chromium/v8/src/heap/heap.h54
-rw-r--r--chromium/v8/src/heap/incremental-marking.cc74
-rw-r--r--chromium/v8/src/heap/incremental-marking.h13
-rw-r--r--chromium/v8/src/heap/large-spaces.cc13
-rw-r--r--chromium/v8/src/heap/list.h12
-rw-r--r--chromium/v8/src/heap/local-allocator.h2
-rw-r--r--chromium/v8/src/heap/local-heap.cc8
-rw-r--r--chromium/v8/src/heap/local-heap.h27
-rw-r--r--chromium/v8/src/heap/mark-compact-inl.h6
-rw-r--r--chromium/v8/src/heap/mark-compact.cc71
-rw-r--r--chromium/v8/src/heap/mark-compact.h45
-rw-r--r--chromium/v8/src/heap/marking-visitor.h11
-rw-r--r--chromium/v8/src/heap/memory-allocator.cc778
-rw-r--r--chromium/v8/src/heap/memory-allocator.h451
-rw-r--r--chromium/v8/src/heap/memory-chunk.cc307
-rw-r--r--chromium/v8/src/heap/memory-chunk.h197
-rw-r--r--chromium/v8/src/heap/memory-measurement.cc16
-rw-r--r--chromium/v8/src/heap/memory-measurement.h3
-rw-r--r--chromium/v8/src/heap/new-spaces-inl.h179
-rw-r--r--chromium/v8/src/heap/new-spaces.cc653
-rw-r--r--chromium/v8/src/heap/new-spaces.h501
-rw-r--r--chromium/v8/src/heap/object-stats.cc3
-rw-r--r--chromium/v8/src/heap/off-thread-heap.cc191
-rw-r--r--chromium/v8/src/heap/off-thread-heap.h25
-rw-r--r--chromium/v8/src/heap/paged-spaces-inl.h208
-rw-r--r--chromium/v8/src/heap/paged-spaces.cc1047
-rw-r--r--chromium/v8/src/heap/paged-spaces.h588
-rw-r--r--chromium/v8/src/heap/read-only-heap.cc30
-rw-r--r--chromium/v8/src/heap/read-only-heap.h5
-rw-r--r--chromium/v8/src/heap/read-only-spaces.cc437
-rw-r--r--chromium/v8/src/heap/read-only-spaces.h90
-rw-r--r--chromium/v8/src/heap/remembered-set-inl.h446
-rw-r--r--chromium/v8/src/heap/remembered-set.h406
-rw-r--r--chromium/v8/src/heap/safepoint.cc26
-rw-r--r--chromium/v8/src/heap/safepoint.h8
-rw-r--r--chromium/v8/src/heap/scavenger-inl.h4
-rw-r--r--chromium/v8/src/heap/scavenger.cc3
-rw-r--r--chromium/v8/src/heap/setup-heap-internal.cc20
-rw-r--r--chromium/v8/src/heap/spaces-inl.h405
-rw-r--r--chromium/v8/src/heap/spaces.cc3441
-rw-r--r--chromium/v8/src/heap/spaces.h2276
-rw-r--r--chromium/v8/src/heap/sweeper.cc26
-rw-r--r--chromium/v8/src/heap/sweeper.h5
-rw-r--r--chromium/v8/src/ic/accessor-assembler.cc34
-rw-r--r--chromium/v8/src/ic/binary-op-assembler.cc13
-rw-r--r--chromium/v8/src/ic/binary-op-assembler.h66
-rw-r--r--chromium/v8/src/ic/ic.cc28
-rw-r--r--chromium/v8/src/ic/unary-op-assembler.cc283
-rw-r--r--chromium/v8/src/ic/unary-op-assembler.h45
-rw-r--r--chromium/v8/src/init/bootstrapper.cc48
-rw-r--r--chromium/v8/src/init/heap-symbols.h6
-rw-r--r--chromium/v8/src/init/v8.cc4
-rw-r--r--chromium/v8/src/inspector/v8-console.cc2
-rw-r--r--chromium/v8/src/inspector/v8-debugger-agent-impl.cc3
-rw-r--r--chromium/v8/src/inspector/v8-debugger-script.cc13
-rw-r--r--chromium/v8/src/inspector/v8-inspector-impl.cc20
-rw-r--r--chromium/v8/src/inspector/v8-runtime-agent-impl.cc7
-rw-r--r--chromium/v8/src/inspector/v8-runtime-agent-impl.h2
-rw-r--r--chromium/v8/src/inspector/value-mirror.cc213
-rw-r--r--chromium/v8/src/interpreter/bytecode-array-writer.cc2
-rw-r--r--chromium/v8/src/interpreter/bytecode-generator.cc3
-rw-r--r--chromium/v8/src/interpreter/interpreter-assembler.cc5
-rw-r--r--chromium/v8/src/interpreter/interpreter-generator.cc333
-rw-r--r--chromium/v8/src/interpreter/interpreter.cc4
-rw-r--r--chromium/v8/src/json/json-parser.cc4
-rw-r--r--chromium/v8/src/json/json-parser.h3
-rw-r--r--chromium/v8/src/libplatform/default-foreground-task-runner.h3
-rw-r--r--chromium/v8/src/libplatform/default-platform.cc7
-rw-r--r--chromium/v8/src/libsampler/sampler.h2
-rw-r--r--chromium/v8/src/logging/counters-inl.h1
-rw-r--r--chromium/v8/src/logging/counters.cc5
-rw-r--r--chromium/v8/src/logging/counters.h28
-rw-r--r--chromium/v8/src/logging/log-utils.h2
-rw-r--r--chromium/v8/src/logging/log.h4
-rw-r--r--chromium/v8/src/logging/off-thread-logger.h1
-rw-r--r--chromium/v8/src/logging/tracing-flags.cc17
-rw-r--r--chromium/v8/src/logging/tracing-flags.h50
-rw-r--r--chromium/v8/src/objects/api-callbacks.tq1
-rw-r--r--chromium/v8/src/objects/arguments-inl.h32
-rw-r--r--chromium/v8/src/objects/arguments.h43
-rw-r--r--chromium/v8/src/objects/arguments.tq109
-rw-r--r--chromium/v8/src/objects/backing-store.cc18
-rw-r--r--chromium/v8/src/objects/bigint.cc2
-rw-r--r--chromium/v8/src/objects/class-definitions-tq-deps-inl.h1
-rw-r--r--chromium/v8/src/objects/code.cc3
-rw-r--r--chromium/v8/src/objects/compilation-cache.h2
-rw-r--r--chromium/v8/src/objects/contexts.cc16
-rw-r--r--chromium/v8/src/objects/contexts.h3
-rw-r--r--chromium/v8/src/objects/contexts.tq5
-rw-r--r--chromium/v8/src/objects/debug-objects.cc7
-rw-r--r--chromium/v8/src/objects/debug-objects.h1
-rw-r--r--chromium/v8/src/objects/debug-objects.tq4
-rw-r--r--chromium/v8/src/objects/descriptor-array-inl.h12
-rw-r--r--chromium/v8/src/objects/descriptor-array.h12
-rw-r--r--chromium/v8/src/objects/elements.cc54
-rw-r--r--chromium/v8/src/objects/feedback-vector-inl.h63
-rw-r--r--chromium/v8/src/objects/feedback-vector.cc47
-rw-r--r--chromium/v8/src/objects/feedback-vector.h18
-rw-r--r--chromium/v8/src/objects/fixed-array-inl.h23
-rw-r--r--chromium/v8/src/objects/fixed-array.h3
-rw-r--r--chromium/v8/src/objects/fixed-array.tq5
-rw-r--r--chromium/v8/src/objects/frame-array.h3
-rw-r--r--chromium/v8/src/objects/heap-object.h2
-rw-r--r--chromium/v8/src/objects/heap-object.tq1
-rw-r--r--chromium/v8/src/objects/instance-type-inl.h8
-rw-r--r--chromium/v8/src/objects/instance-type.h5
-rw-r--r--chromium/v8/src/objects/internal-index.h4
-rw-r--r--chromium/v8/src/objects/intl-objects.cc10
-rw-r--r--chromium/v8/src/objects/intl-objects.h5
-rw-r--r--chromium/v8/src/objects/js-aggregate-error-inl.h25
-rw-r--r--chromium/v8/src/objects/js-aggregate-error.h27
-rw-r--r--chromium/v8/src/objects/js-aggregate-error.tq81
-rw-r--r--chromium/v8/src/objects/js-array.h1
-rw-r--r--chromium/v8/src/objects/js-array.tq20
-rw-r--r--chromium/v8/src/objects/js-collator.cc2
-rw-r--r--chromium/v8/src/objects/js-collection.h2
-rw-r--r--chromium/v8/src/objects/js-date-time-format.cc312
-rw-r--r--chromium/v8/src/objects/js-number-format.cc12
-rw-r--r--chromium/v8/src/objects/js-objects-inl.h8
-rw-r--r--chromium/v8/src/objects/js-objects.cc36
-rw-r--r--chromium/v8/src/objects/js-objects.h9
-rw-r--r--chromium/v8/src/objects/js-regexp.h43
-rw-r--r--chromium/v8/src/objects/js-regexp.tq11
-rw-r--r--chromium/v8/src/objects/js-relative-time-format.cc6
-rw-r--r--chromium/v8/src/objects/keys.cc2
-rw-r--r--chromium/v8/src/objects/lookup.cc13
-rw-r--r--chromium/v8/src/objects/map-inl.h4
-rw-r--r--chromium/v8/src/objects/map-updater.cc12
-rw-r--r--chromium/v8/src/objects/map.cc10
-rw-r--r--chromium/v8/src/objects/map.h6
-rw-r--r--chromium/v8/src/objects/map.tq13
-rw-r--r--chromium/v8/src/objects/maybe-object-inl.h8
-rw-r--r--chromium/v8/src/objects/maybe-object.h4
-rw-r--r--chromium/v8/src/objects/module-inl.h5
-rw-r--r--chromium/v8/src/objects/name.tq21
-rw-r--r--chromium/v8/src/objects/object-list-macros.h4
-rw-r--r--chromium/v8/src/objects/objects-body-descriptors-inl.h5
-rw-r--r--chromium/v8/src/objects/objects-definitions.h1
-rw-r--r--chromium/v8/src/objects/objects-inl.h4
-rw-r--r--chromium/v8/src/objects/objects.cc129
-rw-r--r--chromium/v8/src/objects/oddball.h1
-rw-r--r--chromium/v8/src/objects/ordered-hash-table.cc21
-rw-r--r--chromium/v8/src/objects/ordered-hash-table.h7
-rw-r--r--chromium/v8/src/objects/property-descriptor.cc18
-rw-r--r--chromium/v8/src/objects/prototype-info-inl.h14
-rw-r--r--chromium/v8/src/objects/prototype-info.h37
-rw-r--r--chromium/v8/src/objects/prototype-info.tq32
-rw-r--r--chromium/v8/src/objects/regexp-match-info.tq1
-rw-r--r--chromium/v8/src/objects/scope-info.cc46
-rw-r--r--chromium/v8/src/objects/scope-info.h22
-rw-r--r--chromium/v8/src/objects/scope-info.tq2
-rw-r--r--chromium/v8/src/objects/script-inl.h42
-rw-r--r--chromium/v8/src/objects/script.h54
-rw-r--r--chromium/v8/src/objects/script.tq52
-rw-r--r--chromium/v8/src/objects/shared-function-info-inl.h11
-rw-r--r--chromium/v8/src/objects/shared-function-info.tq9
-rw-r--r--chromium/v8/src/objects/smi.h14
-rw-r--r--chromium/v8/src/objects/source-text-module.h4
-rw-r--r--chromium/v8/src/objects/source-text-module.tq7
-rw-r--r--chromium/v8/src/objects/stack-frame-info-inl.h41
-rw-r--r--chromium/v8/src/objects/stack-frame-info.cc18
-rw-r--r--chromium/v8/src/objects/stack-frame-info.h43
-rw-r--r--chromium/v8/src/objects/stack-frame-info.tq21
-rw-r--r--chromium/v8/src/objects/string-table.h2
-rw-r--r--chromium/v8/src/objects/string.cc116
-rw-r--r--chromium/v8/src/objects/string.h15
-rw-r--r--chromium/v8/src/objects/string.tq17
-rw-r--r--chromium/v8/src/objects/tagged-impl.h5
-rw-r--r--chromium/v8/src/objects/tagged-index.h8
-rw-r--r--chromium/v8/src/objects/template.tq59
-rw-r--r--chromium/v8/src/objects/templates-inl.h45
-rw-r--r--chromium/v8/src/objects/templates.h33
-rw-r--r--chromium/v8/src/objects/transitions.cc83
-rw-r--r--chromium/v8/src/objects/transitions.h4
-rw-r--r--chromium/v8/src/objects/type-hints.cc2
-rw-r--r--chromium/v8/src/objects/type-hints.h1
-rw-r--r--chromium/v8/src/parsing/parser-base.h26
-rw-r--r--chromium/v8/src/parsing/parser.cc10
-rw-r--r--chromium/v8/src/parsing/parser.h10
-rw-r--r--chromium/v8/src/parsing/parsing.cc24
-rw-r--r--chromium/v8/src/parsing/parsing.h29
-rw-r--r--chromium/v8/src/parsing/pending-compilation-error-handler.cc12
-rw-r--r--chromium/v8/src/parsing/pending-compilation-error-handler.h13
-rw-r--r--chromium/v8/src/parsing/preparse-data-impl.h2
-rw-r--r--chromium/v8/src/parsing/preparser.cc4
-rw-r--r--chromium/v8/src/parsing/preparser.h4
-rw-r--r--chromium/v8/src/parsing/scanner-inl.h2
-rw-r--r--chromium/v8/src/parsing/scanner.cc37
-rw-r--r--chromium/v8/src/parsing/scanner.h14
-rw-r--r--chromium/v8/src/parsing/token.h4
-rw-r--r--chromium/v8/src/profiler/cpu-profiler.cc4
-rw-r--r--chromium/v8/src/profiler/cpu-profiler.h2
-rw-r--r--chromium/v8/src/profiler/heap-snapshot-generator.cc4
-rw-r--r--chromium/v8/src/profiler/profile-generator.cc2
-rw-r--r--chromium/v8/src/profiler/profile-generator.h5
-rw-r--r--chromium/v8/src/profiler/tick-sample.cc7
-rw-r--r--chromium/v8/src/profiler/tracing-cpu-profiler.cc5
-rw-r--r--chromium/v8/src/regexp/arm/regexp-macro-assembler-arm.cc7
-rw-r--r--chromium/v8/src/regexp/arm/regexp-macro-assembler-arm.h2
-rw-r--r--chromium/v8/src/regexp/arm64/regexp-macro-assembler-arm64.cc7
-rw-r--r--chromium/v8/src/regexp/arm64/regexp-macro-assembler-arm64.h2
-rw-r--r--chromium/v8/src/regexp/gen-regexp-special-case.cc5
-rw-r--r--chromium/v8/src/regexp/ia32/regexp-macro-assembler-ia32.cc7
-rw-r--r--chromium/v8/src/regexp/ia32/regexp-macro-assembler-ia32.h2
-rw-r--r--chromium/v8/src/regexp/mips/regexp-macro-assembler-mips.cc7
-rw-r--r--chromium/v8/src/regexp/mips/regexp-macro-assembler-mips.h2
-rw-r--r--chromium/v8/src/regexp/mips64/regexp-macro-assembler-mips64.cc7
-rw-r--r--chromium/v8/src/regexp/mips64/regexp-macro-assembler-mips64.h2
-rw-r--r--chromium/v8/src/regexp/ppc/regexp-macro-assembler-ppc.cc7
-rw-r--r--chromium/v8/src/regexp/ppc/regexp-macro-assembler-ppc.h2
-rw-r--r--chromium/v8/src/regexp/regexp-ast.h5
-rw-r--r--chromium/v8/src/regexp/regexp-bytecode-generator.cc10
-rw-r--r--chromium/v8/src/regexp/regexp-bytecode-generator.h1
-rw-r--r--chromium/v8/src/regexp/regexp-bytecode-peephole.cc19
-rw-r--r--chromium/v8/src/regexp/regexp-bytecodes.h16
-rw-r--r--chromium/v8/src/regexp/regexp-compiler-tonode.cc18
-rw-r--r--chromium/v8/src/regexp/regexp-compiler.cc139
-rw-r--r--chromium/v8/src/regexp/regexp-compiler.h4
-rw-r--r--chromium/v8/src/regexp/regexp-dotprinter.cc2
-rw-r--r--chromium/v8/src/regexp/regexp-interpreter.cc200
-rw-r--r--chromium/v8/src/regexp/regexp-macro-assembler-tracer.cc8
-rw-r--r--chromium/v8/src/regexp/regexp-macro-assembler-tracer.h1
-rw-r--r--chromium/v8/src/regexp/regexp-macro-assembler.cc41
-rw-r--r--chromium/v8/src/regexp/regexp-macro-assembler.h15
-rw-r--r--chromium/v8/src/regexp/regexp-parser.cc58
-rw-r--r--chromium/v8/src/regexp/regexp-parser.h7
-rw-r--r--chromium/v8/src/regexp/s390/regexp-macro-assembler-s390.cc7
-rw-r--r--chromium/v8/src/regexp/s390/regexp-macro-assembler-s390.h2
-rw-r--r--chromium/v8/src/regexp/x64/regexp-macro-assembler-x64.cc7
-rw-r--r--chromium/v8/src/regexp/x64/regexp-macro-assembler-x64.h1
-rw-r--r--chromium/v8/src/roots/roots-inl.h13
-rw-r--r--chromium/v8/src/roots/roots.h15
-rw-r--r--chromium/v8/src/runtime/runtime-debug.cc9
-rw-r--r--chromium/v8/src/runtime/runtime-internal.cc3
-rw-r--r--chromium/v8/src/runtime/runtime-object.cc13
-rw-r--r--chromium/v8/src/runtime/runtime-promise.cc12
-rw-r--r--chromium/v8/src/runtime/runtime-scopes.cc43
-rw-r--r--chromium/v8/src/runtime/runtime-test.cc162
-rw-r--r--chromium/v8/src/runtime/runtime-wasm.cc44
-rw-r--r--chromium/v8/src/runtime/runtime.cc6
-rw-r--r--chromium/v8/src/runtime/runtime.h7
-rw-r--r--chromium/v8/src/snapshot/code-serializer.cc70
-rw-r--r--chromium/v8/src/snapshot/context-deserializer.cc2
-rw-r--r--chromium/v8/src/snapshot/deserializer-allocator.cc48
-rw-r--r--chromium/v8/src/snapshot/deserializer-allocator.h9
-rw-r--r--chromium/v8/src/snapshot/deserializer.cc152
-rw-r--r--chromium/v8/src/snapshot/deserializer.h30
-rw-r--r--chromium/v8/src/snapshot/object-deserializer.cc106
-rw-r--r--chromium/v8/src/snapshot/object-deserializer.h6
-rw-r--r--chromium/v8/src/snapshot/read-only-deserializer.cc4
-rw-r--r--chromium/v8/src/snapshot/serializer-allocator.cc3
-rw-r--r--chromium/v8/src/snapshot/serializer.cc17
-rw-r--r--chromium/v8/src/snapshot/snapshot-utils.cc2
-rw-r--r--chromium/v8/src/snapshot/snapshot.cc24
-rw-r--r--chromium/v8/src/snapshot/snapshot.h1
-rw-r--r--chromium/v8/src/snapshot/startup-deserializer.cc2
-rw-r--r--chromium/v8/src/strings/uri.cc5
-rw-r--r--chromium/v8/src/torque/cfg.h2
-rw-r--r--chromium/v8/src/torque/constants.h2
-rw-r--r--chromium/v8/src/torque/csa-generator.cc47
-rw-r--r--chromium/v8/src/torque/declarable.cc19
-rw-r--r--chromium/v8/src/torque/declarable.h29
-rw-r--r--chromium/v8/src/torque/implementation-visitor.cc182
-rw-r--r--chromium/v8/src/torque/implementation-visitor.h31
-rw-r--r--chromium/v8/src/torque/instance-type-generator.cc55
-rw-r--r--chromium/v8/src/torque/instructions.h4
-rw-r--r--chromium/v8/src/torque/ls/message.h8
-rw-r--r--chromium/v8/src/torque/torque-compiler.cc1
-rw-r--r--chromium/v8/src/torque/torque-parser.cc85
-rw-r--r--chromium/v8/src/torque/type-visitor.cc10
-rw-r--r--chromium/v8/src/torque/types.h5
-rw-r--r--chromium/v8/src/torque/utils.h22
-rw-r--r--chromium/v8/src/tracing/trace-categories.h7
-rw-r--r--chromium/v8/src/tracing/tracing-category-observer.cc7
-rw-r--r--chromium/v8/src/trap-handler/handler-inside-posix.cc10
-rw-r--r--chromium/v8/src/utils/ostreams.cc4
-rw-r--r--chromium/v8/src/utils/ostreams.h9
-rw-r--r--chromium/v8/src/utils/pointer-with-payload.h16
-rw-r--r--chromium/v8/src/utils/vector.h29
-rw-r--r--chromium/v8/src/wasm/baseline/arm/liftoff-assembler-arm.h512
-rw-r--r--chromium/v8/src/wasm/baseline/arm64/liftoff-assembler-arm64.h667
-rw-r--r--chromium/v8/src/wasm/baseline/ia32/liftoff-assembler-ia32.h625
-rw-r--r--chromium/v8/src/wasm/baseline/liftoff-assembler.cc124
-rw-r--r--chromium/v8/src/wasm/baseline/liftoff-assembler.h104
-rw-r--r--chromium/v8/src/wasm/baseline/liftoff-compiler.cc370
-rw-r--r--chromium/v8/src/wasm/baseline/liftoff-compiler.h2
-rw-r--r--chromium/v8/src/wasm/baseline/mips/liftoff-assembler-mips.h206
-rw-r--r--chromium/v8/src/wasm/baseline/mips64/liftoff-assembler-mips64.h348
-rw-r--r--chromium/v8/src/wasm/baseline/ppc/liftoff-assembler-ppc.h174
-rw-r--r--chromium/v8/src/wasm/baseline/s390/liftoff-assembler-s390.h174
-rw-r--r--chromium/v8/src/wasm/baseline/x64/liftoff-assembler-x64.h630
-rw-r--r--chromium/v8/src/wasm/c-api.cc146
-rw-r--r--chromium/v8/src/wasm/c-api.h2
-rw-r--r--chromium/v8/src/wasm/decoder.h1
-rw-r--r--chromium/v8/src/wasm/function-body-decoder-impl.h2667
-rw-r--r--chromium/v8/src/wasm/function-body-decoder.cc40
-rw-r--r--chromium/v8/src/wasm/function-body-decoder.h9
-rw-r--r--chromium/v8/src/wasm/function-compiler.cc7
-rw-r--r--chromium/v8/src/wasm/function-compiler.h2
-rw-r--r--chromium/v8/src/wasm/graph-builder-interface.cc142
-rw-r--r--chromium/v8/src/wasm/local-decl-encoder.cc25
-rw-r--r--chromium/v8/src/wasm/memory-tracing.h6
-rw-r--r--chromium/v8/src/wasm/module-compiler.cc202
-rw-r--r--chromium/v8/src/wasm/module-compiler.h3
-rw-r--r--chromium/v8/src/wasm/module-decoder.cc296
-rw-r--r--chromium/v8/src/wasm/module-instantiate.cc112
-rw-r--r--chromium/v8/src/wasm/streaming-decoder.cc325
-rw-r--r--chromium/v8/src/wasm/streaming-decoder.h212
-rw-r--r--chromium/v8/src/wasm/struct-types.h36
-rw-r--r--chromium/v8/src/wasm/sync-streaming-decoder.cc112
-rw-r--r--chromium/v8/src/wasm/value-type.h330
-rw-r--r--chromium/v8/src/wasm/wasm-code-manager.cc65
-rw-r--r--chromium/v8/src/wasm/wasm-code-manager.h31
-rw-r--r--chromium/v8/src/wasm/wasm-constants.h22
-rw-r--r--chromium/v8/src/wasm/wasm-debug-evaluate.cc20
-rw-r--r--chromium/v8/src/wasm/wasm-debug-evaluate.h1
-rw-r--r--chromium/v8/src/wasm/wasm-debug.cc725
-rw-r--r--chromium/v8/src/wasm/wasm-debug.h20
-rw-r--r--chromium/v8/src/wasm/wasm-engine.cc85
-rw-r--r--chromium/v8/src/wasm/wasm-external-refs.cc122
-rw-r--r--chromium/v8/src/wasm/wasm-external-refs.h14
-rw-r--r--chromium/v8/src/wasm/wasm-feature-flags.h33
-rw-r--r--chromium/v8/src/wasm/wasm-interpreter.cc4456
-rw-r--r--chromium/v8/src/wasm/wasm-interpreter.h228
-rw-r--r--chromium/v8/src/wasm/wasm-js.cc216
-rw-r--r--chromium/v8/src/wasm/wasm-module-builder.cc25
-rw-r--r--chromium/v8/src/wasm/wasm-module.cc78
-rw-r--r--chromium/v8/src/wasm/wasm-module.h41
-rw-r--r--chromium/v8/src/wasm/wasm-objects-inl.h54
-rw-r--r--chromium/v8/src/wasm/wasm-objects.cc70
-rw-r--r--chromium/v8/src/wasm/wasm-objects.h88
-rw-r--r--chromium/v8/src/wasm/wasm-objects.tq15
-rw-r--r--chromium/v8/src/wasm/wasm-opcodes-inl.h631
-rw-r--r--chromium/v8/src/wasm/wasm-opcodes.cc592
-rw-r--r--chromium/v8/src/wasm/wasm-opcodes.h127
-rw-r--r--chromium/v8/src/wasm/wasm-subtyping.cc167
-rw-r--r--chromium/v8/src/wasm/wasm-subtyping.h42
-rw-r--r--chromium/v8/src/wasm/wasm-value.h2
-rw-r--r--chromium/v8/src/zone/OWNERS1
-rw-r--r--chromium/v8/src/zone/accounting-allocator.h26
-rw-r--r--chromium/v8/src/zone/zone.cc50
-rw-r--r--chromium/v8/src/zone/zone.h12
733 files changed, 34432 insertions, 25645 deletions
diff --git a/chromium/v8/src/api/api-natives.cc b/chromium/v8/src/api/api-natives.cc
index 410c37ce98d..e21dbd0eeed 100644
--- a/chromium/v8/src/api/api-natives.cc
+++ b/chromium/v8/src/api/api-natives.cc
@@ -371,7 +371,7 @@ MaybeHandle<JSObject> InstantiateObject(Isolate* isolate,
Handle<JSReceiver> new_target,
bool is_prototype) {
Handle<JSFunction> constructor;
- int serial_number = Smi::ToInt(info->serial_number());
+ int serial_number = info->serial_number();
if (!new_target.is_null()) {
if (IsSimpleInstantiation(isolate, *info, *new_target)) {
constructor = Handle<JSFunction>::cast(new_target);
@@ -462,7 +462,7 @@ MaybeHandle<Object> GetInstancePrototype(Isolate* isolate,
MaybeHandle<JSFunction> InstantiateFunction(
Isolate* isolate, Handle<NativeContext> native_context,
Handle<FunctionTemplateInfo> data, MaybeHandle<Name> maybe_name) {
- int serial_number = Smi::ToInt(data->serial_number());
+ int serial_number = data->serial_number();
if (serial_number) {
Handle<JSObject> result;
if (ProbeInstantiationsCache(isolate, native_context, serial_number,
diff --git a/chromium/v8/src/api/api.cc b/chromium/v8/src/api/api.cc
index 93780bceec4..49b1a1d1573 100644
--- a/chromium/v8/src/api/api.cc
+++ b/chromium/v8/src/api/api.cc
@@ -11,11 +11,10 @@
#include <utility> // For move
#include <vector>
-#include "src/api/api-inl.h"
-
#include "include/v8-fast-api-calls.h"
#include "include/v8-profiler.h"
#include "include/v8-util.h"
+#include "src/api/api-inl.h"
#include "src/api/api-natives.h"
#include "src/base/functional.h"
#include "src/base/logging.h"
@@ -58,6 +57,7 @@
#include "src/json/json-parser.h"
#include "src/json/json-stringifier.h"
#include "src/logging/counters.h"
+#include "src/logging/tracing-flags.h"
#include "src/numbers/conversions-inl.h"
#include "src/objects/api-callbacks.h"
#include "src/objects/contexts.h"
@@ -107,6 +107,7 @@
#include "src/utils/detachable-vector.h"
#include "src/utils/version.h"
#include "src/wasm/streaming-decoder.h"
+#include "src/wasm/value-type.h"
#include "src/wasm/wasm-engine.h"
#include "src/wasm/wasm-objects-inl.h"
#include "src/wasm/wasm-result.h"
@@ -503,7 +504,11 @@ void Utils::ReportOOMFailure(i::Isolate* isolate, const char* location,
if (fatal_callback == nullptr) {
base::OS::PrintError("\n#\n# Fatal %s OOM in %s\n#\n\n",
is_heap_oom ? "javascript" : "process", location);
+#ifdef V8_FUZZILLI
+ exit(0);
+#else
base::OS::Abort();
+#endif // V8_FUZZILLI
} else {
fatal_callback(location,
is_heap_oom
@@ -823,6 +828,8 @@ bool StartupData::CanBeRehashed() const {
return i::Snapshot::ExtractRehashability(this);
}
+bool StartupData::IsValid() const { return i::Snapshot::VersionIsValid(this); }
+
void V8::SetDcheckErrorHandler(DcheckErrorCallback that) {
v8::base::SetDcheckFunction(that);
}
@@ -1274,7 +1281,7 @@ void Context::SetAlignedPointerInEmbedderData(int index, void* value) {
static void InitializeTemplate(i::Handle<i::TemplateInfo> that, int type) {
that->set_number_of_properties(0);
- that->set_tag(i::Smi::FromInt(type));
+ that->set_tag(type);
}
void Template::Set(v8::Local<Name> name, v8::Local<Data> value,
@@ -1286,7 +1293,7 @@ void Template::Set(v8::Local<Name> name, v8::Local<Data> value,
auto value_obj = Utils::OpenHandle(*value);
CHECK(!value_obj->IsJSReceiver() || value_obj->IsTemplateInfo());
if (value_obj->IsObjectTemplateInfo()) {
- templ->set_serial_number(i::Smi::zero());
+ templ->set_serial_number(0);
if (templ->IsFunctionTemplateInfo()) {
i::Handle<i::FunctionTemplateInfo>::cast(templ)->set_do_not_cache(true);
}
@@ -1336,7 +1343,7 @@ Local<ObjectTemplate> FunctionTemplate::PrototypeTemplate() {
auto self = Utils::OpenHandle(this);
i::Isolate* i_isolate = self->GetIsolate();
ENTER_V8_NO_SCRIPT_NO_EXCEPTION(i_isolate);
- i::Handle<i::Object> result(self->GetPrototypeTemplate(), i_isolate);
+ i::Handle<i::HeapObject> result(self->GetPrototypeTemplate(), i_isolate);
if (result->IsUndefined(i_isolate)) {
// Do not cache prototype objects.
result = Utils::OpenHandle(
@@ -1351,7 +1358,8 @@ void FunctionTemplate::SetPrototypeProviderTemplate(
auto self = Utils::OpenHandle(this);
i::Isolate* i_isolate = self->GetIsolate();
ENTER_V8_NO_SCRIPT_NO_EXCEPTION(i_isolate);
- i::Handle<i::Object> result = Utils::OpenHandle(*prototype_provider);
+ i::Handle<i::FunctionTemplateInfo> result =
+ Utils::OpenHandle(*prototype_provider);
CHECK(self->GetPrototypeTemplate().IsUndefined(i_isolate));
CHECK(self->GetParentTemplate().IsUndefined(i_isolate));
i::FunctionTemplateInfo::SetPrototypeProviderTemplate(i_isolate, self,
@@ -1394,7 +1402,7 @@ static Local<FunctionTemplate> FunctionTemplateNew(
if (!do_not_cache) {
next_serial_number = isolate->heap()->GetNextTemplateSerialNumber();
}
- obj->set_serial_number(i::Smi::FromInt(next_serial_number));
+ obj->set_serial_number(next_serial_number);
}
if (callback != nullptr) {
Utils::ToLocal(obj)->SetCallHandler(callback, data, side_effect_type,
@@ -1605,15 +1613,19 @@ static Local<ObjectTemplate> ObjectTemplateNew(
i::OBJECT_TEMPLATE_INFO_TYPE, i::AllocationType::kOld);
i::Handle<i::ObjectTemplateInfo> obj =
i::Handle<i::ObjectTemplateInfo>::cast(struct_obj);
- InitializeTemplate(obj, Consts::OBJECT_TEMPLATE);
- int next_serial_number = 0;
- if (!do_not_cache) {
- next_serial_number = isolate->heap()->GetNextTemplateSerialNumber();
+ {
+ // Disallow GC until all fields of obj have acceptable types.
+ i::DisallowHeapAllocation no_gc;
+ InitializeTemplate(obj, Consts::OBJECT_TEMPLATE);
+ int next_serial_number = 0;
+ if (!do_not_cache) {
+ next_serial_number = isolate->heap()->GetNextTemplateSerialNumber();
+ }
+ obj->set_serial_number(next_serial_number);
+ obj->set_data(0);
}
- obj->set_serial_number(i::Smi::FromInt(next_serial_number));
if (!constructor.IsEmpty())
obj->set_constructor(*Utils::OpenHandle(*constructor));
- obj->set_data(i::Smi::zero());
return Utils::ToLocal(obj);
}
@@ -2234,6 +2246,28 @@ Local<UnboundModuleScript> Module::GetUnboundModuleScript() {
self->GetIsolate()));
}
+int Module::ScriptId() {
+ i::Handle<i::Module> self = Utils::OpenHandle(this);
+ Utils::ApiCheck(self->IsSourceTextModule(), "v8::Module::ScriptId",
+ "v8::Module::ScriptId must be used on an SourceTextModule");
+
+ // The SharedFunctionInfo is not available for errored modules.
+ Utils::ApiCheck(GetStatus() != kErrored, "v8::Module::ScriptId",
+ "v8::Module::ScriptId must not be used on an errored module");
+ i::Handle<i::SharedFunctionInfo> sfi(
+ i::Handle<i::SourceTextModule>::cast(self)->GetSharedFunctionInfo(),
+ self->GetIsolate());
+ return ToApiHandle<UnboundScript>(sfi)->GetId();
+}
+
+bool Module::IsSourceTextModule() const {
+ return Utils::OpenHandle(this)->IsSourceTextModule();
+}
+
+bool Module::IsSyntheticModule() const {
+ return Utils::OpenHandle(this)->IsSyntheticModule();
+}
+
int Module::GetIdentityHash() const { return Utils::OpenHandle(this)->hash(); }
Maybe<bool> Module::InstantiateModule(Local<Context> context,
@@ -5802,9 +5836,9 @@ static i::Handle<ObjectType> CreateEnvironment(
v8::Local<ObjectTemplate> proxy_template;
i::Handle<i::FunctionTemplateInfo> proxy_constructor;
i::Handle<i::FunctionTemplateInfo> global_constructor;
- i::Handle<i::Object> named_interceptor(
+ i::Handle<i::HeapObject> named_interceptor(
isolate->factory()->undefined_value());
- i::Handle<i::Object> indexed_interceptor(
+ i::Handle<i::HeapObject> indexed_interceptor(
isolate->factory()->undefined_value());
if (!maybe_global_template.IsEmpty()) {
@@ -8415,8 +8449,7 @@ void Isolate::GetHeapStatistics(HeapStatistics* heap_statistics) {
i::ReadOnlySpace* ro_space = heap->read_only_space();
heap_statistics->total_heap_size_ += ro_space->CommittedMemory();
heap_statistics->total_physical_size_ += ro_space->CommittedPhysicalMemory();
- heap_statistics->total_available_size_ += ro_space->Available();
- heap_statistics->used_heap_size_ += ro_space->SizeOfObjects();
+ heap_statistics->used_heap_size_ += ro_space->Size();
#endif // V8_SHARED_RO_HEAP
heap_statistics->total_heap_size_executable_ =
@@ -8450,18 +8483,26 @@ bool Isolate::GetHeapSpaceStatistics(HeapSpaceStatistics* space_statistics,
i::Isolate* isolate = reinterpret_cast<i::Isolate*>(this);
i::Heap* heap = isolate->heap();
- i::Space* space = heap->space(static_cast<int>(index));
i::AllocationSpace allocation_space = static_cast<i::AllocationSpace>(index);
- space_statistics->space_name_ = i::Heap::GetSpaceName(allocation_space);
-
- if (allocation_space == i::RO_SPACE && V8_SHARED_RO_HEAP_BOOL) {
- // RO_SPACE memory is accounted for elsewhere when ReadOnlyHeap is shared.
- space_statistics->space_size_ = 0;
- space_statistics->space_used_size_ = 0;
- space_statistics->space_available_size_ = 0;
- space_statistics->physical_space_size_ = 0;
+ space_statistics->space_name_ = i::BaseSpace::GetSpaceName(allocation_space);
+
+ if (allocation_space == i::RO_SPACE) {
+ if (V8_SHARED_RO_HEAP_BOOL) {
+ // RO_SPACE memory is accounted for elsewhere when ReadOnlyHeap is shared.
+ space_statistics->space_size_ = 0;
+ space_statistics->space_used_size_ = 0;
+ space_statistics->space_available_size_ = 0;
+ space_statistics->physical_space_size_ = 0;
+ } else {
+ i::ReadOnlySpace* space = heap->read_only_space();
+ space_statistics->space_size_ = space->CommittedMemory();
+ space_statistics->space_used_size_ = space->Size();
+ space_statistics->space_available_size_ = 0;
+ space_statistics->physical_space_size_ = space->CommittedPhysicalMemory();
+ }
} else {
+ i::Space* space = heap->space(static_cast<int>(index));
space_statistics->space_size_ = space->CommittedMemory();
space_statistics->space_used_size_ = space->SizeOfObjects();
space_statistics->space_available_size_ = space->Available();
@@ -10305,9 +10346,11 @@ int debug::WasmValue::value_type() {
v8::Local<v8::Array> debug::WasmValue::bytes() {
i::Handle<i::WasmValue> obj = Utils::OpenHandle(this);
- // Should only be called on i32, i64, f32, f64, s128.
- DCHECK_GE(1, obj->value_type());
- DCHECK_LE(5, obj->value_type());
+ DCHECK(i::wasm::ValueType::Kind::kI32 == obj->value_type() ||
+ i::wasm::ValueType::Kind::kI64 == obj->value_type() ||
+ i::wasm::ValueType::Kind::kF32 == obj->value_type() ||
+ i::wasm::ValueType::Kind::kF64 == obj->value_type() ||
+ i::wasm::ValueType::Kind::kS128 == obj->value_type());
i::Isolate* isolate = obj->GetIsolate();
i::Handle<i::Object> bytes_or_ref(obj->bytes_or_ref(), isolate);
@@ -10329,8 +10372,7 @@ v8::Local<v8::Array> debug::WasmValue::bytes() {
v8::Local<v8::Value> debug::WasmValue::ref() {
i::Handle<i::WasmValue> obj = Utils::OpenHandle(this);
- // Should only be called on anyref.
- DCHECK_EQ(6, obj->value_type());
+ DCHECK_EQ(i::wasm::kHeapExtern, obj->value_type());
i::Isolate* isolate = obj->GetIsolate();
i::Handle<i::Object> bytes_or_ref(obj->bytes_or_ref(), isolate);
diff --git a/chromium/v8/src/asmjs/asm-scanner.cc b/chromium/v8/src/asmjs/asm-scanner.cc
index 73140867084..3ac9ef2d6fe 100644
--- a/chromium/v8/src/asmjs/asm-scanner.cc
+++ b/chromium/v8/src/asmjs/asm-scanner.cc
@@ -99,7 +99,7 @@ void AsmJsScanner::Next() {
preceded_by_newline_ = true;
break;
- case kEndOfInput:
+ case kEndOfInputU:
token_ = kEndOfInput;
return;
@@ -354,7 +354,7 @@ bool AsmJsScanner::ConsumeCComment() {
if (ch == '\n') {
preceded_by_newline_ = true;
}
- if (ch == kEndOfInput) {
+ if (ch == kEndOfInputU) {
return false;
}
}
@@ -367,7 +367,7 @@ void AsmJsScanner::ConsumeCPPComment() {
preceded_by_newline_ = true;
return;
}
- if (ch == kEndOfInput) {
+ if (ch == kEndOfInputU) {
return;
}
}
@@ -377,7 +377,7 @@ void AsmJsScanner::ConsumeString(uc32 quote) {
// Only string allowed is 'use asm' / "use asm".
const char* expected = "use asm";
for (; *expected != '\0'; ++expected) {
- if (stream_->Advance() != *expected) {
+ if (stream_->Advance() != static_cast<uc32>(*expected)) {
token_ = kParseError;
return;
}
diff --git a/chromium/v8/src/asmjs/asm-scanner.h b/chromium/v8/src/asmjs/asm-scanner.h
index 076a7607e38..9e7250ff2cd 100644
--- a/chromium/v8/src/asmjs/asm-scanner.h
+++ b/chromium/v8/src/asmjs/asm-scanner.h
@@ -135,6 +135,8 @@ class V8_EXPORT_PRIVATE AsmJsScanner {
};
// clang-format on
+ static constexpr uc32 kEndOfInputU = static_cast<uc32>(kEndOfInput);
+
private:
Utf16CharacterStream* stream_;
token_t token_;
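
The scanner changes above compare values returned by the UTF-16 stream (type uc32, which is unsigned) against the end-of-input sentinel. kEndOfInputU is that sentinel pre-cast to uc32 so both sides of the comparison share the same unsigned type. A minimal standalone sketch of the idea, assuming a sentinel of -1 (the real token value lives in the scanner's token list and may differ):

#include <cassert>
#include <cstdint>

using uc32 = uint32_t;                     // unsigned code-point type, as in the scanner
constexpr int kEndOfInput = -1;            // signed sentinel; assumed value for illustration
constexpr uc32 kEndOfInputU = static_cast<uc32>(kEndOfInput);  // wraps to 0xFFFFFFFF

// Stand-in for a character stream that has reached end of input.
uc32 Advance() { return kEndOfInputU; }

int main() {
  uc32 ch = Advance();
  // Comparing ch with kEndOfInput directly mixes signed and unsigned operands;
  // comparing with the pre-cast constant keeps both sides uc32.
  assert(ch == kEndOfInputU);
  return 0;
}
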
diff --git a/chromium/v8/src/ast/ast.cc b/chromium/v8/src/ast/ast.cc
index 651508b677f..8cd1140154a 100644
--- a/chromium/v8/src/ast/ast.cc
+++ b/chromium/v8/src/ast/ast.cc
@@ -927,6 +927,7 @@ Call::CallType Call::GetCallType() const {
}
if (property != nullptr) {
if (property->IsPrivateReference()) {
+ if (is_optional_chain) return PRIVATE_OPTIONAL_CHAIN_CALL;
return PRIVATE_CALL;
}
bool is_super = property->IsSuperAccess();
diff --git a/chromium/v8/src/ast/ast.h b/chromium/v8/src/ast/ast.h
index 6fcf30499a5..dab3981c638 100644
--- a/chromium/v8/src/ast/ast.h
+++ b/chromium/v8/src/ast/ast.h
@@ -1630,6 +1630,7 @@ class Call final : public Expression {
NAMED_SUPER_PROPERTY_CALL,
KEYED_SUPER_PROPERTY_CALL,
PRIVATE_CALL,
+ PRIVATE_OPTIONAL_CHAIN_CALL,
SUPER_CALL,
OTHER_CALL,
};
diff --git a/chromium/v8/src/ast/scopes.h b/chromium/v8/src/ast/scopes.h
index 11f44bb4984..babb90bdd01 100644
--- a/chromium/v8/src/ast/scopes.h
+++ b/chromium/v8/src/ast/scopes.h
@@ -102,6 +102,10 @@ class V8_EXPORT_PRIVATE Scope : public NON_EXPORTED_BASE(ZoneObject) {
}
inline explicit Snapshot(Scope* scope);
+ // Disallow copy and move.
+ Snapshot(const Snapshot&) = delete;
+ Snapshot(Snapshot&&) = delete;
+
~Snapshot() {
// If we're still active, there was no arrow function. In that case outer
// calls eval if it already called eval before this snapshot started, or
@@ -142,10 +146,6 @@ class V8_EXPORT_PRIVATE Scope : public NON_EXPORTED_BASE(ZoneObject) {
Scope* top_inner_scope_;
UnresolvedList::Iterator top_unresolved_;
base::ThreadedList<Variable>::Iterator top_local_;
-
- // Disallow copy and move.
- Snapshot(const Snapshot&) = delete;
- Snapshot(Snapshot&&) = delete;
};
enum class DeserializationMode { kIncludingVariables, kScopesOnly };
diff --git a/chromium/v8/src/base/bit-field.h b/chromium/v8/src/base/bit-field.h
index 9cebac32de4..ca5fb459210 100644
--- a/chromium/v8/src/base/bit-field.h
+++ b/chromium/v8/src/base/bit-field.h
@@ -52,9 +52,7 @@ class BitField final {
// Returns a type U with the bit field value encoded.
static constexpr U encode(T value) {
-#if V8_HAS_CXX14_CONSTEXPR
- DCHECK(is_valid(value));
-#endif
+ CONSTEXPR_DCHECK(is_valid(value));
return static_cast<U>(value) << kShift;
}
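
The hunk above folds the repeated #if V8_HAS_CXX14_CONSTEXPR / DCHECK / #endif pattern into a single CONSTEXPR_DCHECK macro usable inside constexpr functions. A hedged sketch of how such a macro can be conditionalized; the actual definition belongs to this patch's src/base/logging.h change and may differ in detail:

// Sketch only: expands to a DCHECK when relaxed (C++14) constexpr evaluation
// is available, and to nothing otherwise, so the enclosing function can stay
// constexpr on older toolchains.
#if V8_HAS_CXX14_CONSTEXPR
#define CONSTEXPR_DCHECK(cond) DCHECK(cond)
#else
#define CONSTEXPR_DCHECK(cond)
#endif
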
diff --git a/chromium/v8/src/base/bits.h b/chromium/v8/src/base/bits.h
index b74b98e1069..cf4b77fa186 100644
--- a/chromium/v8/src/base/bits.h
+++ b/chromium/v8/src/base/bits.h
@@ -32,22 +32,27 @@ constexpr inline
return sizeof(T) == 8 ? __builtin_popcountll(static_cast<uint64_t>(value))
: __builtin_popcount(static_cast<uint32_t>(value));
#else
+ // Fall back to divide-and-conquer popcount (see "Hacker's Delight" by Henry
+ // S. Warren, Jr.), chapter 5-1.
constexpr uint64_t mask[] = {0x5555555555555555, 0x3333333333333333,
0x0f0f0f0f0f0f0f0f};
- // Start with 1 bit wide buckets of [0,1].
+ // Start with 64 buckets of 1 bits, holding values from [0,1].
value = ((value >> 1) & mask[0]) + (value & mask[0]);
- // Having 2 bit wide buckets of [0,2] now.
+ // Having 32 buckets of 2 bits, holding values from [0,2] now.
value = ((value >> 2) & mask[1]) + (value & mask[1]);
- // Having 4 bit wide buckets of [0,4] now.
- value = (value >> 4) + value;
- // Having 4 bit wide buckets of [0,8] now.
- if (sizeof(T) > 1)
- value = ((value >> (sizeof(T) > 1 ? 8 : 0)) & mask[2]) + (value & mask[2]);
- // Having 8 bit wide buckets of [0,16] now.
+ // Having 16 buckets of 4 bits, holding values from [0,4] now.
+ value = ((value >> 4) & mask[2]) + (value & mask[2]);
+ // Having 8 buckets of 8 bits, holding values from [0,8] now.
+ // From this point on, the buckets are bigger than the number of bits
+ // required to hold the values, and the buckets are bigger than the maximum
+ // result, so there's no need to mask value anymore, since there's no
+ // more risk of overflow between buckets.
+ if (sizeof(T) > 1) value = (value >> (sizeof(T) > 1 ? 8 : 0)) + value;
+ // Having 4 buckets of 16 bits, holding values from [0,16] now.
if (sizeof(T) > 2) value = (value >> (sizeof(T) > 2 ? 16 : 0)) + value;
- // Having 8 bit wide buckets of [0,32] now.
+ // Having 2 buckets of 32 bits, holding values from [0,32] now.
if (sizeof(T) > 4) value = (value >> (sizeof(T) > 4 ? 32 : 0)) + value;
- // Having 8 bit wide buckets of [0,64] now.
+ // Having 1 bucket of 64 bits, holding values from [0,64] now.
return static_cast<unsigned>(value & 0xff);
#endif
}
@@ -140,9 +145,7 @@ constexpr inline bool IsPowerOfTwo(T value) {
template <typename T,
typename = typename std::enable_if<std::is_integral<T>::value>::type>
inline constexpr int WhichPowerOfTwo(T value) {
-#if V8_HAS_CXX14_CONSTEXPR
- DCHECK(IsPowerOfTwo(value));
-#endif
+ CONSTEXPR_DCHECK(IsPowerOfTwo(value));
#if V8_HAS_BUILTIN_CTZ
STATIC_ASSERT(sizeof(T) <= 8);
return sizeof(T) == 8 ? __builtin_ctzll(static_cast<uint64_t>(value))
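
To make the bucket-widening comments above concrete, here is a standalone sketch of the same divide-and-conquer idea specialized to uint32_t; it mirrors only the fallback path, not the templated helper in bits.h:

    #include <cstdint>

    constexpr unsigned PopCount32(uint32_t v) {
      // 16 buckets of 2 bits, each holding a count in [0,2].
      v = ((v >> 1) & 0x55555555u) + (v & 0x55555555u);
      // 8 buckets of 4 bits, each holding a count in [0,4].
      v = ((v >> 2) & 0x33333333u) + (v & 0x33333333u);
      // 4 buckets of 8 bits, each holding a count in [0,8].
      v = ((v >> 4) & 0x0f0f0f0fu) + (v & 0x0f0f0f0fu);
      // From here the buckets are wide enough that sums cannot spill into
      // the neighbouring bucket, so no more masking is needed.
      v = (v >> 8) + v;   // 2 buckets of 16 bits, counts in [0,16].
      v = (v >> 16) + v;  // 1 bucket of 32 bits, count in [0,32].
      return v & 0xff;
    }

    static_assert(PopCount32(0) == 0, "no bits set");
    static_assert(PopCount32(0xffffffffu) == 32, "all bits set");
    static_assert(PopCount32(0x80000001u) == 2, "two bits set");
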
diff --git a/chromium/v8/src/base/bounds.h b/chromium/v8/src/base/bounds.h
index 236e29b7ccd..fb8c968d660 100644
--- a/chromium/v8/src/base/bounds.h
+++ b/chromium/v8/src/base/bounds.h
@@ -15,9 +15,7 @@ namespace base {
// branch.
template <typename T, typename U>
inline constexpr bool IsInRange(T value, U lower_limit, U higher_limit) {
-#if V8_HAS_CXX14_CONSTEXPR
- DCHECK_LE(lower_limit, higher_limit);
-#endif
+ CONSTEXPR_DCHECK(lower_limit <= higher_limit);
STATIC_ASSERT(sizeof(U) <= sizeof(T));
using unsigned_T = typename std::make_unsigned<T>::type;
// Use static_cast to support enum classes.
@@ -29,7 +27,9 @@ inline constexpr bool IsInRange(T value, U lower_limit, U higher_limit) {
// Checks if [index, index+length) is in range [0, max). Note that this check
// works even if {index+length} would wrap around.
-inline constexpr bool IsInBounds(size_t index, size_t length, size_t max) {
+template <typename T,
+ typename = typename std::enable_if<std::is_unsigned<T>::value>::type>
+inline constexpr bool IsInBounds(T index, T length, T max) {
return length <= max && index <= (max - length);
}
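
The point of phrasing the bounds check as length <= max && index <= (max - length) is that it never computes index + length, which can wrap around for unsigned types; a standalone sketch of the same predicate and the case it protects against:

    #include <cstdint>

    template <typename T>
    constexpr bool IsInBoundsSketch(T index, T length, T max) {
      // Same shape as the template above; no addition, so no wrap-around.
      return length <= max && index <= (max - length);
    }

    // index + length wraps to a small number here and would wrongly pass a
    // naive "index + length <= max" check.
    static_assert(!IsInBoundsSketch<uint32_t>(0xfffffff0u, 0x20u, 0x100u),
                  "rejected despite index + length wrapping around");
    static_assert(IsInBoundsSketch<uint32_t>(10u, 20u, 100u),
                  "10..30 fits in [0,100)");
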
diff --git a/chromium/v8/src/base/cpu.cc b/chromium/v8/src/base/cpu.cc
index bbdae525e30..bae1afe7d1d 100644
--- a/chromium/v8/src/base/cpu.cc
+++ b/chromium/v8/src/base/cpu.cc
@@ -75,7 +75,8 @@ static V8_INLINE void __cpuid(int cpu_info[4], int info_type) {
#endif // !V8_LIBC_MSVCRT
-#elif V8_HOST_ARCH_ARM || V8_HOST_ARCH_MIPS || V8_HOST_ARCH_MIPS64
+#elif V8_HOST_ARCH_ARM || V8_HOST_ARCH_ARM64 || V8_HOST_ARCH_MIPS || \
+ V8_HOST_ARCH_MIPS64
#if V8_OS_LINUX
@@ -108,6 +109,51 @@ static V8_INLINE void __cpuid(int cpu_info[4], int info_type) {
#define HWCAP_IDIV (HWCAP_IDIVA | HWCAP_IDIVT)
#define HWCAP_LPAE (1 << 20)
+#endif // V8_HOST_ARCH_ARM
+
+#if V8_HOST_ARCH_ARM64
+
+// See <uapi/asm/hwcap.h> kernel header.
+/*
+ * HWCAP flags - for elf_hwcap (in kernel) and AT_HWCAP
+ */
+#define HWCAP_FP (1 << 0)
+#define HWCAP_ASIMD (1 << 1)
+#define HWCAP_EVTSTRM (1 << 2)
+#define HWCAP_AES (1 << 3)
+#define HWCAP_PMULL (1 << 4)
+#define HWCAP_SHA1 (1 << 5)
+#define HWCAP_SHA2 (1 << 6)
+#define HWCAP_CRC32 (1 << 7)
+#define HWCAP_ATOMICS (1 << 8)
+#define HWCAP_FPHP (1 << 9)
+#define HWCAP_ASIMDHP (1 << 10)
+#define HWCAP_CPUID (1 << 11)
+#define HWCAP_ASIMDRDM (1 << 12)
+#define HWCAP_JSCVT (1 << 13)
+#define HWCAP_FCMA (1 << 14)
+#define HWCAP_LRCPC (1 << 15)
+#define HWCAP_DCPOP (1 << 16)
+#define HWCAP_SHA3 (1 << 17)
+#define HWCAP_SM3 (1 << 18)
+#define HWCAP_SM4 (1 << 19)
+#define HWCAP_ASIMDDP (1 << 20)
+#define HWCAP_SHA512 (1 << 21)
+#define HWCAP_SVE (1 << 22)
+#define HWCAP_ASIMDFHM (1 << 23)
+#define HWCAP_DIT (1 << 24)
+#define HWCAP_USCAT (1 << 25)
+#define HWCAP_ILRCPC (1 << 26)
+#define HWCAP_FLAGM (1 << 27)
+#define HWCAP_SSBS (1 << 28)
+#define HWCAP_SB (1 << 29)
+#define HWCAP_PACA (1 << 30)
+#define HWCAP_PACG (1UL << 31)
+
+#endif // V8_HOST_ARCH_ARM64
+
+#if V8_HOST_ARCH_ARM || V8_HOST_ARCH_ARM64
+
static uint32_t ReadELFHWCaps() {
uint32_t result = 0;
#if V8_GLIBC_PREREQ(2, 16)
@@ -136,7 +182,7 @@ static uint32_t ReadELFHWCaps() {
return result;
}
-#endif // V8_HOST_ARCH_ARM
+#endif // V8_HOST_ARCH_ARM || V8_HOST_ARCH_ARM64
#if V8_HOST_ARCH_MIPS
int __detect_fp64_mode(void) {
@@ -298,7 +344,8 @@ static bool HasListItem(const char* list, const char* item) {
#endif // V8_OS_LINUX
-#endif // V8_HOST_ARCH_ARM || V8_HOST_ARCH_MIPS || V8_HOST_ARCH_MIPS64
+#endif // V8_HOST_ARCH_ARM || V8_HOST_ARCH_ARM64 ||
+ // V8_HOST_ARCH_MIPS || V8_HOST_ARCH_MIPS64
CPU::CPU()
: stepping_(0),
@@ -337,6 +384,7 @@ CPU::CPU()
has_vfp_(false),
has_vfp3_(false),
has_vfp3_d32_(false),
+ has_jscvt_(false),
is_fp64_mode_(false),
has_non_stop_time_stamp_counter_(false),
has_msa_(false) {
@@ -609,6 +657,19 @@ CPU::CPU()
// Windows makes high-resolution thread timing information available in
// user-space.
has_non_stop_time_stamp_counter_ = true;
+
+#elif V8_OS_LINUX
+ // Try to extract the list of CPU features from ELF hwcaps.
+ uint32_t hwcaps = ReadELFHWCaps();
+ if (hwcaps != 0) {
+ has_jscvt_ = (hwcaps & HWCAP_JSCVT) != 0;
+ } else {
+ // Try to fall back to the "Features" CPUInfo field
+ CPUInfo cpu_info;
+ char* features = cpu_info.ExtractField("Features");
+ has_jscvt_ = HasListItem(features, "jscvt");
+ delete[] features;
+ }
#endif // V8_OS_WIN
#elif V8_HOST_ARCH_PPC || V8_HOST_ARCH_PPC64
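
On Linux/arm64 the new jscvt detection boils down to reading the AT_HWCAP auxiliary vector, with the /proc/cpuinfo "Features" line as the fallback. A hedged standalone sketch of the hwcaps half (Linux with glibc >= 2.16 only; HWCAP_JSCVT is duplicated here from the kernel header, as the patch itself does):

    #include <sys/auxv.h>  // getauxval, AT_HWCAP
    #include <cstdio>

    #ifndef HWCAP_JSCVT
    #define HWCAP_JSCVT (1 << 13)  // mirrors <uapi/asm/hwcap.h> on arm64
    #endif

    int main() {
      unsigned long hwcaps = getauxval(AT_HWCAP);
      bool has_jscvt = (hwcaps & HWCAP_JSCVT) != 0;
      std::printf("jscvt %s\n", has_jscvt ? "supported" : "not supported");
      return 0;
    }
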
diff --git a/chromium/v8/src/base/cpu.h b/chromium/v8/src/base/cpu.h
index 4b4becfa204..8cec23c8e8e 100644
--- a/chromium/v8/src/base/cpu.h
+++ b/chromium/v8/src/base/cpu.h
@@ -110,6 +110,7 @@ class V8_BASE_EXPORT CPU final {
bool has_vfp() const { return has_vfp_; }
bool has_vfp3() const { return has_vfp3_; }
bool has_vfp3_d32() const { return has_vfp3_d32_; }
+ bool has_jscvt() const { return has_jscvt_; }
// mips features
bool is_fp64_mode() const { return is_fp64_mode_; }
@@ -153,6 +154,7 @@ class V8_BASE_EXPORT CPU final {
bool has_vfp_;
bool has_vfp3_;
bool has_vfp3_d32_;
+ bool has_jscvt_;
bool is_fp64_mode_;
bool has_non_stop_time_stamp_counter_;
bool has_msa_;
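
Code elsewhere in the tree can then branch on the new accessor; a hedged usage sketch (assuming the src/base/cpu.h include path used by this file):

    #include "src/base/cpu.h"

    // Sketch only: feature probing happens in the CPU constructor, so a
    // freshly constructed object already reflects the host's hwcaps.
    bool HostSupportsJscvt() {
      v8::base::CPU cpu;
      return cpu.has_jscvt();
    }
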
diff --git a/chromium/v8/src/base/enum-set.h b/chromium/v8/src/base/enum-set.h
index 927a8f87fe0..2415f1c500b 100644
--- a/chromium/v8/src/base/enum-set.h
+++ b/chromium/v8/src/base/enum-set.h
@@ -63,9 +63,7 @@ class EnumSet {
explicit constexpr EnumSet(T bits) : bits_(bits) {}
static constexpr T Mask(E element) {
-#if V8_HAS_CXX14_CONSTEXPR
- DCHECK_GT(sizeof(T) * 8, static_cast<int>(element));
-#endif
+ CONSTEXPR_DCHECK(sizeof(T) * 8 > static_cast<size_t>(element));
return T{1} << static_cast<typename std::underlying_type<E>::type>(element);
}
diff --git a/chromium/v8/src/base/iterator.h b/chromium/v8/src/base/iterator.h
index 86d4b068d33..0bec8725227 100644
--- a/chromium/v8/src/base/iterator.h
+++ b/chromium/v8/src/base/iterator.h
@@ -36,8 +36,7 @@ class iterator_range {
typename std::iterator_traits<iterator>::difference_type;
iterator_range() : begin_(), end_() {}
- template <typename ForwardIterator1, typename ForwardIterator2>
- iterator_range(ForwardIterator1 begin, ForwardIterator2 end)
+ iterator_range(ForwardIterator begin, ForwardIterator end)
: begin_(begin), end_(end) {}
iterator begin() { return begin_; }
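
With both constructor parameters now required to be the same ForwardIterator type, a typical use is wrapping a sub-range of an existing container; a hedged sketch (assuming iterator_range lives in v8::base, as src/base headers generally do):

    #include <vector>
    #include "src/base/iterator.h"

    // Sketch: iterate over the middle of a vector without copying it
    // (assumes v.size() >= 2).
    int SumOfMiddle(const std::vector<int>& v) {
      v8::base::iterator_range<std::vector<int>::const_iterator> middle(
          v.begin() + 1, v.end() - 1);
      int sum = 0;
      for (int x : middle) sum += x;
      return sum;
    }
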
diff --git a/chromium/v8/src/base/logging.h b/chromium/v8/src/base/logging.h
index 790018c98e9..fe39f988225 100644
--- a/chromium/v8/src/base/logging.h
+++ b/chromium/v8/src/base/logging.h
@@ -134,6 +134,12 @@ V8_BASE_EXPORT void SetDcheckFunction(void (*dcheck_Function)(const char*, int,
#endif
+#if V8_HAS_CXX14_CONSTEXPR
+#define CONSTEXPR_DCHECK(cond) DCHECK(cond)
+#else
+#define CONSTEXPR_DCHECK(cond)
+#endif
+
// Define PrintCheckOperand<T> for each T which defines operator<< for ostream.
template <typename T>
typename std::enable_if<
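
CONSTEXPR_DCHECK folds the repeated #if V8_HAS_CXX14_CONSTEXPR / DCHECK / #endif pattern from the call sites above into one macro: with relaxed (C++14) constexpr the check is emitted, otherwise it expands to nothing so the enclosing constexpr function keeps a single-return body. A standalone sketch of the same idea, with assert standing in for DCHECK:

    #include <cassert>

    #if defined(__cpp_constexpr) && __cpp_constexpr >= 201304L
    #define SKETCH_CONSTEXPR_DCHECK(cond) assert(cond)
    #else
    #define SKETCH_CONSTEXPR_DCHECK(cond)
    #endif

    constexpr int HalfOfEven(int n) {
      SKETCH_CONSTEXPR_DCHECK(n % 2 == 0);  // debug check; compiles away with NDEBUG
      return n / 2;
    }

    static_assert(HalfOfEven(8) == 4,
                  "still usable in constant expressions with the check in place");
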
diff --git a/chromium/v8/src/base/macros.h b/chromium/v8/src/base/macros.h
index e22dd00895a..4eb652cae55 100644
--- a/chromium/v8/src/base/macros.h
+++ b/chromium/v8/src/base/macros.h
@@ -171,22 +171,12 @@ V8_INLINE Dest bit_cast(Source const& source) {
#endif
#endif
-// Helper macro to define no_sanitize attributes only with clang.
-#if defined(__clang__) && defined(__has_attribute)
-#if __has_attribute(no_sanitize)
-#define CLANG_NO_SANITIZE(what) __attribute__((no_sanitize(what)))
-#endif
-#endif
-#if !defined(CLANG_NO_SANITIZE)
-#define CLANG_NO_SANITIZE(what)
-#endif
-
// DISABLE_CFI_PERF -- Disable Control Flow Integrity checks for Perf reasons.
-#define DISABLE_CFI_PERF CLANG_NO_SANITIZE("cfi")
+#define DISABLE_CFI_PERF V8_CLANG_NO_SANITIZE("cfi")
// DISABLE_CFI_ICALL -- Disable Control Flow Integrity indirect call checks,
// useful because calls into JITed code can not be CFI verified.
-#define DISABLE_CFI_ICALL CLANG_NO_SANITIZE("cfi-icall")
+#define DISABLE_CFI_ICALL V8_CLANG_NO_SANITIZE("cfi-icall")
#if V8_CC_GNU
#define V8_IMMEDIATE_CRASH() __builtin_trap()
diff --git a/chromium/v8/src/base/optional.h b/chromium/v8/src/base/optional.h
index 6610c7ffc33..3c13e654c80 100644
--- a/chromium/v8/src/base/optional.h
+++ b/chromium/v8/src/base/optional.h
@@ -557,33 +557,33 @@ class OPTIONAL_DECLSPEC_EMPTY_BASES Optional
return *this;
}
- const T* operator->() const {
- DCHECK(storage_.is_populated_);
+ constexpr const T* operator->() const {
+ CONSTEXPR_DCHECK(storage_.is_populated_);
return &storage_.value_;
}
- T* operator->() {
- DCHECK(storage_.is_populated_);
+ constexpr T* operator->() {
+ CONSTEXPR_DCHECK(storage_.is_populated_);
return &storage_.value_;
}
- const T& operator*() const & {
- DCHECK(storage_.is_populated_);
+ constexpr const T& operator*() const& {
+ CONSTEXPR_DCHECK(storage_.is_populated_);
return storage_.value_;
}
- T& operator*() & {
- DCHECK(storage_.is_populated_);
+ constexpr T& operator*() & {
+ CONSTEXPR_DCHECK(storage_.is_populated_);
return storage_.value_;
}
- const T&& operator*() const && {
- DCHECK(storage_.is_populated_);
+ constexpr const T&& operator*() const&& {
+ CONSTEXPR_DCHECK(storage_.is_populated_);
return std::move(storage_.value_);
}
- T&& operator*() && {
- DCHECK(storage_.is_populated_);
+ constexpr T&& operator*() && {
+ CONSTEXPR_DCHECK(storage_.is_populated_);
return std::move(storage_.value_);
}
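
Day-to-day usage is unchanged by the constexpr-ification; the accessors simply become available in constant evaluation as well, with CONSTEXPR_DCHECK guarding access to a disengaged Optional in debug builds. A hedged sketch of ordinary use (has_value() is assumed to exist, as in Chromium's base::Optional):

    #include "src/base/optional.h"

    struct Point {
      int x;
      int y;
    };

    // Sketch: operator-> behaves like std::optional's, with the debug check
    // above firing only if the Optional is disengaged.
    int XOrZero(const v8::base::Optional<Point>& p) {
      return p.has_value() ? p->x : 0;
    }
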
diff --git a/chromium/v8/src/base/platform/platform-posix.cc b/chromium/v8/src/base/platform/platform-posix.cc
index c3f0b08ddde..238750bab16 100644
--- a/chromium/v8/src/base/platform/platform-posix.cc
+++ b/chromium/v8/src/base/platform/platform-posix.cc
@@ -531,7 +531,7 @@ OS::MemoryMappedFile* OS::MemoryMappedFile::open(const char* name,
OS::MemoryMappedFile* OS::MemoryMappedFile::create(const char* name,
size_t size, void* initial) {
if (FILE* file = fopen(name, "w+")) {
- if (size == 0) return new PosixMemoryMappedFile(file, 0, 0);
+ if (size == 0) return new PosixMemoryMappedFile(file, nullptr, 0);
size_t result = fwrite(initial, 1, size, file);
if (result == size && !ferror(file)) {
void* memory = mmap(OS::GetRandomMmapAddr(), result,
@@ -970,7 +970,8 @@ void Thread::SetThreadLocal(LocalStorageKey key, void* value) {
// pthread_getattr_np used below is non portable (hence the _np suffix). We
// keep this version in POSIX as most Linux-compatible derivatives will
// support it. MacOS and FreeBSD are different here.
-#if !defined(V8_OS_FREEBSD) && !defined(V8_OS_MACOSX) && !defined(_AIX)
+#if !defined(V8_OS_FREEBSD) && !defined(V8_OS_MACOSX) && !defined(_AIX) && \
+ !defined(V8_OS_SOLARIS)
// static
void* Stack::GetStackStart() {
@@ -996,7 +997,8 @@ void* Stack::GetStackStart() {
return nullptr;
}
-#endif // !defined(V8_OS_FREEBSD) && !defined(V8_OS_MACOSX) && !defined(_AIX)
+#endif // !defined(V8_OS_FREEBSD) && !defined(V8_OS_MACOSX) &&
+ // !defined(_AIX) && !defined(V8_OS_SOLARIS)
// static
void* Stack::GetCurrentStackPosition() { return __builtin_frame_address(0); }
diff --git a/chromium/v8/src/base/platform/platform-solaris.cc b/chromium/v8/src/base/platform/platform-solaris.cc
index b5b16dac568..b4ac98ce73b 100644
--- a/chromium/v8/src/base/platform/platform-solaris.cc
+++ b/chromium/v8/src/base/platform/platform-solaris.cc
@@ -65,5 +65,23 @@ void OS::SignalCodeMovingGC() {}
void OS::AdjustSchedulingParams() {}
+// static
+void* Stack::GetStackStart() {
+ pthread_attr_t attr;
+ int error;
+ pthread_attr_init(&attr);
+ error = pthread_attr_get_np(pthread_self(), &attr);
+ if (!error) {
+ void* base;
+ size_t size;
+ error = pthread_attr_getstack(&attr, &base, &size);
+ CHECK(!error);
+ pthread_attr_destroy(&attr);
+ return reinterpret_cast<uint8_t*>(base) + size;
+ }
+ pthread_attr_destroy(&attr);
+ return nullptr;
+}
+
} // namespace base
} // namespace v8
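
Combined with Stack::GetCurrentStackPosition() from the POSIX file above, the new Stack::GetStackStart() lets callers estimate current stack usage. A hedged sketch, assuming downward-growing stacks and that the Stack class is declared in src/base/platform/platform.h (the header these platform files implement):

    #include <cstddef>
    #include <cstdint>
    #include "src/base/platform/platform.h"  // assumed declaration site of Stack

    // Sketch: rough number of bytes consumed on the current thread's stack.
    size_t ApproximateStackBytesUsed() {
      void* start = v8::base::Stack::GetStackStart();
      if (start == nullptr) return 0;  // e.g. pthread attribute lookup failed
      void* current = v8::base::Stack::GetCurrentStackPosition();
      return static_cast<size_t>(static_cast<uint8_t*>(start) -
                                 static_cast<uint8_t*>(current));
    }
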
diff --git a/chromium/v8/src/base/platform/time.h b/chromium/v8/src/base/platform/time.h
index 5f69129ecbc..63a5e8a0591 100644
--- a/chromium/v8/src/base/platform/time.h
+++ b/chromium/v8/src/base/platform/time.h
@@ -14,6 +14,7 @@
#include "src/base/base-export.h"
#include "src/base/bits.h"
#include "src/base/macros.h"
+#include "src/base/safe_conversions.h"
#if V8_OS_WIN
#include "src/base/win32-headers.h"
#endif
@@ -90,6 +91,11 @@ class V8_BASE_EXPORT TimeDelta final {
return TimeDelta(nanoseconds / TimeConstants::kNanosecondsPerMicrosecond);
}
+ static TimeDelta FromMillisecondsD(double milliseconds) {
+ return FromDouble(milliseconds *
+ TimeConstants::kMicrosecondsPerMillisecond);
+ }
+
// Returns the maximum time delta, which should be greater than any reasonable
// time delta we might compare it to. Adding or subtracting the maximum time
// delta to a time or another time delta has an undefined result.
@@ -201,6 +207,9 @@ class V8_BASE_EXPORT TimeDelta final {
}
private:
+ // TODO(v8:10620): constexpr requires constexpr saturated_cast.
+ static inline TimeDelta FromDouble(double value);
+
template<class TimeClass> friend class time_internal::TimeBase;
// Constructs a delta given the duration in microseconds. This is private
// to avoid confusion by callers with an integer constructor. Use
@@ -212,6 +221,11 @@ class V8_BASE_EXPORT TimeDelta final {
};
// static
+TimeDelta TimeDelta::FromDouble(double value) {
+ return TimeDelta(saturated_cast<int64_t>(value));
+}
+
+// static
constexpr TimeDelta TimeDelta::Max() {
return TimeDelta(std::numeric_limits<int64_t>::max());
}
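
FromMillisecondsD mirrors the integer factories but routes through saturated_cast, so a huge or non-finite double clamps at the int64 microsecond limit instead of overflowing. A hedged usage sketch (InMicroseconds() is assumed, as in Chromium's base::TimeDelta):

    #include <cstdint>
    #include "src/base/platform/time.h"

    void TimeDeltaSketch() {
      using v8::base::TimeDelta;

      // 1.5 ms is stored as 1500 microseconds.
      TimeDelta d = TimeDelta::FromMillisecondsD(1.5);
      int64_t us = d.InMicroseconds();  // expected: 1500

      // An absurdly large value saturates instead of invoking
      // signed-overflow undefined behaviour.
      TimeDelta huge = TimeDelta::FromMillisecondsD(1e300);
      (void)us;
      (void)huge;
    }
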
diff --git a/chromium/v8/src/base/template-utils.h b/chromium/v8/src/base/template-utils.h
index 8f89672e1af..617ef6ce34b 100644
--- a/chromium/v8/src/base/template-utils.h
+++ b/chromium/v8/src/base/template-utils.h
@@ -7,6 +7,7 @@
#include <array>
#include <functional>
+#include <iosfwd>
#include <type_traits>
#include <utility>
@@ -22,6 +23,12 @@ constexpr inline auto make_array_helper(Function f,
return {{f(Indexes)...}};
}
+template <template <size_t> class Value, std::size_t... Indexes>
+constexpr inline auto make_array_helper(std::index_sequence<Indexes...>)
+ -> std::array<typename Value<0>::value_type, sizeof...(Indexes)> {
+ return {{Value<Indexes>()...}};
+}
+
} // namespace detail
// base::make_array: Create an array of fixed length, initialized by a function.
@@ -35,6 +42,13 @@ constexpr auto make_array(Function f) {
return detail::make_array_helper(f, std::make_index_sequence<Size>{});
}
+// The same as above, but taking a template instead of a function to generate
+// the values for the array.
+template <std::size_t Size, template <size_t> class Value>
+constexpr auto make_array() {
+ return detail::make_array_helper<Value>(std::make_index_sequence<Size>{});
+}
+
// Helper to determine how to pass values: Pass scalars and arrays by value,
// others by const reference (even if it was a non-const ref before; this is
// disallowed by the style guide anyway).
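
The new make_array overload generates the elements from a class template rather than a callable: Value<0>::value_type fixes the element type and element I is built from Value<I>(). A minimal sketch with a hypothetical generator template:

    #include <cstddef>
    #include "src/base/template-utils.h"

    // Hypothetical generator: element I of the array is I squared.
    template <size_t I>
    struct Squared {
      using value_type = size_t;
      constexpr operator value_type() const { return I * I; }
    };

    constexpr auto kSquares = v8::base::make_array<5, Squared>();
    static_assert(kSquares[4] == 16, "elements are computed at compile time");
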
diff --git a/chromium/v8/src/builtins/aggregate-error.tq b/chromium/v8/src/builtins/aggregate-error.tq
new file mode 100644
index 00000000000..0f4a47b3e73
--- /dev/null
+++ b/chromium/v8/src/builtins/aggregate-error.tq
@@ -0,0 +1,49 @@
+// Copyright 2020 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include 'src/objects/js-objects.h'
+
+namespace error {
+
+transitioning javascript builtin AggregateErrorConstructor(
+ js-implicit context: NativeContext, target: JSFunction,
+ newTarget: JSAny)(...arguments): JSAny {
+ // This function implements the spec as suggested by
+ // https://github.com/tc39/proposal-promise-any/pull/59 . FIXME(marja):
+ // change this if the PR is declined, otherwise remove the comment.
+
+ // 1. If NewTarget is undefined, let newTarget be the active function
+ // object, else let newTarget be NewTarget.
+ // 2. Let O be ? OrdinaryCreateFromConstructor(newTarget,
+ // "%AggregateError.prototype%", « [[ErrorData]], [[AggregateErrors]] »).
+ // 3. If _message_ is not _undefined_, then
+ // a. Let msg be ? ToString(_message_).
+ // b. Let msgDesc be the PropertyDescriptor { [[Value]]: _msg_,
+ // [[Writable]]: *true*, [[Enumerable]]: *false*, [[Configurable]]: *true*
+ // c. Perform ! DefinePropertyOrThrow(_O_, *"message"*, _msgDesc_).
+ const message: JSAny = arguments[1];
+ const obj: JSObject =
+ ConstructAggregateErrorHelper(context, target, newTarget, message);
+
+ // 4. Let errorsList be ? IterableToList(errors).
+ const errors: JSAny = arguments[0];
+ const errorsList = iterator::IterableToListWithSymbolLookup(errors);
+
+ // 5. Perform ! DefinePropertyOrThrow(_O_, `"errors"`, Property Descriptor {
+ // [[Configurable]]: *true*, [[Enumerable]]: *false*, [[Writable]]: *true*,
+ // [[Value]]: ! CreateArrayFromList(_errorsList_) }).
+ SetOwnPropertyIgnoreAttributes(
+ obj, ErrorsStringConstant(), errorsList,
+ SmiConstant(PropertyAttributes::DONT_ENUM));
+
+ // 6. Return O.
+ return obj;
+}
+
+extern transitioning runtime ConstructAggregateErrorHelper(
+ Context, JSFunction, JSAny, Object): JSObject;
+
+extern transitioning runtime ConstructInternalAggregateErrorHelper(
+ Context, Object): JSObject;
+}
diff --git a/chromium/v8/src/builtins/array-join.tq b/chromium/v8/src/builtins/array-join.tq
index 08d0cbf894b..7bf74e4e317 100644
--- a/chromium/v8/src/builtins/array-join.tq
+++ b/chromium/v8/src/builtins/array-join.tq
@@ -296,7 +296,7 @@ transitioning macro ArrayJoinImpl<T: type>(implicit context: Context)(
}
case (obj: JSAny): {
if (IsNullOrUndefined(obj)) continue;
- next = ToString(context, obj);
+ next = string::ToString(context, obj);
}
}
}
@@ -418,7 +418,7 @@ macro LoadJoinStack(implicit context: Context)(): FixedArray
const stack: HeapObject = UnsafeCast<HeapObject>(
nativeContext[NativeContextSlot::ARRAY_JOIN_STACK_INDEX]);
if (stack == Undefined) goto IfUninitialized;
- assert(IsFixedArray(stack));
+ assert(Is<FixedArray>(stack));
return UnsafeCast<FixedArray>(stack);
}
diff --git a/chromium/v8/src/builtins/array-slice.tq b/chromium/v8/src/builtins/array-slice.tq
index 147dae6f72f..97026586adf 100644
--- a/chromium/v8/src/builtins/array-slice.tq
+++ b/chromium/v8/src/builtins/array-slice.tq
@@ -36,21 +36,18 @@ macro HandleFastAliasedSloppyArgumentsSlice(
const sloppyElements: SloppyArgumentsElements =
Cast<SloppyArgumentsElements>(args.elements) otherwise Bailout;
- const sloppyElementsLength: Smi = sloppyElements.length;
- const parameterMapLength: Smi =
- sloppyElementsLength - kSloppyArgumentsParameterMapStart;
+ const parameterMapLength: Smi = sloppyElements.length;
// Check to make sure that the extraction will not access outside the
// defined arguments
const end: Smi = start + count;
const unmappedElements: FixedArray =
- Cast<FixedArray>(sloppyElements.objects[kSloppyArgumentsArgumentsIndex])
+ Cast<FixedArray>(sloppyElements.arguments)
otherwise Bailout;
const unmappedElementsLength: Smi = unmappedElements.length;
if (SmiAbove(end, unmappedElementsLength)) goto Bailout;
- const argumentsContext: Context =
- UnsafeCast<Context>(sloppyElements.objects[kSloppyArgumentsContextIndex]);
+ const argumentsContext: Context = sloppyElements.context;
const arrayMap: Map =
LoadJSArrayElementsMap(ElementsKind::HOLEY_ELEMENTS, context);
@@ -63,8 +60,7 @@ macro HandleFastAliasedSloppyArgumentsSlice(
// Fill in the part of the result that map to context-mapped parameters.
for (let current: Smi = start; current < to; ++current) {
- const e: Object =
- sloppyElements.objects[current + kSloppyArgumentsParameterMapStart];
+ const e: Object = sloppyElements.mapped_entries[current];
const newElement = UnsafeCast<(JSAny | TheHole)>(
e != TheHole ? argumentsContext[UnsafeCast<Smi>(e)] :
unmappedElements.objects[current]);
diff --git a/chromium/v8/src/builtins/base.tq b/chromium/v8/src/builtins/base.tq
index 1d2c4546461..b42923412b1 100644
--- a/chromium/v8/src/builtins/base.tq
+++ b/chromium/v8/src/builtins/base.tq
@@ -78,7 +78,8 @@ type JSPrimitive = Numeric|String|Symbol|Boolean|Null|Undefined;
// TheHole or FixedArray.
type JSAny = JSReceiver|JSPrimitive;
-type JSAnyNotNumber = BigInt|String|Symbol|Boolean|Null|Undefined|JSReceiver;
+type JSAnyNotNumeric = String|Symbol|Boolean|Null|Undefined|JSReceiver;
+type JSAnyNotNumber = BigInt|JSAnyNotNumeric;
// This is the intersection of JSAny and HeapObject.
type JSAnyNotSmi = JSAnyNotNumber|HeapNumber;
@@ -134,6 +135,7 @@ const kDoubleHole: float64_or_hole = float64_or_hole{is_hole: true, value: 0};
// The HashTable inheritance hierarchy doesn't actually look like this in C++
// because it uses some class templates that we can't yet (and may never)
// express in Torque, but this is the expected organization of instance types.
+@doNotGenerateCast
extern class HashTable extends FixedArray generates 'TNode<FixedArray>';
extern class OrderedHashMap extends HashTable;
extern class OrderedHashSet extends HashTable;
@@ -264,6 +266,7 @@ extern enum MessageTemplate {
kNotGeneric,
kCalledNonCallable,
kCalledOnNullOrUndefined,
+ kCannotConvertToPrimitive,
kProtoObjectOrNull,
kInvalidOffset,
kInvalidTypedArrayLength,
@@ -307,6 +310,7 @@ extern enum MessageTemplate {
kProxyGetPrototypeOfNonExtensible,
kProxySetPrototypeOfNonExtensible,
kProxyDeletePropertyNonExtensible,
+ kUndefinedOrNullToObject,
kWeakRefsCleanupMustBeCallable,
kWasmTrapUnreachable,
kWasmTrapMemOutOfBounds,
@@ -320,14 +324,24 @@ extern enum MessageTemplate {
kWasmTrapDataSegmentDropped,
kWasmTrapElemSegmentDropped,
kWasmTrapTableOutOfBounds,
- kWasmTrapBrOnExnNullRef,
- kWasmTrapRethrowNullRef,
+ kWasmTrapBrOnExnNull,
+ kWasmTrapRethrowNull,
kWasmTrapNullDereference,
kWasmTrapIllegalCast,
kWasmTrapArrayOutOfBounds,
...
}
+extern enum PropertyAttributes extends int31 {
+ NONE,
+ READ_ONLY,
+ DONT_ENUM,
+ DONT_DELETE,
+ ALL_ATTRIBUTES_MASK,
+ FROZEN,
+ ...
+}
+
const kMaxArrayIndex:
constexpr uint32 generates 'JSArray::kMaxArrayIndex';
const kArrayBufferMaxByteLength:
@@ -364,12 +378,6 @@ const kMaxRegularHeapObjectSize: constexpr int31
const kMaxNewSpaceFixedArrayElements: constexpr int31
generates 'FixedArray::kMaxRegularLength';
-const kSloppyArgumentsArgumentsIndex: constexpr int31
- generates 'SloppyArgumentsElements::kArgumentsIndex';
-const kSloppyArgumentsContextIndex: constexpr int31
- generates 'SloppyArgumentsElements::kContextIndex';
-const kSloppyArgumentsParameterMapStart: constexpr int31
- generates 'SloppyArgumentsElements::kParameterMapStart';
extern enum PrimitiveType { kString, kBoolean, kSymbol, kNumber }
@@ -387,7 +395,9 @@ type Boolean = True|False;
type NumberOrUndefined = Number|Undefined;
+extern macro DefaultStringConstant(): String;
extern macro EmptyStringConstant(): EmptyString;
+extern macro ErrorsStringConstant(): String;
extern macro FalseConstant(): False;
extern macro Int32FalseConstant(): bool;
extern macro Int32TrueConstant(): bool;
@@ -396,11 +406,17 @@ extern macro LengthStringConstant(): String;
extern macro MatchSymbolConstant(): Symbol;
extern macro MessageStringConstant(): String;
extern macro NanConstant(): NaN;
+extern macro NameStringConstant(): String;
extern macro NullConstant(): Null;
+extern macro NumberStringConstant(): String;
extern macro ReturnStringConstant(): String;
+extern macro StringStringConstant(): String;
extern macro TheHoleConstant(): TheHole;
+extern macro ToPrimitiveSymbolConstant(): PublicSymbol;
+extern macro ToStringStringConstant(): String;
extern macro TrueConstant(): True;
extern macro UndefinedConstant(): Undefined;
+extern macro ValueOfStringConstant(): String;
const TheHole: TheHole = TheHoleConstant();
const Null: Null = NullConstant();
@@ -459,7 +475,7 @@ extern macro Print(Object);
extern macro DebugBreak();
// ES6 7.1.4 ToInteger ( argument )
-transitioning macro ToIntegerImpl(implicit context: Context)(input: Object):
+transitioning macro ToIntegerImpl(implicit context: Context)(input: JSAny):
Number {
let input = input;
@@ -478,28 +494,28 @@ transitioning macro ToIntegerImpl(implicit context: Context)(input: Object):
assert(IsNumberNormalized(result));
return result;
}
- case (ho: HeapObject): {
- input = math::NonNumberToNumber(ho);
+ case (a: JSAnyNotNumber): {
+ input = conversion::NonNumberToNumber(a);
}
}
}
unreachable;
}
-transitioning builtin ToInteger(implicit context: Context)(input: Object):
+transitioning builtin ToInteger(implicit context: Context)(input: JSAny):
Number {
return ToIntegerImpl(input);
}
@export
-transitioning macro ToInteger_Inline(implicit context: Context)(input: Object):
+transitioning macro ToInteger_Inline(implicit context: Context)(input: JSAny):
Number {
typeswitch (input) {
case (s: Smi): {
return s;
}
- case (ho: HeapObject): {
- return ToInteger(ho);
+ case (JSAny): {
+ return ToInteger(input);
}
}
}
@@ -518,6 +534,8 @@ extern transitioning macro GetProperty(implicit context: Context)(
JSAny, JSAny): JSAny;
extern transitioning builtin SetProperty(implicit context: Context)(
JSAny, JSAny, JSAny): JSAny;
+extern transitioning builtin SetPropertyIgnoreAttributes(
+ implicit context: Context)(JSObject, String, JSAny, Smi): JSAny;
extern transitioning builtin SetPropertyInLiteral(implicit context: Context)(
JSAny, JSAny, JSAny): JSAny;
extern transitioning builtin DeleteProperty(implicit context: Context)(
@@ -529,6 +547,8 @@ extern transitioning macro HasProperty_Inline(implicit context: Context)(
extern builtin LoadIC(
Context, JSAny, JSAny, TaggedIndex, FeedbackVector): JSAny;
+extern macro SetPropertyStrict(Context, Object, Object, Object): Object;
+
extern macro ThrowRangeError(implicit context: Context)(
constexpr MessageTemplate): never;
extern macro ThrowRangeError(implicit context: Context)(
@@ -581,10 +601,6 @@ extern builtin ToObject(Context, JSAny): JSReceiver;
extern macro ToObject_Inline(Context, JSAny): JSReceiver;
extern macro IsNullOrUndefined(Object): bool;
extern macro IsString(HeapObject): bool;
-transitioning builtin ToString(context: Context, o: JSAny): String {
- return ToStringImpl(context, o);
-}
-extern transitioning runtime ToStringRT(Context, JSAny): String;
extern transitioning builtin NonPrimitiveToPrimitive_String(
Context, JSAny): JSPrimitive;
extern transitioning builtin NonPrimitiveToPrimitive_Default(
@@ -616,6 +632,18 @@ extern macro StringCharCodeAt(String, uintptr): int32;
extern runtime StringCompareSequence(Context, String, String, Number): Boolean;
extern macro StringFromSingleCharCode(int32): String;
+extern macro NumberToString(Number): String;
+extern macro StringToNumber(String): Number;
+extern transitioning macro NonNumberToNumber(implicit context: Context)(
+ JSAnyNotNumber): Number;
+extern transitioning macro NonNumberToNumeric(implicit context: Context)(
+ JSAnyNotNumber): Numeric;
+
+extern macro Equal(JSAny, JSAny, Context): Boolean;
+macro Equal(implicit context: Context)(left: JSAny, right: JSAny): Boolean {
+ return Equal(left, right);
+}
+
extern macro StrictEqual(JSAny, JSAny): Boolean;
extern macro SmiLexicographicCompare(Smi, Smi): Smi;
extern runtime ReThrow(Context, JSAny): never;
@@ -778,6 +806,8 @@ extern operator '+' macro ConstexprInt31Add(
constexpr int31, constexpr int31): constexpr int31;
extern operator '*' macro ConstexprInt31Mul(
constexpr int31, constexpr int31): constexpr int31;
+extern operator '-' macro Int32Sub(int16, int16): int32;
+extern operator '-' macro Int32Sub(uint16, uint16): int32;
extern operator '-' macro Int32Sub(int32, int32): int32;
extern operator '*' macro Int32Mul(int32, int32): int32;
extern operator '/' macro Int32Div(int32, int32): int32;
@@ -814,6 +844,7 @@ extern operator '+' macro Float64Add(float64, float64): float64;
extern operator '-' macro Float64Sub(float64, float64): float64;
extern operator '*' macro Float64Mul(float64, float64): float64;
extern operator '/' macro Float64Div(float64, float64): float64;
+extern operator '%' macro Float64Mod(float64, float64): float64;
extern operator '+' macro NumberAdd(Number, Number): Number;
extern operator '-' macro NumberSub(Number, Number): Number;
@@ -850,6 +881,12 @@ extern operator '!' macro ConstexprBoolNot(constexpr bool): constexpr bool;
extern operator '!' macro Word32BinaryNot(bool): bool;
extern operator '!' macro IsFalse(Boolean): bool;
+extern operator '==' macro
+ConstexprInt31Equal(
+ constexpr InstanceType, constexpr InstanceType): constexpr bool;
+extern operator '-' macro ConstexprUint32Sub(
+ constexpr InstanceType, constexpr InstanceType): constexpr int32;
+
extern operator '.instanceType' macro LoadInstanceType(HeapObject):
InstanceType;
@@ -882,6 +919,7 @@ extern macro TaggedIsNotSmi(Object): bool;
extern macro TaggedIsPositiveSmi(Object): bool;
extern macro IsValidPositiveSmi(intptr): bool;
+extern macro IsInteger(JSAny): bool;
extern macro IsInteger(HeapNumber): bool;
extern macro AllocateHeapNumberWithValue(float64): HeapNumber;
@@ -912,6 +950,7 @@ macro SmiTag<T : type extends uint31>(value: T): SmiTagged<T> {
return %RawDownCast<SmiTagged<T>>(SmiFromUint32(value));
}
extern macro SmiToInt32(Smi): int32;
+extern macro SmiToFloat64(Smi): float64;
extern macro TaggedIndexToIntPtr(TaggedIndex): intptr;
extern macro IntPtrToTaggedIndex(intptr): TaggedIndex;
extern macro TaggedIndexToSmi(TaggedIndex): Smi;
@@ -919,6 +958,7 @@ extern macro SmiToTaggedIndex(Smi): TaggedIndex;
extern macro RoundIntPtrToFloat64(intptr): float64;
extern macro ChangeFloat32ToFloat64(float32): float64;
extern macro ChangeNumberToFloat64(Number): float64;
+extern macro ChangeNumberToUint32(Number): uint32;
extern macro ChangeTaggedNonSmiToInt32(implicit context: Context)(JSAnyNotSmi):
int32;
extern macro ChangeTaggedToFloat64(implicit context: Context)(JSAny): float64;
@@ -938,6 +978,7 @@ extern macro NumberConstant(constexpr int32): Number;
extern macro NumberConstant(constexpr uint32): Number;
extern macro IntPtrConstant(constexpr int31): intptr;
extern macro IntPtrConstant(constexpr int32): intptr;
+extern macro Uint16Constant(constexpr uint16): uint16;
extern macro Int32Constant(constexpr int31): int31;
extern macro Int32Constant(constexpr int32): int32;
extern macro Float64Constant(constexpr int31): float64;
@@ -962,22 +1003,6 @@ extern macro BitcastWordToTagged(uintptr): Object;
extern macro BitcastTaggedToWord(Tagged): intptr;
extern macro BitcastTaggedToWordForTagAndSmiBits(Tagged): intptr;
-macro Is<A : type extends Object, B : type extends Object>(
- implicit context: Context)(o: B): bool {
- Cast<A>(o) otherwise return false;
- return true;
-}
-
-macro UnsafeCast<A : type extends Object>(implicit context: Context)(o: Object):
- A {
- assert(Is<A>(o));
- return %RawDownCast<A>(o);
-}
-
-macro UnsafeConstCast<T: type>(r: const &T):&T {
- return %RawDownCast<&T>(r);
-}
-
extern macro FixedArrayMapConstant(): Map;
extern macro FixedDoubleArrayMapConstant(): Map;
extern macro FixedCOWArrayMapConstant(): Map;
@@ -986,7 +1011,6 @@ extern macro EmptyFixedArrayConstant(): EmptyFixedArray;
extern macro PromiseCapabilityMapConstant(): Map;
extern macro OneByteStringMapConstant(): Map;
extern macro StringMapConstant(): Map;
-extern macro SloppyArgumentsElementsMapConstant(): Map;
const kFixedArrayMap: Map = FixedArrayMapConstant();
const kFixedDoubleArrayMap: Map = FixedDoubleArrayMapConstant();
@@ -998,7 +1022,6 @@ const kPromiseCapabilityMap: Map = PromiseCapabilityMapConstant();
const kOneByteStringMap: Map = OneByteStringMapConstant();
// The map of a non-internalized internal SeqTwoByteString.
const kStringMap: Map = StringMapConstant();
-const kSloppyArgumentsElementsMap: Map = SloppyArgumentsElementsMapConstant();
extern macro IsPrototypeInitialArrayPrototype(implicit context: Context)(Map):
bool;
@@ -1371,7 +1394,6 @@ transitioning macro GetMethod(implicit context: Context)(
MessageTemplate::kPropertyNotFunction, value, symbol, o);
}
-extern macro NumberToString(Number): String;
extern macro IsOneByteStringInstanceType(InstanceType): bool;
// After converting an index to an integer, calculate a relative index:
@@ -1514,6 +1536,9 @@ macro IsFastJSArrayForReadWithNoCustomIteration(context: Context, o: Object):
extern transitioning runtime
CreateDataProperty(implicit context: Context)(JSReceiver, JSAny, JSAny);
+extern transitioning runtime SetOwnPropertyIgnoreAttributes(
+ implicit context: Context)(JSObject, String, JSAny, Smi);
+
namespace runtime {
extern runtime
GetDerivedMap(Context, JSFunction, JSReceiver): Map;
@@ -1573,35 +1598,6 @@ transitioning builtin FastCreateDataProperty(implicit context: Context)(
return Undefined;
}
-@export
-transitioning macro ToStringImpl(context: Context, o: JSAny): String {
- let result: JSAny = o;
- while (true) {
- typeswitch (result) {
- case (num: Number): {
- return NumberToString(num);
- }
- case (str: String): {
- return str;
- }
- case (oddball: Oddball): {
- return oddball.to_string;
- }
- case (JSReceiver): {
- result = NonPrimitiveToPrimitive_String(context, result);
- continue;
- }
- case (Symbol): {
- ThrowTypeError(MessageTemplate::kSymbolToString);
- }
- case (JSAny): {
- return ToStringRT(context, o);
- }
- }
- }
- unreachable;
-}
-
macro VerifiedUnreachable(): never {
StaticAssert(false);
unreachable;
diff --git a/chromium/v8/src/builtins/bigint.tq b/chromium/v8/src/builtins/bigint.tq
index d52de7f84ea..409301dcc98 100644
--- a/chromium/v8/src/builtins/bigint.tq
+++ b/chromium/v8/src/builtins/bigint.tq
@@ -13,6 +13,7 @@ type BigInt extends BigIntBase;
@noVerifier
@hasSameInstanceTypeAsParent
+@doNotGenerateCast
extern class MutableBigInt extends BigIntBase generates 'TNode<BigInt>' {
}
diff --git a/chromium/v8/src/builtins/builtins-array-gen.cc b/chromium/v8/src/builtins/builtins-array-gen.cc
index 734b9b634a0..dfd52255830 100644
--- a/chromium/v8/src/builtins/builtins-array-gen.cc
+++ b/chromium/v8/src/builtins/builtins-array-gen.cc
@@ -438,7 +438,6 @@ TF_BUILTIN(ArrayPrototypePush, CodeStubAssembler) {
}
TF_BUILTIN(ExtractFastJSArray, ArrayBuiltinsAssembler) {
- ParameterMode mode = OptimalParameterMode();
TNode<Context> context = CAST(Parameter(Descriptor::kContext));
TNode<JSArray> array = CAST(Parameter(Descriptor::kSource));
TNode<BInt> begin = SmiToBInt(CAST(Parameter(Descriptor::kBegin)));
@@ -446,7 +445,7 @@ TF_BUILTIN(ExtractFastJSArray, ArrayBuiltinsAssembler) {
CSA_ASSERT(this, Word32BinaryNot(IsNoElementsProtectorCellInvalid()));
- Return(ExtractFastJSArray(context, array, begin, count, mode));
+ Return(ExtractFastJSArray(context, array, begin, count));
}
TF_BUILTIN(CloneFastJSArray, ArrayBuiltinsAssembler) {
@@ -477,7 +476,7 @@ TF_BUILTIN(CloneFastJSArrayFillingHoles, ArrayBuiltinsAssembler) {
LoadElementsKind(array))),
Word32BinaryNot(IsNoElementsProtectorCellInvalid())));
- Return(CloneFastJSArray(context, array, {},
+ Return(CloneFastJSArray(context, array, base::nullopt,
HoleConversionMode::kConvertToUndefined));
}
@@ -1153,7 +1152,7 @@ TF_BUILTIN(ArrayIndexOfHoleyDoubles, ArrayIncludesIndexofAssembler) {
// ES #sec-array.prototype.values
TF_BUILTIN(ArrayPrototypeValues, CodeStubAssembler) {
- TNode<Context> context = CAST(Parameter(Descriptor::kContext));
+ TNode<NativeContext> context = CAST(Parameter(Descriptor::kContext));
TNode<Object> receiver = CAST(Parameter(Descriptor::kReceiver));
Return(CreateArrayIterator(context, ToObject_Inline(context, receiver),
IterationKind::kValues));
@@ -1161,7 +1160,7 @@ TF_BUILTIN(ArrayPrototypeValues, CodeStubAssembler) {
// ES #sec-array.prototype.entries
TF_BUILTIN(ArrayPrototypeEntries, CodeStubAssembler) {
- TNode<Context> context = CAST(Parameter(Descriptor::kContext));
+ TNode<NativeContext> context = CAST(Parameter(Descriptor::kContext));
TNode<Object> receiver = CAST(Parameter(Descriptor::kReceiver));
Return(CreateArrayIterator(context, ToObject_Inline(context, receiver),
IterationKind::kEntries));
@@ -1169,7 +1168,7 @@ TF_BUILTIN(ArrayPrototypeEntries, CodeStubAssembler) {
// ES #sec-array.prototype.keys
TF_BUILTIN(ArrayPrototypeKeys, CodeStubAssembler) {
- TNode<Context> context = CAST(Parameter(Descriptor::kContext));
+ TNode<NativeContext> context = CAST(Parameter(Descriptor::kContext));
TNode<Object> receiver = CAST(Parameter(Descriptor::kReceiver));
Return(CreateArrayIterator(context, ToObject_Inline(context, receiver),
IterationKind::kKeys));
@@ -1665,7 +1664,8 @@ void ArrayBuiltinsAssembler::TailCallArrayConstructorStub(
void ArrayBuiltinsAssembler::CreateArrayDispatchNoArgument(
TNode<Context> context, TNode<JSFunction> target, TNode<Int32T> argc,
- AllocationSiteOverrideMode mode, TNode<AllocationSite> allocation_site) {
+ AllocationSiteOverrideMode mode,
+ base::Optional<TNode<AllocationSite>> allocation_site) {
if (mode == DISABLE_ALLOCATION_SITES) {
Callable callable = CodeFactory::ArrayNoArgumentConstructor(
isolate(), GetInitialFastElementsKind(), mode);
@@ -1674,7 +1674,8 @@ void ArrayBuiltinsAssembler::CreateArrayDispatchNoArgument(
argc);
} else {
DCHECK_EQ(mode, DONT_OVERRIDE);
- TNode<Int32T> elements_kind = LoadElementsKind(allocation_site);
+ DCHECK(allocation_site);
+ TNode<Int32T> elements_kind = LoadElementsKind(*allocation_site);
// TODO(ishell): Compute the builtin index dynamically instead of
// iterating over all expected elements kinds.
@@ -1688,7 +1689,7 @@ void ArrayBuiltinsAssembler::CreateArrayDispatchNoArgument(
Callable callable =
CodeFactory::ArrayNoArgumentConstructor(isolate(), kind, mode);
- TailCallArrayConstructorStub(callable, context, target, allocation_site,
+ TailCallArrayConstructorStub(callable, context, target, *allocation_site,
argc);
BIND(&next);
@@ -1701,7 +1702,8 @@ void ArrayBuiltinsAssembler::CreateArrayDispatchNoArgument(
void ArrayBuiltinsAssembler::CreateArrayDispatchSingleArgument(
TNode<Context> context, TNode<JSFunction> target, TNode<Int32T> argc,
- AllocationSiteOverrideMode mode, TNode<AllocationSite> allocation_site) {
+ AllocationSiteOverrideMode mode,
+ base::Optional<TNode<AllocationSite>> allocation_site) {
if (mode == DISABLE_ALLOCATION_SITES) {
ElementsKind initial = GetInitialFastElementsKind();
ElementsKind holey_initial = GetHoleyElementsKind(initial);
@@ -1712,7 +1714,8 @@ void ArrayBuiltinsAssembler::CreateArrayDispatchSingleArgument(
argc);
} else {
DCHECK_EQ(mode, DONT_OVERRIDE);
- TNode<Smi> transition_info = LoadTransitionInfo(allocation_site);
+ DCHECK(allocation_site);
+ TNode<Smi> transition_info = LoadTransitionInfo(*allocation_site);
// Least significant bit in fast array elements kind means holeyness.
STATIC_ASSERT(PACKED_SMI_ELEMENTS == 0);
@@ -1735,7 +1738,7 @@ void ArrayBuiltinsAssembler::CreateArrayDispatchSingleArgument(
// Make elements kind holey and update elements kind in the type info.
var_elements_kind = Word32Or(var_elements_kind.value(), Int32Constant(1));
StoreObjectFieldNoWriteBarrier(
- allocation_site, AllocationSite::kTransitionInfoOrBoilerplateOffset,
+ *allocation_site, AllocationSite::kTransitionInfoOrBoilerplateOffset,
SmiOr(transition_info, SmiConstant(fast_elements_kind_holey_mask)));
Goto(&normal_sequence);
}
@@ -1756,7 +1759,7 @@ void ArrayBuiltinsAssembler::CreateArrayDispatchSingleArgument(
Callable callable =
CodeFactory::ArraySingleArgumentConstructor(isolate(), kind, mode);
- TailCallArrayConstructorStub(callable, context, target, allocation_site,
+ TailCallArrayConstructorStub(callable, context, target, *allocation_site,
argc);
BIND(&next);
@@ -1769,7 +1772,8 @@ void ArrayBuiltinsAssembler::CreateArrayDispatchSingleArgument(
void ArrayBuiltinsAssembler::GenerateDispatchToArrayStub(
TNode<Context> context, TNode<JSFunction> target, TNode<Int32T> argc,
- AllocationSiteOverrideMode mode, TNode<AllocationSite> allocation_site) {
+ AllocationSiteOverrideMode mode,
+ base::Optional<TNode<AllocationSite>> allocation_site) {
Label check_one_case(this), fallthrough(this);
GotoIfNot(Word32Equal(argc, Int32Constant(0)), &check_one_case);
CreateArrayDispatchNoArgument(context, target, argc, mode, allocation_site);
@@ -1862,8 +1866,9 @@ void ArrayBuiltinsAssembler::GenerateConstructor(
{
TNode<JSArray> array = AllocateJSArray(
elements_kind, array_map, array_size_smi, array_size_smi,
- mode == DONT_TRACK_ALLOCATION_SITE ? TNode<AllocationSite>()
- : CAST(allocation_site));
+ mode == DONT_TRACK_ALLOCATION_SITE
+ ? base::Optional<TNode<AllocationSite>>(base::nullopt)
+ : CAST(allocation_site));
Return(array);
}
}
@@ -1882,9 +1887,10 @@ void ArrayBuiltinsAssembler::GenerateArrayNoArgumentConstructor(
Parameter(Descriptor::kFunction), JSFunction::kContextOffset));
bool track_allocation_site =
AllocationSite::ShouldTrack(kind) && mode != DISABLE_ALLOCATION_SITES;
- TNode<AllocationSite> allocation_site =
- track_allocation_site ? CAST(Parameter(Descriptor::kAllocationSite))
- : TNode<AllocationSite>();
+ base::Optional<TNode<AllocationSite>> allocation_site =
+ track_allocation_site
+ ? CAST(Parameter(Descriptor::kAllocationSite))
+ : base::Optional<TNode<AllocationSite>>(base::nullopt);
TNode<Map> array_map = LoadJSArrayElementsMap(kind, native_context);
TNode<JSArray> array = AllocateJSArray(
kind, array_map, IntPtrConstant(JSArray::kPreallocatedArrayElements),
diff --git a/chromium/v8/src/builtins/builtins-array-gen.h b/chromium/v8/src/builtins/builtins-array-gen.h
index 088af90665d..96833d9dea2 100644
--- a/chromium/v8/src/builtins/builtins-array-gen.h
+++ b/chromium/v8/src/builtins/builtins-array-gen.h
@@ -72,20 +72,20 @@ class ArrayBuiltinsAssembler : public CodeStubAssembler {
TNode<JSFunction> target, TNode<HeapObject> allocation_site_or_undefined,
TNode<Int32T> argc);
- void GenerateDispatchToArrayStub(TNode<Context> context,
- TNode<JSFunction> target, TNode<Int32T> argc,
- AllocationSiteOverrideMode mode,
- TNode<AllocationSite> allocation_site = {});
+ void GenerateDispatchToArrayStub(
+ TNode<Context> context, TNode<JSFunction> target, TNode<Int32T> argc,
+ AllocationSiteOverrideMode mode,
+ base::Optional<TNode<AllocationSite>> allocation_site = base::nullopt);
void CreateArrayDispatchNoArgument(
TNode<Context> context, TNode<JSFunction> target, TNode<Int32T> argc,
AllocationSiteOverrideMode mode,
- TNode<AllocationSite> allocation_site = {});
+ base::Optional<TNode<AllocationSite>> allocation_site);
void CreateArrayDispatchSingleArgument(
TNode<Context> context, TNode<JSFunction> target, TNode<Int32T> argc,
AllocationSiteOverrideMode mode,
- TNode<AllocationSite> allocation_site = {});
+ base::Optional<TNode<AllocationSite>> allocation_site);
void GenerateConstructor(TNode<Context> context,
TNode<HeapObject> array_function,
diff --git a/chromium/v8/src/builtins/builtins-async-iterator-gen.cc b/chromium/v8/src/builtins/builtins-async-iterator-gen.cc
index b138515af65..73e5605ccc4 100644
--- a/chromium/v8/src/builtins/builtins-async-iterator-gen.cc
+++ b/chromium/v8/src/builtins/builtins-async-iterator-gen.cc
@@ -16,6 +16,10 @@ namespace internal {
namespace {
class AsyncFromSyncBuiltinsAssembler : public AsyncBuiltinsAssembler {
public:
+ // The 'next' and 'return' methods take an optional value parameter, and
+ // the 'throw' method takes an optional reason parameter.
+ static const int kValueOrReasonArg = 0;
+
explicit AsyncFromSyncBuiltinsAssembler(compiler::CodeAssemblerState* state)
: AsyncBuiltinsAssembler(state) {}
@@ -31,8 +35,8 @@ class AsyncFromSyncBuiltinsAssembler : public AsyncBuiltinsAssembler {
using SyncIteratorNodeGenerator =
std::function<TNode<Object>(TNode<JSReceiver>)>;
void Generate_AsyncFromSyncIteratorMethod(
- const TNode<Context> context, const TNode<Object> iterator,
- const TNode<Object> sent_value,
+ CodeStubArguments* args, const TNode<Context> context,
+ const TNode<Object> iterator, const TNode<Object> sent_value,
const SyncIteratorNodeGenerator& get_method,
const UndefinedMethodHandler& if_method_undefined,
const char* operation_name,
@@ -40,9 +44,9 @@ class AsyncFromSyncBuiltinsAssembler : public AsyncBuiltinsAssembler {
base::Optional<TNode<Object>> initial_exception_value = base::nullopt);
void Generate_AsyncFromSyncIteratorMethod(
- const TNode<Context> context, const TNode<Object> iterator,
- const TNode<Object> sent_value, Handle<String> name,
- const UndefinedMethodHandler& if_method_undefined,
+ CodeStubArguments* args, const TNode<Context> context,
+ const TNode<Object> iterator, const TNode<Object> sent_value,
+ Handle<String> name, const UndefinedMethodHandler& if_method_undefined,
const char* operation_name,
Label::Type reject_label_type = Label::kDeferred,
base::Optional<TNode<Object>> initial_exception_value = base::nullopt) {
@@ -50,7 +54,7 @@ class AsyncFromSyncBuiltinsAssembler : public AsyncBuiltinsAssembler {
return GetProperty(context, sync_iterator, name);
};
return Generate_AsyncFromSyncIteratorMethod(
- context, iterator, sent_value, get_method, if_method_undefined,
+ args, context, iterator, sent_value, get_method, if_method_undefined,
operation_name, reject_label_type, initial_exception_value);
}
@@ -97,8 +101,9 @@ void AsyncFromSyncBuiltinsAssembler::ThrowIfNotAsyncFromSyncIterator(
}
void AsyncFromSyncBuiltinsAssembler::Generate_AsyncFromSyncIteratorMethod(
- const TNode<Context> context, const TNode<Object> iterator,
- const TNode<Object> sent_value, const SyncIteratorNodeGenerator& get_method,
+ CodeStubArguments* args, const TNode<Context> context,
+ const TNode<Object> iterator, const TNode<Object> sent_value,
+ const SyncIteratorNodeGenerator& get_method,
const UndefinedMethodHandler& if_method_undefined,
const char* operation_name, Label::Type reject_label_type,
base::Optional<TNode<Object>> initial_exception_value) {
@@ -122,22 +127,37 @@ void AsyncFromSyncBuiltinsAssembler::Generate_AsyncFromSyncIteratorMethod(
if (if_method_undefined) {
Label if_isnotundefined(this);
- GotoIfNot(IsUndefined(method), &if_isnotundefined);
+ GotoIfNot(IsNullOrUndefined(method), &if_isnotundefined);
if_method_undefined(native_context, promise, &reject_promise);
BIND(&if_isnotundefined);
}
- TNode<Object> iter_result;
+ TVARIABLE(Object, iter_result);
{
+ Label has_sent_value(this), no_sent_value(this), merge(this);
ScopedExceptionHandler handler(this, &reject_promise, &var_exception);
- iter_result = Call(context, method, sync_iterator, sent_value);
+ Branch(
+ IntPtrGreaterThan(args->GetLength(), IntPtrConstant(kValueOrReasonArg)),
+ &has_sent_value, &no_sent_value);
+ BIND(&has_sent_value);
+ {
+ iter_result = Call(context, method, sync_iterator, sent_value);
+ Goto(&merge);
+ }
+ BIND(&no_sent_value);
+ {
+ iter_result = Call(context, method, sync_iterator);
+ Goto(&merge);
+ }
+ BIND(&merge);
}
TNode<Object> value;
TNode<Oddball> done;
- std::tie(value, done) = LoadIteratorResult(
- context, native_context, iter_result, &reject_promise, &var_exception);
+ std::tie(value, done) =
+ LoadIteratorResult(context, native_context, iter_result.value(),
+ &reject_promise, &var_exception);
const TNode<JSFunction> promise_fun =
CAST(LoadContextElement(native_context, Context::PROMISE_FUNCTION_INDEX));
@@ -160,15 +180,16 @@ void AsyncFromSyncBuiltinsAssembler::Generate_AsyncFromSyncIteratorMethod(
// Perform ! PerformPromiseThen(valueWrapper,
// onFulfilled, undefined, promiseCapability).
- Return(CallBuiltin(Builtins::kPerformPromiseThen, context, value_wrapper,
- on_fulfilled, UndefinedConstant(), promise));
+ args->PopAndReturn(CallBuiltin(Builtins::kPerformPromiseThen, context,
+ value_wrapper, on_fulfilled,
+ UndefinedConstant(), promise));
BIND(&reject_promise);
{
const TNode<Object> exception = var_exception.value();
CallBuiltin(Builtins::kRejectPromise, context, promise, exception,
TrueConstant());
- Return(promise);
+ args->PopAndReturn(promise);
}
}
@@ -252,8 +273,12 @@ AsyncFromSyncBuiltinsAssembler::LoadIteratorResult(
// https://tc39.github.io/proposal-async-iteration/
// Section #sec-%asyncfromsynciteratorprototype%.next
TF_BUILTIN(AsyncFromSyncIteratorPrototypeNext, AsyncFromSyncBuiltinsAssembler) {
- const TNode<Object> iterator = CAST(Parameter(Descriptor::kReceiver));
- const TNode<Object> value = CAST(Parameter(Descriptor::kValue));
+ TNode<IntPtrT> argc = ChangeInt32ToIntPtr(
+ UncheckedCast<Int32T>(Parameter(Descriptor::kJSActualArgumentsCount)));
+ CodeStubArguments args(this, argc);
+
+ const TNode<Object> iterator = args.GetReceiver();
+ const TNode<Object> value = args.GetOptionalArgumentValue(kValueOrReasonArg);
const TNode<Context> context = CAST(Parameter(Descriptor::kContext));
auto get_method = [=](const TNode<JSReceiver> unused) {
@@ -261,7 +286,7 @@ TF_BUILTIN(AsyncFromSyncIteratorPrototypeNext, AsyncFromSyncBuiltinsAssembler) {
JSAsyncFromSyncIterator::kNextOffset);
};
Generate_AsyncFromSyncIteratorMethod(
- context, iterator, value, get_method, UndefinedMethodHandler(),
+ &args, context, iterator, value, get_method, UndefinedMethodHandler(),
"[Async-from-Sync Iterator].prototype.next");
}
@@ -269,11 +294,16 @@ TF_BUILTIN(AsyncFromSyncIteratorPrototypeNext, AsyncFromSyncBuiltinsAssembler) {
// Section #sec-%asyncfromsynciteratorprototype%.return
TF_BUILTIN(AsyncFromSyncIteratorPrototypeReturn,
AsyncFromSyncBuiltinsAssembler) {
- const TNode<Object> iterator = CAST(Parameter(Descriptor::kReceiver));
- const TNode<Object> value = CAST(Parameter(Descriptor::kValue));
+ TNode<IntPtrT> argc = ChangeInt32ToIntPtr(
+ UncheckedCast<Int32T>(Parameter(Descriptor::kJSActualArgumentsCount)));
+ CodeStubArguments args(this, argc);
+
+ const TNode<Object> iterator = args.GetReceiver();
+ const TNode<Object> value = args.GetOptionalArgumentValue(kValueOrReasonArg);
const TNode<Context> context = CAST(Parameter(Descriptor::kContext));
- auto if_return_undefined = [=](const TNode<NativeContext> native_context,
+ auto if_return_undefined = [=, &args](
+ const TNode<NativeContext> native_context,
const TNode<JSPromise> promise,
Label* if_exception) {
// If return is undefined, then
@@ -285,20 +315,24 @@ TF_BUILTIN(AsyncFromSyncIteratorPrototypeReturn,
// IfAbruptRejectPromise(nextDone, promiseCapability).
// Return promiseCapability.[[Promise]].
CallBuiltin(Builtins::kResolvePromise, context, promise, iter_result);
- Return(promise);
+ args.PopAndReturn(promise);
};
Generate_AsyncFromSyncIteratorMethod(
- context, iterator, value, factory()->return_string(), if_return_undefined,
- "[Async-from-Sync Iterator].prototype.return");
+ &args, context, iterator, value, factory()->return_string(),
+ if_return_undefined, "[Async-from-Sync Iterator].prototype.return");
}
// https://tc39.github.io/proposal-async-iteration/
// Section #sec-%asyncfromsynciteratorprototype%.throw
TF_BUILTIN(AsyncFromSyncIteratorPrototypeThrow,
AsyncFromSyncBuiltinsAssembler) {
- const TNode<Object> iterator = CAST(Parameter(Descriptor::kReceiver));
- const TNode<Object> reason = CAST(Parameter(Descriptor::kReason));
+ TNode<IntPtrT> argc = ChangeInt32ToIntPtr(
+ UncheckedCast<Int32T>(Parameter(Descriptor::kJSActualArgumentsCount)));
+ CodeStubArguments args(this, argc);
+
+ const TNode<Object> iterator = args.GetReceiver();
+ const TNode<Object> reason = args.GetOptionalArgumentValue(kValueOrReasonArg);
const TNode<Context> context = CAST(Parameter(Descriptor::kContext));
auto if_throw_undefined = [=](const TNode<NativeContext> native_context,
@@ -306,9 +340,9 @@ TF_BUILTIN(AsyncFromSyncIteratorPrototypeThrow,
Label* if_exception) { Goto(if_exception); };
Generate_AsyncFromSyncIteratorMethod(
- context, iterator, reason, factory()->throw_string(), if_throw_undefined,
- "[Async-from-Sync Iterator].prototype.throw", Label::kNonDeferred,
- reason);
+ &args, context, iterator, reason, factory()->throw_string(),
+ if_throw_undefined, "[Async-from-Sync Iterator].prototype.throw",
+ Label::kNonDeferred, reason);
}
} // namespace internal
diff --git a/chromium/v8/src/builtins/builtins-call-gen.cc b/chromium/v8/src/builtins/builtins-call-gen.cc
index d457e033149..f7919b78f47 100644
--- a/chromium/v8/src/builtins/builtins-call-gen.cc
+++ b/chromium/v8/src/builtins/builtins-call-gen.cc
@@ -64,6 +64,49 @@ void Builtins::Generate_CallFunctionForwardVarargs(MacroAssembler* masm) {
masm->isolate()->builtins()->CallFunction());
}
+TF_BUILTIN(Call_ReceiverIsNullOrUndefined_WithFeedback,
+ CallOrConstructBuiltinsAssembler) {
+ TNode<Object> target = CAST(Parameter(Descriptor::kFunction));
+ TNode<Int32T> argc =
+ UncheckedCast<Int32T>(Parameter(Descriptor::kActualArgumentsCount));
+ TNode<Context> context = CAST(Parameter(Descriptor::kContext));
+ TNode<HeapObject> maybe_feedback_vector =
+ CAST(Parameter(Descriptor::kMaybeFeedbackVector));
+ TNode<Int32T> slot = UncheckedCast<Int32T>(Parameter(Descriptor::kSlot));
+ CollectCallFeedback(target, context, maybe_feedback_vector,
+ Unsigned(ChangeInt32ToIntPtr(slot)));
+ TailCallBuiltin(Builtins::kCall_ReceiverIsNullOrUndefined, context, target,
+ argc);
+}
+
+TF_BUILTIN(Call_ReceiverIsNotNullOrUndefined_WithFeedback,
+ CallOrConstructBuiltinsAssembler) {
+ TNode<Object> target = CAST(Parameter(Descriptor::kFunction));
+ TNode<Int32T> argc =
+ UncheckedCast<Int32T>(Parameter(Descriptor::kActualArgumentsCount));
+ TNode<Context> context = CAST(Parameter(Descriptor::kContext));
+ TNode<HeapObject> maybe_feedback_vector =
+ CAST(Parameter(Descriptor::kMaybeFeedbackVector));
+ TNode<Int32T> slot = UncheckedCast<Int32T>(Parameter(Descriptor::kSlot));
+ CollectCallFeedback(target, context, maybe_feedback_vector,
+ Unsigned(ChangeInt32ToIntPtr(slot)));
+ TailCallBuiltin(Builtins::kCall_ReceiverIsNotNullOrUndefined, context, target,
+ argc);
+}
+
+TF_BUILTIN(Call_ReceiverIsAny_WithFeedback, CallOrConstructBuiltinsAssembler) {
+ TNode<Object> target = CAST(Parameter(Descriptor::kFunction));
+ TNode<Int32T> argc =
+ UncheckedCast<Int32T>(Parameter(Descriptor::kActualArgumentsCount));
+ TNode<Context> context = CAST(Parameter(Descriptor::kContext));
+ TNode<HeapObject> maybe_feedback_vector =
+ CAST(Parameter(Descriptor::kMaybeFeedbackVector));
+ TNode<Int32T> slot = UncheckedCast<Int32T>(Parameter(Descriptor::kSlot));
+ CollectCallFeedback(target, context, maybe_feedback_vector,
+ Unsigned(ChangeInt32ToIntPtr(slot)));
+ TailCallBuiltin(Builtins::kCall_ReceiverIsAny, context, target, argc);
+}
+
void CallOrConstructBuiltinsAssembler::CallOrConstructWithArrayLike(
TNode<Object> target, base::Optional<TNode<Object>> new_target,
TNode<Object> arguments_list, TNode<Context> context) {
@@ -387,6 +430,19 @@ TF_BUILTIN(CallWithArrayLike, CallOrConstructBuiltinsAssembler) {
CallOrConstructWithArrayLike(target, new_target, arguments_list, context);
}
+TF_BUILTIN(CallWithArrayLike_WithFeedback, CallOrConstructBuiltinsAssembler) {
+ TNode<Object> target = CAST(Parameter(Descriptor::kTarget));
+ base::Optional<TNode<Object>> new_target = base::nullopt;
+ TNode<Object> arguments_list = CAST(Parameter(Descriptor::kArgumentsList));
+ TNode<Context> context = CAST(Parameter(Descriptor::kContext));
+ TNode<HeapObject> maybe_feedback_vector =
+ CAST(Parameter(Descriptor::kMaybeFeedbackVector));
+ TNode<Int32T> slot = UncheckedCast<Int32T>(Parameter(Descriptor::kSlot));
+ CollectCallFeedback(target, context, maybe_feedback_vector,
+ Unsigned(ChangeInt32ToIntPtr(slot)));
+ CallOrConstructWithArrayLike(target, new_target, arguments_list, context);
+}
+
TF_BUILTIN(CallWithSpread, CallOrConstructBuiltinsAssembler) {
TNode<Object> target = CAST(Parameter(Descriptor::kTarget));
base::Optional<TNode<Object>> new_target = base::nullopt;
@@ -397,6 +453,21 @@ TF_BUILTIN(CallWithSpread, CallOrConstructBuiltinsAssembler) {
CallOrConstructWithSpread(target, new_target, spread, args_count, context);
}
+TF_BUILTIN(CallWithSpread_WithFeedback, CallOrConstructBuiltinsAssembler) {
+ TNode<Object> target = CAST(Parameter(Descriptor::kTarget));
+ base::Optional<TNode<Object>> new_target = base::nullopt;
+ TNode<Object> spread = CAST(Parameter(Descriptor::kSpread));
+ TNode<Int32T> args_count =
+ UncheckedCast<Int32T>(Parameter(Descriptor::kArgumentsCount));
+ TNode<Context> context = CAST(Parameter(Descriptor::kContext));
+ TNode<HeapObject> maybe_feedback_vector =
+ CAST(Parameter(Descriptor::kMaybeFeedbackVector));
+ TNode<Int32T> slot = UncheckedCast<Int32T>(Parameter(Descriptor::kSlot));
+ CollectCallFeedback(target, context, maybe_feedback_vector,
+ Unsigned(ChangeInt32ToIntPtr(slot)));
+ CallOrConstructWithSpread(target, new_target, spread, args_count, context);
+}
+
TNode<JSReceiver> CallOrConstructBuiltinsAssembler::GetCompatibleReceiver(
TNode<JSReceiver> receiver, TNode<HeapObject> signature,
TNode<Context> context) {
@@ -535,7 +606,7 @@ void CallOrConstructBuiltinsAssembler::CallFunctionTemplate(
TNode<IntPtrT> function_template_info_flags = LoadAndUntagObjectField(
function_template_info, FunctionTemplateInfo::kFlagOffset);
Branch(IsSetWord(function_template_info_flags,
- 1 << FunctionTemplateInfo::kAcceptAnyReceiver),
+ 1 << FunctionTemplateInfo::AcceptAnyReceiverBit::kShift),
&receiver_done, &receiver_needs_access_check);
BIND(&receiver_needs_access_check);
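The flag test above now takes the shift from the FunctionTemplateInfo bit field itself rather than from a separate constant. A minimal stand-in (hypothetical field layout, not V8's base::BitField template) showing why masking with 1 << kShift answers the question:

#include <cstdint>
#include <cstdio>

struct AcceptAnyReceiverBit {            // hypothetical stand-in
  static constexpr int kShift = 3;       // bit position within the flags word
  static constexpr uint32_t kMask = uint32_t{1} << kShift;
  static constexpr uint32_t encode(bool v) { return v ? kMask : 0u; }
  static constexpr bool decode(uint32_t flags) { return (flags & kMask) != 0; }
};

int main() {
  uint32_t flags = AcceptAnyReceiverBit::encode(true);
  // Testing the flag is exactly the IsSetWord pattern used above.
  std::printf("accept any receiver: %d\n", AcceptAnyReceiverBit::decode(flags));
}
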
diff --git a/chromium/v8/src/builtins/builtins-collections-gen.cc b/chromium/v8/src/builtins/builtins-collections-gen.cc
index 2f0e5a75602..3ab4392b87c 100644
--- a/chromium/v8/src/builtins/builtins-collections-gen.cc
+++ b/chromium/v8/src/builtins/builtins-collections-gen.cc
@@ -761,7 +761,7 @@ void CollectionsBuiltinsAssembler::FindOrderedHashTableEntry(
const TNode<IntPtrT> number_of_buckets =
SmiUntag(CAST(UnsafeLoadFixedArrayElement(
table, CollectionType::NumberOfBucketsIndex())));
- const TNode<WordT> bucket =
+ const TNode<IntPtrT> bucket =
WordAnd(hash, IntPtrSub(number_of_buckets, IntPtrConstant(1)));
const TNode<IntPtrT> first_entry = SmiUntag(CAST(UnsafeLoadFixedArrayElement(
table, bucket, CollectionType::HashTableStartIndex() * kTaggedSize)));
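Only the node type changes here (WordT tightened to IntPtrT); the bucket selection itself relies on the bucket count being a power of two, which makes masking with number_of_buckets - 1 equivalent to a modulus. A tiny self-check of that identity:

#include <cassert>
#include <cstdint>

int main() {
  const uintptr_t n = 8;  // power-of-two bucket count, as the masking assumes
  for (uintptr_t hash = 0; hash < 1000; ++hash) {
    assert((hash & (n - 1)) == (hash % n));  // same bucket either way
  }
  return 0;
}
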
diff --git a/chromium/v8/src/builtins/builtins-constructor-gen.cc b/chromium/v8/src/builtins/builtins-constructor-gen.cc
index c706ce9306c..4079bc75d14 100644
--- a/chromium/v8/src/builtins/builtins-constructor-gen.cc
+++ b/chromium/v8/src/builtins/builtins-constructor-gen.cc
@@ -36,6 +36,31 @@ void Builtins::Generate_ConstructFunctionForwardVarargs(MacroAssembler* masm) {
BUILTIN_CODE(masm->isolate(), ConstructFunction));
}
+TF_BUILTIN(Construct_WithFeedback, CallOrConstructBuiltinsAssembler) {
+ TNode<Object> target = CAST(Parameter(Descriptor::kTarget));
+ TNode<Object> new_target = CAST(Parameter(Descriptor::kNewTarget));
+ TNode<Int32T> argc =
+ UncheckedCast<Int32T>(Parameter(Descriptor::kActualArgumentsCount));
+ TNode<Context> context = CAST(Parameter(Descriptor::kContext));
+ TNode<HeapObject> maybe_feedback_vector =
+ CAST(Parameter(Descriptor::kMaybeFeedbackVector));
+ TNode<Int32T> slot = UncheckedCast<Int32T>(Parameter(Descriptor::kSlot));
+
+ TVARIABLE(AllocationSite, allocation_site);
+ Label if_construct_generic(this), if_construct_array(this);
+ CollectConstructFeedback(context, target, new_target, maybe_feedback_vector,
+ Unsigned(ChangeInt32ToIntPtr(slot)),
+ &if_construct_generic, &if_construct_array,
+ &allocation_site);
+
+ BIND(&if_construct_generic);
+ TailCallBuiltin(Builtins::kConstruct, context, target, new_target, argc);
+
+ BIND(&if_construct_array);
+ TailCallBuiltin(Builtins::kArrayConstructorImpl, context, target, new_target,
+ argc, allocation_site.value());
+}
+
TF_BUILTIN(ConstructWithArrayLike, CallOrConstructBuiltinsAssembler) {
TNode<Object> target = CAST(Parameter(Descriptor::kTarget));
TNode<Object> new_target = CAST(Parameter(Descriptor::kNewTarget));
@@ -44,6 +69,30 @@ TF_BUILTIN(ConstructWithArrayLike, CallOrConstructBuiltinsAssembler) {
CallOrConstructWithArrayLike(target, new_target, arguments_list, context);
}
+TF_BUILTIN(ConstructWithArrayLike_WithFeedback,
+ CallOrConstructBuiltinsAssembler) {
+ TNode<Object> target = CAST(Parameter(Descriptor::kTarget));
+ TNode<Object> new_target = CAST(Parameter(Descriptor::kNewTarget));
+ TNode<Object> arguments_list = CAST(Parameter(Descriptor::kArgumentsList));
+ TNode<Context> context = CAST(Parameter(Descriptor::kContext));
+ TNode<HeapObject> maybe_feedback_vector =
+ CAST(Parameter(Descriptor::kMaybeFeedbackVector));
+ TNode<Int32T> slot = UncheckedCast<Int32T>(Parameter(Descriptor::kSlot));
+
+ TVARIABLE(AllocationSite, allocation_site);
+ Label if_construct_generic(this), if_construct_array(this);
+ CollectConstructFeedback(context, target, new_target, maybe_feedback_vector,
+ Unsigned(ChangeInt32ToIntPtr(slot)),
+ &if_construct_generic, &if_construct_array,
+ &allocation_site);
+
+ BIND(&if_construct_array);
+ Goto(&if_construct_generic); // Not implemented.
+
+ BIND(&if_construct_generic);
+ CallOrConstructWithArrayLike(target, new_target, arguments_list, context);
+}
+
TF_BUILTIN(ConstructWithSpread, CallOrConstructBuiltinsAssembler) {
TNode<Object> target = CAST(Parameter(Descriptor::kTarget));
TNode<Object> new_target = CAST(Parameter(Descriptor::kNewTarget));
@@ -54,6 +103,31 @@ TF_BUILTIN(ConstructWithSpread, CallOrConstructBuiltinsAssembler) {
CallOrConstructWithSpread(target, new_target, spread, args_count, context);
}
+TF_BUILTIN(ConstructWithSpread_WithFeedback, CallOrConstructBuiltinsAssembler) {
+ TNode<Object> target = CAST(Parameter(Descriptor::kTarget));
+ TNode<Object> new_target = CAST(Parameter(Descriptor::kNewTarget));
+ TNode<Object> spread = CAST(Parameter(Descriptor::kSpread));
+ TNode<Int32T> args_count =
+ UncheckedCast<Int32T>(Parameter(Descriptor::kActualArgumentsCount));
+ TNode<Context> context = CAST(Parameter(Descriptor::kContext));
+ TNode<HeapObject> maybe_feedback_vector =
+ CAST(Parameter(Descriptor::kMaybeFeedbackVector));
+ TNode<Int32T> slot = UncheckedCast<Int32T>(Parameter(Descriptor::kSlot));
+
+ TVARIABLE(AllocationSite, allocation_site);
+ Label if_construct_generic(this), if_construct_array(this);
+ CollectConstructFeedback(context, target, new_target, maybe_feedback_vector,
+ Unsigned(ChangeInt32ToIntPtr(slot)),
+ &if_construct_generic, &if_construct_array,
+ &allocation_site);
+
+ BIND(&if_construct_array);
+ Goto(&if_construct_generic); // Not implemented.
+
+ BIND(&if_construct_generic);
+ CallOrConstructWithSpread(target, new_target, spread, args_count, context);
+}
+
using Node = compiler::Node;
TF_BUILTIN(FastNewClosure, ConstructorBuiltinsAssembler) {
@@ -691,6 +765,11 @@ TF_BUILTIN(ObjectConstructor, ConstructorBuiltinsAssembler) {
args.PopAndReturn(var_result.value());
}
+TF_BUILTIN(CreateEmptyLiteralObject, ConstructorBuiltinsAssembler) {
+ TNode<Context> context = CAST(Parameter(Descriptor::kContext));
+ Return(EmitCreateEmptyObjectLiteral(context));
+}
+
// ES #sec-number-constructor
TF_BUILTIN(NumberConstructor, ConstructorBuiltinsAssembler) {
TNode<Context> context = CAST(Parameter(Descriptor::kContext));
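The three _WithFeedback construct builtins above share the feedback-collection preamble; only Construct_WithFeedback acts on the result, tail-calling ArrayConstructorImpl with the recorded AllocationSite on the array path, while the array-like and spread variants fall through to the generic path ("Not implemented"). A toy model of that dispatch, not V8 code:

#include <cstdio>
#include <optional>

struct AllocationSite { int elements_kind; };

void GenericConstruct() { std::puts("generic construct"); }
void ArrayConstruct(const AllocationSite& site) {
  std::printf("array construct, elements_kind=%d\n", site.elements_kind);
}

void ConstructWithFeedback(std::optional<AllocationSite> site_from_feedback) {
  if (site_from_feedback) {
    ArrayConstruct(*site_from_feedback);  // &if_construct_array
  } else {
    GenericConstruct();                   // &if_construct_generic
  }
}

int main() {
  ConstructWithFeedback(std::nullopt);
  ConstructWithFeedback(AllocationSite{1});
}
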
diff --git a/chromium/v8/src/builtins/builtins-conversion-gen.cc b/chromium/v8/src/builtins/builtins-conversion-gen.cc
index e524f39b5fd..54fa752969e 100644
--- a/chromium/v8/src/builtins/builtins-conversion-gen.cc
+++ b/chromium/v8/src/builtins/builtins-conversion-gen.cc
@@ -12,181 +12,6 @@
namespace v8 {
namespace internal {
-class ConversionBuiltinsAssembler : public CodeStubAssembler {
- public:
- explicit ConversionBuiltinsAssembler(compiler::CodeAssemblerState* state)
- : CodeStubAssembler(state) {}
-
- protected:
- void Generate_NonPrimitiveToPrimitive(TNode<Context> context,
- TNode<Object> input,
- ToPrimitiveHint hint);
-
- void Generate_OrdinaryToPrimitive(TNode<Context> context, TNode<Object> input,
- OrdinaryToPrimitiveHint hint);
-};
-
-// ES6 section 7.1.1 ToPrimitive ( input [ , PreferredType ] )
-void ConversionBuiltinsAssembler::Generate_NonPrimitiveToPrimitive(
- TNode<Context> context, TNode<Object> input, ToPrimitiveHint hint) {
- // Lookup the @@toPrimitive property on the {input}.
- TNode<Object> exotic_to_prim =
- GetProperty(context, input, factory()->to_primitive_symbol());
-
- // Check if {exotic_to_prim} is neither null nor undefined.
- Label ordinary_to_primitive(this);
- GotoIf(IsNullOrUndefined(exotic_to_prim), &ordinary_to_primitive);
- {
- // Invoke the {exotic_to_prim} method on the {input} with a string
- // representation of the {hint}.
- TNode<String> hint_string =
- HeapConstant(factory()->ToPrimitiveHintString(hint));
- TNode<Object> result = Call(context, exotic_to_prim, input, hint_string);
-
- // Verify that the {result} is actually a primitive.
- Label if_resultisprimitive(this),
- if_resultisnotprimitive(this, Label::kDeferred);
- GotoIf(TaggedIsSmi(result), &if_resultisprimitive);
- TNode<Uint16T> result_instance_type = LoadInstanceType(CAST(result));
- Branch(IsPrimitiveInstanceType(result_instance_type), &if_resultisprimitive,
- &if_resultisnotprimitive);
-
- BIND(&if_resultisprimitive);
- {
- // Just return the {result}.
- Return(result);
- }
-
- BIND(&if_resultisnotprimitive);
- {
- // Somehow the @@toPrimitive method on {input} didn't yield a primitive.
- ThrowTypeError(context, MessageTemplate::kCannotConvertToPrimitive);
- }
- }
-
- // Convert using the OrdinaryToPrimitive algorithm instead.
- BIND(&ordinary_to_primitive);
- {
- Callable callable = CodeFactory::OrdinaryToPrimitive(
- isolate(), (hint == ToPrimitiveHint::kString)
- ? OrdinaryToPrimitiveHint::kString
- : OrdinaryToPrimitiveHint::kNumber);
- TailCallStub(callable, context, input);
- }
-}
-
-TF_BUILTIN(NonPrimitiveToPrimitive_Default, ConversionBuiltinsAssembler) {
- TNode<Context> context = CAST(Parameter(Descriptor::kContext));
- TNode<Object> input = CAST(Parameter(Descriptor::kArgument));
-
- Generate_NonPrimitiveToPrimitive(context, input, ToPrimitiveHint::kDefault);
-}
-
-TF_BUILTIN(NonPrimitiveToPrimitive_Number, ConversionBuiltinsAssembler) {
- TNode<Context> context = CAST(Parameter(Descriptor::kContext));
- TNode<Object> input = CAST(Parameter(Descriptor::kArgument));
-
- Generate_NonPrimitiveToPrimitive(context, input, ToPrimitiveHint::kNumber);
-}
-
-TF_BUILTIN(NonPrimitiveToPrimitive_String, ConversionBuiltinsAssembler) {
- TNode<Context> context = CAST(Parameter(Descriptor::kContext));
- TNode<Object> input = CAST(Parameter(Descriptor::kArgument));
-
- Generate_NonPrimitiveToPrimitive(context, input, ToPrimitiveHint::kString);
-}
-
-TF_BUILTIN(StringToNumber, CodeStubAssembler) {
- TNode<String> input = CAST(Parameter(Descriptor::kArgument));
-
- Return(StringToNumber(input));
-}
-
-TF_BUILTIN(ToName, CodeStubAssembler) {
- TNode<Context> context = CAST(Parameter(Descriptor::kContext));
- TNode<Object> input = CAST(Parameter(Descriptor::kArgument));
-
- TVARIABLE(Object, var_input, input);
- Label loop(this, &var_input);
- Goto(&loop);
- BIND(&loop);
- {
- // Load the current {input} value.
- TNode<Object> input = var_input.value();
-
-    // Dispatch based on the type of the {input}.
- Label if_inputisbigint(this), if_inputisname(this), if_inputisnumber(this),
- if_inputisoddball(this), if_inputisreceiver(this, Label::kDeferred);
- GotoIf(TaggedIsSmi(input), &if_inputisnumber);
- TNode<Uint16T> input_instance_type = LoadInstanceType(CAST(input));
- STATIC_ASSERT(FIRST_NAME_TYPE == FIRST_TYPE);
- GotoIf(IsNameInstanceType(input_instance_type), &if_inputisname);
- GotoIf(IsJSReceiverInstanceType(input_instance_type), &if_inputisreceiver);
- GotoIf(IsHeapNumberInstanceType(input_instance_type), &if_inputisnumber);
- Branch(IsBigIntInstanceType(input_instance_type), &if_inputisbigint,
- &if_inputisoddball);
-
- BIND(&if_inputisbigint);
- {
- // We don't have a fast-path for BigInt currently, so just
- // tail call to the %ToString runtime function here for now.
- TailCallRuntime(Runtime::kToStringRT, context, input);
- }
-
- BIND(&if_inputisname);
- {
- // The {input} is already a Name.
- Return(input);
- }
-
- BIND(&if_inputisnumber);
- {
-      // Convert the Number {input} to a String.
- TailCallBuiltin(Builtins::kNumberToString, context, input);
- }
-
- BIND(&if_inputisoddball);
- {
- // Just return the {input}'s string representation.
- CSA_ASSERT(this, IsOddballInstanceType(input_instance_type));
- Return(LoadObjectField(CAST(input), Oddball::kToStringOffset));
- }
-
- BIND(&if_inputisreceiver);
- {
- // Convert the JSReceiver {input} to a primitive first,
- // and then run the loop again with the new {input},
- // which is then a primitive value.
- var_input = CallBuiltin(Builtins::kNonPrimitiveToPrimitive_String,
- context, input);
- Goto(&loop);
- }
- }
-}
-
-TF_BUILTIN(NonNumberToNumber, CodeStubAssembler) {
- TNode<Context> context = CAST(Parameter(Descriptor::kContext));
- TNode<HeapObject> input = CAST(Parameter(Descriptor::kArgument));
-
- Return(NonNumberToNumber(context, input));
-}
-
-TF_BUILTIN(NonNumberToNumeric, CodeStubAssembler) {
- TNode<Context> context = CAST(Parameter(Descriptor::kContext));
- TNode<HeapObject> input = CAST(Parameter(Descriptor::kArgument));
-
- Return(NonNumberToNumeric(context, input));
-}
-
-TF_BUILTIN(ToNumeric, CodeStubAssembler) {
- TNode<Context> context = CAST(Parameter(Descriptor::kContext));
- TNode<Object> input = CAST(Parameter(Descriptor::kArgument));
-
- Return(Select<Numeric>(
- IsNumber(input), [=] { return CAST(input); },
- [=] { return NonNumberToNumeric(context, CAST(input)); }));
-}
-
// ES6 section 7.1.3 ToNumber ( argument )
TF_BUILTIN(ToNumber, CodeStubAssembler) {
// TODO(solanes, v8:6949): Changing this to a TNode<Context> crashes with the
@@ -206,93 +31,6 @@ TF_BUILTIN(ToNumberConvertBigInt, CodeStubAssembler) {
Return(ToNumber(context, input, BigIntHandling::kConvertToNumber));
}
-// ES section #sec-tostring-applied-to-the-number-type
-TF_BUILTIN(NumberToString, CodeStubAssembler) {
- TNode<Number> input = CAST(Parameter(Descriptor::kArgument));
-
- Return(NumberToString(input));
-}
-
-// 7.1.1.1 OrdinaryToPrimitive ( O, hint )
-void ConversionBuiltinsAssembler::Generate_OrdinaryToPrimitive(
- TNode<Context> context, TNode<Object> input, OrdinaryToPrimitiveHint hint) {
- TVARIABLE(Object, var_result);
- Label return_result(this, &var_result);
-
- Handle<String> method_names[2];
- switch (hint) {
- case OrdinaryToPrimitiveHint::kNumber:
- method_names[0] = factory()->valueOf_string();
- method_names[1] = factory()->toString_string();
- break;
- case OrdinaryToPrimitiveHint::kString:
- method_names[0] = factory()->toString_string();
- method_names[1] = factory()->valueOf_string();
- break;
- }
- for (Handle<String> name : method_names) {
- // Lookup the {name} on the {input}.
- TNode<Object> method = GetProperty(context, input, name);
-
- // Check if the {method} is callable.
- Label if_methodiscallable(this),
- if_methodisnotcallable(this, Label::kDeferred);
- GotoIf(TaggedIsSmi(method), &if_methodisnotcallable);
- TNode<Map> method_map = LoadMap(CAST(method));
- Branch(IsCallableMap(method_map), &if_methodiscallable,
- &if_methodisnotcallable);
-
- BIND(&if_methodiscallable);
- {
- // Call the {method} on the {input}.
- TNode<Object> result = Call(context, method, input);
- var_result = result;
-
- // Return the {result} if it is a primitive.
- GotoIf(TaggedIsSmi(result), &return_result);
- TNode<Uint16T> result_instance_type = LoadInstanceType(CAST(result));
- GotoIf(IsPrimitiveInstanceType(result_instance_type), &return_result);
- }
-
- // Just continue with the next {name} if the {method} is not callable.
- Goto(&if_methodisnotcallable);
- BIND(&if_methodisnotcallable);
- }
-
- ThrowTypeError(context, MessageTemplate::kCannotConvertToPrimitive);
-
- BIND(&return_result);
- Return(var_result.value());
-}
-
-TF_BUILTIN(OrdinaryToPrimitive_Number, ConversionBuiltinsAssembler) {
- TNode<Context> context = CAST(Parameter(Descriptor::kContext));
- TNode<Object> input = CAST(Parameter(Descriptor::kArgument));
- Generate_OrdinaryToPrimitive(context, input,
- OrdinaryToPrimitiveHint::kNumber);
-}
-
-TF_BUILTIN(OrdinaryToPrimitive_String, ConversionBuiltinsAssembler) {
- TNode<Context> context = CAST(Parameter(Descriptor::kContext));
- TNode<Object> input = CAST(Parameter(Descriptor::kArgument));
- Generate_OrdinaryToPrimitive(context, input,
- OrdinaryToPrimitiveHint::kString);
-}
-
-// ES6 section 7.1.2 ToBoolean ( argument )
-TF_BUILTIN(ToBoolean, CodeStubAssembler) {
- TNode<Object> value = CAST(Parameter(Descriptor::kArgument));
-
- Label return_true(this), return_false(this);
- BranchIfToBooleanIsTrue(value, &return_true, &return_false);
-
- BIND(&return_true);
- Return(TrueConstant());
-
- BIND(&return_false);
- Return(FalseConstant());
-}
-
// ES6 section 7.1.2 ToBoolean ( argument )
// Requires parameter on stack so that it can be used as a continuation from a
// LAZY deopt.
@@ -309,130 +47,6 @@ TF_BUILTIN(ToBooleanLazyDeoptContinuation, CodeStubAssembler) {
Return(FalseConstant());
}
-TF_BUILTIN(ToLength, CodeStubAssembler) {
- TNode<Context> context = CAST(Parameter(Descriptor::kContext));
-
- // We might need to loop once for ToNumber conversion.
- TVARIABLE(Object, var_len, CAST(Parameter(Descriptor::kArgument)));
- Label loop(this, &var_len);
- Goto(&loop);
- BIND(&loop);
- {
- // Shared entry points.
- Label return_len(this), return_two53minus1(this, Label::kDeferred),
- return_zero(this, Label::kDeferred);
-
- // Load the current {len} value.
- TNode<Object> len = var_len.value();
-
- // Check if {len} is a positive Smi.
- GotoIf(TaggedIsPositiveSmi(len), &return_len);
-
- // Check if {len} is a (negative) Smi.
- GotoIf(TaggedIsSmi(len), &return_zero);
-
- // Check if {len} is a HeapNumber.
- TNode<HeapObject> len_heap_object = CAST(len);
- Label if_lenisheapnumber(this),
- if_lenisnotheapnumber(this, Label::kDeferred);
- Branch(IsHeapNumber(len_heap_object), &if_lenisheapnumber,
- &if_lenisnotheapnumber);
-
- BIND(&if_lenisheapnumber);
- {
- // Load the floating-point value of {len}.
- TNode<Float64T> len_value = LoadHeapNumberValue(len_heap_object);
-
- // Check if {len} is not greater than zero.
- GotoIfNot(Float64GreaterThan(len_value, Float64Constant(0.0)),
- &return_zero);
-
- // Check if {len} is greater than or equal to 2^53-1.
- GotoIf(Float64GreaterThanOrEqual(len_value,
- Float64Constant(kMaxSafeInteger)),
- &return_two53minus1);
-
- // Round the {len} towards -Infinity.
- TNode<Float64T> value = Float64Floor(len_value);
- TNode<Number> result = ChangeFloat64ToTagged(value);
- Return(result);
- }
-
- BIND(&if_lenisnotheapnumber);
- {
- // Need to convert {len} to a Number first.
- var_len = CallBuiltin(Builtins::kNonNumberToNumber, context, len);
- Goto(&loop);
- }
-
- BIND(&return_len);
- Return(var_len.value());
-
- BIND(&return_two53minus1);
- Return(NumberConstant(kMaxSafeInteger));
-
- BIND(&return_zero);
- Return(SmiConstant(0));
- }
-}
-
-// ES6 section 7.1.13 ToObject (argument)
-TF_BUILTIN(ToObject, CodeStubAssembler) {
- Label if_smi(this, Label::kDeferred), if_jsreceiver(this),
- if_noconstructor(this, Label::kDeferred),
- if_wrapjs_primitive_wrapper(this);
-
- TNode<Context> context = CAST(Parameter(Descriptor::kContext));
- TNode<Object> object = CAST(Parameter(Descriptor::kArgument));
-
- TVARIABLE(IntPtrT, constructor_function_index_var);
-
- GotoIf(TaggedIsSmi(object), &if_smi);
-
- TNode<Map> map = LoadMap(CAST(object));
- TNode<Uint16T> instance_type = LoadMapInstanceType(map);
- GotoIf(IsJSReceiverInstanceType(instance_type), &if_jsreceiver);
-
- TNode<IntPtrT> constructor_function_index =
- LoadMapConstructorFunctionIndex(map);
- GotoIf(WordEqual(constructor_function_index,
- IntPtrConstant(Map::kNoConstructorFunctionIndex)),
- &if_noconstructor);
- constructor_function_index_var = constructor_function_index;
- Goto(&if_wrapjs_primitive_wrapper);
-
- BIND(&if_smi);
- constructor_function_index_var =
- IntPtrConstant(Context::NUMBER_FUNCTION_INDEX);
- Goto(&if_wrapjs_primitive_wrapper);
-
- BIND(&if_wrapjs_primitive_wrapper);
- TNode<NativeContext> native_context = LoadNativeContext(context);
- TNode<JSFunction> constructor = CAST(LoadContextElement(
- native_context, constructor_function_index_var.value()));
- TNode<Map> initial_map = LoadObjectField<Map>(
- constructor, JSFunction::kPrototypeOrInitialMapOffset);
- TNode<HeapObject> js_primitive_wrapper =
- Allocate(JSPrimitiveWrapper::kHeaderSize);
- StoreMapNoWriteBarrier(js_primitive_wrapper, initial_map);
- StoreObjectFieldRoot(js_primitive_wrapper,
- JSPrimitiveWrapper::kPropertiesOrHashOffset,
- RootIndex::kEmptyFixedArray);
- StoreObjectFieldRoot(js_primitive_wrapper,
- JSPrimitiveWrapper::kElementsOffset,
- RootIndex::kEmptyFixedArray);
- StoreObjectField(js_primitive_wrapper, JSPrimitiveWrapper::kValueOffset,
- object);
- Return(js_primitive_wrapper);
-
- BIND(&if_noconstructor);
- ThrowTypeError(context, MessageTemplate::kUndefinedOrNullToObject,
- "ToObject");
-
- BIND(&if_jsreceiver);
- Return(object);
-}
-
// ES6 section 12.5.5 typeof operator
TF_BUILTIN(Typeof, CodeStubAssembler) {
TNode<Object> object = CAST(Parameter(Descriptor::kObject));
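Among the CSA conversion builtins deleted in this file, ToLength has the least obvious numeric behaviour: once the argument is a Number, NaN and non-positive values clamp to 0, values at or above 2^53 - 1 saturate, and anything in between is floored. A plain C++ restatement of just that clamping (the preceding ToNumber step and the Smi/HeapNumber tagging are omitted):

#include <cmath>
#include <cstdio>

// Clamping performed by the deleted ToLength builtin after ToNumber.
double ToLengthClamp(double len) {
  constexpr double kMaxSafeInteger = 9007199254740991.0;  // 2^53 - 1
  if (!(len > 0.0)) return 0.0;               // NaN, -0, 0, negatives -> 0
  if (len >= kMaxSafeInteger) return kMaxSafeInteger;  // saturate
  return std::floor(len);                     // round towards -infinity
}

int main() {
  std::printf("%.0f %.0f %.0f\n", ToLengthClamp(-3.5), ToLengthClamp(4.9),
              ToLengthClamp(1e300));  // 0 4 9007199254740991
}
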
diff --git a/chromium/v8/src/builtins/builtins-date-gen.cc b/chromium/v8/src/builtins/builtins-date-gen.cc
index a3200330350..05fcc53f12b 100644
--- a/chromium/v8/src/builtins/builtins-date-gen.cc
+++ b/chromium/v8/src/builtins/builtins-date-gen.cc
@@ -197,7 +197,7 @@ TF_BUILTIN(DatePrototypeToPrimitive, CodeStubAssembler) {
hint_is_invalid(this, Label::kDeferred);
// Fast cases for internalized strings.
- TNode<String> number_string = numberStringConstant();
+ TNode<String> number_string = NumberStringConstant();
GotoIf(TaggedEqual(hint, number_string), &hint_is_number);
TNode<String> default_string = DefaultStringConstant();
GotoIf(TaggedEqual(hint, default_string), &hint_is_string);
diff --git a/chromium/v8/src/builtins/builtins-definitions.h b/chromium/v8/src/builtins/builtins-definitions.h
index 84ddf55f6f4..b7286fa5657 100644
--- a/chromium/v8/src/builtins/builtins-definitions.h
+++ b/chromium/v8/src/builtins/builtins-definitions.h
@@ -51,12 +51,19 @@ namespace internal {
ASM(Call_ReceiverIsNullOrUndefined, CallTrampoline) \
ASM(Call_ReceiverIsNotNullOrUndefined, CallTrampoline) \
ASM(Call_ReceiverIsAny, CallTrampoline) \
+ TFC(Call_ReceiverIsNullOrUndefined_WithFeedback, \
+ CallTrampoline_WithFeedback) \
+ TFC(Call_ReceiverIsNotNullOrUndefined_WithFeedback, \
+ CallTrampoline_WithFeedback) \
+ TFC(Call_ReceiverIsAny_WithFeedback, CallTrampoline_WithFeedback) \
\
/* ES6 section 9.5.12[[Call]] ( thisArgument, argumentsList ) */ \
TFC(CallProxy, CallTrampoline) \
ASM(CallVarargs, CallVarargs) \
TFC(CallWithSpread, CallWithSpread) \
+ TFC(CallWithSpread_WithFeedback, CallWithSpread_WithFeedback) \
TFC(CallWithArrayLike, CallWithArrayLike) \
+ TFC(CallWithArrayLike_WithFeedback, CallWithArrayLike_WithFeedback) \
ASM(CallForwardVarargs, CallForwardVarargs) \
ASM(CallFunctionForwardVarargs, CallForwardVarargs) \
/* Call an API callback via a {FunctionTemplateInfo}, doing appropriate */ \
@@ -76,15 +83,20 @@ namespace internal {
ASM(Construct, JSTrampoline) \
ASM(ConstructVarargs, ConstructVarargs) \
TFC(ConstructWithSpread, ConstructWithSpread) \
+ TFC(ConstructWithSpread_WithFeedback, ConstructWithSpread_WithFeedback) \
TFC(ConstructWithArrayLike, ConstructWithArrayLike) \
+ TFC(ConstructWithArrayLike_WithFeedback, \
+ ConstructWithArrayLike_WithFeedback) \
ASM(ConstructForwardVarargs, ConstructForwardVarargs) \
ASM(ConstructFunctionForwardVarargs, ConstructForwardVarargs) \
+ TFC(Construct_WithFeedback, Construct_WithFeedback) \
ASM(JSConstructStubGeneric, Dummy) \
ASM(JSBuiltinsConstructStub, Dummy) \
TFC(FastNewObject, FastNewObject) \
TFS(FastNewClosure, kSharedFunctionInfo, kFeedbackCell) \
TFC(FastNewFunctionContextEval, FastNewFunctionContext) \
TFC(FastNewFunctionContextFunction, FastNewFunctionContext) \
+ TFS(CreateEmptyLiteralObject) \
TFS(CreateRegExpLiteral, kFeedbackVector, kSlot, kPattern, kFlags) \
TFS(CreateEmptyArrayLiteral, kFeedbackVector, kSlot) \
TFS(CreateShallowArrayLiteral, kFeedbackVector, kSlot, kConstantElements) \
@@ -185,22 +197,8 @@ namespace internal {
ASM(HandleDebuggerStatement, ContextOnly) \
\
/* Type conversions */ \
- TFC(ToObject, TypeConversion) \
- TFC(ToBoolean, TypeConversion) \
- TFC(OrdinaryToPrimitive_Number, TypeConversion) \
- TFC(OrdinaryToPrimitive_String, TypeConversion) \
- TFC(NonPrimitiveToPrimitive_Default, TypeConversion) \
- TFC(NonPrimitiveToPrimitive_Number, TypeConversion) \
- TFC(NonPrimitiveToPrimitive_String, TypeConversion) \
- TFC(StringToNumber, TypeConversion) \
- TFC(ToName, TypeConversion) \
- TFC(NonNumberToNumber, TypeConversion) \
- TFC(NonNumberToNumeric, TypeConversion) \
TFC(ToNumber, TypeConversion) \
TFC(ToNumberConvertBigInt, TypeConversion) \
- TFC(ToNumeric, TypeConversion) \
- TFC(NumberToString, TypeConversion) \
- TFC(ToLength, TypeConversion) \
TFC(Typeof, Typeof) \
TFC(GetSuperConstructor, Typeof) \
TFC(BigIntToI64, BigIntToI64) \
@@ -503,11 +501,7 @@ namespace internal {
CPP(FunctionConstructor) \
ASM(FunctionPrototypeApply, JSTrampoline) \
CPP(FunctionPrototypeBind) \
- /* ES6 #sec-function.prototype.bind */ \
- TFJ(FastFunctionPrototypeBind, kDontAdaptArgumentsSentinel) \
ASM(FunctionPrototypeCall, JSTrampoline) \
- /* ES6 #sec-function.prototype-@@hasinstance */ \
- TFJ(FunctionPrototypeHasInstance, 1, kReceiver, kV) \
/* ES6 #sec-function.prototype.tostring */ \
CPP(FunctionPrototypeToString) \
\
@@ -605,53 +599,42 @@ namespace internal {
TFJ(MapIteratorPrototypeNext, 0, kReceiver) \
TFS(MapIteratorToList, kSource) \
\
- /* Number */ \
- TFC(AllocateHeapNumber, AllocateHeapNumber) \
/* ES #sec-number-constructor */ \
TFJ(NumberConstructor, kDontAdaptArgumentsSentinel) \
- /* ES6 #sec-number.isfinite */ \
- TFJ(NumberIsFinite, 1, kReceiver, kNumber) \
- /* ES6 #sec-number.isinteger */ \
- TFJ(NumberIsInteger, 1, kReceiver, kNumber) \
- /* ES6 #sec-number.isnan */ \
- TFJ(NumberIsNaN, 1, kReceiver, kNumber) \
- /* ES6 #sec-number.issafeinteger */ \
- TFJ(NumberIsSafeInteger, 1, kReceiver, kNumber) \
- /* ES6 #sec-number.parsefloat */ \
- TFJ(NumberParseFloat, 1, kReceiver, kString) \
- /* ES6 #sec-number.parseint */ \
- TFJ(NumberParseInt, 2, kReceiver, kString, kRadix) \
- TFS(ParseInt, kString, kRadix) \
CPP(NumberPrototypeToExponential) \
CPP(NumberPrototypeToFixed) \
CPP(NumberPrototypeToLocaleString) \
CPP(NumberPrototypeToPrecision) \
- /* ES6 #sec-number.prototype.valueof */ \
- TFJ(NumberPrototypeValueOf, 0, kReceiver) \
- TFC(Add, BinaryOp) \
- TFC(Subtract, BinaryOp) \
- TFC(Multiply, BinaryOp) \
- TFC(Divide, BinaryOp) \
- TFC(Modulus, BinaryOp) \
- TFC(Exponentiate, BinaryOp) \
- TFC(BitwiseAnd, BinaryOp) \
- TFC(BitwiseOr, BinaryOp) \
- TFC(BitwiseXor, BinaryOp) \
- TFC(ShiftLeft, BinaryOp) \
- TFC(ShiftRight, BinaryOp) \
- TFC(ShiftRightLogical, BinaryOp) \
- TFC(LessThan, Compare) \
- TFC(LessThanOrEqual, Compare) \
- TFC(GreaterThan, Compare) \
- TFC(GreaterThanOrEqual, Compare) \
- TFC(Equal, Compare) \
TFC(SameValue, Compare) \
TFC(SameValueNumbersOnly, Compare) \
- TFC(StrictEqual, Compare) \
- TFS(BitwiseNot, kValue) \
- TFS(Decrement, kValue) \
- TFS(Increment, kValue) \
- TFS(Negate, kValue) \
+ \
+ /* Binary ops with feedback collection */ \
+ TFC(Add_WithFeedback, BinaryOp_WithFeedback) \
+ TFC(Subtract_WithFeedback, BinaryOp_WithFeedback) \
+ TFC(Multiply_WithFeedback, BinaryOp_WithFeedback) \
+ TFC(Divide_WithFeedback, BinaryOp_WithFeedback) \
+ TFC(Modulus_WithFeedback, BinaryOp_WithFeedback) \
+ TFC(Exponentiate_WithFeedback, BinaryOp_WithFeedback) \
+ TFC(BitwiseAnd_WithFeedback, BinaryOp_WithFeedback) \
+ TFC(BitwiseOr_WithFeedback, BinaryOp_WithFeedback) \
+ TFC(BitwiseXor_WithFeedback, BinaryOp_WithFeedback) \
+ TFC(ShiftLeft_WithFeedback, BinaryOp_WithFeedback) \
+ TFC(ShiftRight_WithFeedback, BinaryOp_WithFeedback) \
+ TFC(ShiftRightLogical_WithFeedback, BinaryOp_WithFeedback) \
+ \
+ /* Compare ops with feedback collection */ \
+ TFC(Equal_WithFeedback, Compare_WithFeedback) \
+ TFC(StrictEqual_WithFeedback, Compare_WithFeedback) \
+ TFC(LessThan_WithFeedback, Compare_WithFeedback) \
+ TFC(GreaterThan_WithFeedback, Compare_WithFeedback) \
+ TFC(LessThanOrEqual_WithFeedback, Compare_WithFeedback) \
+ TFC(GreaterThanOrEqual_WithFeedback, Compare_WithFeedback) \
+ \
+ /* Unary ops with feedback collection */ \
+ TFC(BitwiseNot_WithFeedback, UnaryOp_WithFeedback) \
+ TFC(Decrement_WithFeedback, UnaryOp_WithFeedback) \
+ TFC(Increment_WithFeedback, UnaryOp_WithFeedback) \
+ TFC(Negate_WithFeedback, UnaryOp_WithFeedback) \
\
/* Object */ \
/* ES #sec-object-constructor */ \
@@ -813,12 +796,6 @@ namespace internal {
TFJ(TypedArrayPrototypeByteOffset, 0, kReceiver) \
/* ES6 #sec-get-%typedarray%.prototype.length */ \
TFJ(TypedArrayPrototypeLength, 0, kReceiver) \
- /* ES6 #sec-%typedarray%.prototype.entries */ \
- TFJ(TypedArrayPrototypeEntries, 0, kReceiver) \
- /* ES6 #sec-%typedarray%.prototype.keys */ \
- TFJ(TypedArrayPrototypeKeys, 0, kReceiver) \
- /* ES6 #sec-%typedarray%.prototype.values */ \
- TFJ(TypedArrayPrototypeValues, 0, kReceiver) \
/* ES6 #sec-%typedarray%.prototype.copywithin */ \
CPP(TypedArrayPrototypeCopyWithin) \
/* ES6 #sec-%typedarray%.prototype.fill */ \
@@ -842,14 +819,8 @@ namespace internal {
TFC(WasmFloat32ToNumber, WasmFloat32ToNumber) \
TFC(WasmFloat64ToNumber, WasmFloat64ToNumber) \
TFS(WasmAllocateArray, kMapIndex, kLength, kElementSize) \
- TFS(WasmAllocateStruct, kMapIndex) \
- TFC(WasmAtomicNotify, WasmAtomicNotify) \
TFC(WasmI32AtomicWait32, WasmI32AtomicWait32) \
- TFC(WasmI32AtomicWait64, WasmI32AtomicWait64) \
TFC(WasmI64AtomicWait32, WasmI64AtomicWait32) \
- TFC(WasmI64AtomicWait64, WasmI64AtomicWait64) \
- TFC(WasmTableInit, WasmTableInit) \
- TFC(WasmTableCopy, WasmTableCopy) \
\
/* WeakMap */ \
TFJ(WeakMapConstructor, kDontAdaptArgumentsSentinel) \
@@ -906,11 +877,11 @@ namespace internal {
/* %AsyncFromSyncIteratorPrototype% */ \
/* See tc39.github.io/proposal-async-iteration/ */ \
/* #sec-%asyncfromsynciteratorprototype%-object) */ \
- TFJ(AsyncFromSyncIteratorPrototypeNext, 1, kReceiver, kValue) \
+ TFJ(AsyncFromSyncIteratorPrototypeNext, kDontAdaptArgumentsSentinel) \
/* #sec-%asyncfromsynciteratorprototype%.throw */ \
- TFJ(AsyncFromSyncIteratorPrototypeThrow, 1, kReceiver, kReason) \
+ TFJ(AsyncFromSyncIteratorPrototypeThrow, kDontAdaptArgumentsSentinel) \
/* #sec-%asyncfromsynciteratorprototype%.return */ \
- TFJ(AsyncFromSyncIteratorPrototypeReturn, 1, kReceiver, kValue) \
+ TFJ(AsyncFromSyncIteratorPrototypeReturn, kDontAdaptArgumentsSentinel) \
/* #sec-async-iterator-value-unwrap-functions */ \
TFJ(AsyncIteratorValueUnwrap, 1, kReceiver, kValue) \
\
diff --git a/chromium/v8/src/builtins/builtins-descriptors.h b/chromium/v8/src/builtins/builtins-descriptors.h
index 174b89795f5..c2eb44debea 100644
--- a/chromium/v8/src/builtins/builtins-descriptors.h
+++ b/chromium/v8/src/builtins/builtins-descriptors.h
@@ -13,34 +13,7 @@
namespace v8 {
namespace internal {
-#define REVERSE_0(a) a,
-#define REVERSE_1(a, b) b, a,
-#define REVERSE_2(a, b, c) c, b, a,
-#define REVERSE_3(a, b, c, d) d, c, b, a,
-#define REVERSE_4(a, b, c, d, e) e, d, c, b, a,
-#define REVERSE_5(a, b, c, d, e, f) f, e, d, c, b, a,
-#define REVERSE_6(a, b, c, d, e, f, g) g, f, e, d, c, b, a,
-#define REVERSE_7(a, b, c, d, e, f, g, h) h, g, f, e, d, c, b, a,
-#define REVERSE_8(a, b, c, d, e, f, g, h, i) i, h, g, f, e, d, c, b, a,
-#define REVERSE_kDontAdaptArgumentsSentinel(...)
-#define REVERSE(N, ...) REVERSE_##N(__VA_ARGS__)
-
// Define interface descriptors for builtins with JS linkage.
-#ifdef V8_REVERSE_JSARGS
-#define DEFINE_TFJ_INTERFACE_DESCRIPTOR(Name, Argc, ...) \
- struct Builtin_##Name##_InterfaceDescriptor { \
- enum ParameterIndices { \
- kJSTarget = compiler::CodeAssembler::kTargetParameterIndex, \
- REVERSE_##Argc(__VA_ARGS__) kJSNewTarget, \
- kJSActualArgumentsCount, \
- kContext, \
- kParameterCount, \
- }; \
- static_assert((Argc) == static_cast<uint16_t>(kParameterCount - 4), \
- "Inconsistent set of arguments"); \
- static_assert(kJSTarget == -1, "Unexpected kJSTarget index value"); \
- };
-#else
#define DEFINE_TFJ_INTERFACE_DESCRIPTOR(Name, Argc, ...) \
struct Builtin_##Name##_InterfaceDescriptor { \
enum ParameterIndices { \
@@ -55,7 +28,6 @@ namespace internal {
"Inconsistent set of arguments"); \
static_assert(kJSTarget == -1, "Unexpected kJSTarget index value"); \
};
-#endif
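The deleted helpers above are the standard fixed-arity macro-dispatch trick: pasting the argument count onto a prefix selects one of several hand-written reversal macros. A toy version of the same pattern, reduced to three arities:

#include <cstdio>

#define REVERSE_1(a) a
#define REVERSE_2(a, b) b, a
#define REVERSE_3(a, b, c) c, b, a
#define REVERSE(N, ...) REVERSE_##N(__VA_ARGS__)

int main() {
  int xs[] = {REVERSE(3, 1, 2, 3)};  // expands to {3, 2, 1}
  std::printf("%d %d %d\n", xs[0], xs[1], xs[2]);
}
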
// Define interface descriptors for builtins with StubCall linkage.
#define DEFINE_TFC_INTERFACE_DESCRIPTOR(Name, InterfaceDescriptor) \
diff --git a/chromium/v8/src/builtins/builtins-function-gen.cc b/chromium/v8/src/builtins/builtins-function-gen.cc
deleted file mode 100644
index 1d48ee84d1c..00000000000
--- a/chromium/v8/src/builtins/builtins-function-gen.cc
+++ /dev/null
@@ -1,202 +0,0 @@
-// Copyright 2017 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "src/builtins/builtins-utils-gen.h"
-#include "src/builtins/builtins.h"
-#include "src/codegen/code-stub-assembler.h"
-#include "src/execution/frame-constants.h"
-#include "src/objects/api-callbacks.h"
-#include "src/objects/descriptor-array.h"
-
-namespace v8 {
-namespace internal {
-
-TF_BUILTIN(FastFunctionPrototypeBind, CodeStubAssembler) {
- Label slow(this);
-
- TNode<Int32T> argc =
- UncheckedCast<Int32T>(Parameter(Descriptor::kJSActualArgumentsCount));
- TNode<Context> context = CAST(Parameter(Descriptor::kContext));
- TNode<Object> new_target = CAST(Parameter(Descriptor::kJSNewTarget));
-
- CodeStubArguments args(this, argc);
-
- // Check that receiver has instance type of JS_FUNCTION_TYPE
- TNode<Object> receiver = args.GetReceiver();
- GotoIf(TaggedIsSmi(receiver), &slow);
-
- TNode<Map> receiver_map = LoadMap(CAST(receiver));
- {
- TNode<Uint16T> instance_type = LoadMapInstanceType(receiver_map);
- GotoIfNot(
- Word32Or(InstanceTypeEqual(instance_type, JS_FUNCTION_TYPE),
- InstanceTypeEqual(instance_type, JS_BOUND_FUNCTION_TYPE)),
- &slow);
- }
-
- // Disallow binding of slow-mode functions. We need to figure out whether the
-  // length and name properties are in the original state.
- Comment("Disallow binding of slow-mode functions");
- GotoIf(IsDictionaryMap(receiver_map), &slow);
-
- // Check whether the length and name properties are still present as
- // AccessorInfo objects. In that case, their value can be recomputed even if
- // the actual value on the object changes.
- Comment("Check descriptor array length");
- // Minimum descriptor array length required for fast path.
- const int min_nof_descriptors = i::Max(JSFunction::kLengthDescriptorIndex,
- JSFunction::kNameDescriptorIndex) +
- 1;
- TNode<Int32T> nof_descriptors = LoadNumberOfOwnDescriptors(receiver_map);
- GotoIf(Int32LessThan(nof_descriptors, Int32Constant(min_nof_descriptors)),
- &slow);
-
- // Check whether the length and name properties are still present as
- // AccessorInfo objects. In that case, their value can be recomputed even if
- // the actual value on the object changes.
- Comment("Check name and length properties");
- {
- TNode<DescriptorArray> descriptors = LoadMapDescriptors(receiver_map);
- const int length_index = JSFunction::kLengthDescriptorIndex;
- TNode<Name> maybe_length =
- LoadKeyByDescriptorEntry(descriptors, length_index);
- GotoIf(TaggedNotEqual(maybe_length, LengthStringConstant()), &slow);
-
- TNode<Object> maybe_length_accessor =
- LoadValueByDescriptorEntry(descriptors, length_index);
- GotoIf(TaggedIsSmi(maybe_length_accessor), &slow);
- TNode<Map> length_value_map = LoadMap(CAST(maybe_length_accessor));
- GotoIfNot(IsAccessorInfoMap(length_value_map), &slow);
-
- const int name_index = JSFunction::kNameDescriptorIndex;
- TNode<Name> maybe_name = LoadKeyByDescriptorEntry(descriptors, name_index);
- GotoIf(TaggedNotEqual(maybe_name, NameStringConstant()), &slow);
-
- TNode<Object> maybe_name_accessor =
- LoadValueByDescriptorEntry(descriptors, name_index);
- GotoIf(TaggedIsSmi(maybe_name_accessor), &slow);
- TNode<Map> name_value_map = LoadMap(CAST(maybe_name_accessor));
- GotoIfNot(IsAccessorInfoMap(name_value_map), &slow);
- }
-
- // Choose the right bound function map based on whether the target is
- // constructable.
- Comment("Choose the right bound function map");
- TVARIABLE(Map, bound_function_map);
- {
- Label with_constructor(this);
- TNode<NativeContext> native_context = LoadNativeContext(context);
-
- Label map_done(this, &bound_function_map);
- GotoIf(IsConstructorMap(receiver_map), &with_constructor);
-
- bound_function_map = CAST(LoadContextElement(
- native_context, Context::BOUND_FUNCTION_WITHOUT_CONSTRUCTOR_MAP_INDEX));
- Goto(&map_done);
-
- BIND(&with_constructor);
- bound_function_map = CAST(LoadContextElement(
- native_context, Context::BOUND_FUNCTION_WITH_CONSTRUCTOR_MAP_INDEX));
- Goto(&map_done);
-
- BIND(&map_done);
- }
-
-  // Verify that __proto__ matches that of the target bound function.
- Comment("Verify that __proto__ matches target bound function");
- TNode<HeapObject> prototype = LoadMapPrototype(receiver_map);
- TNode<HeapObject> expected_prototype =
- LoadMapPrototype(bound_function_map.value());
- GotoIf(TaggedNotEqual(prototype, expected_prototype), &slow);
-
- // Allocate the arguments array.
- Comment("Allocate the arguments array");
- TVARIABLE(FixedArray, argument_array);
- {
- Label empty_arguments(this);
- Label arguments_done(this, &argument_array);
- GotoIf(Uint32LessThanOrEqual(argc, Int32Constant(1)), &empty_arguments);
- TNode<IntPtrT> elements_length =
- Signed(ChangeUint32ToWord(Unsigned(Int32Sub(argc, Int32Constant(1)))));
- argument_array = CAST(AllocateFixedArray(PACKED_ELEMENTS, elements_length,
- kAllowLargeObjectAllocation));
- TVARIABLE(IntPtrT, index, IntPtrConstant(0));
- VariableList foreach_vars({&index}, zone());
- args.ForEach(
- foreach_vars,
- [&](TNode<Object> arg) {
- StoreFixedArrayElement(argument_array.value(), index.value(), arg);
- Increment(&index);
- },
- IntPtrConstant(1));
- Goto(&arguments_done);
-
- BIND(&empty_arguments);
- argument_array = EmptyFixedArrayConstant();
- Goto(&arguments_done);
-
- BIND(&arguments_done);
- }
-
- // Determine bound receiver.
- Comment("Determine bound receiver");
- TVARIABLE(Object, bound_receiver);
- {
- Label has_receiver(this);
- Label receiver_done(this, &bound_receiver);
- GotoIf(Word32NotEqual(argc, Int32Constant(0)), &has_receiver);
- bound_receiver = UndefinedConstant();
- Goto(&receiver_done);
-
- BIND(&has_receiver);
- bound_receiver = args.AtIndex(0);
- Goto(&receiver_done);
-
- BIND(&receiver_done);
- }
-
- // Allocate the resulting bound function.
- Comment("Allocate the resulting bound function");
- {
- TNode<HeapObject> bound_function = Allocate(JSBoundFunction::kHeaderSize);
- StoreMapNoWriteBarrier(bound_function, bound_function_map.value());
- StoreObjectFieldNoWriteBarrier(
- bound_function, JSBoundFunction::kBoundTargetFunctionOffset, receiver);
- StoreObjectFieldNoWriteBarrier(bound_function,
- JSBoundFunction::kBoundThisOffset,
- bound_receiver.value());
- StoreObjectFieldNoWriteBarrier(bound_function,
- JSBoundFunction::kBoundArgumentsOffset,
- argument_array.value());
- TNode<FixedArray> empty_fixed_array = EmptyFixedArrayConstant();
- StoreObjectFieldNoWriteBarrier(
- bound_function, JSObject::kPropertiesOrHashOffset, empty_fixed_array);
- StoreObjectFieldNoWriteBarrier(bound_function, JSObject::kElementsOffset,
- empty_fixed_array);
-
- args.PopAndReturn(bound_function);
- }
-
- BIND(&slow);
- {
- // We are not using Parameter(Descriptor::kJSTarget) and loading the value
- // from the current frame here in order to reduce register pressure on the
- // fast path.
- TNode<JSFunction> target = LoadTargetFromFrame();
- TailCallBuiltin(Builtins::kFunctionPrototypeBind, context, target,
- new_target, argc);
- }
-}
-
-// ES6 #sec-function.prototype-@@hasinstance
-TF_BUILTIN(FunctionPrototypeHasInstance, CodeStubAssembler) {
- TNode<Context> context = CAST(Parameter(Descriptor::kContext));
- TNode<Object> f = CAST(Parameter(Descriptor::kReceiver));
- TNode<Object> v = CAST(Parameter(Descriptor::kV));
- TNode<Oddball> result = OrdinaryHasInstance(context, f, v);
- Return(result);
-}
-
-} // namespace internal
-} // namespace v8
diff --git a/chromium/v8/src/builtins/builtins-handler-gen.cc b/chromium/v8/src/builtins/builtins-handler-gen.cc
index 0325ddab7c9..3f4a53a3461 100644
--- a/chromium/v8/src/builtins/builtins-handler-gen.cc
+++ b/chromium/v8/src/builtins/builtins-handler-gen.cc
@@ -9,6 +9,7 @@
#include "src/ic/ic.h"
#include "src/ic/keyed-store-generic.h"
#include "src/objects/objects-inl.h"
+#include "torque-generated/exported-macros-assembler-tq.h"
namespace v8 {
namespace internal {
@@ -78,37 +79,11 @@ TNode<Object> HandlerBuiltinsAssembler::EmitKeyedSloppyArguments(
TNode<JSObject> receiver, TNode<Object> tagged_key,
base::Optional<TNode<Object>> value, Label* bailout,
ArgumentsAccessMode access_mode) {
- // Mapped arguments are actual arguments. Unmapped arguments are values added
- // to the arguments object after it was created for the call. Mapped arguments
- // are stored in the context at indexes given by elements[key + 2]. Unmapped
- // arguments are stored as regular indexed properties in the arguments array,
- // held at elements[1]. See NewSloppyArguments() in runtime.cc for a detailed
- // look at argument object construction.
- //
- // The sloppy arguments elements array has a special format:
- //
- // 0: context
- // 1: unmapped arguments array
- // 2: mapped_index0,
- // 3: mapped_index1,
- // ...
- //
- // length is 2 + min(number_of_actual_arguments, number_of_formal_arguments).
- // If key + 2 >= elements.length then attempt to look in the unmapped
- // arguments array (given by elements[1]) and return the value at key, missing
- // to the runtime if the unmapped arguments array is not a fixed array or if
- // key >= unmapped_arguments_array.length.
- //
- // Otherwise, t = elements[key + 2]. If t is the hole, then look up the value
- // in the unmapped arguments array, as described above. Otherwise, t is a Smi
- // index into the context array given at elements[0]. Return the value at
- // context[t].
-
GotoIfNot(TaggedIsSmi(tagged_key), bailout);
TNode<IntPtrT> key = SmiUntag(CAST(tagged_key));
GotoIf(IntPtrLessThan(key, IntPtrConstant(0)), bailout);
- TNode<FixedArray> elements = CAST(LoadElements(receiver));
+ TNode<SloppyArgumentsElements> elements = CAST(LoadElements(receiver));
TNode<IntPtrT> elements_length = LoadAndUntagFixedArrayBaseLength(elements);
TVARIABLE(Object, var_result);
@@ -119,20 +94,18 @@ TNode<Object> HandlerBuiltinsAssembler::EmitKeyedSloppyArguments(
access_mode == ArgumentsAccessMode::kHas);
}
Label if_mapped(this), if_unmapped(this), end(this, &var_result);
- TNode<IntPtrT> intptr_two = IntPtrConstant(2);
- TNode<IntPtrT> adjusted_length = IntPtrSub(elements_length, intptr_two);
- GotoIf(UintPtrGreaterThanOrEqual(key, adjusted_length), &if_unmapped);
+ GotoIf(UintPtrGreaterThanOrEqual(key, elements_length), &if_unmapped);
TNode<Object> mapped_index =
- LoadFixedArrayElement(elements, IntPtrAdd(key, intptr_two));
+ LoadSloppyArgumentsElementsMappedEntries(elements, key);
Branch(TaggedEqual(mapped_index, TheHoleConstant()), &if_unmapped,
&if_mapped);
BIND(&if_mapped);
{
TNode<IntPtrT> mapped_index_intptr = SmiUntag(CAST(mapped_index));
- TNode<Context> the_context = CAST(LoadFixedArrayElement(elements, 0));
+ TNode<Context> the_context = LoadSloppyArgumentsElementsContext(elements);
if (access_mode == ArgumentsAccessMode::kLoad) {
TNode<Object> result =
LoadContextElement(the_context, mapped_index_intptr);
@@ -151,7 +124,7 @@ TNode<Object> HandlerBuiltinsAssembler::EmitKeyedSloppyArguments(
BIND(&if_unmapped);
{
TNode<HeapObject> backing_store_ho =
- CAST(LoadFixedArrayElement(elements, 1));
+ LoadSloppyArgumentsElementsArguments(elements);
GotoIf(TaggedNotEqual(LoadMap(backing_store_ho), FixedArrayMapConstant()),
bailout);
TNode<FixedArray> backing_store = CAST(backing_store_ho);
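The rewritten lookup above goes through the typed SloppyArgumentsElements accessors instead of raw FixedArray slot indices, but the shape of the algorithm is unchanged: a mapped entry holds a context slot index, while a hole (or an out-of-range key) falls through to the plain arguments store. A conceptual C++ model of the load path, with toy types rather than V8's:

#include <cstdio>
#include <optional>
#include <vector>

struct SloppyArgs {
  std::vector<std::optional<int>> mapped;  // context slot index, or "the hole"
  std::vector<int> context;                // enclosing function context slots
  std::vector<int> unmapped;               // values stored after creation
};

std::optional<int> Load(const SloppyArgs& a, size_t key) {
  if (key < a.mapped.size() && a.mapped[key]) {
    return a.context[*a.mapped[key]];                   // &if_mapped
  }
  if (key < a.unmapped.size()) return a.unmapped[key];  // &if_unmapped
  return std::nullopt;                                  // miss to the runtime
}

int main() {
  SloppyArgs a{{std::optional<int>{0}, std::nullopt}, {42}, {7, 8}};
  std::printf("%d %d\n", *Load(a, 0), *Load(a, 1));  // 42 8
}
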
diff --git a/chromium/v8/src/builtins/builtins-internal-gen.cc b/chromium/v8/src/builtins/builtins-internal-gen.cc
index 61f03b3f993..214f94802f3 100644
--- a/chromium/v8/src/builtins/builtins-internal-gen.cc
+++ b/chromium/v8/src/builtins/builtins-internal-gen.cc
@@ -72,7 +72,7 @@ TF_BUILTIN(GrowFastSmiOrObjectElements, CodeStubAssembler) {
}
TF_BUILTIN(NewArgumentsElements, CodeStubAssembler) {
- TNode<IntPtrT> frame = UncheckedCast<IntPtrT>(Parameter(Descriptor::kFrame));
+ TNode<RawPtrT> frame = UncheckedCast<RawPtrT>(Parameter(Descriptor::kFrame));
TNode<IntPtrT> length = SmiToIntPtr(Parameter(Descriptor::kLength));
TNode<IntPtrT> mapped_count =
SmiToIntPtr(Parameter(Descriptor::kMappedCount));
@@ -127,10 +127,8 @@ TF_BUILTIN(NewArgumentsElements, CodeStubAssembler) {
}
BIND(&done_loop1);
- // Compute the effective {offset} into the {frame}.
- TNode<IntPtrT> offset = IntPtrAdd(length, IntPtrConstant(1));
-
// Copy the parameters from {frame} (starting at {offset}) to {result}.
+ CodeStubArguments args(this, length, frame);
Label loop2(this, &var_index), done_loop2(this);
Goto(&loop2);
BIND(&loop2);
@@ -142,9 +140,7 @@ TF_BUILTIN(NewArgumentsElements, CodeStubAssembler) {
GotoIf(IntPtrEqual(index, length), &done_loop2);
// Load the parameter at the given {index}.
- TNode<Object> value = BitcastWordToTagged(
- Load(MachineType::Pointer(), frame,
- TimesSystemPointerSize(IntPtrSub(offset, index))));
+ TNode<Object> value = args.AtIndex(index);
// Store the {value} into the {result}.
StoreFixedArrayElement(result, index, value, SKIP_WRITE_BARRIER);
@@ -221,9 +217,9 @@ class RecordWriteCodeStubAssembler : public CodeStubAssembler {
TNode<BoolT> IsPageFlagSet(TNode<IntPtrT> object, int mask) {
TNode<IntPtrT> page = PageFromAddress(object);
- TNode<IntPtrT> flags =
- UncheckedCast<IntPtrT>(Load(MachineType::Pointer(), page,
- IntPtrConstant(MemoryChunk::kFlagsOffset)));
+ TNode<IntPtrT> flags = UncheckedCast<IntPtrT>(
+ Load(MachineType::Pointer(), page,
+ IntPtrConstant(BasicMemoryChunk::kFlagsOffset)));
return WordNotEqual(WordAnd(flags, IntPtrConstant(mask)),
IntPtrConstant(0));
}
@@ -242,8 +238,8 @@ class RecordWriteCodeStubAssembler : public CodeStubAssembler {
void GetMarkBit(TNode<IntPtrT> object, TNode<IntPtrT>* cell,
TNode<IntPtrT>* mask) {
TNode<IntPtrT> page = PageFromAddress(object);
- TNode<IntPtrT> bitmap =
- Load<IntPtrT>(page, IntPtrConstant(MemoryChunk::kMarkBitmapOffset));
+ TNode<IntPtrT> bitmap = Load<IntPtrT>(
+ page, IntPtrConstant(BasicMemoryChunk::kMarkBitmapOffset));
{
// Temp variable to calculate cell offset in bitmap.
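Both hunks in this class only retarget the offset constants from MemoryChunk to BasicMemoryChunk; the address arithmetic they build on is unchanged. For orientation, a sketch of the usual page-header recovery that precedes these loads, using a hypothetical page size (the real constants live in the heap layout headers):

#include <cstdint>
#include <cstdio>

// Hypothetical page size; the point is only the power-of-two alignment.
constexpr uintptr_t kPageSize = uintptr_t{1} << 18;

uintptr_t PageFromAddress(uintptr_t addr) { return addr & ~(kPageSize - 1); }

int main() {
  uintptr_t object = (uintptr_t{42} << 18) + 0x1234;  // somewhere inside a page
  std::printf("page base: %#zx\n",
              static_cast<size_t>(PageFromAddress(object)));
}
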
diff --git a/chromium/v8/src/builtins/builtins-microtask-queue-gen.cc b/chromium/v8/src/builtins/builtins-microtask-queue-gen.cc
index e613ae9c08c..1da6f54c820 100644
--- a/chromium/v8/src/builtins/builtins-microtask-queue-gen.cc
+++ b/chromium/v8/src/builtins/builtins-microtask-queue-gen.cc
@@ -198,11 +198,18 @@ void MicrotaskQueueBuiltinsAssembler::RunSingleMicrotask(
const TNode<Object> thenable = LoadObjectField(
microtask, PromiseResolveThenableJobTask::kThenableOffset);
+ RunPromiseHook(Runtime::kPromiseHookBefore, microtask_context,
+ CAST(promise_to_resolve));
+
{
ScopedExceptionHandler handler(this, &if_exception, &var_exception);
CallBuiltin(Builtins::kPromiseResolveThenableJob, native_context,
promise_to_resolve, thenable, then);
}
+
+ RunPromiseHook(Runtime::kPromiseHookAfter, microtask_context,
+ CAST(promise_to_resolve));
+
RewindEnteredContext(saved_entered_context_count);
SetCurrentContext(current_context);
Goto(&done);
diff --git a/chromium/v8/src/builtins/builtins-number-gen.cc b/chromium/v8/src/builtins/builtins-number-gen.cc
index 4513d73abc7..4e8bcae60be 100644
--- a/chromium/v8/src/builtins/builtins-number-gen.cc
+++ b/chromium/v8/src/builtins/builtins-number-gen.cc
@@ -6,6 +6,7 @@
#include "src/builtins/builtins.h"
#include "src/codegen/code-stub-assembler.h"
#include "src/ic/binary-op-assembler.h"
+#include "src/ic/unary-op-assembler.h"
namespace v8 {
namespace internal {
@@ -13,983 +14,108 @@ namespace internal {
// -----------------------------------------------------------------------------
// ES6 section 20.1 Number Objects
-class NumberBuiltinsAssembler : public CodeStubAssembler {
- public:
- explicit NumberBuiltinsAssembler(compiler::CodeAssemblerState* state)
- : CodeStubAssembler(state) {}
-
- protected:
- template <typename Descriptor>
- void EmitBitwiseOp(Operation op) {
- TNode<Object> left = CAST(Parameter(Descriptor::kLeft));
- TNode<Object> right = CAST(Parameter(Descriptor::kRight));
- TNode<Context> context = CAST(Parameter(Descriptor::kContext));
-
- BinaryOpAssembler binop_asm(state());
- Return(binop_asm.Generate_BitwiseBinaryOp(op, left, right, context));
- }
-
- template <typename Descriptor>
- void RelationalComparisonBuiltin(Operation op) {
- TNode<Object> lhs = CAST(Parameter(Descriptor::kLeft));
- TNode<Object> rhs = CAST(Parameter(Descriptor::kRight));
- TNode<Context> context = CAST(Parameter(Descriptor::kContext));
-
- Return(RelationalComparison(op, lhs, rhs, context));
- }
-
- template <typename Descriptor>
- void UnaryOp(TVariable<Object>* var_input, Label* do_smi, Label* do_double,
- TVariable<Float64T>* var_input_double, Label* do_bigint);
-
- template <typename Descriptor>
- void BinaryOp(Label* smis, TVariable<Object>* var_left,
- TVariable<Object>* var_right, Label* doubles,
- TVariable<Float64T>* var_left_double,
- TVariable<Float64T>* var_right_double, Label* bigints);
-};
-
-// ES6 #sec-number.isfinite
-TF_BUILTIN(NumberIsFinite, CodeStubAssembler) {
- TNode<Object> number = CAST(Parameter(Descriptor::kNumber));
-
- Label return_true(this), return_false(this);
-
- // Check if {number} is a Smi.
- GotoIf(TaggedIsSmi(number), &return_true);
-
- // Check if {number} is a HeapNumber.
- TNode<HeapObject> number_heap_object = CAST(number);
- GotoIfNot(IsHeapNumber(number_heap_object), &return_false);
-
- // Check if {number} contains a finite, non-NaN value.
- TNode<Float64T> number_value = LoadHeapNumberValue(number_heap_object);
- BranchIfFloat64IsNaN(Float64Sub(number_value, number_value), &return_false,
- &return_true);
-
- BIND(&return_true);
- Return(TrueConstant());
-
- BIND(&return_false);
- Return(FalseConstant());
-}
-
-TF_BUILTIN(AllocateHeapNumber, CodeStubAssembler) {
- TNode<HeapNumber> result = AllocateHeapNumber();
- Return(result);
-}
-
-// ES6 #sec-number.isinteger
-TF_BUILTIN(NumberIsInteger, CodeStubAssembler) {
- TNode<Object> number = CAST(Parameter(Descriptor::kNumber));
- Return(SelectBooleanConstant(IsInteger(number)));
-}
-
-// ES6 #sec-number.isnan
-TF_BUILTIN(NumberIsNaN, CodeStubAssembler) {
- TNode<Object> number = CAST(Parameter(Descriptor::kNumber));
-
- Label return_true(this), return_false(this);
-
- // Check if {number} is a Smi.
- GotoIf(TaggedIsSmi(number), &return_false);
-
- // Check if {number} is a HeapNumber.
- TNode<HeapObject> number_heap_object = CAST(number);
- GotoIfNot(IsHeapNumber(number_heap_object), &return_false);
-
- // Check if {number} contains a NaN value.
- TNode<Float64T> number_value = LoadHeapNumberValue(number_heap_object);
- BranchIfFloat64IsNaN(number_value, &return_true, &return_false);
-
- BIND(&return_true);
- Return(TrueConstant());
-
- BIND(&return_false);
- Return(FalseConstant());
-}
-
-// ES6 #sec-number.issafeinteger
-TF_BUILTIN(NumberIsSafeInteger, CodeStubAssembler) {
- TNode<Object> number = CAST(Parameter(Descriptor::kNumber));
- Return(SelectBooleanConstant(IsSafeInteger(number)));
-}
-
-// ES6 #sec-number.parsefloat
-TF_BUILTIN(NumberParseFloat, CodeStubAssembler) {
- TNode<Context> context = CAST(Parameter(Descriptor::kContext));
-
- // We might need to loop once for ToString conversion.
- TVARIABLE(Object, var_input, CAST(Parameter(Descriptor::kString)));
- Label loop(this, &var_input);
- Goto(&loop);
- BIND(&loop);
- {
- // Load the current {input} value.
- TNode<Object> input = var_input.value();
-
- // Check if the {input} is a HeapObject or a Smi.
- Label if_inputissmi(this), if_inputisnotsmi(this);
- Branch(TaggedIsSmi(input), &if_inputissmi, &if_inputisnotsmi);
-
- BIND(&if_inputissmi);
- {
- // The {input} is already a Number, no need to do anything.
- Return(input);
- }
-
- BIND(&if_inputisnotsmi);
- {
- // The {input} is a HeapObject, check if it's already a String.
- TNode<HeapObject> input_heap_object = CAST(input);
- Label if_inputisstring(this), if_inputisnotstring(this);
- TNode<Map> input_map = LoadMap(input_heap_object);
- TNode<Uint16T> input_instance_type = LoadMapInstanceType(input_map);
- Branch(IsStringInstanceType(input_instance_type), &if_inputisstring,
- &if_inputisnotstring);
-
- BIND(&if_inputisstring);
- {
- // The {input} is already a String, check if {input} contains
- // a cached array index.
- Label if_inputcached(this), if_inputnotcached(this);
- TNode<Uint32T> input_hash = LoadNameHashField(CAST(input));
- Branch(IsClearWord32(input_hash,
- Name::kDoesNotContainCachedArrayIndexMask),
- &if_inputcached, &if_inputnotcached);
-
- BIND(&if_inputcached);
- {
- // Just return the {input}s cached array index.
- TNode<UintPtrT> input_array_index =
- DecodeWordFromWord32<String::ArrayIndexValueBits>(input_hash);
- Return(SmiTag(Signed(input_array_index)));
- }
-
- BIND(&if_inputnotcached);
- {
- // Need to fall back to the runtime to convert {input} to double.
- Return(CallRuntime(Runtime::kStringParseFloat, context, input));
- }
- }
-
- BIND(&if_inputisnotstring);
- {
- // The {input} is neither a String nor a Smi, check for HeapNumber.
- Label if_inputisnumber(this),
- if_inputisnotnumber(this, Label::kDeferred);
- Branch(IsHeapNumberMap(input_map), &if_inputisnumber,
- &if_inputisnotnumber);
-
- BIND(&if_inputisnumber);
- {
- // The {input} is already a Number, take care of -0.
- Label if_inputiszero(this), if_inputisnotzero(this);
- TNode<Float64T> input_value = LoadHeapNumberValue(input_heap_object);
- Branch(Float64Equal(input_value, Float64Constant(0.0)),
- &if_inputiszero, &if_inputisnotzero);
-
- BIND(&if_inputiszero);
- Return(SmiConstant(0));
-
- BIND(&if_inputisnotzero);
- Return(input);
- }
-
- BIND(&if_inputisnotnumber);
- {
- // Need to convert the {input} to String first.
- // TODO(bmeurer): This could be more efficient if necessary.
- var_input = CallBuiltin(Builtins::kToString, context, input);
- Goto(&loop);
- }
- }
- }
- }
-}
-
-// ES6 #sec-number.parseint
-TF_BUILTIN(ParseInt, CodeStubAssembler) {
- TNode<Context> context = CAST(Parameter(Descriptor::kContext));
- TNode<Object> input = CAST(Parameter(Descriptor::kString));
- TNode<Object> radix = CAST(Parameter(Descriptor::kRadix));
-
- // Check if {radix} is treated as 10 (i.e. undefined, 0 or 10).
- Label if_radix10(this), if_generic(this, Label::kDeferred);
- GotoIf(IsUndefined(radix), &if_radix10);
- GotoIf(TaggedEqual(radix, SmiConstant(10)), &if_radix10);
- GotoIf(TaggedEqual(radix, SmiConstant(0)), &if_radix10);
- Goto(&if_generic);
-
- BIND(&if_radix10);
- {
- // Check if we can avoid the ToString conversion on {input}.
- Label if_inputissmi(this), if_inputisheapnumber(this),
- if_inputisstring(this);
- GotoIf(TaggedIsSmi(input), &if_inputissmi);
- TNode<Map> input_map = LoadMap(CAST(input));
- GotoIf(IsHeapNumberMap(input_map), &if_inputisheapnumber);
- TNode<Uint16T> input_instance_type = LoadMapInstanceType(input_map);
- Branch(IsStringInstanceType(input_instance_type), &if_inputisstring,
- &if_generic);
-
- BIND(&if_inputissmi);
- {
- // Just return the {input}.
- Return(input);
- }
-
- BIND(&if_inputisheapnumber);
- {
- // Check if the {input} value is in Signed32 range.
- Label if_inputissigned32(this);
- TNode<Float64T> input_value = LoadHeapNumberValue(CAST(input));
- TNode<Int32T> input_value32 =
- Signed(TruncateFloat64ToWord32(input_value));
- GotoIf(Float64Equal(input_value, ChangeInt32ToFloat64(input_value32)),
- &if_inputissigned32);
-
- // Check if the absolute {input} value is in the [1,1<<31[ range.
- // Take the generic path for the range [0,1[ because the result
- // could be -0.
- TNode<Float64T> input_value_abs = Float64Abs(input_value);
-
- GotoIfNot(Float64LessThan(input_value_abs, Float64Constant(1u << 31)),
- &if_generic);
- Branch(Float64LessThanOrEqual(Float64Constant(1), input_value_abs),
- &if_inputissigned32, &if_generic);
-
- // Return the truncated int32 value, and return the tagged result.
- BIND(&if_inputissigned32);
- TNode<Number> result = ChangeInt32ToTagged(input_value32);
- Return(result);
- }
-
- BIND(&if_inputisstring);
- {
- // Check if the String {input} has a cached array index.
- TNode<Uint32T> input_hash = LoadNameHashField(CAST(input));
- GotoIf(IsSetWord32(input_hash, Name::kDoesNotContainCachedArrayIndexMask),
- &if_generic);
-
- // Return the cached array index as result.
- TNode<UintPtrT> input_index =
- DecodeWordFromWord32<String::ArrayIndexValueBits>(input_hash);
- TNode<Smi> result = SmiTag(Signed(input_index));
- Return(result);
- }
- }
-
- BIND(&if_generic);
- {
- TNode<Object> result =
- CallRuntime(Runtime::kStringParseInt, context, input, radix);
- Return(result);
- }
-}
-
-// ES6 #sec-number.parseint
-TF_BUILTIN(NumberParseInt, CodeStubAssembler) {
+#define DEF_BINOP(Name, Generator) \
+ TF_BUILTIN(Name, CodeStubAssembler) { \
+ TNode<Object> lhs = CAST(Parameter(Descriptor::kLeft)); \
+ TNode<Object> rhs = CAST(Parameter(Descriptor::kRight)); \
+ TNode<Context> context = CAST(Parameter(Descriptor::kContext)); \
+ TNode<HeapObject> maybe_feedback_vector = \
+ CAST(Parameter(Descriptor::kMaybeFeedbackVector)); \
+ TNode<UintPtrT> slot = \
+ UncheckedCast<UintPtrT>(Parameter(Descriptor::kSlot)); \
+ \
+ BinaryOpAssembler binop_asm(state()); \
+ TNode<Object> result = binop_asm.Generator(context, lhs, rhs, slot, \
+ maybe_feedback_vector, false); \
+ \
+ Return(result); \
+ }
+DEF_BINOP(Add_WithFeedback, Generate_AddWithFeedback)
+DEF_BINOP(Subtract_WithFeedback, Generate_SubtractWithFeedback)
+DEF_BINOP(Multiply_WithFeedback, Generate_MultiplyWithFeedback)
+DEF_BINOP(Divide_WithFeedback, Generate_DivideWithFeedback)
+DEF_BINOP(Modulus_WithFeedback, Generate_ModulusWithFeedback)
+DEF_BINOP(Exponentiate_WithFeedback, Generate_ExponentiateWithFeedback)
+DEF_BINOP(BitwiseOr_WithFeedback, Generate_BitwiseOrWithFeedback)
+DEF_BINOP(BitwiseXor_WithFeedback, Generate_BitwiseXorWithFeedback)
+DEF_BINOP(BitwiseAnd_WithFeedback, Generate_BitwiseAndWithFeedback)
+DEF_BINOP(ShiftLeft_WithFeedback, Generate_ShiftLeftWithFeedback)
+DEF_BINOP(ShiftRight_WithFeedback, Generate_ShiftRightWithFeedback)
+DEF_BINOP(ShiftRightLogical_WithFeedback,
+ Generate_ShiftRightLogicalWithFeedback)
+#undef DEF_BINOP
+
+#define DEF_UNOP(Name, Generator) \
+ TF_BUILTIN(Name, CodeStubAssembler) { \
+ TNode<Object> value = CAST(Parameter(Descriptor::kValue)); \
+ TNode<Context> context = CAST(Parameter(Descriptor::kContext)); \
+ TNode<HeapObject> maybe_feedback_vector = \
+ CAST(Parameter(Descriptor::kMaybeFeedbackVector)); \
+ TNode<UintPtrT> slot = \
+ UncheckedCast<UintPtrT>(Parameter(Descriptor::kSlot)); \
+ \
+ UnaryOpAssembler a(state()); \
+ TNode<Object> result = \
+ a.Generator(context, value, slot, maybe_feedback_vector); \
+ \
+ Return(result); \
+ }
+DEF_UNOP(BitwiseNot_WithFeedback, Generate_BitwiseNotWithFeedback)
+DEF_UNOP(Decrement_WithFeedback, Generate_DecrementWithFeedback)
+DEF_UNOP(Increment_WithFeedback, Generate_IncrementWithFeedback)
+DEF_UNOP(Negate_WithFeedback, Generate_NegateWithFeedback)
+#undef DEF_UNOP
+
+#define DEF_COMPARE(Name) \
+ TF_BUILTIN(Name##_WithFeedback, CodeStubAssembler) { \
+ TNode<Object> lhs = CAST(Parameter(Descriptor::kLeft)); \
+ TNode<Object> rhs = CAST(Parameter(Descriptor::kRight)); \
+ TNode<Context> context = CAST(Parameter(Descriptor::kContext)); \
+ TNode<HeapObject> maybe_feedback_vector = \
+ CAST(Parameter(Descriptor::kMaybeFeedbackVector)); \
+ TNode<UintPtrT> slot = \
+ UncheckedCast<UintPtrT>(Parameter(Descriptor::kSlot)); \
+ \
+ TVARIABLE(Smi, var_type_feedback); \
+ TNode<Oddball> result = RelationalComparison(Operation::k##Name, lhs, rhs, \
+ context, &var_type_feedback); \
+ UpdateFeedback(var_type_feedback.value(), maybe_feedback_vector, slot); \
+ \
+ Return(result); \
+ }
+DEF_COMPARE(LessThan)
+DEF_COMPARE(LessThanOrEqual)
+DEF_COMPARE(GreaterThan)
+DEF_COMPARE(GreaterThanOrEqual)
+#undef DEF_COMPARE
+
+TF_BUILTIN(Equal_WithFeedback, CodeStubAssembler) {
+ TNode<Object> lhs = CAST(Parameter(Descriptor::kLeft));
+ TNode<Object> rhs = CAST(Parameter(Descriptor::kRight));
TNode<Context> context = CAST(Parameter(Descriptor::kContext));
- TNode<Object> input = CAST(Parameter(Descriptor::kString));
- TNode<Object> radix = CAST(Parameter(Descriptor::kRadix));
- Return(CallBuiltin(Builtins::kParseInt, context, input, radix));
-}
+ TNode<HeapObject> maybe_feedback_vector =
+ CAST(Parameter(Descriptor::kMaybeFeedbackVector));
+ TNode<UintPtrT> slot = UncheckedCast<UintPtrT>(Parameter(Descriptor::kSlot));
 
-// ES6 #sec-number.prototype.valueof
-TF_BUILTIN(NumberPrototypeValueOf, CodeStubAssembler) {
- TNode<Context> context = CAST(Parameter(Descriptor::kContext));
- TNode<Object> receiver = CAST(Parameter(Descriptor::kReceiver));
+ TVARIABLE(Smi, var_type_feedback);
+ TNode<Oddball> result = Equal(lhs, rhs, context, &var_type_feedback);
+ UpdateFeedback(var_type_feedback.value(), maybe_feedback_vector, slot);
 
- TNode<Object> result = ToThisValue(context, receiver, PrimitiveType::kNumber,
- "Number.prototype.valueOf");
Return(result);
}
-class AddStubAssembler : public CodeStubAssembler {
- public:
- explicit AddStubAssembler(compiler::CodeAssemblerState* state)
- : CodeStubAssembler(state) {}
-
- protected:
- TNode<Object> ConvertReceiver(TNode<JSReceiver> js_receiver,
- TNode<Context> context) {
- // Call ToPrimitive explicitly without hint (whereas ToNumber
- // would pass a "number" hint).
- Callable callable = CodeFactory::NonPrimitiveToPrimitive(isolate());
- return CallStub(callable, context, js_receiver);
- }
-
- void ConvertNonReceiverAndLoop(TVariable<Object>* var_value, Label* loop,
- TNode<Context> context) {
- *var_value =
- CallBuiltin(Builtins::kNonNumberToNumeric, context, var_value->value());
- Goto(loop);
- }
-
- void ConvertAndLoop(TVariable<Object>* var_value,
- TNode<Uint16T> instance_type, Label* loop,
- TNode<Context> context) {
- Label is_not_receiver(this, Label::kDeferred);
- GotoIfNot(IsJSReceiverInstanceType(instance_type), &is_not_receiver);
-
- *var_value = ConvertReceiver(CAST(var_value->value()), context);
- Goto(loop);
-
- BIND(&is_not_receiver);
- ConvertNonReceiverAndLoop(var_value, loop, context);
- }
-};
-
-TF_BUILTIN(Add, AddStubAssembler) {
- TNode<Context> context = CAST(Parameter(Descriptor::kContext));
- TVARIABLE(Object, var_left, CAST(Parameter(Descriptor::kLeft)));
- TVARIABLE(Object, var_right, CAST(Parameter(Descriptor::kRight)));
-
- // Shared entry for floating point addition.
- Label do_double_add(this);
- TVARIABLE(Float64T, var_left_double);
- TVARIABLE(Float64T, var_right_double);
-
- // We might need to loop several times due to ToPrimitive, ToString and/or
- // ToNumeric conversions.
- Label loop(this, {&var_left, &var_right}),
- string_add_convert_left(this, Label::kDeferred),
- string_add_convert_right(this, Label::kDeferred),
- do_bigint_add(this, Label::kDeferred);
- Goto(&loop);
- BIND(&loop);
- {
- TNode<Object> left = var_left.value();
- TNode<Object> right = var_right.value();
-
- Label if_left_smi(this), if_left_heapobject(this);
- Branch(TaggedIsSmi(left), &if_left_smi, &if_left_heapobject);
-
- BIND(&if_left_smi);
- {
- Label if_right_smi(this), if_right_heapobject(this);
- Branch(TaggedIsSmi(right), &if_right_smi, &if_right_heapobject);
-
- BIND(&if_right_smi);
- {
- Label if_overflow(this);
- TNode<Smi> left_smi = CAST(left);
- TNode<Smi> right_smi = CAST(right);
- TNode<Smi> result = TrySmiAdd(left_smi, right_smi, &if_overflow);
- Return(result);
-
- BIND(&if_overflow);
- {
- var_left_double = SmiToFloat64(left_smi);
- var_right_double = SmiToFloat64(right_smi);
- Goto(&do_double_add);
- }
- } // if_right_smi
-
- BIND(&if_right_heapobject);
- {
- TNode<HeapObject> right_heap_object = CAST(right);
- TNode<Map> right_map = LoadMap(right_heap_object);
-
- Label if_right_not_number(this, Label::kDeferred);
- GotoIfNot(IsHeapNumberMap(right_map), &if_right_not_number);
-
- // {right} is a HeapNumber.
- var_left_double = SmiToFloat64(CAST(left));
- var_right_double = LoadHeapNumberValue(right_heap_object);
- Goto(&do_double_add);
-
- BIND(&if_right_not_number);
- {
- TNode<Uint16T> right_instance_type = LoadMapInstanceType(right_map);
- GotoIf(IsStringInstanceType(right_instance_type),
- &string_add_convert_left);
- GotoIf(IsBigIntInstanceType(right_instance_type), &do_bigint_add);
- ConvertAndLoop(&var_right, right_instance_type, &loop, context);
- }
- } // if_right_heapobject
- } // if_left_smi
-
- BIND(&if_left_heapobject);
- {
- TNode<HeapObject> left_heap_object = CAST(left);
- TNode<Map> left_map = LoadMap(left_heap_object);
- Label if_right_smi(this), if_right_heapobject(this);
- Branch(TaggedIsSmi(right), &if_right_smi, &if_right_heapobject);
-
- BIND(&if_right_smi);
- {
- Label if_left_not_number(this, Label::kDeferred);
- GotoIfNot(IsHeapNumberMap(left_map), &if_left_not_number);
-
- // {left} is a HeapNumber, {right} is a Smi.
- var_left_double = LoadHeapNumberValue(left_heap_object);
- var_right_double = SmiToFloat64(CAST(right));
- Goto(&do_double_add);
-
- BIND(&if_left_not_number);
- {
- TNode<Uint16T> left_instance_type = LoadMapInstanceType(left_map);
- GotoIf(IsStringInstanceType(left_instance_type),
- &string_add_convert_right);
- GotoIf(IsBigIntInstanceType(left_instance_type), &do_bigint_add);
- // {left} is neither a Numeric nor a String, and {right} is a Smi.
- ConvertAndLoop(&var_left, left_instance_type, &loop, context);
- }
- } // if_right_smi
-
- BIND(&if_right_heapobject);
- {
- TNode<HeapObject> right_heap_object = CAST(right);
- TNode<Map> right_map = LoadMap(right_heap_object);
-
- Label if_left_number(this), if_left_not_number(this, Label::kDeferred);
- Branch(IsHeapNumberMap(left_map), &if_left_number, &if_left_not_number);
-
- BIND(&if_left_number);
- {
- Label if_right_not_number(this, Label::kDeferred);
- GotoIfNot(IsHeapNumberMap(right_map), &if_right_not_number);
-
- // Both {left} and {right} are HeapNumbers.
- var_left_double = LoadHeapNumberValue(CAST(left));
- var_right_double = LoadHeapNumberValue(right_heap_object);
- Goto(&do_double_add);
-
- BIND(&if_right_not_number);
- {
- TNode<Uint16T> right_instance_type = LoadMapInstanceType(right_map);
- GotoIf(IsStringInstanceType(right_instance_type),
- &string_add_convert_left);
- GotoIf(IsBigIntInstanceType(right_instance_type), &do_bigint_add);
- // {left} is a HeapNumber, {right} is neither Number nor String.
- ConvertAndLoop(&var_right, right_instance_type, &loop, context);
- }
- } // if_left_number
-
- BIND(&if_left_not_number);
- {
- Label if_left_bigint(this);
- TNode<Uint16T> left_instance_type = LoadMapInstanceType(left_map);
- GotoIf(IsStringInstanceType(left_instance_type),
- &string_add_convert_right);
- TNode<Uint16T> right_instance_type = LoadMapInstanceType(right_map);
- GotoIf(IsStringInstanceType(right_instance_type),
- &string_add_convert_left);
- GotoIf(IsBigIntInstanceType(left_instance_type), &if_left_bigint);
- Label if_left_not_receiver(this, Label::kDeferred);
- Label if_right_not_receiver(this, Label::kDeferred);
- GotoIfNot(IsJSReceiverInstanceType(left_instance_type),
- &if_left_not_receiver);
- // {left} is a JSReceiver, convert it first.
- var_left = ConvertReceiver(CAST(var_left.value()), context);
- Goto(&loop);
-
- BIND(&if_left_bigint);
- {
- // {right} is a HeapObject, but not a String. Jump to
- // {do_bigint_add} if {right} is already a Numeric.
- GotoIf(IsBigIntInstanceType(right_instance_type), &do_bigint_add);
- GotoIf(IsHeapNumberMap(right_map), &do_bigint_add);
- ConvertAndLoop(&var_right, right_instance_type, &loop, context);
- }
-
- BIND(&if_left_not_receiver);
- GotoIfNot(IsJSReceiverInstanceType(right_instance_type),
- &if_right_not_receiver);
- // {left} is a Primitive, but {right} is a JSReceiver, so convert
- // {right} with priority.
- var_right = ConvertReceiver(CAST(var_right.value()), context);
- Goto(&loop);
-
- BIND(&if_right_not_receiver);
- // Neither {left} nor {right} are JSReceivers.
- ConvertNonReceiverAndLoop(&var_left, &loop, context);
- }
- } // if_right_heapobject
- } // if_left_heapobject
- }
- BIND(&string_add_convert_left);
- {
- // Convert {left} to a String and concatenate it with the String {right}.
- TailCallBuiltin(Builtins::kStringAddConvertLeft, context, var_left.value(),
- var_right.value());
- }
-
- BIND(&string_add_convert_right);
- {
- // Convert {right} to a String and concatenate it with the String {left}.
- TailCallBuiltin(Builtins::kStringAddConvertRight, context, var_left.value(),
- var_right.value());
- }
-
- BIND(&do_bigint_add);
- {
- TailCallBuiltin(Builtins::kBigIntAdd, context, var_left.value(),
- var_right.value());
- }
-
- BIND(&do_double_add);
- {
- TNode<Float64T> value =
- Float64Add(var_left_double.value(), var_right_double.value());
- Return(AllocateHeapNumberWithValue(value));
- }
-}
-
-template <typename Descriptor>
-void NumberBuiltinsAssembler::UnaryOp(TVariable<Object>* var_input,
- Label* do_smi, Label* do_double,
- TVariable<Float64T>* var_input_double,
- Label* do_bigint) {
- TNode<Context> context = CAST(Parameter(Descriptor::kContext));
- *var_input = CAST(Parameter(Descriptor::kValue));
-
- // We might need to loop for ToNumeric conversion.
- Label loop(this, {var_input});
- Goto(&loop);
- BIND(&loop);
- TNode<Object> input = var_input->value();
-
- Label not_number(this);
- GotoIf(TaggedIsSmi(input), do_smi);
- TNode<HeapObject> input_heap_object = CAST(input);
- GotoIfNot(IsHeapNumber(input_heap_object), &not_number);
- if (var_input_double != nullptr) {
- *var_input_double = LoadHeapNumberValue(input_heap_object);
- }
- Goto(do_double);
-
- BIND(&not_number);
- GotoIf(IsBigInt(input_heap_object), do_bigint);
- *var_input = CallBuiltin(Builtins::kNonNumberToNumeric, context, input);
- Goto(&loop);
-}
-
-template <typename Descriptor>
-void NumberBuiltinsAssembler::BinaryOp(Label* smis, TVariable<Object>* var_left,
- TVariable<Object>* var_right,
- Label* doubles,
- TVariable<Float64T>* var_left_double,
- TVariable<Float64T>* var_right_double,
- Label* bigints) {
- DCHECK_EQ(var_left_double == nullptr, var_right_double == nullptr);
-
- TNode<Context> context = CAST(Parameter(Descriptor::kContext));
- *var_left = CAST(Parameter(Descriptor::kLeft));
- *var_right = CAST(Parameter(Descriptor::kRight));
-
- // We might need to loop for ToNumeric conversions.
- Label loop(this, {var_left, var_right});
- Goto(&loop);
- BIND(&loop);
-
- Label left_not_smi(this), right_not_smi(this);
- Label left_not_number(this), right_not_number(this);
- GotoIfNot(TaggedIsSmi(var_left->value()), &left_not_smi);
- GotoIf(TaggedIsSmi(var_right->value()), smis);
-
- // At this point, var_left is a Smi but var_right is not.
- TNode<Smi> var_left_smi = CAST(var_left->value());
- TNode<HeapObject> var_right_heap_object = CAST(var_right->value());
- GotoIfNot(IsHeapNumber(var_right_heap_object), &right_not_number);
- if (var_left_double != nullptr) {
- *var_left_double = SmiToFloat64(var_left_smi);
- *var_right_double = LoadHeapNumberValue(var_right_heap_object);
- }
- Goto(doubles);
-
- BIND(&left_not_smi);
- {
- TNode<HeapObject> var_left_heap_object = CAST(var_left->value());
- GotoIfNot(IsHeapNumber(var_left_heap_object), &left_not_number);
- GotoIfNot(TaggedIsSmi(var_right->value()), &right_not_smi);
-
- // At this point, var_left is a HeapNumber and var_right is a Smi.
- if (var_left_double != nullptr) {
- *var_left_double = LoadHeapNumberValue(var_left_heap_object);
- *var_right_double = SmiToFloat64(CAST(var_right->value()));
- }
- Goto(doubles);
- }
-
- BIND(&right_not_smi);
- {
- TNode<HeapObject> var_right_heap_object = CAST(var_right->value());
- GotoIfNot(IsHeapNumber(var_right_heap_object), &right_not_number);
- if (var_left_double != nullptr) {
- *var_left_double = LoadHeapNumberValue(CAST(var_left->value()));
- *var_right_double = LoadHeapNumberValue(var_right_heap_object);
- }
- Goto(doubles);
- }
-
- BIND(&left_not_number);
- {
- Label left_bigint(this);
- GotoIf(IsBigInt(CAST(var_left->value())), &left_bigint);
- *var_left =
- CallBuiltin(Builtins::kNonNumberToNumeric, context, var_left->value());
- Goto(&loop);
-
- BIND(&left_bigint);
- {
- // Jump to {bigints} if {var_right} is already a Numeric.
- GotoIf(TaggedIsSmi(var_right->value()), bigints);
- TNode<HeapObject> var_right_heap_object = CAST(var_right->value());
- GotoIf(IsBigInt(var_right_heap_object), bigints);
- GotoIf(IsHeapNumber(var_right_heap_object), bigints);
- *var_right = CallBuiltin(Builtins::kNonNumberToNumeric, context,
- var_right->value());
- Goto(&loop);
- }
- }
-
- BIND(&right_not_number);
- {
- GotoIf(IsBigInt(CAST(var_right->value())), bigints);
- *var_right =
- CallBuiltin(Builtins::kNonNumberToNumeric, context, var_right->value());
- Goto(&loop);
- }
-}
-
-TF_BUILTIN(Subtract, NumberBuiltinsAssembler) {
- TVARIABLE(Object, var_left);
- TVARIABLE(Object, var_right);
- TVARIABLE(Float64T, var_left_double);
- TVARIABLE(Float64T, var_right_double);
- Label do_smi_sub(this), do_double_sub(this), do_bigint_sub(this);
-
- BinaryOp<Descriptor>(&do_smi_sub, &var_left, &var_right, &do_double_sub,
- &var_left_double, &var_right_double, &do_bigint_sub);
-
- BIND(&do_smi_sub);
- {
- Label if_overflow(this);
- TNode<Smi> var_left_smi = CAST(var_left.value());
- TNode<Smi> var_right_smi = CAST(var_right.value());
- TNode<Smi> result = TrySmiSub(var_left_smi, var_right_smi, &if_overflow);
- Return(result);
-
- BIND(&if_overflow);
- {
- var_left_double = SmiToFloat64(var_left_smi);
- var_right_double = SmiToFloat64(var_right_smi);
- Goto(&do_double_sub);
- }
- }
-
- BIND(&do_double_sub);
- {
- TNode<Float64T> value =
- Float64Sub(var_left_double.value(), var_right_double.value());
- Return(AllocateHeapNumberWithValue(value));
- }
-
- BIND(&do_bigint_sub);
- {
- TNode<Context> context = CAST(Parameter(Descriptor::kContext));
- TailCallBuiltin(Builtins::kBigIntSubtract, context, var_left.value(),
- var_right.value());
- }
-}
-
-TF_BUILTIN(BitwiseNot, NumberBuiltinsAssembler) {
- TNode<Context> context = CAST(Parameter(Descriptor::kContext));
- TVARIABLE(Object, var_input);
- Label do_number(this), do_bigint(this);
-
- UnaryOp<Descriptor>(&var_input, &do_number, &do_number, nullptr, &do_bigint);
-
- BIND(&do_number);
- {
- TailCallBuiltin(Builtins::kBitwiseXor, context, var_input.value(),
- SmiConstant(-1));
- }
-
- BIND(&do_bigint);
- {
- Return(CallRuntime(Runtime::kBigIntUnaryOp, context, var_input.value(),
- SmiConstant(Operation::kBitwiseNot)));
- }
-}
-
-TF_BUILTIN(Decrement, NumberBuiltinsAssembler) {
- TNode<Context> context = CAST(Parameter(Descriptor::kContext));
- TVARIABLE(Object, var_input);
- Label do_number(this), do_bigint(this);
-
- UnaryOp<Descriptor>(&var_input, &do_number, &do_number, nullptr, &do_bigint);
-
- BIND(&do_number);
- {
- TailCallBuiltin(Builtins::kSubtract, context, var_input.value(),
- SmiConstant(1));
- }
-
- BIND(&do_bigint);
- {
- Return(CallRuntime(Runtime::kBigIntUnaryOp, context, var_input.value(),
- SmiConstant(Operation::kDecrement)));
- }
-}
-
-TF_BUILTIN(Increment, NumberBuiltinsAssembler) {
- TNode<Context> context = CAST(Parameter(Descriptor::kContext));
- TVARIABLE(Object, var_input);
- Label do_number(this), do_bigint(this);
-
- UnaryOp<Descriptor>(&var_input, &do_number, &do_number, nullptr, &do_bigint);
-
- BIND(&do_number);
- {
- TailCallBuiltin(Builtins::kAdd, context, var_input.value(), SmiConstant(1));
- }
-
- BIND(&do_bigint);
- {
- Return(CallRuntime(Runtime::kBigIntUnaryOp, context, var_input.value(),
- SmiConstant(Operation::kIncrement)));
- }
-}
-
-TF_BUILTIN(Negate, NumberBuiltinsAssembler) {
- TVARIABLE(Object, var_input);
- TVARIABLE(Float64T, var_input_double);
- Label do_smi(this), do_double(this), do_bigint(this);
-
- UnaryOp<Descriptor>(&var_input, &do_smi, &do_double, &var_input_double,
- &do_bigint);
-
- BIND(&do_smi);
- { Return(SmiMul(CAST(var_input.value()), SmiConstant(-1))); }
-
- BIND(&do_double);
- {
- TNode<Float64T> value =
- Float64Mul(var_input_double.value(), Float64Constant(-1));
- Return(AllocateHeapNumberWithValue(value));
- }
-
- BIND(&do_bigint);
- {
- TNode<Context> context = CAST(Parameter(Descriptor::kContext));
- Return(CallRuntime(Runtime::kBigIntUnaryOp, context, var_input.value(),
- SmiConstant(Operation::kNegate)));
- }
-}
-
-TF_BUILTIN(Multiply, NumberBuiltinsAssembler) {
- TVARIABLE(Object, var_left);
- TVARIABLE(Object, var_right);
- TVARIABLE(Float64T, var_left_double);
- TVARIABLE(Float64T, var_right_double);
- Label do_smi_mul(this), do_double_mul(this), do_bigint_mul(this);
-
- BinaryOp<Descriptor>(&do_smi_mul, &var_left, &var_right, &do_double_mul,
- &var_left_double, &var_right_double, &do_bigint_mul);
-
- BIND(&do_smi_mul);
- // The result is not necessarily a smi, in case of overflow.
- Return(SmiMul(CAST(var_left.value()), CAST(var_right.value())));
-
- BIND(&do_double_mul);
- TNode<Float64T> value =
- Float64Mul(var_left_double.value(), var_right_double.value());
- Return(AllocateHeapNumberWithValue(value));
-
- BIND(&do_bigint_mul);
- {
- TNode<Context> context = CAST(Parameter(Descriptor::kContext));
- Return(CallRuntime(Runtime::kBigIntBinaryOp, context, var_left.value(),
- var_right.value(), SmiConstant(Operation::kMultiply)));
- }
-}
-
-TF_BUILTIN(Divide, NumberBuiltinsAssembler) {
- TVARIABLE(Object, var_left);
- TVARIABLE(Object, var_right);
- TVARIABLE(Float64T, var_left_double);
- TVARIABLE(Float64T, var_right_double);
- Label do_smi_div(this), do_double_div(this), do_bigint_div(this);
-
- BinaryOp<Descriptor>(&do_smi_div, &var_left, &var_right, &do_double_div,
- &var_left_double, &var_right_double, &do_bigint_div);
-
- BIND(&do_smi_div);
- {
- // TODO(jkummerow): Consider just always doing a double division.
- Label bailout(this);
- TNode<Smi> dividend = CAST(var_left.value());
- TNode<Smi> divisor = CAST(var_right.value());
-
- // Do floating point division if {divisor} is zero.
- GotoIf(SmiEqual(divisor, SmiConstant(0)), &bailout);
-
- // Do floating point division if {dividend} is zero and {divisor} is
- // negative.
- Label dividend_is_zero(this), dividend_is_not_zero(this);
- Branch(SmiEqual(dividend, SmiConstant(0)), &dividend_is_zero,
- &dividend_is_not_zero);
-
- BIND(&dividend_is_zero);
- {
- GotoIf(SmiLessThan(divisor, SmiConstant(0)), &bailout);
- Goto(&dividend_is_not_zero);
- }
- BIND(&dividend_is_not_zero);
-
- TNode<Int32T> untagged_divisor = SmiToInt32(divisor);
- TNode<Int32T> untagged_dividend = SmiToInt32(dividend);
-
- // Do floating point division if {dividend} is kMinInt (or kMinInt - 1
- // if the Smi size is 31) and {divisor} is -1.
- Label divisor_is_minus_one(this), divisor_is_not_minus_one(this);
- Branch(Word32Equal(untagged_divisor, Int32Constant(-1)),
- &divisor_is_minus_one, &divisor_is_not_minus_one);
-
- BIND(&divisor_is_minus_one);
- {
- GotoIf(Word32Equal(
- untagged_dividend,
- Int32Constant(kSmiValueSize == 32 ? kMinInt : (kMinInt >> 1))),
- &bailout);
- Goto(&divisor_is_not_minus_one);
- }
- BIND(&divisor_is_not_minus_one);
-
- // TODO(epertoso): consider adding a machine instruction that returns
- // both the result and the remainder.
- TNode<Int32T> untagged_result =
- Int32Div(untagged_dividend, untagged_divisor);
- TNode<Int32T> truncated = Int32Mul(untagged_result, untagged_divisor);
- // Do floating point division if the remainder is not 0.
- GotoIf(Word32NotEqual(untagged_dividend, truncated), &bailout);
- Return(SmiFromInt32(untagged_result));
-
- // Bailout: convert {dividend} and {divisor} to double and do double
- // division.
- BIND(&bailout);
- {
- var_left_double = SmiToFloat64(dividend);
- var_right_double = SmiToFloat64(divisor);
- Goto(&do_double_div);
- }
- }
-
- BIND(&do_double_div);
- {
- TNode<Float64T> value =
- Float64Div(var_left_double.value(), var_right_double.value());
- Return(AllocateHeapNumberWithValue(value));
- }
-
- BIND(&do_bigint_div);
- {
- TNode<Context> context = CAST(Parameter(Descriptor::kContext));
- Return(CallRuntime(Runtime::kBigIntBinaryOp, context, var_left.value(),
- var_right.value(), SmiConstant(Operation::kDivide)));
- }
-}
-
-TF_BUILTIN(Modulus, NumberBuiltinsAssembler) {
- TVARIABLE(Object, var_left);
- TVARIABLE(Object, var_right);
- TVARIABLE(Float64T, var_left_double);
- TVARIABLE(Float64T, var_right_double);
- Label do_smi_mod(this), do_double_mod(this), do_bigint_mod(this);
-
- BinaryOp<Descriptor>(&do_smi_mod, &var_left, &var_right, &do_double_mod,
- &var_left_double, &var_right_double, &do_bigint_mod);
-
- BIND(&do_smi_mod);
- Return(SmiMod(CAST(var_left.value()), CAST(var_right.value())));
-
- BIND(&do_double_mod);
- TNode<Float64T> value =
- Float64Mod(var_left_double.value(), var_right_double.value());
- Return(AllocateHeapNumberWithValue(value));
-
- BIND(&do_bigint_mod);
- {
- TNode<Context> context = CAST(Parameter(Descriptor::kContext));
- Return(CallRuntime(Runtime::kBigIntBinaryOp, context, var_left.value(),
- var_right.value(), SmiConstant(Operation::kModulus)));
- }
-}
-
-TF_BUILTIN(Exponentiate, NumberBuiltinsAssembler) {
- TVARIABLE(Object, var_left);
- TVARIABLE(Object, var_right);
- Label do_number_exp(this), do_bigint_exp(this);
- TNode<Context> context = CAST(Parameter(Descriptor::kContext));
-
- BinaryOp<Descriptor>(&do_number_exp, &var_left, &var_right, &do_number_exp,
- nullptr, nullptr, &do_bigint_exp);
-
- BIND(&do_number_exp);
- { Return(MathPowImpl(context, var_left.value(), var_right.value())); }
-
- BIND(&do_bigint_exp);
- Return(CallRuntime(Runtime::kBigIntBinaryOp, context, var_left.value(),
- var_right.value(), SmiConstant(Operation::kExponentiate)));
-}
-
-TF_BUILTIN(ShiftLeft, NumberBuiltinsAssembler) {
- EmitBitwiseOp<Descriptor>(Operation::kShiftLeft);
-}
-
-TF_BUILTIN(ShiftRight, NumberBuiltinsAssembler) {
- EmitBitwiseOp<Descriptor>(Operation::kShiftRight);
-}
-
-TF_BUILTIN(ShiftRightLogical, NumberBuiltinsAssembler) {
- EmitBitwiseOp<Descriptor>(Operation::kShiftRightLogical);
-}
-
-TF_BUILTIN(BitwiseAnd, NumberBuiltinsAssembler) {
- EmitBitwiseOp<Descriptor>(Operation::kBitwiseAnd);
-}
-
-TF_BUILTIN(BitwiseOr, NumberBuiltinsAssembler) {
- EmitBitwiseOp<Descriptor>(Operation::kBitwiseOr);
-}
-
-TF_BUILTIN(BitwiseXor, NumberBuiltinsAssembler) {
- EmitBitwiseOp<Descriptor>(Operation::kBitwiseXor);
-}
-
-TF_BUILTIN(LessThan, NumberBuiltinsAssembler) {
- RelationalComparisonBuiltin<Descriptor>(Operation::kLessThan);
-}
-
-TF_BUILTIN(LessThanOrEqual, NumberBuiltinsAssembler) {
- RelationalComparisonBuiltin<Descriptor>(Operation::kLessThanOrEqual);
-}
-
-TF_BUILTIN(GreaterThan, NumberBuiltinsAssembler) {
- RelationalComparisonBuiltin<Descriptor>(Operation::kGreaterThan);
-}
-
-TF_BUILTIN(GreaterThanOrEqual, NumberBuiltinsAssembler) {
- RelationalComparisonBuiltin<Descriptor>(Operation::kGreaterThanOrEqual);
-}
-
-TF_BUILTIN(Equal, CodeStubAssembler) {
+TF_BUILTIN(StrictEqual_WithFeedback, CodeStubAssembler) {
TNode<Object> lhs = CAST(Parameter(Descriptor::kLeft));
TNode<Object> rhs = CAST(Parameter(Descriptor::kRight));
- TNode<Context> context = CAST(Parameter(Descriptor::kContext));
+ TNode<HeapObject> maybe_feedback_vector =
+ CAST(Parameter(Descriptor::kMaybeFeedbackVector));
+ TNode<UintPtrT> slot = UncheckedCast<UintPtrT>(Parameter(Descriptor::kSlot));
 
- Return(Equal(lhs, rhs, context));
-}
+ TVARIABLE(Smi, var_type_feedback);
+ TNode<Oddball> result = StrictEqual(lhs, rhs, &var_type_feedback);
+ UpdateFeedback(var_type_feedback.value(), maybe_feedback_vector, slot);
 
-TF_BUILTIN(StrictEqual, CodeStubAssembler) {
- TNode<Object> lhs = CAST(Parameter(Descriptor::kLeft));
- TNode<Object> rhs = CAST(Parameter(Descriptor::kRight));
-
- Return(StrictEqual(lhs, rhs));
+ Return(result);
}
} // namespace internal
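The builtins-number-gen.cc changes above replace the hand-written Add, Subtract, unary-op and comparison builtins with macro-generated *_WithFeedback variants that delegate to BinaryOpAssembler/UnaryOpAssembler and record type feedback via UpdateFeedback. As an illustration of the macro style (a mechanical preprocessor expansion, not extra patch content), DEF_BINOP(Add_WithFeedback, Generate_AddWithFeedback) expands to roughly:

// Illustrative expansion of DEF_BINOP(Add_WithFeedback, Generate_AddWithFeedback).
TF_BUILTIN(Add_WithFeedback, CodeStubAssembler) {
  TNode<Object> lhs = CAST(Parameter(Descriptor::kLeft));
  TNode<Object> rhs = CAST(Parameter(Descriptor::kRight));
  TNode<Context> context = CAST(Parameter(Descriptor::kContext));
  TNode<HeapObject> maybe_feedback_vector =
      CAST(Parameter(Descriptor::kMaybeFeedbackVector));
  TNode<UintPtrT> slot =
      UncheckedCast<UintPtrT>(Parameter(Descriptor::kSlot));

  // Arithmetic and feedback collection are delegated to BinaryOpAssembler;
  // the trailing false is simply the flag the macro passes as its last
  // argument (presumably "rhs not known to be a Smi").
  BinaryOpAssembler binop_asm(state());
  TNode<Object> result = binop_asm.Generate_AddWithFeedback(
      context, lhs, rhs, slot, maybe_feedback_vector, false);

  Return(result);
}

DEF_UNOP and DEF_COMPARE follow the same pattern for the unary operations and the relational comparisons.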
diff --git a/chromium/v8/src/builtins/builtins-object-gen.cc b/chromium/v8/src/builtins/builtins-object-gen.cc
index 06045495581..43a0a6953e8 100644
--- a/chromium/v8/src/builtins/builtins-object-gen.cc
+++ b/chromium/v8/src/builtins/builtins-object-gen.cc
@@ -314,7 +314,8 @@ TNode<JSArray> ObjectEntriesValuesBuiltinsAssembler::FastGetOwnValuesOrEntries(
TNode<JSArray> array;
TNode<FixedArrayBase> elements;
std::tie(array, elements) = AllocateUninitializedJSArrayWithElements(
- PACKED_ELEMENTS, array_map, SmiConstant(2), {}, IntPtrConstant(2));
+ PACKED_ELEMENTS, array_map, SmiConstant(2), base::nullopt,
+ IntPtrConstant(2));
StoreFixedArrayElement(CAST(elements), 0, next_key, SKIP_WRITE_BARRIER);
StoreFixedArrayElement(CAST(elements), 1, value, SKIP_WRITE_BARRIER);
value = TNode<JSArray>::UncheckedCast(array);
@@ -499,7 +500,7 @@ TF_BUILTIN(ObjectKeys, ObjectBuiltinsAssembler) {
LoadJSArrayElementsMap(PACKED_ELEMENTS, native_context);
TNode<Smi> array_length = SmiTag(Signed(object_enum_length));
std::tie(array, elements) = AllocateUninitializedJSArrayWithElements(
- PACKED_ELEMENTS, array_map, array_length, {},
+ PACKED_ELEMENTS, array_map, array_length, base::nullopt,
Signed(object_enum_length));
CopyFixedArrayElements(PACKED_ELEMENTS, object_enum_keys, elements,
object_enum_length, SKIP_WRITE_BARRIER);
@@ -595,7 +596,7 @@ TF_BUILTIN(ObjectGetOwnPropertyNames, ObjectBuiltinsAssembler) {
TNode<JSArray> array;
TNode<FixedArrayBase> elements;
std::tie(array, elements) = AllocateUninitializedJSArrayWithElements(
- PACKED_ELEMENTS, array_map, array_length, {},
+ PACKED_ELEMENTS, array_map, array_length, base::nullopt,
Signed(object_enum_length));
CopyFixedArrayElements(PACKED_ELEMENTS, object_enum_keys, elements,
object_enum_length, SKIP_WRITE_BARRIER);
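The builtins-object-gen.cc hunks above (and the regexp-gen hunks further down) spell out base::nullopt instead of `{}` for what appears to be the optional allocation-site argument of AllocateUninitializedJSArrayWithElements. A minimal standalone sketch of the readability difference, using std::optional as a stand-in for v8::base::Optional (an assumption; the V8 type mirrors std::optional but is its own class):

// Standalone sketch, not V8 code: an explicit nullopt documents "no value"
// at the call site, where a bare {} could be read as "default-constructed".
#include <iostream>
#include <optional>
#include <string>

void AllocateArray(int kind, std::optional<int> allocation_site) {
  std::cout << "kind=" << kind << ", site="
            << (allocation_site ? std::to_string(*allocation_site) : "none")
            << "\n";
}

int main() {
  AllocateArray(0, {});            // legal, but ambiguous to the reader
  AllocateArray(0, std::nullopt);  // the spelling the patch moves to
  return 0;
}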
diff --git a/chromium/v8/src/builtins/builtins-promise.h b/chromium/v8/src/builtins/builtins-promise.h
index fd938ff8418..a775ea20411 100644
--- a/chromium/v8/src/builtins/builtins-promise.h
+++ b/chromium/v8/src/builtins/builtins-promise.h
@@ -35,7 +35,7 @@ class PromiseBuiltins {
kPromiseAllResolveElementCapabilitySlot,
// Values array from Promise.all
- kPromiseAllResolveElementValuesArraySlot,
+ kPromiseAllResolveElementValuesSlot,
kPromiseAllResolveElementLength
};
@@ -48,7 +48,7 @@ class PromiseBuiltins {
kPromiseAnyRejectElementCapabilitySlot,
// errors array from Promise.any
- kPromiseAnyRejectElementErrorsArraySlot,
+ kPromiseAnyRejectElementErrorsSlot,
kPromiseAnyRejectElementLength
};
diff --git a/chromium/v8/src/builtins/builtins-regexp-gen.cc b/chromium/v8/src/builtins/builtins-regexp-gen.cc
index b9c1b8980ea..584111cc760 100644
--- a/chromium/v8/src/builtins/builtins-regexp-gen.cc
+++ b/chromium/v8/src/builtins/builtins-regexp-gen.cc
@@ -89,7 +89,7 @@ TNode<JSRegExpResult> RegExpBuiltinsAssembler::AllocateRegExpResult(
const ElementsKind elements_kind = PACKED_ELEMENTS;
TNode<Map> map = CAST(LoadContextElement(LoadNativeContext(context),
Context::REGEXP_RESULT_MAP_INDEX));
- TNode<AllocationSite> no_allocation_site = {};
+ base::Optional<TNode<AllocationSite>> no_allocation_site = base::nullopt;
TNode<IntPtrT> length_intptr = SmiUntag(length);
// Note: The returned `elements` may be in young large object space, but
@@ -1354,9 +1354,7 @@ TNode<JSArray> RegExpBuiltinsAssembler::RegExpPrototypeSplitBody(
const TNode<IntPtrT> int_limit = SmiUntag(limit);
const ElementsKind kind = PACKED_ELEMENTS;
- const ParameterMode mode = CodeStubAssembler::INTPTR_PARAMETERS;
- TNode<AllocationSite> allocation_site = {};
const TNode<NativeContext> native_context = LoadNativeContext(context);
TNode<Map> array_map = LoadJSArrayElementsMap(kind, native_context);
@@ -1396,6 +1394,7 @@ TNode<JSArray> RegExpBuiltinsAssembler::RegExpPrototypeSplitBody(
{
TNode<Smi> length = SmiConstant(1);
TNode<IntPtrT> capacity = IntPtrConstant(1);
+ base::Optional<TNode<AllocationSite>> allocation_site = base::nullopt;
var_result =
AllocateJSArray(kind, array_map, capacity, length, allocation_site);
@@ -1508,10 +1507,10 @@ TNode<JSArray> RegExpBuiltinsAssembler::RegExpPrototypeSplitBody(
const TNode<IntPtrT> reg = var_reg.value();
const TNode<Object> from = LoadFixedArrayElement(
match_indices, reg,
- RegExpMatchInfo::kFirstCaptureIndex * kTaggedSize, mode);
+ RegExpMatchInfo::kFirstCaptureIndex * kTaggedSize);
const TNode<Smi> to = CAST(LoadFixedArrayElement(
match_indices, reg,
- (RegExpMatchInfo::kFirstCaptureIndex + 1) * kTaggedSize, mode));
+ (RegExpMatchInfo::kFirstCaptureIndex + 1) * kTaggedSize));
Label select_capture(this), select_undefined(this), store_value(this);
TVARIABLE(Object, var_value);
@@ -1570,6 +1569,7 @@ TNode<JSArray> RegExpBuiltinsAssembler::RegExpPrototypeSplitBody(
{
TNode<Smi> length = SmiZero();
TNode<IntPtrT> capacity = IntPtrZero();
+ base::Optional<TNode<AllocationSite>> allocation_site = base::nullopt;
var_result =
AllocateJSArray(kind, array_map, capacity, length, allocation_site);
Goto(&done);
diff --git a/chromium/v8/src/builtins/builtins-string-gen.cc b/chromium/v8/src/builtins/builtins-string-gen.cc
index 7ccb99792ed..9920369136a 100644
--- a/chromium/v8/src/builtins/builtins-string-gen.cc
+++ b/chromium/v8/src/builtins/builtins-string-gen.cc
@@ -1164,10 +1164,11 @@ void StringBuiltinsAssembler::MaybeCallFunctionAtSymbol(
DescriptorIndexNameValue additional_property_to_check,
const NodeFunction0& regexp_call, const NodeFunction1& generic_call) {
Label out(this);
+ Label get_property_lookup(this);
 
- // Smis definitely don't have an attached symbol.
- GotoIf(TaggedIsSmi(object), &out);
- TNode<HeapObject> heap_object = CAST(object);
+ // Smis have to go through the GetProperty lookup in case Number.prototype or
+ // Object.prototype was modified.
+ GotoIf(TaggedIsSmi(object), &get_property_lookup);
// Take the fast path for RegExps.
// There's two conditions: {object} needs to be a fast regexp, and
@@ -1176,6 +1177,8 @@ void StringBuiltinsAssembler::MaybeCallFunctionAtSymbol(
{
Label stub_call(this), slow_lookup(this);
+ TNode<HeapObject> heap_object = CAST(object);
+
GotoIf(TaggedIsSmi(maybe_string), &slow_lookup);
GotoIfNot(IsString(CAST(maybe_string)), &slow_lookup);
@@ -1196,10 +1199,10 @@ void StringBuiltinsAssembler::MaybeCallFunctionAtSymbol(
regexp_call();
BIND(&slow_lookup);
+ // Special case null and undefined to skip the property lookup.
+ Branch(IsNullOrUndefined(heap_object), &out, &get_property_lookup);
}
- GotoIf(IsNullOrUndefined(heap_object), &out);
-
// Fall back to a slow lookup of {heap_object[symbol]}.
//
// The spec uses GetMethod({heap_object}, {symbol}), which has a few quirks:
@@ -1208,7 +1211,8 @@ void StringBuiltinsAssembler::MaybeCallFunctionAtSymbol(
// We handle the former by jumping to {out} for null values as well, while
// the latter is already handled by the Call({maybe_func}) operation.
- const TNode<Object> maybe_func = GetProperty(context, heap_object, symbol);
+ BIND(&get_property_lookup);
+ const TNode<Object> maybe_func = GetProperty(context, object, symbol);
GotoIf(IsUndefined(maybe_func), &out);
GotoIf(IsNull(maybe_func), &out);
diff --git a/chromium/v8/src/builtins/builtins-string.cc b/chromium/v8/src/builtins/builtins-string.cc
index 8a897765c83..df5ba93a59e 100644
--- a/chromium/v8/src/builtins/builtins-string.cc
+++ b/chromium/v8/src/builtins/builtins-string.cc
@@ -40,14 +40,16 @@ bool IsValidCodePoint(Isolate* isolate, Handle<Object> value) {
return true;
}
+static constexpr uc32 kInvalidCodePoint = static_cast<uc32>(-1);
+
uc32 NextCodePoint(Isolate* isolate, BuiltinArguments args, int index) {
Handle<Object> value = args.at(1 + index);
- ASSIGN_RETURN_ON_EXCEPTION_VALUE(isolate, value,
- Object::ToNumber(isolate, value), -1);
+ ASSIGN_RETURN_ON_EXCEPTION_VALUE(
+ isolate, value, Object::ToNumber(isolate, value), kInvalidCodePoint);
if (!IsValidCodePoint(isolate, value)) {
isolate->Throw(*isolate->factory()->NewRangeError(
MessageTemplate::kInvalidCodePoint, value));
- return -1;
+ return kInvalidCodePoint;
}
return DoubleToUint32(value->Number());
}
@@ -69,7 +71,7 @@ BUILTIN(StringFromCodePoint) {
int index;
for (index = 0; index < length; index++) {
code = NextCodePoint(isolate, args, index);
- if (code < 0) {
+ if (code == kInvalidCodePoint) {
return ReadOnlyRoots(isolate).exception();
}
if (code > String::kMaxOneByteCharCode) {
@@ -99,7 +101,7 @@ BUILTIN(StringFromCodePoint) {
break;
}
code = NextCodePoint(isolate, args, index);
- if (code < 0) {
+ if (code == kInvalidCodePoint) {
return ReadOnlyRoots(isolate).exception();
}
}
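The builtins-string.cc hunk above replaces the magic -1 return and the `code < 0` checks with a named kInvalidCodePoint sentinel compared by equality. Assuming uc32 is an unsigned 32-bit type (as it is in V8) and `code` is itself a uc32, the old `code < 0` test could never fire, so the equality comparison is the real fix rather than a cosmetic rename. A standalone demonstration of that pitfall (not V8 code; the stub function is invented for illustration):

// uc32 and kInvalidCodePoint mirror the names used in the patch.
#include <cstdint>
#include <iostream>

using uc32 = uint32_t;
constexpr uc32 kInvalidCodePoint = static_cast<uc32>(-1);

uc32 FakeNextCodePoint(bool fail) {
  // Stand-in for NextCodePoint(): returns the sentinel on failure.
  return fail ? kInvalidCodePoint : 0x1F600;  // U+1F600 on success
}

int main() {
  uc32 code = FakeNextCodePoint(/*fail=*/true);
  std::cout << (code < 0) << "\n";                   // prints 0: an unsigned value is never < 0
  std::cout << (code == kInvalidCodePoint) << "\n";  // prints 1: the failure is detected
  return 0;
}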
diff --git a/chromium/v8/src/builtins/builtins-string.tq b/chromium/v8/src/builtins/builtins-string.tq
index a4edc94418c..6ec7bc9d0ac 100644
--- a/chromium/v8/src/builtins/builtins-string.tq
+++ b/chromium/v8/src/builtins/builtins-string.tq
@@ -5,6 +5,43 @@
#include 'src/builtins/builtins-string-gen.h'
namespace string {
+
+// TODO(bbudge) Remove the 'RT' suffix on this runtime function.
+extern transitioning runtime ToStringRT(Context, JSAny): String;
+
+@export
+transitioning macro ToStringImpl(context: Context, o: JSAny): String {
+ let result: JSAny = o;
+ while (true) {
+ typeswitch (result) {
+ case (num: Number): {
+ return NumberToString(num);
+ }
+ case (str: String): {
+ return str;
+ }
+ case (oddball: Oddball): {
+ return oddball.to_string;
+ }
+ case (JSReceiver): {
+ result = NonPrimitiveToPrimitive_String(context, result);
+ continue;
+ }
+ case (Symbol): {
+ ThrowTypeError(MessageTemplate::kSymbolToString);
+ }
+ case (JSAny): {
+ return ToStringRT(context, o);
+ }
+ }
+ }
+ unreachable;
+}
+
+transitioning builtin ToString(context: Context, o: JSAny): String {
+ return ToStringImpl(context, o);
+}
+
extern macro StringBuiltinsAssembler::SubString(
String, uintptr, uintptr): String;
diff --git a/chromium/v8/src/builtins/builtins-typed-array-gen.cc b/chromium/v8/src/builtins/builtins-typed-array-gen.cc
index a6d3887ad31..b359b438c74 100644
--- a/chromium/v8/src/builtins/builtins-typed-array-gen.cc
+++ b/chromium/v8/src/builtins/builtins-typed-array-gen.cc
@@ -505,49 +505,5 @@ TF_BUILTIN(TypedArrayPrototypeToStringTag, TypedArrayBuiltinsAssembler) {
BIND(&return_undefined);
Return(UndefinedConstant());
}
-
-void TypedArrayBuiltinsAssembler::GenerateTypedArrayPrototypeIterationMethod(
- TNode<Context> context, TNode<Object> receiver, const char* method_name,
- IterationKind kind) {
- Label throw_bad_receiver(this, Label::kDeferred);
-
- GotoIf(TaggedIsSmi(receiver), &throw_bad_receiver);
- GotoIfNot(IsJSTypedArray(CAST(receiver)), &throw_bad_receiver);
-
- // Check if the {receiver}'s JSArrayBuffer was detached.
- ThrowIfArrayBufferViewBufferIsDetached(context, CAST(receiver), method_name);
-
- Return(CreateArrayIterator(context, receiver, kind));
-
- BIND(&throw_bad_receiver);
- ThrowTypeError(context, MessageTemplate::kNotTypedArray, method_name);
-}
-
-// ES #sec-%typedarray%.prototype.values
-TF_BUILTIN(TypedArrayPrototypeValues, TypedArrayBuiltinsAssembler) {
- TNode<Context> context = CAST(Parameter(Descriptor::kContext));
- TNode<Object> receiver = CAST(Parameter(Descriptor::kReceiver));
- GenerateTypedArrayPrototypeIterationMethod(context, receiver,
- "%TypedArray%.prototype.values()",
- IterationKind::kValues);
-}
-
-// ES #sec-%typedarray%.prototype.entries
-TF_BUILTIN(TypedArrayPrototypeEntries, TypedArrayBuiltinsAssembler) {
- TNode<Context> context = CAST(Parameter(Descriptor::kContext));
- TNode<Object> receiver = CAST(Parameter(Descriptor::kReceiver));
- GenerateTypedArrayPrototypeIterationMethod(context, receiver,
- "%TypedArray%.prototype.entries()",
- IterationKind::kEntries);
-}
-
-// ES #sec-%typedarray%.prototype.keys
-TF_BUILTIN(TypedArrayPrototypeKeys, TypedArrayBuiltinsAssembler) {
- TNode<Context> context = CAST(Parameter(Descriptor::kContext));
- TNode<Object> receiver = CAST(Parameter(Descriptor::kReceiver));
- GenerateTypedArrayPrototypeIterationMethod(
- context, receiver, "%TypedArray%.prototype.keys()", IterationKind::kKeys);
-}
-
} // namespace internal
} // namespace v8
diff --git a/chromium/v8/src/builtins/builtins-typed-array-gen.h b/chromium/v8/src/builtins/builtins-typed-array-gen.h
index 1008b6bdd73..7b725ffc41f 100644
--- a/chromium/v8/src/builtins/builtins-typed-array-gen.h
+++ b/chromium/v8/src/builtins/builtins-typed-array-gen.h
@@ -16,11 +16,6 @@ class TypedArrayBuiltinsAssembler : public CodeStubAssembler {
explicit TypedArrayBuiltinsAssembler(compiler::CodeAssemblerState* state)
: CodeStubAssembler(state) {}
- void GenerateTypedArrayPrototypeIterationMethod(TNode<Context> context,
- TNode<Object> receiver,
- const char* method_name,
- IterationKind iteration_kind);
-
void SetupTypedArrayEmbedderFields(TNode<JSTypedArray> holder);
void AttachBuffer(TNode<JSTypedArray> holder, TNode<JSArrayBuffer> buffer,
TNode<Map> map, TNode<Smi> length,
diff --git a/chromium/v8/src/builtins/builtins-wasm-gen.cc b/chromium/v8/src/builtins/builtins-wasm-gen.cc
index 28efa39c67d..1932db3ff40 100644
--- a/chromium/v8/src/builtins/builtins-wasm-gen.cc
+++ b/chromium/v8/src/builtins/builtins-wasm-gen.cc
@@ -37,12 +37,10 @@ TNode<FixedArray> WasmBuiltinsAssembler::LoadExternalFunctionsFromInstance(
instance, WasmInstanceObject::kWasmExternalFunctionsOffset);
}
-TNode<Smi> WasmBuiltinsAssembler::SmiFromUint32WithSaturation(
- TNode<Uint32T> value, uint32_t max) {
- DCHECK_LE(max, static_cast<uint32_t>(Smi::kMaxValue));
- TNode<Uint32T> capped_value = SelectConstant(
- Uint32LessThan(value, Uint32Constant(max)), value, Uint32Constant(max));
- return SmiFromUint32(capped_value);
+TNode<FixedArray> WasmBuiltinsAssembler::LoadManagedObjectMapsFromInstance(
+ TNode<WasmInstanceObject> instance) {
+ return LoadObjectField<FixedArray>(
+ instance, WasmInstanceObject::kManagedObjectMapsOffset);
}
TF_BUILTIN(WasmFloat32ToNumber, WasmBuiltinsAssembler) {
@@ -55,22 +53,6 @@ TF_BUILTIN(WasmFloat64ToNumber, WasmBuiltinsAssembler) {
Return(ChangeFloat64ToTagged(val));
}
-TF_BUILTIN(WasmAtomicNotify, WasmBuiltinsAssembler) {
- TNode<Uint32T> address =
- UncheckedCast<Uint32T>(Parameter(Descriptor::kAddress));
- TNode<Uint32T> count = UncheckedCast<Uint32T>(Parameter(Descriptor::kCount));
-
- TNode<WasmInstanceObject> instance = LoadInstanceFromFrame();
- TNode<Number> address_number = ChangeUint32ToTagged(address);
- TNode<Number> count_number = ChangeUint32ToTagged(count);
- TNode<Context> context = LoadContextFromInstance(instance);
-
- TNode<Smi> result_smi =
- CAST(CallRuntime(Runtime::kWasmAtomicNotify, context, instance,
- address_number, count_number));
- Return(Unsigned(SmiToInt32(result_smi)));
-}
-
TF_BUILTIN(WasmI32AtomicWait32, WasmBuiltinsAssembler) {
if (!Is32()) {
Unreachable();
@@ -100,33 +82,6 @@ TF_BUILTIN(WasmI32AtomicWait32, WasmBuiltinsAssembler) {
Return(Unsigned(SmiToInt32(result_smi)));
}
-TF_BUILTIN(WasmI32AtomicWait64, WasmBuiltinsAssembler) {
- if (!Is64()) {
- Unreachable();
- return;
- }
-
- TNode<Uint32T> address =
- UncheckedCast<Uint32T>(Parameter(Descriptor::kAddress));
- TNode<Number> address_number = ChangeUint32ToTagged(address);
-
- TNode<Int32T> expected_value =
- UncheckedCast<Int32T>(Parameter(Descriptor::kExpectedValue));
- TNode<Number> expected_value_number = ChangeInt32ToTagged(expected_value);
-
- TNode<IntPtrT> timeout_raw =
- UncheckedCast<IntPtrT>(Parameter(Descriptor::kTimeout));
- TNode<BigInt> timeout = BigIntFromInt64(timeout_raw);
-
- TNode<WasmInstanceObject> instance = LoadInstanceFromFrame();
- TNode<Context> context = LoadContextFromInstance(instance);
-
- TNode<Smi> result_smi =
- CAST(CallRuntime(Runtime::kWasmI32AtomicWait, context, instance,
- address_number, expected_value_number, timeout));
- Return(Unsigned(SmiToInt32(result_smi)));
-}
-
TF_BUILTIN(WasmI64AtomicWait32, WasmBuiltinsAssembler) {
if (!Is32()) {
Unreachable();
@@ -159,93 +114,6 @@ TF_BUILTIN(WasmI64AtomicWait32, WasmBuiltinsAssembler) {
Return(Unsigned(SmiToInt32(result_smi)));
}
-TF_BUILTIN(WasmI64AtomicWait64, WasmBuiltinsAssembler) {
- if (!Is64()) {
- Unreachable();
- return;
- }
-
- TNode<Uint32T> address =
- UncheckedCast<Uint32T>(Parameter(Descriptor::kAddress));
- TNode<Number> address_number = ChangeUint32ToTagged(address);
-
- TNode<IntPtrT> expected_value_raw =
- UncheckedCast<IntPtrT>(Parameter(Descriptor::kExpectedValue));
- TNode<BigInt> expected_value = BigIntFromInt64(expected_value_raw);
-
- TNode<IntPtrT> timeout_raw =
- UncheckedCast<IntPtrT>(Parameter(Descriptor::kTimeout));
- TNode<BigInt> timeout = BigIntFromInt64(timeout_raw);
-
- TNode<WasmInstanceObject> instance = LoadInstanceFromFrame();
- TNode<Context> context = LoadContextFromInstance(instance);
-
- TNode<Smi> result_smi =
- CAST(CallRuntime(Runtime::kWasmI64AtomicWait, context, instance,
- address_number, expected_value, timeout));
- Return(Unsigned(SmiToInt32(result_smi)));
-}
-
-TF_BUILTIN(WasmTableInit, WasmBuiltinsAssembler) {
- TNode<Uint32T> dst_raw =
- UncheckedCast<Uint32T>(Parameter(Descriptor::kDestination));
- // We cap {dst}, {src}, and {size} by {wasm::kV8MaxWasmTableSize + 1} to make
- // sure that the values fit into a Smi.
- STATIC_ASSERT(static_cast<size_t>(Smi::kMaxValue) >=
- wasm::kV8MaxWasmTableSize + 1);
- constexpr uint32_t kCap =
- static_cast<uint32_t>(wasm::kV8MaxWasmTableSize + 1);
- TNode<Smi> dst = SmiFromUint32WithSaturation(dst_raw, kCap);
- TNode<Uint32T> src_raw =
- UncheckedCast<Uint32T>(Parameter(Descriptor::kSource));
- TNode<Smi> src = SmiFromUint32WithSaturation(src_raw, kCap);
- TNode<Uint32T> size_raw =
- UncheckedCast<Uint32T>(Parameter(Descriptor::kSize));
- TNode<Smi> size = SmiFromUint32WithSaturation(size_raw, kCap);
- TNode<Smi> table_index =
- UncheckedCast<Smi>(Parameter(Descriptor::kTableIndex));
- TNode<Smi> segment_index =
- UncheckedCast<Smi>(Parameter(Descriptor::kSegmentIndex));
- TNode<WasmInstanceObject> instance = LoadInstanceFromFrame();
- TNode<Context> context = LoadContextFromInstance(instance);
-
- TailCallRuntime(Runtime::kWasmTableInit, context, instance, table_index,
- segment_index, dst, src, size);
-}
-
-TF_BUILTIN(WasmTableCopy, WasmBuiltinsAssembler) {
- // We cap {dst}, {src}, and {size} by {wasm::kV8MaxWasmTableSize + 1} to make
- // sure that the values fit into a Smi.
- STATIC_ASSERT(static_cast<size_t>(Smi::kMaxValue) >=
- wasm::kV8MaxWasmTableSize + 1);
- constexpr uint32_t kCap =
- static_cast<uint32_t>(wasm::kV8MaxWasmTableSize + 1);
-
- TNode<Uint32T> dst_raw =
- UncheckedCast<Uint32T>(Parameter(Descriptor::kDestination));
- TNode<Smi> dst = SmiFromUint32WithSaturation(dst_raw, kCap);
-
- TNode<Uint32T> src_raw =
- UncheckedCast<Uint32T>(Parameter(Descriptor::kSource));
- TNode<Smi> src = SmiFromUint32WithSaturation(src_raw, kCap);
-
- TNode<Uint32T> size_raw =
- UncheckedCast<Uint32T>(Parameter(Descriptor::kSize));
- TNode<Smi> size = SmiFromUint32WithSaturation(size_raw, kCap);
-
- TNode<Smi> dst_table =
- UncheckedCast<Smi>(Parameter(Descriptor::kDestinationTable));
-
- TNode<Smi> src_table =
- UncheckedCast<Smi>(Parameter(Descriptor::kSourceTable));
-
- TNode<WasmInstanceObject> instance = LoadInstanceFromFrame();
- TNode<Context> context = LoadContextFromInstance(instance);
-
- TailCallRuntime(Runtime::kWasmTableCopy, context, instance, dst_table,
- src_table, dst, src, size);
-}
-
TF_BUILTIN(WasmAllocateArray, WasmBuiltinsAssembler) {
TNode<WasmInstanceObject> instance = LoadInstanceFromFrame();
TNode<Smi> map_index = CAST(Parameter(Descriptor::kMapIndex));
@@ -270,18 +138,5 @@ TF_BUILTIN(WasmAllocateArray, WasmBuiltinsAssembler) {
Return(result);
}
-TF_BUILTIN(WasmAllocateStruct, WasmBuiltinsAssembler) {
- TNode<WasmInstanceObject> instance = LoadInstanceFromFrame();
- TNode<Smi> map_index = CAST(Parameter(Descriptor::kMapIndex));
- TNode<FixedArray> maps_list = LoadObjectField<FixedArray>(
- instance, WasmInstanceObject::kManagedObjectMapsOffset);
- TNode<Map> map = CAST(LoadFixedArrayElement(maps_list, map_index));
- TNode<IntPtrT> instance_size =
- TimesTaggedSize(LoadMapInstanceSizeInWords(map));
- TNode<WasmStruct> result = UncheckedCast<WasmStruct>(Allocate(instance_size));
- StoreMap(result, map);
- Return(result);
-}
-
} // namespace internal
} // namespace v8
diff --git a/chromium/v8/src/builtins/builtins-wasm-gen.h b/chromium/v8/src/builtins/builtins-wasm-gen.h
index 3740560666d..ccf5bae7a15 100644
--- a/chromium/v8/src/builtins/builtins-wasm-gen.h
+++ b/chromium/v8/src/builtins/builtins-wasm-gen.h
@@ -25,8 +25,8 @@ class WasmBuiltinsAssembler : public CodeStubAssembler {
TNode<FixedArray> LoadExternalFunctionsFromInstance(
TNode<WasmInstanceObject> instance);
- protected:
- TNode<Smi> SmiFromUint32WithSaturation(TNode<Uint32T> value, uint32_t max);
+ TNode<FixedArray> LoadManagedObjectMapsFromInstance(
+ TNode<WasmInstanceObject> instance);
};
} // namespace internal
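The wasm changes above delete several builtins from builtins-wasm-gen.cc (the atomic wait/notify wrappers, WasmTableInit, WasmTableCopy and WasmAllocateStruct) and promote the inline load of WasmInstanceObject::kManagedObjectMapsOffset into a public LoadManagedObjectMapsFromInstance() accessor declared in builtins-wasm-gen.h. For orientation only, the removed struct-allocation sequence could be written against the new accessor roughly as below; this is a hypothetical fragment, not the patch's actual replacement for WasmAllocateStruct, which lives outside these two files:

// Hypothetical body fragment inside a WasmBuiltinsAssembler builtin,
// showing what the new accessor is for (every other call is taken from
// the deleted WasmAllocateStruct above).
TNode<WasmInstanceObject> instance = LoadInstanceFromFrame();
TNode<Smi> map_index = CAST(Parameter(Descriptor::kMapIndex));
TNode<FixedArray> maps_list = LoadManagedObjectMapsFromInstance(instance);
TNode<Map> map = CAST(LoadFixedArrayElement(maps_list, map_index));
TNode<IntPtrT> instance_size =
    TimesTaggedSize(LoadMapInstanceSizeInWords(map));
TNode<WasmStruct> result = UncheckedCast<WasmStruct>(Allocate(instance_size));
StoreMap(result, map);
Return(result);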
diff --git a/chromium/v8/src/builtins/cast.tq b/chromium/v8/src/builtins/cast.tq
index dfac2035784..9adbd7ecc4f 100644
--- a/chromium/v8/src/builtins/cast.tq
+++ b/chromium/v8/src/builtins/cast.tq
@@ -2,56 +2,28 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-extern macro IsAllocationSite(HeapObject): bool;
extern macro IsBigInt(HeapObject): bool;
extern macro IsConstructor(HeapObject): bool;
extern macro IsContext(HeapObject): bool;
extern macro IsCustomElementsReceiverInstanceType(int32): bool;
extern macro IsExtensibleMap(Map): bool;
-extern macro IsFeedbackCell(HeapObject): bool;
-extern macro IsFeedbackVector(HeapObject): bool;
extern macro IsFixedArray(HeapObject): bool;
extern macro IsHeapNumber(HeapObject): bool;
-extern macro IsJSAggregateError(HeapObject): bool;
-extern macro IsJSArray(HeapObject): bool;
extern macro IsJSArrayMap(Map): bool;
-extern macro IsJSBoundFunction(HeapObject): bool;
-extern macro IsJSFinalizationRegistry(HeapObject): bool;
-extern macro IsJSFunction(HeapObject): bool;
-extern macro IsJSObject(HeapObject): bool;
-extern macro IsJSPrimitiveWrapper(HeapObject): bool;
-extern macro IsJSPromise(HeapObject): bool;
extern macro IsJSProxy(HeapObject): bool;
-extern macro IsJSReceiver(HeapObject): bool;
-extern macro IsJSRegExp(HeapObject): bool;
-extern macro IsJSRegExpStringIterator(HeapObject): bool;
-extern macro IsJSTypedArray(HeapObject): bool;
-extern macro IsMap(HeapObject): bool;
extern macro IsName(HeapObject): bool;
extern macro IsNativeContext(HeapObject): bool;
-extern macro IsNumberDictionary(HeapObject): bool;
extern macro IsNumberNormalized(Number): bool;
extern macro IsNumber(Object): bool;
-extern macro IsOddball(HeapObject): bool;
extern macro IsPrivateSymbol(HeapObject): bool;
-extern macro IsPromiseCapability(HeapObject): bool;
-extern macro IsPromiseFulfillReactionJobTask(HeapObject): bool;
-extern macro IsPromiseReaction(HeapObject): bool;
-extern macro IsPromiseReactionJobTask(HeapObject): bool;
-extern macro IsPromiseRejectReactionJobTask(HeapObject): bool;
extern macro IsSafeInteger(Object): bool;
-extern macro IsSharedFunctionInfo(HeapObject): bool;
-extern macro IsSymbol(HeapObject): bool;
-extern macro IsTuple2(HeapObject): bool;
-extern macro HeapObjectToJSDataView(HeapObject): JSDataView
- labels CastError;
-extern macro HeapObjectToJSProxy(HeapObject): JSProxy
- labels CastError;
-extern macro HeapObjectToJSStringIterator(HeapObject): JSStringIterator
- labels CastError;
-extern macro HeapObjectToJSArrayBuffer(HeapObject): JSArrayBuffer
- labels CastError;
+@export
+macro IsAllocationSite(o: HeapObject): bool {
+ Cast<AllocationSite>(o) otherwise return false;
+ return true;
+}
+
extern macro TaggedToHeapObject(Object): HeapObject
labels CastError;
extern macro TaggedToSmi(Object): Smi
@@ -60,28 +32,13 @@ extern macro TaggedToPositiveSmi(Object): PositiveSmi
labels CastError;
extern macro TaggedToDirectString(Object): DirectString
labels CastError;
-extern macro HeapObjectToJSAggregateError(HeapObject): JSAggregateError
- labels CastError;
-extern macro HeapObjectToJSArray(HeapObject): JSArray
- labels CastError;
extern macro HeapObjectToCallable(HeapObject): Callable
labels CastError;
-extern macro HeapObjectToFixedArray(HeapObject): FixedArray
- labels CastError;
-extern macro HeapObjectToFixedDoubleArray(HeapObject): FixedDoubleArray
- labels CastError;
-extern macro HeapObjectToString(HeapObject): String
- labels CastError;
extern macro HeapObjectToConstructor(HeapObject): Constructor
labels CastError;
extern macro HeapObjectToJSFunctionWithPrototypeSlot(HeapObject):
JSFunctionWithPrototypeSlot
labels CastError;
-extern macro HeapObjectToHeapNumber(HeapObject): HeapNumber
- labels CastError;
-extern macro HeapObjectToSloppyArgumentsElements(HeapObject):
- SloppyArgumentsElements
- labels CastError;
extern macro TaggedToNumber(Object): Number
labels CastError;
@@ -90,7 +47,17 @@ macro Cast<A : type extends WeakHeapObject>(o: A|Object): A labels CastError {
return %RawDownCast<A>(o);
}
-macro Cast<A: type>(o: MaybeObject): A labels CastError;
+macro Cast<A : type extends Object>(implicit context: Context)(o: MaybeObject):
+ A labels CastError {
+ typeswitch (o) {
+ case (WeakHeapObject): {
+ goto CastError;
+ }
+ case (o: Object): {
+ return Cast<A>(o) otherwise CastError;
+ }
+ }
+}
Cast<Undefined>(o: MaybeObject): Undefined labels CastError {
if (TaggedNotEqual(o, Undefined)) goto CastError;
@@ -283,22 +250,6 @@ Cast<Undefined>(o: HeapObject): Undefined
return Cast<Undefined>(o) otherwise CastError;
}
-Cast<AllocationSite>(o: HeapObject): AllocationSite
- labels CastError {
- if (IsAllocationSite(o)) return %RawDownCast<AllocationSite>(o);
- goto CastError;
-}
-
-Cast<FixedArray>(o: HeapObject): FixedArray
- labels CastError {
- return HeapObjectToFixedArray(o) otherwise CastError;
-}
-
-Cast<FixedDoubleArray>(o: HeapObject): FixedDoubleArray
- labels CastError {
- return HeapObjectToFixedDoubleArray(o) otherwise CastError;
-}
-
Cast<EmptyFixedArray>(o: Object): EmptyFixedArray
labels CastError {
if (o != kEmptyFixedArray) goto CastError;
@@ -325,46 +276,6 @@ Cast<(FixedDoubleArray | EmptyFixedArray)>(o: HeapObject): FixedDoubleArray|
}
}
-Cast<SloppyArgumentsElements>(o: HeapObject): SloppyArgumentsElements
- labels CastError {
- return HeapObjectToSloppyArgumentsElements(o) otherwise CastError;
-}
-
-Cast<JSDataView>(o: HeapObject): JSDataView
- labels CastError {
- return HeapObjectToJSDataView(o) otherwise CastError;
-}
-
-Cast<JSProxy>(o: HeapObject): JSProxy
- labels CastError {
- return HeapObjectToJSProxy(o) otherwise CastError;
-}
-
-Cast<JSStringIterator>(o: HeapObject): JSStringIterator
- labels CastError {
- return HeapObjectToJSStringIterator(o) otherwise CastError;
-}
-
-Cast<JSRegExpStringIterator>(o: HeapObject): JSRegExpStringIterator
- labels CastError {
- if (IsJSRegExpStringIterator(o)) {
- return %RawDownCast<JSRegExpStringIterator>(o);
- }
- goto CastError;
-}
-
-Cast<JSTypedArray>(o: HeapObject): JSTypedArray
- labels CastError {
- if (IsJSTypedArray(o)) return %RawDownCast<JSTypedArray>(o);
- goto CastError;
-}
-
-Cast<JSTypedArray>(implicit context: Context)(o: Object): JSTypedArray
- labels CastError {
- const heapObject = Cast<HeapObject>(o) otherwise CastError;
- return Cast<JSTypedArray>(heapObject) otherwise CastError;
-}
-
Cast<Callable>(o: HeapObject): Callable
labels CastError {
return HeapObjectToCallable(o) otherwise CastError;
@@ -376,62 +287,6 @@ Cast<Undefined|Callable>(o: HeapObject): Undefined|Callable
return HeapObjectToCallable(o) otherwise CastError;
}
-Cast<JSAggregateError>(o: HeapObject): JSAggregateError
- labels CastError {
- return HeapObjectToJSAggregateError(o) otherwise CastError;
-}
-
-Cast<JSArray>(o: HeapObject): JSArray
- labels CastError {
- return HeapObjectToJSArray(o) otherwise CastError;
-}
-
-Cast<JSArrayBuffer>(o: HeapObject): JSArrayBuffer
- labels CastError {
- return HeapObjectToJSArrayBuffer(o) otherwise CastError;
-}
-
-Cast<Context>(o: HeapObject): Context
- labels CastError {
- if (IsContext(o)) return %RawDownCast<Context>(o);
- goto CastError;
-}
-
-Cast<NativeContext>(o: HeapObject): NativeContext
- labels CastError {
- if (IsNativeContext(o)) return %RawDownCast<NativeContext>(o);
- goto CastError;
-}
-
-Cast<JSObject>(o: HeapObject): JSObject
- labels CastError {
- if (IsJSObject(o)) return %RawDownCast<JSObject>(o);
- goto CastError;
-}
-
-Cast<NumberDictionary>(o: HeapObject): NumberDictionary
- labels CastError {
- if (IsNumberDictionary(o)) return %RawDownCast<NumberDictionary>(o);
- goto CastError;
-}
-
-Cast<String>(o: HeapObject): String
- labels CastError {
- return HeapObjectToString(o) otherwise CastError;
-}
-
-Cast<Oddball>(o: HeapObject): Oddball
- labels CastError {
- if (IsOddball(o)) return %RawDownCast<Oddball>(o);
- goto CastError;
-}
-
-Cast<Symbol>(o: HeapObject): Symbol
- labels CastError {
- if (IsSymbol(o)) return %RawDownCast<Symbol>(o);
- goto CastError;
-}
-
macro Cast<T : type extends Symbol>(o: Symbol): T labels CastError;
Cast<PublicSymbol>(o: Symbol): PublicSymbol labels CastError {
if (IsPrivateSymbol(o)) goto CastError;
@@ -468,53 +323,17 @@ Cast<JSFunctionWithPrototypeSlot>(o: HeapObject): JSFunctionWithPrototypeSlot
return HeapObjectToJSFunctionWithPrototypeSlot(o) otherwise CastError;
}
-Cast<HeapNumber>(o: HeapObject): HeapNumber
- labels CastError {
- if (IsHeapNumber(o)) return %RawDownCast<HeapNumber>(o);
- goto CastError;
-}
-
Cast<BigInt>(o: HeapObject): BigInt labels CastError {
if (IsBigInt(o)) return %RawDownCast<BigInt>(o);
goto CastError;
}
-Cast<JSRegExp>(o: HeapObject): JSRegExp
- labels CastError {
- if (IsJSRegExp(o)) return %RawDownCast<JSRegExp>(o);
- goto CastError;
-}
-
Cast<JSRegExpResult>(implicit context: Context)(o: HeapObject): JSRegExpResult
labels CastError {
if (regexp::IsRegExpResult(o)) return %RawDownCast<JSRegExpResult>(o);
goto CastError;
}
-Cast<Map>(implicit context: Context)(o: HeapObject): Map
- labels CastError {
- if (IsMap(o)) return %RawDownCast<Map>(o);
- goto CastError;
-}
-
-Cast<FeedbackCell>(implicit context: Context)(o: HeapObject): FeedbackCell
- labels CastError {
- if (IsFeedbackCell(o)) return %RawDownCast<FeedbackCell>(o);
- goto CastError;
-}
-
-Cast<FeedbackVector>(implicit context: Context)(o: HeapObject): FeedbackVector
- labels CastError {
- if (IsFeedbackVector(o)) return %RawDownCast<FeedbackVector>(o);
- goto CastError;
-}
-
-Cast<JSPrimitiveWrapper>(o: HeapObject): JSPrimitiveWrapper
- labels CastError {
- if (IsJSPrimitiveWrapper(o)) return %RawDownCast<JSPrimitiveWrapper>(o);
- goto CastError;
-}
-
Cast<JSSloppyArgumentsObject>(implicit context: Context)(o: HeapObject):
JSSloppyArgumentsObject
labels CastError {
@@ -623,32 +442,6 @@ Cast<FastJSArrayForReadWithNoCustomIteration>(implicit context: Context)(
return %RawDownCast<FastJSArrayForReadWithNoCustomIteration>(a);
}
-Cast<JSReceiver>(o: HeapObject): JSReceiver
- labels CastError {
- if (IsJSReceiver(o)) return %RawDownCast<JSReceiver>(o);
- goto CastError;
-}
-
-Cast<JSFunction>(implicit context: Context)(o: HeapObject): JSFunction
- labels CastError {
- if (IsJSFunction(o)) return %RawDownCast<JSFunction>(o);
- goto CastError;
-}
-
-extern macro IsDebugInfo(HeapObject): bool;
-Cast<DebugInfo>(implicit context: Context)(o: HeapObject): DebugInfo
- labels CastError {
- if (IsDebugInfo(o)) return %RawDownCast<DebugInfo>(o);
- goto CastError;
-}
-
-extern macro IsCoverageInfo(HeapObject): bool;
-Cast<CoverageInfo>(implicit context: Context)(o: HeapObject): CoverageInfo
- labels CastError {
- if (IsCoverageInfo(o)) return %RawDownCast<CoverageInfo>(o);
- goto CastError;
-}
-
Cast<JSReceiver|Null>(o: HeapObject): JSReceiver|Null
labels CastError {
typeswitch (o) {
@@ -664,35 +457,6 @@ Cast<JSReceiver|Null>(o: HeapObject): JSReceiver|Null
}
}
-Cast<PromiseReactionJobTask>(o: HeapObject):
- PromiseReactionJobTask labels CastError {
- if (IsPromiseReactionJobTask(o)) {
- return %RawDownCast<PromiseReactionJobTask>(o);
- }
- goto CastError;
-}
-
-Cast<PromiseFulfillReactionJobTask>(o: HeapObject):
- PromiseFulfillReactionJobTask labels CastError {
- if (IsPromiseFulfillReactionJobTask(o)) {
- return %RawDownCast<PromiseFulfillReactionJobTask>(o);
- }
- goto CastError;
-}
-
-Cast<PromiseRejectReactionJobTask>(o: HeapObject):
- PromiseRejectReactionJobTask labels CastError {
- if (IsPromiseRejectReactionJobTask(o)) {
- return %RawDownCast<PromiseRejectReactionJobTask>(o);
- }
- goto CastError;
-}
-
-Cast<PromiseReaction>(o: HeapObject): PromiseReaction labels CastError {
- if (IsPromiseReaction(o)) return %RawDownCast<PromiseReaction>(o);
- goto CastError;
-}
-
Cast<Smi|PromiseReaction>(o: Object): Smi|PromiseReaction labels CastError {
typeswitch (o) {
case (o: Smi): {
@@ -737,32 +501,35 @@ Cast<Zero|PromiseReaction>(implicit context: Context)(o: Object): Zero|
}
}
-Cast<JSBoundFunction>(o: HeapObject): JSBoundFunction labels CastError {
- if (IsJSBoundFunction(o)) return %RawDownCast<JSBoundFunction>(o);
- goto CastError;
-}
-
-Cast<PromiseCapability>(o: HeapObject): PromiseCapability labels CastError {
- if (IsPromiseCapability(o)) return %RawDownCast<PromiseCapability>(o);
- goto CastError;
+Cast<JSFunction|JSBoundFunction>(implicit context: Context)(o: Object):
+ JSFunction|JSBoundFunction labels CastError {
+ typeswitch (o) {
+ case (o: JSFunction): {
+ return o;
+ }
+ case (o: JSBoundFunction): {
+ return o;
+ }
+ case (Object): {
+ goto CastError;
+ }
+ }
}
-Cast<SharedFunctionInfo>(o: HeapObject): SharedFunctionInfo labels CastError {
- if (IsSharedFunctionInfo(o)) return %RawDownCast<SharedFunctionInfo>(o);
- goto CastError;
+macro Is<A : type extends Object, B : type extends Object>(
+ implicit context: Context)(o: B): bool {
+ Cast<A>(o) otherwise return false;
+ return true;
}
-Cast<JSPromise>(o: HeapObject): JSPromise labels CastError {
- if (IsJSPromise(o)) return %RawDownCast<JSPromise>(o);
- goto CastError;
+macro UnsafeCast<A : type extends Object>(implicit context: Context)(o: Object):
+ A {
+ assert(Is<A>(o));
+ return %RawDownCast<A>(o);
}
-Cast<JSFinalizationRegistry>(o: HeapObject):
- JSFinalizationRegistry labels CastError {
- if (IsJSFinalizationRegistry(o)) {
- return %RawDownCast<JSFinalizationRegistry>(o);
- }
- goto CastError;
+macro UnsafeConstCast<T: type>(r: const &T):&T {
+ return %RawDownCast<&T>(r);
}
UnsafeCast<RegExpMatchInfo>(implicit context: Context)(o: Object):
diff --git a/chromium/v8/src/builtins/constants-table-builder.cc b/chromium/v8/src/builtins/constants-table-builder.cc
index 94e8dc05ec7..fa333726e79 100644
--- a/chromium/v8/src/builtins/constants-table-builder.cc
+++ b/chromium/v8/src/builtins/constants-table-builder.cc
@@ -57,24 +57,30 @@ uint32_t BuiltinsConstantsTableBuilder::AddObject(Handle<Object> object) {
}
}
-void BuiltinsConstantsTableBuilder::PatchSelfReference(
- Handle<Object> self_reference, Handle<Code> code_object) {
-#ifdef DEBUG
+namespace {
+void CheckPreconditionsForPatching(Isolate* isolate,
+ Handle<Object> replacement_object) {
// Roots must not be inserted into the constants table as they are already
- // accessibly from the root list.
+ // accessible from the root list.
RootIndex root_list_index;
- DCHECK(!isolate_->roots_table().IsRootHandle(code_object, &root_list_index));
+ DCHECK(!isolate->roots_table().IsRootHandle(replacement_object,
+ &root_list_index));
+ USE(root_list_index);
// Not yet finalized.
- DCHECK_EQ(ReadOnlyRoots(isolate_).empty_fixed_array(),
- isolate_->heap()->builtins_constants_table());
+ DCHECK_EQ(ReadOnlyRoots(isolate).empty_fixed_array(),
+ isolate->heap()->builtins_constants_table());
- DCHECK(isolate_->IsGeneratingEmbeddedBuiltins());
+ DCHECK(isolate->IsGeneratingEmbeddedBuiltins());
+}
+} // namespace
+void BuiltinsConstantsTableBuilder::PatchSelfReference(
+ Handle<Object> self_reference, Handle<Code> code_object) {
+ CheckPreconditionsForPatching(isolate_, code_object);
DCHECK(self_reference->IsOddball());
DCHECK(Oddball::cast(*self_reference).kind() ==
Oddball::kSelfReferenceMarker);
-#endif
uint32_t key;
if (map_.Delete(self_reference, &key)) {
@@ -83,6 +89,17 @@ void BuiltinsConstantsTableBuilder::PatchSelfReference(
}
}
+void BuiltinsConstantsTableBuilder::PatchBasicBlockCountersReference(
+ Handle<ByteArray> counters) {
+ CheckPreconditionsForPatching(isolate_, counters);
+
+ uint32_t key;
+ if (map_.Delete(ReadOnlyRoots(isolate_).basic_block_counters_marker(),
+ &key)) {
+ map_.Set(counters, key);
+ }
+}
+
void BuiltinsConstantsTableBuilder::Finalize() {
HandleScope handle_scope(isolate_);
@@ -117,6 +134,8 @@ void BuiltinsConstantsTableBuilder::Finalize() {
DCHECK(table->get(i).IsHeapObject());
DCHECK_NE(ReadOnlyRoots(isolate_).undefined_value(), table->get(i));
DCHECK_NE(ReadOnlyRoots(isolate_).self_reference_marker(), table->get(i));
+ DCHECK_NE(ReadOnlyRoots(isolate_).basic_block_counters_marker(),
+ table->get(i));
}
#endif
diff --git a/chromium/v8/src/builtins/constants-table-builder.h b/chromium/v8/src/builtins/constants-table-builder.h
index 89c95912a1e..fa9d7dee3a3 100644
--- a/chromium/v8/src/builtins/constants-table-builder.h
+++ b/chromium/v8/src/builtins/constants-table-builder.h
@@ -34,6 +34,11 @@ class BuiltinsConstantsTableBuilder final {
void PatchSelfReference(Handle<Object> self_reference,
Handle<Code> code_object);
+  // References to the array that stores basic block usage counters start out
+  // as references to a unique oddball. Once the actual array has been
+  // allocated, such entries in the constants map must be patched up.
+ void PatchBasicBlockCountersReference(Handle<ByteArray> counters);
+
// Should be called after all affected code (e.g. builtins and bytecode
// handlers) has been generated.
void Finalize();
diff --git a/chromium/v8/src/builtins/conversion.tq b/chromium/v8/src/builtins/conversion.tq
new file mode 100644
index 00000000000..7f634d94905
--- /dev/null
+++ b/chromium/v8/src/builtins/conversion.tq
@@ -0,0 +1,232 @@
+// Copyright 2020 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+namespace runtime {
+extern transitioning runtime ToStringRT(Context, BigInt): String;
+}
+
+extern enum OrdinaryToPrimitiveHint { kString, kNumber }
+
+extern macro OrdinaryToPrimitive(implicit context: Context)(
+ JSAny, constexpr OrdinaryToPrimitiveHint): JSPrimitive;
+
+namespace conversion {
+
+builtin StringToNumber(implicit context: Context)(input: String): Number {
+ return ::StringToNumber(input);
+}
+
+transitioning builtin NonNumberToNumber(implicit context: Context)(
+ input: JSAnyNotNumber): Number {
+ return ::NonNumberToNumber(input);
+}
+
+transitioning builtin NonNumberToNumeric(implicit context: Context)(
+ input: JSAnyNotNumber): Numeric {
+ return ::NonNumberToNumeric(input);
+}
+
+transitioning builtin ToNumeric(implicit context: Context)(input: JSAny):
+ Numeric {
+ typeswitch (input) {
+ case (n: Number): {
+ return n;
+ }
+ case (h: JSAnyNotNumber): {
+ return conversion::NonNumberToNumeric(h);
+ }
+ }
+}
+
+// ES section #sec-tostring-applied-to-the-number-type
+builtin NumberToString(implicit context: Context)(input: Number): String {
+ return ::NumberToString(input);
+}
+
+// ES6 section 7.1.2 ToBoolean ( argument )
+builtin ToBoolean(implicit context: Context)(input: JSAny): Boolean {
+ BranchIfToBooleanIsTrue(input) otherwise return TrueConstant(),
+ return FalseConstant();
+}
+
+transitioning builtin ToLength(implicit context: Context)(input: JSAny):
+ Number {
+ // We might need to loop once for ToNumber conversion.
+ let x: JSAny = input;
+ while (true) {
+ typeswitch (x) {
+ case (s: Smi): {
+ if (s < 0) return 0;
+ return s;
+ }
+ case (h: HeapNumber): {
+ let value: float64 = Convert<float64>(h);
+ // The sense of this test is important for the NaN and -0 cases.
+ if (!(value > 0)) return 0;
+ if (value > kMaxSafeInteger) return kMaxSafeInteger;
+ value = math::Float64Floor(value);
+ return ChangeFloat64ToTagged(value);
+ }
+ case (h: JSAnyNotNumber): {
+ x = ::NonNumberToNumber(h);
+ }
+ }
+ }
+ VerifiedUnreachable();
+}
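
A minimal TypeScript sketch of the ToLength clamping that the loop above implements: NaN and -0 both fall out of the `value > 0` test and map to 0, and anything above 2**53 - 1 is clamped. The function name is illustrative, not a V8 API.

const kMaxSafeInteger = 2 ** 53 - 1;

function toLengthSketch(input: unknown): number {
  // The Torque version loops via NonNumberToNumber instead of Number().
  const value = Number(input);
  // The negated test deliberately catches NaN and -0 as well as negatives.
  if (!(value > 0)) return 0;
  if (value > kMaxSafeInteger) return kMaxSafeInteger;
  return Math.floor(value);
}
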
+
+transitioning builtin ToName(implicit context: Context)(input: JSAny): Name {
+ // We might need to loop once for ToNumber conversion.
+ let x: JSAny = input;
+ while (true) {
+ typeswitch (x) {
+ case (n: Name): {
+ return n;
+ }
+ case (n: Number): {
+ return ::NumberToString(n);
+ }
+ case (b: BigInt): {
+ // We don't have a fast-path for BigInt currently, so just
+ // tail call to the %ToString runtime function here for now.
+ tail runtime::ToStringRT(context, b);
+ }
+ case (o: Oddball): {
+ return o.to_string;
+ }
+ case (o: JSReceiver): {
+ x = NonPrimitiveToPrimitive_String(o);
+ }
+ }
+ }
+ VerifiedUnreachable();
+}
+
+const kNoConstructorFunctionIndex:
+ constexpr int31 generates 'Map::kNoConstructorFunctionIndex';
+
+// ES6 section 7.1.13 ToObject (argument)
+transitioning builtin ToObject(implicit context: Context)(input: JSAny):
+ JSReceiver {
+ try {
+ typeswitch (input) {
+ case (Smi): {
+ goto WrapPrimitive(NativeContextSlot::NUMBER_FUNCTION_INDEX);
+ }
+ case (o: JSReceiver): {
+ return o;
+ }
+ case (o: JSAnyNotSmi): {
+ const index: intptr = Convert<intptr>(
+ o.map.in_object_properties_start_or_constructor_function_index);
+ if (index != kNoConstructorFunctionIndex) goto WrapPrimitive(index);
+ ThrowTypeError(MessageTemplate::kUndefinedOrNullToObject, 'ToObject');
+ }
+ }
+ } label WrapPrimitive(constructorIndex: intptr) {
+ const nativeContext = LoadNativeContext(context);
+ const constructor = UnsafeCast<JSFunction>(nativeContext[constructorIndex]);
+ const map: Map = UnsafeCast<Map>(constructor.prototype_or_initial_map);
+ const wrapper =
+ UnsafeCast<JSPrimitiveWrapper>(AllocateFastOrSlowJSObjectFromMap(map));
+ wrapper.value = input;
+ return wrapper;
+ }
+}
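
For reference, a rough TypeScript sketch of the observable ToObject behavior implemented above: receivers pass through, null and undefined throw, and other primitives are wrapped via their constructor, which is what the constructor-function-index lookup on the map selects. The helper name is made up for illustration.

function toObjectSketch(input: unknown): object {
  if (input === null || input === undefined) {
    throw new TypeError('Cannot convert undefined or null to object');
  }
  if (typeof input === 'object' || typeof input === 'function') {
    return input as object;  // JSReceiver: returned unchanged.
  }
  // Primitive: wrap it, the equivalent of allocating a JSPrimitiveWrapper
  // from the constructor's initial map.
  return Object(input);
}
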
+
+// ES6 section 7.1.1 ToPrimitive ( input [ , PreferredType ] )
+
+transitioning macro TryGetExoticToPrimitive(implicit context: Context)(
+ input: JSAny): JSAny labels OrdinaryToPrimitive {
+ // Look up the @@toPrimitive property.
+ const exoticToPrimitive: JSAny =
+ GetProperty(input, ToPrimitiveSymbolConstant());
+ if (IsNullOrUndefined(exoticToPrimitive)) goto OrdinaryToPrimitive;
+ return exoticToPrimitive;
+}
+
+transitioning macro CallExoticToPrimitive(implicit context: Context)(
+ input: JSAny, exoticToPrimitive: JSAny, hint: String): JSPrimitive {
+ // Invoke the exoticToPrimitive method on the input with a string
+ // representation of the hint.
+ const result: JSAny = Call(context, exoticToPrimitive, input, hint);
+
+ // Verify that the result is primitive.
+ typeswitch (result) {
+ case (o: JSPrimitive): {
+ return o;
+ }
+ case (JSReceiver): {
+ // Somehow the @@toPrimitive method on input didn't yield a primitive.
+ ThrowTypeError(MessageTemplate::kCannotConvertToPrimitive);
+ }
+ }
+}
+
+transitioning builtin NonPrimitiveToPrimitive_Default(
+ implicit context: Context)(input: JSReceiver): JSPrimitive {
+ const exoticToPrimitive: JSAny = TryGetExoticToPrimitive(input)
+ otherwise return OrdinaryToPrimitive_Number(input);
+ return CallExoticToPrimitive(
+ input, exoticToPrimitive, DefaultStringConstant());
+}
+
+transitioning builtin NonPrimitiveToPrimitive_Number(implicit context: Context)(
+ input: JSReceiver): JSPrimitive {
+ const exoticToPrimitive: JSAny = TryGetExoticToPrimitive(input)
+ otherwise return OrdinaryToPrimitive_Number(input);
+ return CallExoticToPrimitive(
+ input, exoticToPrimitive, NumberStringConstant());
+}
+
+transitioning builtin NonPrimitiveToPrimitive_String(implicit context: Context)(
+ input: JSReceiver): JSPrimitive {
+ const exoticToPrimitive: JSAny = TryGetExoticToPrimitive(input)
+ otherwise return OrdinaryToPrimitive_String(input);
+ return CallExoticToPrimitive(
+ input, exoticToPrimitive, StringStringConstant());
+}
+
+// 7.1.1.1 OrdinaryToPrimitive ( O, hint )
+
+transitioning macro TryToPrimitiveMethod(implicit context: Context)(
+ input: JSAny, name: String): JSPrimitive labels Continue {
+ const method: JSAny = GetProperty(input, name);
+ typeswitch (method) {
+ case (Callable): {
+ const value: JSAny = Call(context, method, input);
+ return Cast<JSPrimitive>(value) otherwise Continue;
+ }
+ case (JSAny): {
+ goto Continue;
+ }
+ }
+}
+
+transitioning builtin OrdinaryToPrimitive_Number(implicit context: Context)(
+ input: JSAny): JSPrimitive {
+ try {
+ return TryToPrimitiveMethod(input, ValueOfStringConstant())
+ otherwise String;
+ } label String {
+ return TryToPrimitiveMethod(input, ToStringStringConstant())
+ otherwise Throw;
+ } label Throw {
+ ThrowTypeError(MessageTemplate::kCannotConvertToPrimitive);
+ }
+}
+
+transitioning builtin OrdinaryToPrimitive_String(implicit context: Context)(
+ input: JSAny): JSPrimitive {
+ try {
+ return TryToPrimitiveMethod(input, ToStringStringConstant())
+ otherwise String;
+ } label String {
+ return TryToPrimitiveMethod(input, ValueOfStringConstant()) otherwise Throw;
+ } label Throw {
+ ThrowTypeError(MessageTemplate::kCannotConvertToPrimitive);
+ }
+}
+
+} // namespace conversion
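
The OrdinaryToPrimitive_Number/_String pair above follows ES 7.1.1.1: try the hint-ordered methods and return the first primitive result, otherwise throw. A rough TypeScript rendering of that ordering, with illustrative names only:

function isPrimitiveSketch(value: unknown): boolean {
  return value === null ||
      (typeof value !== 'object' && typeof value !== 'function');
}

function ordinaryToPrimitiveSketch(
    input: object, hint: 'number'|'string'): unknown {
  // Hint "string" tries toString before valueOf; hint "number" the reverse.
  const methodNames =
      hint === 'string' ? ['toString', 'valueOf'] : ['valueOf', 'toString'];
  for (const name of methodNames) {
    const method = (input as Record<string, unknown>)[name];
    if (typeof method === 'function') {
      const value = method.call(input);
      if (isPrimitiveSketch(value)) return value;  // First primitive wins.
    }
  }
  throw new TypeError('Cannot convert object to primitive value');
}
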
diff --git a/chromium/v8/src/builtins/convert.tq b/chromium/v8/src/builtins/convert.tq
index e2c11120381..03440a183ad 100644
--- a/chromium/v8/src/builtins/convert.tq
+++ b/chromium/v8/src/builtins/convert.tq
@@ -94,6 +94,15 @@ FromConstexpr<PromiseState, constexpr PromiseState>(c: constexpr PromiseState):
PromiseState {
return %RawDownCast<PromiseState>(Int32Constant(c));
}
+FromConstexpr<InstanceType, constexpr InstanceType>(c: constexpr InstanceType):
+ InstanceType {
+ return %RawDownCast<InstanceType>(Uint16Constant(c));
+}
+
+FromConstexpr<IterationKind, constexpr IterationKind>(
+ c: constexpr IterationKind): IterationKind {
+ return %RawDownCast<IterationKind>(Unsigned(%FromConstexpr<int32>(c)));
+}
macro Convert<To: type, From: type>(i: From): To {
return i;
@@ -103,6 +112,9 @@ macro Convert<To: type, From: type>(i: From): To labels Overflow {
return i;
}
+Convert<Boolean, bool>(b: bool): Boolean {
+ return b ? True : False;
+}
extern macro ConvertElementsKindToInt(ElementsKind): int32;
Convert<int32, ElementsKind>(elementsKind: ElementsKind): int32 {
return ConvertElementsKindToInt(elementsKind);
@@ -201,6 +213,9 @@ Convert<PositiveSmi, intptr>(i: intptr): PositiveSmi labels IfOverflow {
goto IfOverflow;
}
}
+Convert<PositiveSmi, uint32>(ui: uint32): PositiveSmi labels IfOverflow {
+ return Convert<PositiveSmi>(Convert<uintptr>(ui)) otherwise IfOverflow;
+}
Convert<int32, Smi>(s: Smi): int32 {
return SmiToInt32(s);
}
diff --git a/chromium/v8/src/builtins/function.tq b/chromium/v8/src/builtins/function.tq
new file mode 100644
index 00000000000..8266714c7be
--- /dev/null
+++ b/chromium/v8/src/builtins/function.tq
@@ -0,0 +1,109 @@
+// Copyright 2020 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+namespace function {
+
+extern macro OrdinaryHasInstance(Context, Object, Object): JSAny;
+
+// ES6 section 19.2.3.6 Function.prototype[@@hasInstance]
+javascript builtin FunctionPrototypeHasInstance(
+ js-implicit context: NativeContext, receiver: JSAny)(value: JSAny): JSAny {
+ return OrdinaryHasInstance(context, receiver, value);
+}
+
+extern transitioning builtin
+FunctionPrototypeBind(implicit context: Context)(
+ JSFunction, JSAny, int32): JSAny;
+
+const kLengthDescriptorIndex:
+ constexpr int32 generates 'JSFunction::kLengthDescriptorIndex';
+const kNameDescriptorIndex:
+ constexpr int32 generates 'JSFunction::kNameDescriptorIndex';
+const kMinDescriptorsForFastBind:
+ constexpr int31 generates 'JSFunction::kMinDescriptorsForFastBind';
+
+macro CheckAccessor(implicit context: Context)(
+ array: DescriptorArray, index: constexpr int32, name: Name) labels Slow {
+ const descriptor: DescriptorEntry = array.descriptors[index];
+ const key: Name|Undefined = descriptor.key;
+ if (!TaggedEqual(key, name)) goto Slow;
+
+ // The descriptor value must be an AccessorInfo.
+ Cast<AccessorInfo>(descriptor.value) otherwise goto Slow;
+}
+
+// ES6 section 19.2.3.2 Function.prototype.bind
+transitioning javascript builtin
+FastFunctionPrototypeBind(
+ js-implicit context: NativeContext, receiver: JSAny, newTarget: JSAny,
+ target: JSFunction)(...arguments): JSAny {
+ const argc: intptr = arguments.length;
+ try {
+ typeswitch (receiver) {
+ case (fn: JSFunction|JSBoundFunction): {
+        // Disallow binding of slow-mode functions. We need to figure out
+        // whether the length and name properties are in the original state.
+ Comment('Disallow binding of slow-mode functions');
+ if (IsDictionaryMap(fn.map)) goto Slow;
+
+ // Check whether the length and name properties are still present as
+ // AccessorInfo objects. If so, their value can be recomputed even if
+ // the actual value on the object changes.
+
+ if (fn.map.bit_field3.number_of_own_descriptors <
+ kMinDescriptorsForFastBind) {
+ goto Slow;
+ }
+
+ const descriptors: DescriptorArray = fn.map.instance_descriptors;
+ CheckAccessor(
+ descriptors, kLengthDescriptorIndex, LengthStringConstant())
+ otherwise Slow;
+ CheckAccessor(descriptors, kNameDescriptorIndex, NameStringConstant())
+ otherwise Slow;
+
+ // Choose the right bound function map based on whether the target is
+ // constructable.
+
+ const boundFunctionMap: Map = UnsafeCast<Map>(
+ IsConstructor(fn) ?
+ context[NativeContextSlot::
+ BOUND_FUNCTION_WITH_CONSTRUCTOR_MAP_INDEX] :
+ context[NativeContextSlot::
+ BOUND_FUNCTION_WITHOUT_CONSTRUCTOR_MAP_INDEX]);
+
+ // Verify that prototype matches that of the target bound function.
+
+ if (fn.map.prototype != boundFunctionMap.prototype) goto Slow;
+
+ // Allocate the arguments array.
+
+ const argumentsArray = arguments.length <= 1 ?
+ kEmptyFixedArray :
+ NewFixedArray(
+ arguments.length - 1, ArgumentsIterator{arguments, current: 1});
+
+ const boundReceiver: JSAny = arguments[0];
+
+ const result = new JSBoundFunction{
+ map: boundFunctionMap,
+ properties_or_hash: kEmptyFixedArray,
+ elements: kEmptyFixedArray,
+ bound_target_function: fn,
+ bound_this: boundReceiver,
+ bound_arguments: argumentsArray
+ };
+ return result;
+ }
+
+ case (JSAny): {
+ goto Slow;
+ }
+ }
+ } label Slow {
+ tail FunctionPrototypeBind(
+ LoadTargetFromFrame(), newTarget, Convert<int32>(argc));
+ }
+}
+} // namespace function
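
The fast path above only needs `length` and `name` to still be the original AccessorInfo descriptors because the bound function's own `length` and `name` can then be derived from the target per ES 19.2.3.2 instead of being copied eagerly. A hedged TypeScript sketch of that derivation (slightly simplified; names are illustrative):

function boundLengthAndNameSketch(target: Function, boundArgCount: number) {
  // length: max(0, target.length - number of bound arguments), provided the
  // target still exposes a numeric "length".
  const targetLen = typeof target.length === 'number' ? target.length : 0;
  const length = Math.max(0, targetLen - boundArgCount);
  // name: "bound " prepended to the target's name (empty string otherwise).
  const name = 'bound ' + (typeof target.name === 'string' ? target.name : '');
  return {length, name};
}
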
diff --git a/chromium/v8/src/builtins/growable-fixed-array.tq b/chromium/v8/src/builtins/growable-fixed-array.tq
index 094e051a65a..af9418b0c91 100644
--- a/chromium/v8/src/builtins/growable-fixed-array.tq
+++ b/chromium/v8/src/builtins/growable-fixed-array.tq
@@ -25,9 +25,6 @@ struct GrowableFixedArray {
this.array = this.ResizeFixedArray(this.capacity);
}
}
- macro ToFixedArray(): FixedArray {
- return this.ResizeFixedArray(this.length);
- }
macro ToJSArray(implicit context: Context)(): JSArray {
const nativeContext: NativeContext = LoadNativeContext(context);
diff --git a/chromium/v8/src/builtins/internal.tq b/chromium/v8/src/builtins/internal.tq
new file mode 100644
index 00000000000..9e7e4240ba2
--- /dev/null
+++ b/chromium/v8/src/builtins/internal.tq
@@ -0,0 +1,35 @@
+// Copyright 2020 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+namespace internal {
+
+namespace runtime {
+extern runtime GetTemplateObject(implicit context: Context)(
+ TemplateObjectDescription, SharedFunctionInfo, Smi): JSAny;
+}
+
+builtin GetTemplateObject(
+ context: Context, shared: SharedFunctionInfo,
+ description: TemplateObjectDescription, slot: uintptr,
+ maybeFeedbackVector: FeedbackVector|Undefined): JSArray {
+ // TODO(jgruber): Consider merging with the GetTemplateObject bytecode
+ // handler; the current advantage of the split implementation is that the
+ // bytecode can skip most work if feedback exists.
+
+ try {
+ const vector =
+ Cast<FeedbackVector>(maybeFeedbackVector) otherwise CallRuntime;
+ return Cast<JSArray>(ic::LoadFeedbackVectorSlot(vector, slot))
+ otherwise CallRuntime;
+ } label CallRuntime deferred {
+ const result = UnsafeCast<JSArray>(runtime::GetTemplateObject(
+ description, shared, Convert<Smi>(Signed(slot))));
+ const vector =
+ Cast<FeedbackVector>(maybeFeedbackVector) otherwise return result;
+ ic::StoreFeedbackVectorSlot(vector, slot, result);
+ return result;
+ }
+}
+
+} // namespace internal
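
The builtin above is essentially a per-feedback-slot cache: return the cached JSArray when feedback exists, otherwise ask the runtime and store the result back. A small TypeScript sketch of the same pattern, with a Map standing in for the feedback vector (purely illustrative):

const templateObjectCache = new Map<number, ReadonlyArray<string>>();

function getTemplateObjectSketch(
    slot: number, create: () => ReadonlyArray<string>): ReadonlyArray<string> {
  const cached = templateObjectCache.get(slot);
  if (cached !== undefined) return cached;  // Fast path: feedback exists.
  const result = create();                  // Slow path: the runtime call.
  templateObjectCache.set(slot, result);    // Store back for later hits.
  return result;
}
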
diff --git a/chromium/v8/src/builtins/math.tq b/chromium/v8/src/builtins/math.tq
index 0586f432f5b..50bd3e2201b 100644
--- a/chromium/v8/src/builtins/math.tq
+++ b/chromium/v8/src/builtins/math.tq
@@ -4,9 +4,6 @@
namespace math {
-extern transitioning builtin
-NonNumberToNumber(implicit context: Context)(HeapObject): Number;
-
transitioning macro ReduceToSmiOrFloat64(implicit context: Context)(x: JSAny):
never
labels SmiResult(Smi), Float64Result(float64) {
@@ -20,7 +17,7 @@ transitioning macro ReduceToSmiOrFloat64(implicit context: Context)(x: JSAny):
goto Float64Result(Convert<float64>(h));
}
case (a: JSAnyNotNumber): {
- x1 = NonNumberToNumber(a);
+ x1 = conversion::NonNumberToNumber(a);
}
}
}
@@ -29,6 +26,7 @@ transitioning macro ReduceToSmiOrFloat64(implicit context: Context)(x: JSAny):
// ES6 #sec-math.abs
extern macro IsIntPtrAbsWithOverflowSupported(): constexpr bool;
+extern macro TrySmiAdd(Smi, Smi): Smi labels Overflow;
extern macro TrySmiSub(Smi, Smi): Smi labels Overflow;
extern macro TrySmiAbs(Smi): Smi labels Overflow;
extern macro Float64Abs(float64): float64;
diff --git a/chromium/v8/src/builtins/number.tq b/chromium/v8/src/builtins/number.tq
index 98680cf5533..753998424fa 100644
--- a/chromium/v8/src/builtins/number.tq
+++ b/chromium/v8/src/builtins/number.tq
@@ -2,9 +2,48 @@
// source code is governed by a BSD-style license that can be found in the
// LICENSE file.
+#include 'src/ic/binary-op-assembler.h'
+
+extern enum Operation extends uint31 {
+ // Binary operations.
+ kAdd,
+ kSubtract,
+ kMultiply,
+ kDivide,
+ kModulus,
+ kExponentiate,
+ kBitwiseAnd,
+ kBitwiseOr,
+ kBitwiseXor,
+ kShiftLeft,
+ kShiftRight,
+ kShiftRightLogical,
+ // Unary operations.
+ kBitwiseNot,
+ kNegate,
+ kIncrement,
+ kDecrement,
+ // Compare operations.
+ kEqual,
+ kStrictEqual,
+ kLessThan,
+ kLessThanOrEqual,
+ kGreaterThan,
+ kGreaterThanOrEqual
+}
+
namespace runtime {
extern transitioning runtime
DoubleToStringWithRadix(implicit context: Context)(Number, Number): String;
+
+extern transitioning runtime StringParseFloat(implicit context: Context)(
+ String): Number;
+extern transitioning runtime StringParseInt(implicit context: Context)(
+ JSAny, JSAny): Number;
+
+extern runtime BigIntUnaryOp(Context, BigInt, SmiTagged<Operation>): BigInt;
+extern runtime BigIntBinaryOp(
+ Context, Numeric, Numeric, SmiTagged<Operation>): BigInt;
} // namespace runtime
namespace number {
@@ -60,14 +99,649 @@ transitioning javascript builtin NumberPrototypeToString(
if (x == -0) {
return ZeroStringConstant();
- } else if (NumberIsNaN(x)) {
+ } else if (::NumberIsNaN(x)) {
return NaNStringConstant();
} else if (x == V8_INFINITY) {
return InfinityStringConstant();
} else if (x == MINUS_V8_INFINITY) {
return MinusInfinityStringConstant();
}
-
return runtime::DoubleToStringWithRadix(x, radixNumber);
}
+
+// ES6 #sec-number.isfinite
+javascript builtin NumberIsFinite(
+ js-implicit context: NativeContext,
+ receiver: JSAny)(value: JSAny): Boolean {
+ typeswitch (value) {
+ case (Smi): {
+ return True;
+ }
+ case (h: HeapNumber): {
+ const number: float64 = Convert<float64>(h);
+ const infiniteOrNaN: bool = Float64IsNaN(number - number);
+ return Convert<Boolean>(!infiniteOrNaN);
+ }
+ case (JSAnyNotNumber): {
+ return False;
+ }
+ }
+}
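
The `Float64IsNaN(number - number)` test above relies on the IEEE-754 identity that x - x is NaN exactly when x is NaN or ±Infinity, so a single subtraction classifies "finite" without separate NaN and infinity checks. A quick TypeScript check of that identity:

for (const x of [1.5, -0, NaN, Infinity, -Infinity]) {
  const infiniteOrNaN = Number.isNaN(x - x);
  // Prints true for every input: finite => false, NaN/±Infinity => true.
  console.log(x, infiniteOrNaN, Number.isFinite(x) === !infiniteOrNaN);
}
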
+
+// ES6 #sec-number.isinteger
+javascript builtin NumberIsInteger(js-implicit context: NativeContext)(
+ value: JSAny): Boolean {
+ return SelectBooleanConstant(IsInteger(value));
+}
+
+// ES6 #sec-number.isnan
+javascript builtin NumberIsNaN(js-implicit context: NativeContext)(
+ value: JSAny): Boolean {
+ typeswitch (value) {
+ case (Smi): {
+ return False;
+ }
+ case (h: HeapNumber): {
+ const number: float64 = Convert<float64>(h);
+ return Convert<Boolean>(Float64IsNaN(number));
+ }
+ case (JSAnyNotNumber): {
+ return False;
+ }
+ }
+}
+
+// ES6 #sec-number.issafeinteger
+javascript builtin NumberIsSafeInteger(js-implicit context: NativeContext)(
+ value: JSAny): Boolean {
+ return SelectBooleanConstant(IsSafeInteger(value));
+}
+
+// ES6 #sec-number.prototype.valueof
+transitioning javascript builtin NumberPrototypeValueOf(
+ js-implicit context: NativeContext, receiver: JSAny)(): JSAny {
+ return ToThisValue(
+ receiver, PrimitiveType::kNumber, 'Number.prototype.valueOf');
+}
+
+// ES6 #sec-number.parsefloat
+transitioning javascript builtin NumberParseFloat(
+ js-implicit context: NativeContext)(value: JSAny): Number {
+ try {
+ typeswitch (value) {
+ case (s: Smi): {
+ return s;
+ }
+ case (h: HeapNumber): {
+ // The input is already a Number. Take care of -0.
+ // The sense of comparison is important for the NaN case.
+ return (Convert<float64>(h) == 0) ? SmiConstant(0) : h;
+ }
+ case (s: String): {
+ goto String(s);
+ }
+ case (HeapObject): {
+ goto String(string::ToString(context, value));
+ }
+ }
+ } label String(s: String) {
+ // Check if the string is a cached array index.
+ const hash: NameHash = s.hash_field;
+ if (!hash.is_not_integer_index_mask &&
+ hash.array_index_length < kMaxCachedArrayIndexLength) {
+ const arrayIndex: uint32 = hash.array_index_value;
+ return SmiFromUint32(arrayIndex);
+ }
+ // Fall back to the runtime to convert string to a number.
+ return runtime::StringParseFloat(s);
+ }
+}
+
+extern macro TruncateFloat64ToWord32(float64): uint32;
+
+transitioning builtin ParseInt(implicit context: Context)(
+ input: JSAny, radix: JSAny): Number {
+ try {
+ // Check if radix should be 10 (i.e. undefined, 0 or 10).
+ if (radix != Undefined && !TaggedEqual(radix, SmiConstant(10)) &&
+ !TaggedEqual(radix, SmiConstant(0))) {
+ goto CallRuntime;
+ }
+
+ typeswitch (input) {
+ case (s: Smi): {
+ return s;
+ }
+ case (h: HeapNumber): {
+ // Check if the input value is in Signed32 range.
+ const asFloat64: float64 = Convert<float64>(h);
+ const asInt32: int32 = Signed(TruncateFloat64ToWord32(asFloat64));
+ // The sense of comparison is important for the NaN case.
+ if (asFloat64 == ChangeInt32ToFloat64(asInt32)) goto Int32(asInt32);
+
+ // Check if the absolute value of input is in the [1,1<<31[ range. Call
+ // the runtime for the range [0,1[ because the result could be -0.
+ const kMaxAbsValue: float64 = 2147483648.0;
+ const absInput: float64 = math::Float64Abs(asFloat64);
+ if (absInput < kMaxAbsValue && absInput >= 1) goto Int32(asInt32);
+ goto CallRuntime;
+ }
+ case (s: String): {
+ goto String(s);
+ }
+ case (HeapObject): {
+ goto CallRuntime;
+ }
+ }
+ } label Int32(i: int32) {
+ return ChangeInt32ToTagged(i);
+ } label String(s: String) {
+ // Check if the string is a cached array index.
+ const hash: NameHash = s.hash_field;
+ if (!hash.is_not_integer_index_mask &&
+ hash.array_index_length < kMaxCachedArrayIndexLength) {
+ const arrayIndex: uint32 = hash.array_index_value;
+ return SmiFromUint32(arrayIndex);
+ }
+ // Fall back to the runtime.
+ goto CallRuntime;
+ } label CallRuntime {
+ tail runtime::StringParseInt(input, radix);
+ }
+}
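
A TypeScript sketch of the fast-path gating in ParseInt above, ignoring the cached string-hash shortcut: only the default radix (undefined, 0 or 10) qualifies, and a numeric input is handled inline only when it is already int32-representable or its magnitude lies in [1, 2**31), where truncation cannot produce -0. Names are illustrative, not V8 code.

function parseIntFastPathAppliesSketch(input: unknown, radix: unknown): boolean {
  if (radix !== undefined && radix !== 0 && radix !== 10) return false;
  if (typeof input !== 'number') return false;  // Strings/objects: runtime.
  const asInt32 = input | 0;           // TruncateFloat64ToWord32 + Signed.
  if (input === asInt32) return true;  // Already a 32-bit integer value.
  const abs = Math.abs(input);
  return abs >= 1 && abs < 2 ** 31;    // Safe to truncate; no -0 can result.
}
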
+
+// ES6 #sec-number.parseint
+transitioning javascript builtin NumberParseInt(
+ js-implicit context: NativeContext)(value: JSAny, radix: JSAny): Number {
+ return ParseInt(value, radix);
+}
+
+extern builtin NonNumberToNumeric(implicit context: Context)(JSAny): Numeric;
+extern builtin BitwiseXor(implicit context: Context)(Number, Number): Number;
+extern builtin Subtract(implicit context: Context)(Number, Number): Number;
+extern builtin Add(implicit context: Context)(Number, Number): Number;
+extern builtin StringAddConvertLeft(implicit context: Context)(
+ JSAny, String): JSAny;
+extern builtin StringAddConvertRight(implicit context: Context)(
+ String, JSAny): JSAny;
+
+extern macro BitwiseOp(int32, int32, constexpr Operation): Number;
+extern macro RelationalComparison(
+ constexpr Operation, JSAny, JSAny, Context): Boolean;
+
+// TODO(bbudge) Use a simpler macro structure that doesn't loop when converting
+// non-numbers, if such a code sequence doesn't make the builtin bigger.
+
+transitioning macro ToNumericOrPrimitive(implicit context: Context)(
+ value: JSAny): JSAny {
+ typeswitch (value) {
+ case (v: JSReceiver): {
+ return NonPrimitiveToPrimitive_Default(context, v);
+ }
+ case (v: JSPrimitive): {
+ return NonNumberToNumeric(v);
+ }
+ }
+}
+
+transitioning builtin Add(implicit context: Context)(
+ leftArg: JSAny, rightArg: JSAny): JSAny {
+ let left: JSAny = leftArg;
+ let right: JSAny = rightArg;
+ try {
+ while (true) {
+ typeswitch (left) {
+ case (left: Smi): {
+ typeswitch (right) {
+ case (right: Smi): {
+ return math::TrySmiAdd(left, right) otherwise goto Float64s(
+ SmiToFloat64(left), SmiToFloat64(right));
+ }
+ case (right: HeapNumber): {
+ goto Float64s(SmiToFloat64(left), Convert<float64>(right));
+ }
+ case (right: BigInt): {
+ goto Numerics(left, right);
+ }
+ case (right: String): {
+ goto StringAddConvertLeft(left, right);
+ }
+ case (HeapObject): {
+ right = ToNumericOrPrimitive(right);
+ continue;
+ }
+ }
+ }
+ case (left: HeapNumber): {
+ typeswitch (right) {
+ case (right: Smi): {
+ goto Float64s(Convert<float64>(left), SmiToFloat64(right));
+ }
+ case (right: HeapNumber): {
+ goto Float64s(Convert<float64>(left), Convert<float64>(right));
+ }
+ case (right: BigInt): {
+ goto Numerics(left, right);
+ }
+ case (right: String): {
+ goto StringAddConvertLeft(left, right);
+ }
+ case (HeapObject): {
+ right = ToNumericOrPrimitive(right);
+ continue;
+ }
+ }
+ }
+ case (left: BigInt): {
+ typeswitch (right) {
+ case (right: Numeric): {
+ goto Numerics(left, right);
+ }
+ case (right: String): {
+ goto StringAddConvertLeft(left, right);
+ }
+ case (HeapObject): {
+ right = ToNumericOrPrimitive(right);
+ continue;
+ }
+ }
+ }
+ case (left: String): {
+ goto StringAddConvertRight(left, right);
+ }
+ case (leftReceiver: JSReceiver): {
+ left = ToPrimitiveDefault(leftReceiver);
+ }
+ case (HeapObject): {
+ // left: HeapObject
+ typeswitch (right) {
+ case (right: String): {
+ goto StringAddConvertLeft(left, right);
+ }
+ case (rightReceiver: JSReceiver): {
+            // left is a JSPrimitive and right is a JSReceiver, so convert
+            // right first.
+ right = ToPrimitiveDefault(rightReceiver);
+ continue;
+ }
+ case (JSPrimitive): {
+            // Neither left nor right is a JSReceiver, so convert left.
+ left = NonNumberToNumeric(left);
+ continue;
+ }
+ }
+ }
+ }
+ }
+ } label StringAddConvertLeft(left: JSAny, right: String) {
+ tail StringAddConvertLeft(left, right);
+ } label StringAddConvertRight(left: String, right: JSAny) {
+ tail StringAddConvertRight(left, right);
+ } label Numerics(left: Numeric, right: Numeric) {
+ tail bigint::BigIntAdd(left, right);
+ } label Float64s(left: float64, right: float64) {
+ return AllocateHeapNumberWithValue(left + right);
+ }
+ unreachable;
+}
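
The big typeswitch above mirrors the ES addition operator: both operands go through ToPrimitive with no hint, string concatenation wins if either side is then a string, and otherwise both sides are treated as numerics (Number + Number or BigInt + BigInt; mixing throws). A compact TypeScript sketch of that decision order, with symbol edge cases omitted and illustrative names:

function toPrimitiveDefaultSketch(value: unknown): unknown {
  const isPrim = (v: unknown) =>
      v === null || (typeof v !== 'object' && typeof v !== 'function');
  if (isPrim(value)) return value;
  for (const name of ['valueOf', 'toString']) {
    const method = (value as Record<string, unknown>)[name];
    if (typeof method === 'function') {
      const result = method.call(value);
      if (isPrim(result)) return result;
    }
  }
  throw new TypeError('Cannot convert object to primitive value');
}

function addSemanticsSketch(left: unknown, right: unknown): unknown {
  // The JSReceiver arms above do this via ToPrimitiveDefault.
  const lprim = toPrimitiveDefaultSketch(left);
  const rprim = toPrimitiveDefaultSketch(right);
  // StringAddConvertLeft/Right: concatenate if either primitive is a string.
  if (typeof lprim === 'string' || typeof rprim === 'string') {
    return String(lprim) + String(rprim);
  }
  // Float64s/Numerics labels: numeric addition (BigInt/Number mixing throws).
  return (lprim as any) + (rprim as any);
}
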
+
+// Unary type switch on Number | BigInt.
+macro UnaryOp1(implicit context: Context)(value: JSAny): never labels
+Number(Number), BigInt(BigInt) {
+ let x: JSAny = value;
+ while (true) {
+ typeswitch (x) {
+ case (n: Number): {
+ goto Number(n);
+ }
+ case (b: BigInt): {
+ goto BigInt(b);
+ }
+ case (JSAnyNotNumeric): {
+ x = NonNumberToNumeric(x);
+ }
+ }
+ }
+ unreachable;
+}
+
+// Unary type switch on Smi | HeapNumber | BigInt.
+macro UnaryOp2(implicit context: Context)(value: JSAny): never labels
+Smi(Smi), HeapNumber(HeapNumber), BigInt(BigInt) {
+ let x: JSAny = value;
+ while (true) {
+ typeswitch (x) {
+ case (s: Smi): {
+ goto Smi(s);
+ }
+ case (h: HeapNumber): {
+ goto HeapNumber(h);
+ }
+ case (b: BigInt): {
+ goto BigInt(b);
+ }
+ case (JSAnyNotNumeric): {
+ x = NonNumberToNumeric(x);
+ }
+ }
+ }
+ unreachable;
+}
+
+// Binary type switch on Number | BigInt.
+macro BinaryOp1(implicit context: Context)(
+ leftVal: JSAny, rightVal: JSAny): never labels
+Number(Number, Number), AtLeastOneBigInt(Numeric, Numeric) {
+ let left: JSAny = leftVal;
+ let right: JSAny = rightVal;
+ while (true) {
+ try {
+ typeswitch (left) {
+ case (left: Number): {
+ typeswitch (right) {
+ case (right: Number): {
+ goto Number(left, right);
+ }
+ case (right: BigInt): {
+ goto AtLeastOneBigInt(left, right);
+ }
+ case (JSAnyNotNumeric): {
+ goto RightNotNumeric;
+ }
+ }
+ }
+ case (left: BigInt): {
+ typeswitch (right) {
+ case (right: Numeric): {
+ goto AtLeastOneBigInt(left, right);
+ }
+ case (JSAnyNotNumeric): {
+ goto RightNotNumeric;
+ }
+ }
+ }
+ case (JSAnyNotNumeric): {
+ left = NonNumberToNumeric(left);
+ }
+ }
+ } label RightNotNumeric {
+ right = NonNumberToNumeric(right);
+ }
+ }
+ unreachable;
+}
+
+// Binary type switch on Smi | HeapNumber | BigInt.
+macro BinaryOp2(implicit context: Context)(leftVal: JSAny, rightVal: JSAny):
+ never labels Smis(Smi, Smi), Float64s(float64, float64),
+ AtLeastOneBigInt(Numeric, Numeric) {
+ let left: JSAny = leftVal;
+ let right: JSAny = rightVal;
+ while (true) {
+ try {
+ typeswitch (left) {
+ case (left: Smi): {
+ typeswitch (right) {
+ case (right: Smi): {
+ goto Smis(left, right);
+ }
+ case (right: HeapNumber): {
+ goto Float64s(SmiToFloat64(left), Convert<float64>(right));
+ }
+ case (right: BigInt): {
+ goto AtLeastOneBigInt(left, right);
+ }
+ case (JSAnyNotNumeric): {
+ goto RightNotNumeric;
+ }
+ }
+ }
+ case (left: HeapNumber): {
+ typeswitch (right) {
+ case (right: Smi): {
+ goto Float64s(Convert<float64>(left), SmiToFloat64(right));
+ }
+ case (right: HeapNumber): {
+ goto Float64s(Convert<float64>(left), Convert<float64>(right));
+ }
+ case (right: BigInt): {
+ goto AtLeastOneBigInt(left, right);
+ }
+ case (JSAnyNotNumeric): {
+ goto RightNotNumeric;
+ }
+ }
+ }
+ case (left: BigInt): {
+ typeswitch (right) {
+ case (right: Numeric): {
+ goto AtLeastOneBigInt(left, right);
+ }
+ case (JSAnyNotNumeric): {
+ goto RightNotNumeric;
+ }
+ }
+ }
+ case (JSAnyNotNumeric): {
+ left = NonNumberToNumeric(left);
+ }
+ }
+ } label RightNotNumeric {
+ right = NonNumberToNumeric(right);
+ }
+ }
+ unreachable;
+}
+
+builtin Subtract(implicit context: Context)(
+ left: JSAny, right: JSAny): Numeric {
+ try {
+ BinaryOp2(left, right) otherwise Smis, Float64s, AtLeastOneBigInt;
+ } label Smis(left: Smi, right: Smi) {
+ try {
+ return math::TrySmiSub(left, right) otherwise Overflow;
+ } label Overflow {
+ goto Float64s(SmiToFloat64(left), SmiToFloat64(right));
+ }
+ } label Float64s(left: float64, right: float64) {
+ return AllocateHeapNumberWithValue(left - right);
+ } label AtLeastOneBigInt(left: Numeric, right: Numeric) {
+ tail bigint::BigIntSubtract(left, right);
+ }
+}
+
+builtin Multiply(implicit context: Context)(
+ left: JSAny, right: JSAny): Numeric {
+ try {
+ BinaryOp2(left, right) otherwise Smis, Float64s, AtLeastOneBigInt;
+ } label Smis(left: Smi, right: Smi) {
+ // The result is not necessarily a smi, in case of overflow.
+ return SmiMul(left, right);
+ } label Float64s(left: float64, right: float64) {
+ return AllocateHeapNumberWithValue(left * right);
+ } label AtLeastOneBigInt(left: Numeric, right: Numeric) {
+ tail runtime::BigIntBinaryOp(
+ context, left, right, SmiTag<Operation>(Operation::kMultiply));
+ }
+}
+
+const kSmiValueSize: constexpr int32 generates 'kSmiValueSize';
+const kMinInt32: constexpr int32 generates 'kMinInt';
+const kMinInt31: constexpr int32 generates 'kMinInt31';
+const kMinimumDividend: int32 = (kSmiValueSize == 32) ? kMinInt32 : kMinInt31;
+
+builtin Divide(implicit context: Context)(left: JSAny, right: JSAny): Numeric {
+ try {
+ BinaryOp2(left, right) otherwise Smis, Float64s, AtLeastOneBigInt;
+ } label Smis(left: Smi, right: Smi) {
+ // TODO(jkummerow): Consider just always doing a double division.
+ // Bail out if {divisor} is zero.
+ if (right == 0) goto SmiBailout(left, right);
+
+ // Bail out if dividend is zero and divisor is negative.
+ if (left == 0 && right < 0) goto SmiBailout(left, right);
+
+ const dividend: int32 = SmiToInt32(left);
+ const divisor: int32 = SmiToInt32(right);
+
+ // Bail out if dividend is kMinInt31 (or kMinInt32 if Smis are 32 bits)
+ // and divisor is -1.
+ if (divisor == -1 && dividend == kMinimumDividend) {
+ goto SmiBailout(left, right);
+ }
+ // TODO(epertoso): consider adding a machine instruction that returns
+ // both the result and the remainder.
+ const result: int32 = dividend / divisor;
+ const truncated: int32 = result * divisor;
+ if (dividend != truncated) goto SmiBailout(left, right);
+ return SmiFromInt32(result);
+ } label SmiBailout(left: Smi, right: Smi) {
+ goto Float64s(SmiToFloat64(left), SmiToFloat64(right));
+ } label Float64s(left: float64, right: float64) {
+ return AllocateHeapNumberWithValue(left / right);
+ } label AtLeastOneBigInt(left: Numeric, right: Numeric) {
+ tail runtime::BigIntBinaryOp(
+ context, left, right, SmiTag<Operation>(Operation::kDivide));
+ }
+}
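
The kMinimumDividend / -1 bailout above exists because the mathematically correct quotient, 2**31 (or 2**30 with 31-bit Smis), does not fit back into a signed 32-bit value, so the division has to fall through to the double path. A quick TypeScript illustration of the overflow:

const kMinInt32 = -(2 ** 31);        // -2147483648
const quotient = kMinInt32 / -1;     //  2147483648
console.log(quotient > 2 ** 31 - 1); // true: not representable as int32
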
+
+builtin Modulus(implicit context: Context)(left: JSAny, right: JSAny): Numeric {
+ try {
+ BinaryOp2(left, right) otherwise Smis, Float64s, AtLeastOneBigInt;
+ } label Smis(left: Smi, right: Smi) {
+ return SmiMod(left, right);
+ } label Float64s(left: float64, right: float64) {
+ return AllocateHeapNumberWithValue(left % right);
+ } label AtLeastOneBigInt(left: Numeric, right: Numeric) {
+ tail runtime::BigIntBinaryOp(
+ context, left, right, SmiTag<Operation>(Operation::kModulus));
+ }
+}
+
+builtin Exponentiate(implicit context: Context)(
+ left: JSAny, right: JSAny): Numeric {
+ try {
+ BinaryOp1(left, right) otherwise Numbers, AtLeastOneBigInt;
+ } label Numbers(left: Number, right: Number) {
+ return math::MathPowImpl(left, right);
+ } label AtLeastOneBigInt(left: Numeric, right: Numeric) {
+ tail runtime::BigIntBinaryOp(
+ context, left, right, SmiTag<Operation>(Operation::kExponentiate));
+ }
+}
+
+builtin Negate(implicit context: Context)(value: JSAny): Numeric {
+ try {
+ UnaryOp2(value) otherwise Smi, HeapNumber, BigInt;
+ } label Smi(s: Smi) {
+ return SmiMul(s, -1);
+ } label HeapNumber(h: HeapNumber) {
+ return AllocateHeapNumberWithValue(Convert<float64>(h) * -1);
+ } label BigInt(b: BigInt) {
+ tail runtime::BigIntUnaryOp(
+ context, b, SmiTag<Operation>(Operation::kNegate));
+ }
+}
+
+builtin BitwiseNot(implicit context: Context)(value: JSAny): Numeric {
+ try {
+ UnaryOp1(value) otherwise Number, BigInt;
+ } label Number(n: Number) {
+ tail BitwiseXor(n, -1);
+ } label BigInt(b: BigInt) {
+ return runtime::BigIntUnaryOp(
+ context, b, SmiTag<Operation>(Operation::kBitwiseNot));
+ }
+}
+
+builtin Decrement(implicit context: Context)(value: JSAny): Numeric {
+ try {
+ UnaryOp1(value) otherwise Number, BigInt;
+ } label Number(n: Number) {
+ tail Subtract(n, 1);
+ } label BigInt(b: BigInt) {
+ return runtime::BigIntUnaryOp(
+ context, b, SmiTag<Operation>(Operation::kDecrement));
+ }
+}
+
+builtin Increment(implicit context: Context)(value: JSAny): Numeric {
+ try {
+ UnaryOp1(value) otherwise Number, BigInt;
+ } label Number(n: Number) {
+ tail Add(n, 1);
+ } label BigInt(b: BigInt) {
+ return runtime::BigIntUnaryOp(
+ context, b, SmiTag<Operation>(Operation::kIncrement));
+ }
+}
+
+// Bitwise binary operations.
+
+extern macro BinaryOpAssembler::Generate_BitwiseBinaryOp(
+ constexpr Operation, JSAny, JSAny, Context): Object;
+
+builtin ShiftLeft(implicit context: Context)(
+ left: JSAny, right: JSAny): Object {
+ return Generate_BitwiseBinaryOp(Operation::kShiftLeft, left, right, context);
+}
+
+builtin ShiftRight(implicit context: Context)(
+ left: JSAny, right: JSAny): Object {
+ return Generate_BitwiseBinaryOp(Operation::kShiftRight, left, right, context);
+}
+
+builtin ShiftRightLogical(implicit context: Context)(
+ left: JSAny, right: JSAny): Object {
+ return Generate_BitwiseBinaryOp(
+ Operation::kShiftRightLogical, left, right, context);
+}
+
+builtin BitwiseAnd(implicit context: Context)(
+ left: JSAny, right: JSAny): Object {
+ return Generate_BitwiseBinaryOp(Operation::kBitwiseAnd, left, right, context);
+}
+
+builtin BitwiseOr(implicit context: Context)(
+ left: JSAny, right: JSAny): Object {
+ return Generate_BitwiseBinaryOp(Operation::kBitwiseOr, left, right, context);
}
+
+builtin BitwiseXor(implicit context: Context)(
+ left: JSAny, right: JSAny): Object {
+ return Generate_BitwiseBinaryOp(Operation::kBitwiseXor, left, right, context);
+}
+
+// Relational builtins.
+
+builtin LessThan(implicit context: Context)(left: JSAny, right: JSAny): Object {
+ return RelationalComparison(Operation::kLessThan, left, right, context);
+}
+
+builtin LessThanOrEqual(implicit context: Context)(
+ left: JSAny, right: JSAny): Object {
+ return RelationalComparison(
+ Operation::kLessThanOrEqual, left, right, context);
+}
+
+builtin GreaterThan(implicit context: Context)(
+ left: JSAny, right: JSAny): Object {
+ return RelationalComparison(Operation::kGreaterThan, left, right, context);
+}
+
+builtin GreaterThanOrEqual(implicit context: Context)(
+ left: JSAny, right: JSAny): Object {
+ return RelationalComparison(
+ Operation::kGreaterThanOrEqual, left, right, context);
+}
+
+builtin Equal(implicit context: Context)(left: JSAny, right: JSAny): Object {
+ return Equal(left, right, context);
+}
+
+builtin StrictEqual(implicit context: Context)(
+ left: JSAny, right: JSAny): Object {
+ return ::StrictEqual(left, right);
+}
+
+} // namespace number
diff --git a/chromium/v8/src/builtins/promise-abstract-operations.tq b/chromium/v8/src/builtins/promise-abstract-operations.tq
index 9cf6da102b8..83dd56aff49 100644
--- a/chromium/v8/src/builtins/promise-abstract-operations.tq
+++ b/chromium/v8/src/builtins/promise-abstract-operations.tq
@@ -24,6 +24,16 @@ PromiseRejectEventFromStack(implicit context: Context)(JSPromise, JSAny): JSAny;
// https://tc39.es/ecma262/#sec-promise-abstract-operations
namespace promise {
+
+extern macro PromiseForwardingHandlerSymbolConstant(): Symbol;
+const kPromiseForwardingHandlerSymbol: Symbol =
+ PromiseForwardingHandlerSymbolConstant();
+extern macro PromiseHandledBySymbolConstant(): Symbol;
+const kPromiseHandledBySymbol: Symbol = PromiseHandledBySymbolConstant();
+extern macro ResolveStringConstant(): String;
+const kResolveString: String = ResolveStringConstant();
+extern macro IsPromiseResolveProtectorCellInvalid(): bool;
+
extern macro AllocateFunctionWithMapAndContext(
Map, SharedFunctionInfo, Context): JSFunction;
@@ -503,6 +513,41 @@ PromiseGetCapabilitiesExecutor(
return Undefined;
}
+macro IsPromiseResolveLookupChainIntact(implicit context: Context)(
+ nativeContext: NativeContext, constructor: JSReceiver): bool {
+ if (IsForceSlowPath()) return false;
+ const promiseFun = UnsafeCast<JSFunction>(
+ nativeContext[NativeContextSlot::PROMISE_FUNCTION_INDEX]);
+ return promiseFun == constructor && !IsPromiseResolveProtectorCellInvalid();
+}
+
+// https://tc39.es/ecma262/#sec-getpromiseresolve
+transitioning macro GetPromiseResolve(implicit context: Context)(
+ nativeContext: NativeContext, constructor: Constructor): JSAny {
+ // 1. Assert: IsConstructor(constructor) is true.
+
+ // We can skip the "resolve" lookup on {constructor} if it's the
+ // Promise constructor and the Promise.resolve protector is intact,
+ // as that guards the lookup path for the "resolve" property on the
+ // Promise constructor. In this case, promiseResolveFunction is undefined,
+ // and when CallResolve is called with it later, it will call Promise.resolve.
+ let promiseResolveFunction: JSAny = Undefined;
+
+ if (!IsPromiseResolveLookupChainIntact(nativeContext, constructor)) {
+ let promiseResolve: JSAny;
+
+ // 2. Let promiseResolve be ? Get(constructor, "resolve").
+ promiseResolve = GetProperty(constructor, kResolveString);
+
+ // 3. If IsCallable(promiseResolve) is false, throw a TypeError exception.
+ promiseResolveFunction =
+ Cast<Callable>(promiseResolve) otherwise ThrowTypeError(
+ MessageTemplate::kCalledNonCallable, 'resolve');
+ }
+ // 4. return promiseResolve.
+ return promiseResolveFunction;
+}
+
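GetPromiseResolve above is the spec's two-step lookup plus a protector-guarded shortcut; without the shortcut, a TypeScript equivalent is roughly the following (illustrative only):

function getPromiseResolveSketch(constructor: {resolve?: unknown}): Function {
  // 2. Let promiseResolve be ? Get(constructor, "resolve").
  const promiseResolve = constructor.resolve;
  // 3. If IsCallable(promiseResolve) is false, throw a TypeError exception.
  if (typeof promiseResolve !== 'function') {
    throw new TypeError('resolve is not a function');
  }
  // The Torque fast path instead returns Undefined here when the
  // Promise.resolve protector guarantees the lookup would yield the builtin.
  return promiseResolve;
}
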
transitioning macro CallResolve(implicit context: Context)(
constructor: Constructor, resolve: JSAny, value: JSAny): JSAny {
// Undefined can never be a valid value for the resolve function,
diff --git a/chromium/v8/src/builtins/promise-all-element-closure.tq b/chromium/v8/src/builtins/promise-all-element-closure.tq
index 4dfafec1c92..55f722eb24d 100644
--- a/chromium/v8/src/builtins/promise-all-element-closure.tq
+++ b/chromium/v8/src/builtins/promise-all-element-closure.tq
@@ -66,7 +66,7 @@ extern enum PromiseAllResolveElementContextSlots extends int31
constexpr 'PromiseBuiltins::PromiseAllResolveElementContextSlots' {
kPromiseAllResolveElementRemainingSlot,
kPromiseAllResolveElementCapabilitySlot,
- kPromiseAllResolveElementValuesArraySlot,
+ kPromiseAllResolveElementValuesSlot,
kPromiseAllResolveElementLength
}
extern operator '[]=' macro StoreContextElement(
@@ -106,73 +106,43 @@ transitioning macro PromiseAllResolveElementClosure<F: type>(
assert(identityHash > 0);
const index = identityHash - 1;
- // Check if we need to grow the [[ValuesArray]] to store {value} at {index}.
- const valuesArray = UnsafeCast<JSArray>(
+ let remainingElementsCount =
+ UnsafeCast<Smi>(context[PromiseAllResolveElementContextSlots::
+ kPromiseAllResolveElementRemainingSlot]);
+
+ let values =
+ UnsafeCast<FixedArray>(context[PromiseAllResolveElementContextSlots::
+ kPromiseAllResolveElementValuesSlot]);
+ const newCapacity = index + 1;
+ if (newCapacity > values.length_intptr) deferred {
+ // This happens only when the promises are resolved during iteration.
+ values = ExtractFixedArray(values, 0, values.length_intptr, newCapacity);
context[PromiseAllResolveElementContextSlots::
- kPromiseAllResolveElementValuesArraySlot]);
- const elements = UnsafeCast<FixedArray>(valuesArray.elements);
- const valuesLength = Convert<intptr>(valuesArray.length);
- if (index < valuesLength) {
- // The {index} is in bounds of the {values_array}, check if this element has
- // already been resolved, and store the {value} if not.
- //
- // Promise.allSettled, for each input element, has both a resolve and a
- // reject closure that share an [[AlreadyCalled]] boolean. That is, the
- // input element can only be settled once: after resolve is called, reject
- // returns early, and vice versa. Using {function}'s context as the marker
- // only tracks per-closure instead of per-element. When the second
- // resolve/reject closure is called on the same index, values.object[index]
- // will already exist and will not be the hole value. In that case, return
- // early. Everything up to this point is not yet observable to user code.
- // This is not a problem for Promise.all since Promise.all has a single
- // resolve closure (no reject) per element.
- if (hasResolveAndRejectClosures) {
- if (elements.objects[index] != TheHole) deferred {
- return Undefined;
- }
+ kPromiseAllResolveElementValuesSlot] = values;
}
- // Update the value depending on whether Promise.all or
- // Promise.allSettled is called.
- const updatedValue = wrapResultFunctor.Call(nativeContext, value);
- elements.objects[index] = updatedValue;
- } else {
- // Check if we need to grow the backing store.
- //
- // There's no need to check if this element has already been resolved for
- // Promise.allSettled if {values_array} has not yet grown to the index.
- const newLength = index + 1;
- const elementsLength = elements.length_intptr;
-
- // Update the value depending on whether Promise.all or
- // Promise.allSettled is called.
- const updatedValue = wrapResultFunctor.Call(nativeContext, value);
-
- if (index < elementsLength) {
- // The {index} is within bounds of the {elements} backing store, so
- // just store the {value} and update the "length" of the {values_array}.
- valuesArray.length = Convert<Smi>(newLength);
- elements.objects[index] = updatedValue;
- } else
- deferred {
- // We need to grow the backing store to fit the {index} as well.
- const newElementsLength = IntPtrMin(
- CalculateNewElementsCapacity(newLength),
- kPropertyArrayHashFieldMax + 1);
- assert(index < newElementsLength);
- assert(elementsLength < newElementsLength);
- const newElements =
- ExtractFixedArray(elements, 0, elementsLength, newElementsLength);
- newElements.objects[index] = updatedValue;
-
- // Update backing store and "length" on {values_array}.
- valuesArray.elements = newElements;
- valuesArray.length = Convert<Smi>(newLength);
+ // Promise.allSettled, for each input element, has both a resolve and a reject
+ // closure that share an [[AlreadyCalled]] boolean. That is, the input element
+ // can only be settled once: after resolve is called, reject returns early,
+ // and vice versa. Using {function}'s context as the marker only tracks
+ // per-closure instead of per-element. When the second resolve/reject closure
+  // is called on the same index, values.objects[index] will already exist and
+ // will not be the hole value. In that case, return early. Everything up to
+ // this point is not yet observable to user code. This is not a problem for
+ // Promise.all since Promise.all has a single resolve closure (no reject) per
+ // element.
+ if (hasResolveAndRejectClosures) {
+ if (values.objects[index] != TheHole) deferred {
+ return Undefined;
}
}
- let remainingElementsCount =
- UnsafeCast<Smi>(context[PromiseAllResolveElementContextSlots::
- kPromiseAllResolveElementRemainingSlot]);
+
+ // Update the value depending on whether Promise.all or
+ // Promise.allSettled is called.
+ const updatedValue = wrapResultFunctor.Call(nativeContext, value);
+
+ values.objects[index] = updatedValue;
+
remainingElementsCount = remainingElementsCount - 1;
context[PromiseAllResolveElementContextSlots::
kPromiseAllResolveElementRemainingSlot] = remainingElementsCount;
@@ -181,6 +151,9 @@ transitioning macro PromiseAllResolveElementClosure<F: type>(
context[PromiseAllResolveElementContextSlots::
kPromiseAllResolveElementCapabilitySlot]);
const resolve = UnsafeCast<JSAny>(capability.resolve);
+ const arrayMap = UnsafeCast<Map>(
+ nativeContext[NativeContextSlot::JS_ARRAY_PACKED_ELEMENTS_MAP_INDEX]);
+ const valuesArray = NewJSArray(arrayMap, values);
Call(context, resolve, Undefined, valuesArray);
}
return Undefined;
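
The comment block above explains why the hole check doubles as the [[AlreadyCalled]] flag: each Promise.allSettled element has a resolve and a reject closure that must collectively settle its slot at most once. A tiny TypeScript sketch of that invariant, using undefined as a stand-in for TheHole and a callback for the remaining-elements bookkeeping (not V8 code):

function makeSettledPairSketch(
    values: unknown[], index: number, onSettled: () => void) {
  const settleOnce = (entry: unknown) => {
    if (values[index] !== undefined) return;  // Already settled: early return.
    values[index] = entry;
    onSettled();  // Stands in for decrementing remainingElementsCount.
  };
  return {
    resolve: (v: unknown) => settleOnce({status: 'fulfilled', value: v}),
    reject: (e: unknown) => settleOnce({status: 'rejected', reason: e}),
  };
}
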
diff --git a/chromium/v8/src/builtins/promise-all.tq b/chromium/v8/src/builtins/promise-all.tq
index b7fad88f6fc..302d4f3251c 100644
--- a/chromium/v8/src/builtins/promise-all.tq
+++ b/chromium/v8/src/builtins/promise-all.tq
@@ -18,12 +18,6 @@ const kPromiseBuiltinsPromiseContextLength: constexpr int31
// case to mark it's done).
macro CreatePromiseAllResolveElementContext(implicit context: Context)(
capability: PromiseCapability, nativeContext: NativeContext): Context {
- // TODO(bmeurer): Manually fold this into a single allocation.
- const arrayMap = UnsafeCast<Map>(
- nativeContext[NativeContextSlot::JS_ARRAY_PACKED_ELEMENTS_MAP_INDEX]);
- const valuesArray = AllocateJSArray(
- ElementsKind::PACKED_ELEMENTS, arrayMap, IntPtrConstant(0),
- SmiConstant(0));
const resolveContext = AllocateSyntheticFunctionContext(
nativeContext,
PromiseAllResolveElementContextSlots::kPromiseAllResolveElementLength);
@@ -32,7 +26,7 @@ macro CreatePromiseAllResolveElementContext(implicit context: Context)(
resolveContext[PromiseAllResolveElementContextSlots::
kPromiseAllResolveElementCapabilitySlot] = capability;
resolveContext[PromiseAllResolveElementContextSlots::
- kPromiseAllResolveElementValuesArraySlot] = valuesArray;
+ kPromiseAllResolveElementValuesSlot] = kEmptyFixedArray;
return resolveContext;
}
@@ -115,11 +109,11 @@ struct PromiseAllSettledRejectElementFunctor {
transitioning macro PerformPromiseAll<F1: type, F2: type>(
implicit context: Context)(
- constructor: JSReceiver, capability: PromiseCapability,
- iter: iterator::IteratorRecord, createResolveElementFunctor: F1,
+ nativeContext: NativeContext, iter: iterator::IteratorRecord,
+ constructor: Constructor, capability: PromiseCapability,
+ promiseResolveFunction: JSAny, createResolveElementFunctor: F1,
createRejectElementFunctor: F2): JSAny labels
Reject(Object) {
- const nativeContext = LoadNativeContext(context);
const promise = capability.promise;
const resolve = capability.resolve;
const reject = capability.reject;
@@ -135,141 +129,119 @@ Reject(Object) {
let index: Smi = 1;
- // We can skip the "resolve" lookup on {constructor} if it's the
- // Promise constructor and the Promise.resolve protector is intact,
- // as that guards the lookup path for the "resolve" property on the
- // Promise constructor.
- let promiseResolveFunction: JSAny = Undefined;
try {
- try {
- if (!IsPromiseResolveLookupChainIntact(nativeContext, constructor)) {
- let promiseResolve: JSAny;
-
- // 5. Let _promiseResolve_ be ? Get(_constructor_, `"resolve"`).
- promiseResolve = GetProperty(constructor, kResolveString);
-
- // 6. If IsCallable(_promiseResolve_) is *false*, throw a *TypeError*
- // exception.
- promiseResolveFunction =
- Cast<Callable>(promiseResolve) otherwise ThrowTypeError(
- MessageTemplate::kCalledNonCallable, 'resolve');
+ const fastIteratorResultMap = UnsafeCast<Map>(
+ nativeContext[NativeContextSlot::ITERATOR_RESULT_MAP_INDEX]);
+ while (true) {
+ let nextValue: JSAny;
+ try {
+ // Let next be IteratorStep(iteratorRecord.[[Iterator]]).
+ // If next is an abrupt completion, set iteratorRecord.[[Done]] to
+ // true. ReturnIfAbrupt(next).
+ const next: JSReceiver = iterator::IteratorStep(
+ iter, fastIteratorResultMap) otherwise goto Done;
+
+ // Let nextValue be IteratorValue(next).
+ // If nextValue is an abrupt completion, set iteratorRecord.[[Done]]
+ // to true.
+ // ReturnIfAbrupt(nextValue).
+ nextValue = iterator::IteratorValue(next, fastIteratorResultMap);
+ } catch (e) {
+ goto Reject(e);
}
- const fastIteratorResultMap = UnsafeCast<Map>(
- nativeContext[NativeContextSlot::ITERATOR_RESULT_MAP_INDEX]);
- while (true) {
- let nextValue: JSAny;
- try {
- // Let next be IteratorStep(iteratorRecord.[[Iterator]]).
- // If next is an abrupt completion, set iteratorRecord.[[Done]] to
- // true. ReturnIfAbrupt(next).
- const next: JSReceiver = iterator::IteratorStep(
- iter, fastIteratorResultMap) otherwise goto Done;
-
- // Let nextValue be IteratorValue(next).
- // If nextValue is an abrupt completion, set iteratorRecord.[[Done]]
- // to true.
- // ReturnIfAbrupt(nextValue).
- nextValue = iterator::IteratorValue(next, fastIteratorResultMap);
- } catch (e) {
- goto Reject(e);
- }
-
- // Check if we reached the limit.
- if (index == kPropertyArrayHashFieldMax) {
- // If there are too many elements (currently more than 2**21-1),
- // raise a RangeError here (which is caught below and turned into
- // a rejection of the resulting promise). We could gracefully handle
- // this case as well and support more than this number of elements
- // by going to a separate function and pass the larger indices via a
- // separate context, but it doesn't seem likely that we need this,
- // and it's unclear how the rest of the system deals with 2**21 live
- // Promises anyway.
- ThrowRangeError(
- MessageTemplate::kTooManyElementsInPromiseCombinator, 'all');
- }
-
- // Set remainingElementsCount.[[Value]] to
- // remainingElementsCount.[[Value]] + 1.
- const remainingElementsCount = UnsafeCast<Smi>(
- resolveElementContext[PromiseAllResolveElementContextSlots::
- kPromiseAllResolveElementRemainingSlot]);
- resolveElementContext[PromiseAllResolveElementContextSlots::
- kPromiseAllResolveElementRemainingSlot] =
- remainingElementsCount + 1;
-
- // Let resolveElement be CreateBuiltinFunction(steps,
- // « [[AlreadyCalled]],
- // [[Index]],
- // [[Values]],
- // [[Capability]],
- // [[RemainingElements]]
- // »).
- // Set resolveElement.[[AlreadyCalled]] to a Record { [[Value]]: false
- // }. Set resolveElement.[[Index]] to index. Set
- // resolveElement.[[Values]] to values. Set
- // resolveElement.[[Capability]] to resultCapability. Set
- // resolveElement.[[RemainingElements]] to remainingElementsCount.
- const resolveElementFun = createResolveElementFunctor.Call(
- resolveElementContext, nativeContext, index, capability);
- const rejectElementFun = createRejectElementFunctor.Call(
- resolveElementContext, nativeContext, index, capability);
-
- // We can skip the "resolve" lookup on the {constructor} as well as
- // the "then" lookup on the result of the "resolve" call, and
- // immediately chain continuation onto the {next_value} if:
- //
- // (a) The {constructor} is the intrinsic %Promise% function, and
- // looking up "resolve" on {constructor} yields the initial
- // Promise.resolve() builtin, and
- // (b) the promise @@species protector cell is valid, meaning that
- // no one messed with the Symbol.species property on any
- // intrinsic promise or on the Promise.prototype, and
- // (c) the {next_value} is a JSPromise whose [[Prototype]] field
- // contains the intrinsic %PromisePrototype%, and
- // (d) we're not running with async_hooks or DevTools enabled.
- //
- // In that case we also don't need to allocate a chained promise for
- // the PromiseReaction (aka we can pass undefined to
- // PerformPromiseThen), since this is only necessary for DevTools and
- // PromiseHooks.
- if (promiseResolveFunction != Undefined ||
- IsPromiseHookEnabledOrDebugIsActiveOrHasAsyncEventDelegate() ||
- IsPromiseSpeciesProtectorCellInvalid() || Is<Smi>(nextValue) ||
- !IsPromiseThenLookupChainIntact(
- nativeContext, UnsafeCast<HeapObject>(nextValue).map)) {
- // Let nextPromise be ? Call(constructor, _promiseResolve_, «
- // nextValue »).
- const nextPromise = CallResolve(
- UnsafeCast<Constructor>(constructor), promiseResolveFunction,
- nextValue);
-
- // Perform ? Invoke(nextPromise, "then", « resolveElement,
- // resultCapability.[[Reject]] »).
- const then = GetProperty(nextPromise, kThenString);
- const thenResult = Call(
- nativeContext, then, nextPromise, resolveElementFun,
- rejectElementFun);
-
- // For catch prediction, mark that rejections here are
- // semantically handled by the combined Promise.
- if (IsDebugActive() && Is<JSPromise>(thenResult)) deferred {
- SetPropertyStrict(
- context, thenResult, kPromiseHandledBySymbol, promise);
- }
- } else {
- PerformPromiseThenImpl(
- UnsafeCast<JSPromise>(nextValue), resolveElementFun,
- rejectElementFun, Undefined);
- }
-
- // Set index to index + 1.
- index += 1;
+ // Check if we reached the limit.
+ if (index == kPropertyArrayHashFieldMax) {
+ // If there are too many elements (currently more than 2**21-1),
+ // raise a RangeError here (which is caught below and turned into
+ // a rejection of the resulting promise). We could gracefully handle
+ // this case as well and support more than this number of elements
+ // by going to a separate function and passing the larger indices via a
+ // separate context, but it doesn't seem likely that we need this,
+ // and it's unclear how the rest of the system deals with 2**21 live
+ // Promises anyway.
+ ThrowRangeError(
+ MessageTemplate::kTooManyElementsInPromiseCombinator, 'all');
}
- } catch (e) deferred {
- iterator::IteratorCloseOnException(iter);
- goto Reject(e);
+
+ // Set remainingElementsCount.[[Value]] to
+ // remainingElementsCount.[[Value]] + 1.
+ const remainingElementsCount = UnsafeCast<Smi>(
+ resolveElementContext[PromiseAllResolveElementContextSlots::
+ kPromiseAllResolveElementRemainingSlot]);
+ resolveElementContext[PromiseAllResolveElementContextSlots::
+ kPromiseAllResolveElementRemainingSlot] =
+ remainingElementsCount + 1;
+
+ // Let resolveElement be CreateBuiltinFunction(steps,
+ // « [[AlreadyCalled]],
+ // [[Index]],
+ // [[Values]],
+ // [[Capability]],
+ // [[RemainingElements]]
+ // »).
+ // Set resolveElement.[[AlreadyCalled]] to a Record { [[Value]]: false
+ // }. Set resolveElement.[[Index]] to index. Set
+ // resolveElement.[[Values]] to values. Set
+ // resolveElement.[[Capability]] to resultCapability. Set
+ // resolveElement.[[RemainingElements]] to remainingElementsCount.
+ const resolveElementFun = createResolveElementFunctor.Call(
+ resolveElementContext, nativeContext, index, capability);
+ const rejectElementFun = createRejectElementFunctor.Call(
+ resolveElementContext, nativeContext, index, capability);
+
+ // We can skip the "then" lookup on the result of the "resolve" call and
+ // immediately chain the continuation onto the {next_value} if:
+ //
+ // (a) The {constructor} is the intrinsic %Promise% function, and
+ // looking up "resolve" on {constructor} yields the initial
+ // Promise.resolve() builtin, and
+ // (b) the promise @@species protector cell is valid, meaning that
+ // no one messed with the Symbol.species property on any
+ // intrinsic promise or on the Promise.prototype, and
+ // (c) the {next_value} is a JSPromise whose [[Prototype]] field
+ // contains the intrinsic %PromisePrototype%, and
+ // (d) we're not running with async_hooks or DevTools enabled.
+ //
+ // In that case we also don't need to allocate a chained promise for
+ // the PromiseReaction (aka we can pass undefined to
+ // PerformPromiseThen), since this is only necessary for DevTools and
+ // PromiseHooks.
+ if (promiseResolveFunction != Undefined ||
+ IsPromiseHookEnabledOrDebugIsActiveOrHasAsyncEventDelegate() ||
+ IsPromiseSpeciesProtectorCellInvalid() || Is<Smi>(nextValue) ||
+ !IsPromiseThenLookupChainIntact(
+ nativeContext, UnsafeCast<HeapObject>(nextValue).map)) {
+ // Let nextPromise be ? Call(constructor, _promiseResolve_, «
+ // nextValue »).
+ const nextPromise =
+ CallResolve(constructor, promiseResolveFunction, nextValue);
+
+ // Perform ? Invoke(nextPromise, "then", « resolveElement,
+ // resultCapability.[[Reject]] »).
+ const then = GetProperty(nextPromise, kThenString);
+ const thenResult = Call(
+ nativeContext, then, nextPromise, resolveElementFun,
+ rejectElementFun);
+
+ // For catch prediction, mark that rejections here are
+ // semantically handled by the combined Promise.
+ if (IsDebugActive() && Is<JSPromise>(thenResult)) deferred {
+ SetPropertyStrict(
+ context, thenResult, kPromiseHandledBySymbol, promise);
+ }
+ } else {
+ PerformPromiseThenImpl(
+ UnsafeCast<JSPromise>(nextValue), resolveElementFun,
+ rejectElementFun, Undefined);
+ }
+
+ // Set index to index + 1.
+ index += 1;
}
+ } catch (e) deferred {
+ iterator::IteratorCloseOnException(iter);
+ goto Reject(e);
} label Done {}
// Set iteratorRecord.[[Done]] to true.
@@ -283,30 +255,36 @@ Reject(Object) {
kPromiseAllResolveElementRemainingSlot] =
remainingElementsCount;
if (remainingElementsCount > 0) {
- // Pre-allocate the backing store for the {values_array} to the desired
- // capacity here. We may already have elements here in case of some
- // fancy Thenable that calls the resolve callback immediately, so we need
- // to handle that correctly here.
- const valuesArray = UnsafeCast<JSArray>(
+ // Pre-allocate the backing store for the {values} to the desired
+ // capacity. We may already have elements in "values" - this happens
+ // when the Thenable calls the resolve callback immediately.
+ let values = UnsafeCast<FixedArray>(
resolveElementContext[PromiseAllResolveElementContextSlots::
- kPromiseAllResolveElementValuesArraySlot]);
- const oldElements = UnsafeCast<FixedArray>(valuesArray.elements);
- const oldCapacity = oldElements.length_intptr;
- const newCapacity = SmiUntag(index);
+ kPromiseAllResolveElementValuesSlot]);
+ // 'index' is a 1-based index and incremented after every Promise. Later we
+ // use 'values' as a 0-based array, so capacity 'index - 1' is enough.
+ const newCapacity = SmiUntag(index) - 1;
+
+ const oldCapacity = values.length_intptr;
if (oldCapacity < newCapacity) {
- valuesArray.elements =
- ExtractFixedArray(oldElements, 0, oldCapacity, newCapacity);
+ values = ExtractFixedArray(values, 0, oldCapacity, newCapacity);
+ resolveElementContext[PromiseAllResolveElementContextSlots::
+ kPromiseAllResolveElementValuesSlot] = values;
}
} else
deferred {
+ assert(remainingElementsCount == 0);
// If remainingElementsCount.[[Value]] is 0, then
// Let valuesArray be CreateArrayFromList(values).
// Perform ? Call(resultCapability.[[Resolve]], undefined,
// « valuesArray »).
- assert(remainingElementsCount == 0);
- const valuesArray = UnsafeCast<JSAny>(
+
+ const values = UnsafeCast<FixedArray>(
resolveElementContext[PromiseAllResolveElementContextSlots::
- kPromiseAllResolveElementValuesArraySlot]);
+ kPromiseAllResolveElementValuesSlot]);
+ const arrayMap = UnsafeCast<Map>(
+ nativeContext[NativeContextSlot::JS_ARRAY_PACKED_ELEMENTS_MAP_INDEX]);
+ const valuesArray = NewJSArray(arrayMap, values);
Call(nativeContext, UnsafeCast<JSAny>(resolve), Undefined, valuesArray);
}
@@ -318,6 +296,7 @@ transitioning macro GeneratePromiseAll<F1: type, F2: type>(
implicit context: Context)(
receiver: JSAny, iterable: JSAny, createResolveElementFunctor: F1,
createRejectElementFunctor: F2): JSAny {
+ const nativeContext = LoadNativeContext(context);
// Let C be the this value.
// If Type(C) is not Object, throw a TypeError exception.
const receiver = Cast<JSReceiver>(receiver)
@@ -328,7 +307,16 @@ transitioning macro GeneratePromiseAll<F1: type, F2: type>(
// not trigger redundant ExceptionEvents
const capability = NewPromiseCapability(receiver, False);
+ // NewPromiseCapability guarantees that receiver is Constructor.
+ assert(Is<Constructor>(receiver));
+ const constructor = UnsafeCast<Constructor>(receiver);
+
try {
+ // Let promiseResolve be GetPromiseResolve(C).
+ // IfAbruptRejectPromise(promiseResolve, promiseCapability).
+ const promiseResolveFunction =
+ GetPromiseResolve(nativeContext, constructor);
+
// Let iterator be GetIterator(iterable).
// IfAbruptRejectPromise(iterator, promiseCapability).
let i = iterator::GetIterator(iterable);
@@ -339,8 +327,9 @@ transitioning macro GeneratePromiseAll<F1: type, F2: type>(
// IteratorClose(iterator, result).
// IfAbruptRejectPromise(result, promiseCapability).
return PerformPromiseAll(
- receiver, capability, i, createResolveElementFunctor,
- createRejectElementFunctor) otherwise Reject;
+ nativeContext, i, constructor, capability, promiseResolveFunction,
+ createResolveElementFunctor, createRejectElementFunctor)
+ otherwise Reject;
} catch (e) deferred {
goto Reject(e);
} label Reject(e: Object) deferred {
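
The capacity computation in the PerformPromiseAll hunk above relies on 'index'
being 1-based and incremented after every element, while 'values' is addressed
0-based, so 'index - 1' slots are exactly enough once iteration stops. A
minimal standalone sketch of that invariant in plain C++ (illustrative names
only, not V8 code):

    #include <cassert>
    #include <cstddef>
    #include <vector>

    int main() {
      std::vector<int> values;   // stands in for the values FixedArray
      std::size_t index = 1;     // 1-based, incremented after every element
      const int input[] = {10, 20, 30};
      for (int x : input) {
        std::size_t slot = index - 1;                        // closures write at index - 1
        if (values.size() <= slot) values.resize(slot + 1);  // lazy growth
        values[slot] = x;
        ++index;
      }
      // After the loop index == element count + 1, so capacity index - 1 suffices.
      assert(values.size() == index - 1);
      return 0;
    }
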
diff --git a/chromium/v8/src/builtins/promise-any.tq b/chromium/v8/src/builtins/promise-any.tq
index 1046ed0a89c..2fefdf4baad 100644
--- a/chromium/v8/src/builtins/promise-any.tq
+++ b/chromium/v8/src/builtins/promise-any.tq
@@ -9,7 +9,7 @@ extern enum PromiseAnyRejectElementContextSlots extends int31
constexpr 'PromiseBuiltins::PromiseAnyRejectElementContextSlots' {
kPromiseAnyRejectElementRemainingSlot,
kPromiseAnyRejectElementCapabilitySlot,
- kPromiseAnyRejectElementErrorsArraySlot,
+ kPromiseAnyRejectElementErrorsSlot,
kPromiseAnyRejectElementLength
}
@@ -35,9 +35,8 @@ transitioning macro CreatePromiseAnyRejectElementContext(
kPromiseAnyRejectElementRemainingSlot] = SmiConstant(1);
rejectContext[PromiseAnyRejectElementContextSlots::
kPromiseAnyRejectElementCapabilitySlot] = capability;
- // Will be set later.
rejectContext[PromiseAnyRejectElementContextSlots::
- kPromiseAnyRejectElementErrorsArraySlot] = Undefined;
+ kPromiseAnyRejectElementErrorsSlot] = kEmptyFixedArray;
return rejectContext;
}
@@ -92,17 +91,9 @@ PromiseAnyRejectElementClosure(
const index = identityHash - 1;
// 6. Let errors be F.[[Errors]].
- if (context[PromiseAnyRejectElementContextSlots::
- kPromiseAnyRejectElementErrorsArraySlot] == Undefined) {
- // We're going to reject the Promise with a more fundamental error (e.g.,
- // something went wrong with iterating the Promises). We don't need to
- // construct the "errors" array.
- return Undefined;
- }
-
- const errorsArray = UnsafeCast<FixedArray>(
- context[PromiseAnyRejectElementContextSlots::
- kPromiseAnyRejectElementErrorsArraySlot]);
+ let errors =
+ UnsafeCast<FixedArray>(context[PromiseAnyRejectElementContextSlots::
+ kPromiseAnyRejectElementErrorsSlot]);
// 7. Let promiseCapability be F.[[Capability]].
@@ -110,8 +101,15 @@ PromiseAnyRejectElementClosure(
let remainingElementsCount =
UnsafeCast<Smi>(context[PromiseAnyRejectElementContextSlots::
kPromiseAnyRejectElementRemainingSlot]);
+
// 9. Set errors[index] to x.
- errorsArray.objects[index] = value;
+ const newCapacity = IntPtrMax(SmiUntag(remainingElementsCount), index + 1);
+ if (newCapacity > errors.length_intptr) deferred {
+ errors = ExtractFixedArray(errors, 0, errors.length_intptr, newCapacity);
+ context[PromiseAnyRejectElementContextSlots::
+ kPromiseAnyRejectElementErrorsSlot] = errors;
+ }
+ errors.objects[index] = value;
// 10. Set remainingElementsCount.[[Value]] to
// remainingElementsCount.[[Value]] - 1.
@@ -124,7 +122,7 @@ PromiseAnyRejectElementClosure(
// a. Let error be a newly created AggregateError object.
// b. Set error.[[AggregateErrors]] to errors.
- const error = ConstructAggregateError(errorsArray);
+ const error = ConstructAggregateError(errors);
// c. Return ? Call(promiseCapability.[[Reject]], undefined, « error »).
const capability = UnsafeCast<PromiseCapability>(
context[PromiseAnyRejectElementContextSlots::
@@ -137,16 +135,15 @@ PromiseAnyRejectElementClosure(
}
transitioning macro PerformPromiseAny(implicit context: Context)(
- iteratorRecord: iterator::IteratorRecord, constructor: Constructor,
- resultCapability: PromiseCapability): JSAny labels
+ nativeContext: NativeContext, iteratorRecord: iterator::IteratorRecord,
+ constructor: Constructor, resultCapability: PromiseCapability,
+ promiseResolveFunction: JSAny): JSAny labels
Reject(Object) {
// 1. Assert: ! IsConstructor(constructor) is true.
// 2. Assert: resultCapability is a PromiseCapability Record.
- const nativeContext = LoadNativeContext(context);
-
- // 3. Let errors be a new empty List.
- let growableErrorsArray = growable_fixed_array::NewGrowableFixedArray();
+ // 3. Let errors be a new empty List. (Do nothing: errors is
+ // initialized lazily when the first Promise rejects.)
// 4. Let remainingElementsCount be a new Record { [[Value]]: 1 }.
const rejectElementContext =
@@ -157,21 +154,6 @@ Reject(Object) {
let index: Smi = 1;
try {
- // We can skip the "resolve" lookup on {constructor} if it's the
- // Promise constructor and the Promise.resolve protector is intact,
- // as that guards the lookup path for the "resolve" property on the
- // Promise constructor.
- let promiseResolveFunction: JSAny = Undefined;
- if (!IsPromiseResolveLookupChainIntact(nativeContext, constructor))
- deferred {
- // 6. Let promiseResolve be ? Get(constructor, `"resolve"`).
- const promiseResolve = GetProperty(constructor, kResolveString);
- // 7. If IsCallable(promiseResolve) is false, throw a
- // TypeError exception.
- promiseResolveFunction = Cast<Callable>(promiseResolve)
- otherwise ThrowTypeError(
- MessageTemplate::kCalledNonCallable, 'resolve');
- }
const fastIteratorResultMap = UnsafeCast<Map>(
nativeContext[NativeContextSlot::ITERATOR_RESULT_MAP_INDEX]);
// 8. Repeat,
@@ -215,8 +197,8 @@ Reject(Object) {
MessageTemplate::kTooManyElementsInPromiseCombinator, 'any');
}
- // h. Append undefined to errors.
- growableErrorsArray.Push(Undefined);
+ // h. Append undefined to errors. (Do nothing: errors is initialized
+ // lazily when the first Promise rejects.)
let nextPromise: JSAny;
// i. Let nextPromise be ? Call(constructor, promiseResolve,
@@ -291,16 +273,18 @@ Reject(Object) {
kPromiseAnyRejectElementRemainingSlot] =
remainingElementsCount;
- const errorsArray = growableErrorsArray.ToFixedArray();
- rejectElementContext[PromiseAnyRejectElementContextSlots::
- kPromiseAnyRejectElementErrorsArraySlot] =
- errorsArray;
-
// iii. If remainingElementsCount.[[Value]] is 0, then
if (remainingElementsCount == 0) deferred {
// 1. Let error be a newly created AggregateError object.
// 2. Set error.[[AggregateErrors]] to errors.
- const error = ConstructAggregateError(errorsArray);
+
+ // We may already have elements in "errors" - this happens when the
+ // Thenable calls the reject callback immediately.
+ const errors = UnsafeCast<FixedArray>(
+ rejectElementContext[PromiseAnyRejectElementContextSlots::
+ kPromiseAnyRejectElementErrorsSlot]);
+
+ const error = ConstructAggregateError(errors);
// 3. Return ThrowCompletion(error).
goto Reject(error);
}
@@ -312,6 +296,8 @@ Reject(Object) {
transitioning javascript builtin
PromiseAny(
js-implicit context: Context, receiver: JSAny)(iterable: JSAny): JSAny {
+ const nativeContext = LoadNativeContext(context);
+
// 1. Let C be the this value.
const receiver = Cast<JSReceiver>(receiver)
otherwise ThrowTypeError(MessageTemplate::kCalledOnNonObject, 'Promise.any');
@@ -319,37 +305,42 @@ PromiseAny(
// 2. Let promiseCapability be ? NewPromiseCapability(C).
const capability = NewPromiseCapability(receiver, False);
- // NewPromiseCapability guarantees that receiver is Constructor
+ // NewPromiseCapability guarantees that receiver is Constructor.
assert(Is<Constructor>(receiver));
const constructor = UnsafeCast<Constructor>(receiver);
try {
- let iteratorRecord: iterator::IteratorRecord;
- try {
- // 3. Let iteratorRecord be GetIterator(iterable).
+ // 3. Let promiseResolve be GetPromiseResolve(C).
+ // 4. IfAbruptRejectPromise(promiseResolve, promiseCapability).
+ // (catch below)
+ const promiseResolveFunction =
+ GetPromiseResolve(nativeContext, constructor);
- // 4. IfAbruptRejectPromise(iteratorRecord, promiseCapability).
- // (catch below)
- iteratorRecord = iterator::GetIterator(iterable);
+ // 5. Let iteratorRecord be GetIterator(iterable).
- // 5. Let result be PerformPromiseAny(iteratorRecord, C,
- // promiseCapability).
+ // 6. IfAbruptRejectPromise(iteratorRecord, promiseCapability).
+ // (catch below)
+ const iteratorRecord = iterator::GetIterator(iterable);
- // 6. If result is an abrupt completion, then
+ // 7. Let result be PerformPromiseAny(iteratorRecord, C,
+ // promiseCapability).
- // a. If iteratorRecord.[[Done]] is false, set result to
- // IteratorClose(iteratorRecord, result).
+ // 8. If result is an abrupt completion, then
- // b. IfAbruptRejectPromise(result, promiseCapability).
+ // a. If iteratorRecord.[[Done]] is false, set result to
+ // IteratorClose(iteratorRecord, result).
- // [Iterator closing handled by PerformPromiseAny]
+ // b. IfAbruptRejectPromise(result, promiseCapability).
- // 7. Return Completion(result).
- return PerformPromiseAny(iteratorRecord, constructor, capability)
- otherwise Reject;
- } catch (e) deferred {
- goto Reject(e);
- }
+ // [Iterator closing handled by PerformPromiseAny]
+
+ // 9. Return Completion(result).
+ return PerformPromiseAny(
+ nativeContext, iteratorRecord, constructor, capability,
+ promiseResolveFunction)
+ otherwise Reject;
+ } catch (e) deferred {
+ goto Reject(e);
} label Reject(e: Object) deferred {
// Exception must be bound to a JS value.
assert(e != TheHole);
@@ -361,10 +352,13 @@ PromiseAny(
}
transitioning macro ConstructAggregateError(implicit context: Context)(
- errorsArray: FixedArray): JSObject {
- const obj: JSAggregateError = error::ConstructInternalAggregateErrorHelper(
+ errors: FixedArray): JSObject {
+ const obj: JSObject = error::ConstructInternalAggregateErrorHelper(
context, SmiConstant(MessageTemplate::kAllPromisesRejected));
- obj.errors = errorsArray;
+ const errorsJSArray = array::CreateJSArrayWithElements(errors);
+ SetOwnPropertyIgnoreAttributes(
+ obj, ErrorsStringConstant(), errorsJSArray,
+ SmiConstant(PropertyAttributes::DONT_ENUM));
return obj;
}
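
The promise-any.tq change above stops pre-growing the errors list per element
and instead extends it only when a rejection actually lands in a slot beyond
the current capacity (newCapacity = IntPtrMax(remaining, index + 1)). A small
plain-C++ sketch of that deferred-growth shape (illustrative names, not V8
code):

    #include <algorithm>
    #include <cstddef>
    #include <vector>

    // Mirrors the ExtractFixedArray growth in PromiseAnyRejectElementClosure:
    // only resize when the store target or the remaining count demands it.
    void StoreError(std::vector<int>& errors, std::size_t index, int value,
                    std::size_t remaining) {
      std::size_t new_capacity = std::max(remaining, index + 1);
      if (new_capacity > errors.size()) errors.resize(new_capacity);
      errors[index] = value;
    }
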
diff --git a/chromium/v8/src/builtins/promise-race.tq b/chromium/v8/src/builtins/promise-race.tq
index 27d2038398a..26ffb7ae554 100644
--- a/chromium/v8/src/builtins/promise-race.tq
+++ b/chromium/v8/src/builtins/promise-race.tq
@@ -6,24 +6,6 @@
namespace promise {
-extern macro PromiseForwardingHandlerSymbolConstant(): Symbol;
-const kPromiseForwardingHandlerSymbol: Symbol =
- PromiseForwardingHandlerSymbolConstant();
-extern macro PromiseHandledBySymbolConstant(): Symbol;
-const kPromiseHandledBySymbol: Symbol = PromiseHandledBySymbolConstant();
-extern macro ResolveStringConstant(): String;
-const kResolveString: String = ResolveStringConstant();
-extern macro SetPropertyStrict(Context, Object, Object, Object): Object;
-extern macro IsPromiseResolveProtectorCellInvalid(): bool;
-
-macro IsPromiseResolveLookupChainIntact(implicit context: Context)(
- nativeContext: NativeContext, constructor: JSReceiver): bool {
- if (IsForceSlowPath()) return false;
- const promiseFun = UnsafeCast<JSFunction>(
- nativeContext[NativeContextSlot::PROMISE_FUNCTION_INDEX]);
- return promiseFun == constructor && !IsPromiseResolveProtectorCellInvalid();
-}
-
// https://tc39.es/ecma262/#sec-promise.race
transitioning javascript builtin
PromiseRace(
@@ -31,6 +13,8 @@ PromiseRace(
const receiver = Cast<JSReceiver>(receiver)
otherwise ThrowTypeError(MessageTemplate::kCalledOnNonObject, 'Promise.race');
+ const nativeContext = LoadNativeContext(context);
+
// Let promiseCapability be ? NewPromiseCapability(C).
// Don't fire debugEvent so that forwarding the rejection through all does
// not trigger redundant ExceptionEvents
@@ -39,6 +23,10 @@ PromiseRace(
const reject = capability.reject;
const promise = capability.promise;
+ // NewPromiseCapability guarantees that receiver is Constructor.
+ assert(Is<Constructor>(receiver));
+ const constructor = UnsafeCast<Constructor>(receiver);
+
// For catch prediction, don't treat the .then calls as handling it;
// instead, recurse outwards.
if (IsDebugActive()) deferred {
@@ -46,10 +34,15 @@ PromiseRace(
}
try {
- // Let iterator be GetIterator(iterable).
- // IfAbruptRejectPromise(iterator, promiseCapability).
+ let promiseResolveFunction: JSAny;
let i: iterator::IteratorRecord;
try {
+ // Let promiseResolve be GetPromiseResolve(C).
+ // IfAbruptRejectPromise(promiseResolve, promiseCapability).
+ promiseResolveFunction = GetPromiseResolve(nativeContext, constructor);
+
+ // Let iterator be GetIterator(iterable).
+ // IfAbruptRejectPromise(iterator, promiseCapability).
i = iterator::GetIterator(iterable);
} catch (e) deferred {
goto Reject(e);
@@ -57,24 +50,6 @@ PromiseRace(
// Let result be PerformPromiseRace(iteratorRecord, C, promiseCapability).
try {
- // We can skip the "resolve" lookup on {constructor} if it's the
- // Promise constructor and the Promise.resolve protector is intact,
- // as that guards the lookup path for the "resolve" property on the
- // Promise constructor.
- const nativeContext = LoadNativeContext(context);
- let promiseResolveFunction: JSAny = Undefined;
- if (!IsPromiseResolveLookupChainIntact(nativeContext, receiver))
- deferred {
- // 3. Let _promiseResolve_ be ? Get(_constructor_, `"resolve"`).
- const resolve = GetProperty(receiver, kResolveString);
-
- // 4. If IsCallable(_promiseResolve_) is *false*, throw a
- // *TypeError* exception.
- promiseResolveFunction = Cast<Callable>(resolve)
- otherwise ThrowTypeError(
- MessageTemplate::kCalledNonCallable, 'resolve');
- }
-
const fastIteratorResultMap = UnsafeCast<Map>(
nativeContext[NativeContextSlot::ITERATOR_RESULT_MAP_INDEX]);
while (true) {
@@ -96,9 +71,8 @@ PromiseRace(
}
// Let nextPromise be ? Call(constructor, _promiseResolve_, «
// nextValue »).
- const nextPromise = CallResolve(
- UnsafeCast<Constructor>(receiver), promiseResolveFunction,
- nextValue);
+ const nextPromise =
+ CallResolve(constructor, promiseResolveFunction, nextValue);
// Perform ? Invoke(nextPromise, "then", « resolveElement,
// resultCapability.[[Reject]] »).
diff --git a/chromium/v8/src/builtins/regexp.tq b/chromium/v8/src/builtins/regexp.tq
index 21577b47634..603b456661d 100644
--- a/chromium/v8/src/builtins/regexp.tq
+++ b/chromium/v8/src/builtins/regexp.tq
@@ -186,8 +186,7 @@ extern enum Flag constexpr 'JSRegExp::Flag' {
kMultiline,
kSticky,
kUnicode,
- kDotAll,
- kInvalid
+ kDotAll
}
const kRegExpPrototypeOldFlagGetter: constexpr int31
diff --git a/chromium/v8/src/builtins/setup-builtins-internal.cc b/chromium/v8/src/builtins/setup-builtins-internal.cc
index 4739e18c57f..d094c3f2ad4 100644
--- a/chromium/v8/src/builtins/setup-builtins-internal.cc
+++ b/chromium/v8/src/builtins/setup-builtins-internal.cc
@@ -2,8 +2,6 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "src/init/setup-isolate.h"
-
#include "src/builtins/builtins.h"
#include "src/codegen/assembler-inl.h"
#include "src/codegen/interface-descriptors.h"
@@ -12,7 +10,8 @@
#include "src/compiler/code-assembler.h"
#include "src/execution/isolate.h"
#include "src/handles/handles-inl.h"
-#include "src/heap/heap-inl.h" // For MemoryAllocator::code_range.
+#include "src/heap/heap-inl.h" // For Heap::code_range.
+#include "src/init/setup-isolate.h"
#include "src/interpreter/bytecodes.h"
#include "src/interpreter/interpreter-generator.h"
#include "src/interpreter/interpreter.h"
diff --git a/chromium/v8/src/builtins/torque-internal.tq b/chromium/v8/src/builtins/torque-internal.tq
index d2b107f932d..5a75f5ca726 100644
--- a/chromium/v8/src/builtins/torque-internal.tq
+++ b/chromium/v8/src/builtins/torque-internal.tq
@@ -186,6 +186,28 @@ macro StoreFloat64OrHole(r:&float64_or_hole, value: float64_or_hole) {
* unsafe::NewReference<float64>(r.object, r.offset) = value.value;
}
}
+
+macro DownCastForTorqueClass<T : type extends HeapObject>(o: HeapObject):
+ T labels CastError {
+ const map = o.map;
+ const minInstanceType = %MinInstanceType<T>();
+ const maxInstanceType = %MaxInstanceType<T>();
+ if constexpr (minInstanceType == maxInstanceType) {
+ if constexpr (%ClassHasMapConstant<T>()) {
+ if (map != %GetClassMapConstant<T>()) goto CastError;
+ } else {
+ if (map.instance_type != minInstanceType) goto CastError;
+ }
+ } else {
+ const diff: int32 = maxInstanceType - minInstanceType;
+ const offset = Convert<int32>(Convert<uint16>(map.instance_type)) -
+ Convert<int32>(Convert<uint16>(
+ FromConstexpr<InstanceType>(minInstanceType)));
+ if (Unsigned(offset) > Unsigned(diff)) goto CastError;
+ }
+ return %RawDownCast<T>(o);
+}
+
} // namespace torque_internal
// Indicates that an array-field should not be initialized.
@@ -198,6 +220,12 @@ struct UninitializedIterator {}
intrinsic %RawDownCast<To: type, From: type>(x: From): To;
intrinsic %RawConstexprCast<To: type, From: type>(f: From): To;
+intrinsic %MinInstanceType<T: type>(): constexpr InstanceType;
+intrinsic %MaxInstanceType<T: type>(): constexpr InstanceType;
+
+intrinsic %ClassHasMapConstant<T: type>(): constexpr bool;
+intrinsic %GetClassMapConstant<T: type>(): Map;
+
struct IteratorSequence<T: type, FirstIterator: type, SecondIterator: type> {
macro Empty(): bool {
return this.first.Empty() && this.second.Empty();
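
DownCastForTorqueClass above folds the two-sided instance-type range test into
a single unsigned comparison: computing 'instance_type - minInstanceType' and
comparing it against 'max - min' as unsigned values rejects everything below
the minimum via wrap-around. A standalone C++ sketch of the same trick (not V8
code):

    #include <cassert>
    #include <cstdint>

    bool InstanceTypeInRange(int32_t t, int32_t min, int32_t max) {
      uint32_t offset = static_cast<uint32_t>(t - min);  // below min wraps to a huge value
      uint32_t diff = static_cast<uint32_t>(max - min);
      return offset <= diff;                             // one branch for min <= t <= max
    }

    int main() {
      assert(InstanceTypeInRange(5, 3, 9));
      assert(!InstanceTypeInRange(2, 3, 9));
      assert(!InstanceTypeInRange(10, 3, 9));
      return 0;
    }
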
diff --git a/chromium/v8/src/builtins/typed-array-entries.tq b/chromium/v8/src/builtins/typed-array-entries.tq
new file mode 100644
index 00000000000..6749a14e90a
--- /dev/null
+++ b/chromium/v8/src/builtins/typed-array-entries.tq
@@ -0,0 +1,27 @@
+// Copyright 2019 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include 'src/builtins/builtins-typed-array-gen.h'
+
+namespace typed_array {
+const kBuiltinNameEntries: constexpr string = '%TypedArray%.prototype.entries';
+
+// %TypedArray%.entries ()
+// https://tc39.github.io/ecma262/#sec-%typedarray%.entries
+transitioning javascript builtin
+TypedArrayPrototypeEntries(js-implicit context: NativeContext, receiver: JSAny)(
+ ...arguments): JSArrayIterator {
+ try {
+ const array: JSTypedArray = Cast<JSTypedArray>(receiver)
+ otherwise NotTypedArray;
+
+ EnsureAttached(array) otherwise IsDetached;
+ return CreateArrayIterator(array, IterationKind::kEntries);
+ } label NotTypedArray deferred {
+ ThrowTypeError(MessageTemplate::kNotTypedArray, kBuiltinNameEntries);
+ } label IsDetached deferred {
+ ThrowTypeError(MessageTemplate::kDetachedOperation, kBuiltinNameEntries);
+ }
+}
+}
diff --git a/chromium/v8/src/builtins/typed-array-keys.tq b/chromium/v8/src/builtins/typed-array-keys.tq
new file mode 100644
index 00000000000..24c53c71052
--- /dev/null
+++ b/chromium/v8/src/builtins/typed-array-keys.tq
@@ -0,0 +1,27 @@
+// Copyright 2019 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include 'src/builtins/builtins-typed-array-gen.h'
+
+namespace typed_array {
+const kBuiltinNameKeys: constexpr string = '%TypedArray%.prototype.keys';
+
+// %TypedArray%.keys ()
+// https://tc39.github.io/ecma262/#sec-%typedarray%.keys
+transitioning javascript builtin
+TypedArrayPrototypeKeys(js-implicit context: NativeContext, receiver: JSAny)(
+ ...arguments): JSArrayIterator {
+ try {
+ const array: JSTypedArray = Cast<JSTypedArray>(receiver)
+ otherwise NotTypedArray;
+
+ EnsureAttached(array) otherwise IsDetached;
+ return CreateArrayIterator(array, IterationKind::kKeys);
+ } label NotTypedArray deferred {
+ ThrowTypeError(MessageTemplate::kNotTypedArray, kBuiltinNameKeys);
+ } label IsDetached deferred {
+ ThrowTypeError(MessageTemplate::kDetachedOperation, kBuiltinNameKeys);
+ }
+}
+}
diff --git a/chromium/v8/src/builtins/typed-array-values.tq b/chromium/v8/src/builtins/typed-array-values.tq
new file mode 100644
index 00000000000..a60aaaf707f
--- /dev/null
+++ b/chromium/v8/src/builtins/typed-array-values.tq
@@ -0,0 +1,27 @@
+// Copyright 2019 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include 'src/builtins/builtins-typed-array-gen.h'
+
+namespace typed_array {
+const kBuiltinNameValues: constexpr string = '%TypedArray%.prototype.values';
+
+// %TypedArray%.values ()
+// https://tc39.github.io/ecma262/#sec-%typedarray%.values
+transitioning javascript builtin
+TypedArrayPrototypeValues(js-implicit context: NativeContext, receiver: JSAny)(
+ ...arguments): JSArrayIterator {
+ try {
+ const array: JSTypedArray = Cast<JSTypedArray>(receiver)
+ otherwise NotTypedArray;
+
+ EnsureAttached(array) otherwise IsDetached;
+ return CreateArrayIterator(array, IterationKind::kValues);
+ } label NotTypedArray deferred {
+ ThrowTypeError(MessageTemplate::kNotTypedArray, kBuiltinNameValues);
+ } label IsDetached deferred {
+ ThrowTypeError(MessageTemplate::kDetachedOperation, kBuiltinNameValues);
+ }
+}
+}
diff --git a/chromium/v8/src/builtins/wasm.tq b/chromium/v8/src/builtins/wasm.tq
index 097e39d430e..690df7afecc 100644
--- a/chromium/v8/src/builtins/wasm.tq
+++ b/chromium/v8/src/builtins/wasm.tq
@@ -7,6 +7,10 @@
namespace runtime {
extern runtime WasmMemoryGrow(Context, WasmInstanceObject, Smi): Smi;
extern runtime WasmRefFunc(Context, WasmInstanceObject, Smi): JSAny;
+extern runtime WasmTableInit(
+ Context, WasmInstanceObject, Object, Object, Smi, Smi, Smi): JSAny;
+extern runtime WasmTableCopy(
+ Context, WasmInstanceObject, Object, Object, Smi, Smi, Smi): JSAny;
extern runtime WasmFunctionTableGet(
Context, WasmInstanceObject, Smi, Smi): JSAny;
extern runtime WasmFunctionTableSet(
@@ -17,11 +21,24 @@ extern runtime ReThrow(Context, Object): JSAny;
extern runtime WasmStackGuard(Context): JSAny;
extern runtime ThrowWasmStackOverflow(Context): JSAny;
extern runtime WasmTraceMemory(Context, Smi): JSAny;
+extern runtime WasmTraceEnter(Context): JSAny;
+extern runtime WasmTraceExit(Context, Smi): JSAny;
+extern runtime WasmAtomicNotify(
+ Context, WasmInstanceObject, Number, Number): Smi;
+extern runtime WasmI32AtomicWait(
+ Context, WasmInstanceObject, Number, Number, BigInt): Smi;
+extern runtime WasmI64AtomicWait(
+ Context, WasmInstanceObject, Number, BigInt, BigInt): Smi;
+}
+
+namespace unsafe {
+extern macro TimesTaggedSize(intptr): intptr;
+extern macro Allocate(intptr): HeapObject;
}
namespace wasm {
const kFuncTableType:
- constexpr int31 generates 'wasm::ValueType::Kind::kFuncRef';
+ constexpr int31 generates 'wasm::HeapType::kHeapFunc';
extern macro WasmBuiltinsAssembler::LoadInstanceFromFrame(): WasmInstanceObject;
@@ -33,6 +50,8 @@ extern macro WasmBuiltinsAssembler::LoadTablesFromInstance(WasmInstanceObject):
FixedArray;
extern macro WasmBuiltinsAssembler::LoadExternalFunctionsFromInstance(
WasmInstanceObject): FixedArray;
+extern macro WasmBuiltinsAssembler::LoadManagedObjectMapsFromInstance(
+ WasmInstanceObject): FixedArray;
macro LoadContextFromFrame(): NativeContext {
return LoadContextFromInstance(LoadInstanceFromFrame());
@@ -61,6 +80,38 @@ builtin WasmMemoryGrow(numPages: int32): int32 {
return SmiToInt32(result);
}
+builtin WasmTableInit(
+ dstRaw: uint32, srcRaw: uint32, sizeRaw: uint32, tableIndex: Smi,
+ segmentIndex: Smi): JSAny {
+ try {
+ const instance: WasmInstanceObject = LoadInstanceFromFrame();
+ const dst: Smi = Convert<PositiveSmi>(dstRaw) otherwise TableOutOfBounds;
+ const src: Smi = Convert<PositiveSmi>(srcRaw) otherwise TableOutOfBounds;
+ const size: Smi = Convert<PositiveSmi>(sizeRaw) otherwise TableOutOfBounds;
+ tail runtime::WasmTableInit(
+ LoadContextFromInstance(instance), instance, tableIndex, segmentIndex,
+ dst, src, size);
+ } label TableOutOfBounds deferred {
+ tail ThrowWasmTrapTableOutOfBounds();
+ }
+}
+
+builtin WasmTableCopy(
+ dstRaw: uint32, srcRaw: uint32, sizeRaw: uint32, dstTable: Smi,
+ srcTable: Smi): JSAny {
+ try {
+ const instance: WasmInstanceObject = LoadInstanceFromFrame();
+ const dst: Smi = Convert<PositiveSmi>(dstRaw) otherwise TableOutOfBounds;
+ const src: Smi = Convert<PositiveSmi>(srcRaw) otherwise TableOutOfBounds;
+ const size: Smi = Convert<PositiveSmi>(sizeRaw) otherwise TableOutOfBounds;
+ tail runtime::WasmTableCopy(
+ LoadContextFromInstance(instance), instance, dstTable, srcTable, dst,
+ src, size);
+ } label TableOutOfBounds deferred {
+ tail ThrowWasmTrapTableOutOfBounds();
+ }
+}
+
builtin WasmTableGet(tableIndex: intptr, index: int32): Object {
const instance: WasmInstanceObject = LoadInstanceFromFrame();
const entryIndex: intptr = ChangeInt32ToIntPtr(index);
@@ -145,7 +196,7 @@ builtin WasmThrow(exception: Object): JSAny {
}
builtin WasmRethrow(exception: Object): JSAny {
- if (exception == Null) tail ThrowWasmTrapRethrowNullRef();
+ if (exception == Null) tail ThrowWasmTrapRethrowNull();
tail runtime::ReThrow(LoadContextFromFrame(), exception);
}
@@ -161,11 +212,77 @@ builtin WasmTraceMemory(info: Smi): JSAny {
tail runtime::WasmTraceMemory(LoadContextFromFrame(), info);
}
+builtin WasmTraceEnter(): JSAny {
+ tail runtime::WasmTraceEnter(LoadContextFromFrame());
+}
+
+builtin WasmTraceExit(info: Smi): JSAny {
+ tail runtime::WasmTraceExit(LoadContextFromFrame(), info);
+}
+
builtin WasmAllocateJSArray(implicit context: Context)(size: Smi): JSArray {
const map: Map = GetFastPackedElementsJSArrayMap();
return AllocateJSArray(ElementsKind::PACKED_ELEMENTS, map, size, size);
}
+builtin WasmAllocateStruct(implicit context: Context)(mapIndex: Smi):
+ HeapObject {
+ const instance: WasmInstanceObject = LoadInstanceFromFrame();
+ const maps: FixedArray = LoadManagedObjectMapsFromInstance(instance);
+ const map: Map = %RawDownCast<Map>(LoadFixedArrayElement(maps, mapIndex));
+ const instanceSize: intptr =
+ unsafe::TimesTaggedSize(Convert<intptr>(map.instance_size_in_words));
+ const result: HeapObject = unsafe::Allocate(instanceSize);
+ * UnsafeConstCast(& result.map) = map;
+ return result;
+}
+
+builtin WasmInt32ToNumber(value: int32): Number {
+ return ChangeInt32ToTagged(value);
+}
+
+builtin WasmUint32ToNumber(value: uint32): Number {
+ return ChangeUint32ToTagged(value);
+}
+
+extern builtin I64ToBigInt(intptr): BigInt;
+
+builtin WasmAtomicNotify(address: uint32, count: uint32): uint32 {
+ const instance: WasmInstanceObject = LoadInstanceFromFrame();
+ const result: Smi = runtime::WasmAtomicNotify(
+ LoadContextFromInstance(instance), instance, WasmUint32ToNumber(address),
+ WasmUint32ToNumber(count));
+ return Unsigned(SmiToInt32(result));
+}
+
+builtin WasmI32AtomicWait64(
+ address: uint32, expectedValue: int32, timeout: intptr): uint32 {
+ if constexpr (Is64()) {
+ const instance: WasmInstanceObject = LoadInstanceFromFrame();
+ const result: Smi = runtime::WasmI32AtomicWait(
+ LoadContextFromInstance(instance), instance,
+ WasmUint32ToNumber(address), WasmInt32ToNumber(expectedValue),
+ I64ToBigInt(timeout));
+ return Unsigned(SmiToInt32(result));
+ } else {
+ unreachable;
+ }
+}
+
+builtin WasmI64AtomicWait64(
+ address: uint32, expectedValue: intptr, timeout: intptr): uint32 {
+ if constexpr (Is64()) {
+ const instance: WasmInstanceObject = LoadInstanceFromFrame();
+ const result: Smi = runtime::WasmI64AtomicWait(
+ LoadContextFromInstance(instance), instance,
+ WasmUint32ToNumber(address), I64ToBigInt(expectedValue),
+ I64ToBigInt(timeout));
+ return Unsigned(SmiToInt32(result));
+ } else {
+ unreachable;
+ }
+}
+
extern macro TryHasOwnProperty(HeapObject, Map, InstanceType, Name): never
labels Found, NotFound, Bailout;
type OnNonExistent constexpr 'OnNonExistent';
@@ -251,12 +368,12 @@ builtin ThrowWasmTrapTableOutOfBounds(): JSAny {
tail WasmTrap(SmiConstant(MessageTemplate::kWasmTrapTableOutOfBounds));
}
-builtin ThrowWasmTrapBrOnExnNullRef(): JSAny {
- tail WasmTrap(SmiConstant(MessageTemplate::kWasmTrapBrOnExnNullRef));
+builtin ThrowWasmTrapBrOnExnNull(): JSAny {
+ tail WasmTrap(SmiConstant(MessageTemplate::kWasmTrapBrOnExnNull));
}
-builtin ThrowWasmTrapRethrowNullRef(): JSAny {
- tail WasmTrap(SmiConstant(MessageTemplate::kWasmTrapRethrowNullRef));
+builtin ThrowWasmTrapRethrowNull(): JSAny {
+ tail WasmTrap(SmiConstant(MessageTemplate::kWasmTrapRethrowNull));
}
builtin ThrowWasmTrapNullDereference(): JSAny {
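
WasmTableInit and WasmTableCopy above funnel their uint32 arguments through
Convert<PositiveSmi>, trapping via the TableOutOfBounds label when a value does
not fit in a non-negative Smi. A rough C++ sketch of that validation, assuming
31-bit Smi payloads (configurations with full-size Smis allow a larger bound;
not V8 code):

    #include <cstdint>
    #include <optional>

    constexpr uint32_t kSmiMaxValue = (1u << 30) - 1;  // assumed 31-bit Smi payload

    // Returns nullopt where the builtin would take the TableOutOfBounds label.
    std::optional<int32_t> ToPositiveSmi(uint32_t raw) {
      if (raw > kSmiMaxValue) return std::nullopt;
      return static_cast<int32_t>(raw);
    }
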
diff --git a/chromium/v8/src/codegen/arm/assembler-arm.cc b/chromium/v8/src/codegen/arm/assembler-arm.cc
index 9032714f574..343cc5f2ded 100644
--- a/chromium/v8/src/codegen/arm/assembler-arm.cc
+++ b/chromium/v8/src/codegen/arm/assembler-arm.cc
@@ -3892,7 +3892,7 @@ void Assembler::vcvt_u32_f32(QwNeonRegister dst, QwNeonRegister src) {
emit(EncodeNeonVCVT(U32, dst, F32, src));
}
-enum UnaryOp { VMVN, VSWP, VABS, VABSF, VNEG, VNEGF };
+enum UnaryOp { VMVN, VSWP, VABS, VABSF, VNEG, VNEGF, VRINTM, VRINTP, VRINTZ };
static Instr EncodeNeonUnaryOp(UnaryOp op, NeonRegType reg_type, NeonSize size,
int dst_code, int src_code) {
@@ -3920,6 +3920,15 @@ static Instr EncodeNeonUnaryOp(UnaryOp op, NeonRegType reg_type, NeonSize size,
DCHECK_EQ(Neon32, size);
op_encoding = B16 | B10 | 0x7 * B7;
break;
+ case VRINTM:
+ op_encoding = B17 | 0xD * B7;
+ break;
+ case VRINTP:
+ op_encoding = B17 | 0xF * B7;
+ break;
+ case VRINTZ:
+ op_encoding = B17 | 0xB * B7;
+ break;
default:
UNREACHABLE();
}
@@ -4575,6 +4584,30 @@ void Assembler::vpmax(NeonDataType dt, DwVfpRegister dst, DwVfpRegister src1,
emit(EncodeNeonPairwiseOp(VPMAX, dt, dst, src1, src2));
}
+void Assembler::vrintm(NeonDataType dt, const QwNeonRegister dst,
+ const QwNeonRegister src) {
+ // SIMD vector round floating-point to integer towards -Infinity.
+ // See ARM DDI 0487F.b, F6-5493.
+ DCHECK(IsEnabled(ARMv8));
+ emit(EncodeNeonUnaryOp(VRINTM, NEON_Q, NeonSize(dt), dst.code(), src.code()));
+}
+
+void Assembler::vrintp(NeonDataType dt, const QwNeonRegister dst,
+ const QwNeonRegister src) {
+ // SIMD vector round floating-point to integer towards +Infinity.
+ // See ARM DDI 0487F.b, F6-5501.
+ DCHECK(IsEnabled(ARMv8));
+ emit(EncodeNeonUnaryOp(VRINTP, NEON_Q, NeonSize(dt), dst.code(), src.code()));
+}
+
+void Assembler::vrintz(NeonDataType dt, const QwNeonRegister dst,
+ const QwNeonRegister src) {
+ // SIMD vector round floating-point to integer towards Zero.
+ // See ARM DDI 0487F.b, F6-5511.
+ DCHECK(IsEnabled(ARMv8));
+ emit(EncodeNeonUnaryOp(VRINTZ, NEON_Q, NeonSize(dt), dst.code(), src.code()));
+}
+
void Assembler::vtst(NeonSize size, QwNeonRegister dst, QwNeonRegister src1,
QwNeonRegister src2) {
DCHECK(IsEnabled(NEON));
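
The new VRINTM/VRINTP/VRINTZ cases in EncodeNeonUnaryOp above differ only in
the per-op bits ORed into the instruction word. A tiny C++ sketch that just
reproduces that bit arithmetic, assuming the assembler's B<n> constants are
1 << n (not a full encoder, not V8 code):

    #include <cstdint>
    #include <cstdio>

    constexpr uint32_t B7 = 1u << 7;
    constexpr uint32_t B17 = 1u << 17;

    int main() {
      uint32_t vrintm = B17 | 0xD * B7;  // round toward -Infinity
      uint32_t vrintp = B17 | 0xF * B7;  // round toward +Infinity
      uint32_t vrintz = B17 | 0xB * B7;  // round toward zero
      std::printf("VRINTM=%#x VRINTP=%#x VRINTZ=%#x\n", vrintm, vrintp, vrintz);
      return 0;
    }
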
diff --git a/chromium/v8/src/codegen/arm/assembler-arm.h b/chromium/v8/src/codegen/arm/assembler-arm.h
index 61205760df0..d344b53dbf2 100644
--- a/chromium/v8/src/codegen/arm/assembler-arm.h
+++ b/chromium/v8/src/codegen/arm/assembler-arm.h
@@ -820,7 +820,7 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
void vsqrt(const SwVfpRegister dst, const SwVfpRegister src,
const Condition cond = al);
- // ARMv8 rounding instructions.
+ // ARMv8 rounding instructions (Scalar).
void vrinta(const SwVfpRegister dst, const SwVfpRegister src);
void vrinta(const DwVfpRegister dst, const DwVfpRegister src);
void vrintn(const SwVfpRegister dst, const SwVfpRegister src);
@@ -908,6 +908,15 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
DwVfpRegister src2);
void vpmax(NeonDataType dt, DwVfpRegister dst, DwVfpRegister src1,
DwVfpRegister src2);
+
+ // ARMv8 rounding instructions (NEON).
+ void vrintm(NeonDataType dt, const QwNeonRegister dst,
+ const QwNeonRegister src);
+ void vrintp(NeonDataType dt, const QwNeonRegister dst,
+ const QwNeonRegister src);
+ void vrintz(NeonDataType dt, const QwNeonRegister dst,
+ const QwNeonRegister src);
+
void vshl(NeonDataType dt, QwNeonRegister dst, QwNeonRegister src, int shift);
void vshl(NeonDataType dt, QwNeonRegister dst, QwNeonRegister src,
QwNeonRegister shift);
diff --git a/chromium/v8/src/codegen/arm/interface-descriptors-arm.cc b/chromium/v8/src/codegen/arm/interface-descriptors-arm.cc
index 5a4e08dc77c..09b80af2d49 100644
--- a/chromium/v8/src/codegen/arm/interface-descriptors-arm.cc
+++ b/chromium/v8/src/codegen/arm/interface-descriptors-arm.cc
@@ -191,11 +191,6 @@ void AbortDescriptor::InitializePlatformSpecific(
data->InitializePlatformSpecific(arraysize(registers), registers);
}
-void AllocateHeapNumberDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- data->InitializePlatformSpecific(0, nullptr);
-}
-
void CompareDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
Register registers[] = {r1, r0};
@@ -295,6 +290,30 @@ void CallTrampoline_WithFeedbackDescriptor::InitializePlatformSpecific(
DefaultInitializePlatformSpecific(data, 4);
}
+void CallWithArrayLike_WithFeedbackDescriptor::InitializePlatformSpecific(
+ CallInterfaceDescriptorData* data) {
+ // TODO(v8:8888): Implement on this platform.
+ DefaultInitializePlatformSpecific(data, 4);
+}
+
+void CallWithSpread_WithFeedbackDescriptor::InitializePlatformSpecific(
+ CallInterfaceDescriptorData* data) {
+ // TODO(v8:8888): Implement on this platform.
+ DefaultInitializePlatformSpecific(data, 4);
+}
+
+void ConstructWithArrayLike_WithFeedbackDescriptor::InitializePlatformSpecific(
+ CallInterfaceDescriptorData* data) {
+ // TODO(v8:8888): Implement on this platform.
+ DefaultInitializePlatformSpecific(data, 4);
+}
+
+void ConstructWithSpread_WithFeedbackDescriptor::InitializePlatformSpecific(
+ CallInterfaceDescriptorData* data) {
+ // TODO(v8:8888): Implement on this platform.
+ DefaultInitializePlatformSpecific(data, 4);
+}
+
void Compare_WithFeedbackDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
// TODO(v8:8888): Implement on this platform.
diff --git a/chromium/v8/src/codegen/arm/macro-assembler-arm.cc b/chromium/v8/src/codegen/arm/macro-assembler-arm.cc
index 7e5fa8cef1c..7b9e73e1d91 100644
--- a/chromium/v8/src/codegen/arm/macro-assembler-arm.cc
+++ b/chromium/v8/src/codegen/arm/macro-assembler-arm.cc
@@ -2455,7 +2455,7 @@ void TurboAssembler::CheckPageFlag(Register object, int mask, Condition cc,
Register scratch = temps.Acquire();
DCHECK(cc == eq || cc == ne);
Bfc(scratch, object, 0, kPageSizeBits);
- ldr(scratch, MemOperand(scratch, MemoryChunk::kFlagsOffset));
+ ldr(scratch, MemOperand(scratch, BasicMemoryChunk::kFlagsOffset));
tst(scratch, Operand(mask));
b(cc, condition_met);
}
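
The CheckPageFlag change above only swaps the header type; the address math is
unchanged: clear the low kPageSizeBits of the object address to reach the start
of its memory chunk, then read the flags word at a fixed offset. A standalone
C++ sketch with placeholder constants (not V8's actual values):

    #include <cstdint>

    constexpr unsigned kPageSizeBits = 18;                                    // assumed
    constexpr uintptr_t kPageAlignmentMask = (uintptr_t{1} << kPageSizeBits) - 1;
    constexpr uintptr_t kFlagsOffset = 0x8;                                   // assumed

    // Bfc(scratch, object, 0, kPageSizeBits) clears the low bits; the load then
    // reads BasicMemoryChunk::kFlagsOffset relative to the chunk base.
    uintptr_t FlagsAddressFor(uintptr_t object_address) {
      uintptr_t chunk_base = object_address & ~kPageAlignmentMask;
      return chunk_base + kFlagsOffset;
    }
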
diff --git a/chromium/v8/src/codegen/arm64/assembler-arm64.cc b/chromium/v8/src/codegen/arm64/assembler-arm64.cc
index 97a57d6f3c6..2e21ab913d7 100644
--- a/chromium/v8/src/codegen/arm64/assembler-arm64.cc
+++ b/chromium/v8/src/codegen/arm64/assembler-arm64.cc
@@ -41,19 +41,66 @@
namespace v8 {
namespace internal {
+namespace {
+
+#ifdef USE_SIMULATOR
+static unsigned SimulatorFeaturesFromCommandLine() {
+ if (strcmp(FLAG_sim_arm64_optional_features, "none") == 0) {
+ return 0;
+ }
+ if (strcmp(FLAG_sim_arm64_optional_features, "all") == 0) {
+ return (1u << NUMBER_OF_CPU_FEATURES) - 1;
+ }
+ fprintf(
+ stderr,
+ "Error: unrecognised value for --sim-arm64-optional-features ('%s').\n",
+ FLAG_sim_arm64_optional_features);
+ fprintf(stderr,
+ "Supported values are: none\n"
+ " all\n");
+ FATAL("sim-arm64-optional-features");
+}
+#endif // USE_SIMULATOR
+
+static constexpr unsigned CpuFeaturesFromCompiler() {
+ unsigned features = 0;
+#if defined(__ARM_FEATURE_JCVT)
+ features |= 1u << JSCVT;
+#endif
+ return features;
+}
+
+} // namespace
+
// -----------------------------------------------------------------------------
// CpuFeatures implementation.
void CpuFeatures::ProbeImpl(bool cross_compile) {
- // AArch64 has no configuration options, no further probing is required.
- supported_ = 0;
-
// Only use statically determined features for cross compile (snapshot).
- if (cross_compile) return;
+ if (cross_compile) {
+ supported_ |= CpuFeaturesFromCompiler();
+ return;
+ }
// We used to probe for coherent cache support, but on older CPUs it
// causes crashes (crbug.com/524337), and newer CPUs don't even have
// the feature any more.
+
+#ifdef USE_SIMULATOR
+ supported_ |= SimulatorFeaturesFromCommandLine();
+#else
+ // Probe for additional features at runtime.
+ base::CPU cpu;
+ unsigned runtime = 0;
+ if (cpu.has_jscvt()) {
+ runtime |= 1u << JSCVT;
+ }
+
+ // Use the best of the features found by CPU detection and those inferred from
+ // the build system.
+ supported_ |= CpuFeaturesFromCompiler();
+ supported_ |= runtime;
+#endif // USE_SIMULATOR
}
void CpuFeatures::PrintTarget() {}
@@ -1115,10 +1162,10 @@ void Assembler::cls(const Register& rd, const Register& rn) {
DataProcessing1Source(rd, rn, CLS);
}
-void Assembler::pacia1716() { Emit(PACIA1716); }
-void Assembler::autia1716() { Emit(AUTIA1716); }
-void Assembler::paciasp() { Emit(PACIASP); }
-void Assembler::autiasp() { Emit(AUTIASP); }
+void Assembler::pacib1716() { Emit(PACIB1716); }
+void Assembler::autib1716() { Emit(AUTIB1716); }
+void Assembler::pacibsp() { Emit(PACIBSP); }
+void Assembler::autibsp() { Emit(AUTIBSP); }
void Assembler::bti(BranchTargetIdentifier id) {
SystemHint op;
@@ -1136,9 +1183,9 @@ void Assembler::bti(BranchTargetIdentifier id) {
op = BTI_jc;
break;
case BranchTargetIdentifier::kNone:
- case BranchTargetIdentifier::kPaciasp:
+ case BranchTargetIdentifier::kPacibsp:
// We always want to generate a BTI instruction here, so disallow
- // skipping its generation or generating a PACIASP instead.
+ // skipping its generation or generating a PACIBSP instead.
UNREACHABLE();
}
hint(op);
@@ -2714,6 +2761,11 @@ void Assembler::fcvtxn2(const VRegister& vd, const VRegister& vn) {
Emit(NEON_Q | format | NEON_FCVTXN | Rn(vn) | Rd(vd));
}
+void Assembler::fjcvtzs(const Register& rd, const VRegister& vn) {
+ DCHECK(rd.IsW() && vn.Is1D());
+ Emit(FJCVTZS | Rn(vn) | Rd(rd));
+}
+
#define NEON_FP2REGMISC_FCVT_LIST(V) \
V(fcvtnu, NEON_FCVTNU, FCVTNU) \
V(fcvtns, NEON_FCVTNS, FCVTNS) \
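
The reworked CpuFeatures::ProbeImpl above ends up with the union of the
compiler-guaranteed features (e.g. __ARM_FEATURE_JCVT) and whatever runtime
detection finds, falling back to the static set alone when cross-compiling a
snapshot. A condensed C++ sketch of that flow; the HasJscvt() stub stands in
for base::CPU::has_jscvt() (not V8 code):

    #include <cstdint>

    enum CpuFeature : unsigned { JSCVT = 0, NUMBER_OF_CPU_FEATURES };

    constexpr unsigned FeaturesFromCompiler() {
      unsigned features = 0;
    #if defined(__ARM_FEATURE_JCVT)
      features |= 1u << JSCVT;
    #endif
      return features;
    }

    bool HasJscvt() { return false; }  // placeholder for runtime CPU detection

    unsigned ProbeFeatures(bool cross_compile) {
      unsigned supported = FeaturesFromCompiler();
      if (cross_compile) return supported;       // snapshot: static features only
      if (HasJscvt()) supported |= 1u << JSCVT;  // add runtime-detected features
      return supported;
    }
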
diff --git a/chromium/v8/src/codegen/arm64/assembler-arm64.h b/chromium/v8/src/codegen/arm64/assembler-arm64.h
index a9e8a5e85ad..f787bad464f 100644
--- a/chromium/v8/src/codegen/arm64/assembler-arm64.h
+++ b/chromium/v8/src/codegen/arm64/assembler-arm64.h
@@ -780,21 +780,21 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
void clz(const Register& rd, const Register& rn);
void cls(const Register& rd, const Register& rn);
- // Pointer Authentication Code for Instruction address, using key A, with
+ // Pointer Authentication Code for Instruction address, using key B, with
// address in x17 and modifier in x16 [Armv8.3].
- void pacia1716();
+ void pacib1716();
- // Pointer Authentication Code for Instruction address, using key A, with
+ // Pointer Authentication Code for Instruction address, using key B, with
// address in LR and modifier in SP [Armv8.3].
- void paciasp();
+ void pacibsp();
- // Authenticate Instruction address, using key A, with address in x17 and
+ // Authenticate Instruction address, using key B, with address in x17 and
// modifier in x16 [Armv8.3].
- void autia1716();
+ void autib1716();
- // Authenticate Instruction address, using key A, with address in LR and
+ // Authenticate Instruction address, using key B, with address in LR and
// modifier in SP [Armv8.3].
- void autiasp();
+ void autibsp();
// Memory instructions.
@@ -1750,6 +1750,9 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
// FP convert to signed integer, nearest with ties to even.
void fcvtns(const Register& rd, const VRegister& vn);
+ // FP JavaScript convert to signed integer, rounding toward zero [Armv8.3].
+ void fjcvtzs(const Register& rd, const VRegister& vn);
+
// FP convert to unsigned integer, nearest with ties to even.
void fcvtnu(const Register& rd, const VRegister& vn);
diff --git a/chromium/v8/src/codegen/arm64/constants-arm64.h b/chromium/v8/src/codegen/arm64/constants-arm64.h
index e63962993a7..52790b9faf4 100644
--- a/chromium/v8/src/codegen/arm64/constants-arm64.h
+++ b/chromium/v8/src/codegen/arm64/constants-arm64.h
@@ -412,9 +412,9 @@ enum class BranchTargetIdentifier {
// Emit a "BTI jc" instruction, which is a combination of "BTI j" and "BTI c".
kBtiJumpCall,
- // Emit a PACIASP instruction, which acts like a "BTI c" or a "BTI jc", based
- // on the value of SCTLR_EL1.BT0.
- kPaciasp
+ // Emit a PACIBSP instruction, which acts like a "BTI c" or a "BTI jc",
+ // based on the value of SCTLR_EL1.BT0.
+ kPacibsp
};
enum BarrierDomain {
@@ -793,10 +793,10 @@ enum SystemPAuthOp : uint32_t {
SystemPAuthFixed = 0xD503211F,
SystemPAuthFMask = 0xFFFFFD1F,
SystemPAuthMask = 0xFFFFFFFF,
- PACIA1716 = SystemPAuthFixed | 0x00000100,
- AUTIA1716 = SystemPAuthFixed | 0x00000180,
- PACIASP = SystemPAuthFixed | 0x00000320,
- AUTIASP = SystemPAuthFixed | 0x000003A0
+ PACIB1716 = SystemPAuthFixed | 0x00000140,
+ AUTIB1716 = SystemPAuthFixed | 0x000001C0,
+ PACIBSP = SystemPAuthFixed | 0x00000360,
+ AUTIBSP = SystemPAuthFixed | 0x000003E0
};
// Any load or store (including pair).
@@ -1325,7 +1325,8 @@ enum FPIntegerConvertOp : uint32_t {
FMOV_xd = FMOV_ws | SixtyFourBits | FP64,
FMOV_dx = FMOV_sw | SixtyFourBits | FP64,
FMOV_d1_x = FPIntegerConvertFixed | SixtyFourBits | 0x008F0000,
- FMOV_x_d1 = FPIntegerConvertFixed | SixtyFourBits | 0x008E0000
+ FMOV_x_d1 = FPIntegerConvertFixed | SixtyFourBits | 0x008E0000,
+ FJCVTZS = FPIntegerConvertFixed | FP64 | 0x001E0000
};
// Conversion between fixed point and floating point.
diff --git a/chromium/v8/src/codegen/arm64/decoder-arm64-inl.h b/chromium/v8/src/codegen/arm64/decoder-arm64-inl.h
index 25d69b38983..1a7d483dea9 100644
--- a/chromium/v8/src/codegen/arm64/decoder-arm64-inl.h
+++ b/chromium/v8/src/codegen/arm64/decoder-arm64-inl.h
@@ -538,7 +538,6 @@ void Decoder<V>::DecodeFP(Instruction* instr) {
(instr->Mask(0x20C60000) == 0x00840000) ||
(instr->Mask(0xA0C60000) == 0x80060000) ||
(instr->Mask(0xA0C60000) == 0x00860000) ||
- (instr->Mask(0xA0C60000) == 0x00460000) ||
(instr->Mask(0xA0CE0000) == 0x80860000) ||
(instr->Mask(0xA0CE0000) == 0x804E0000) ||
(instr->Mask(0xA0CE0000) == 0x000E0000) ||
diff --git a/chromium/v8/src/codegen/arm64/interface-descriptors-arm64.cc b/chromium/v8/src/codegen/arm64/interface-descriptors-arm64.cc
index 9f059224449..2c60ea2ec04 100644
--- a/chromium/v8/src/codegen/arm64/interface-descriptors-arm64.cc
+++ b/chromium/v8/src/codegen/arm64/interface-descriptors-arm64.cc
@@ -191,11 +191,6 @@ void AbortDescriptor::InitializePlatformSpecific(
data->InitializePlatformSpecific(arraysize(registers), registers);
}
-void AllocateHeapNumberDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- data->InitializePlatformSpecific(0, nullptr);
-}
-
void CompareDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
// x1: left operand
@@ -299,6 +294,30 @@ void CallTrampoline_WithFeedbackDescriptor::InitializePlatformSpecific(
DefaultInitializePlatformSpecific(data, 4);
}
+void CallWithArrayLike_WithFeedbackDescriptor::InitializePlatformSpecific(
+ CallInterfaceDescriptorData* data) {
+ // TODO(v8:8888): Implement on this platform.
+ DefaultInitializePlatformSpecific(data, 4);
+}
+
+void CallWithSpread_WithFeedbackDescriptor::InitializePlatformSpecific(
+ CallInterfaceDescriptorData* data) {
+ // TODO(v8:8888): Implement on this platform.
+ DefaultInitializePlatformSpecific(data, 4);
+}
+
+void ConstructWithArrayLike_WithFeedbackDescriptor::InitializePlatformSpecific(
+ CallInterfaceDescriptorData* data) {
+ // TODO(v8:8888): Implement on this platform.
+ DefaultInitializePlatformSpecific(data, 4);
+}
+
+void ConstructWithSpread_WithFeedbackDescriptor::InitializePlatformSpecific(
+ CallInterfaceDescriptorData* data) {
+ // TODO(v8:8888): Implement on this platform.
+ DefaultInitializePlatformSpecific(data, 4);
+}
+
void Compare_WithFeedbackDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
// TODO(v8:8888): Implement on this platform.
diff --git a/chromium/v8/src/codegen/arm64/macro-assembler-arm64-inl.h b/chromium/v8/src/codegen/arm64/macro-assembler-arm64-inl.h
index 93b8136d9a9..e638312ed0b 100644
--- a/chromium/v8/src/codegen/arm64/macro-assembler-arm64-inl.h
+++ b/chromium/v8/src/codegen/arm64/macro-assembler-arm64-inl.h
@@ -318,8 +318,8 @@ void TurboAssembler::Bind(Label* label, BranchTargetIdentifier id) {
// instructions between the bind and the target identifier instruction.
InstructionAccurateScope scope(this, 1);
bind(label);
- if (id == BranchTargetIdentifier::kPaciasp) {
- paciasp();
+ if (id == BranchTargetIdentifier::kPacibsp) {
+ pacibsp();
} else {
bti(id);
}
@@ -1136,7 +1136,7 @@ void TurboAssembler::Push(const CPURegister& src0, const CPURegister& src1,
#ifdef V8_ENABLE_CONTROL_FLOW_INTEGRITY
if (lr_mode == kSignLR) {
- Paciasp();
+ Pacibsp();
}
#endif
@@ -1153,7 +1153,7 @@ void TurboAssembler::Push(const Register& src0, const VRegister& src1) {
DCHECK_IMPLIES((lr_mode == kDontStoreLR), ((src0 != lr) && (src1 != lr)));
#ifdef V8_ENABLE_CONTROL_FLOW_INTEGRITY
if (lr_mode == kSignLR) {
- Paciasp();
+ Pacibsp();
}
#endif
@@ -1188,7 +1188,7 @@ void TurboAssembler::Pop(const CPURegister& dst0, const CPURegister& dst1,
#ifdef V8_ENABLE_CONTROL_FLOW_INTEGRITY
if (lr_mode == kAuthLR) {
- Autiasp();
+ Autibsp();
}
#endif
}
@@ -1199,7 +1199,7 @@ void TurboAssembler::Poke(const CPURegister& src, const Operand& offset) {
DCHECK_IMPLIES((lr_mode == kDontStoreLR), (src != lr));
#ifdef V8_ENABLE_CONTROL_FLOW_INTEGRITY
if (lr_mode == kSignLR) {
- Paciasp();
+ Pacibsp();
}
#endif
@@ -1228,7 +1228,7 @@ void TurboAssembler::Peek(const CPURegister& dst, const Operand& offset) {
DCHECK_IMPLIES((lr_mode == kDontLoadLR), (dst != lr));
#ifdef V8_ENABLE_CONTROL_FLOW_INTEGRITY
if (lr_mode == kAuthLR) {
- Autiasp();
+ Autibsp();
}
#endif
}
@@ -1238,7 +1238,7 @@ void TurboAssembler::PushCPURegList(CPURegList registers) {
DCHECK_IMPLIES((lr_mode == kDontStoreLR), !registers.IncludesAliasOf(lr));
#ifdef V8_ENABLE_CONTROL_FLOW_INTEGRITY
if (lr_mode == kSignLR && registers.IncludesAliasOf(lr)) {
- Paciasp();
+ Pacibsp();
}
#endif
@@ -1280,7 +1280,7 @@ void TurboAssembler::PopCPURegList(CPURegList registers) {
#ifdef V8_ENABLE_CONTROL_FLOW_INTEGRITY
if (lr_mode == kAuthLR && contains_lr) {
- Autiasp();
+ Autibsp();
}
#endif
}
diff --git a/chromium/v8/src/codegen/arm64/macro-assembler-arm64.cc b/chromium/v8/src/codegen/arm64/macro-assembler-arm64.cc
index c157df29966..a591e690c3f 100644
--- a/chromium/v8/src/codegen/arm64/macro-assembler-arm64.cc
+++ b/chromium/v8/src/codegen/arm64/macro-assembler-arm64.cc
@@ -1197,7 +1197,7 @@ void MacroAssembler::PeekPair(const CPURegister& dst1, const CPURegister& dst2,
void MacroAssembler::PushCalleeSavedRegisters() {
#ifdef V8_ENABLE_CONTROL_FLOW_INTEGRITY
- Paciasp();
+ Pacibsp();
#endif
{
@@ -1249,7 +1249,7 @@ void MacroAssembler::PopCalleeSavedRegisters() {
}
#ifdef V8_ENABLE_CONTROL_FLOW_INTEGRITY
- Autiasp();
+ Autibsp();
#endif
}
@@ -1971,7 +1971,7 @@ void TurboAssembler::StoreReturnAddressAndCall(Register target) {
Adr(x17, &return_location);
#ifdef V8_ENABLE_CONTROL_FLOW_INTEGRITY
Add(x16, sp, kSystemPointerSize);
- Pacia1716();
+ Pacib1716();
#endif
Poke(x17, 0);
@@ -2263,6 +2263,11 @@ void TurboAssembler::TruncateDoubleToI(Isolate* isolate, Zone* zone,
DoubleRegister double_input,
StubCallMode stub_mode,
LinkRegisterStatus lr_status) {
+ if (CpuFeatures::IsSupported(JSCVT)) {
+ Fjcvtzs(result.W(), double_input);
+ return;
+ }
+
Label done;
// Try to convert the double to an int64. If successful, the bottom 32 bits
@@ -2650,7 +2655,7 @@ void TurboAssembler::CheckPageFlag(const Register& object, int mask,
UseScratchRegisterScope temps(this);
Register scratch = temps.AcquireX();
And(scratch, object, ~kPageAlignmentMask);
- Ldr(scratch, MemOperand(scratch, MemoryChunk::kFlagsOffset));
+ Ldr(scratch, MemOperand(scratch, BasicMemoryChunk::kFlagsOffset));
if (cc == eq) {
TestAndBranchIfAnySet(scratch, mask, condition_met);
} else {
@@ -3243,7 +3248,7 @@ void TurboAssembler::RestoreFPAndLR() {
// We can load the return address directly into x17.
Add(x16, fp, StandardFrameConstants::kCallerSPOffset);
Ldp(fp, x17, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
- Autia1716();
+ Autib1716();
Mov(lr, x17);
#else
Ldp(fp, lr, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
@@ -3256,7 +3261,7 @@ void TurboAssembler::StoreReturnAddressInWasmExitFrame(Label* return_location) {
Adr(x17, return_location);
#ifdef V8_ENABLE_CONTROL_FLOW_INTEGRITY
Add(x16, fp, WasmExitFrameConstants::kCallingPCOffset + kSystemPointerSize);
- Pacia1716();
+ Pacib1716();
#endif
Str(x17, MemOperand(fp, WasmExitFrameConstants::kCallingPCOffset));
}
diff --git a/chromium/v8/src/codegen/arm64/macro-assembler-arm64.h b/chromium/v8/src/codegen/arm64/macro-assembler-arm64.h
index 109e73c3c22..0cb9e823198 100644
--- a/chromium/v8/src/codegen/arm64/macro-assembler-arm64.h
+++ b/chromium/v8/src/codegen/arm64/macro-assembler-arm64.h
@@ -503,13 +503,13 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
void Cbnz(const Register& rt, Label* label);
void Cbz(const Register& rt, Label* label);
- void Paciasp() {
+ void Pacibsp() {
DCHECK(allow_macro_instructions_);
- paciasp();
+ pacibsp();
}
- void Autiasp() {
+ void Autibsp() {
DCHECK(allow_macro_instructions_);
- autiasp();
+ autibsp();
}
// The 1716 pac and aut instructions encourage people to use x16 and x17
@@ -519,7 +519,7 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
// Register temp = temps.AcquireX(); // temp will be x16
// __ Mov(x17, ptr);
// __ Mov(x16, modifier); // Will override temp!
- // __ Pacia1716();
+ // __ Pacib1716();
//
// To work around this issue, you must exclude x16 and x17 from the scratch
// register list. You may need to replace them with other registers:
@@ -529,18 +529,18 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
// temps.Include(x10, x11);
// __ Mov(x17, ptr);
// __ Mov(x16, modifier);
- // __ Pacia1716();
- void Pacia1716() {
+ // __ Pacib1716();
+ void Pacib1716() {
DCHECK(allow_macro_instructions_);
DCHECK(!TmpList()->IncludesAliasOf(x16));
DCHECK(!TmpList()->IncludesAliasOf(x17));
- pacia1716();
+ pacib1716();
}
- void Autia1716() {
+ void Autib1716() {
DCHECK(allow_macro_instructions_);
DCHECK(!TmpList()->IncludesAliasOf(x16));
DCHECK(!TmpList()->IncludesAliasOf(x17));
- autia1716();
+ autib1716();
}
inline void Dmb(BarrierDomain domain, BarrierType type);
@@ -1009,6 +1009,12 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
fcvtzs(vd, vn, fbits);
}
+ void Fjcvtzs(const Register& rd, const VRegister& vn) {
+ DCHECK(allow_macro_instructions());
+ DCHECK(!rd.IsZero());
+ fjcvtzs(rd, vn);
+ }
+
inline void Fcvtzu(const Register& rd, const VRegister& fn);
void Fcvtzu(const VRegister& vd, const VRegister& vn, int fbits = 0) {
DCHECK(allow_macro_instructions());
diff --git a/chromium/v8/src/codegen/arm64/register-arm64.h b/chromium/v8/src/codegen/arm64/register-arm64.h
index c98b0f6162f..76bf3049c89 100644
--- a/chromium/v8/src/codegen/arm64/register-arm64.h
+++ b/chromium/v8/src/codegen/arm64/register-arm64.h
@@ -92,9 +92,7 @@ class CPURegister : public RegisterBase<CPURegister, kRegAfterLast> {
}
static constexpr CPURegister Create(int code, int size, RegisterType type) {
-#if V8_HAS_CXX14_CONSTEXPR
- DCHECK(IsValid(code, size, type));
-#endif
+ CONSTEXPR_DCHECK(IsValid(code, size, type));
return CPURegister{code, size, type};
}
@@ -304,9 +302,7 @@ class VRegister : public CPURegister {
}
static constexpr VRegister Create(int code, int size, int lane_count = 1) {
-#if V8_HAS_CXX14_CONSTEXPR
- DCHECK(IsValidLaneCount(lane_count));
-#endif
+ CONSTEXPR_DCHECK(IsValidLaneCount(lane_count));
return VRegister(CPURegister::Create(code, size, CPURegister::kVRegister),
lane_count);
}
diff --git a/chromium/v8/src/codegen/assembler.cc b/chromium/v8/src/codegen/assembler.cc
index 3b27bf5db9e..3d0b7d28e47 100644
--- a/chromium/v8/src/codegen/assembler.cc
+++ b/chromium/v8/src/codegen/assembler.cc
@@ -81,7 +81,7 @@ namespace {
class DefaultAssemblerBuffer : public AssemblerBuffer {
public:
explicit DefaultAssemblerBuffer(int size)
- : buffer_(OwnedVector<uint8_t>::New(size)) {
+ : buffer_(OwnedVector<uint8_t>::NewForOverwrite(size)) {
#ifdef DEBUG
ZapCode(reinterpret_cast<Address>(buffer_.start()), size);
#endif
diff --git a/chromium/v8/src/codegen/assembler.h b/chromium/v8/src/codegen/assembler.h
index 1c287222e96..6419e55cec7 100644
--- a/chromium/v8/src/codegen/assembler.h
+++ b/chromium/v8/src/codegen/assembler.h
@@ -78,10 +78,16 @@ class JumpOptimizationInfo {
public:
bool is_collecting() const { return stage_ == kCollection; }
bool is_optimizing() const { return stage_ == kOptimization; }
- void set_optimizing() { stage_ = kOptimization; }
+ void set_optimizing() {
+ DCHECK(is_optimizable());
+ stage_ = kOptimization;
+ }
bool is_optimizable() const { return optimizable_; }
- void set_optimizable() { optimizable_ = true; }
+ void set_optimizable() {
+ DCHECK(is_collecting());
+ optimizable_ = true;
+ }
// Used to verify the instruction sequence is always the same in two stages.
size_t hash_code() const { return hash_code_; }
@@ -251,6 +257,15 @@ class V8_EXPORT_PRIVATE AssemblerBase : public Malloced {
int pc_offset() const { return static_cast<int>(pc_ - buffer_start_); }
+ int pc_offset_for_safepoint() {
+#if defined(V8_TARGET_ARCH_MIPS) || defined(V8_TARGET_ARCH_MIPS64)
+ // MIPS needs its own implementation to avoid the trampoline's influence.
+ UNREACHABLE();
+#else
+ return pc_offset();
+#endif
+ }
+
byte* buffer_start() const { return buffer_->start(); }
int buffer_size() const { return buffer_->size(); }
int instruction_size() const { return pc_offset(); }
diff --git a/chromium/v8/src/codegen/code-factory.cc b/chromium/v8/src/codegen/code-factory.cc
index 060a66edc7a..006b6bee167 100644
--- a/chromium/v8/src/codegen/code-factory.cc
+++ b/chromium/v8/src/codegen/code-factory.cc
@@ -268,6 +268,23 @@ Callable CodeFactory::Call(Isolate* isolate, ConvertReceiverMode mode) {
}
// static
+Callable CodeFactory::Call_WithFeedback(Isolate* isolate,
+ ConvertReceiverMode mode) {
+ switch (mode) {
+ case ConvertReceiverMode::kNullOrUndefined:
+ return Builtins::CallableFor(
+ isolate, Builtins::kCall_ReceiverIsNullOrUndefined_WithFeedback);
+ case ConvertReceiverMode::kNotNullOrUndefined:
+ return Builtins::CallableFor(
+ isolate, Builtins::kCall_ReceiverIsNotNullOrUndefined_WithFeedback);
+ case ConvertReceiverMode::kAny:
+ return Builtins::CallableFor(isolate,
+ Builtins::kCall_ReceiverIsAny_WithFeedback);
+ }
+ UNREACHABLE();
+}
+
+// static
Callable CodeFactory::CallWithArrayLike(Isolate* isolate) {
return Builtins::CallableFor(isolate, Builtins::kCallWithArrayLike);
}
diff --git a/chromium/v8/src/codegen/code-factory.h b/chromium/v8/src/codegen/code-factory.h
index b8d294ce714..02fc7e4b236 100644
--- a/chromium/v8/src/codegen/code-factory.h
+++ b/chromium/v8/src/codegen/code-factory.h
@@ -71,6 +71,7 @@ class V8_EXPORT_PRIVATE CodeFactory final {
static Callable ArgumentAdaptor(Isolate* isolate);
static Callable Call(Isolate* isolate,
ConvertReceiverMode mode = ConvertReceiverMode::kAny);
+ static Callable Call_WithFeedback(Isolate* isolate, ConvertReceiverMode mode);
static Callable CallWithArrayLike(Isolate* isolate);
static Callable CallWithSpread(Isolate* isolate);
static Callable CallFunction(
diff --git a/chromium/v8/src/codegen/code-stub-assembler.cc b/chromium/v8/src/codegen/code-stub-assembler.cc
index 901ce0c7b49..6e9b817759d 100644
--- a/chromium/v8/src/codegen/code-stub-assembler.cc
+++ b/chromium/v8/src/codegen/code-stub-assembler.cc
@@ -20,7 +20,6 @@
#include "src/objects/descriptor-array.h"
#include "src/objects/function-kind.h"
#include "src/objects/heap-number.h"
-#include "src/objects/js-aggregate-error.h"
#include "src/objects/js-generator.h"
#include "src/objects/oddball.h"
#include "src/objects/ordered-hash-table-inl.h"
@@ -136,17 +135,6 @@ void CodeStubAssembler::Check(SloppyTNode<Word32T> condition_node,
Check(branch, message, file, line, extra_nodes);
}
-template <>
-TNode<Smi> CodeStubAssembler::IntPtrToParameter<Smi>(TNode<IntPtrT> value) {
- return SmiTag(value);
-}
-template <>
-TNode<IntPtrT> CodeStubAssembler::IntPtrToParameter<IntPtrT>(
- TNode<IntPtrT> value) {
- return value;
-}
-
-
void CodeStubAssembler::IncrementCallCount(
TNode<FeedbackVector> feedback_vector, TNode<UintPtrT> slot_id) {
Comment("increment call count");
@@ -292,33 +280,6 @@ Node* CodeStubAssembler::IntPtrOrSmiConstant(int value, ParameterMode mode) {
}
}
-bool CodeStubAssembler::IsIntPtrOrSmiConstantZero(TNode<Smi> test) {
- Smi smi_test;
- if (ToSmiConstant(test, &smi_test) && smi_test.value() == 0) {
- return true;
- }
- return false;
-}
-
-bool CodeStubAssembler::IsIntPtrOrSmiConstantZero(TNode<IntPtrT> test) {
- int32_t constant_test;
- if (ToInt32Constant(test, &constant_test) && constant_test == 0) {
- return true;
- }
- return false;
-}
-
-bool CodeStubAssembler::IsIntPtrOrSmiConstantZero(Node* test,
- ParameterMode mode) {
- if (mode == INTPTR_PARAMETERS) {
- return IsIntPtrOrSmiConstantZero(UncheckedCast<IntPtrT>(test));
- } else {
- DCHECK_EQ(mode, SMI_PARAMETERS);
- return IsIntPtrOrSmiConstantZero(UncheckedCast<Smi>(test));
- }
- return false;
-}
-
bool CodeStubAssembler::TryGetIntPtrOrSmiConstantValue(Node* maybe_constant,
int* value,
ParameterMode mode) {
@@ -604,6 +565,16 @@ TNode<BoolT> CodeStubAssembler::IsValidSmiIndex(TNode<Smi> smi) {
return Int32TrueConstant();
}
+template <>
+TNode<Smi> CodeStubAssembler::TaggedToParameter(TNode<Smi> value) {
+ return value;
+}
+
+template <>
+TNode<IntPtrT> CodeStubAssembler::TaggedToParameter(TNode<Smi> value) {
+ return SmiUntag(value);
+}
+
TNode<IntPtrT> CodeStubAssembler::TaggedIndexToIntPtr(
TNode<TaggedIndex> value) {
return Signed(WordSarShiftOutZeros(BitcastTaggedToWordForTagAndSmiBits(value),
@@ -2413,10 +2384,10 @@ TNode<Int32T> CodeStubAssembler::LoadAndUntagToWord32FixedArrayElement(
}
TNode<MaybeObject> CodeStubAssembler::LoadWeakFixedArrayElement(
- TNode<WeakFixedArray> object, Node* index, int additional_offset,
- ParameterMode parameter_mode, LoadSensitivity needs_poisoning) {
+ TNode<WeakFixedArray> object, TNode<IntPtrT> index, int additional_offset) {
return LoadArrayElement(object, WeakFixedArray::kHeaderSize, index,
- additional_offset, parameter_mode, needs_poisoning);
+ additional_offset, INTPTR_PARAMETERS,
+ LoadSensitivity::kSafe);
}
TNode<Float64T> CodeStubAssembler::LoadFixedDoubleArrayElement(
@@ -2893,17 +2864,17 @@ TNode<Int32T> CodeStubAssembler::EnsureArrayPushable(TNode<Context> context,
}
void CodeStubAssembler::PossiblyGrowElementsCapacity(
- ParameterMode mode, ElementsKind kind, TNode<HeapObject> array,
- Node* length, TVariable<FixedArrayBase>* var_elements, Node* growth,
+ ElementsKind kind, TNode<HeapObject> array, TNode<BInt> length,
+ TVariable<FixedArrayBase>* var_elements, TNode<BInt> growth,
Label* bailout) {
+ ParameterMode mode = OptimalParameterMode();
Label fits(this, var_elements);
- Node* capacity =
- TaggedToParameter(LoadFixedArrayBaseLength(var_elements->value()), mode);
- // length and growth nodes are already in a ParameterMode appropriate
- // representation.
- Node* new_length = IntPtrOrSmiAdd(growth, length, mode);
- GotoIfNot(IntPtrOrSmiGreaterThan(new_length, capacity, mode), &fits);
- Node* new_capacity = CalculateNewElementsCapacity(new_length, mode);
+ TNode<BInt> capacity =
+ TaggedToParameter<BInt>(LoadFixedArrayBaseLength(var_elements->value()));
+
+ TNode<BInt> new_length = IntPtrOrSmiAdd(growth, length);
+ GotoIfNot(IntPtrOrSmiGreaterThan(new_length, capacity), &fits);
+ TNode<BInt> new_capacity = CalculateNewElementsCapacity(new_length);
*var_elements = GrowElementsCapacity(array, var_elements->value(), kind, kind,
capacity, new_capacity, mode, bailout);
Goto(&fits);
@@ -2919,15 +2890,14 @@ TNode<Smi> CodeStubAssembler::BuildAppendJSArray(ElementsKind kind,
Label pre_bailout(this);
Label success(this);
TVARIABLE(Smi, var_tagged_length);
- ParameterMode mode = OptimalParameterMode();
TVARIABLE(BInt, var_length, SmiToBInt(LoadFastJSArrayLength(array)));
TVARIABLE(FixedArrayBase, var_elements, LoadElements(array));
// Resize the capacity of the fixed array if it doesn't fit.
TNode<IntPtrT> first = arg_index->value();
TNode<BInt> growth = IntPtrToBInt(IntPtrSub(args->GetLength(), first));
- PossiblyGrowElementsCapacity(mode, kind, array, var_length.value(),
- &var_elements, growth, &pre_bailout);
+ PossiblyGrowElementsCapacity(kind, array, var_length.value(), &var_elements,
+ growth, &pre_bailout);
// Push each argument onto the end of the array now that there is enough
// capacity.
@@ -2936,8 +2906,8 @@ TNode<Smi> CodeStubAssembler::BuildAppendJSArray(ElementsKind kind,
args->ForEach(
push_vars,
[&](TNode<Object> arg) {
- TryStoreArrayElement(kind, mode, &pre_bailout, elements,
- var_length.value(), arg);
+ TryStoreArrayElement(kind, &pre_bailout, elements, var_length.value(),
+ arg);
Increment(&var_length);
},
first);
@@ -2950,7 +2920,7 @@ TNode<Smi> CodeStubAssembler::BuildAppendJSArray(ElementsKind kind,
BIND(&pre_bailout);
{
- TNode<Smi> length = ParameterToTagged(var_length.value(), mode);
+ TNode<Smi> length = ParameterToTagged(var_length.value());
var_tagged_length = length;
TNode<Smi> diff = SmiSub(length, LoadFastJSArrayLength(array));
StoreObjectFieldNoWriteBarrier(array, JSArray::kLengthOffset, length);
@@ -2962,15 +2932,17 @@ TNode<Smi> CodeStubAssembler::BuildAppendJSArray(ElementsKind kind,
return var_tagged_length.value();
}
-void CodeStubAssembler::TryStoreArrayElement(ElementsKind kind,
- ParameterMode mode, Label* bailout,
+void CodeStubAssembler::TryStoreArrayElement(ElementsKind kind, Label* bailout,
TNode<FixedArrayBase> elements,
- Node* index, TNode<Object> value) {
+ TNode<BInt> index,
+ TNode<Object> value) {
if (IsSmiElementsKind(kind)) {
GotoIf(TaggedIsNotSmi(value), bailout);
} else if (IsDoubleElementsKind(kind)) {
GotoIfNotNumber(value, bailout);
}
+
+ ParameterMode mode = OptimalParameterMode();
if (IsDoubleElementsKind(kind)) {
StoreElement(elements, kind, index, ChangeNumberToFloat64(CAST(value)),
mode);
@@ -2984,19 +2956,18 @@ void CodeStubAssembler::BuildAppendJSArray(ElementsKind kind,
TNode<Object> value,
Label* bailout) {
Comment("BuildAppendJSArray: ", ElementsKindToString(kind));
- ParameterMode mode = OptimalParameterMode();
TVARIABLE(BInt, var_length, SmiToBInt(LoadFastJSArrayLength(array)));
TVARIABLE(FixedArrayBase, var_elements, LoadElements(array));
// Resize the capacity of the fixed array if it doesn't fit.
- Node* growth = IntPtrOrSmiConstant(1, mode);
- PossiblyGrowElementsCapacity(mode, kind, array, var_length.value(),
- &var_elements, growth, bailout);
+ TNode<BInt> growth = IntPtrOrSmiConstant<BInt>(1);
+ PossiblyGrowElementsCapacity(kind, array, var_length.value(), &var_elements,
+ growth, bailout);
// Push each argument onto the end of the array now that there is enough
// capacity.
- TryStoreArrayElement(kind, mode, bailout, var_elements.value(),
- var_length.value(), value);
+ TryStoreArrayElement(kind, bailout, var_elements.value(), var_length.value(),
+ value);
Increment(&var_length);
TNode<Smi> length = BIntToSmi(var_length.value());
@@ -3552,12 +3523,13 @@ TNode<BoolT> CodeStubAssembler::IsValidFastJSArrayCapacity(
TNode<JSArray> CodeStubAssembler::AllocateJSArray(
TNode<Map> array_map, TNode<FixedArrayBase> elements, TNode<Smi> length,
- TNode<AllocationSite> allocation_site, int array_header_size) {
+ base::Optional<TNode<AllocationSite>> allocation_site,
+ int array_header_size) {
Comment("begin allocation of JSArray passing in elements");
CSA_SLOW_ASSERT(this, TaggedIsPositiveSmi(length));
int base_size = array_header_size;
- if (!allocation_site.is_null()) {
+ if (allocation_site) {
base_size += AllocationMemento::kSize;
}
@@ -3571,8 +3543,9 @@ TNode<JSArray> CodeStubAssembler::AllocateJSArray(
std::pair<TNode<JSArray>, TNode<FixedArrayBase>>
CodeStubAssembler::AllocateUninitializedJSArrayWithElements(
ElementsKind kind, TNode<Map> array_map, TNode<Smi> length,
- TNode<AllocationSite> allocation_site, TNode<IntPtrT> capacity,
- AllocationFlags allocation_flags, int array_header_size) {
+ base::Optional<TNode<AllocationSite>> allocation_site,
+ TNode<IntPtrT> capacity, AllocationFlags allocation_flags,
+ int array_header_size) {
Comment("begin allocation of JSArray with elements");
CHECK_EQ(allocation_flags & ~kAllowLargeObjectAllocation, 0);
CSA_SLOW_ASSERT(this, TaggedIsPositiveSmi(length));
@@ -3608,7 +3581,7 @@ CodeStubAssembler::AllocateUninitializedJSArrayWithElements(
BIND(&nonempty);
{
int base_size = array_header_size;
- if (!allocation_site.is_null()) {
+ if (allocation_site) {
base_size += AllocationMemento::kSize;
}
@@ -3680,7 +3653,8 @@ CodeStubAssembler::AllocateUninitializedJSArrayWithElements(
TNode<JSArray> CodeStubAssembler::AllocateUninitializedJSArray(
TNode<Map> array_map, TNode<Smi> length,
- TNode<AllocationSite> allocation_site, TNode<IntPtrT> size_in_bytes) {
+ base::Optional<TNode<AllocationSite>> allocation_site,
+ TNode<IntPtrT> size_in_bytes) {
CSA_SLOW_ASSERT(this, TaggedIsPositiveSmi(length));
// Allocate space for the JSArray and the elements FixedArray in one go.
@@ -3691,9 +3665,9 @@ TNode<JSArray> CodeStubAssembler::AllocateUninitializedJSArray(
StoreObjectFieldRoot(array, JSArray::kPropertiesOrHashOffset,
RootIndex::kEmptyFixedArray);
- if (!allocation_site.is_null()) {
+ if (allocation_site) {
InitializeAllocationMemento(array, IntPtrConstant(JSArray::kHeaderSize),
- allocation_site);
+ *allocation_site);
}
return CAST(array);
@@ -3701,7 +3675,7 @@ TNode<JSArray> CodeStubAssembler::AllocateUninitializedJSArray(
TNode<JSArray> CodeStubAssembler::AllocateJSArray(
ElementsKind kind, TNode<Map> array_map, TNode<IntPtrT> capacity,
- TNode<Smi> length, TNode<AllocationSite> allocation_site,
+ TNode<Smi> length, base::Optional<TNode<AllocationSite>> allocation_site,
AllocationFlags allocation_flags) {
CSA_SLOW_ASSERT(this, TaggedIsPositiveSmi(length));
@@ -3728,9 +3702,10 @@ TNode<JSArray> CodeStubAssembler::AllocateJSArray(
return array;
}
-TNode<JSArray> CodeStubAssembler::ExtractFastJSArray(
- TNode<Context> context, TNode<JSArray> array, Node* begin, Node* count,
- ParameterMode mode, Node* capacity, TNode<AllocationSite> allocation_site) {
+TNode<JSArray> CodeStubAssembler::ExtractFastJSArray(TNode<Context> context,
+ TNode<JSArray> array,
+ TNode<BInt> begin,
+ TNode<BInt> count) {
TNode<Map> original_array_map = LoadMap(array);
TNode<Int32T> elements_kind = LoadMapElementsKind(original_array_map);
@@ -3739,17 +3714,18 @@ TNode<JSArray> CodeStubAssembler::ExtractFastJSArray(
TNode<Map> array_map = LoadJSArrayElementsMap(elements_kind, native_context);
TNode<FixedArrayBase> new_elements = ExtractFixedArray(
- LoadElements(array), begin, count, capacity,
- ExtractFixedArrayFlag::kAllFixedArrays, mode, nullptr, elements_kind);
+ LoadElements(array), begin, count, base::nullopt,
+ ExtractFixedArrayFlag::kAllFixedArrays, nullptr, elements_kind);
TNode<JSArray> result = AllocateJSArray(
- array_map, new_elements, ParameterToTagged(count, mode), allocation_site);
+ array_map, new_elements, ParameterToTagged(count), base::nullopt);
return result;
}
TNode<JSArray> CodeStubAssembler::CloneFastJSArray(
TNode<Context> context, TNode<JSArray> array,
- TNode<AllocationSite> allocation_site, HoleConversionMode convert_holes) {
+ base::Optional<TNode<AllocationSite>> allocation_site,
+ HoleConversionMode convert_holes) {
// TODO(dhai): we should be able to assert IsFastJSArray(array) here, but this
// function is also used to copy boilerplates even when the no-elements
// protector is invalid. This function should be renamed to reflect its uses.
@@ -3775,7 +3751,7 @@ TNode<JSArray> CodeStubAssembler::CloneFastJSArray(
// Simple extraction that preserves holes.
new_elements =
ExtractFixedArray(LoadElements(array), IntPtrOrSmiConstant(0, mode),
- TaggedToParameter(CAST(length), mode), nullptr,
+ TaggedToParameter<BInt>(CAST(length)), nullptr,
ExtractFixedArrayFlag::kAllFixedArraysDontCopyCOW, mode,
nullptr, var_elements_kind.value());
var_new_elements = new_elements;
@@ -3793,7 +3769,7 @@ TNode<JSArray> CodeStubAssembler::CloneFastJSArray(
// ExtractFixedArrayFlag::kDontCopyCOW.
new_elements = ExtractFixedArray(
LoadElements(array), IntPtrOrSmiConstant(0, mode),
- TaggedToParameter(CAST(length), mode), nullptr,
+ TaggedToParameter<BInt>(CAST(length)), nullptr,
ExtractFixedArrayFlag::kAllFixedArrays, mode, &var_holes_converted);
var_new_elements = new_elements;
// If the array type didn't change, use the original elements kind.
@@ -4284,9 +4260,9 @@ void CodeStubAssembler::FillPropertyArrayWithUndefined(
CSA_SLOW_ASSERT(this, MatchesParameterMode(to_node, mode));
ElementsKind kind = PACKED_ELEMENTS;
TNode<Oddball> value = UndefinedConstant();
- BuildFastFixedArrayForEach(
+ BuildFastArrayForEach(
array, kind, from_node, to_node,
- [this, value](Node* array, Node* offset) {
+ [this, value](TNode<HeapObject> array, TNode<IntPtrT> offset) {
StoreNoWriteBarrier(MachineRepresentation::kTagged, array, offset,
value);
},
@@ -4312,9 +4288,10 @@ void CodeStubAssembler::FillFixedArrayWithValue(ElementsKind kind,
float_value = LoadHeapNumberValue(CAST(value));
}
- BuildFastFixedArrayForEach(
+ BuildFastArrayForEach(
array, kind, from_node, to_node,
- [this, value, float_value, kind](Node* array, Node* offset) {
+ [this, value, float_value, kind](TNode<HeapObject> array,
+ TNode<IntPtrT> offset) {
if (IsDoubleElementsKind(kind)) {
StoreNoWriteBarrier(MachineRepresentation::kFloat64, array, offset,
float_value);
@@ -4472,7 +4449,7 @@ void CodeStubAssembler::MoveElements(ElementsKind kind,
const TNode<IntPtrT> delta =
IntPtrMul(IntPtrSub(dst_index, begin),
IntPtrConstant(ElementsKindToByteSize(kind)));
- auto loop_body = [&](Node* array, Node* offset) {
+ auto loop_body = [&](TNode<HeapObject> array, TNode<IntPtrT> offset) {
const TNode<AnyTaggedT> element = Load<AnyTaggedT>(array, offset);
const TNode<WordT> delta_offset = IntPtrAdd(offset, delta);
Store(array, delta_offset, element);
@@ -4485,17 +4462,15 @@ void CodeStubAssembler::MoveElements(ElementsKind kind,
BIND(&iterate_forward);
{
// Make a loop for the stores.
- BuildFastFixedArrayForEach(elements, kind, begin, end, loop_body,
- INTPTR_PARAMETERS,
- ForEachDirection::kForward);
+ BuildFastArrayForEach(elements, kind, begin, end, loop_body,
+ INTPTR_PARAMETERS, ForEachDirection::kForward);
Goto(&finished);
}
BIND(&iterate_backward);
{
- BuildFastFixedArrayForEach(elements, kind, begin, end, loop_body,
- INTPTR_PARAMETERS,
- ForEachDirection::kReverse);
+ BuildFastArrayForEach(elements, kind, begin, end, loop_body,
+ INTPTR_PARAMETERS, ForEachDirection::kReverse);
Goto(&finished);
}
}
@@ -4563,9 +4538,9 @@ void CodeStubAssembler::CopyElements(ElementsKind kind,
const TNode<IntPtrT> delta =
IntPtrMul(IntPtrSub(dst_index, src_index),
IntPtrConstant(ElementsKindToByteSize(kind)));
- BuildFastFixedArrayForEach(
+ BuildFastArrayForEach(
src_elements, kind, begin, end,
- [&](Node* array, Node* offset) {
+ [&](TNode<HeapObject> array, TNode<IntPtrT> offset) {
const TNode<AnyTaggedT> element = Load<AnyTaggedT>(array, offset);
const TNode<WordT> delta_offset = IntPtrAdd(offset, delta);
if (write_barrier == SKIP_WRITE_BARRIER) {
@@ -4757,12 +4732,6 @@ void CodeStubAssembler::CopyFixedArrayElements(
Comment("] CopyFixedArrayElements");
}
-TNode<JSAggregateError> CodeStubAssembler::HeapObjectToJSAggregateError(
- TNode<HeapObject> heap_object, Label* fail) {
- GotoIfNot(IsJSAggregateError(heap_object), fail);
- return UncheckedCast<JSAggregateError>(heap_object);
-}
-
TNode<FixedArray> CodeStubAssembler::HeapObjectToFixedArray(
TNode<HeapObject> base, Label* cast_fail) {
Label fixed_array(this);
@@ -4795,10 +4764,10 @@ void CodeStubAssembler::CopyPropertyArrayValues(TNode<HeapObject> from_array,
Node* start = IntPtrOrSmiConstant(0, mode);
ElementsKind kind = PACKED_ELEMENTS;
- BuildFastFixedArrayForEach(
+ BuildFastArrayForEach(
from_array, kind, start, property_count,
- [this, to_array, needs_write_barrier, destroy_source](Node* array,
- Node* offset) {
+ [this, to_array, needs_write_barrier, destroy_source](
+ TNode<HeapObject> array, TNode<IntPtrT> offset) {
TNode<AnyTaggedT> value = Load<AnyTaggedT>(array, offset);
if (destroy_source == DestroySource::kNo) {
@@ -4829,11 +4798,9 @@ void CodeStubAssembler::CopyPropertyArrayValues(TNode<HeapObject> from_array,
Comment("] CopyPropertyArrayValues");
}
-Node* CodeStubAssembler::LoadElementAndPrepareForStore(Node* array,
- Node* offset,
- ElementsKind from_kind,
- ElementsKind to_kind,
- Label* if_hole) {
+Node* CodeStubAssembler::LoadElementAndPrepareForStore(
+ TNode<FixedArrayBase> array, TNode<IntPtrT> offset, ElementsKind from_kind,
+ ElementsKind to_kind, Label* if_hole) {
CSA_ASSERT(this, IsFixedArrayWithKind(array, from_kind));
if (IsDoubleElementsKind(from_kind)) {
TNode<Float64T> value =
@@ -4874,28 +4841,32 @@ TNode<FixedArrayBase> CodeStubAssembler::TryGrowElementsCapacity(
CSA_SLOW_ASSERT(this, IsFixedArrayWithKindOrEmpty(elements, kind));
TNode<Smi> capacity = LoadFixedArrayBaseLength(elements);
- ParameterMode mode = OptimalParameterMode();
- return TryGrowElementsCapacity(
- object, elements, kind, TaggedToParameter(key, mode),
- TaggedToParameter(capacity, mode), mode, bailout);
+ return TryGrowElementsCapacity(object, elements, kind,
+ TaggedToParameter<BInt>(key),
+ TaggedToParameter<BInt>(capacity), bailout);
}
+template <typename TIndex>
TNode<FixedArrayBase> CodeStubAssembler::TryGrowElementsCapacity(
TNode<HeapObject> object, TNode<FixedArrayBase> elements, ElementsKind kind,
- Node* key, Node* capacity, ParameterMode mode, Label* bailout) {
+ TNode<TIndex> key, TNode<TIndex> capacity, Label* bailout) {
+ static_assert(
+ std::is_same<TIndex, Smi>::value || std::is_same<TIndex, IntPtrT>::value,
+ "Only Smi or IntPtrT key and capacity nodes are allowed");
Comment("TryGrowElementsCapacity");
CSA_SLOW_ASSERT(this, IsFixedArrayWithKindOrEmpty(elements, kind));
- CSA_SLOW_ASSERT(this, MatchesParameterMode(capacity, mode));
- CSA_SLOW_ASSERT(this, MatchesParameterMode(key, mode));
// If the gap growth is too big, fall back to the runtime.
- Node* max_gap = IntPtrOrSmiConstant(JSObject::kMaxGap, mode);
- Node* max_capacity = IntPtrOrSmiAdd(capacity, max_gap, mode);
- GotoIf(UintPtrOrSmiGreaterThanOrEqual(key, max_capacity, mode), bailout);
+ TNode<TIndex> max_gap = IntPtrOrSmiConstant<TIndex>(JSObject::kMaxGap);
+ TNode<TIndex> max_capacity = IntPtrOrSmiAdd(capacity, max_gap);
+ GotoIf(UintPtrOrSmiGreaterThanOrEqual(key, max_capacity), bailout);
// Calculate the capacity of the new backing store.
Node* new_capacity = CalculateNewElementsCapacity(
- IntPtrOrSmiAdd(key, IntPtrOrSmiConstant(1, mode), mode), mode);
+ IntPtrOrSmiAdd(key, IntPtrOrSmiConstant<TIndex>(1)));
+
+ ParameterMode mode =
+ std::is_same<TIndex, Smi>::value ? SMI_PARAMETERS : INTPTR_PARAMETERS;
return GrowElementsCapacity(object, elements, kind, kind, capacity,
new_capacity, mode, bailout);
}
@@ -6002,10 +5973,6 @@ TNode<BoolT> CodeStubAssembler::IsJSPrimitiveWrapperMap(SloppyTNode<Map> map) {
return IsJSPrimitiveWrapperInstanceType(LoadMapInstanceType(map));
}
-TNode<BoolT> CodeStubAssembler::IsJSAggregateError(TNode<HeapObject> object) {
- return HasInstanceType(object, JS_AGGREGATE_ERROR_TYPE);
-}
-
TNode<BoolT> CodeStubAssembler::IsJSArrayInstanceType(
SloppyTNode<Int32T> instance_type) {
return InstanceTypeEqual(instance_type, JS_ARRAY_TYPE);
@@ -6144,11 +6111,6 @@ TNode<BoolT> CodeStubAssembler::IsAccessorPair(SloppyTNode<HeapObject> object) {
return IsAccessorPairMap(LoadMap(object));
}
-TNode<BoolT> CodeStubAssembler::IsAllocationSite(
- SloppyTNode<HeapObject> object) {
- return IsAllocationSiteInstanceType(LoadInstanceType(object));
-}
-
TNode<BoolT> CodeStubAssembler::IsHeapNumber(SloppyTNode<HeapObject> object) {
return IsHeapNumberMap(LoadMap(object));
}
@@ -6329,11 +6291,6 @@ TNode<BoolT> CodeStubAssembler::IsJSFunctionInstanceType(
return InstanceTypeEqual(instance_type, JS_FUNCTION_TYPE);
}
-TNode<BoolT> CodeStubAssembler::IsAllocationSiteInstanceType(
- SloppyTNode<Int32T> instance_type) {
- return InstanceTypeEqual(instance_type, ALLOCATION_SITE_TYPE);
-}
-
TNode<BoolT> CodeStubAssembler::IsJSFunction(SloppyTNode<HeapObject> object) {
return IsJSFunctionMap(LoadMap(object));
}
@@ -6844,13 +6801,13 @@ TNode<String> CodeStubAssembler::NumberToString(TNode<Number> input,
TNode<Word32T> hash = Word32And(SmiToInt32(smi_input.value()), mask);
TNode<IntPtrT> entry_index =
Signed(ChangeUint32ToWord(Int32Add(hash, hash)));
- TNode<Object> smi_key = UnsafeLoadFixedArrayElement(
- number_string_cache, entry_index, 0, INTPTR_PARAMETERS);
+ TNode<Object> smi_key =
+ UnsafeLoadFixedArrayElement(number_string_cache, entry_index);
GotoIf(TaggedNotEqual(smi_key, smi_input.value()), bailout);
// Smi match, return value from cache entry.
result = CAST(UnsafeLoadFixedArrayElement(number_string_cache, entry_index,
- kTaggedSize, INTPTR_PARAMETERS));
+ kTaggedSize));
Goto(&done);
}
BIND(&done);
@@ -7297,6 +7254,12 @@ TNode<Number> CodeStubAssembler::ToLength_Inline(SloppyTNode<Context> context,
[=] { return CAST(CallBuiltin(Builtins::kToLength, context, input)); });
}
+TNode<Object> CodeStubAssembler::OrdinaryToPrimitive(
+ TNode<Context> context, TNode<Object> input, OrdinaryToPrimitiveHint hint) {
+ Callable callable = CodeFactory::OrdinaryToPrimitive(isolate(), hint);
+ return CallStub(callable, context, input);
+}
+
TNode<Uint32T> CodeStubAssembler::DecodeWord32(SloppyTNode<Word32T> word32,
uint32_t shift, uint32_t mask) {
DCHECK_EQ((mask >> shift) << shift, mask);
@@ -8422,16 +8385,6 @@ void CodeStubAssembler::Lookup(TNode<Name> unique_name, TNode<Array> array,
}
}
-TNode<BoolT> CodeStubAssembler::IsSimpleObjectMap(TNode<Map> map) {
- uint32_t mask = Map::Bits1::HasNamedInterceptorBit::kMask |
- Map::Bits1::IsAccessCheckNeededBit::kMask;
- // !IsSpecialReceiverType && !IsNamedInterceptor && !IsAccessCheckNeeded
- return Select<BoolT>(
- IsSpecialReceiverInstanceType(LoadMapInstanceType(map)),
- [=] { return Int32FalseConstant(); },
- [=] { return IsClearWord32(LoadMapBitField(map), mask); });
-}
-
void CodeStubAssembler::TryLookupPropertyInSimpleObject(
TNode<JSObject> object, TNode<Map> map, TNode<Name> unique_name,
Label* if_found_fast, Label* if_found_dict,
@@ -10009,9 +9962,8 @@ Node* CodeStubAssembler::CheckForCapacityGrow(
GotoIf(UintPtrLessThan(key, current_capacity), &fits_capacity);
{
- Node* new_elements =
- TryGrowElementsCapacity(object, elements, kind, key, current_capacity,
- INTPTR_PARAMETERS, &grow_bailout);
+ Node* new_elements = TryGrowElementsCapacity(
+ object, elements, kind, key, current_capacity, &grow_bailout);
checked_elements.Bind(new_elements);
Goto(&fits_capacity);
}
@@ -10316,10 +10268,10 @@ template TNode<UintPtrT> CodeStubAssembler::BuildFastLoop<UintPtrT>(
TNode<UintPtrT> end_index, const FastLoopBody<UintPtrT>& body,
int increment, IndexAdvanceMode advance_mode);
-void CodeStubAssembler::BuildFastFixedArrayForEach(
+void CodeStubAssembler::BuildFastArrayForEach(
const CodeStubAssembler::VariableList& vars, Node* fixed_array,
ElementsKind kind, Node* first_element_inclusive,
- Node* last_element_exclusive, const FastFixedArrayForEachBody& body,
+ Node* last_element_exclusive, const FastArrayForEachBody& body,
ParameterMode mode, ForEachDirection direction) {
STATIC_ASSERT(FixedArray::kHeaderSize == FixedDoubleArray::kHeaderSize);
CSA_SLOW_ASSERT(this, MatchesParameterMode(first_element_inclusive, mode));
@@ -10339,14 +10291,14 @@ void CodeStubAssembler::BuildFastFixedArrayForEach(
TNode<IntPtrT> index = IntPtrConstant(i);
TNode<IntPtrT> offset = ElementOffsetFromIndex(
index, kind, FixedArray::kHeaderSize - kHeapObjectTag);
- body(fixed_array, offset);
+ body(CAST(fixed_array), offset);
}
} else {
for (int i = last_val - 1; i >= first_val; --i) {
TNode<IntPtrT> index = IntPtrConstant(i);
TNode<IntPtrT> offset = ElementOffsetFromIndex(
index, kind, FixedArray::kHeaderSize - kHeapObjectTag);
- body(fixed_array, offset);
+ body(CAST(fixed_array), offset);
}
}
return;
@@ -10364,7 +10316,7 @@ void CodeStubAssembler::BuildFastFixedArrayForEach(
int increment = IsDoubleElementsKind(kind) ? kDoubleSize : kTaggedSize;
BuildFastLoop<IntPtrT>(
vars, start, limit,
- [&](TNode<IntPtrT> offset) { body(fixed_array, offset); },
+ [&](TNode<IntPtrT> offset) { body(CAST(fixed_array), offset); },
direction == ForEachDirection::kReverse ? -increment : increment,
direction == ForEachDirection::kReverse ? IndexAdvanceMode::kPre
: IndexAdvanceMode::kPost);
@@ -11013,7 +10965,7 @@ void CodeStubAssembler::GenerateEqual_Same(SloppyTNode<Object> value,
BIND(&if_boolean);
{
- CombineFeedback(var_type_feedback, CompareOperationFeedback::kAny);
+ CombineFeedback(var_type_feedback, CompareOperationFeedback::kBoolean);
Goto(if_equal);
}
@@ -11095,60 +11047,75 @@ TNode<Oddball> CodeStubAssembler::Equal(SloppyTNode<Object> left,
BIND(&if_left_smi);
{
Label if_right_smi(this), if_right_not_smi(this);
+ CombineFeedback(var_type_feedback,
+ CompareOperationFeedback::kSignedSmall);
Branch(TaggedIsSmi(right), &if_right_smi, &if_right_not_smi);
BIND(&if_right_smi);
{
// We have already checked for {left} and {right} being the same value,
// so when we get here they must be different Smis.
- CombineFeedback(var_type_feedback,
- CompareOperationFeedback::kSignedSmall);
Goto(&if_notequal);
}
BIND(&if_right_not_smi);
- TNode<Map> right_map = LoadMap(CAST(right));
- Label if_right_heapnumber(this), if_right_boolean(this),
- if_right_bigint(this, Label::kDeferred),
- if_right_receiver(this, Label::kDeferred);
- GotoIf(IsHeapNumberMap(right_map), &if_right_heapnumber);
- // {left} is Smi and {right} is not HeapNumber or Smi.
- if (var_type_feedback != nullptr) {
- *var_type_feedback = SmiConstant(CompareOperationFeedback::kAny);
- }
- GotoIf(IsBooleanMap(right_map), &if_right_boolean);
- TNode<Uint16T> right_type = LoadMapInstanceType(right_map);
- GotoIf(IsStringInstanceType(right_type), &do_right_stringtonumber);
- GotoIf(IsBigIntInstanceType(right_type), &if_right_bigint);
- Branch(IsJSReceiverInstanceType(right_type), &if_right_receiver,
- &if_notequal);
-
- BIND(&if_right_heapnumber);
{
- var_left_float = SmiToFloat64(CAST(left));
- var_right_float = LoadHeapNumberValue(CAST(right));
- CombineFeedback(var_type_feedback, CompareOperationFeedback::kNumber);
- Goto(&do_float_comparison);
- }
+ TNode<Map> right_map = LoadMap(CAST(right));
+ Label if_right_heapnumber(this), if_right_boolean(this),
+ if_right_oddball(this), if_right_bigint(this, Label::kDeferred),
+ if_right_receiver(this, Label::kDeferred);
+ GotoIf(IsHeapNumberMap(right_map), &if_right_heapnumber);
- BIND(&if_right_boolean);
- {
- var_right = LoadObjectField(CAST(right), Oddball::kToNumberOffset);
- Goto(&loop);
- }
+ // {left} is Smi and {right} is not HeapNumber or Smi.
+ TNode<Uint16T> right_type = LoadMapInstanceType(right_map);
+ GotoIf(IsStringInstanceType(right_type), &do_right_stringtonumber);
+ GotoIf(IsOddballInstanceType(right_type), &if_right_oddball);
+ GotoIf(IsBigIntInstanceType(right_type), &if_right_bigint);
+ GotoIf(IsJSReceiverInstanceType(right_type), &if_right_receiver);
+ CombineFeedback(var_type_feedback, CompareOperationFeedback::kAny);
+ Goto(&if_notequal);
- BIND(&if_right_bigint);
- {
- result = CAST(CallRuntime(Runtime::kBigIntEqualToNumber,
- NoContextConstant(), right, left));
- Goto(&end);
- }
+ BIND(&if_right_heapnumber);
+ {
+ CombineFeedback(var_type_feedback, CompareOperationFeedback::kNumber);
+ var_left_float = SmiToFloat64(CAST(left));
+ var_right_float = LoadHeapNumberValue(CAST(right));
+ Goto(&do_float_comparison);
+ }
- BIND(&if_right_receiver);
- {
- Callable callable = CodeFactory::NonPrimitiveToPrimitive(isolate());
- var_right = CallStub(callable, context, right);
- Goto(&loop);
+ BIND(&if_right_oddball);
+ {
+ Label if_right_boolean(this);
+ GotoIf(IsBooleanMap(right_map), &if_right_boolean);
+ CombineFeedback(var_type_feedback,
+ CompareOperationFeedback::kOddball);
+ Goto(&if_notequal);
+
+ BIND(&if_right_boolean);
+ {
+ CombineFeedback(var_type_feedback,
+ CompareOperationFeedback::kBoolean);
+ var_right = LoadObjectField(CAST(right), Oddball::kToNumberOffset);
+ Goto(&loop);
+ }
+ }
+
+ BIND(&if_right_bigint);
+ {
+ CombineFeedback(var_type_feedback, CompareOperationFeedback::kBigInt);
+ result = CAST(CallRuntime(Runtime::kBigIntEqualToNumber,
+ NoContextConstant(), right, left));
+ Goto(&end);
+ }
+
+ BIND(&if_right_receiver);
+ {
+ CombineFeedback(var_type_feedback,
+ CompareOperationFeedback::kReceiver);
+ Callable callable = CodeFactory::NonPrimitiveToPrimitive(isolate());
+ var_right = CallStub(callable, context, right);
+ Goto(&loop);
+ }
}
}
@@ -11187,29 +11154,41 @@ TNode<Oddball> CodeStubAssembler::Equal(SloppyTNode<Object> left,
BIND(&if_left_number);
{
Label if_right_not_number(this);
+
+ CombineFeedback(var_type_feedback, CompareOperationFeedback::kNumber);
GotoIf(Word32NotEqual(left_type, right_type), &if_right_not_number);
var_left_float = LoadHeapNumberValue(CAST(left));
var_right_float = LoadHeapNumberValue(CAST(right));
- CombineFeedback(var_type_feedback, CompareOperationFeedback::kNumber);
Goto(&do_float_comparison);
BIND(&if_right_not_number);
{
- Label if_right_boolean(this);
- if (var_type_feedback != nullptr) {
- *var_type_feedback = SmiConstant(CompareOperationFeedback::kAny);
- }
+ Label if_right_oddball(this);
+
GotoIf(IsStringInstanceType(right_type), &do_right_stringtonumber);
- GotoIf(IsBooleanMap(right_map), &if_right_boolean);
+ GotoIf(IsOddballInstanceType(right_type), &if_right_oddball);
GotoIf(IsBigIntInstanceType(right_type), &use_symmetry);
- Branch(IsJSReceiverInstanceType(right_type), &use_symmetry,
- &if_notequal);
+ GotoIf(IsJSReceiverInstanceType(right_type), &use_symmetry);
+ CombineFeedback(var_type_feedback, CompareOperationFeedback::kAny);
+ Goto(&if_notequal);
- BIND(&if_right_boolean);
+ BIND(&if_right_oddball);
{
- var_right = LoadObjectField(CAST(right), Oddball::kToNumberOffset);
- Goto(&loop);
+ Label if_right_boolean(this);
+ GotoIf(IsBooleanMap(right_map), &if_right_boolean);
+ CombineFeedback(var_type_feedback,
+ CompareOperationFeedback::kOddball);
+ Goto(&if_notequal);
+
+ BIND(&if_right_boolean);
+ {
+ CombineFeedback(var_type_feedback,
+ CompareOperationFeedback::kBoolean);
+ var_right =
+ LoadObjectField(CAST(right), Oddball::kToNumberOffset);
+ Goto(&loop);
+ }
}
}
}
@@ -11218,6 +11197,8 @@ TNode<Oddball> CodeStubAssembler::Equal(SloppyTNode<Object> left,
{
Label if_right_heapnumber(this), if_right_bigint(this),
if_right_string(this), if_right_boolean(this);
+ CombineFeedback(var_type_feedback, CompareOperationFeedback::kBigInt);
+
GotoIf(IsHeapNumberMap(right_map), &if_right_heapnumber);
GotoIf(IsBigIntInstanceType(right_type), &if_right_bigint);
GotoIf(IsStringInstanceType(right_type), &if_right_string);
@@ -11227,9 +11208,7 @@ TNode<Oddball> CodeStubAssembler::Equal(SloppyTNode<Object> left,
BIND(&if_right_heapnumber);
{
- if (var_type_feedback != nullptr) {
- *var_type_feedback = SmiConstant(CompareOperationFeedback::kAny);
- }
+ CombineFeedback(var_type_feedback, CompareOperationFeedback::kNumber);
result = CAST(CallRuntime(Runtime::kBigIntEqualToNumber,
NoContextConstant(), left, right));
Goto(&end);
@@ -11237,7 +11216,7 @@ TNode<Oddball> CodeStubAssembler::Equal(SloppyTNode<Object> left,
BIND(&if_right_bigint);
{
- CombineFeedback(var_type_feedback, CompareOperationFeedback::kBigInt);
+ // We already have BigInt feedback.
result = CAST(CallRuntime(Runtime::kBigIntEqualToBigInt,
NoContextConstant(), left, right));
Goto(&end);
@@ -11245,9 +11224,7 @@ TNode<Oddball> CodeStubAssembler::Equal(SloppyTNode<Object> left,
BIND(&if_right_string);
{
- if (var_type_feedback != nullptr) {
- *var_type_feedback = SmiConstant(CompareOperationFeedback::kAny);
- }
+ CombineFeedback(var_type_feedback, CompareOperationFeedback::kString);
result = CAST(CallRuntime(Runtime::kBigIntEqualToString,
NoContextConstant(), left, right));
Goto(&end);
@@ -11255,9 +11232,8 @@ TNode<Oddball> CodeStubAssembler::Equal(SloppyTNode<Object> left,
BIND(&if_right_boolean);
{
- if (var_type_feedback != nullptr) {
- *var_type_feedback = SmiConstant(CompareOperationFeedback::kAny);
- }
+ CombineFeedback(var_type_feedback,
+ CompareOperationFeedback::kBoolean);
var_right = LoadObjectField(CAST(right), Oddball::kToNumberOffset);
Goto(&loop);
}
@@ -11266,35 +11242,60 @@ TNode<Oddball> CodeStubAssembler::Equal(SloppyTNode<Object> left,
BIND(&if_left_oddball);
{
Label if_left_boolean(this), if_left_not_boolean(this);
- Branch(IsBooleanMap(left_map), &if_left_boolean, &if_left_not_boolean);
+ GotoIf(IsBooleanMap(left_map), &if_left_boolean);
+ if (var_type_feedback != nullptr) {
+ CombineFeedback(var_type_feedback,
+ CompareOperationFeedback::kNullOrUndefined);
+ GotoIf(IsUndetectableMap(left_map), &if_left_not_boolean);
+ }
+ Goto(&if_left_not_boolean);
BIND(&if_left_not_boolean);
{
// {left} is either Null or Undefined. Check if {right} is
// undetectable (which includes Null and Undefined).
- Label if_right_undetectable(this), if_right_not_undetectable(this);
- Branch(IsUndetectableMap(right_map), &if_right_undetectable,
- &if_right_not_undetectable);
+ Label if_right_undetectable(this), if_right_number(this),
+ if_right_oddball(this),
+ if_right_not_number_or_oddball_or_undetectable(this);
+ GotoIf(IsUndetectableMap(right_map), &if_right_undetectable);
+ GotoIf(IsHeapNumberInstanceType(right_type), &if_right_number);
+ GotoIf(IsOddballInstanceType(right_type), &if_right_oddball);
+ Goto(&if_right_not_number_or_oddball_or_undetectable);
BIND(&if_right_undetectable);
{
- if (var_type_feedback != nullptr) {
- // If {right} is undetectable, it must be either also
- // Null or Undefined, or a Receiver (aka document.all).
- *var_type_feedback = SmiConstant(
- CompareOperationFeedback::kReceiverOrNullOrUndefined);
- }
+ // If {right} is undetectable, it must be either also
+ // Null or Undefined, or a Receiver (aka document.all).
+ CombineFeedback(
+ var_type_feedback,
+ CompareOperationFeedback::kReceiverOrNullOrUndefined);
Goto(&if_equal);
}
- BIND(&if_right_not_undetectable);
+ BIND(&if_right_number);
+ {
+ CombineFeedback(var_type_feedback,
+ CompareOperationFeedback::kNumber);
+ Goto(&if_notequal);
+ }
+
+ BIND(&if_right_oddball);
+ {
+ CombineFeedback(var_type_feedback,
+ CompareOperationFeedback::kOddball);
+ Goto(&if_notequal);
+ }
+
+ BIND(&if_right_not_number_or_oddball_or_undetectable);
{
if (var_type_feedback != nullptr) {
// Track whether {right} is Null, Undefined or Receiver.
- *var_type_feedback = SmiConstant(
+ CombineFeedback(
+ var_type_feedback,
CompareOperationFeedback::kReceiverOrNullOrUndefined);
GotoIf(IsJSReceiverInstanceType(right_type), &if_notequal);
- *var_type_feedback = SmiConstant(CompareOperationFeedback::kAny);
+ CombineFeedback(var_type_feedback,
+ CompareOperationFeedback::kAny);
}
Goto(&if_notequal);
}
@@ -11302,9 +11303,8 @@ TNode<Oddball> CodeStubAssembler::Equal(SloppyTNode<Object> left,
BIND(&if_left_boolean);
{
- if (var_type_feedback != nullptr) {
- *var_type_feedback = SmiConstant(CompareOperationFeedback::kAny);
- }
+ CombineFeedback(var_type_feedback,
+ CompareOperationFeedback::kBoolean);
// If {right} is a Boolean too, it must be a different Boolean.
GotoIf(TaggedEqual(right_map, left_map), &if_notequal);
@@ -11387,9 +11387,7 @@ TNode<Oddball> CodeStubAssembler::Equal(SloppyTNode<Object> left,
{
// {right} is a Primitive, and neither Null or Undefined;
// convert {left} to Primitive too.
- if (var_type_feedback != nullptr) {
- *var_type_feedback = SmiConstant(CompareOperationFeedback::kAny);
- }
+ CombineFeedback(var_type_feedback, CompareOperationFeedback::kAny);
Callable callable = CodeFactory::NonPrimitiveToPrimitive(isolate());
var_left = CallStub(callable, context, left);
Goto(&loop);
@@ -11400,6 +11398,12 @@ TNode<Oddball> CodeStubAssembler::Equal(SloppyTNode<Object> left,
BIND(&do_right_stringtonumber);
{
+ if (var_type_feedback != nullptr) {
+ TNode<Map> right_map = LoadMap(CAST(right));
+ TNode<Uint16T> right_type = LoadMapInstanceType(right_map);
+ CombineFeedback(var_type_feedback,
+ CollectFeedbackForString(right_type));
+ }
var_right = CallBuiltin(Builtins::kStringToNumber, context, right);
Goto(&loop);
}
@@ -11678,15 +11682,47 @@ TNode<Oddball> CodeStubAssembler::StrictEqual(
BIND(&if_lhsisoddball);
{
- STATIC_ASSERT(LAST_PRIMITIVE_HEAP_OBJECT_TYPE == ODDBALL_TYPE);
- GotoIf(IsBooleanMap(rhs_map), &if_not_equivalent_types);
- GotoIf(Int32LessThan(rhs_instance_type,
- Int32Constant(ODDBALL_TYPE)),
- &if_not_equivalent_types);
- OverwriteFeedback(
- var_type_feedback,
- CompareOperationFeedback::kReceiverOrNullOrUndefined);
- Goto(&if_notequal);
+ Label if_lhsisboolean(this), if_lhsisnotboolean(this);
+ Branch(IsBooleanMap(lhs_map), &if_lhsisboolean,
+ &if_lhsisnotboolean);
+
+ BIND(&if_lhsisboolean);
+ {
+ OverwriteFeedback(var_type_feedback,
+ CompareOperationFeedback::kNumberOrOddball);
+ GotoIf(IsBooleanMap(rhs_map), &if_notequal);
+ Goto(&if_not_equivalent_types);
+ }
+
+ BIND(&if_lhsisnotboolean);
+ {
+ Label if_rhsisheapnumber(this), if_rhsisnotheapnumber(this);
+
+ STATIC_ASSERT(LAST_PRIMITIVE_HEAP_OBJECT_TYPE ==
+ ODDBALL_TYPE);
+ GotoIf(Int32LessThan(rhs_instance_type,
+ Int32Constant(ODDBALL_TYPE)),
+ &if_not_equivalent_types);
+
+ Branch(IsHeapNumberMap(rhs_map), &if_rhsisheapnumber,
+ &if_rhsisnotheapnumber);
+
+ BIND(&if_rhsisheapnumber);
+ {
+ OverwriteFeedback(
+ var_type_feedback,
+ CompareOperationFeedback::kNumberOrOddball);
+ Goto(&if_not_equivalent_types);
+ }
+
+ BIND(&if_rhsisnotheapnumber);
+ {
+ OverwriteFeedback(
+ var_type_feedback,
+ CompareOperationFeedback::kReceiverOrNullOrUndefined);
+ Goto(&if_notequal);
+ }
+ }
}
BIND(&if_lhsissymbol);
@@ -11742,7 +11778,14 @@ TNode<Oddball> CodeStubAssembler::StrictEqual(
}
BIND(&if_rhsisnotnumber);
- Goto(&if_not_equivalent_types);
+ {
+ TNode<Uint16T> rhs_instance_type = LoadMapInstanceType(rhs_map);
+ GotoIfNot(IsOddballInstanceType(rhs_instance_type),
+ &if_not_equivalent_types);
+ OverwriteFeedback(var_type_feedback,
+ CompareOperationFeedback::kNumberOrOddball);
+ Goto(&if_notequal);
+ }
}
}
}
@@ -12380,28 +12423,6 @@ TNode<Number> CodeStubAssembler::BitwiseOp(TNode<Word32T> left32,
UNREACHABLE();
}
-// ES #sec-createarrayiterator
-TNode<JSArrayIterator> CodeStubAssembler::CreateArrayIterator(
- TNode<Context> context, TNode<Object> object, IterationKind kind) {
- TNode<NativeContext> native_context = LoadNativeContext(context);
- TNode<Map> iterator_map = CAST(LoadContextElement(
- native_context, Context::INITIAL_ARRAY_ITERATOR_MAP_INDEX));
- TNode<HeapObject> iterator = Allocate(JSArrayIterator::kHeaderSize);
- StoreMapNoWriteBarrier(iterator, iterator_map);
- StoreObjectFieldRoot(iterator, JSArrayIterator::kPropertiesOrHashOffset,
- RootIndex::kEmptyFixedArray);
- StoreObjectFieldRoot(iterator, JSArrayIterator::kElementsOffset,
- RootIndex::kEmptyFixedArray);
- StoreObjectFieldNoWriteBarrier(
- iterator, JSArrayIterator::kIteratedObjectOffset, object);
- StoreObjectFieldNoWriteBarrier(iterator, JSArrayIterator::kNextIndexOffset,
- SmiConstant(0));
- StoreObjectFieldNoWriteBarrier(
- iterator, JSArrayIterator::kKindOffset,
- SmiConstant(Smi::FromInt(static_cast<int>(kind))));
- return CAST(iterator);
-}
-
TNode<JSObject> CodeStubAssembler::AllocateJSIteratorResult(
SloppyTNode<Context> context, SloppyTNode<Object> value,
SloppyTNode<Oddball> done) {
@@ -12508,10 +12529,8 @@ TNode<UintPtrT> CodeStubAssembler::LoadJSTypedArrayLength(
}
CodeStubArguments::CodeStubArguments(CodeStubAssembler* assembler,
- TNode<IntPtrT> argc, TNode<RawPtrT> fp,
- ReceiverMode receiver_mode)
+ TNode<IntPtrT> argc, TNode<RawPtrT> fp)
: assembler_(assembler),
- receiver_mode_(receiver_mode),
argc_(argc),
base_(),
fp_(fp != nullptr ? fp : assembler_->LoadFramePointer()) {
@@ -12531,7 +12550,6 @@ CodeStubArguments::CodeStubArguments(CodeStubAssembler* assembler,
}
TNode<Object> CodeStubArguments::GetReceiver() const {
- DCHECK_EQ(receiver_mode_, ReceiverMode::kHasReceiver);
#ifdef V8_REVERSE_JSARGS
intptr_t offset = -kSystemPointerSize;
#else
@@ -12541,7 +12559,6 @@ TNode<Object> CodeStubArguments::GetReceiver() const {
}
void CodeStubArguments::SetReceiver(TNode<Object> object) const {
- DCHECK_EQ(receiver_mode_, ReceiverMode::kHasReceiver);
#ifdef V8_REVERSE_JSARGS
intptr_t offset = -kSystemPointerSize;
#else
@@ -12575,26 +12592,6 @@ TNode<Object> CodeStubArguments::AtIndex(int index) const {
}
TNode<Object> CodeStubArguments::GetOptionalArgumentValue(
- int index, TNode<Object> default_value) {
- CodeStubAssembler::TVariable<Object> result(assembler_);
- CodeStubAssembler::Label argument_missing(assembler_),
- argument_done(assembler_, &result);
-
- assembler_->GotoIf(assembler_->UintPtrGreaterThanOrEqual(
- assembler_->IntPtrConstant(index), argc_),
- &argument_missing);
- result = AtIndex(index);
- assembler_->Goto(&argument_done);
-
- assembler_->BIND(&argument_missing);
- result = default_value;
- assembler_->Goto(&argument_done);
-
- assembler_->BIND(&argument_done);
- return result.value();
-}
-
-TNode<Object> CodeStubArguments::GetOptionalArgumentValue(
TNode<IntPtrT> index, TNode<Object> default_value) {
CodeStubAssembler::TVariable<Object> result(assembler_);
CodeStubAssembler::Label argument_missing(assembler_),
@@ -12641,13 +12638,8 @@ void CodeStubArguments::ForEach(
}
void CodeStubArguments::PopAndReturn(TNode<Object> value) {
- TNode<IntPtrT> pop_count;
- if (receiver_mode_ == ReceiverMode::kHasReceiver) {
- pop_count = assembler_->IntPtrAdd(argc_, assembler_->IntPtrConstant(1));
- } else {
- pop_count = argc_;
- }
-
+ TNode<IntPtrT> pop_count =
+ assembler_->IntPtrAdd(argc_, assembler_->IntPtrConstant(1));
assembler_->PopAndReturn(pop_count, value);
}
@@ -13085,17 +13077,9 @@ TNode<Object> CodeStubAssembler::CallApiCallback(
TNode<Object> context, TNode<RawPtrT> callback, TNode<IntPtrT> argc,
TNode<Object> data, TNode<Object> holder, TNode<Object> receiver,
TNode<Object> value) {
- // CallApiCallback receives the first four arguments in registers
- // (callback, argc, data and holder). The last arguments are in the stack in
- // JS ordering. See ApiCallbackDescriptor.
Callable callable = CodeFactory::CallApiCallback(isolate());
-#ifdef V8_REVERSE_JSARGS
- return CallStub(callable, context, callback, argc, data, holder, value,
- receiver);
-#else
return CallStub(callable, context, callback, argc, data, holder, receiver,
value);
-#endif
}
TNode<Object> CodeStubAssembler::CallRuntimeNewArray(
diff --git a/chromium/v8/src/codegen/code-stub-assembler.h b/chromium/v8/src/codegen/code-stub-assembler.h
index b01729c73db..a1369993994 100644
--- a/chromium/v8/src/codegen/code-stub-assembler.h
+++ b/chromium/v8/src/codegen/code-stub-assembler.h
@@ -107,6 +107,10 @@ enum class PrimitiveType { kBoolean, kNumber, kString, kSymbol };
V(TypedArraySpeciesProtector, typed_array_species_protector, \
TypedArraySpeciesProtector)
+#define UNIQUE_INSTANCE_TYPE_IMMUTABLE_IMMOVABLE_MAP_ADAPTER( \
+ V, rootIndexName, rootAccessorName, class_name) \
+ V(rootIndexName, rootAccessorName, class_name##Map)
+
#define HEAP_IMMUTABLE_IMMOVABLE_OBJECT_LIST(V) \
V(AccessorInfoMap, accessor_info_map, AccessorInfoMap) \
V(AccessorPairMap, accessor_pair_map, AccessorPairMap) \
@@ -137,6 +141,7 @@ enum class PrimitiveType { kBoolean, kNumber, kString, kSymbol };
EmptySlowElementDictionary) \
V(empty_string, empty_string, EmptyString) \
V(error_to_string, error_to_string, ErrorToString) \
+ V(errors_string, errors_string, ErrorsString) \
V(FalseValue, false_value, False) \
V(FeedbackVectorMap, feedback_vector_map, FeedbackVectorMap) \
V(FixedArrayMap, fixed_array_map, FixedArrayMap) \
@@ -168,7 +173,7 @@ enum class PrimitiveType { kBoolean, kNumber, kString, kSymbol };
V(NoClosuresCellMap, no_closures_cell_map, NoClosuresCellMap) \
V(null_to_string, null_to_string, NullToString) \
V(NullValue, null_value, Null) \
- V(number_string, number_string, numberString) \
+ V(number_string, number_string, NumberString) \
V(number_to_string, number_to_string, NumberToString) \
V(Object_string, Object_string, ObjectString) \
V(object_to_string, object_to_string, ObjectToString) \
@@ -195,8 +200,6 @@ enum class PrimitiveType { kBoolean, kNumber, kString, kSymbol };
V(resolve_string, resolve_string, ResolveString) \
V(return_string, return_string, ReturnString) \
V(SharedFunctionInfoMap, shared_function_info_map, SharedFunctionInfoMap) \
- V(SloppyArgumentsElementsMap, sloppy_arguments_elements_map, \
- SloppyArgumentsElementsMap) \
V(SmallOrderedHashSetMap, small_ordered_hash_set_map, \
SmallOrderedHashSetMap) \
V(SmallOrderedHashMapMap, small_ordered_hash_map_map, \
@@ -212,6 +215,8 @@ enum class PrimitiveType { kBoolean, kNumber, kString, kSymbol };
V(SymbolMap, symbol_map, SymbolMap) \
V(TheHoleValue, the_hole_value, TheHole) \
V(then_string, then_string, ThenString) \
+ V(toString_string, toString_string, ToStringString) \
+ V(to_primitive_symbol, to_primitive_symbol, ToPrimitiveSymbol) \
V(to_string_tag_symbol, to_string_tag_symbol, ToStringTagSymbol) \
V(TransitionArrayMap, transition_array_map, TransitionArrayMap) \
V(TrueValue, true_value, True) \
@@ -228,9 +233,11 @@ enum class PrimitiveType { kBoolean, kNumber, kString, kSymbol };
V(undefined_to_string, undefined_to_string, UndefinedToString) \
V(UndefinedValue, undefined_value, Undefined) \
V(uninitialized_symbol, uninitialized_symbol, UninitializedSymbol) \
+ V(valueOf_string, valueOf_string, ValueOfString) \
V(WeakFixedArrayMap, weak_fixed_array_map, WeakFixedArrayMap) \
V(zero_string, zero_string, ZeroString) \
- TORQUE_INTERNAL_MAP_CSA_LIST(V)
+ UNIQUE_INSTANCE_TYPE_MAP_LIST_GENERATOR( \
+ UNIQUE_INSTANCE_TYPE_IMMUTABLE_IMMOVABLE_MAP_ADAPTER, V)
#define HEAP_IMMOVABLE_OBJECT_LIST(V) \
HEAP_MUTABLE_IMMOVABLE_OBJECT_LIST(V) \
@@ -367,15 +374,6 @@ class V8_EXPORT_PRIVATE CodeStubAssembler
#endif
}
- MachineRepresentation ParameterRepresentation(ParameterMode mode) const {
- return mode == INTPTR_PARAMETERS ? MachineType::PointerRepresentation()
- : MachineRepresentation::kTaggedSigned;
- }
-
- MachineRepresentation OptimalParameterRepresentation() const {
- return ParameterRepresentation(OptimalParameterMode());
- }
-
TNode<IntPtrT> ParameterToIntPtr(TNode<Smi> value) { return SmiUntag(value); }
TNode<IntPtrT> ParameterToIntPtr(TNode<IntPtrT> value) { return value; }
// TODO(v8:9708): remove once all uses are ported.
@@ -384,24 +382,20 @@ class V8_EXPORT_PRIVATE CodeStubAssembler
return UncheckedCast<IntPtrT>(value);
}
- template <typename TIndex>
- TNode<TIndex> IntPtrToParameter(TNode<IntPtrT> value);
+ TNode<Smi> ParameterToTagged(TNode<Smi> value) { return value; }
- Node* IntPtrToParameter(SloppyTNode<IntPtrT> value, ParameterMode mode) {
- if (mode == SMI_PARAMETERS) return SmiTag(value);
- return value;
- }
-
- Node* Int32ToParameter(SloppyTNode<Int32T> value, ParameterMode mode) {
- return IntPtrToParameter(ChangeInt32ToIntPtr(value), mode);
- }
+ TNode<Smi> ParameterToTagged(TNode<IntPtrT> value) { return SmiTag(value); }
TNode<Smi> ParameterToTagged(Node* value, ParameterMode mode) {
if (mode != SMI_PARAMETERS) return SmiTag(value);
return UncheckedCast<Smi>(value);
}
- Node* TaggedToParameter(SloppyTNode<Smi> value, ParameterMode mode) {
+ template <typename TIndex>
+ TNode<TIndex> TaggedToParameter(TNode<Smi> value);
+
+ // TODO(v8:9708): remove once all uses are ported.
+ Node* TaggedToParameter(TNode<Smi> value, ParameterMode mode) {
if (mode != SMI_PARAMETERS) return SmiUntag(value);
return value;
}
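A minimal sketch of the templated replacement above (it assumes the usual specializations for Smi and IntPtrT; BInt resolves to one of the two depending on the platform, and |smi_value| is a placeholder):

    // Which conversion runs is chosen at compile time via the template argument.
    TNode<IntPtrT> as_intptr = TaggedToParameter<IntPtrT>(smi_value);  // SmiUntag
    TNode<Smi> as_smi = TaggedToParameter<Smi>(smi_value);             // no-op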
@@ -481,19 +475,8 @@ class V8_EXPORT_PRIVATE CodeStubAssembler
return UncheckedCast<HeapObject>(value);
}
- TNode<JSAggregateError> HeapObjectToJSAggregateError(
- TNode<HeapObject> heap_object, Label* fail);
-
- TNode<JSArray> HeapObjectToJSArray(TNode<HeapObject> heap_object,
- Label* fail) {
- GotoIfNot(IsJSArray(heap_object), fail);
- return UncheckedCast<JSArray>(heap_object);
- }
-
- TNode<JSArrayBuffer> HeapObjectToJSArrayBuffer(TNode<HeapObject> heap_object,
- Label* fail) {
- GotoIfNot(IsJSArrayBuffer(heap_object), fail);
- return UncheckedCast<JSArrayBuffer>(heap_object);
+ TNode<Uint16T> Uint16Constant(uint16_t t) {
+ return UncheckedCast<Uint16T>(Int32Constant(t));
}
TNode<JSArray> TaggedToFastJSArray(TNode<Context> context,
@@ -659,11 +642,6 @@ class V8_EXPORT_PRIVATE CodeStubAssembler
// TODO(v8:9708): remove once all uses are ported.
Node* IntPtrOrSmiConstant(int value, ParameterMode mode);
- bool IsIntPtrOrSmiConstantZero(TNode<Smi> test);
- bool IsIntPtrOrSmiConstantZero(TNode<IntPtrT> test);
- // TODO(v8:9708): remove once all uses are ported.
- bool IsIntPtrOrSmiConstantZero(Node* test, ParameterMode mode);
-
bool TryGetIntPtrOrSmiConstantValue(Node* maybe_constant, int* value,
ParameterMode mode);
@@ -777,15 +755,6 @@ class V8_EXPORT_PRIVATE CodeStubAssembler
}
}
- Node* WordOrSmiShl(Node* a, int shift, ParameterMode mode) {
- if (mode == SMI_PARAMETERS) {
- return SmiShl(CAST(a), shift);
- } else {
- DCHECK_EQ(INTPTR_PARAMETERS, mode);
- return WordShl(a, shift);
- }
- }
-
Node* WordOrSmiShr(Node* a, int shift, ParameterMode mode) {
if (mode == SMI_PARAMETERS) {
return SmiShr(CAST(a), shift);
@@ -1437,11 +1406,10 @@ class V8_EXPORT_PRIVATE CodeStubAssembler
// This doesn't emit a bounds-check. As part of the security-performance
// tradeoff, only use it if it is performance critical.
TNode<Object> UnsafeLoadFixedArrayElement(
- TNode<FixedArray> object, Node* index, int additional_offset = 0,
- ParameterMode parameter_mode = INTPTR_PARAMETERS,
+ TNode<FixedArray> object, TNode<IntPtrT> index, int additional_offset = 0,
LoadSensitivity needs_poisoning = LoadSensitivity::kSafe) {
return LoadFixedArrayElement(object, index, additional_offset,
- parameter_mode, needs_poisoning,
+ INTPTR_PARAMETERS, needs_poisoning,
CheckBounds::kDebugOnly);
}
@@ -1452,14 +1420,6 @@ class V8_EXPORT_PRIVATE CodeStubAssembler
return LoadFixedArrayElement(object, index, 0, INTPTR_PARAMETERS,
needs_poisoning, check_bounds);
}
- // This doesn't emit a bounds-check. As part of the security-performance
- // tradeoff, only use it if it is performance critical.
- TNode<Object> UnsafeLoadFixedArrayElement(TNode<FixedArray> object,
- TNode<IntPtrT> index,
- LoadSensitivity needs_poisoning) {
- return LoadFixedArrayElement(object, index, needs_poisoning,
- CheckBounds::kDebugOnly);
- }
TNode<Object> LoadFixedArrayElement(
TNode<FixedArray> object, TNode<IntPtrT> index, int additional_offset = 0,
@@ -1514,18 +1474,9 @@ class V8_EXPORT_PRIVATE CodeStubAssembler
}
// Load an array element from a WeakFixedArray.
- TNode<MaybeObject> LoadWeakFixedArrayElement(
- TNode<WeakFixedArray> object, Node* index, int additional_offset = 0,
- ParameterMode parameter_mode = INTPTR_PARAMETERS,
- LoadSensitivity needs_poisoning = LoadSensitivity::kSafe);
-
- TNode<MaybeObject> LoadWeakFixedArrayElement(
- TNode<WeakFixedArray> object, int index, int additional_offset = 0,
- LoadSensitivity needs_poisoning = LoadSensitivity::kSafe) {
- return LoadWeakFixedArrayElement(object, IntPtrConstant(index),
- additional_offset, INTPTR_PARAMETERS,
- needs_poisoning);
- }
+ TNode<MaybeObject> LoadWeakFixedArrayElement(TNode<WeakFixedArray> object,
+ TNode<IntPtrT> index,
+ int additional_offset = 0);
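Illustrative call sites under the narrowed signature (a sketch; |weak_array| and |smi_index| are placeholders):

    TNode<MaybeObject> first = LoadWeakFixedArrayElement(weak_array, IntPtrConstant(0));
    TNode<MaybeObject> at_i = LoadWeakFixedArrayElement(weak_array, SmiUntag(smi_index));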
// Load an array element from a FixedDoubleArray.
TNode<Float64T> LoadFixedDoubleArrayElement(
@@ -1845,9 +1796,9 @@ class V8_EXPORT_PRIVATE CodeStubAssembler
TNode<Int32T> EnsureArrayPushable(TNode<Context> context, TNode<Map> map,
Label* bailout);
- void TryStoreArrayElement(ElementsKind kind, ParameterMode mode,
- Label* bailout, TNode<FixedArrayBase> elements,
- Node* index, TNode<Object> value);
+ void TryStoreArrayElement(ElementsKind kind, Label* bailout,
+ TNode<FixedArrayBase> elements, TNode<BInt> index,
+ TNode<Object> value);
// Consumes args into the array, and returns tagged new length.
TNode<Smi> BuildAppendJSArray(ElementsKind kind, TNode<JSArray> array,
CodeStubArguments* args,
@@ -1961,45 +1912,43 @@ class V8_EXPORT_PRIVATE CodeStubAssembler
//
// Allocate and return a JSArray with initialized header fields and its
// uninitialized elements.
- // The ParameterMode argument is only used for the capacity parameter.
std::pair<TNode<JSArray>, TNode<FixedArrayBase>>
AllocateUninitializedJSArrayWithElements(
ElementsKind kind, TNode<Map> array_map, TNode<Smi> length,
- TNode<AllocationSite> allocation_site, TNode<IntPtrT> capacity,
- AllocationFlags allocation_flags = kNone,
+ base::Optional<TNode<AllocationSite>> allocation_site,
+ TNode<IntPtrT> capacity, AllocationFlags allocation_flags = kNone,
int array_header_size = JSArray::kHeaderSize);
// Allocate a JSArray and fill elements with the hole.
- TNode<JSArray> AllocateJSArray(ElementsKind kind, TNode<Map> array_map,
- TNode<IntPtrT> capacity, TNode<Smi> length,
- TNode<AllocationSite> allocation_site,
- AllocationFlags allocation_flags = kNone);
- TNode<JSArray> AllocateJSArray(ElementsKind kind, TNode<Map> array_map,
- TNode<Smi> capacity, TNode<Smi> length,
- TNode<AllocationSite> allocation_site,
- AllocationFlags allocation_flags = kNone) {
+ TNode<JSArray> AllocateJSArray(
+ ElementsKind kind, TNode<Map> array_map, TNode<IntPtrT> capacity,
+ TNode<Smi> length, base::Optional<TNode<AllocationSite>> allocation_site,
+ AllocationFlags allocation_flags = kNone);
+ TNode<JSArray> AllocateJSArray(
+ ElementsKind kind, TNode<Map> array_map, TNode<Smi> capacity,
+ TNode<Smi> length, base::Optional<TNode<AllocationSite>> allocation_site,
+ AllocationFlags allocation_flags = kNone) {
return AllocateJSArray(kind, array_map, SmiUntag(capacity), length,
allocation_site, allocation_flags);
}
TNode<JSArray> AllocateJSArray(ElementsKind kind, TNode<Map> array_map,
TNode<Smi> capacity, TNode<Smi> length,
AllocationFlags allocation_flags = kNone) {
- return AllocateJSArray(kind, array_map, SmiUntag(capacity), length, {},
- allocation_flags);
+ return AllocateJSArray(kind, array_map, SmiUntag(capacity), length,
+ base::nullopt, allocation_flags);
}
TNode<JSArray> AllocateJSArray(ElementsKind kind, TNode<Map> array_map,
TNode<IntPtrT> capacity, TNode<Smi> length,
AllocationFlags allocation_flags = kNone) {
- return AllocateJSArray(kind, array_map, capacity, length, {},
+ return AllocateJSArray(kind, array_map, capacity, length, base::nullopt,
allocation_flags);
}
// Allocate a JSArray and initialize the header fields.
- TNode<JSArray> AllocateJSArray(TNode<Map> array_map,
- TNode<FixedArrayBase> elements,
- TNode<Smi> length,
- TNode<AllocationSite> allocation_site = {},
- int array_header_size = JSArray::kHeaderSize);
+ TNode<JSArray> AllocateJSArray(
+ TNode<Map> array_map, TNode<FixedArrayBase> elements, TNode<Smi> length,
+ base::Optional<TNode<AllocationSite>> allocation_site = base::nullopt,
+ int array_header_size = JSArray::kHeaderSize);
enum class HoleConversionMode { kDontConvert, kConvertToUndefined };
// Clone a fast JSArray |array| into a new fast JSArray.
@@ -2014,15 +1963,12 @@ class V8_EXPORT_PRIVATE CodeStubAssembler
// function generates significantly less code in this case.
TNode<JSArray> CloneFastJSArray(
TNode<Context> context, TNode<JSArray> array,
- TNode<AllocationSite> allocation_site = {},
+ base::Optional<TNode<AllocationSite>> allocation_site = base::nullopt,
HoleConversionMode convert_holes = HoleConversionMode::kDontConvert);
TNode<JSArray> ExtractFastJSArray(TNode<Context> context,
- TNode<JSArray> array, Node* begin,
- Node* count,
- ParameterMode mode = INTPTR_PARAMETERS,
- Node* capacity = nullptr,
- TNode<AllocationSite> allocation_site = {});
+ TNode<JSArray> array, TNode<BInt> begin,
+ TNode<BInt> count);
TNode<FixedArrayBase> AllocateFixedArray(
ElementsKind kind, Node* capacity, ParameterMode mode = INTPTR_PARAMETERS,
@@ -2093,11 +2039,6 @@ class V8_EXPORT_PRIVATE CodeStubAssembler
Node* capacity, ParameterMode mode = INTPTR_PARAMETERS,
AllocationFlags flags = kNone);
- // Perform CreateArrayIterator (ES #sec-createarrayiterator).
- TNode<JSArrayIterator> CreateArrayIterator(TNode<Context> context,
- TNode<Object> object,
- IterationKind mode);
-
// TODO(v8:9722): Return type should be JSIteratorResult
TNode<JSObject> AllocateJSIteratorResult(SloppyTNode<Context> context,
SloppyTNode<Object> value,
@@ -2234,17 +2175,21 @@ class V8_EXPORT_PRIVATE CodeStubAssembler
return UncheckedCast<FixedDoubleArray>(base);
}
- TNode<SloppyArgumentsElements> HeapObjectToSloppyArgumentsElements(
- TNode<HeapObject> base, Label* cast_fail) {
- GotoIf(TaggedNotEqual(LoadMap(base), SloppyArgumentsElementsMapConstant()),
- cast_fail);
- return UncheckedCast<SloppyArgumentsElements>(base);
- }
-
TNode<Int32T> ConvertElementsKindToInt(TNode<Int32T> elements_kind) {
return UncheckedCast<Int32T>(elements_kind);
}
+ template <typename T>
+ bool ClassHasMapConstant() {
+ return false;
+ }
+
+ template <typename T>
+ TNode<Map> GetClassMapConstant() {
+ UNREACHABLE();
+ return TNode<Map>();
+ }
+
enum class ExtractFixedArrayFlag {
kFixedArrays = 1,
kFixedDoubleArrays = 2,
@@ -2295,20 +2240,32 @@ class V8_EXPORT_PRIVATE CodeStubAssembler
TNode<FixedArrayBase> ExtractFixedArray(
TNode<FixedArrayBase> source, TNode<Smi> first, TNode<Smi> count,
- TNode<Smi> capacity,
+ base::Optional<TNode<Smi>> capacity,
ExtractFixedArrayFlags extract_flags =
- ExtractFixedArrayFlag::kAllFixedArrays) {
- return ExtractFixedArray(source, first, count, capacity, extract_flags,
- SMI_PARAMETERS);
+ ExtractFixedArrayFlag::kAllFixedArrays,
+ TVariable<BoolT>* var_holes_converted = nullptr,
+ base::Optional<TNode<Int32T>> source_elements_kind = base::nullopt) {
+ // TODO(solanes): just use capacity when ExtractFixedArray is fully
+ // converted.
+ Node* capacity_node = capacity ? static_cast<Node*>(*capacity) : nullptr;
+ return ExtractFixedArray(source, first, count, capacity_node, extract_flags,
+ SMI_PARAMETERS, var_holes_converted,
+ source_elements_kind);
}
- TNode<FixedArray> ExtractFixedArray(
- TNode<FixedArray> source, TNode<IntPtrT> first, TNode<IntPtrT> count,
- TNode<IntPtrT> capacity,
+ TNode<FixedArrayBase> ExtractFixedArray(
+ TNode<FixedArrayBase> source, TNode<IntPtrT> first, TNode<IntPtrT> count,
+ base::Optional<TNode<IntPtrT>> capacity,
ExtractFixedArrayFlags extract_flags =
- ExtractFixedArrayFlag::kAllFixedArrays) {
- return CAST(ExtractFixedArray(source, first, count, capacity, extract_flags,
- INTPTR_PARAMETERS));
+ ExtractFixedArrayFlag::kAllFixedArrays,
+ TVariable<BoolT>* var_holes_converted = nullptr,
+ base::Optional<TNode<Int32T>> source_elements_kind = base::nullopt) {
+ // TODO(solanes): just use capacity when ExtractFixedArray is fully
+ // converted.
+ Node* capacity_node = capacity ? static_cast<Node*>(*capacity) : nullptr;
+ return ExtractFixedArray(source, first, count, capacity_node, extract_flags,
+ INTPTR_PARAMETERS, var_holes_converted,
+ source_elements_kind);
}
// Copy a portion of an existing FixedArray or FixedDoubleArray into a new
@@ -2400,12 +2357,12 @@ class V8_EXPORT_PRIVATE CodeStubAssembler
// (NOTE: not index!), does a hole check if |if_hole| is provided and
// converts the value so that it becomes ready for storing to array of
// |to_kind| elements.
- Node* LoadElementAndPrepareForStore(Node* array, Node* offset,
+ Node* LoadElementAndPrepareForStore(TNode<FixedArrayBase> array,
+ TNode<IntPtrT> offset,
ElementsKind from_kind,
ElementsKind to_kind, Label* if_hole);
- Node* CalculateNewElementsCapacity(Node* old_capacity,
- ParameterMode mode = INTPTR_PARAMETERS);
+ Node* CalculateNewElementsCapacity(Node* old_capacity, ParameterMode mode);
TNode<Smi> CalculateNewElementsCapacity(TNode<Smi> old_capacity) {
return CAST(CalculateNewElementsCapacity(old_capacity, SMI_PARAMETERS));
@@ -2425,11 +2382,12 @@ class V8_EXPORT_PRIVATE CodeStubAssembler
// Tries to grow the |capacity|-length |elements| array of given |object|
// to store the |key| or bails out if the growing gap is too big. Returns
// new elements.
+ template <typename TIndex>
TNode<FixedArrayBase> TryGrowElementsCapacity(TNode<HeapObject> object,
TNode<FixedArrayBase> elements,
- ElementsKind kind, Node* key,
- Node* capacity,
- ParameterMode mode,
+ ElementsKind kind,
+ TNode<TIndex> key,
+ TNode<TIndex> capacity,
Label* bailout);
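A usage sketch of the templated form; TIndex is expected to be Smi or IntPtrT (BInt in practice), and the names below are placeholders:

    TNode<FixedArrayBase> grown = TryGrowElementsCapacity<IntPtrT>(
        object, elements, PACKED_ELEMENTS, intptr_key, intptr_capacity, &bailout);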
// Grows elements capacity of given object. Returns new elements.
@@ -2441,10 +2399,10 @@ class V8_EXPORT_PRIVATE CodeStubAssembler
// Given a need to grow by |growth|, allocate an appropriate new capacity
// if necessary, and return a new elements FixedArray object. Label |bailout|
// is followed for allocation failure.
- void PossiblyGrowElementsCapacity(ParameterMode mode, ElementsKind kind,
- TNode<HeapObject> array, Node* length,
+ void PossiblyGrowElementsCapacity(ElementsKind kind, TNode<HeapObject> array,
+ TNode<BInt> length,
TVariable<FixedArrayBase>* var_elements,
- Node* growth, Label* bailout);
+ TNode<BInt> growth, Label* bailout);
// Allocation site manipulation
void InitializeAllocationMemento(TNode<HeapObject> base,
@@ -2568,7 +2526,6 @@ class V8_EXPORT_PRIVATE CodeStubAssembler
TNode<BoolT> InstanceTypeEqual(SloppyTNode<Int32T> instance_type, int type);
TNode<BoolT> IsAccessorInfo(SloppyTNode<HeapObject> object);
TNode<BoolT> IsAccessorPair(SloppyTNode<HeapObject> object);
- TNode<BoolT> IsAllocationSite(SloppyTNode<HeapObject> object);
TNode<BoolT> IsNoElementsProtectorCellInvalid();
TNode<BoolT> IsArrayIteratorProtectorCellInvalid();
TNode<BoolT> IsBigIntInstanceType(SloppyTNode<Int32T> instance_type);
@@ -2608,7 +2565,6 @@ class V8_EXPORT_PRIVATE CodeStubAssembler
TNode<BoolT> IsOddball(SloppyTNode<HeapObject> object);
TNode<BoolT> IsOddballInstanceType(SloppyTNode<Int32T> instance_type);
TNode<BoolT> IsIndirectStringInstanceType(SloppyTNode<Int32T> instance_type);
- TNode<BoolT> IsJSAggregateError(TNode<HeapObject> object);
TNode<BoolT> IsJSArrayBuffer(SloppyTNode<HeapObject> object);
TNode<BoolT> IsJSDataView(TNode<HeapObject> object);
TNode<BoolT> IsJSArrayInstanceType(SloppyTNode<Int32T> instance_type);
@@ -2617,7 +2573,6 @@ class V8_EXPORT_PRIVATE CodeStubAssembler
TNode<BoolT> IsJSArrayIterator(SloppyTNode<HeapObject> object);
TNode<BoolT> IsJSAsyncGeneratorObject(SloppyTNode<HeapObject> object);
TNode<BoolT> IsJSFunctionInstanceType(SloppyTNode<Int32T> instance_type);
- TNode<BoolT> IsAllocationSiteInstanceType(SloppyTNode<Int32T> instance_type);
TNode<BoolT> IsJSFunctionMap(SloppyTNode<Map> map);
TNode<BoolT> IsJSFunction(SloppyTNode<HeapObject> object);
TNode<BoolT> IsJSBoundFunction(SloppyTNode<HeapObject> object);
@@ -2685,9 +2640,6 @@ class V8_EXPORT_PRIVATE CodeStubAssembler
TNode<BoolT> IsCustomElementsReceiverInstanceType(
TNode<Int32T> instance_type);
TNode<BoolT> IsSpecialReceiverMap(SloppyTNode<Map> map);
- // Returns true if the map corresponds to non-special fast or dictionary
- // object.
- TNode<BoolT> IsSimpleObjectMap(TNode<Map> map);
TNode<BoolT> IsStringInstanceType(SloppyTNode<Int32T> instance_type);
TNode<BoolT> IsString(SloppyTNode<HeapObject> object);
TNode<BoolT> IsSymbolInstanceType(SloppyTNode<Int32T> instance_type);
@@ -2844,6 +2796,9 @@ class V8_EXPORT_PRIVATE CodeStubAssembler
TNode<Number> ToLength_Inline(SloppyTNode<Context> context,
SloppyTNode<Object> input);
+ TNode<Object> OrdinaryToPrimitive(TNode<Context> context, TNode<Object> input,
+ OrdinaryToPrimitiveHint hint);
+
// Returns a node that contains a decoded (unsigned!) value of a bit
// field |BitField| in |word32|. Returns result as an uint32 node.
template <typename BitField>
@@ -3512,24 +3467,24 @@ class V8_EXPORT_PRIVATE CodeStubAssembler
enum class ForEachDirection { kForward, kReverse };
- using FastFixedArrayForEachBody =
- std::function<void(Node* fixed_array, Node* offset)>;
+ using FastArrayForEachBody =
+ std::function<void(TNode<HeapObject> array, TNode<IntPtrT> offset)>;
- void BuildFastFixedArrayForEach(
- const CodeStubAssembler::VariableList& vars, Node* fixed_array,
+ void BuildFastArrayForEach(
+ const CodeStubAssembler::VariableList& vars, Node* array,
ElementsKind kind, Node* first_element_inclusive,
- Node* last_element_exclusive, const FastFixedArrayForEachBody& body,
+ Node* last_element_exclusive, const FastArrayForEachBody& body,
ParameterMode mode = INTPTR_PARAMETERS,
ForEachDirection direction = ForEachDirection::kReverse);
- void BuildFastFixedArrayForEach(
- Node* fixed_array, ElementsKind kind, Node* first_element_inclusive,
- Node* last_element_exclusive, const FastFixedArrayForEachBody& body,
+ void BuildFastArrayForEach(
+ Node* array, ElementsKind kind, Node* first_element_inclusive,
+ Node* last_element_exclusive, const FastArrayForEachBody& body,
ParameterMode mode = INTPTR_PARAMETERS,
ForEachDirection direction = ForEachDirection::kReverse) {
CodeStubAssembler::VariableList list(0, zone());
- BuildFastFixedArrayForEach(list, fixed_array, kind, first_element_inclusive,
- last_element_exclusive, body, mode, direction);
+ BuildFastArrayForEach(list, array, kind, first_element_inclusive,
+ last_element_exclusive, body, mode, direction);
}
TNode<IntPtrT> GetArrayAllocationSize(TNode<IntPtrT> element_count,
@@ -3764,6 +3719,7 @@ class V8_EXPORT_PRIVATE CodeStubAssembler
bool ConstexprInt32NotEqual(int32_t a, int32_t b) { return a != b; }
bool ConstexprInt32GreaterThanEqual(int32_t a, int32_t b) { return a >= b; }
uint32_t ConstexprUint32Add(uint32_t a, uint32_t b) { return a + b; }
+ int32_t ConstexprUint32Sub(uint32_t a, uint32_t b) { return a - b; }
int31_t ConstexprInt31Add(int31_t a, int31_t b) {
int32_t val;
CHECK(!base::bits::SignedAddOverflow32(a, b, &val));
@@ -3943,7 +3899,8 @@ class V8_EXPORT_PRIVATE CodeStubAssembler
// fields initialized.
TNode<JSArray> AllocateUninitializedJSArray(
TNode<Map> array_map, TNode<Smi> length,
- TNode<AllocationSite> allocation_site, TNode<IntPtrT> size_in_bytes);
+ base::Optional<TNode<AllocationSite>> allocation_site,
+ TNode<IntPtrT> size_in_bytes);
TNode<BoolT> IsValidSmi(TNode<Smi> smi);
@@ -4017,49 +3974,24 @@ class V8_EXPORT_PRIVATE CodeStubAssembler
ParameterMode parameter_mode = INTPTR_PARAMETERS);
};
-// template <typename TIndex>
class V8_EXPORT_PRIVATE CodeStubArguments {
public:
using Node = compiler::Node;
- enum ReceiverMode { kHasReceiver, kNoReceiver };
-
- // |argc| specifies the number of arguments passed to the builtin excluding
- // the receiver. The arguments will include a receiver iff |receiver_mode|
- // is kHasReceiver.
- CodeStubArguments(CodeStubAssembler* assembler, TNode<IntPtrT> argc,
- ReceiverMode receiver_mode = ReceiverMode::kHasReceiver)
- : CodeStubArguments(assembler, argc, TNode<RawPtrT>(), receiver_mode) {}
-
- CodeStubArguments(CodeStubAssembler* assembler, TNode<Int32T> argc,
- ReceiverMode receiver_mode = ReceiverMode::kHasReceiver)
- : CodeStubArguments(assembler, assembler->ChangeInt32ToIntPtr(argc),
- TNode<RawPtrT>(), receiver_mode) {}
-
- // TODO(v8:9708): Consider removing this variant
- CodeStubArguments(CodeStubAssembler* assembler, TNode<Smi> argc,
- ReceiverMode receiver_mode = ReceiverMode::kHasReceiver)
- : CodeStubArguments(assembler, assembler->ParameterToIntPtr(argc),
- TNode<RawPtrT>(), receiver_mode) {}
// |argc| specifies the number of arguments passed to the builtin excluding
- // the receiver. The arguments will include a receiver iff |receiver_mode|
- // is kHasReceiver.
+  // the receiver. The arguments on the stack nevertheless include the
+  // receiver slot.
+ CodeStubArguments(CodeStubAssembler* assembler, TNode<IntPtrT> argc)
+ : CodeStubArguments(assembler, argc, TNode<RawPtrT>()) {}
+ CodeStubArguments(CodeStubAssembler* assembler, TNode<Int32T> argc)
+ : CodeStubArguments(assembler, assembler->ChangeInt32ToIntPtr(argc)) {}
CodeStubArguments(CodeStubAssembler* assembler, TNode<IntPtrT> argc,
- TNode<RawPtrT> fp,
- ReceiverMode receiver_mode = ReceiverMode::kHasReceiver);
-
- CodeStubArguments(CodeStubAssembler* assembler, TNode<Smi> argc,
- TNode<RawPtrT> fp,
- ReceiverMode receiver_mode = ReceiverMode::kHasReceiver)
- : CodeStubArguments(assembler, assembler->ParameterToIntPtr(argc), fp,
- receiver_mode) {}
+ TNode<RawPtrT> fp);
// Used by Torque to construct arguments based on a Torque-defined
// struct of values.
CodeStubArguments(CodeStubAssembler* assembler,
TorqueStructArguments torque_arguments)
: assembler_(assembler),
- receiver_mode_(ReceiverMode::kHasReceiver),
argc_(torque_arguments.length),
base_(torque_arguments.base),
fp_(torque_arguments.frame) {}
@@ -4072,68 +4004,41 @@ class V8_EXPORT_PRIVATE CodeStubArguments {
// Computes address of the index'th argument.
TNode<RawPtrT> AtIndexPtr(TNode<IntPtrT> index) const;
- TNode<RawPtrT> AtIndexPtr(TNode<Smi> index) const {
- return AtIndexPtr(assembler_->ParameterToIntPtr(index));
- }
// |index| is zero-based and does not include the receiver
TNode<Object> AtIndex(TNode<IntPtrT> index) const;
- // TODO(v8:9708): Consider removing this variant
- TNode<Object> AtIndex(TNode<Smi> index) const {
- return AtIndex(assembler_->ParameterToIntPtr(index));
- }
-
TNode<Object> AtIndex(int index) const;
- TNode<Object> GetOptionalArgumentValue(int index) {
- return GetOptionalArgumentValue(index, assembler_->UndefinedConstant());
- }
- TNode<Object> GetOptionalArgumentValue(int index,
- TNode<Object> default_value);
-
TNode<IntPtrT> GetLength() const { return argc_; }
TorqueStructArguments GetTorqueArguments() const {
return TorqueStructArguments{fp_, base_, argc_};
}
+ TNode<Object> GetOptionalArgumentValue(TNode<IntPtrT> index,
+ TNode<Object> default_value);
TNode<Object> GetOptionalArgumentValue(TNode<IntPtrT> index) {
return GetOptionalArgumentValue(index, assembler_->UndefinedConstant());
}
- TNode<Object> GetOptionalArgumentValue(TNode<IntPtrT> index,
- TNode<Object> default_value);
-
- using ForEachBodyFunction = std::function<void(TNode<Object> arg)>;
+ TNode<Object> GetOptionalArgumentValue(int index) {
+ return GetOptionalArgumentValue(assembler_->IntPtrConstant(index));
+ }
// Iteration doesn't include the receiver. |first| and |last| are zero-based.
- template <typename TIndex>
- void ForEach(const ForEachBodyFunction& body, TNode<TIndex> first = {},
- TNode<TIndex> last = {}) const {
+ using ForEachBodyFunction = std::function<void(TNode<Object> arg)>;
+ void ForEach(const ForEachBodyFunction& body, TNode<IntPtrT> first = {},
+ TNode<IntPtrT> last = {}) const {
CodeStubAssembler::VariableList list(0, assembler_->zone());
ForEach(list, body, first, last);
}
-
- // Iteration doesn't include the receiver. |first| and |last| are zero-based.
void ForEach(const CodeStubAssembler::VariableList& vars,
const ForEachBodyFunction& body, TNode<IntPtrT> first = {},
TNode<IntPtrT> last = {}) const;
- void ForEach(const CodeStubAssembler::VariableList& vars,
- const ForEachBodyFunction& body, TNode<Smi> first,
- TNode<Smi> last = {}) const {
- TNode<IntPtrT> first_intptr = assembler_->ParameterToIntPtr(first);
- TNode<IntPtrT> last_intptr;
- if (last != nullptr) {
- last_intptr = assembler_->ParameterToIntPtr(last);
- }
- return ForEach(vars, body, first_intptr, last_intptr);
- }
-
void PopAndReturn(TNode<Object> value);
private:
CodeStubAssembler* assembler_;
- ReceiverMode receiver_mode_;
TNode<IntPtrT> argc_;
TNode<RawPtrT> base_;
TNode<RawPtrT> fp_;
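A minimal sketch of the slimmed-down interface from a builtin body (assuming the conventional Descriptor::kJSActualArgumentsCount parameter; the receiver is always present now):

    TNode<Int32T> argc =
        UncheckedCast<Int32T>(Parameter(Descriptor::kJSActualArgumentsCount));
    CodeStubArguments args(this, argc);
    args.ForEach([&](TNode<Object> arg) { /* use arg */ });
    args.PopAndReturn(UndefinedConstant());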
@@ -4226,6 +4131,19 @@ class PrototypeCheckAssembler : public CodeStubAssembler {
DEFINE_OPERATORS_FOR_FLAGS(CodeStubAssembler::AllocationFlags)
+#define CLASS_MAP_CONSTANT_ADAPTER(V, rootIndexName, rootAccessorName, \
+ class_name) \
+ template <> \
+ inline bool CodeStubAssembler::ClassHasMapConstant<class_name>() { \
+ return true; \
+ } \
+ template <> \
+ inline TNode<Map> CodeStubAssembler::GetClassMapConstant<class_name>() { \
+ return class_name##MapConstant(); \
+ }
+
+UNIQUE_INSTANCE_TYPE_MAP_LIST_GENERATOR(CLASS_MAP_CONSTANT_ADAPTER, _)
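For illustration, CSA code can now branch on a class having a unique immutable map without naming the root constant directly (a sketch; FixedArray stands in for any class covered by the generator list):

    if (ClassHasMapConstant<FixedArray>()) {
      TNode<Map> map = GetClassMapConstant<FixedArray>();
      GotoIfNot(TaggedEqual(LoadMap(object), map), &cast_fail);
    }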
+
} // namespace internal
} // namespace v8
#endif // V8_CODEGEN_CODE_STUB_ASSEMBLER_H_
diff --git a/chromium/v8/src/codegen/compiler.cc b/chromium/v8/src/codegen/compiler.cc
index c436c57407c..1386c1dd199 100644
--- a/chromium/v8/src/codegen/compiler.cc
+++ b/chromium/v8/src/codegen/compiler.cc
@@ -751,7 +751,7 @@ void InsertCodeIntoOptimizedCodeCache(
// Function context specialization folds-in the function context,
// so no sharing can occur.
- if (compilation_info->is_function_context_specializing()) {
+ if (compilation_info->function_context_specializing()) {
// Native context specialized code is not shared, so make sure the optimized
// code cache is clear.
ClearOptimizedCodeCache(compilation_info);
@@ -1090,7 +1090,9 @@ MaybeHandle<SharedFunctionInfo> CompileToplevel(
VMState<BYTECODE_COMPILER> state(isolate);
if (parse_info->literal() == nullptr &&
!parsing::ParseProgram(parse_info, script, maybe_outer_scope_info,
- isolate)) {
+ isolate, parsing::ReportStatisticsMode::kYes)) {
+ FailWithPendingException(isolate, script, parse_info,
+ Compiler::ClearExceptionFlag::KEEP_EXCEPTION);
return MaybeHandle<SharedFunctionInfo>();
}
// Measure how long it takes to do the compilation; only take the
@@ -1456,7 +1458,7 @@ bool Compiler::CollectSourcePositions(Isolate* isolate,
// Parse and update ParseInfo with the results. Don't update parsing
// statistics since we've already parsed the code before.
if (!parsing::ParseAny(&parse_info, shared_info, isolate,
- parsing::ReportErrorsAndStatisticsMode::kNo)) {
+ parsing::ReportStatisticsMode::kNo)) {
// Parsing failed probably as a result of stack exhaustion.
bytecode->SetSourcePositionsFailedToCollect();
return FailAndClearPendingException(isolate);
@@ -1548,7 +1550,8 @@ bool Compiler::Compile(Handle<SharedFunctionInfo> shared_info,
}
// Parse and update ParseInfo with the results.
- if (!parsing::ParseAny(&parse_info, shared_info, isolate)) {
+ if (!parsing::ParseAny(&parse_info, shared_info, isolate,
+ parsing::ReportStatisticsMode::kYes)) {
return FailWithPendingException(isolate, script, &parse_info, flag);
}
@@ -1595,7 +1598,7 @@ bool Compiler::Compile(Handle<JSFunction> function, ClearExceptionFlag flag,
Handle<Code> code = handle(shared_info->GetCode(), isolate);
// Initialize the feedback cell for this JSFunction.
- JSFunction::InitializeFeedbackCell(function);
+ JSFunction::InitializeFeedbackCell(function, is_compiled_scope);
// Optimize now if --always-opt is enabled.
if (FLAG_always_opt && !function->shared().HasAsmWasmData()) {
@@ -1801,7 +1804,7 @@ MaybeHandle<JSFunction> Compiler::GetFunctionFromEval(
} else {
result = isolate->factory()->NewFunctionFromSharedFunctionInfo(
shared_info, context, AllocationType::kYoung);
- JSFunction::InitializeFeedbackCell(result);
+ JSFunction::InitializeFeedbackCell(result, &is_compiled_scope);
if (allow_eval_cache) {
// Make sure to cache this result.
Handle<FeedbackCell> new_feedback_cell(result->raw_feedback_cell(),
@@ -1813,7 +1816,7 @@ MaybeHandle<JSFunction> Compiler::GetFunctionFromEval(
} else {
result = isolate->factory()->NewFunctionFromSharedFunctionInfo(
shared_info, context, AllocationType::kYoung);
- JSFunction::InitializeFeedbackCell(result);
+ JSFunction::InitializeFeedbackCell(result, &is_compiled_scope);
if (allow_eval_cache) {
// Add the SharedFunctionInfo and the LiteralsArray to the eval cache if
// we didn't retrieve from there.
@@ -2764,7 +2767,7 @@ void Compiler::PostInstantiation(Handle<JSFunction> function) {
// If code is compiled to bytecode (i.e., isn't asm.js), then allocate a
// feedback and check for optimized code.
if (is_compiled_scope.is_compiled() && shared->HasBytecodeArray()) {
- JSFunction::InitializeFeedbackCell(function);
+ JSFunction::InitializeFeedbackCell(function, &is_compiled_scope);
Code code = function->has_feedback_vector()
? function->feedback_vector().optimized_code()
@@ -2779,7 +2782,7 @@ void Compiler::PostInstantiation(Handle<JSFunction> function) {
if (FLAG_always_opt && shared->allows_lazy_compilation() &&
!shared->optimization_disabled() && !function->IsOptimized() &&
!function->HasOptimizedCode()) {
- JSFunction::EnsureFeedbackVector(function);
+ JSFunction::EnsureFeedbackVector(function, &is_compiled_scope);
function->MarkForOptimization(ConcurrencyMode::kNotConcurrent);
}
}
diff --git a/chromium/v8/src/codegen/cpu-features.h b/chromium/v8/src/codegen/cpu-features.h
index 14c94ebae9a..eef98f77e78 100644
--- a/chromium/v8/src/codegen/cpu-features.h
+++ b/chromium/v8/src/codegen/cpu-features.h
@@ -27,7 +27,7 @@ enum CpuFeature {
POPCNT,
ATOM,
-#elif V8_TARGET_ARCH_ARM || V8_TARGET_ARCH_ARM64
+#elif V8_TARGET_ARCH_ARM
// - Standard configurations. The baseline is ARMv6+VFPv2.
ARMv7, // ARMv7-A + VFPv3-D32 + NEON
ARMv7_SUDIV, // ARMv7-A + VFPv4-D32 + NEON + SUDIV
@@ -39,6 +39,9 @@ enum CpuFeature {
VFP32DREGS = ARMv7,
SUDIV = ARMv7_SUDIV,
+#elif V8_TARGET_ARCH_ARM64
+ JSCVT,
+
#elif V8_TARGET_ARCH_MIPS || V8_TARGET_ARCH_MIPS64
FPU,
FP64FPU,
diff --git a/chromium/v8/src/codegen/external-reference.cc b/chromium/v8/src/codegen/external-reference.cc
index 5c2c63e816c..4595269d028 100644
--- a/chromium/v8/src/codegen/external-reference.cc
+++ b/chromium/v8/src/codegen/external-reference.cc
@@ -277,6 +277,14 @@ FUNCTION_REFERENCE(wasm_float32_to_int64, wasm::float32_to_int64_wrapper)
FUNCTION_REFERENCE(wasm_float32_to_uint64, wasm::float32_to_uint64_wrapper)
FUNCTION_REFERENCE(wasm_float64_to_int64, wasm::float64_to_int64_wrapper)
FUNCTION_REFERENCE(wasm_float64_to_uint64, wasm::float64_to_uint64_wrapper)
+FUNCTION_REFERENCE(wasm_float32_to_int64_sat,
+ wasm::float32_to_int64_sat_wrapper)
+FUNCTION_REFERENCE(wasm_float32_to_uint64_sat,
+ wasm::float32_to_uint64_sat_wrapper)
+FUNCTION_REFERENCE(wasm_float64_to_int64_sat,
+ wasm::float64_to_int64_sat_wrapper)
+FUNCTION_REFERENCE(wasm_float64_to_uint64_sat,
+ wasm::float64_to_uint64_sat_wrapper)
FUNCTION_REFERENCE(wasm_int64_div, wasm::int64_div_wrapper)
FUNCTION_REFERENCE(wasm_int64_mod, wasm::int64_mod_wrapper)
FUNCTION_REFERENCE(wasm_uint64_div, wasm::uint64_div_wrapper)
@@ -289,6 +297,9 @@ FUNCTION_REFERENCE(wasm_word32_rol, wasm::word32_rol_wrapper)
FUNCTION_REFERENCE(wasm_word32_ror, wasm::word32_ror_wrapper)
FUNCTION_REFERENCE(wasm_word64_rol, wasm::word64_rol_wrapper)
FUNCTION_REFERENCE(wasm_word64_ror, wasm::word64_ror_wrapper)
+FUNCTION_REFERENCE(wasm_f32x4_ceil, wasm::f32x4_ceil_wrapper)
+FUNCTION_REFERENCE(wasm_f32x4_floor, wasm::f32x4_floor_wrapper)
+FUNCTION_REFERENCE(wasm_f32x4_trunc, wasm::f32x4_trunc_wrapper)
FUNCTION_REFERENCE(wasm_memory_init, wasm::memory_init_wrapper)
FUNCTION_REFERENCE(wasm_memory_copy, wasm::memory_copy_wrapper)
FUNCTION_REFERENCE(wasm_memory_fill, wasm::memory_fill_wrapper)
@@ -488,8 +499,12 @@ FUNCTION_REFERENCE_WITH_ISOLATE(re_match_for_call_from_js,
IrregexpInterpreter::MatchForCallFromJs)
FUNCTION_REFERENCE_WITH_ISOLATE(
- re_case_insensitive_compare_uc16,
- NativeRegExpMacroAssembler::CaseInsensitiveCompareUC16)
+ re_case_insensitive_compare_unicode,
+ NativeRegExpMacroAssembler::CaseInsensitiveCompareUnicode)
+
+FUNCTION_REFERENCE_WITH_ISOLATE(
+ re_case_insensitive_compare_non_unicode,
+ NativeRegExpMacroAssembler::CaseInsensitiveCompareNonUnicode)
ExternalReference ExternalReference::re_word_character_map(Isolate* isolate) {
return ExternalReference(
diff --git a/chromium/v8/src/codegen/external-reference.h b/chromium/v8/src/codegen/external-reference.h
index f42a7d74861..f5e93210d66 100644
--- a/chromium/v8/src/codegen/external-reference.h
+++ b/chromium/v8/src/codegen/external-reference.h
@@ -77,8 +77,10 @@ class StatsCounter;
V(address_of_regexp_stack_memory_top_address, \
"RegExpStack::memory_top_address_address()") \
V(address_of_static_offsets_vector, "OffsetsVector::static_offsets_vector") \
- V(re_case_insensitive_compare_uc16, \
- "NativeRegExpMacroAssembler::CaseInsensitiveCompareUC16()") \
+ V(re_case_insensitive_compare_unicode, \
+ "NativeRegExpMacroAssembler::CaseInsensitiveCompareUnicode()") \
+ V(re_case_insensitive_compare_non_unicode, \
+ "NativeRegExpMacroAssembler::CaseInsensitiveCompareNonUnicode()") \
V(re_check_stack_guard_state, \
"RegExpMacroAssembler*::CheckStackGuardState()") \
V(re_grow_stack, "NativeRegExpMacroAssembler::GrowStack()") \
@@ -181,9 +183,13 @@ class StatsCounter;
V(wasm_f64_trunc, "wasm::f64_trunc_wrapper") \
V(wasm_float32_to_int64, "wasm::float32_to_int64_wrapper") \
V(wasm_float32_to_uint64, "wasm::float32_to_uint64_wrapper") \
+ V(wasm_float32_to_int64_sat, "wasm::float32_to_int64_sat_wrapper") \
+ V(wasm_float32_to_uint64_sat, "wasm::float32_to_uint64_sat_wrapper") \
V(wasm_float64_pow, "wasm::float64_pow") \
V(wasm_float64_to_int64, "wasm::float64_to_int64_wrapper") \
V(wasm_float64_to_uint64, "wasm::float64_to_uint64_wrapper") \
+ V(wasm_float64_to_int64_sat, "wasm::float64_to_int64_sat_wrapper") \
+ V(wasm_float64_to_uint64_sat, "wasm::float64_to_uint64_sat_wrapper") \
V(wasm_int64_div, "wasm::int64_div") \
V(wasm_int64_mod, "wasm::int64_mod") \
V(wasm_int64_to_float32, "wasm::int64_to_float32_wrapper") \
@@ -200,6 +206,9 @@ class StatsCounter;
V(wasm_word64_ror, "wasm::word64_ror") \
V(wasm_word64_ctz, "wasm::word64_ctz") \
V(wasm_word64_popcnt, "wasm::word64_popcnt") \
+ V(wasm_f32x4_ceil, "wasm::f32x4_ceil_wrapper") \
+ V(wasm_f32x4_floor, "wasm::f32x4_floor_wrapper") \
+ V(wasm_f32x4_trunc, "wasm::f32x4_trunc_wrapper") \
V(wasm_memory_init, "wasm::memory_init") \
V(wasm_memory_copy, "wasm::memory_copy") \
V(wasm_memory_fill, "wasm::memory_fill") \
diff --git a/chromium/v8/src/codegen/ia32/assembler-ia32.cc b/chromium/v8/src/codegen/ia32/assembler-ia32.cc
index 551750936db..321a59ceded 100644
--- a/chromium/v8/src/codegen/ia32/assembler-ia32.cc
+++ b/chromium/v8/src/codegen/ia32/assembler-ia32.cc
@@ -691,6 +691,29 @@ void Assembler::stos() {
EMIT(0xAB);
}
+void Assembler::xadd(Operand dst, Register src) {
+ EnsureSpace ensure_space(this);
+ EMIT(0x0F);
+ EMIT(0xC1);
+ emit_operand(src, dst);
+}
+
+void Assembler::xadd_b(Operand dst, Register src) {
+ DCHECK(src.is_byte_register());
+ EnsureSpace ensure_space(this);
+ EMIT(0x0F);
+ EMIT(0xC0);
+ emit_operand(src, dst);
+}
+
+void Assembler::xadd_w(Operand dst, Register src) {
+ EnsureSpace ensure_space(this);
+ EMIT(0x66);
+ EMIT(0x0F);
+ EMIT(0xC1);
+ emit_operand(src, dst);
+}
+
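The new xadd encodings pair with a lock prefix for atomic read-modify-write sequences; a sketch (the source register receives the previous memory value; |base|, |disp|, |value_reg| are placeholders):

    lock();
    xadd(Operand(base, disp), value_reg);  // *mem += value_reg; value_reg = old *mem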
void Assembler::xchg(Register dst, Register src) {
EnsureSpace ensure_space(this);
if (src == eax || dst == eax) { // Single-byte encoding.
@@ -2246,6 +2269,30 @@ void Assembler::ucomisd(XMMRegister dst, Operand src) {
emit_sse_operand(dst, src);
}
+void Assembler::roundps(XMMRegister dst, XMMRegister src, RoundingMode mode) {
+ DCHECK(IsEnabled(SSE4_1));
+ EnsureSpace ensure_space(this);
+ EMIT(0x66);
+ EMIT(0x0F);
+ EMIT(0x3A);
+ EMIT(0x08);
+ emit_sse_operand(dst, src);
+  // Mask precision exception.
+ EMIT(static_cast<byte>(mode) | 0x8);
+}
+
+void Assembler::roundpd(XMMRegister dst, XMMRegister src, RoundingMode mode) {
+ DCHECK(IsEnabled(SSE4_1));
+ EnsureSpace ensure_space(this);
+ EMIT(0x66);
+ EMIT(0x0F);
+ EMIT(0x3A);
+ EMIT(0x09);
+ emit_sse_operand(dst, src);
+  // Mask precision exception.
+ EMIT(static_cast<byte>(mode) | 0x8);
+}
+
void Assembler::roundss(XMMRegister dst, XMMRegister src, RoundingMode mode) {
DCHECK(IsEnabled(SSE4_1));
EnsureSpace ensure_space(this);
@@ -2921,6 +2968,15 @@ void Assembler::vpinsrd(XMMRegister dst, XMMRegister src1, Operand src2,
EMIT(offset);
}
+void Assembler::vroundps(XMMRegister dst, XMMRegister src, RoundingMode mode) {
+ vinstr(0x08, dst, xmm0, Operand(src), k66, k0F3A, kWIG);
+ EMIT(static_cast<byte>(mode) | 0x8); // Mask precision exception.
+}
+void Assembler::vroundpd(XMMRegister dst, XMMRegister src, RoundingMode mode) {
+ vinstr(0x09, dst, xmm0, Operand(src), k66, k0F3A, kWIG);
+ EMIT(static_cast<byte>(mode) | 0x8); // Mask precision exception.
+}
+
void Assembler::vmovmskps(Register dst, XMMRegister src) {
DCHECK(IsEnabled(AVX));
EnsureSpace ensure_space(this);
diff --git a/chromium/v8/src/codegen/ia32/assembler-ia32.h b/chromium/v8/src/codegen/ia32/assembler-ia32.h
index 60d978df5be..5edbe8677a1 100644
--- a/chromium/v8/src/codegen/ia32/assembler-ia32.h
+++ b/chromium/v8/src/codegen/ia32/assembler-ia32.h
@@ -528,6 +528,10 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
void rep_stos();
void stos();
+ void xadd(Operand dst, Register src);
+ void xadd_b(Operand dst, Register src);
+ void xadd_w(Operand dst, Register src);
+
// Exchange
void xchg(Register dst, Register src);
void xchg(Register dst, Operand src);
@@ -1064,6 +1068,9 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
}
void pinsrd(XMMRegister dst, Operand src, uint8_t offset);
+ void roundps(XMMRegister dst, XMMRegister src, RoundingMode mode);
+ void roundpd(XMMRegister dst, XMMRegister src, RoundingMode mode);
+
// AVX instructions
void vfmadd132sd(XMMRegister dst, XMMRegister src1, XMMRegister src2) {
vfmadd132sd(dst, src1, Operand(src2));
@@ -1409,6 +1416,9 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
}
void vpinsrd(XMMRegister dst, XMMRegister src1, Operand src2, uint8_t offset);
+ void vroundps(XMMRegister dst, XMMRegister src, RoundingMode mode);
+ void vroundpd(XMMRegister dst, XMMRegister src, RoundingMode mode);
+
void vcvtdq2ps(XMMRegister dst, XMMRegister src) {
vcvtdq2ps(dst, Operand(src));
}
diff --git a/chromium/v8/src/codegen/ia32/interface-descriptors-ia32.cc b/chromium/v8/src/codegen/ia32/interface-descriptors-ia32.cc
index 8b1ea8d880e..ee9c3919cd4 100644
--- a/chromium/v8/src/codegen/ia32/interface-descriptors-ia32.cc
+++ b/chromium/v8/src/codegen/ia32/interface-descriptors-ia32.cc
@@ -195,12 +195,6 @@ void AbortDescriptor::InitializePlatformSpecific(
data->InitializePlatformSpecific(arraysize(registers), registers);
}
-void AllocateHeapNumberDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- // register state
- data->InitializePlatformSpecific(0, nullptr);
-}
-
void CompareDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
Register registers[] = {edx, eax};
@@ -312,6 +306,30 @@ void CallTrampoline_WithFeedbackDescriptor::InitializePlatformSpecific(
DefaultInitializePlatformSpecific(data, 4);
}
+void CallWithArrayLike_WithFeedbackDescriptor::InitializePlatformSpecific(
+ CallInterfaceDescriptorData* data) {
+ // TODO(v8:8888): Implement on this platform.
+ DefaultInitializePlatformSpecific(data, 4);
+}
+
+void CallWithSpread_WithFeedbackDescriptor::InitializePlatformSpecific(
+ CallInterfaceDescriptorData* data) {
+ // TODO(v8:8888): Implement on this platform.
+ DefaultInitializePlatformSpecific(data, 4);
+}
+
+void ConstructWithArrayLike_WithFeedbackDescriptor::InitializePlatformSpecific(
+ CallInterfaceDescriptorData* data) {
+ // TODO(v8:8888): Implement on this platform.
+ DefaultInitializePlatformSpecific(data, 4);
+}
+
+void ConstructWithSpread_WithFeedbackDescriptor::InitializePlatformSpecific(
+ CallInterfaceDescriptorData* data) {
+ // TODO(v8:8888): Implement on this platform.
+ DefaultInitializePlatformSpecific(data, 4);
+}
+
void Compare_WithFeedbackDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
// TODO(v8:8888): Implement on this platform.
diff --git a/chromium/v8/src/codegen/ia32/macro-assembler-ia32.cc b/chromium/v8/src/codegen/ia32/macro-assembler-ia32.cc
index b73050a680d..8b1cc912987 100644
--- a/chromium/v8/src/codegen/ia32/macro-assembler-ia32.cc
+++ b/chromium/v8/src/codegen/ia32/macro-assembler-ia32.cc
@@ -597,6 +597,28 @@ void TurboAssembler::Cvttsd2ui(Register dst, Operand src, XMMRegister tmp) {
add(dst, Immediate(0x80000000));
}
+void TurboAssembler::Roundps(XMMRegister dst, XMMRegister src,
+ RoundingMode mode) {
+ if (CpuFeatures::IsSupported(AVX)) {
+ CpuFeatureScope scope(this, AVX);
+ vroundps(dst, src, mode);
+ } else {
+ CpuFeatureScope scope(this, SSE4_1);
+ roundps(dst, src, mode);
+ }
+}
+
+void TurboAssembler::Roundpd(XMMRegister dst, XMMRegister src,
+ RoundingMode mode) {
+ if (CpuFeatures::IsSupported(AVX)) {
+ CpuFeatureScope scope(this, AVX);
+ vroundpd(dst, src, mode);
+ } else {
+ CpuFeatureScope scope(this, SSE4_1);
+ roundpd(dst, src, mode);
+ }
+}
+
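A usage sketch of the new packed-rounding helpers, using the RoundingMode values shared with the existing scalar roundss/roundsd:

    Roundps(dst, src, kRoundUp);      // vroundps under AVX, roundps under SSE4.1
    Roundpd(dst, src, kRoundToZero);  // vroundpd under AVX, roundpd under SSE4.1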
void TurboAssembler::ShlPair(Register high, Register low, uint8_t shift) {
DCHECK_GE(63, shift);
if (shift >= 32) {
@@ -2045,9 +2067,9 @@ void TurboAssembler::CheckPageFlag(Register object, Register scratch, int mask,
and_(scratch, object);
}
if (mask < (1 << kBitsPerByte)) {
- test_b(Operand(scratch, MemoryChunk::kFlagsOffset), Immediate(mask));
+ test_b(Operand(scratch, BasicMemoryChunk::kFlagsOffset), Immediate(mask));
} else {
- test(Operand(scratch, MemoryChunk::kFlagsOffset), Immediate(mask));
+ test(Operand(scratch, BasicMemoryChunk::kFlagsOffset), Immediate(mask));
}
j(cc, condition_met, condition_met_distance);
}
diff --git a/chromium/v8/src/codegen/ia32/macro-assembler-ia32.h b/chromium/v8/src/codegen/ia32/macro-assembler-ia32.h
index 94ddb2f7847..2b1f4400146 100644
--- a/chromium/v8/src/codegen/ia32/macro-assembler-ia32.h
+++ b/chromium/v8/src/codegen/ia32/macro-assembler-ia32.h
@@ -286,6 +286,8 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
AVX_OP2_WITH_TYPE(Movd, movd, Register, XMMRegister)
AVX_OP2_WITH_TYPE(Movd, movd, Operand, XMMRegister)
AVX_OP2_WITH_TYPE(Cvtdq2ps, cvtdq2ps, XMMRegister, Operand)
+ AVX_OP2_WITH_TYPE(Cvtdq2ps, cvtdq2ps, XMMRegister, XMMRegister)
+ AVX_OP2_WITH_TYPE(Cvttps2dq, cvttps2dq, XMMRegister, XMMRegister)
AVX_OP2_WITH_TYPE(Sqrtps, sqrtps, XMMRegister, XMMRegister)
AVX_OP2_WITH_TYPE(Sqrtpd, sqrtpd, XMMRegister, XMMRegister)
AVX_OP2_WITH_TYPE(Sqrtpd, sqrtpd, XMMRegister, const Operand&)
@@ -319,6 +321,7 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
AVX_OP3_XO(Pcmpeqb, pcmpeqb)
AVX_OP3_XO(Pcmpeqw, pcmpeqw)
AVX_OP3_XO(Pcmpeqd, pcmpeqd)
+ AVX_OP3_XO(Por, por)
AVX_OP3_XO(Psubb, psubb)
AVX_OP3_XO(Psubw, psubw)
AVX_OP3_XO(Psubd, psubd)
@@ -357,6 +360,7 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
AVX_PACKED_OP3_WITH_TYPE(macro_name, name, XMMRegister, XMMRegister) \
AVX_PACKED_OP3_WITH_TYPE(macro_name, name, XMMRegister, Operand)
+ AVX_PACKED_OP3(Addps, addps)
AVX_PACKED_OP3(Addpd, addpd)
AVX_PACKED_OP3(Subps, subps)
AVX_PACKED_OP3(Subpd, subpd)
@@ -365,6 +369,7 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
AVX_PACKED_OP3(Cmpeqpd, cmpeqpd)
AVX_PACKED_OP3(Cmpneqpd, cmpneqpd)
AVX_PACKED_OP3(Cmpltpd, cmpltpd)
+ AVX_PACKED_OP3(Cmpleps, cmpleps)
AVX_PACKED_OP3(Cmplepd, cmplepd)
AVX_PACKED_OP3(Minps, minps)
AVX_PACKED_OP3(Minpd, minpd)
@@ -380,6 +385,8 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
AVX_PACKED_OP3(Psrlq, psrlq)
AVX_PACKED_OP3(Psraw, psraw)
AVX_PACKED_OP3(Psrad, psrad)
+ AVX_PACKED_OP3(Pmaddwd, pmaddwd)
+ AVX_PACKED_OP3(Paddd, paddd)
AVX_PACKED_OP3(Paddq, paddq)
AVX_PACKED_OP3(Psubq, psubq)
AVX_PACKED_OP3(Pmuludq, pmuludq)
@@ -444,6 +451,30 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
#undef AVX_OP2_WITH_TYPE_SCOPE
#undef AVX_OP2_XO_SSE4
+#define AVX_OP3_WITH_TYPE_SCOPE(macro_name, name, dst_type, src_type, \
+ sse_scope) \
+ void macro_name(dst_type dst, src_type src) { \
+ if (CpuFeatures::IsSupported(AVX)) { \
+ CpuFeatureScope scope(this, AVX); \
+ v##name(dst, dst, src); \
+ return; \
+ } \
+ if (CpuFeatures::IsSupported(sse_scope)) { \
+ CpuFeatureScope scope(this, sse_scope); \
+ name(dst, src); \
+ return; \
+ } \
+ UNREACHABLE(); \
+ }
+#define AVX_OP3_XO_SSE4(macro_name, name) \
+ AVX_OP3_WITH_TYPE_SCOPE(macro_name, name, XMMRegister, XMMRegister, SSE4_1) \
+ AVX_OP3_WITH_TYPE_SCOPE(macro_name, name, XMMRegister, Operand, SSE4_1)
+
+ AVX_OP3_XO_SSE4(Pmaxsd, pmaxsd)
+
+#undef AVX_OP3_XO_SSE4
+#undef AVX_OP3_WITH_TYPE_SCOPE
+
void Pshufb(XMMRegister dst, XMMRegister src) { Pshufb(dst, Operand(src)); }
void Pshufb(XMMRegister dst, Operand src);
void Pblendw(XMMRegister dst, XMMRegister src, uint8_t imm8) {
@@ -506,6 +537,9 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
}
void Cvttsd2ui(Register dst, Operand src, XMMRegister tmp);
+ void Roundps(XMMRegister dst, XMMRegister src, RoundingMode mode);
+ void Roundpd(XMMRegister dst, XMMRegister src, RoundingMode mode);
+
void Push(Register src) { push(src); }
void Push(Operand src) { push(src); }
void Push(Immediate value);
diff --git a/chromium/v8/src/codegen/ia32/sse-instr.h b/chromium/v8/src/codegen/ia32/sse-instr.h
index b8a7a3c827a..a56dc13361c 100644
--- a/chromium/v8/src/codegen/ia32/sse-instr.h
+++ b/chromium/v8/src/codegen/ia32/sse-instr.h
@@ -9,6 +9,7 @@
V(packsswb, 66, 0F, 63) \
V(packssdw, 66, 0F, 6B) \
V(packuswb, 66, 0F, 67) \
+ V(pmaddwd, 66, 0F, F5) \
V(paddb, 66, 0F, FC) \
V(paddw, 66, 0F, FD) \
V(paddd, 66, 0F, FE) \
diff --git a/chromium/v8/src/codegen/interface-descriptors.cc b/chromium/v8/src/codegen/interface-descriptors.cc
index 503da3cb43c..ce12bc61f53 100644
--- a/chromium/v8/src/codegen/interface-descriptors.cc
+++ b/chromium/v8/src/codegen/interface-descriptors.cc
@@ -30,10 +30,12 @@ void CallInterfaceDescriptorData::InitializePlatformSpecific(
void CallInterfaceDescriptorData::InitializePlatformIndependent(
Flags flags, int return_count, int parameter_count,
- const MachineType* machine_types, int machine_types_length) {
+ const MachineType* machine_types, int machine_types_length,
+ StackArgumentOrder stack_order) {
DCHECK(IsInitializedPlatformSpecific());
flags_ = flags;
+ stack_order_ = stack_order;
return_count_ = return_count;
param_count_ = parameter_count;
const int types_length = return_count_ + param_count_;
@@ -83,7 +85,6 @@ void CallDescriptors::InitializeOncePerProcess() {
DCHECK(ContextOnlyDescriptor{}.HasContextParameter());
DCHECK(!NoContextDescriptor{}.HasContextParameter());
DCHECK(!AllocateDescriptor{}.HasContextParameter());
- DCHECK(!AllocateHeapNumberDescriptor{}.HasContextParameter());
DCHECK(!AbortDescriptor{}.HasContextParameter());
DCHECK(!WasmFloat32ToNumberDescriptor{}.HasContextParameter());
DCHECK(!WasmFloat64ToNumberDescriptor{}.HasContextParameter());
@@ -391,44 +392,17 @@ void WasmFloat64ToNumberDescriptor::InitializePlatformSpecific(
}
#endif // !V8_TARGET_ARCH_IA32
-void WasmTableInitDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- DefaultInitializePlatformSpecific(data,
- kParameterCount - kStackArgumentsCount);
-}
-
-void WasmTableCopyDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- DefaultInitializePlatformSpecific(data,
- kParameterCount - kStackArgumentsCount);
-}
-
-void WasmAtomicNotifyDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- DefaultInitializePlatformSpecific(data, kParameterCount);
-}
-
#if !defined(V8_TARGET_ARCH_MIPS) && !defined(V8_TARGET_ARCH_MIPS64)
void WasmI32AtomicWait32Descriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
DefaultInitializePlatformSpecific(data, kParameterCount);
}
-void WasmI32AtomicWait64Descriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- DefaultInitializePlatformSpecific(data, kParameterCount);
-}
-
void WasmI64AtomicWait32Descriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
DefaultInitializePlatformSpecific(data,
kParameterCount - kStackArgumentsCount);
}
-
-void WasmI64AtomicWait64Descriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- DefaultInitializePlatformSpecific(data, kParameterCount);
-}
#endif
void CloneObjectWithVectorDescriptor::InitializePlatformSpecific(
diff --git a/chromium/v8/src/codegen/interface-descriptors.h b/chromium/v8/src/codegen/interface-descriptors.h
index fc27b46ca14..14c021b3b76 100644
--- a/chromium/v8/src/codegen/interface-descriptors.h
+++ b/chromium/v8/src/codegen/interface-descriptors.h
@@ -20,93 +20,101 @@ namespace internal {
BUILTIN_LIST_FROM_TORQUE(IGNORE_BUILTIN, IGNORE_BUILTIN, V, IGNORE_BUILTIN, \
IGNORE_BUILTIN, IGNORE_BUILTIN)
-#define INTERFACE_DESCRIPTOR_LIST(V) \
- V(Abort) \
- V(Allocate) \
- V(AllocateHeapNumber) \
- V(ApiCallback) \
- V(ApiGetter) \
- V(ArgumentsAdaptor) \
- V(ArrayConstructor) \
- V(ArrayNArgumentsConstructor) \
- V(ArrayNoArgumentConstructor) \
- V(ArraySingleArgumentConstructor) \
- V(AsyncFunctionStackParameter) \
- V(BigIntToI32Pair) \
- V(BigIntToI64) \
- V(BinaryOp) \
- V(BinaryOp_WithFeedback) \
- V(CallForwardVarargs) \
- V(CallFunctionTemplate) \
- V(CallTrampoline) \
- V(CallTrampoline_WithFeedback) \
- V(CallVarargs) \
- V(CallWithArrayLike) \
- V(CallWithSpread) \
- V(CEntry1ArgvOnStack) \
- V(CloneObjectWithVector) \
- V(Compare) \
- V(Compare_WithFeedback) \
- V(ConstructForwardVarargs) \
- V(ConstructStub) \
- V(ConstructVarargs) \
- V(ConstructWithArrayLike) \
- V(Construct_WithFeedback) \
- V(ConstructWithSpread) \
- V(ContextOnly) \
- V(CppBuiltinAdaptor) \
- V(EphemeronKeyBarrier) \
- V(FastNewFunctionContext) \
- V(FastNewObject) \
- V(FrameDropperTrampoline) \
- V(GetIteratorStackParameter) \
- V(GetProperty) \
- V(GrowArrayElements) \
- V(I32PairToBigInt) \
- V(I64ToBigInt) \
- V(InterpreterCEntry1) \
- V(InterpreterCEntry2) \
- V(InterpreterDispatch) \
- V(InterpreterPushArgsThenCall) \
- V(InterpreterPushArgsThenConstruct) \
- V(JSTrampoline) \
- V(Load) \
- V(LoadGlobal) \
- V(LoadGlobalNoFeedback) \
- V(LoadGlobalWithVector) \
- V(LoadNoFeedback) \
- V(LoadWithVector) \
- V(NewArgumentsElements) \
- V(NoContext) \
- V(RecordWrite) \
- V(ResumeGenerator) \
- V(RunMicrotasks) \
- V(RunMicrotasksEntry) \
- V(Store) \
- V(StoreGlobal) \
- V(StoreGlobalWithVector) \
- V(StoreTransition) \
- V(StoreWithVector) \
- V(StringAt) \
- V(StringAtAsString) \
- V(StringSubstring) \
- V(TypeConversion) \
- V(TypeConversionStackParameter) \
- V(Typeof) \
- V(UnaryOp_WithFeedback) \
- V(Void) \
- V(WasmAtomicNotify) \
- V(WasmFloat32ToNumber) \
- V(WasmFloat64ToNumber) \
- V(WasmI32AtomicWait32) \
- V(WasmI32AtomicWait64) \
- V(WasmI64AtomicWait32) \
- V(WasmI64AtomicWait64) \
- V(WasmTableInit) \
- V(WasmTableCopy) \
- BUILTIN_LIST_TFS(V) \
+#define INTERFACE_DESCRIPTOR_LIST(V) \
+ V(Abort) \
+ V(Allocate) \
+ V(ApiCallback) \
+ V(ApiGetter) \
+ V(ArgumentsAdaptor) \
+ V(ArrayConstructor) \
+ V(ArrayNArgumentsConstructor) \
+ V(ArrayNoArgumentConstructor) \
+ V(ArraySingleArgumentConstructor) \
+ V(AsyncFunctionStackParameter) \
+ V(BigIntToI32Pair) \
+ V(BigIntToI64) \
+ V(BinaryOp) \
+ V(BinaryOp_WithFeedback) \
+ V(CallForwardVarargs) \
+ V(CallFunctionTemplate) \
+ V(CallTrampoline) \
+ V(CallTrampoline_WithFeedback) \
+ V(CallVarargs) \
+ V(CallWithArrayLike) \
+ V(CallWithArrayLike_WithFeedback) \
+ V(CallWithSpread) \
+ V(CallWithSpread_WithFeedback) \
+ V(CEntry1ArgvOnStack) \
+ V(CloneObjectWithVector) \
+ V(Compare) \
+ V(Compare_WithFeedback) \
+ V(ConstructForwardVarargs) \
+ V(ConstructStub) \
+ V(ConstructVarargs) \
+ V(ConstructWithArrayLike) \
+ V(ConstructWithArrayLike_WithFeedback) \
+ V(Construct_WithFeedback) \
+ V(ConstructWithSpread) \
+ V(ConstructWithSpread_WithFeedback) \
+ V(ContextOnly) \
+ V(CppBuiltinAdaptor) \
+ V(EphemeronKeyBarrier) \
+ V(FastNewFunctionContext) \
+ V(FastNewObject) \
+ V(FrameDropperTrampoline) \
+ V(GetIteratorStackParameter) \
+ V(GetProperty) \
+ V(GrowArrayElements) \
+ V(I32PairToBigInt) \
+ V(I64ToBigInt) \
+ V(InterpreterCEntry1) \
+ V(InterpreterCEntry2) \
+ V(InterpreterDispatch) \
+ V(InterpreterPushArgsThenCall) \
+ V(InterpreterPushArgsThenConstruct) \
+ V(JSTrampoline) \
+ V(Load) \
+ V(LoadGlobal) \
+ V(LoadGlobalNoFeedback) \
+ V(LoadGlobalWithVector) \
+ V(LoadNoFeedback) \
+ V(LoadWithVector) \
+ V(NewArgumentsElements) \
+ V(NoContext) \
+ V(RecordWrite) \
+ V(ResumeGenerator) \
+ V(RunMicrotasks) \
+ V(RunMicrotasksEntry) \
+ V(Store) \
+ V(StoreGlobal) \
+ V(StoreGlobalWithVector) \
+ V(StoreTransition) \
+ V(StoreWithVector) \
+ V(StringAt) \
+ V(StringAtAsString) \
+ V(StringSubstring) \
+ V(TypeConversion) \
+ V(TypeConversionStackParameter) \
+ V(Typeof) \
+ V(UnaryOp_WithFeedback) \
+ V(Void) \
+ V(WasmFloat32ToNumber) \
+ V(WasmFloat64ToNumber) \
+ V(WasmI32AtomicWait32) \
+ V(WasmI64AtomicWait32) \
+ BUILTIN_LIST_TFS(V) \
TORQUE_BUILTIN_LIST_TFC(V)
+enum class StackArgumentOrder {
+  kDefault,  // Arguments on the stack are pushed in the default/stub order
+             // (the first argument is pushed first).
+  kJS,  // Arguments on the stack are pushed in the same order used by
+        // JS-to-JS function calls. This should be used if calling a JSFunction
+        // or if the builtin is expected to be called directly from a
+        // JSFunction. When V8_REVERSE_JSARGS is set, this order is reversed
+        // compared to kDefault.
+};
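Illustrative only: descriptors declared with the JS-linkage macros below report the JS order, which code generators can query before pushing stack arguments:

    if (descriptor.GetStackArgumentOrder() == StackArgumentOrder::kJS) {
      // Push in JS order; reversed relative to kDefault when V8_REVERSE_JSARGS is set.
    }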
+
class V8_EXPORT_PRIVATE CallInterfaceDescriptorData {
public:
enum Flag {
@@ -142,7 +150,8 @@ class V8_EXPORT_PRIVATE CallInterfaceDescriptorData {
void InitializePlatformIndependent(Flags flags, int return_count,
int parameter_count,
const MachineType* machine_types,
- int machine_types_length);
+ int machine_types_length,
+ StackArgumentOrder stack_order);
void Reset();
@@ -165,6 +174,7 @@ class V8_EXPORT_PRIVATE CallInterfaceDescriptorData {
DCHECK_LT(index, param_count_);
return machine_types_[return_count_ + index];
}
+ StackArgumentOrder stack_order() const { return stack_order_; }
void RestrictAllocatableRegisters(const Register* registers, int num) {
DCHECK_EQ(allocatable_registers_, 0);
@@ -199,6 +209,7 @@ class V8_EXPORT_PRIVATE CallInterfaceDescriptorData {
int return_count_ = -1;
int param_count_ = -1;
Flags flags_ = kNoFlags;
+ StackArgumentOrder stack_order_ = StackArgumentOrder::kDefault;
// Specifying the set of registers that could be used by the register
// allocator. Currently, it's only used by RecordWrite code stub.
@@ -295,6 +306,10 @@ class V8_EXPORT_PRIVATE CallInterfaceDescriptor {
return data()->allocatable_registers();
}
+ StackArgumentOrder GetStackArgumentOrder() const {
+ return data()->stack_order();
+ }
+
static const Register ContextRegister();
const char* DebugName() const;
@@ -314,9 +329,9 @@ class V8_EXPORT_PRIVATE CallInterfaceDescriptor {
CallInterfaceDescriptorData* data) {
// Default descriptor configuration: one result, all parameters are passed
// in registers and all parameters have MachineType::AnyTagged() type.
- data->InitializePlatformIndependent(CallInterfaceDescriptorData::kNoFlags,
- 1, data->register_param_count(),
- nullptr, 0);
+ data->InitializePlatformIndependent(
+ CallInterfaceDescriptorData::kNoFlags, 1, data->register_param_count(),
+ nullptr, 0, StackArgumentOrder::kDefault);
}
// Initializes |data| using the platform dependent default set of registers.
@@ -402,7 +417,8 @@ STATIC_ASSERT(kMaxTFSBuiltinRegisterParams <= kMaxBuiltinRegisterParams);
void InitializePlatformIndependent(CallInterfaceDescriptorData* data) \
override { \
data->InitializePlatformIndependent(Flags(kDescriptorFlags), kReturnCount, \
- kParameterCount, nullptr, 0); \
+ kParameterCount, nullptr, 0, \
+ kStackArgumentOrder); \
} \
name(CallDescriptors::Key key) : base(key) {} \
\
@@ -420,9 +436,11 @@ STATIC_ASSERT(kMaxTFSBuiltinRegisterParams <= kMaxBuiltinRegisterParams);
\
public:
-#define DEFINE_FLAGS_AND_RESULT_AND_PARAMETERS(flags, return_count, ...) \
+#define DEFINE_FLAGS_AND_RESULT_AND_PARAMETERS(flags, stack_order, \
+ return_count, ...) \
static constexpr int kDescriptorFlags = flags; \
static constexpr int kReturnCount = return_count; \
+ static constexpr StackArgumentOrder kStackArgumentOrder = stack_order; \
enum ParameterIndices { \
__dummy = -1, /* to be able to pass zero arguments */ \
##__VA_ARGS__, \
@@ -431,35 +449,41 @@ STATIC_ASSERT(kMaxTFSBuiltinRegisterParams <= kMaxBuiltinRegisterParams);
kContext = kParameterCount /* implicit parameter */ \
};
-#define DEFINE_RESULT_AND_PARAMETERS(return_count, ...) \
- DEFINE_FLAGS_AND_RESULT_AND_PARAMETERS( \
- CallInterfaceDescriptorData::kNoFlags, return_count, ##__VA_ARGS__)
+#define DEFINE_RESULT_AND_PARAMETERS(return_count, ...) \
+ DEFINE_FLAGS_AND_RESULT_AND_PARAMETERS( \
+ CallInterfaceDescriptorData::kNoFlags, StackArgumentOrder::kDefault, \
+ return_count, ##__VA_ARGS__)
// This is valid only for builtins that use EntryFrame, which does not scan
// stack arguments on GC.
-#define DEFINE_PARAMETERS_ENTRY(...) \
- static constexpr int kDescriptorFlags = \
- CallInterfaceDescriptorData::kNoContext | \
- CallInterfaceDescriptorData::kNoStackScan; \
- static constexpr int kReturnCount = 1; \
- enum ParameterIndices { \
- __dummy = -1, /* to be able to pass zero arguments */ \
- ##__VA_ARGS__, \
- \
- kParameterCount \
+#define DEFINE_PARAMETERS_ENTRY(...) \
+ static constexpr int kDescriptorFlags = \
+ CallInterfaceDescriptorData::kNoContext | \
+ CallInterfaceDescriptorData::kNoStackScan; \
+ static constexpr StackArgumentOrder kStackArgumentOrder = \
+ StackArgumentOrder::kDefault; \
+ static constexpr int kReturnCount = 1; \
+ enum ParameterIndices { \
+ __dummy = -1, /* to be able to pass zero arguments */ \
+ ##__VA_ARGS__, \
+ \
+ kParameterCount \
};
-#define DEFINE_PARAMETERS(...) \
- DEFINE_FLAGS_AND_RESULT_AND_PARAMETERS( \
- CallInterfaceDescriptorData::kNoFlags, 1, ##__VA_ARGS__)
+#define DEFINE_PARAMETERS(...) \
+ DEFINE_FLAGS_AND_RESULT_AND_PARAMETERS( \
+ CallInterfaceDescriptorData::kNoFlags, StackArgumentOrder::kDefault, 1, \
+ ##__VA_ARGS__)
-#define DEFINE_PARAMETERS_NO_CONTEXT(...) \
- DEFINE_FLAGS_AND_RESULT_AND_PARAMETERS( \
- CallInterfaceDescriptorData::kNoContext, 1, ##__VA_ARGS__)
+#define DEFINE_PARAMETERS_NO_CONTEXT(...) \
+ DEFINE_FLAGS_AND_RESULT_AND_PARAMETERS( \
+ CallInterfaceDescriptorData::kNoContext, StackArgumentOrder::kDefault, \
+ 1, ##__VA_ARGS__)
-#define DEFINE_PARAMETERS_VARARGS(...) \
- DEFINE_FLAGS_AND_RESULT_AND_PARAMETERS( \
- CallInterfaceDescriptorData::kAllowVarArgs, 1, ##__VA_ARGS__)
+#define DEFINE_PARAMETERS_VARARGS(...) \
+ DEFINE_FLAGS_AND_RESULT_AND_PARAMETERS( \
+ CallInterfaceDescriptorData::kAllowVarArgs, StackArgumentOrder::kJS, 1, \
+ ##__VA_ARGS__)
#define DEFINE_RESULT_AND_PARAMETER_TYPES_WITH_FLAG(flag, ...) \
void InitializePlatformIndependent(CallInterfaceDescriptorData* data) \
@@ -470,7 +494,7 @@ STATIC_ASSERT(kMaxTFSBuiltinRegisterParams <= kMaxBuiltinRegisterParams);
"Parameter names definition is not consistent with parameter types"); \
data->InitializePlatformIndependent( \
Flags(flag | kDescriptorFlags), kReturnCount, kParameterCount, \
- machine_types, arraysize(machine_types)); \
+ machine_types, arraysize(machine_types), kStackArgumentOrder); \
}
#define DEFINE_RESULT_AND_PARAMETER_TYPES(...) \
@@ -481,18 +505,20 @@ STATIC_ASSERT(kMaxTFSBuiltinRegisterParams <= kMaxBuiltinRegisterParams);
DEFINE_RESULT_AND_PARAMETER_TYPES(MachineType::AnyTagged() /* result */, \
##__VA_ARGS__)
-#define DEFINE_JS_PARAMETERS(...) \
- static constexpr int kDescriptorFlags = \
- CallInterfaceDescriptorData::kAllowVarArgs; \
- static constexpr int kReturnCount = 1; \
- enum ParameterIndices { \
- kTarget, \
- kNewTarget, \
- kActualArgumentsCount, \
- ##__VA_ARGS__, \
- \
- kParameterCount, \
- kContext = kParameterCount /* implicit parameter */ \
+#define DEFINE_JS_PARAMETERS(...) \
+ static constexpr int kDescriptorFlags = \
+ CallInterfaceDescriptorData::kAllowVarArgs; \
+ static constexpr int kReturnCount = 1; \
+ static constexpr StackArgumentOrder kStackArgumentOrder = \
+ StackArgumentOrder::kJS; \
+ enum ParameterIndices { \
+ kTarget, \
+ kNewTarget, \
+ kActualArgumentsCount, \
+ ##__VA_ARGS__, \
+ \
+ kParameterCount, \
+ kContext = kParameterCount /* implicit parameter */ \
};
#define DEFINE_JS_PARAMETER_TYPES(...) \
@@ -554,7 +580,8 @@ class TorqueInterfaceDescriptor : public CallInterfaceDescriptor {
DCHECK_EQ(kReturnCount + kParameterCount, machine_types.size());
data->InitializePlatformIndependent(Flags(kDescriptorFlags), kReturnCount,
kParameterCount, machine_types.data(),
- static_cast<int>(machine_types.size()));
+ static_cast<int>(machine_types.size()),
+ StackArgumentOrder::kDefault);
}
};
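
Editor's note: the common thread in the descriptor changes above is the new StackArgumentOrder parameter — most descriptors keep StackArgumentOrder::kDefault, the var-args and JS-linkage macros (DEFINE_PARAMETERS_VARARGS, DEFINE_JS_PARAMETERS) select StackArgumentOrder::kJS, and callers can query it via GetStackArgumentOrder(). As a minimal standalone sketch of how a consumer might branch on that order: the enum name mirrors the patch, but LayOutStackArguments is hypothetical, and the assumption that kJS means "reverse the declaration order" is made purely for illustration.

#include <algorithm>
#include <cstdio>
#include <vector>

enum class StackArgumentOrder { kDefault, kJS };  // mirrors the patch

// Hypothetical helper: lay out stack arguments according to the order a
// descriptor reports. kDefault keeps declaration order; kJS is assumed here
// to reverse it.
std::vector<int> LayOutStackArguments(const std::vector<int>& args,
                                      StackArgumentOrder order) {
  std::vector<int> pushed(args);
  if (order == StackArgumentOrder::kJS) {
    std::reverse(pushed.begin(), pushed.end());
  }
  return pushed;
}

int main() {
  for (int a : LayOutStackArguments({1, 2, 3}, StackArgumentOrder::kJS)) {
    std::printf("%d ", a);  // prints: 3 2 1
  }
  std::printf("\n");
  return 0;
}
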
@@ -948,6 +975,20 @@ class CallWithSpreadDescriptor : public CallInterfaceDescriptor {
DECLARE_DESCRIPTOR(CallWithSpreadDescriptor, CallInterfaceDescriptor)
};
+// TODO(jgruber): Pass the slot as UintPtr.
+class CallWithSpread_WithFeedbackDescriptor : public CallInterfaceDescriptor {
+ public:
+ DEFINE_PARAMETERS(kTarget, kArgumentsCount, kSpread, kSlot,
+ kMaybeFeedbackVector)
+ DEFINE_PARAMETER_TYPES(MachineType::AnyTagged(), // kTarget
+ MachineType::Int32(), // kArgumentsCount
+ MachineType::AnyTagged(), // kSpread
+ MachineType::Int32(), // kSlot
+ MachineType::AnyTagged()) // kMaybeFeedbackVector
+ DECLARE_DESCRIPTOR(CallWithSpread_WithFeedbackDescriptor,
+ CallInterfaceDescriptor)
+};
+
class CallWithArrayLikeDescriptor : public CallInterfaceDescriptor {
public:
DEFINE_PARAMETERS(kTarget, kArgumentsList)
@@ -956,6 +997,19 @@ class CallWithArrayLikeDescriptor : public CallInterfaceDescriptor {
DECLARE_DESCRIPTOR(CallWithArrayLikeDescriptor, CallInterfaceDescriptor)
};
+// TODO(jgruber): Pass the slot as UintPtr.
+class CallWithArrayLike_WithFeedbackDescriptor
+ : public CallInterfaceDescriptor {
+ public:
+ DEFINE_PARAMETERS(kTarget, kArgumentsList, kSlot, kMaybeFeedbackVector)
+ DEFINE_PARAMETER_TYPES(MachineType::AnyTagged(), // kTarget
+ MachineType::AnyTagged(), // kArgumentsList
+ MachineType::Int32(), // kSlot
+ MachineType::AnyTagged()) // kMaybeFeedbackVector
+ DECLARE_DESCRIPTOR(CallWithArrayLike_WithFeedbackDescriptor,
+ CallInterfaceDescriptor)
+};
+
class ConstructVarargsDescriptor : public CallInterfaceDescriptor {
public:
DEFINE_JS_PARAMETERS(kArgumentsLength, kArgumentsList)
@@ -979,6 +1033,20 @@ class ConstructWithSpreadDescriptor : public CallInterfaceDescriptor {
DECLARE_DESCRIPTOR(ConstructWithSpreadDescriptor, CallInterfaceDescriptor)
};
+// TODO(jgruber): Pass the slot as UintPtr.
+class ConstructWithSpread_WithFeedbackDescriptor
+ : public CallInterfaceDescriptor {
+ public:
+  // Note: kSlot comes before kSpread because, as an untagged value, it must
+  // be passed in a register.
+ DEFINE_JS_PARAMETERS(kSlot, kSpread, kMaybeFeedbackVector)
+ DEFINE_JS_PARAMETER_TYPES(MachineType::Int32(), // kSlot
+ MachineType::AnyTagged(), // kSpread
+ MachineType::AnyTagged()) // kMaybeFeedbackVector
+ DECLARE_DESCRIPTOR(ConstructWithSpread_WithFeedbackDescriptor,
+ CallInterfaceDescriptor)
+};
+
class ConstructWithArrayLikeDescriptor : public CallInterfaceDescriptor {
public:
DEFINE_PARAMETERS(kTarget, kNewTarget, kArgumentsList)
@@ -988,6 +1056,21 @@ class ConstructWithArrayLikeDescriptor : public CallInterfaceDescriptor {
DECLARE_DESCRIPTOR(ConstructWithArrayLikeDescriptor, CallInterfaceDescriptor)
};
+// TODO(jgruber): Pass the slot as UintPtr.
+class ConstructWithArrayLike_WithFeedbackDescriptor
+ : public CallInterfaceDescriptor {
+ public:
+ DEFINE_PARAMETERS(kTarget, kNewTarget, kArgumentsList, kSlot,
+ kMaybeFeedbackVector)
+ DEFINE_PARAMETER_TYPES(MachineType::AnyTagged(), // kTarget
+ MachineType::AnyTagged(), // kNewTarget
+ MachineType::AnyTagged(), // kArgumentsList
+ MachineType::Int32(), // kSlot
+ MachineType::AnyTagged()) // kMaybeFeedbackVector
+ DECLARE_DESCRIPTOR(ConstructWithArrayLike_WithFeedbackDescriptor,
+ CallInterfaceDescriptor)
+};
+
// TODO(ishell): consider merging this with ArrayConstructorDescriptor
class ConstructStubDescriptor : public CallInterfaceDescriptor {
public:
@@ -1006,13 +1089,6 @@ class AbortDescriptor : public CallInterfaceDescriptor {
DECLARE_DESCRIPTOR(AbortDescriptor, CallInterfaceDescriptor)
};
-class AllocateHeapNumberDescriptor : public CallInterfaceDescriptor {
- public:
- DEFINE_PARAMETERS_NO_CONTEXT()
- DEFINE_PARAMETER_TYPES()
- DECLARE_DESCRIPTOR(AllocateHeapNumberDescriptor, CallInterfaceDescriptor)
-};
-
class ArrayConstructorDescriptor : public CallInterfaceDescriptor {
public:
DEFINE_JS_PARAMETERS(kAllocationSite)
@@ -1331,52 +1407,6 @@ class WasmFloat64ToNumberDescriptor final : public CallInterfaceDescriptor {
DECLARE_DESCRIPTOR(WasmFloat64ToNumberDescriptor, CallInterfaceDescriptor)
};
-class WasmTableInitDescriptor final : public CallInterfaceDescriptor {
- public:
- DEFINE_PARAMETERS_NO_CONTEXT(kDestination, kSource, kSize, kTableIndex,
- kSegmentIndex)
- DEFINE_PARAMETER_TYPES(MachineType::Int32(), // kDestination
- MachineType::Int32(), // kSource
- MachineType::Int32(), // kSize
- MachineType::AnyTagged(), // kTableIndex
- MachineType::AnyTagged(), // kSegmentindex
- )
-
-#if V8_TARGET_ARCH_IA32
- static constexpr bool kPassLastArgOnStack = true;
-#else
- static constexpr bool kPassLastArgOnStack = false;
-#endif
-
- // Pass the last parameter through the stack.
- static constexpr int kStackArgumentsCount = kPassLastArgOnStack ? 1 : 0;
-
- DECLARE_DESCRIPTOR(WasmTableInitDescriptor, CallInterfaceDescriptor)
-};
-
-class WasmTableCopyDescriptor final : public CallInterfaceDescriptor {
- public:
- DEFINE_PARAMETERS_NO_CONTEXT(kDestination, kSource, kSize, kDestinationTable,
- kSourceTable)
- DEFINE_PARAMETER_TYPES(MachineType::Int32(), // kDestination
- MachineType::Int32(), // kSource
- MachineType::Int32(), // kSize
- MachineType::AnyTagged(), // kDestinationTable
- MachineType::AnyTagged(), // kSourceTable
- )
-
-#if V8_TARGET_ARCH_IA32
- static constexpr bool kPassLastArgOnStack = true;
-#else
- static constexpr bool kPassLastArgOnStack = false;
-#endif
-
- // Pass the last parameter through the stack.
- static constexpr int kStackArgumentsCount = kPassLastArgOnStack ? 1 : 0;
-
- DECLARE_DESCRIPTOR(WasmTableCopyDescriptor, CallInterfaceDescriptor)
-};
-
class V8_EXPORT_PRIVATE I64ToBigIntDescriptor final
: public CallInterfaceDescriptor {
public:
@@ -1414,15 +1444,6 @@ class V8_EXPORT_PRIVATE BigIntToI32PairDescriptor final
DECLARE_DESCRIPTOR(BigIntToI32PairDescriptor, CallInterfaceDescriptor)
};
-class WasmAtomicNotifyDescriptor final : public CallInterfaceDescriptor {
- public:
- DEFINE_PARAMETERS_NO_CONTEXT(kAddress, kCount)
- DEFINE_RESULT_AND_PARAMETER_TYPES(MachineType::Uint32(), // result 1
- MachineType::Uint32(), // kAddress
- MachineType::Uint32()) // kCount
- DECLARE_DESCRIPTOR(WasmAtomicNotifyDescriptor, CallInterfaceDescriptor)
-};
-
class WasmI32AtomicWait32Descriptor final : public CallInterfaceDescriptor {
public:
DEFINE_PARAMETERS_NO_CONTEXT(kAddress, kExpectedValue, kTimeoutLow,
@@ -1461,26 +1482,6 @@ class WasmI64AtomicWait32Descriptor final : public CallInterfaceDescriptor {
DECLARE_DESCRIPTOR(WasmI64AtomicWait32Descriptor, CallInterfaceDescriptor)
};
-class WasmI32AtomicWait64Descriptor final : public CallInterfaceDescriptor {
- public:
- DEFINE_PARAMETERS_NO_CONTEXT(kAddress, kExpectedValue, kTimeout)
- DEFINE_RESULT_AND_PARAMETER_TYPES(MachineType::Uint32(), // result 1
- MachineType::Uint32(), // kAddress
- MachineType::Int32(), // kExpectedValue
- MachineType::Uint64()) // kTimeout
- DECLARE_DESCRIPTOR(WasmI32AtomicWait64Descriptor, CallInterfaceDescriptor)
-};
-
-class WasmI64AtomicWait64Descriptor final : public CallInterfaceDescriptor {
- public:
- DEFINE_PARAMETERS_NO_CONTEXT(kAddress, kExpectedValue, kTimeout)
- DEFINE_RESULT_AND_PARAMETER_TYPES(MachineType::Uint32(), // result 1
- MachineType::Uint32(), // kAddress
- MachineType::Uint64(), // kExpectedValue
- MachineType::Uint64()) // kTimeout
- DECLARE_DESCRIPTOR(WasmI64AtomicWait64Descriptor, CallInterfaceDescriptor)
-};
-
class CloneObjectWithVectorDescriptor final : public CallInterfaceDescriptor {
public:
DEFINE_PARAMETERS(kSource, kFlags, kSlot, kVector)
@@ -1497,11 +1498,12 @@ class BinaryOp_WithFeedbackDescriptor : public CallInterfaceDescriptor {
DEFINE_PARAMETERS(kLeft, kRight, kSlot, kMaybeFeedbackVector)
DEFINE_PARAMETER_TYPES(MachineType::AnyTagged(), // kLeft
MachineType::AnyTagged(), // kRight
- MachineType::Int32(), // kSlot
+ MachineType::UintPtr(), // kSlot
MachineType::AnyTagged()) // kMaybeFeedbackVector
DECLARE_DESCRIPTOR(BinaryOp_WithFeedbackDescriptor, CallInterfaceDescriptor)
};
+// TODO(jgruber): Pass the slot as UintPtr.
class CallTrampoline_WithFeedbackDescriptor : public CallInterfaceDescriptor {
public:
DEFINE_PARAMETERS_VARARGS(kFunction, kActualArgumentsCount, kSlot,
@@ -1519,11 +1521,12 @@ class Compare_WithFeedbackDescriptor : public CallInterfaceDescriptor {
DEFINE_PARAMETERS(kLeft, kRight, kSlot, kMaybeFeedbackVector)
DEFINE_PARAMETER_TYPES(MachineType::AnyTagged(), // kLeft
MachineType::AnyTagged(), // kRight
- MachineType::Int32(), // kSlot
+ MachineType::UintPtr(), // kSlot
MachineType::AnyTagged()) // kMaybeFeedbackVector
DECLARE_DESCRIPTOR(Compare_WithFeedbackDescriptor, CallInterfaceDescriptor)
};
+// TODO(jgruber): Pass the slot as UintPtr.
class Construct_WithFeedbackDescriptor : public CallInterfaceDescriptor {
public:
// kSlot is passed in a register, kMaybeFeedbackVector on the stack.
@@ -1538,7 +1541,7 @@ class UnaryOp_WithFeedbackDescriptor : public CallInterfaceDescriptor {
public:
DEFINE_PARAMETERS(kValue, kSlot, kMaybeFeedbackVector)
DEFINE_PARAMETER_TYPES(MachineType::AnyTagged(), // kValue
- MachineType::Int32(), // kSlot
+ MachineType::UintPtr(), // kSlot
MachineType::AnyTagged()) // kMaybeFeedbackVector
DECLARE_DESCRIPTOR(UnaryOp_WithFeedbackDescriptor, CallInterfaceDescriptor)
};
diff --git a/chromium/v8/src/codegen/machine-type.h b/chromium/v8/src/codegen/machine-type.h
index ea054415942..e7e10208d7b 100644
--- a/chromium/v8/src/codegen/machine-type.h
+++ b/chromium/v8/src/codegen/machine-type.h
@@ -188,50 +188,10 @@ class MachineType {
constexpr static MachineType Bool() {
return MachineType(MachineRepresentation::kBit, MachineSemantic::kBool);
}
- constexpr static MachineType TaggedBool() {
- return MachineType(MachineRepresentation::kTagged, MachineSemantic::kBool);
- }
- constexpr static MachineType CompressedBool() {
- return MachineType(MachineRepresentation::kCompressed,
- MachineSemantic::kBool);
- }
constexpr static MachineType None() {
return MachineType(MachineRepresentation::kNone, MachineSemantic::kNone);
}
- // These naked representations should eventually go away.
- constexpr static MachineType RepWord8() {
- return MachineType(MachineRepresentation::kWord8, MachineSemantic::kNone);
- }
- constexpr static MachineType RepWord16() {
- return MachineType(MachineRepresentation::kWord16, MachineSemantic::kNone);
- }
- constexpr static MachineType RepWord32() {
- return MachineType(MachineRepresentation::kWord32, MachineSemantic::kNone);
- }
- constexpr static MachineType RepWord64() {
- return MachineType(MachineRepresentation::kWord64, MachineSemantic::kNone);
- }
- constexpr static MachineType RepFloat32() {
- return MachineType(MachineRepresentation::kFloat32, MachineSemantic::kNone);
- }
- constexpr static MachineType RepFloat64() {
- return MachineType(MachineRepresentation::kFloat64, MachineSemantic::kNone);
- }
- constexpr static MachineType RepSimd128() {
- return MachineType(MachineRepresentation::kSimd128, MachineSemantic::kNone);
- }
- constexpr static MachineType RepTagged() {
- return MachineType(MachineRepresentation::kTagged, MachineSemantic::kNone);
- }
- constexpr static MachineType RepCompressed() {
- return MachineType(MachineRepresentation::kCompressed,
- MachineSemantic::kNone);
- }
- constexpr static MachineType RepBit() {
- return MachineType(MachineRepresentation::kBit, MachineSemantic::kNone);
- }
-
static MachineType TypeForRepresentation(const MachineRepresentation& rep,
bool isSigned = true) {
switch (rep) {
diff --git a/chromium/v8/src/codegen/mips/assembler-mips.cc b/chromium/v8/src/codegen/mips/assembler-mips.cc
index 768b16b86c4..19a514b2d9d 100644
--- a/chromium/v8/src/codegen/mips/assembler-mips.cc
+++ b/chromium/v8/src/codegen/mips/assembler-mips.cc
@@ -3568,17 +3568,20 @@ void Assembler::GrowBuffer() {
void Assembler::db(uint8_t data) {
CheckForEmitInForbiddenSlot();
- EmitHelper(data);
+ *reinterpret_cast<uint8_t*>(pc_) = data;
+ pc_ += sizeof(uint8_t);
}
void Assembler::dd(uint32_t data) {
CheckForEmitInForbiddenSlot();
- EmitHelper(data);
+ *reinterpret_cast<uint32_t*>(pc_) = data;
+ pc_ += sizeof(uint32_t);
}
void Assembler::dq(uint64_t data) {
CheckForEmitInForbiddenSlot();
- EmitHelper(data);
+ *reinterpret_cast<uint64_t*>(pc_) = data;
+ pc_ += sizeof(uint64_t);
}
void Assembler::dd(Label* label) {
@@ -3652,8 +3655,12 @@ void Assembler::CheckTrampolinePool() {
}
}
}
- bind(&after_pool);
+  // If unbound_labels_count_ is large enough, the after_pool label will need
+  // a trampoline too, so create the trampoline before the bind operation so
+  // that 'bind' can take it into account.
trampoline_ = Trampoline(pool_start, unbound_labels_count_);
+ bind(&after_pool);
trampoline_emitted_ = true;
// As we are only going to emit trampoline once, we need to prevent any
@@ -3794,6 +3801,7 @@ void Assembler::GenPCRelativeJumpAndLink(Register t, int32_t imm32,
addu(t, ra, t);
jalr(t);
if (bdslot == PROTECT) nop();
+ set_last_call_pc_(pc_);
}
UseScratchRegisterScope::UseScratchRegisterScope(Assembler* assembler)
diff --git a/chromium/v8/src/codegen/mips/assembler-mips.h b/chromium/v8/src/codegen/mips/assembler-mips.h
index a414168a9f3..248bd1ac751 100644
--- a/chromium/v8/src/codegen/mips/assembler-mips.h
+++ b/chromium/v8/src/codegen/mips/assembler-mips.h
@@ -170,6 +170,35 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
// Unused on this architecture.
void MaybeEmitOutOfLineConstantPool() {}
+  // Mips uses BlockTrampolinePool to prevent generating a trampoline inside a
+  // continuous instruction block. For a Call instruction, it prevents
+  // generating a trampoline between the jalr and its delay slot instruction.
+  // The destructor of BlockTrampolinePool must check whether a trampoline
+  // needs to be generated immediately; otherwise the branch range could
+  // exceed the maximum branch offset, which means the pc_offset after calling
+  // CheckTrampolinePool may not be the Call instruction's location. So we use
+  // last_call_pc here for the safepoint record.
+ int pc_offset_for_safepoint() {
+#ifdef DEBUG
+ Instr instr1 =
+ instr_at(static_cast<int>(last_call_pc_ - buffer_start_ - kInstrSize));
+ Instr instr2 = instr_at(
+ static_cast<int>(last_call_pc_ - buffer_start_ - kInstrSize * 2));
+ if (GetOpcodeField(instr1) != SPECIAL) { // instr1 == jialc.
+ DCHECK(IsMipsArchVariant(kMips32r6) && GetOpcodeField(instr1) == POP76 &&
+ GetRs(instr1) == 0);
+ } else {
+ if (GetFunctionField(instr1) == SLL) { // instr1 == nop, instr2 == jalr.
+ DCHECK(GetOpcodeField(instr2) == SPECIAL &&
+ GetFunctionField(instr2) == JALR);
+ } else { // instr1 == jalr.
+ DCHECK(GetFunctionField(instr1) == JALR);
+ }
+ }
+#endif
+ return static_cast<int>(last_call_pc_ - buffer_start_);
+ }
+
// Label operations & relative jumps (PPUM Appendix D).
//
// Takes a branch opcode (cc) and a label (L) and generates
@@ -1593,6 +1622,8 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
void GenPCRelativeJumpAndLink(Register t, int32_t imm32,
RelocInfo::Mode rmode, BranchDelaySlot bdslot);
+ void set_last_call_pc_(byte* pc) { last_call_pc_ = pc; }
+
private:
// Avoid overflows for displacements etc.
static const int kMaximalBufferSize = 512 * MB;
@@ -1856,6 +1887,11 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
Trampoline trampoline_;
bool internal_trampoline_exception_;
+  // Keep track of the last Call's position to ensure that the safepoint can
+  // record the correct information even if a trampoline is emitted
+  // immediately after the Call.
+ byte* last_call_pc_;
+
private:
void AllocateAndInstallRequestedHeapObjects(Isolate* isolate);
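
Editor's note: to make the intent of last_call_pc_ concrete — Call() records the pc right after the call sequence, and the safepoint machinery is expected to read pc_offset_for_safepoint() rather than the raw pc_offset(), so a trampoline pool emitted immediately after the call does not shift the recorded offset. A standalone toy sketch of that bookkeeping (apart from the two member names, nothing here is V8 code):

#include <cassert>
#include <cstddef>
#include <cstdint>
#include <vector>

// Toy assembler: every "instruction" is 4 bytes appended to a buffer.
struct ToyAssembler {
  std::vector<uint8_t> buffer;
  size_t last_call_pc = 0;

  void Emit() { buffer.insert(buffer.end(), 4, 0); }

  void Call() {
    Emit();                        // jalr
    Emit();                        // delay-slot nop
    last_call_pc = buffer.size();  // remember the pc right after the call
  }

  // May be emitted between the call and the safepoint record.
  void EmitTrampolinePool(int entries) {
    for (int i = 0; i < entries; ++i) Emit();
  }

  size_t pc_offset() const { return buffer.size(); }
  size_t pc_offset_for_safepoint() const { return last_call_pc; }
};

int main() {
  ToyAssembler masm;
  masm.Call();
  size_t call_end = masm.pc_offset();
  masm.EmitTrampolinePool(8);  // trampoline lands right after the call
  assert(masm.pc_offset() != call_end);                // raw pc has moved on
  assert(masm.pc_offset_for_safepoint() == call_end);  // safepoint pc has not
  return 0;
}
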
diff --git a/chromium/v8/src/codegen/mips/interface-descriptors-mips.cc b/chromium/v8/src/codegen/mips/interface-descriptors-mips.cc
index 6770ab5cce8..c092ebc2c75 100644
--- a/chromium/v8/src/codegen/mips/interface-descriptors-mips.cc
+++ b/chromium/v8/src/codegen/mips/interface-descriptors-mips.cc
@@ -39,14 +39,6 @@ void WasmI32AtomicWait32Descriptor::InitializePlatformSpecific(
data->InitializePlatformSpecific(kParameterCount, default_stub_registers);
}
-void WasmI32AtomicWait64Descriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- const Register default_stub_registers[] = {a0, a1, a2};
- CHECK_EQ(static_cast<size_t>(kParameterCount),
- arraysize(default_stub_registers));
- data->InitializePlatformSpecific(kParameterCount, default_stub_registers);
-}
-
void WasmI64AtomicWait32Descriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
const Register default_stub_registers[] = {a0, a1, a2, a3, t0};
@@ -56,14 +48,6 @@ void WasmI64AtomicWait32Descriptor::InitializePlatformSpecific(
default_stub_registers);
}
-void WasmI64AtomicWait64Descriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- const Register default_stub_registers[] = {a0, a1, a2};
- CHECK_EQ(static_cast<size_t>(kParameterCount),
- arraysize(default_stub_registers));
- data->InitializePlatformSpecific(kParameterCount, default_stub_registers);
-}
-
void RecordWriteDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
const Register default_stub_registers[] = {a0, a1, a2, a3, kReturnRegister0};
@@ -233,12 +217,6 @@ void AbortDescriptor::InitializePlatformSpecific(
data->InitializePlatformSpecific(arraysize(registers), registers);
}
-void AllocateHeapNumberDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- // register state
- data->InitializePlatformSpecific(0, nullptr);
-}
-
void CompareDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
Register registers[] = {a1, a0};
@@ -338,6 +316,30 @@ void CallTrampoline_WithFeedbackDescriptor::InitializePlatformSpecific(
DefaultInitializePlatformSpecific(data, 4);
}
+void CallWithArrayLike_WithFeedbackDescriptor::InitializePlatformSpecific(
+ CallInterfaceDescriptorData* data) {
+ // TODO(v8:8888): Implement on this platform.
+ DefaultInitializePlatformSpecific(data, 4);
+}
+
+void CallWithSpread_WithFeedbackDescriptor::InitializePlatformSpecific(
+ CallInterfaceDescriptorData* data) {
+ // TODO(v8:8888): Implement on this platform.
+ DefaultInitializePlatformSpecific(data, 4);
+}
+
+void ConstructWithArrayLike_WithFeedbackDescriptor::InitializePlatformSpecific(
+ CallInterfaceDescriptorData* data) {
+ // TODO(v8:8888): Implement on this platform.
+ DefaultInitializePlatformSpecific(data, 4);
+}
+
+void ConstructWithSpread_WithFeedbackDescriptor::InitializePlatformSpecific(
+ CallInterfaceDescriptorData* data) {
+ // TODO(v8:8888): Implement on this platform.
+ DefaultInitializePlatformSpecific(data, 4);
+}
+
void Compare_WithFeedbackDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
// TODO(v8:8888): Implement on this platform.
diff --git a/chromium/v8/src/codegen/mips/macro-assembler-mips.cc b/chromium/v8/src/codegen/mips/macro-assembler-mips.cc
index 48b2acf4562..efb2dc11e1f 100644
--- a/chromium/v8/src/codegen/mips/macro-assembler-mips.cc
+++ b/chromium/v8/src/codegen/mips/macro-assembler-mips.cc
@@ -3906,6 +3906,7 @@ void TurboAssembler::Call(Register target, int16_t offset, Condition cond,
// Emit a nop in the branch delay slot if required.
if (bd == PROTECT) nop();
}
+ set_last_call_pc_(pc_);
}
// Note: To call gcc-compiled C code on mips, you must call through t9.
@@ -3938,6 +3939,7 @@ void TurboAssembler::Call(Register target, Register base, int16_t offset,
// Emit a nop in the branch delay slot if required.
if (bd == PROTECT) nop();
}
+ set_last_call_pc_(pc_);
}
void TurboAssembler::Call(Address target, RelocInfo::Mode rmode, Condition cond,
@@ -5427,7 +5429,7 @@ void TurboAssembler::CallCFunctionHelper(Register function_base,
void TurboAssembler::CheckPageFlag(Register object, Register scratch, int mask,
Condition cc, Label* condition_met) {
And(scratch, object, Operand(~kPageAlignmentMask));
- lw(scratch, MemOperand(scratch, MemoryChunk::kFlagsOffset));
+ lw(scratch, MemOperand(scratch, BasicMemoryChunk::kFlagsOffset));
And(scratch, scratch, Operand(mask));
Branch(condition_met, cc, scratch, Operand(zero_reg));
}
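
Editor's note: CheckPageFlag first masks the object address down to its page/chunk base and then tests a flags word at a fixed offset; the patch only re-homes that offset from MemoryChunk to BasicMemoryChunk. A standalone sketch of the masking step, with an alignment of 256 KiB assumed purely for illustration:

#include <cassert>
#include <cstdint>

int main() {
  // Assumed chunk alignment for illustration only; V8's actual value may differ.
  const uintptr_t kPageAlignmentMask = (1u << 18) - 1;  // 256 KiB - 1
  uintptr_t object = 0x12345678;
  // Mirrors: And(scratch, object, Operand(~kPageAlignmentMask))
  uintptr_t chunk = object & ~kPageAlignmentMask;
  assert(chunk == 0x12340000);  // chunk base; flags live at a fixed offset here
  return 0;
}
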
diff --git a/chromium/v8/src/codegen/mips64/assembler-mips64.cc b/chromium/v8/src/codegen/mips64/assembler-mips64.cc
index 751d0f8703e..3ec7bbb5e08 100644
--- a/chromium/v8/src/codegen/mips64/assembler-mips64.cc
+++ b/chromium/v8/src/codegen/mips64/assembler-mips64.cc
@@ -3763,17 +3763,20 @@ void Assembler::GrowBuffer() {
void Assembler::db(uint8_t data) {
CheckForEmitInForbiddenSlot();
- EmitHelper(data);
+ *reinterpret_cast<uint8_t*>(pc_) = data;
+ pc_ += sizeof(uint8_t);
}
void Assembler::dd(uint32_t data) {
CheckForEmitInForbiddenSlot();
- EmitHelper(data);
+ *reinterpret_cast<uint32_t*>(pc_) = data;
+ pc_ += sizeof(uint32_t);
}
void Assembler::dq(uint64_t data) {
CheckForEmitInForbiddenSlot();
- EmitHelper(data);
+ *reinterpret_cast<uint64_t*>(pc_) = data;
+ pc_ += sizeof(uint64_t);
}
void Assembler::dd(Label* label) {
@@ -3856,8 +3859,12 @@ void Assembler::CheckTrampolinePool() {
}
}
nop();
- bind(&after_pool);
+  // If unbound_labels_count_ is large enough, the after_pool label will need
+  // a trampoline too, so create the trampoline before the bind operation so
+  // that 'bind' can take it into account.
trampoline_ = Trampoline(pool_start, unbound_labels_count_);
+ bind(&after_pool);
trampoline_emitted_ = true;
// As we are only going to emit trampoline once, we need to prevent any
diff --git a/chromium/v8/src/codegen/mips64/assembler-mips64.h b/chromium/v8/src/codegen/mips64/assembler-mips64.h
index f70e46f81b3..b5edc75676f 100644
--- a/chromium/v8/src/codegen/mips64/assembler-mips64.h
+++ b/chromium/v8/src/codegen/mips64/assembler-mips64.h
@@ -168,6 +168,35 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
// Unused on this architecture.
void MaybeEmitOutOfLineConstantPool() {}
+  // Mips uses BlockTrampolinePool to prevent generating a trampoline inside a
+  // continuous instruction block. For a Call instruction, it prevents
+  // generating a trampoline between the jalr and its delay slot instruction.
+  // The destructor of BlockTrampolinePool must check whether a trampoline
+  // needs to be generated immediately; otherwise the branch range could
+  // exceed the maximum branch offset, which means the pc_offset after calling
+  // CheckTrampolinePool may not be the Call instruction's location. So we use
+  // last_call_pc here for the safepoint record.
+ int pc_offset_for_safepoint() {
+#ifdef DEBUG
+ Instr instr1 =
+ instr_at(static_cast<int>(last_call_pc_ - buffer_start_ - kInstrSize));
+ Instr instr2 = instr_at(
+ static_cast<int>(last_call_pc_ - buffer_start_ - kInstrSize * 2));
+ if (GetOpcodeField(instr1) != SPECIAL) { // instr1 == jialc.
+ DCHECK((kArchVariant == kMips64r6) && GetOpcodeField(instr1) == POP76 &&
+ GetRs(instr1) == 0);
+ } else {
+ if (GetFunctionField(instr1) == SLL) { // instr1 == nop, instr2 == jalr.
+ DCHECK(GetOpcodeField(instr2) == SPECIAL &&
+ GetFunctionField(instr2) == JALR);
+ } else { // instr1 == jalr.
+ DCHECK(GetFunctionField(instr1) == JALR);
+ }
+ }
+#endif
+ return static_cast<int>(last_call_pc_ - buffer_start_);
+ }
+
// Label operations & relative jumps (PPUM Appendix D).
//
// Takes a branch opcode (cc) and a label (L) and generates
@@ -1629,6 +1658,8 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
}
}
+ void set_last_call_pc_(byte* pc) { last_call_pc_ = pc; }
+
private:
// Avoid overflows for displacements etc.
static const int kMaximalBufferSize = 512 * MB;
@@ -1882,6 +1913,11 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
Trampoline trampoline_;
bool internal_trampoline_exception_;
+  // Keep track of the last Call's position to ensure that the safepoint can
+  // record the correct information even if a trampoline is emitted
+  // immediately after the Call.
+ byte* last_call_pc_;
+
RegList scratch_register_list_;
private:
diff --git a/chromium/v8/src/codegen/mips64/interface-descriptors-mips64.cc b/chromium/v8/src/codegen/mips64/interface-descriptors-mips64.cc
index 077b49fa999..00067454f1e 100644
--- a/chromium/v8/src/codegen/mips64/interface-descriptors-mips64.cc
+++ b/chromium/v8/src/codegen/mips64/interface-descriptors-mips64.cc
@@ -39,14 +39,6 @@ void WasmI32AtomicWait32Descriptor::InitializePlatformSpecific(
data->InitializePlatformSpecific(kParameterCount, default_stub_registers);
}
-void WasmI32AtomicWait64Descriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- const Register default_stub_registers[] = {a0, a1, a2};
- CHECK_EQ(static_cast<size_t>(kParameterCount),
- arraysize(default_stub_registers));
- data->InitializePlatformSpecific(kParameterCount, default_stub_registers);
-}
-
void WasmI64AtomicWait32Descriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
const Register default_stub_registers[] = {a0, a1, a2, a3, a4};
@@ -56,14 +48,6 @@ void WasmI64AtomicWait32Descriptor::InitializePlatformSpecific(
default_stub_registers);
}
-void WasmI64AtomicWait64Descriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- const Register default_stub_registers[] = {a0, a1, a2};
- CHECK_EQ(static_cast<size_t>(kParameterCount),
- arraysize(default_stub_registers));
- data->InitializePlatformSpecific(kParameterCount, default_stub_registers);
-}
-
void RecordWriteDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
const Register default_stub_registers[] = {a0, a1, a2, a3, kReturnRegister0};
@@ -233,12 +217,6 @@ void AbortDescriptor::InitializePlatformSpecific(
data->InitializePlatformSpecific(arraysize(registers), registers);
}
-void AllocateHeapNumberDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- // register state
- data->InitializePlatformSpecific(0, nullptr);
-}
-
void CompareDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
Register registers[] = {a1, a0};
@@ -338,6 +316,30 @@ void CallTrampoline_WithFeedbackDescriptor::InitializePlatformSpecific(
DefaultInitializePlatformSpecific(data, 4);
}
+void CallWithArrayLike_WithFeedbackDescriptor::InitializePlatformSpecific(
+ CallInterfaceDescriptorData* data) {
+ // TODO(v8:8888): Implement on this platform.
+ DefaultInitializePlatformSpecific(data, 4);
+}
+
+void CallWithSpread_WithFeedbackDescriptor::InitializePlatformSpecific(
+ CallInterfaceDescriptorData* data) {
+ // TODO(v8:8888): Implement on this platform.
+ DefaultInitializePlatformSpecific(data, 4);
+}
+
+void ConstructWithArrayLike_WithFeedbackDescriptor::InitializePlatformSpecific(
+ CallInterfaceDescriptorData* data) {
+ // TODO(v8:8888): Implement on this platform.
+ DefaultInitializePlatformSpecific(data, 4);
+}
+
+void ConstructWithSpread_WithFeedbackDescriptor::InitializePlatformSpecific(
+ CallInterfaceDescriptorData* data) {
+ // TODO(v8:8888): Implement on this platform.
+ DefaultInitializePlatformSpecific(data, 4);
+}
+
void Compare_WithFeedbackDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
// TODO(v8:8888): Implement on this platform.
diff --git a/chromium/v8/src/codegen/mips64/macro-assembler-mips64.cc b/chromium/v8/src/codegen/mips64/macro-assembler-mips64.cc
index a665b76e80e..785cf4aa5cc 100644
--- a/chromium/v8/src/codegen/mips64/macro-assembler-mips64.cc
+++ b/chromium/v8/src/codegen/mips64/macro-assembler-mips64.cc
@@ -4235,6 +4235,7 @@ void TurboAssembler::Call(Register target, Condition cond, Register rs,
// Emit a nop in the branch delay slot if required.
if (bd == PROTECT) nop();
}
+ set_last_call_pc_(pc_);
}
void MacroAssembler::JumpIfIsInRange(Register value, unsigned lower_limit,
@@ -5753,7 +5754,7 @@ void TurboAssembler::CallCFunctionHelper(Register function,
void TurboAssembler::CheckPageFlag(Register object, Register scratch, int mask,
Condition cc, Label* condition_met) {
And(scratch, object, Operand(~kPageAlignmentMask));
- Ld(scratch, MemOperand(scratch, MemoryChunk::kFlagsOffset));
+ Ld(scratch, MemOperand(scratch, BasicMemoryChunk::kFlagsOffset));
And(scratch, scratch, Operand(mask));
Branch(condition_met, cc, scratch, Operand(zero_reg));
}
diff --git a/chromium/v8/src/codegen/optimized-compilation-info.cc b/chromium/v8/src/codegen/optimized-compilation-info.cc
index 19f93e674e1..286f66e252b 100644
--- a/chromium/v8/src/codegen/optimized-compilation-info.cc
+++ b/chromium/v8/src/codegen/optimized-compilation-info.cc
@@ -19,7 +19,7 @@ namespace internal {
OptimizedCompilationInfo::OptimizedCompilationInfo(
Zone* zone, Isolate* isolate, Handle<SharedFunctionInfo> shared,
- Handle<JSFunction> closure)
+ Handle<JSFunction> closure, bool native_context_independent)
: OptimizedCompilationInfo(Code::OPTIMIZED_FUNCTION, zone) {
DCHECK_EQ(*shared, closure->shared());
DCHECK(shared->is_compiled());
@@ -32,9 +32,10 @@ OptimizedCompilationInfo::OptimizedCompilationInfo(
// is active, to be able to get more precise source positions at the price of
// more memory consumption.
if (isolate->NeedsDetailedOptimizedCodeLineInfo()) {
- MarkAsSourcePositionsEnabled();
+ set_source_positions();
}
+ if (native_context_independent) set_native_context_independent();
SetTracingFlags(shared->PassesFilter(FLAG_trace_turbo_filter));
}
@@ -53,59 +54,82 @@ OptimizedCompilationInfo::OptimizedCompilationInfo(Code::Kind code_kind,
ConfigureFlags();
}
+#ifdef DEBUG
+bool OptimizedCompilationInfo::FlagSetIsValid(Flag flag) const {
+ switch (flag) {
+ case kPoisonRegisterArguments:
+ return untrusted_code_mitigations();
+ default:
+ return true;
+ }
+ UNREACHABLE();
+}
+
+bool OptimizedCompilationInfo::FlagGetIsValid(Flag flag) const {
+ switch (flag) {
+ case kPoisonRegisterArguments:
+ if (!GetFlag(kPoisonRegisterArguments)) return true;
+ return untrusted_code_mitigations() && called_with_code_start_register();
+ default:
+ return true;
+ }
+ UNREACHABLE();
+}
+#endif // DEBUG
+
void OptimizedCompilationInfo::ConfigureFlags() {
- if (FLAG_untrusted_code_mitigations) SetFlag(kUntrustedCodeMitigations);
+ if (FLAG_untrusted_code_mitigations) set_untrusted_code_mitigations();
switch (code_kind_) {
case Code::OPTIMIZED_FUNCTION:
- SetFlag(kCalledWithCodeStartRegister);
- SetFlag(kSwitchJumpTableEnabled);
+ set_called_with_code_start_register();
+ set_switch_jump_table();
if (FLAG_function_context_specialization) {
- MarkAsFunctionContextSpecializing();
+ set_function_context_specializing();
}
if (FLAG_turbo_splitting) {
- MarkAsSplittingEnabled();
+ set_splitting();
}
if (FLAG_untrusted_code_mitigations) {
- MarkAsPoisoningRegisterArguments();
+ set_poison_register_arguments();
}
if (FLAG_analyze_environment_liveness) {
// TODO(yangguo): Disable this in case of debugging for crbug.com/826613
- MarkAsAnalyzeEnvironmentLiveness();
+ set_analyze_environment_liveness();
}
break;
case Code::BYTECODE_HANDLER:
- SetFlag(kCalledWithCodeStartRegister);
+ set_called_with_code_start_register();
if (FLAG_turbo_splitting) {
- MarkAsSplittingEnabled();
+ set_splitting();
}
break;
case Code::BUILTIN:
case Code::STUB:
if (FLAG_turbo_splitting) {
- MarkAsSplittingEnabled();
+ set_splitting();
}
#if ENABLE_GDB_JIT_INTERFACE && DEBUG
- MarkAsSourcePositionsEnabled();
+ set_source_positions();
#endif // ENABLE_GDB_JIT_INTERFACE && DEBUG
break;
case Code::WASM_FUNCTION:
case Code::WASM_TO_CAPI_FUNCTION:
- SetFlag(kSwitchJumpTableEnabled);
+ set_switch_jump_table();
break;
default:
break;
}
if (FLAG_turbo_control_flow_aware_allocation) {
- MarkAsTurboControlFlowAwareAllocation();
+ set_turbo_control_flow_aware_allocation();
} else {
- MarkAsTurboPreprocessRanges();
+ set_turbo_preprocess_ranges();
}
}
OptimizedCompilationInfo::~OptimizedCompilationInfo() {
- if (GetFlag(kDisableFutureOptimization) && has_shared_info()) {
+ if (disable_future_optimization() && has_shared_info()) {
shared_info()->DisableOptimization(bailout_reason());
}
}
@@ -134,12 +158,12 @@ void OptimizedCompilationInfo::AbortOptimization(BailoutReason reason) {
if (bailout_reason_ == BailoutReason::kNoReason) {
bailout_reason_ = reason;
}
- SetFlag(kDisableFutureOptimization);
+ set_disable_future_optimization();
}
void OptimizedCompilationInfo::RetryOptimization(BailoutReason reason) {
DCHECK_NE(reason, BailoutReason::kNoReason);
- if (GetFlag(kDisableFutureOptimization)) return;
+ if (disable_future_optimization()) return;
bailout_reason_ = reason;
}
@@ -225,11 +249,11 @@ int OptimizedCompilationInfo::AddInlinedFunction(
void OptimizedCompilationInfo::SetTracingFlags(bool passes_filter) {
if (!passes_filter) return;
- if (FLAG_trace_turbo) SetFlag(kTraceTurboJson);
- if (FLAG_trace_turbo_graph) SetFlag(kTraceTurboGraph);
- if (FLAG_trace_turbo_scheduled) SetFlag(kTraceTurboScheduled);
- if (FLAG_trace_turbo_alloc) SetFlag(kTraceTurboAllocation);
- if (FLAG_trace_heap_broker) SetFlag(kTraceHeapBroker);
+ if (FLAG_trace_turbo) set_trace_turbo_json();
+ if (FLAG_trace_turbo_graph) set_trace_turbo_graph();
+ if (FLAG_trace_turbo_scheduled) set_trace_turbo_scheduled();
+ if (FLAG_trace_turbo_alloc) set_trace_turbo_allocation();
+ if (FLAG_trace_heap_broker) set_trace_heap_broker();
}
OptimizedCompilationInfo::InlinedFunctionHolder::InlinedFunctionHolder(
diff --git a/chromium/v8/src/codegen/optimized-compilation-info.h b/chromium/v8/src/codegen/optimized-compilation-info.h
index d6d4c88c990..6a5b5631ba2 100644
--- a/chromium/v8/src/codegen/optimized-compilation-info.h
+++ b/chromium/v8/src/codegen/optimized-compilation-info.h
@@ -11,6 +11,7 @@
#include "src/codegen/source-position-table.h"
#include "src/codegen/tick-counter.h"
#include "src/common/globals.h"
+#include "src/diagnostics/basic-block-profiler.h"
#include "src/execution/frames.h"
#include "src/handles/handles.h"
#include "src/objects/objects.h"
@@ -43,35 +44,64 @@ class V8_EXPORT_PRIVATE OptimizedCompilationInfo final {
public:
// Various configuration flags for a compilation, as well as some properties
// of the compiled code produced by a compilation.
+
+#define FLAGS(V) \
+ V(FunctionContextSpecializing, function_context_specializing, 0) \
+ V(Inlining, inlining, 1) \
+ V(DisableFutureOptimization, disable_future_optimization, 2) \
+ V(Splitting, splitting, 3) \
+ V(SourcePositions, source_positions, 4) \
+ V(BailoutOnUninitialized, bailout_on_uninitialized, 5) \
+ V(LoopPeeling, loop_peeling, 6) \
+ V(UntrustedCodeMitigations, untrusted_code_mitigations, 7) \
+ V(SwitchJumpTable, switch_jump_table, 8) \
+ V(CalledWithCodeStartRegister, called_with_code_start_register, 9) \
+ V(PoisonRegisterArguments, poison_register_arguments, 10) \
+ V(AllocationFolding, allocation_folding, 11) \
+ V(AnalyzeEnvironmentLiveness, analyze_environment_liveness, 12) \
+ V(TraceTurboJson, trace_turbo_json, 13) \
+ V(TraceTurboGraph, trace_turbo_graph, 14) \
+ V(TraceTurboScheduled, trace_turbo_scheduled, 15) \
+ V(TraceTurboAllocation, trace_turbo_allocation, 16) \
+ V(TraceHeapBroker, trace_heap_broker, 17) \
+ V(WasmRuntimeExceptionSupport, wasm_runtime_exception_support, 18) \
+ V(TurboControlFlowAwareAllocation, turbo_control_flow_aware_allocation, 19) \
+ V(TurboPreprocessRanges, turbo_preprocess_ranges, 20) \
+ V(ConcurrentInlining, concurrent_inlining, 21) \
+ V(NativeContextIndependent, native_context_independent, 22)
+
enum Flag {
- kFunctionContextSpecializing = 1 << 0,
- kInliningEnabled = 1 << 1,
- kDisableFutureOptimization = 1 << 2,
- kSplittingEnabled = 1 << 3,
- kSourcePositionsEnabled = 1 << 4,
- kBailoutOnUninitialized = 1 << 5,
- kLoopPeelingEnabled = 1 << 6,
- kUntrustedCodeMitigations = 1 << 7,
- kSwitchJumpTableEnabled = 1 << 8,
- kCalledWithCodeStartRegister = 1 << 9,
- kPoisonRegisterArguments = 1 << 10,
- kAllocationFoldingEnabled = 1 << 11,
- kAnalyzeEnvironmentLiveness = 1 << 12,
- kTraceTurboJson = 1 << 13,
- kTraceTurboGraph = 1 << 14,
- kTraceTurboScheduled = 1 << 15,
- kTraceTurboAllocation = 1 << 16,
- kTraceHeapBroker = 1 << 17,
- kWasmRuntimeExceptionSupport = 1 << 18,
- kTurboControlFlowAwareAllocation = 1 << 19,
- kTurboPreprocessRanges = 1 << 20,
- kConcurrentInlining = 1 << 21,
+#define DEF_ENUM(Camel, Lower, Bit) k##Camel = 1 << Bit,
+ FLAGS(DEF_ENUM)
+#undef DEF_ENUM
};
+#define DEF_GETTER(Camel, Lower, Bit) \
+ bool Lower() const { \
+ DCHECK(FlagGetIsValid(k##Camel)); \
+ return GetFlag(k##Camel); \
+ }
+ FLAGS(DEF_GETTER)
+#undef DEF_GETTER
+
+#define DEF_SETTER(Camel, Lower, Bit) \
+ void set_##Lower() { \
+ DCHECK(FlagSetIsValid(k##Camel)); \
+ SetFlag(k##Camel); \
+ }
+ FLAGS(DEF_SETTER)
+#undef DEF_SETTER
+
+#ifdef DEBUG
+ bool FlagGetIsValid(Flag flag) const;
+ bool FlagSetIsValid(Flag flag) const;
+#endif // DEBUG
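
Editor's note: for readers unfamiliar with the X-macro pattern introduced here — each FLAGS(V) entry expands into an enum bit plus a lower-case getter/setter pair, replacing the hand-written MarkAs*/is_* methods deleted below. A minimal standalone sketch of the same expansion, using two of the flags from the list above and omitting the FlagGetIsValid/FlagSetIsValid DCHECK hooks:

#include <cassert>
#include <cstdint>

class ToyCompilationInfo {
 public:
#define FLAGS(V)             \
  V(Splitting, splitting, 0) \
  V(SourcePositions, source_positions, 1)

  enum Flag {
#define DEF_ENUM(Camel, Lower, Bit) k##Camel = 1 << Bit,
    FLAGS(DEF_ENUM)
#undef DEF_ENUM
  };

#define DEF_GETTER(Camel, Lower, Bit) \
  bool Lower() const { return (flags_ & k##Camel) != 0; }
  FLAGS(DEF_GETTER)
#undef DEF_GETTER

#define DEF_SETTER(Camel, Lower, Bit) \
  void set_##Lower() { flags_ |= k##Camel; }
  FLAGS(DEF_SETTER)
#undef DEF_SETTER
#undef FLAGS

 private:
  uint32_t flags_ = 0;
};

int main() {
  ToyCompilationInfo info;
  assert(!info.splitting());
  info.set_splitting();                               // generated setter
  assert(info.splitting() && !info.source_positions());  // generated getters
  return 0;
}
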
+
// Construct a compilation info for optimized compilation.
OptimizedCompilationInfo(Zone* zone, Isolate* isolate,
Handle<SharedFunctionInfo> shared,
- Handle<JSFunction> closure);
+ Handle<JSFunction> closure,
+ bool native_context_independent);
// Construct a compilation info for stub compilation, Wasm, and testing.
OptimizedCompilationInfo(Vector<const char> debug_name, Zone* zone,
Code::Kind code_kind);
@@ -92,38 +122,6 @@ class V8_EXPORT_PRIVATE OptimizedCompilationInfo final {
BailoutId osr_offset() const { return osr_offset_; }
JavaScriptFrame* osr_frame() const { return osr_frame_; }
- // Flags used by optimized compilation.
-
- void MarkAsConcurrentInlining() { SetFlag(kConcurrentInlining); }
- bool is_concurrent_inlining() const { return GetFlag(kConcurrentInlining); }
-
- void MarkAsTurboControlFlowAwareAllocation() {
- SetFlag(kTurboControlFlowAwareAllocation);
- }
- bool is_turbo_control_flow_aware_allocation() const {
- return GetFlag(kTurboControlFlowAwareAllocation);
- }
-
- void MarkAsTurboPreprocessRanges() { SetFlag(kTurboPreprocessRanges); }
- bool is_turbo_preprocess_ranges() const {
- return GetFlag(kTurboPreprocessRanges);
- }
-
- void MarkAsFunctionContextSpecializing() {
- SetFlag(kFunctionContextSpecializing);
- }
- bool is_function_context_specializing() const {
- return GetFlag(kFunctionContextSpecializing);
- }
-
- void MarkAsSourcePositionsEnabled() { SetFlag(kSourcePositionsEnabled); }
- bool is_source_positions_enabled() const {
- return GetFlag(kSourcePositionsEnabled);
- }
-
- void MarkAsInliningEnabled() { SetFlag(kInliningEnabled); }
- bool is_inlining_enabled() const { return GetFlag(kInliningEnabled); }
-
void SetPoisoningMitigationLevel(PoisoningMitigationLevel poisoning_level) {
poisoning_level_ = poisoning_level;
}
@@ -131,75 +129,6 @@ class V8_EXPORT_PRIVATE OptimizedCompilationInfo final {
return poisoning_level_;
}
- void MarkAsSplittingEnabled() { SetFlag(kSplittingEnabled); }
- bool is_splitting_enabled() const { return GetFlag(kSplittingEnabled); }
-
- void MarkAsBailoutOnUninitialized() { SetFlag(kBailoutOnUninitialized); }
- bool is_bailout_on_uninitialized() const {
- return GetFlag(kBailoutOnUninitialized);
- }
-
- void MarkAsLoopPeelingEnabled() { SetFlag(kLoopPeelingEnabled); }
- bool is_loop_peeling_enabled() const { return GetFlag(kLoopPeelingEnabled); }
-
- bool has_untrusted_code_mitigations() const {
- return GetFlag(kUntrustedCodeMitigations);
- }
-
- bool switch_jump_table_enabled() const {
- return GetFlag(kSwitchJumpTableEnabled);
- }
-
- bool called_with_code_start_register() const {
- bool enabled = GetFlag(kCalledWithCodeStartRegister);
- return enabled;
- }
-
- void MarkAsPoisoningRegisterArguments() {
- DCHECK(has_untrusted_code_mitigations());
- SetFlag(kPoisonRegisterArguments);
- }
- bool is_poisoning_register_arguments() const {
- bool enabled = GetFlag(kPoisonRegisterArguments);
- DCHECK_IMPLIES(enabled, has_untrusted_code_mitigations());
- DCHECK_IMPLIES(enabled, called_with_code_start_register());
- return enabled;
- }
-
- void MarkAsAllocationFoldingEnabled() { SetFlag(kAllocationFoldingEnabled); }
- bool is_allocation_folding_enabled() const {
- return GetFlag(kAllocationFoldingEnabled);
- }
-
- void MarkAsAnalyzeEnvironmentLiveness() {
- SetFlag(kAnalyzeEnvironmentLiveness);
- }
- bool is_analyze_environment_liveness() const {
- return GetFlag(kAnalyzeEnvironmentLiveness);
- }
-
- void SetWasmRuntimeExceptionSupport() {
- SetFlag(kWasmRuntimeExceptionSupport);
- }
-
- bool wasm_runtime_exception_support() {
- return GetFlag(kWasmRuntimeExceptionSupport);
- }
-
- bool trace_turbo_json_enabled() const { return GetFlag(kTraceTurboJson); }
-
- bool trace_turbo_graph_enabled() const { return GetFlag(kTraceTurboGraph); }
-
- bool trace_turbo_allocation_enabled() const {
- return GetFlag(kTraceTurboAllocation);
- }
-
- bool trace_turbo_scheduled_enabled() const {
- return GetFlag(kTraceTurboScheduled);
- }
-
- bool trace_heap_broker_enabled() const { return GetFlag(kTraceHeapBroker); }
-
// Code getters and setters.
void SetCode(Handle<Code> code) { code_ = code; }
@@ -239,10 +168,6 @@ class V8_EXPORT_PRIVATE OptimizedCompilationInfo final {
BailoutReason bailout_reason() const { return bailout_reason_; }
- bool is_disable_future_optimization() const {
- return GetFlag(kDisableFutureOptimization);
- }
-
int optimization_id() const {
DCHECK(IsOptimizing());
return optimization_id_;
@@ -290,6 +215,11 @@ class V8_EXPORT_PRIVATE OptimizedCompilationInfo final {
TickCounter& tick_counter() { return tick_counter_; }
+ BasicBlockProfilerData* profiler_data() const { return profiler_data_; }
+ void set_profiler_data(BasicBlockProfilerData* profiler_data) {
+ profiler_data_ = profiler_data;
+ }
+
private:
OptimizedCompilationInfo(Code::Kind code_kind, Zone* zone);
void ConfigureFlags();
@@ -318,6 +248,9 @@ class V8_EXPORT_PRIVATE OptimizedCompilationInfo final {
// The compiled code.
Handle<Code> code_;
+ // Basic block profiling support.
+ BasicBlockProfilerData* profiler_data_ = nullptr;
+
// The WebAssembly compilation result, not published in the NativeModule yet.
std::unique_ptr<wasm::WasmCompilationResult> wasm_compilation_result_;
diff --git a/chromium/v8/src/codegen/ppc/assembler-ppc.cc b/chromium/v8/src/codegen/ppc/assembler-ppc.cc
index b9f09e23f23..62e33bba369 100644
--- a/chromium/v8/src/codegen/ppc/assembler-ppc.cc
+++ b/chromium/v8/src/codegen/ppc/assembler-ppc.cc
@@ -1758,31 +1758,21 @@ void Assembler::fmsub(const DoubleRegister frt, const DoubleRegister fra,
}
// Vector instructions
-void Assembler::mfvsrd(const Register ra, const DoubleRegister rs) {
+void Assembler::mfvsrd(const Register ra, const Simd128Register rs) {
int SX = 1;
emit(MFVSRD | rs.code() * B21 | ra.code() * B16 | SX);
}
-void Assembler::mfvsrwz(const Register ra, const DoubleRegister rs) {
+void Assembler::mfvsrwz(const Register ra, const Simd128Register rs) {
int SX = 1;
emit(MFVSRWZ | rs.code() * B21 | ra.code() * B16 | SX);
}
-void Assembler::mtvsrd(const DoubleRegister rt, const Register ra) {
+void Assembler::mtvsrd(const Simd128Register rt, const Register ra) {
int TX = 1;
emit(MTVSRD | rt.code() * B21 | ra.code() * B16 | TX);
}
-void Assembler::vor(const DoubleRegister rt, const DoubleRegister ra,
- const DoubleRegister rb) {
- emit(VOR | rt.code() * B21 | ra.code() * B16 | rb.code() * B11);
-}
-
-void Assembler::vsro(const DoubleRegister rt, const DoubleRegister ra,
- const DoubleRegister rb) {
- emit(VSRO | rt.code() * B21 | ra.code() * B16 | rb.code() * B11);
-}
-
// Pseudo instructions.
void Assembler::nop(int type) {
Register reg = r0;
diff --git a/chromium/v8/src/codegen/ppc/assembler-ppc.h b/chromium/v8/src/codegen/ppc/assembler-ppc.h
index 778e94c1859..d8f1d8ef20d 100644
--- a/chromium/v8/src/codegen/ppc/assembler-ppc.h
+++ b/chromium/v8/src/codegen/ppc/assembler-ppc.h
@@ -435,9 +435,10 @@ class Assembler : public AssemblerBase {
inline void xx3_form(Instr instr, DoubleRegister t, DoubleRegister a,
DoubleRegister b) {
- int AX = ((a.code() & 0x20) >> 5) & 0x1;
- int BX = ((b.code() & 0x20) >> 5) & 0x1;
- int TX = ((t.code() & 0x20) >> 5) & 0x1;
+ // Using VR (high VSR) registers.
+ int AX = 1;
+ int BX = 1;
+ int TX = 1;
emit(instr | (t.code() & 0x1F) * B21 | (a.code() & 0x1F) * B16 |
(b.code() & 0x1F) * B11 | AX * B2 | BX * B1 | TX);
@@ -447,18 +448,59 @@ class Assembler : public AssemblerBase {
#undef DECLARE_PPC_XX3_INSTRUCTIONS
#define DECLARE_PPC_VX_INSTRUCTIONS_A_FORM(name, instr_name, instr_value) \
- inline void name(const DoubleRegister rt, const DoubleRegister rb, \
+ inline void name(const Simd128Register rt, const Simd128Register rb, \
const Operand& imm) { \
vx_form(instr_name, rt, rb, imm); \
}
+#define DECLARE_PPC_VX_INSTRUCTIONS_B_FORM(name, instr_name, instr_value) \
+ inline void name(const Simd128Register rt, const Simd128Register ra, \
+ const Simd128Register rb) { \
+ vx_form(instr_name, rt, ra, rb); \
+ }
- inline void vx_form(Instr instr, DoubleRegister rt, DoubleRegister rb,
+ inline void vx_form(Instr instr, Simd128Register rt, Simd128Register rb,
const Operand& imm) {
emit(instr | rt.code() * B21 | imm.immediate() * B16 | rb.code() * B11);
}
+ inline void vx_form(Instr instr, Simd128Register rt, Simd128Register ra,
+ Simd128Register rb) {
+ emit(instr | rt.code() * B21 | ra.code() * B16 | rb.code() * B11);
+ }
PPC_VX_OPCODE_A_FORM_LIST(DECLARE_PPC_VX_INSTRUCTIONS_A_FORM)
+ PPC_VX_OPCODE_B_FORM_LIST(DECLARE_PPC_VX_INSTRUCTIONS_B_FORM)
#undef DECLARE_PPC_VX_INSTRUCTIONS_A_FORM
+#undef DECLARE_PPC_VX_INSTRUCTIONS_B_FORM
+
+#define DECLARE_PPC_VA_INSTRUCTIONS_A_FORM(name, instr_name, instr_value) \
+ inline void name(const Simd128Register rt, const Simd128Register ra, \
+ const Simd128Register rb, const Simd128Register rc) { \
+ va_form(instr_name, rt, ra, rb, rc); \
+ }
+
+ inline void va_form(Instr instr, Simd128Register rt, Simd128Register ra,
+ Simd128Register rb, Simd128Register rc) {
+ emit(instr | rt.code() * B21 | ra.code() * B16 | rb.code() * B11 |
+ rc.code() * B6);
+ }
+
+ PPC_VA_OPCODE_A_FORM_LIST(DECLARE_PPC_VA_INSTRUCTIONS_A_FORM)
+#undef DECLARE_PPC_VA_INSTRUCTIONS_A_FORM
+
+#define DECLARE_PPC_VC_INSTRUCTIONS(name, instr_name, instr_value) \
+ inline void name(const Simd128Register rt, const Simd128Register ra, \
+ const Simd128Register rb, const RCBit rc = LeaveRC) { \
+ vc_form(instr_name, rt, ra, rb, rc); \
+ }
+
+ inline void vc_form(Instr instr, Simd128Register rt, Simd128Register ra,
+ Simd128Register rb, int rc) {
+ emit(instr | rt.code() * B21 | ra.code() * B16 | rb.code() * B11 |
+ rc * B10);
+ }
+
+ PPC_VC_OPCODE_LIST(DECLARE_PPC_VC_INSTRUCTIONS)
+#undef DECLARE_PPC_VC_INSTRUCTIONS
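
Editor's note: as a sanity check on the new vx_form/va_form/vc_form emitters — each simply ORs the register numbers into fixed bit positions of the 32-bit opcode word. Assuming B21/B16/B11 are the usual single-bit position constants (1 << 21, 1 << 16, 1 << 11), the B-form vor from PPC_VX_OPCODE_B_FORM_LIST with vt=3, va=1, vb=2 encodes as in this standalone arithmetic sketch (not V8 code):

#include <cstdint>
#include <cstdio>

int main() {
  const uint32_t kVor = 0x10000484;  // VOR opcode from PPC_VX_OPCODE_B_FORM_LIST
  const uint32_t B21 = 1u << 21, B16 = 1u << 16, B11 = 1u << 11;  // assumed bit positions
  uint32_t vt = 3, va = 1, vb = 2;
  // Mirrors vx_form(instr, rt, ra, rb): emit(instr | rt*B21 | ra*B16 | rb*B11).
  uint32_t instr = kVor | vt * B21 | va * B16 | vb * B11;
  std::printf("vor v3, v1, v2 -> 0x%08X\n", static_cast<unsigned>(instr));  // 0x10611484
  return 0;
}
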
RegList* GetScratchRegisterList() { return &scratch_register_list_; }
// ---------------------------------------------------------------------------
@@ -947,13 +989,9 @@ class Assembler : public AssemblerBase {
RCBit rc = LeaveRC);
// Vector instructions
- void mfvsrd(const Register ra, const DoubleRegister r);
- void mfvsrwz(const Register ra, const DoubleRegister r);
- void mtvsrd(const DoubleRegister rt, const Register ra);
- void vor(const DoubleRegister rt, const DoubleRegister ra,
- const DoubleRegister rb);
- void vsro(const DoubleRegister rt, const DoubleRegister ra,
- const DoubleRegister rb);
+ void mfvsrd(const Register ra, const Simd128Register r);
+ void mfvsrwz(const Register ra, const Simd128Register r);
+ void mtvsrd(const Simd128Register rt, const Register ra);
// Pseudo instructions
diff --git a/chromium/v8/src/codegen/ppc/constants-ppc.h b/chromium/v8/src/codegen/ppc/constants-ppc.h
index b75c3e32576..f784bef54b8 100644
--- a/chromium/v8/src/codegen/ppc/constants-ppc.h
+++ b/chromium/v8/src/codegen/ppc/constants-ppc.h
@@ -1707,8 +1707,6 @@ using Instr = uint32_t;
V(stvewx, STVEWX, 0x7C00018E) \
/* Store Vector Indexed Last */ \
V(stvxl, STVXL, 0x7C0003CE) \
- /* Vector Minimum Signed Doubleword */ \
- V(vminsd, VMINSD, 0x100003C2) \
/* Floating Merge Even Word */ \
V(fmrgew, FMRGEW, 0xFC00078C) \
/* Floating Merge Odd Word */ \
@@ -1920,7 +1918,15 @@ using Instr = uint32_t;
/* Floating Reciprocal Square Root Estimate Single */ \
V(frsqrtes, FRSQRTES, 0xEC000034)
-#define PPC_VA_OPCODE_LIST(V) \
+#define PPC_VA_OPCODE_A_FORM_LIST(V) \
+ /* Vector Permute */ \
+ V(vperm, VPERM, 0x1000002B) \
+ /* Vector Multiply-Low-Add Unsigned Halfword Modulo */ \
+ V(vmladduhm, VMLADDUHM, 0x10000022) \
+ /* Vector Select */ \
+ V(vsel, VSEL, 0x1000002A)
+
+#define PPC_VA_OPCODE_UNUSED_LIST(V) \
/* Vector Add Extended & write Carry Unsigned Quadword */ \
V(vaddecuq, VADDECUQ, 0x1000003D) \
/* Vector Add Extended Unsigned Quadword Modulo */ \
@@ -1931,8 +1937,6 @@ using Instr = uint32_t;
V(vmhaddshs, VMHADDSHS, 0x10000020) \
/* Vector Multiply-High-Round-Add Signed Halfword Saturate */ \
V(vmhraddshs, VMHRADDSHS, 0x10000021) \
- /* Vector Multiply-Low-Add Unsigned Halfword Modulo */ \
- V(vmladduhm, VMLADDUHM, 0x10000022) \
/* Vector Multiply-Sum Mixed Byte Modulo */ \
V(vmsummbm, VMSUMMBM, 0x10000025) \
/* Vector Multiply-Sum Signed Halfword Modulo */ \
@@ -1947,10 +1951,6 @@ using Instr = uint32_t;
V(vmsumuhs, VMSUMUHS, 0x10000027) \
/* Vector Negative Multiply-Subtract Single-Precision */ \
V(vnmsubfp, VNMSUBFP, 0x1000002F) \
- /* Vector Permute */ \
- V(vperm, VPERM, 0x1000002B) \
- /* Vector Select */ \
- V(vsel, VSEL, 0x1000002A) \
/* Vector Shift Left Double by Octet Immediate */ \
V(vsldoi, VSLDOI, 0x1000002C) \
/* Vector Subtract Extended & write Carry Unsigned Quadword */ \
@@ -1960,6 +1960,10 @@ using Instr = uint32_t;
/* Vector Permute and Exclusive-OR */ \
V(vpermxor, VPERMXOR, 0x1000002D)
+#define PPC_VA_OPCODE_LIST(V) \
+ PPC_VA_OPCODE_A_FORM_LIST(V) \
+ PPC_VA_OPCODE_UNUSED_LIST(V)
+
#define PPC_XX1_OPCODE_LIST(V) \
/* Load VSR Scalar Doubleword Indexed */ \
V(lxsdx, LXSDX, 0x7C000498) \
@@ -2200,6 +2204,112 @@ using Instr = uint32_t;
/* Vector Splat Halfword */ \
V(vsplth, VSPLTH, 0x1000024C)
+#define PPC_VX_OPCODE_B_FORM_LIST(V) \
+ /* Vector Logical OR */ \
+ V(vor, VOR, 0x10000484) \
+ /* Vector Logical XOR */ \
+ V(vxor, VXOR, 0x100004C4) \
+ /* Vector Logical NOR */ \
+ V(vnor, VNOR, 0x10000504) \
+ /* Vector Shift Right by Octet */ \
+ V(vsro, VSRO, 0x1000044C) \
+ /* Vector Shift Left by Octet */ \
+ V(vslo, VSLO, 0x1000040C) \
+ /* Vector Add Unsigned Doubleword Modulo */ \
+ V(vaddudm, VADDUDM, 0x100000C0) \
+ /* Vector Add Unsigned Word Modulo */ \
+ V(vadduwm, VADDUWM, 0x10000080) \
+ /* Vector Add Unsigned Halfword Modulo */ \
+ V(vadduhm, VADDUHM, 0x10000040) \
+ /* Vector Add Unsigned Byte Modulo */ \
+ V(vaddubm, VADDUBM, 0x10000000) \
+ /* Vector Add Single-Precision */ \
+ V(vaddfp, VADDFP, 0x1000000A) \
+ /* Vector Subtract Single-Precision */ \
+ V(vsubfp, VSUBFP, 0x1000004A) \
+ /* Vector Subtract Unsigned Doubleword Modulo */ \
+ V(vsubudm, VSUBUDM, 0x100004C0) \
+ /* Vector Subtract Unsigned Word Modulo */ \
+ V(vsubuwm, VSUBUWM, 0x10000480) \
+ /* Vector Subtract Unsigned Halfword Modulo */ \
+ V(vsubuhm, VSUBUHM, 0x10000440) \
+ /* Vector Subtract Unsigned Byte Modulo */ \
+ V(vsububm, VSUBUBM, 0x10000400) \
+ /* Vector Multiply Unsigned Word Modulo */ \
+ V(vmuluwm, VMULUWM, 0x10000089) \
+ /* Vector Pack Unsigned Halfword Unsigned Modulo */ \
+ V(vpkuhum, VPKUHUM, 0x1000000E) \
+ /* Vector Multiply Even Unsigned Byte */ \
+ V(vmuleub, VMULEUB, 0x10000208) \
+ /* Vector Multiply Odd Unsigned Byte */ \
+ V(vmuloub, VMULOUB, 0x10000008) \
+ /* Vector Sum across Quarter Signed Halfword Saturate */ \
+ V(vsum4shs, VSUM4SHS, 0x10000648) \
+ /* Vector Pack Unsigned Word Unsigned Saturate */ \
+ V(vpkuwus, VPKUWUS, 0x100000CE) \
+ /* Vector Sum across Half Signed Word Saturate */ \
+ V(vsum2sws, VSUM2SWS, 0x10000688) \
+ /* Vector Pack Unsigned Doubleword Unsigned Modulo */ \
+ V(vpkudum, VPKUDUM, 0x1000044E) \
+ /* Vector Maximum Signed Byte */ \
+ V(vmaxsb, VMAXSB, 0x10000102) \
+ /* Vector Maximum Unsigned Byte */ \
+ V(vmaxub, VMAXUB, 0x10000002) \
+ /* Vector Maximum Signed Doubleword */ \
+ V(vmaxsd, VMAXSD, 0x100001C2) \
+ /* Vector Maximum Unsigned Doubleword */ \
+ V(vmaxud, VMAXUD, 0x100000C2) \
+ /* Vector Maximum Signed Halfword */ \
+ V(vmaxsh, VMAXSH, 0x10000142) \
+ /* Vector Maximum Unsigned Halfword */ \
+ V(vmaxuh, VMAXUH, 0x10000042) \
+ /* Vector Maximum Signed Word */ \
+ V(vmaxsw, VMAXSW, 0x10000182) \
+ /* Vector Maximum Unsigned Word */ \
+ V(vmaxuw, VMAXUW, 0x10000082) \
+ /* Vector Minimum Signed Byte */ \
+ V(vminsb, VMINSB, 0x10000302) \
+ /* Vector Minimum Unsigned Byte */ \
+ V(vminub, VMINUB, 0x10000202) \
+ /* Vector Minimum Signed Doubleword */ \
+ V(vminsd, VMINSD, 0x100003C2) \
+ /* Vector Minimum Unsigned Doubleword */ \
+ V(vminud, VMINUD, 0x100002C2) \
+ /* Vector Minimum Signed Halfword */ \
+ V(vminsh, VMINSH, 0x10000342) \
+ /* Vector Minimum Unsigned Halfword */ \
+ V(vminuh, VMINUH, 0x10000242) \
+ /* Vector Minimum Signed Word */ \
+ V(vminsw, VMINSW, 0x10000382) \
+ /* Vector Minimum Unsigned Word */ \
+ V(vminuw, VMINUW, 0x10000282) \
+ /* Vector Shift Left Byte */ \
+ V(vslb, VSLB, 0x10000104) \
+ /* Vector Shift Left Word */ \
+ V(vslw, VSLW, 0x10000184) \
+ /* Vector Shift Left Halfword */ \
+ V(vslh, VSLH, 0x10000144) \
+ /* Vector Shift Left Doubleword */ \
+ V(vsld, VSLD, 0x100005C4) \
+ /* Vector Shift Right Byte */ \
+ V(vsrb, VSRB, 0x10000204) \
+ /* Vector Shift Right Word */ \
+ V(vsrw, VSRW, 0x10000284) \
+ /* Vector Shift Right Halfword */ \
+ V(vsrh, VSRH, 0x10000244) \
+ /* Vector Shift Right Doubleword */ \
+ V(vsrd, VSRD, 0x100006C4) \
+ /* Vector Shift Right Algebraic Byte */ \
+ V(vsrab, VSRAB, 0x10000304) \
+ /* Vector Shift Right Algebraic Word */ \
+ V(vsraw, VSRAW, 0x10000384) \
+ /* Vector Shift Right Algebraic Halfword */ \
+ V(vsrah, VSRAH, 0x10000344) \
+ /* Vector Shift Right Algebraic Doubleword */ \
+ V(vsrad, VSRAD, 0x100003C4) \
+ /* Vector Logical AND */ \
+ V(vand, VAND, 0x10000404)
+
#define PPC_VX_OPCODE_UNUSED_LIST(V) \
/* Decimal Add Modulo */ \
V(bcdadd, BCDADD, 0xF0000400) \
@@ -2213,32 +2323,20 @@ using Instr = uint32_t;
V(vaddcuq, VADDCUQ, 0x10000140) \
/* Vector Add and Write Carry-Out Unsigned Word */ \
V(vaddcuw, VADDCUW, 0x10000180) \
- /* Vector Add Single-Precision */ \
- V(vaddfp, VADDFP, 0x1000000A) \
/* Vector Add Signed Byte Saturate */ \
V(vaddsbs, VADDSBS, 0x10000300) \
/* Vector Add Signed Halfword Saturate */ \
V(vaddshs, VADDSHS, 0x10000340) \
/* Vector Add Signed Word Saturate */ \
V(vaddsws, VADDSWS, 0x10000380) \
- /* Vector Add Unsigned Byte Modulo */ \
- V(vaddubm, VADDUBM, 0x10000000) \
/* Vector Add Unsigned Byte Saturate */ \
V(vaddubs, VADDUBS, 0x10000200) \
- /* Vector Add Unsigned Doubleword Modulo */ \
- V(vaddudm, VADDUDM, 0x100000C0) \
- /* Vector Add Unsigned Halfword Modulo */ \
- V(vadduhm, VADDUHM, 0x10000040) \
/* Vector Add Unsigned Halfword Saturate */ \
V(vadduhs, VADDUHS, 0x10000240) \
/* Vector Add Unsigned Quadword Modulo */ \
V(vadduqm, VADDUQM, 0x10000100) \
- /* Vector Add Unsigned Word Modulo */ \
- V(vadduwm, VADDUWM, 0x10000080) \
/* Vector Add Unsigned Word Saturate */ \
V(vadduws, VADDUWS, 0x10000280) \
- /* Vector Logical AND */ \
- V(vand, VAND, 0x10000404) \
/* Vector Logical AND with Complement */ \
V(vandc, VANDC, 0x10000444) \
/* Vector Average Signed Byte */ \
@@ -2283,38 +2381,8 @@ using Instr = uint32_t;
V(vlogefp, VLOGEFP, 0x100001CA) \
/* Vector Maximum Single-Precision */ \
V(vmaxfp, VMAXFP, 0x1000040A) \
- /* Vector Maximum Signed Byte */ \
- V(vmaxsb, VMAXSB, 0x10000102) \
- /* Vector Maximum Signed Doubleword */ \
- V(vmaxsd, VMAXSD, 0x100001C2) \
- /* Vector Maximum Signed Halfword */ \
- V(vmaxsh, VMAXSH, 0x10000142) \
- /* Vector Maximum Signed Word */ \
- V(vmaxsw, VMAXSW, 0x10000182) \
- /* Vector Maximum Unsigned Byte */ \
- V(vmaxub, VMAXUB, 0x10000002) \
- /* Vector Maximum Unsigned Doubleword */ \
- V(vmaxud, VMAXUD, 0x100000C2) \
- /* Vector Maximum Unsigned Halfword */ \
- V(vmaxuh, VMAXUH, 0x10000042) \
- /* Vector Maximum Unsigned Word */ \
- V(vmaxuw, VMAXUW, 0x10000082) \
/* Vector Minimum Single-Precision */ \
V(vminfp, VMINFP, 0x1000044A) \
- /* Vector Minimum Signed Byte */ \
- V(vminsb, VMINSB, 0x10000302) \
- /* Vector Minimum Signed Halfword */ \
- V(vminsh, VMINSH, 0x10000342) \
- /* Vector Minimum Signed Word */ \
- V(vminsw, VMINSW, 0x10000382) \
- /* Vector Minimum Unsigned Byte */ \
- V(vminub, VMINUB, 0x10000202) \
- /* Vector Minimum Unsigned Doubleword */ \
- V(vminud, VMINUD, 0x100002C2) \
- /* Vector Minimum Unsigned Halfword */ \
- V(vminuh, VMINUH, 0x10000242) \
- /* Vector Minimum Unsigned Word */ \
- V(vminuw, VMINUW, 0x10000282) \
/* Vector Merge High Byte */ \
V(vmrghb, VMRGHB, 0x1000000C) \
/* Vector Merge High Halfword */ \
@@ -2333,8 +2401,6 @@ using Instr = uint32_t;
V(vmulesh, VMULESH, 0x10000348) \
/* Vector Multiply Even Signed Word */ \
V(vmulesw, VMULESW, 0x10000388) \
- /* Vector Multiply Even Unsigned Byte */ \
- V(vmuleub, VMULEUB, 0x10000208) \
/* Vector Multiply Even Unsigned Halfword */ \
V(vmuleuh, VMULEUH, 0x10000248) \
/* Vector Multiply Even Unsigned Word */ \
@@ -2345,20 +2411,12 @@ using Instr = uint32_t;
V(vmulosh, VMULOSH, 0x10000148) \
/* Vector Multiply Odd Signed Word */ \
V(vmulosw, VMULOSW, 0x10000188) \
- /* Vector Multiply Odd Unsigned Byte */ \
- V(vmuloub, VMULOUB, 0x10000008) \
/* Vector Multiply Odd Unsigned Halfword */ \
V(vmulouh, VMULOUH, 0x10000048) \
/* Vector Multiply Odd Unsigned Word */ \
V(vmulouw, VMULOUW, 0x10000088) \
- /* Vector Multiply Unsigned Word Modulo */ \
- V(vmuluwm, VMULUWM, 0x10000089) \
/* Vector NAND */ \
V(vnand, VNAND, 0x10000584) \
- /* Vector Logical NOR */ \
- V(vnor, VNOR, 0x10000504) \
- /* Vector Logical OR */ \
- V(vor, VOR, 0x10000484) \
/* Vector OR with Complement */ \
V(vorc, VORC, 0x10000544) \
/* Vector Pack Pixel */ \
@@ -2375,18 +2433,12 @@ using Instr = uint32_t;
V(vpkswss, VPKSWSS, 0x100001CE) \
/* Vector Pack Signed Word Unsigned Saturate */ \
V(vpkswus, VPKSWUS, 0x1000014E) \
- /* Vector Pack Unsigned Doubleword Unsigned Modulo */ \
- V(vpkudum, VPKUDUM, 0x1000044E) \
/* Vector Pack Unsigned Doubleword Unsigned Saturate */ \
V(vpkudus, VPKUDUS, 0x100004CE) \
- /* Vector Pack Unsigned Halfword Unsigned Modulo */ \
- V(vpkuhum, VPKUHUM, 0x1000000E) \
/* Vector Pack Unsigned Halfword Unsigned Saturate */ \
V(vpkuhus, VPKUHUS, 0x1000008E) \
/* Vector Pack Unsigned Word Unsigned Modulo */ \
V(vpkuwum, VPKUWUM, 0x1000004E) \
- /* Vector Pack Unsigned Word Unsigned Saturate */ \
- V(vpkuwus, VPKUWUS, 0x100000CE) \
/* Vector Polynomial Multiply-Sum Byte */ \
V(vpmsumb, VPMSUMB, 0x10000408) \
/* Vector Polynomial Multiply-Sum Doubleword */ \
@@ -2425,16 +2477,6 @@ using Instr = uint32_t;
V(vrsqrtefp, VRSQRTEFP, 0x1000014A) \
/* Vector Shift Left */ \
V(vsl, VSL, 0x100001C4) \
- /* Vector Shift Left Byte */ \
- V(vslb, VSLB, 0x10000104) \
- /* Vector Shift Left Doubleword */ \
- V(vsld, VSLD, 0x100005C4) \
- /* Vector Shift Left Halfword */ \
- V(vslh, VSLH, 0x10000144) \
- /* Vector Shift Left by Octet */ \
- V(vslo, VSLO, 0x1000040C) \
- /* Vector Shift Left Word */ \
- V(vslw, VSLW, 0x10000184) \
/* Vector Splat Immediate Signed Byte */ \
V(vspltisb, VSPLTISB, 0x1000030C) \
/* Vector Splat Immediate Signed Halfword */ \
@@ -2443,58 +2485,26 @@ using Instr = uint32_t;
V(vspltisw, VSPLTISW, 0x1000038C) \
/* Vector Shift Right */ \
V(vsr, VSR, 0x100002C4) \
- /* Vector Shift Right Algebraic Byte */ \
- V(vsrab, VSRAB, 0x10000304) \
- /* Vector Shift Right Algebraic Doubleword */ \
- V(vsrad, VSRAD, 0x100003C4) \
- /* Vector Shift Right Algebraic Halfword */ \
- V(vsrah, VSRAH, 0x10000344) \
- /* Vector Shift Right Algebraic Word */ \
- V(vsraw, VSRAW, 0x10000384) \
- /* Vector Shift Right Byte */ \
- V(vsrb, VSRB, 0x10000204) \
- /* Vector Shift Right Doubleword */ \
- V(vsrd, VSRD, 0x100006C4) \
- /* Vector Shift Right Halfword */ \
- V(vsrh, VSRH, 0x10000244) \
- /* Vector Shift Right by Octet */ \
- V(vsro, VSRO, 0x1000044C) \
- /* Vector Shift Right Word */ \
- V(vsrw, VSRW, 0x10000284) \
/* Vector Subtract & write Carry Unsigned Quadword */ \
V(vsubcuq, VSUBCUQ, 0x10000540) \
/* Vector Subtract and Write Carry-Out Unsigned Word */ \
V(vsubcuw, VSUBCUW, 0x10000580) \
- /* Vector Subtract Single-Precision */ \
- V(vsubfp, VSUBFP, 0x1000004A) \
/* Vector Subtract Signed Byte Saturate */ \
V(vsubsbs, VSUBSBS, 0x10000700) \
/* Vector Subtract Signed Halfword Saturate */ \
V(vsubshs, VSUBSHS, 0x10000740) \
/* Vector Subtract Signed Word Saturate */ \
V(vsubsws, VSUBSWS, 0x10000780) \
- /* Vector Subtract Unsigned Byte Modulo */ \
- V(vsububm, VSUBUBM, 0x10000400) \
/* Vector Subtract Unsigned Byte Saturate */ \
V(vsububs, VSUBUBS, 0x10000600) \
- /* Vector Subtract Unsigned Doubleword Modulo */ \
- V(vsubudm, VSUBUDM, 0x100004C0) \
- /* Vector Subtract Unsigned Halfword Modulo */ \
- V(vsubuhm, VSUBUHM, 0x10000440) \
/* Vector Subtract Unsigned Halfword Saturate */ \
V(vsubuhs, VSUBUHS, 0x10000640) \
/* Vector Subtract Unsigned Quadword Modulo */ \
V(vsubuqm, VSUBUQM, 0x10000500) \
- /* Vector Subtract Unsigned Word Modulo */ \
- V(vsubuwm, VSUBUWM, 0x10000480) \
/* Vector Subtract Unsigned Word Saturate */ \
V(vsubuws, VSUBUWS, 0x10000680) \
- /* Vector Sum across Half Signed Word Saturate */ \
- V(vsum2sws, VSUM2SWS, 0x10000688) \
/* Vector Sum across Quarter Signed Byte Saturate */ \
V(vsum4sbs, VSUM4SBS, 0x10000708) \
- /* Vector Sum across Quarter Signed Halfword Saturate */ \
- V(vsum4shs, VSUM4SHS, 0x10000648) \
/* Vector Sum across Quarter Unsigned Byte Saturate */ \
V(vsum4bus, VSUM4BUS, 0x10000608) \
/* Vector Sum across Signed Word Saturate */ \
@@ -2515,8 +2525,6 @@ using Instr = uint32_t;
V(vupklsh, VUPKLSH, 0x100002CE) \
/* Vector Unpack Low Signed Word */ \
V(vupklsw, VUPKLSW, 0x100006CE) \
- /* Vector Logical XOR */ \
- V(vxor, VXOR, 0x100004C4) \
/* Vector AES Cipher */ \
V(vcipher, VCIPHER, 0x10000508) \
/* Vector AES Cipher Last */ \
@@ -2538,6 +2546,7 @@ using Instr = uint32_t;
#define PPC_VX_OPCODE_LIST(V) \
PPC_VX_OPCODE_A_FORM_LIST(V) \
+ PPC_VX_OPCODE_B_FORM_LIST(V) \
PPC_VX_OPCODE_UNUSED_LIST(V)
#define PPC_XS_OPCODE_LIST(V) \
diff --git a/chromium/v8/src/codegen/ppc/interface-descriptors-ppc.cc b/chromium/v8/src/codegen/ppc/interface-descriptors-ppc.cc
index cd0ab1a3281..65f574d1b30 100644
--- a/chromium/v8/src/codegen/ppc/interface-descriptors-ppc.cc
+++ b/chromium/v8/src/codegen/ppc/interface-descriptors-ppc.cc
@@ -191,11 +191,6 @@ void AbortDescriptor::InitializePlatformSpecific(
data->InitializePlatformSpecific(arraysize(registers), registers);
}
-void AllocateHeapNumberDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- data->InitializePlatformSpecific(0, nullptr);
-}
-
void CompareDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
Register registers[] = {r4, r3};
@@ -295,6 +290,30 @@ void CallTrampoline_WithFeedbackDescriptor::InitializePlatformSpecific(
DefaultInitializePlatformSpecific(data, 4);
}
+void CallWithArrayLike_WithFeedbackDescriptor::InitializePlatformSpecific(
+ CallInterfaceDescriptorData* data) {
+ // TODO(v8:8888): Implement on this platform.
+ DefaultInitializePlatformSpecific(data, 4);
+}
+
+void CallWithSpread_WithFeedbackDescriptor::InitializePlatformSpecific(
+ CallInterfaceDescriptorData* data) {
+ // TODO(v8:8888): Implement on this platform.
+ DefaultInitializePlatformSpecific(data, 4);
+}
+
+void ConstructWithArrayLike_WithFeedbackDescriptor::InitializePlatformSpecific(
+ CallInterfaceDescriptorData* data) {
+ // TODO(v8:8888): Implement on this platform.
+ DefaultInitializePlatformSpecific(data, 4);
+}
+
+void ConstructWithSpread_WithFeedbackDescriptor::InitializePlatformSpecific(
+ CallInterfaceDescriptorData* data) {
+ // TODO(v8:8888): Implement on this platform.
+ DefaultInitializePlatformSpecific(data, 4);
+}
+
void Compare_WithFeedbackDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
// TODO(v8:8888): Implement on this platform.
diff --git a/chromium/v8/src/codegen/ppc/macro-assembler-ppc.cc b/chromium/v8/src/codegen/ppc/macro-assembler-ppc.cc
index 3cf819f1028..14ed9682275 100644
--- a/chromium/v8/src/codegen/ppc/macro-assembler-ppc.cc
+++ b/chromium/v8/src/codegen/ppc/macro-assembler-ppc.cc
@@ -2099,7 +2099,7 @@ void TurboAssembler::CheckPageFlag(
int mask, Condition cc, Label* condition_met) {
DCHECK(cc == ne || cc == eq);
ClearRightImm(scratch, object, Operand(kPageSizeBits));
- LoadP(scratch, MemOperand(scratch, MemoryChunk::kFlagsOffset));
+ LoadP(scratch, MemOperand(scratch, BasicMemoryChunk::kFlagsOffset));
mov(r0, Operand(mask));
and_(r0, scratch, r0, SetRC);
diff --git a/chromium/v8/src/codegen/register.h b/chromium/v8/src/codegen/register.h
index bf499668bb1..2dcf0fbe8fd 100644
--- a/chromium/v8/src/codegen/register.h
+++ b/chromium/v8/src/codegen/register.h
@@ -5,6 +5,7 @@
#ifndef V8_CODEGEN_REGISTER_H_
#define V8_CODEGEN_REGISTER_H_
+#include "src/base/bounds.h"
#include "src/codegen/reglist.h"
namespace v8 {
@@ -32,10 +33,7 @@ class RegisterBase {
static constexpr SubType no_reg() { return SubType{kCode_no_reg}; }
static constexpr SubType from_code(int code) {
-#if V8_HAS_CXX14_CONSTEXPR
- DCHECK_LE(0, code);
- DCHECK_GT(kNumRegisters, code);
-#endif
+ CONSTEXPR_DCHECK(base::IsInRange(code, 0, kNumRegisters - 1));
return SubType{code};
}
@@ -47,9 +45,7 @@ class RegisterBase {
constexpr bool is_valid() const { return reg_code_ != kCode_no_reg; }
constexpr int code() const {
-#if V8_HAS_CXX14_CONSTEXPR
- DCHECK(is_valid());
-#endif
+ CONSTEXPR_DCHECK(is_valid());
return reg_code_;
}
diff --git a/chromium/v8/src/codegen/reloc-info.h b/chromium/v8/src/codegen/reloc-info.h
index 50ce001103e..a4ea9b1ee90 100644
--- a/chromium/v8/src/codegen/reloc-info.h
+++ b/chromium/v8/src/codegen/reloc-info.h
@@ -54,6 +54,8 @@ class RelocInfo {
// Please note the order is important (see IsRealRelocMode, IsGCRelocMode,
// and IsShareableRelocMode predicates below).
+ NONE, // Never recorded value. Most common one, hence value 0.
+
CODE_TARGET,
RELATIVE_CODE_TARGET, // LAST_CODE_TARGET_MODE
COMPRESSED_EMBEDDED_OBJECT,
@@ -89,7 +91,6 @@ class RelocInfo {
// Pseudo-types
NUMBER_OF_MODES,
- NONE, // never recorded value
LAST_CODE_TARGET_MODE = RELATIVE_CODE_TARGET,
FIRST_REAL_RELOC_MODE = CODE_TARGET,
@@ -123,10 +124,8 @@ class RelocInfo {
return mode <= LAST_GCED_ENUM;
}
static constexpr bool IsShareableRelocMode(Mode mode) {
- static_assert(RelocInfo::NONE >= RelocInfo::FIRST_SHAREABLE_RELOC_MODE,
- "Users of this function rely on NONE being a sharable "
- "relocation mode.");
- return mode >= RelocInfo::FIRST_SHAREABLE_RELOC_MODE;
+ return mode == RelocInfo::NONE ||
+ mode >= RelocInfo::FIRST_SHAREABLE_RELOC_MODE;
}
static constexpr bool IsCodeTarget(Mode mode) { return mode == CODE_TARGET; }
static constexpr bool IsCodeTargetMode(Mode mode) {
diff --git a/chromium/v8/src/codegen/s390/constants-s390.h b/chromium/v8/src/codegen/s390/constants-s390.h
index 6cd5e4d9faf..5c524350518 100644
--- a/chromium/v8/src/codegen/s390/constants-s390.h
+++ b/chromium/v8/src/codegen/s390/constants-s390.h
@@ -567,11 +567,12 @@ using SixByteInstr = uint64_t;
V(va, VA, 0xE7F3) /* type = VRR_C VECTOR ADD */ \
V(vscbi, VSCBI, \
0xE7F5) /* type = VRR_C VECTOR SUBTRACT COMPUTE BORROW INDICATION */ \
- V(vs, VS, 0xE7F7) /* type = VRR_C VECTOR SUBTRACT */ \
- V(vmnl, VMNL, 0xE7FC) /* type = VRR_C VECTOR MINIMUM LOGICAL */ \
- V(vmxl, VMXL, 0xE7FD) /* type = VRR_C VECTOR MAXIMUM LOGICAL */ \
- V(vmn, VMN, 0xE7FE) /* type = VRR_C VECTOR MINIMUM */ \
- V(vmx, VMX, 0xE7FF) /* type = VRR_C VECTOR MAXIMUM */
+ V(vs, VS, 0xE7F7) /* type = VRR_C VECTOR SUBTRACT */ \
+ V(vmnl, VMNL, 0xE7FC) /* type = VRR_C VECTOR MINIMUM LOGICAL */ \
+ V(vmxl, VMXL, 0xE7FD) /* type = VRR_C VECTOR MAXIMUM LOGICAL */ \
+ V(vmn, VMN, 0xE7FE) /* type = VRR_C VECTOR MINIMUM */ \
+ V(vmx, VMX, 0xE7FF) /* type = VRR_C VECTOR MAXIMUM */ \
+ V(vbperm, VBPERM, 0xE785) /* type = VRR_C VECTOR BIT PERMUTE */
#define S390_VRI_A_OPCODE_LIST(V) \
V(vleib, VLEIB, 0xE740) /* type = VRI_A VECTOR LOAD ELEMENT IMMEDIATE (8) */ \
diff --git a/chromium/v8/src/codegen/s390/interface-descriptors-s390.cc b/chromium/v8/src/codegen/s390/interface-descriptors-s390.cc
index 8e0e9a4cf54..b23ecb7289b 100644
--- a/chromium/v8/src/codegen/s390/interface-descriptors-s390.cc
+++ b/chromium/v8/src/codegen/s390/interface-descriptors-s390.cc
@@ -191,11 +191,6 @@ void AbortDescriptor::InitializePlatformSpecific(
data->InitializePlatformSpecific(arraysize(registers), registers);
}
-void AllocateHeapNumberDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- data->InitializePlatformSpecific(0, nullptr);
-}
-
void CompareDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
Register registers[] = {r3, r2};
@@ -295,6 +290,30 @@ void CallTrampoline_WithFeedbackDescriptor::InitializePlatformSpecific(
DefaultInitializePlatformSpecific(data, 4);
}
+void CallWithArrayLike_WithFeedbackDescriptor::InitializePlatformSpecific(
+ CallInterfaceDescriptorData* data) {
+ // TODO(v8:8888): Implement on this platform.
+ DefaultInitializePlatformSpecific(data, 4);
+}
+
+void CallWithSpread_WithFeedbackDescriptor::InitializePlatformSpecific(
+ CallInterfaceDescriptorData* data) {
+ // TODO(v8:8888): Implement on this platform.
+ DefaultInitializePlatformSpecific(data, 4);
+}
+
+void ConstructWithArrayLike_WithFeedbackDescriptor::InitializePlatformSpecific(
+ CallInterfaceDescriptorData* data) {
+ // TODO(v8:8888): Implement on this platform.
+ DefaultInitializePlatformSpecific(data, 4);
+}
+
+void ConstructWithSpread_WithFeedbackDescriptor::InitializePlatformSpecific(
+ CallInterfaceDescriptorData* data) {
+ // TODO(v8:8888): Implement on this platform.
+ DefaultInitializePlatformSpecific(data, 4);
+}
+
void Compare_WithFeedbackDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
// TODO(v8:8888): Implement on this platform.
diff --git a/chromium/v8/src/codegen/s390/macro-assembler-s390.cc b/chromium/v8/src/codegen/s390/macro-assembler-s390.cc
index 7e7d1434c44..193f05929c6 100644
--- a/chromium/v8/src/codegen/s390/macro-assembler-s390.cc
+++ b/chromium/v8/src/codegen/s390/macro-assembler-s390.cc
@@ -2024,10 +2024,10 @@ void TurboAssembler::CheckPageFlag(
// Reverse the byte_offset if emulating on little endian platform
byte_offset = kSystemPointerSize - byte_offset - 1;
#endif
- tm(MemOperand(scratch, MemoryChunk::kFlagsOffset + byte_offset),
+ tm(MemOperand(scratch, BasicMemoryChunk::kFlagsOffset + byte_offset),
Operand(shifted_mask));
} else {
- LoadP(scratch, MemOperand(scratch, MemoryChunk::kFlagsOffset));
+ LoadP(scratch, MemOperand(scratch, BasicMemoryChunk::kFlagsOffset));
AndP(r0, scratch, Operand(mask));
}
// Should be okay to remove rc
diff --git a/chromium/v8/src/codegen/safepoint-table.cc b/chromium/v8/src/codegen/safepoint-table.cc
index 962b1ea17f8..e50a2fada01 100644
--- a/chromium/v8/src/codegen/safepoint-table.cc
+++ b/chromium/v8/src/codegen/safepoint-table.cc
@@ -90,7 +90,7 @@ void SafepointTable::PrintBits(std::ostream& os, // NOLINT
Safepoint SafepointTableBuilder::DefineSafepoint(
Assembler* assembler, Safepoint::DeoptMode deopt_mode) {
deoptimization_info_.push_back(
- DeoptimizationInfo(zone_, assembler->pc_offset()));
+ DeoptimizationInfo(zone_, assembler->pc_offset_for_safepoint()));
DeoptimizationInfo& new_info = deoptimization_info_.back();
return Safepoint(new_info.indexes);
}
diff --git a/chromium/v8/src/codegen/source-position-table.cc b/chromium/v8/src/codegen/source-position-table.cc
index 0f03867331e..a07f76e4d10 100644
--- a/chromium/v8/src/codegen/source-position-table.cc
+++ b/chromium/v8/src/codegen/source-position-table.cc
@@ -49,10 +49,10 @@ void SubtractFromEntry(PositionTableEntry* value,
// Helper: Encode an integer.
template <typename T>
-void EncodeInt(std::vector<byte>* bytes, T value) {
+void EncodeInt(ZoneVector<byte>* bytes, T value) {
using unsigned_type = typename std::make_unsigned<T>::type;
// Zig-zag encoding.
- static const int kShift = sizeof(T) * kBitsPerByte - 1;
+ static constexpr int kShift = sizeof(T) * kBitsPerByte - 1;
value = ((static_cast<unsigned_type>(value) << 1) ^ (value >> kShift));
DCHECK_GE(value, 0);
unsigned_type encoded = static_cast<unsigned_type>(value);
@@ -67,7 +67,7 @@ void EncodeInt(std::vector<byte>* bytes, T value) {
}
// Encode a PositionTableEntry.
-void EncodeEntry(std::vector<byte>* bytes, const PositionTableEntry& entry) {
+void EncodeEntry(ZoneVector<byte>* bytes, const PositionTableEntry& entry) {
// We only accept ascending code offsets.
DCHECK_GE(entry.code_offset, 0);
// Since code_offset is not negative, we use sign to encode is_statement.
@@ -115,7 +115,7 @@ Vector<const byte> VectorFromByteArray(ByteArray byte_array) {
}
#ifdef ENABLE_SLOW_DCHECKS
-void CheckTableEquals(const std::vector<PositionTableEntry>& raw_entries,
+void CheckTableEquals(const ZoneVector<PositionTableEntry>& raw_entries,
SourcePositionTableIterator* encoded) {
// Brute force testing: Record all positions and decode
// the entire table to verify they are identical.
@@ -133,8 +133,14 @@ void CheckTableEquals(const std::vector<PositionTableEntry>& raw_entries,
} // namespace
SourcePositionTableBuilder::SourcePositionTableBuilder(
- SourcePositionTableBuilder::RecordingMode mode)
- : mode_(mode), previous_() {}
+ Zone* zone, SourcePositionTableBuilder::RecordingMode mode)
+ : mode_(mode),
+ bytes_(zone),
+#ifdef ENABLE_SLOW_DCHECKS
+ raw_entries_(zone),
+#endif
+ previous_() {
+}
void SourcePositionTableBuilder::AddPosition(size_t code_offset,
SourcePosition source_position,
diff --git a/chromium/v8/src/codegen/source-position-table.h b/chromium/v8/src/codegen/source-position-table.h
index 024eca54fa5..a42c6a44a3c 100644
--- a/chromium/v8/src/codegen/source-position-table.h
+++ b/chromium/v8/src/codegen/source-position-table.h
@@ -49,7 +49,7 @@ class V8_EXPORT_PRIVATE SourcePositionTableBuilder {
};
explicit SourcePositionTableBuilder(
- RecordingMode mode = RECORD_SOURCE_POSITIONS);
+ Zone* zone, RecordingMode mode = RECORD_SOURCE_POSITIONS);
void AddPosition(size_t code_offset, SourcePosition source_position,
bool is_statement);
@@ -66,9 +66,9 @@ class V8_EXPORT_PRIVATE SourcePositionTableBuilder {
void AddEntry(const PositionTableEntry& entry);
RecordingMode mode_;
- std::vector<byte> bytes_;
+ ZoneVector<byte> bytes_;
#ifdef ENABLE_SLOW_DCHECKS
- std::vector<PositionTableEntry> raw_entries_;
+ ZoneVector<PositionTableEntry> raw_entries_;
#endif
PositionTableEntry previous_; // Previously written entry, to compute delta.
};
diff --git a/chromium/v8/src/codegen/x64/assembler-x64.cc b/chromium/v8/src/codegen/x64/assembler-x64.cc
index 287de802be4..c1e2ec9808d 100644
--- a/chromium/v8/src/codegen/x64/assembler-x64.cc
+++ b/chromium/v8/src/codegen/x64/assembler-x64.cc
@@ -132,168 +132,53 @@ uint32_t RelocInfo::wasm_call_tag() const {
// -----------------------------------------------------------------------------
// Implementation of Operand
-namespace {
-class OperandBuilder {
- public:
- OperandBuilder(Register base, int32_t disp) {
- if (base == rsp || base == r12) {
- // SIB byte is needed to encode (rsp + offset) or (r12 + offset).
- set_sib(times_1, rsp, base);
- }
-
- if (disp == 0 && base != rbp && base != r13) {
- set_modrm(0, base);
- } else if (is_int8(disp)) {
- set_modrm(1, base);
- set_disp8(disp);
- } else {
- set_modrm(2, base);
- set_disp32(disp);
- }
- }
-
- OperandBuilder(Register base, Register index, ScaleFactor scale,
- int32_t disp) {
- DCHECK(index != rsp);
- set_sib(scale, index, base);
- if (disp == 0 && base != rbp && base != r13) {
- // This call to set_modrm doesn't overwrite the REX.B (or REX.X) bits
- // possibly set by set_sib.
- set_modrm(0, rsp);
- } else if (is_int8(disp)) {
- set_modrm(1, rsp);
- set_disp8(disp);
- } else {
- set_modrm(2, rsp);
- set_disp32(disp);
- }
- }
-
- OperandBuilder(Register index, ScaleFactor scale, int32_t disp) {
- DCHECK(index != rsp);
- set_modrm(0, rsp);
- set_sib(scale, index, rbp);
- set_disp32(disp);
- }
-
- OperandBuilder(Label* label, int addend) {
- data_.addend = addend;
- DCHECK_NOT_NULL(label);
- DCHECK(addend == 0 || (is_int8(addend) && label->is_bound()));
- set_modrm(0, rbp);
- set_disp64(reinterpret_cast<intptr_t>(label));
+Operand::Operand(Operand operand, int32_t offset) {
+ DCHECK_GE(operand.data().len, 1);
+ // Operand encodes REX ModR/M [SIB] [Disp].
+ byte modrm = operand.data().buf[0];
+ DCHECK_LT(modrm, 0xC0); // Disallow mode 3 (register target).
+ bool has_sib = ((modrm & 0x07) == 0x04);
+ byte mode = modrm & 0xC0;
+ int disp_offset = has_sib ? 2 : 1;
+ int base_reg = (has_sib ? operand.data().buf[1] : modrm) & 0x07;
+ // Mode 0 with rbp/r13 as ModR/M or SIB base register always has a 32-bit
+ // displacement.
+ bool is_baseless = (mode == 0) && (base_reg == 0x05); // No base or RIP base.
+ int32_t disp_value = 0;
+ if (mode == 0x80 || is_baseless) {
+ // Mode 2 or mode 0 with rbp/r13 as base: Word displacement.
+ disp_value = ReadUnalignedValue<int32_t>(
+ reinterpret_cast<Address>(&operand.data().buf[disp_offset]));
+ } else if (mode == 0x40) {
+ // Mode 1: Byte displacement.
+ disp_value = static_cast<signed char>(operand.data().buf[disp_offset]);
}
- OperandBuilder(Operand operand, int32_t offset) {
- DCHECK_GE(operand.data().len, 1);
- // Operand encodes REX ModR/M [SIB] [Disp].
- byte modrm = operand.data().buf[0];
- DCHECK_LT(modrm, 0xC0); // Disallow mode 3 (register target).
- bool has_sib = ((modrm & 0x07) == 0x04);
- byte mode = modrm & 0xC0;
- int disp_offset = has_sib ? 2 : 1;
- int base_reg = (has_sib ? operand.data().buf[1] : modrm) & 0x07;
- // Mode 0 with rbp/r13 as ModR/M or SIB base register always has a 32-bit
- // displacement.
- bool is_baseless =
- (mode == 0) && (base_reg == 0x05); // No base or RIP base.
- int32_t disp_value = 0;
- if (mode == 0x80 || is_baseless) {
- // Mode 2 or mode 0 with rbp/r13 as base: Word displacement.
- disp_value = ReadUnalignedValue<int32_t>(
- reinterpret_cast<Address>(&operand.data().buf[disp_offset]));
- } else if (mode == 0x40) {
- // Mode 1: Byte displacement.
- disp_value = static_cast<signed char>(operand.data().buf[disp_offset]);
- }
-
- // Write new operand with same registers, but with modified displacement.
- DCHECK(offset >= 0 ? disp_value + offset > disp_value
- : disp_value + offset < disp_value); // No overflow.
- disp_value += offset;
- data_.rex = operand.data().rex;
- if (!is_int8(disp_value) || is_baseless) {
- // Need 32 bits of displacement, mode 2 or mode 1 with register rbp/r13.
- data_.buf[0] = (modrm & 0x3F) | (is_baseless ? 0x00 : 0x80);
- data_.len = disp_offset + 4;
- WriteUnalignedValue(reinterpret_cast<Address>(&data_.buf[disp_offset]),
- disp_value);
- } else if (disp_value != 0 || (base_reg == 0x05)) {
- // Need 8 bits of displacement.
- data_.buf[0] = (modrm & 0x3F) | 0x40; // Mode 1.
- data_.len = disp_offset + 1;
- data_.buf[disp_offset] = static_cast<byte>(disp_value);
- } else {
- // Need no displacement.
- data_.buf[0] = (modrm & 0x3F); // Mode 0.
- data_.len = disp_offset;
- }
- if (has_sib) {
- data_.buf[1] = operand.data().buf[1];
- }
- }
-
- void set_modrm(int mod, Register rm_reg) {
- DCHECK(is_uint2(mod));
- data_.buf[0] = mod << 6 | rm_reg.low_bits();
- // Set REX.B to the high bit of rm.code().
- data_.rex |= rm_reg.high_bit();
- }
-
- void set_sib(ScaleFactor scale, Register index, Register base) {
- DCHECK_EQ(data_.len, 1);
- DCHECK(is_uint2(scale));
- // Use SIB with no index register only for base rsp or r12. Otherwise we
- // would skip the SIB byte entirely.
- DCHECK(index != rsp || base == rsp || base == r12);
- data_.buf[1] = (scale << 6) | (index.low_bits() << 3) | base.low_bits();
- data_.rex |= index.high_bit() << 1 | base.high_bit();
- data_.len = 2;
- }
-
- void set_disp8(int disp) {
- DCHECK(is_int8(disp));
- DCHECK(data_.len == 1 || data_.len == 2);
- int8_t* p = reinterpret_cast<int8_t*>(&data_.buf[data_.len]);
- *p = disp;
- data_.len += sizeof(int8_t);
- }
-
- void set_disp32(int disp) {
- DCHECK(data_.len == 1 || data_.len == 2);
- Address p = reinterpret_cast<Address>(&data_.buf[data_.len]);
- WriteUnalignedValue(p, disp);
- data_.len += sizeof(int32_t);
+ // Write new operand with same registers, but with modified displacement.
+ DCHECK(offset >= 0 ? disp_value + offset > disp_value
+ : disp_value + offset < disp_value); // No overflow.
+ disp_value += offset;
+ data_.rex = operand.data().rex;
+ if (!is_int8(disp_value) || is_baseless) {
+ // Need 32 bits of displacement, mode 2 or mode 1 with register rbp/r13.
+ data_.buf[0] = (modrm & 0x3F) | (is_baseless ? 0x00 : 0x80);
+ data_.len = disp_offset + 4;
+ WriteUnalignedValue(reinterpret_cast<Address>(&data_.buf[disp_offset]),
+ disp_value);
+ } else if (disp_value != 0 || (base_reg == 0x05)) {
+ // Need 8 bits of displacement.
+ data_.buf[0] = (modrm & 0x3F) | 0x40; // Mode 1.
+ data_.len = disp_offset + 1;
+ data_.buf[disp_offset] = static_cast<byte>(disp_value);
+ } else {
+ // Need no displacement.
+ data_.buf[0] = (modrm & 0x3F); // Mode 0.
+ data_.len = disp_offset;
}
-
- void set_disp64(int64_t disp) {
- DCHECK_EQ(1, data_.len);
- Address p = reinterpret_cast<Address>(&data_.buf[data_.len]);
- WriteUnalignedValue(p, disp);
- data_.len += sizeof(disp);
+ if (has_sib) {
+ data_.buf[1] = operand.data().buf[1];
}
-
- const Operand::Data& data() const { return data_; }
-
- private:
- Operand::Data data_;
-};
-} // namespace
-
-Operand::Operand(Register base, int32_t disp)
- : data_(OperandBuilder(base, disp).data()) {}
-
-Operand::Operand(Register base, Register index, ScaleFactor scale, int32_t disp)
- : data_(OperandBuilder(base, index, scale, disp).data()) {}
-
-Operand::Operand(Register index, ScaleFactor scale, int32_t disp)
- : data_(OperandBuilder(index, scale, disp).data()) {}
-
-Operand::Operand(Label* label, int addend)
- : data_(OperandBuilder(label, addend).data()) {}
-
-Operand::Operand(Operand operand, int32_t offset)
- : data_(OperandBuilder(operand, offset).data()) {}
+}
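For reference, a minimal self-contained sketch of the ModR/M mode to displacement-width rule the inlined constructor above relies on (the helper name is invented for illustration and is not part of the patch):

#include <cstdint>

// Width in bytes of the displacement that follows the ModR/M [SIB] bytes,
// given the mode bits and the (SIB or ModR/M) base register field.
static int DisplacementWidth(uint8_t modrm, uint8_t base_reg) {
  uint8_t mode = modrm & 0xC0;
  if (mode == 0x80) return 4;                      // mode 2: 32-bit disp
  if (mode == 0x40) return 1;                      // mode 1: 8-bit disp
  if (mode == 0x00 && base_reg == 0x05) return 4;  // no base / rip-relative
  return 0;                                        // mode 0: no displacement
}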
bool Operand::AddressUsesRegister(Register reg) const {
int code = reg.code();
@@ -3424,6 +3309,20 @@ void Assembler::roundsd(XMMRegister dst, XMMRegister src, RoundingMode mode) {
emit(static_cast<byte>(mode) | 0x8);
}
+void Assembler::roundps(XMMRegister dst, XMMRegister src, RoundingMode mode) {
+ DCHECK(!IsEnabled(AVX));
+ sse4_instr(dst, src, 0x66, 0x0F, 0x3A, 0x08);
+ // Mask precision exception.
+ emit(static_cast<byte>(mode) | 0x8);
+}
+
+void Assembler::roundpd(XMMRegister dst, XMMRegister src, RoundingMode mode) {
+ DCHECK(!IsEnabled(AVX));
+ sse4_instr(dst, src, 0x66, 0x0F, 0x3A, 0x09);
+ // Mask precision exception.
+ emit(static_cast<byte>(mode) | 0x8);
+}
+
void Assembler::movmskpd(Register dst, XMMRegister src) {
EnsureSpace ensure_space(this);
emit(0x66);
@@ -3443,8 +3342,8 @@ void Assembler::movmskps(Register dst, XMMRegister src) {
void Assembler::pmovmskb(Register dst, XMMRegister src) {
EnsureSpace ensure_space(this);
- emit_optional_rex_32(dst, src);
emit(0x66);
+ emit_optional_rex_32(dst, src);
emit(0x0F);
emit(0xD7);
emit_sse_operand(dst, src);
diff --git a/chromium/v8/src/codegen/x64/assembler-x64.h b/chromium/v8/src/codegen/x64/assembler-x64.h
index 24eb9765782..bf876945265 100644
--- a/chromium/v8/src/codegen/x64/assembler-x64.h
+++ b/chromium/v8/src/codegen/x64/assembler-x64.h
@@ -173,13 +173,48 @@ class V8_EXPORT_PRIVATE Operand {
};
// [base + disp/r]
- Operand(Register base, int32_t disp);
+ V8_INLINE Operand(Register base, int32_t disp) {
+ if (base == rsp || base == r12) {
+ // SIB byte is needed to encode (rsp + offset) or (r12 + offset).
+ set_sib(times_1, rsp, base);
+ }
+
+ if (disp == 0 && base != rbp && base != r13) {
+ set_modrm(0, base);
+ } else if (is_int8(disp)) {
+ set_modrm(1, base);
+ set_disp8(disp);
+ } else {
+ set_modrm(2, base);
+ set_disp32(disp);
+ }
+ }
// [base + index*scale + disp/r]
- Operand(Register base, Register index, ScaleFactor scale, int32_t disp);
+ V8_INLINE Operand(Register base, Register index, ScaleFactor scale,
+ int32_t disp) {
+ DCHECK(index != rsp);
+ set_sib(scale, index, base);
+ if (disp == 0 && base != rbp && base != r13) {
+ // This call to set_modrm doesn't overwrite the REX.B (or REX.X) bits
+ // possibly set by set_sib.
+ set_modrm(0, rsp);
+ } else if (is_int8(disp)) {
+ set_modrm(1, rsp);
+ set_disp8(disp);
+ } else {
+ set_modrm(2, rsp);
+ set_disp32(disp);
+ }
+ }
// [index*scale + disp/r]
- Operand(Register index, ScaleFactor scale, int32_t disp);
+ V8_INLINE Operand(Register index, ScaleFactor scale, int32_t disp) {
+ DCHECK(index != rsp);
+ set_modrm(0, rsp);
+ set_sib(scale, index, rbp);
+ set_disp32(disp);
+ }
// Offset from existing memory operand.
// Offset is added to existing displacement as 32-bit signed values and
@@ -187,25 +222,64 @@ class V8_EXPORT_PRIVATE Operand {
Operand(Operand base, int32_t offset);
// [rip + disp/r]
- explicit Operand(Label* label, int addend = 0);
+ V8_INLINE explicit Operand(Label* label, int addend = 0) {
+ data_.addend = addend;
+ DCHECK_NOT_NULL(label);
+ DCHECK(addend == 0 || (is_int8(addend) && label->is_bound()));
+ set_modrm(0, rbp);
+ set_disp64(reinterpret_cast<intptr_t>(label));
+ }
Operand(const Operand&) V8_NOEXCEPT = default;
+ const Data& data() const { return data_; }
+
// Checks whether either base or index register is the given register.
// Does not check the "reg" part of the Operand.
bool AddressUsesRegister(Register reg) const;
- // Queries related to the size of the generated instruction.
- // Whether the generated instruction will have a REX prefix.
- bool requires_rex() const { return data_.rex != 0; }
- // Size of the ModR/M, SIB and displacement parts of the generated
- // instruction.
- int operand_size() const { return data_.len; }
+ private:
+ V8_INLINE void set_modrm(int mod, Register rm_reg) {
+ DCHECK(is_uint2(mod));
+ data_.buf[0] = mod << 6 | rm_reg.low_bits();
+ // Set REX.B to the high bit of rm.code().
+ data_.rex |= rm_reg.high_bit();
+ }
- const Data& data() const { return data_; }
+ V8_INLINE void set_sib(ScaleFactor scale, Register index, Register base) {
+ DCHECK_EQ(data_.len, 1);
+ DCHECK(is_uint2(scale));
+ // Use SIB with no index register only for base rsp or r12. Otherwise we
+ // would skip the SIB byte entirely.
+ DCHECK(index != rsp || base == rsp || base == r12);
+ data_.buf[1] = (scale << 6) | (index.low_bits() << 3) | base.low_bits();
+ data_.rex |= index.high_bit() << 1 | base.high_bit();
+ data_.len = 2;
+ }
- private:
- const Data data_;
+ V8_INLINE void set_disp8(int disp) {
+ DCHECK(is_int8(disp));
+ DCHECK(data_.len == 1 || data_.len == 2);
+ int8_t* p = reinterpret_cast<int8_t*>(&data_.buf[data_.len]);
+ *p = disp;
+ data_.len += sizeof(int8_t);
+ }
+
+ V8_INLINE void set_disp32(int disp) {
+ DCHECK(data_.len == 1 || data_.len == 2);
+ Address p = reinterpret_cast<Address>(&data_.buf[data_.len]);
+ WriteUnalignedValue(p, disp);
+ data_.len += sizeof(int32_t);
+ }
+
+ V8_INLINE void set_disp64(int64_t disp) {
+ DCHECK_EQ(1, data_.len);
+ Address p = reinterpret_cast<Address>(&data_.buf[data_.len]);
+ WriteUnalignedValue(p, disp);
+ data_.len += sizeof(disp);
+ }
+
+ Data data_;
};
ASSERT_TRIVIALLY_COPYABLE(Operand);
static_assert(sizeof(Operand) <= 2 * kSystemPointerSize,
@@ -1141,6 +1215,8 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
void roundss(XMMRegister dst, XMMRegister src, RoundingMode mode);
void roundsd(XMMRegister dst, XMMRegister src, RoundingMode mode);
+ void roundps(XMMRegister dst, XMMRegister src, RoundingMode mode);
+ void roundpd(XMMRegister dst, XMMRegister src, RoundingMode mode);
void cmpps(XMMRegister dst, XMMRegister src, int8_t cmp);
void cmpps(XMMRegister dst, Operand src, int8_t cmp);
@@ -1358,6 +1434,14 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
vinstr(0x0b, dst, src1, src2, k66, k0F3A, kWIG);
emit(static_cast<byte>(mode) | 0x8); // Mask precision exception.
}
+ void vroundps(XMMRegister dst, XMMRegister src, RoundingMode mode) {
+ vinstr(0x08, dst, xmm0, src, k66, k0F3A, kWIG);
+ emit(static_cast<byte>(mode) | 0x8); // Mask precision exception.
+ }
+ void vroundpd(XMMRegister dst, XMMRegister src, RoundingMode mode) {
+ vinstr(0x09, dst, xmm0, src, k66, k0F3A, kWIG);
+ emit(static_cast<byte>(mode) | 0x8); // Mask precision exception.
+ }
void vsd(byte op, XMMRegister dst, XMMRegister src1, XMMRegister src2) {
vinstr(op, dst, src1, src2, kF2, k0F, kWIG);
diff --git a/chromium/v8/src/codegen/x64/interface-descriptors-x64.cc b/chromium/v8/src/codegen/x64/interface-descriptors-x64.cc
index 6b9754efca1..31b2b67a4fb 100644
--- a/chromium/v8/src/codegen/x64/interface-descriptors-x64.cc
+++ b/chromium/v8/src/codegen/x64/interface-descriptors-x64.cc
@@ -129,6 +129,16 @@ void CallWithSpreadDescriptor::InitializePlatformSpecific(
data->InitializePlatformSpecific(arraysize(registers), registers);
}
+void CallWithSpread_WithFeedbackDescriptor::InitializePlatformSpecific(
+ CallInterfaceDescriptorData* data) {
+ // rax : number of arguments (on the stack, not including receiver)
+ // rdi : the target to call
+ // rbx : the object to spread
+ // rdx : the feedback slot
+ Register registers[] = {rdi, rax, rbx, rdx};
+ data->InitializePlatformSpecific(arraysize(registers), registers);
+}
+
void CallWithArrayLikeDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
// rdi : the target to call
@@ -137,6 +147,16 @@ void CallWithArrayLikeDescriptor::InitializePlatformSpecific(
data->InitializePlatformSpecific(arraysize(registers), registers);
}
+void CallWithArrayLike_WithFeedbackDescriptor::InitializePlatformSpecific(
+ CallInterfaceDescriptorData* data) {
+ // rdi : the target to call
+ // rbx : the arguments list
+ // rdx : the feedback slot
+ // rax : the feedback vector
+ Register registers[] = {rdi, rbx, rdx, rax};
+ data->InitializePlatformSpecific(arraysize(registers), registers);
+}
+
void ConstructVarargsDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
// rax : number of arguments (on the stack, not including receiver)
@@ -168,6 +188,16 @@ void ConstructWithSpreadDescriptor::InitializePlatformSpecific(
data->InitializePlatformSpecific(arraysize(registers), registers);
}
+void ConstructWithSpread_WithFeedbackDescriptor::InitializePlatformSpecific(
+ CallInterfaceDescriptorData* data) {
+ // rax : number of arguments (on the stack, not including receiver)
+ // rdi : the target to call
+ // rdx : the new target
+ // rbx : the feedback slot
+ Register registers[] = {rdi, rdx, rax, rbx};
+ data->InitializePlatformSpecific(arraysize(registers), registers);
+}
+
void ConstructWithArrayLikeDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
// rdi : the target to call
@@ -177,6 +207,16 @@ void ConstructWithArrayLikeDescriptor::InitializePlatformSpecific(
data->InitializePlatformSpecific(arraysize(registers), registers);
}
+void ConstructWithArrayLike_WithFeedbackDescriptor::InitializePlatformSpecific(
+ CallInterfaceDescriptorData* data) {
+ // rdi : the target to call
+ // rdx : the new target
+ // rbx : the arguments list
+ // rax : the feedback slot
+ Register registers[] = {rdi, rdx, rbx, rax};
+ data->InitializePlatformSpecific(arraysize(registers), registers);
+}
+
void ConstructStubDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
// rax : number of arguments
@@ -193,11 +233,6 @@ void AbortDescriptor::InitializePlatformSpecific(
data->InitializePlatformSpecific(arraysize(registers), registers);
}
-void AllocateHeapNumberDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- data->InitializePlatformSpecific(0, nullptr);
-}
-
void CompareDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
Register registers[] = {rdx, rax};
@@ -289,7 +324,7 @@ void BinaryOp_WithFeedbackDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
Register registers[] = {rdx, // kLeft
rax, // kRight
- rdi, // Slot
+ rdi, // kSlot
rbx}; // kMaybeFeedbackVector
data->InitializePlatformSpecific(arraysize(registers), registers);
}
@@ -307,7 +342,7 @@ void Compare_WithFeedbackDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
Register registers[] = {rdx, // kLeft
rax, // kRight
- rdi, // Slot
+ rdi, // kSlot
rbx}; // kMaybeFeedbackVector
data->InitializePlatformSpecific(arraysize(registers), registers);
}
diff --git a/chromium/v8/src/codegen/x64/macro-assembler-x64.cc b/chromium/v8/src/codegen/x64/macro-assembler-x64.cc
index 7d6fdc5eb3d..fabf70bb036 100644
--- a/chromium/v8/src/codegen/x64/macro-assembler-x64.cc
+++ b/chromium/v8/src/codegen/x64/macro-assembler-x64.cc
@@ -2756,10 +2756,10 @@ void TurboAssembler::CheckPageFlag(Register object, Register scratch, int mask,
andq(scratch, object);
}
if (mask < (1 << kBitsPerByte)) {
- testb(Operand(scratch, MemoryChunk::kFlagsOffset),
+ testb(Operand(scratch, BasicMemoryChunk::kFlagsOffset),
Immediate(static_cast<uint8_t>(mask)));
} else {
- testl(Operand(scratch, MemoryChunk::kFlagsOffset), Immediate(mask));
+ testl(Operand(scratch, BasicMemoryChunk::kFlagsOffset), Immediate(mask));
}
j(cc, condition_met, condition_met_distance);
}
diff --git a/chromium/v8/src/codegen/x64/macro-assembler-x64.h b/chromium/v8/src/codegen/x64/macro-assembler-x64.h
index 8382bf5a287..8c4fd898064 100644
--- a/chromium/v8/src/codegen/x64/macro-assembler-x64.h
+++ b/chromium/v8/src/codegen/x64/macro-assembler-x64.h
@@ -174,8 +174,6 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
AVX_OP(Cmpneqpd, cmpneqpd)
AVX_OP(Cmpnltpd, cmpnltpd)
AVX_OP(Cmpnlepd, cmpnlepd)
- AVX_OP(Roundss, roundss)
- AVX_OP(Roundsd, roundsd)
AVX_OP(Sqrtss, sqrtss)
AVX_OP(Sqrtsd, sqrtsd)
AVX_OP(Sqrtps, sqrtps)
@@ -204,6 +202,7 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
AVX_OP(Psrlw, psrlw)
AVX_OP(Psrld, psrld)
AVX_OP(Psrlq, psrlq)
+ AVX_OP(Pmaddwd, pmaddwd)
AVX_OP(Paddb, paddb)
AVX_OP(Paddw, paddw)
AVX_OP(Paddd, paddd)
@@ -283,6 +282,10 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
AVX_OP_SSE4_1(Pmovzxwd, pmovzxwd)
AVX_OP_SSE4_1(Pmovzxdq, pmovzxdq)
AVX_OP_SSE4_1(Pextrq, pextrq)
+ AVX_OP_SSE4_1(Roundps, roundps)
+ AVX_OP_SSE4_1(Roundpd, roundpd)
+ AVX_OP_SSE4_1(Roundss, roundss)
+ AVX_OP_SSE4_1(Roundsd, roundsd)
AVX_OP_SSE4_2(Pcmpgtq, pcmpgtq)
#undef AVX_OP
@@ -442,7 +445,7 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
void Move(Register dst, Address ptr, RelocInfo::Mode rmode) {
// This method must not be used with heap object references. The stored
// address is not GC safe. Use the handle version instead.
- DCHECK(rmode > RelocInfo::LAST_GCED_ENUM);
+ DCHECK(rmode == RelocInfo::NONE || rmode > RelocInfo::LAST_GCED_ENUM);
movq(dst, Immediate64(ptr, rmode));
}
diff --git a/chromium/v8/src/codegen/x64/sse-instr.h b/chromium/v8/src/codegen/x64/sse-instr.h
index 74ec16d6a23..52107ed6b93 100644
--- a/chromium/v8/src/codegen/x64/sse-instr.h
+++ b/chromium/v8/src/codegen/x64/sse-instr.h
@@ -57,6 +57,7 @@
V(packssdw, 66, 0F, 6B) \
V(punpcklqdq, 66, 0F, 6C) \
V(punpckhqdq, 66, 0F, 6D) \
+ V(pmaddwd, 66, 0F, F5) \
V(paddb, 66, 0F, FC) \
V(paddw, 66, 0F, FD) \
V(paddd, 66, 0F, FE) \
diff --git a/chromium/v8/src/common/checks.h b/chromium/v8/src/common/checks.h
index ef9eb27ca07..eef59701d1d 100644
--- a/chromium/v8/src/common/checks.h
+++ b/chromium/v8/src/common/checks.h
@@ -18,9 +18,11 @@ namespace internal {
#ifdef ENABLE_SLOW_DCHECKS
#define SLOW_DCHECK(condition) \
CHECK(!v8::internal::FLAG_enable_slow_asserts || (condition))
+#define SLOW_DCHECK_IMPLIES(lhs, rhs) SLOW_DCHECK(!(lhs) || (rhs))
V8_EXPORT_PRIVATE extern bool FLAG_enable_slow_asserts;
#else
#define SLOW_DCHECK(condition) ((void)0)
+#define SLOW_DCHECK_IMPLIES(v1, v2) ((void)0)
static const bool FLAG_enable_slow_asserts = false;
#endif
diff --git a/chromium/v8/src/common/globals.h b/chromium/v8/src/common/globals.h
index 4309b702347..dcb1d4e13d4 100644
--- a/chromium/v8/src/common/globals.h
+++ b/chromium/v8/src/common/globals.h
@@ -68,10 +68,13 @@ constexpr int GB = MB * 1024;
#define V8_EMBEDDED_CONSTANT_POOL false
#endif
-#ifdef V8_TARGET_ARCH_ARM
-// Set stack limit lower for ARM than for other architectures because
-// stack allocating MacroAssembler takes 120K bytes.
-// See issue crbug.com/405338
+#if V8_TARGET_ARCH_ARM || V8_TARGET_ARCH_ARM64
+// Set stack limit lower for ARM and ARM64 than for other architectures because:
+// - on Arm stack allocating MacroAssembler takes 120K bytes.
+// See issue crbug.com/405338
+// - on Arm64 when running in single-process mode for Android WebView, when
+// initializing V8 we already have a large stack and so have to set the
+// limit lower. See issue crbug.com/v8/10575
#define V8_DEFAULT_STACK_SIZE_KB 864
#else
// Slightly less than 1MB, since Windows' default stack size for
@@ -151,6 +154,8 @@ constexpr int kMaxInt16 = (1 << 15) - 1;
constexpr int kMinInt16 = -(1 << 15);
constexpr int kMaxUInt16 = (1 << 16) - 1;
constexpr int kMinUInt16 = 0;
+constexpr int kMaxInt31 = kMaxInt / 2;
+constexpr int kMinInt31 = kMinInt / 2;
constexpr uint32_t kMaxUInt32 = 0xFFFFFFFFu;
constexpr int kMinUInt32 = 0;
@@ -327,7 +332,7 @@ constexpr uint64_t kQuietNaNMask = static_cast<uint64_t>(0xfff) << 51;
// Code-point values in Unicode 4.0 are 21 bits wide.
// Code units in UTF-16 are 16 bits wide.
using uc16 = uint16_t;
-using uc32 = int32_t;
+using uc32 = uint32_t;
constexpr int kOneByteSize = kCharSize;
constexpr int kUC16Size = sizeof(uc16); // NOLINT
@@ -1337,31 +1342,46 @@ class BinaryOperationFeedback {
};
// Type feedback is encoded in such a way that, we can combine the feedback
-// at different points by performing an 'OR' operation. Type feedback moves
-// to a more generic type when we combine feedback.
-//
-// kSignedSmall -> kNumber -> kNumberOrOddball -> kAny
-// kReceiver -> kReceiverOrNullOrUndefined -> kAny
-// kInternalizedString -> kString -> kAny
-// kSymbol -> kAny
-// kBigInt -> kAny
-//
+// at different points by performing an 'OR' operation.
// This is distinct from BinaryOperationFeedback on purpose, because the
// feedback that matters differs greatly as well as the way it is consumed.
class CompareOperationFeedback {
- public:
enum {
- kNone = 0x000,
- kSignedSmall = 0x001,
- kNumber = 0x003,
- kNumberOrOddball = 0x007,
- kInternalizedString = 0x008,
- kString = 0x018,
- kSymbol = 0x020,
- kBigInt = 0x040,
- kReceiver = 0x080,
- kReceiverOrNullOrUndefined = 0x180,
- kAny = 0x1ff
+ kSignedSmallFlag = 1 << 0,
+ kOtherNumberFlag = 1 << 1,
+ kBooleanFlag = 1 << 2,
+ kNullOrUndefinedFlag = 1 << 3,
+ kInternalizedStringFlag = 1 << 4,
+ kOtherStringFlag = 1 << 5,
+ kSymbolFlag = 1 << 6,
+ kBigIntFlag = 1 << 7,
+ kReceiverFlag = 1 << 8,
+ kAnyMask = 0x1FF,
+ };
+
+ public:
+ enum Type {
+ kNone = 0,
+
+ kBoolean = kBooleanFlag,
+ kNullOrUndefined = kNullOrUndefinedFlag,
+ kOddball = kBoolean | kNullOrUndefined,
+
+ kSignedSmall = kSignedSmallFlag,
+ kNumber = kSignedSmall | kOtherNumberFlag,
+ kNumberOrBoolean = kNumber | kBoolean,
+ kNumberOrOddball = kNumber | kOddball,
+
+ kInternalizedString = kInternalizedStringFlag,
+ kString = kInternalizedString | kOtherStringFlag,
+
+ kReceiver = kReceiverFlag,
+ kReceiverOrNullOrUndefined = kReceiver | kNullOrUndefined,
+
+ kBigInt = kBigIntFlag,
+ kSymbol = kSymbolFlag,
+
+ kAny = kAnyMask,
};
};
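For reference, a minimal self-contained sketch of how this flag encoding lets feedback collected at different sites be merged with a bitwise OR and only ever become more generic (stand-in constants mirroring the enum above, not the V8 headers):

#include <cassert>
#include <cstdint>

// Stand-in constants for illustration only.
constexpr uint32_t kSignedSmall = 1 << 0;
constexpr uint32_t kNumber = kSignedSmall | 1 << 1;
constexpr uint32_t kBoolean = 1 << 2;
constexpr uint32_t kNumberOrBoolean = kNumber | kBoolean;

int main() {
  uint32_t feedback = kSignedSmall;  // first comparison saw two Smis
  feedback |= kNumber;               // a later one saw a HeapNumber
  assert(feedback == kNumber);       // widened to "number", nothing lost
  feedback |= kBoolean;              // then a boolean operand shows up
  assert(feedback == kNumberOrBoolean);
  return 0;
}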
@@ -1592,8 +1612,8 @@ enum class LoadSensitivity {
V(TrapDataSegmentDropped) \
V(TrapElemSegmentDropped) \
V(TrapTableOutOfBounds) \
- V(TrapBrOnExnNullRef) \
- V(TrapRethrowNullRef) \
+ V(TrapBrOnExnNull) \
+ V(TrapRethrowNull) \
V(TrapNullDereference) \
V(TrapIllegalCast) \
V(TrapArrayOutOfBounds)
diff --git a/chromium/v8/src/common/message-template.h b/chromium/v8/src/common/message-template.h
index e6a25de2663..cf8d66b8bb6 100644
--- a/chromium/v8/src/common/message-template.h
+++ b/chromium/v8/src/common/message-template.h
@@ -320,7 +320,7 @@ namespace internal {
"a location, got %") \
T(InvalidArrayBufferLength, "Invalid array buffer length") \
T(ArrayBufferAllocationFailed, "Array buffer allocation failed") \
- T(Invalid, "Invalid %s : %") \
+ T(Invalid, "Invalid % : %") \
T(InvalidArrayLength, "Invalid array length") \
T(InvalidAtomicAccessIndex, "Invalid atomic access index") \
T(InvalidCodePoint, "Invalid code point %") \
@@ -551,8 +551,8 @@ namespace internal {
T(WasmTrapDataSegmentDropped, "data segment has been dropped") \
T(WasmTrapElemSegmentDropped, "element segment has been dropped") \
T(WasmTrapTableOutOfBounds, "table access out of bounds") \
- T(WasmTrapBrOnExnNullRef, "br_on_exn on nullref value") \
- T(WasmTrapRethrowNullRef, "rethrowing nullref value") \
+ T(WasmTrapBrOnExnNull, "br_on_exn on null value") \
+ T(WasmTrapRethrowNull, "rethrowing null value") \
T(WasmTrapNullDereference, "dereferencing a null pointer") \
T(WasmTrapIllegalCast, "illegal cast") \
T(WasmTrapArrayOutOfBounds, "array element access out of bounds") \
diff --git a/chromium/v8/src/compiler/access-builder.cc b/chromium/v8/src/compiler/access-builder.cc
index e19067f3c1c..656b250a1c2 100644
--- a/chromium/v8/src/compiler/access-builder.cc
+++ b/chromium/v8/src/compiler/access-builder.cc
@@ -17,6 +17,7 @@
#include "src/objects/objects-inl.h"
#include "src/objects/ordered-hash-table.h"
#include "src/objects/source-text-module.h"
+#include "torque-generated/exported-class-definitions-tq.h"
namespace v8 {
namespace internal {
@@ -530,6 +531,26 @@ FieldAccess AccessBuilder::ForFixedArrayLength() {
}
// static
+FieldAccess AccessBuilder::ForSloppyArgumentsElementsContext() {
+ FieldAccess access = {
+ kTaggedBase, SloppyArgumentsElements::kContextOffset,
+ MaybeHandle<Name>(), MaybeHandle<Map>(),
+ Type::Any(), MachineType::TaggedPointer(),
+ kPointerWriteBarrier};
+ return access;
+}
+
+// static
+FieldAccess AccessBuilder::ForSloppyArgumentsElementsArguments() {
+ FieldAccess access = {
+ kTaggedBase, SloppyArgumentsElements::kArgumentsOffset,
+ MaybeHandle<Name>(), MaybeHandle<Map>(),
+ Type::Any(), MachineType::TaggedPointer(),
+ kPointerWriteBarrier};
+ return access;
+}
+
+// static
FieldAccess AccessBuilder::ForPropertyArrayLengthAndHash() {
FieldAccess access = {
kTaggedBase, PropertyArray::kLengthAndHashOffset,
@@ -867,6 +888,14 @@ ElementAccess AccessBuilder::ForFixedArrayElement() {
}
// static
+ElementAccess AccessBuilder::ForSloppyArgumentsElementsMappedEntry() {
+ ElementAccess access = {
+ kTaggedBase, SloppyArgumentsElements::kMappedEntriesOffset, Type::Any(),
+ MachineType::AnyTagged(), kFullWriteBarrier};
+ return access;
+}
+
+// static
ElementAccess AccessBuilder::ForFixedArrayElement(
ElementsKind kind, LoadSensitivity load_sensitivity) {
ElementAccess access = {kTaggedBase, FixedArray::kHeaderSize,
@@ -1139,6 +1168,15 @@ FieldAccess AccessBuilder::ForDictionaryObjectHashIndex() {
return access;
}
+// static
+FieldAccess AccessBuilder::ForFeedbackCellValue() {
+ FieldAccess access = {kTaggedBase, FeedbackCell::kValueOffset,
+ Handle<Name>(), MaybeHandle<Map>(),
+ Type::Any(), MachineType::TaggedPointer(),
+ kFullWriteBarrier};
+ return access;
+}
+
} // namespace compiler
} // namespace internal
} // namespace v8
diff --git a/chromium/v8/src/compiler/access-builder.h b/chromium/v8/src/compiler/access-builder.h
index 622dc1d76c2..9edd3272a19 100644
--- a/chromium/v8/src/compiler/access-builder.h
+++ b/chromium/v8/src/compiler/access-builder.h
@@ -179,6 +179,12 @@ class V8_EXPORT_PRIVATE AccessBuilder final
// Provides access to FixedArray::length() field.
static FieldAccess ForFixedArrayLength();
+ // Provides access to SloppyArgumentsElements::context() field.
+ static FieldAccess ForSloppyArgumentsElementsContext();
+
+ // Provides access to SloppyArgumentsElements::arguments() field.
+ static FieldAccess ForSloppyArgumentsElementsArguments();
+
// Provides access to PropertyArray::length() field.
static FieldAccess ForPropertyArrayLengthAndHash();
@@ -283,6 +289,9 @@ class V8_EXPORT_PRIVATE AccessBuilder final
ElementsKind kind,
LoadSensitivity load_sensitivity = LoadSensitivity::kUnsafe);
+ // Provides access to SloppyArgumentsElements elements.
+ static ElementAccess ForSloppyArgumentsElementsMappedEntry();
+
// Provides access to stack arguments
static ElementAccess ForStackArgument();
@@ -318,6 +327,9 @@ class V8_EXPORT_PRIVATE AccessBuilder final
static FieldAccess ForDictionaryNextEnumerationIndex();
static FieldAccess ForDictionaryObjectHashIndex();
+ // Provides access to a FeedbackCell's value.
+ static FieldAccess ForFeedbackCellValue();
+
private:
DISALLOW_IMPLICIT_CONSTRUCTORS(AccessBuilder);
};
diff --git a/chromium/v8/src/compiler/access-info.cc b/chromium/v8/src/compiler/access-info.cc
index 9a2a56cd8b1..db195c1bf9e 100644
--- a/chromium/v8/src/compiler/access-info.cc
+++ b/chromium/v8/src/compiler/access-info.cc
@@ -36,7 +36,7 @@ bool CanInlinePropertyAccess(Handle<Map> map) {
if (map->instance_type() < LAST_PRIMITIVE_HEAP_OBJECT_TYPE) return true;
return map->IsJSObjectMap() && !map->is_dictionary_map() &&
!map->has_named_interceptor() &&
- // TODO(verwaest): Whitelist contexts to which we have access.
+ // TODO(verwaest): Allowlist contexts to which we have access.
!map->is_access_check_needed();
}
@@ -505,8 +505,10 @@ PropertyAccessInfo AccessInfoFactory::ComputePropertyAccessInfo(
MaybeHandle<JSObject> holder;
while (true) {
// Lookup the named property on the {map}.
- Handle<DescriptorArray> descriptors(map->instance_descriptors(), isolate());
- InternalIndex const number = descriptors->Search(*name, *map);
+ Handle<DescriptorArray> descriptors(
+ map->synchronized_instance_descriptors(), isolate());
+ InternalIndex const number =
+ descriptors->Search(*name, *map, broker()->is_concurrent_inlining());
if (number.is_found()) {
PropertyDetails const details = descriptors->GetDetails(number);
if (access_mode == AccessMode::kStore ||
diff --git a/chromium/v8/src/compiler/allocation-builder-inl.h b/chromium/v8/src/compiler/allocation-builder-inl.h
index 26fbe503c36..2b6109f49e2 100644
--- a/chromium/v8/src/compiler/allocation-builder-inl.h
+++ b/chromium/v8/src/compiler/allocation-builder-inl.h
@@ -5,10 +5,11 @@
#ifndef V8_COMPILER_ALLOCATION_BUILDER_INL_H_
#define V8_COMPILER_ALLOCATION_BUILDER_INL_H_
-#include "src/compiler/allocation-builder.h"
-
#include "src/compiler/access-builder.h"
+#include "src/compiler/allocation-builder.h"
#include "src/objects/map-inl.h"
+#include "torque-generated/exported-class-definitions-tq-inl.h"
+#include "torque-generated/exported-class-definitions-tq.h"
namespace v8 {
namespace internal {
@@ -40,6 +41,14 @@ void AllocationBuilder::AllocateArray(int length, MapRef map,
Store(AccessBuilder::ForFixedArrayLength(), jsgraph()->Constant(length));
}
+void AllocationBuilder::AllocateSloppyArgumentElements(
+ int length, MapRef map, AllocationType allocation) {
+ int size = SloppyArgumentsElements::SizeFor(length);
+ Allocate(size, allocation, Type::OtherInternal());
+ Store(AccessBuilder::ForMap(), map);
+ Store(AccessBuilder::ForFixedArrayLength(), jsgraph()->Constant(length));
+}
+
} // namespace compiler
} // namespace internal
} // namespace v8
diff --git a/chromium/v8/src/compiler/allocation-builder.h b/chromium/v8/src/compiler/allocation-builder.h
index 040dd014051..709146950c6 100644
--- a/chromium/v8/src/compiler/allocation-builder.h
+++ b/chromium/v8/src/compiler/allocation-builder.h
@@ -55,6 +55,11 @@ class AllocationBuilder final {
inline void AllocateArray(int length, MapRef map,
AllocationType allocation = AllocationType::kYoung);
+ // Compound allocation of a SloppyArgumentsElements
+ inline void AllocateSloppyArgumentElements(
+ int length, MapRef map,
+ AllocationType allocation = AllocationType::kYoung);
+
// Compound store of a constant into a field.
void Store(const FieldAccess& access, const ObjectRef& value) {
Store(access, jsgraph()->Constant(value));
diff --git a/chromium/v8/src/compiler/backend/arm/code-generator-arm.cc b/chromium/v8/src/compiler/backend/arm/code-generator-arm.cc
index d453cf0188d..f50c0c858a7 100644
--- a/chromium/v8/src/compiler/backend/arm/code-generator-arm.cc
+++ b/chromium/v8/src/compiler/backend/arm/code-generator-arm.cc
@@ -1456,7 +1456,12 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
break;
case kArmVrintmF32: {
CpuFeatureScope scope(tasm(), ARMv8);
- __ vrintm(i.OutputFloatRegister(), i.InputFloatRegister(0));
+ if (instr->InputAt(0)->IsSimd128Register()) {
+ __ vrintm(NeonS32, i.OutputSimd128Register(),
+ i.InputSimd128Register(0));
+ } else {
+ __ vrintm(i.OutputFloatRegister(), i.InputFloatRegister(0));
+ }
break;
}
case kArmVrintmF64: {
@@ -1466,7 +1471,12 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
}
case kArmVrintpF32: {
CpuFeatureScope scope(tasm(), ARMv8);
- __ vrintp(i.OutputFloatRegister(), i.InputFloatRegister(0));
+ if (instr->InputAt(0)->IsSimd128Register()) {
+ __ vrintp(NeonS32, i.OutputSimd128Register(),
+ i.InputSimd128Register(0));
+ } else {
+ __ vrintp(i.OutputFloatRegister(), i.InputFloatRegister(0));
+ }
break;
}
case kArmVrintpF64: {
@@ -1476,7 +1486,12 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
}
case kArmVrintzF32: {
CpuFeatureScope scope(tasm(), ARMv8);
- __ vrintz(i.OutputFloatRegister(), i.InputFloatRegister(0));
+ if (instr->InputAt(0)->IsSimd128Register()) {
+ __ vrintz(NeonS32, i.OutputSimd128Register(),
+ i.InputSimd128Register(0));
+ } else {
+ __ vrintz(i.OutputFloatRegister(), i.InputFloatRegister(0));
+ }
break;
}
case kArmVrintzF64: {
@@ -1960,43 +1975,61 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
case kArmF64x2Lt: {
UseScratchRegisterScope temps(tasm());
Register scratch = temps.Acquire();
- __ mov(scratch, Operand(0));
__ VFPCompareAndSetFlags(i.InputSimd128Register(0).low(),
i.InputSimd128Register(1).low());
- __ mov(scratch, Operand(-1), LeaveCC, lt);
- // Check for NaN.
- __ mov(scratch, Operand(0), LeaveCC, vs);
+ __ mov(scratch, Operand(0), LeaveCC, cs);
+ __ mov(scratch, Operand(-1), LeaveCC, mi);
__ vmov(i.OutputSimd128Register().low(), scratch, scratch);
- __ mov(scratch, Operand(0));
__ VFPCompareAndSetFlags(i.InputSimd128Register(0).high(),
i.InputSimd128Register(1).high());
- __ mov(scratch, Operand(-1), LeaveCC, lt);
- // Check for NaN.
- __ mov(scratch, Operand(0), LeaveCC, vs);
+ __ mov(scratch, Operand(0), LeaveCC, cs);
+ __ mov(scratch, Operand(-1), LeaveCC, mi);
__ vmov(i.OutputSimd128Register().high(), scratch, scratch);
break;
}
case kArmF64x2Le: {
UseScratchRegisterScope temps(tasm());
Register scratch = temps.Acquire();
- __ mov(scratch, Operand(0));
__ VFPCompareAndSetFlags(i.InputSimd128Register(0).low(),
i.InputSimd128Register(1).low());
- __ mov(scratch, Operand(-1), LeaveCC, le);
- // Check for NaN.
- __ mov(scratch, Operand(0), LeaveCC, vs);
+ __ mov(scratch, Operand(0), LeaveCC, hi);
+ __ mov(scratch, Operand(-1), LeaveCC, ls);
__ vmov(i.OutputSimd128Register().low(), scratch, scratch);
- __ mov(scratch, Operand(0));
__ VFPCompareAndSetFlags(i.InputSimd128Register(0).high(),
i.InputSimd128Register(1).high());
- __ mov(scratch, Operand(-1), LeaveCC, le);
- // Check for NaN.
- __ mov(scratch, Operand(0), LeaveCC, vs);
+ __ mov(scratch, Operand(0), LeaveCC, hi);
+ __ mov(scratch, Operand(-1), LeaveCC, ls);
__ vmov(i.OutputSimd128Register().high(), scratch, scratch);
break;
}
+ case kArmF64x2Pmin: {
+ Simd128Register dst = i.OutputSimd128Register();
+ Simd128Register lhs = i.InputSimd128Register(0);
+ Simd128Register rhs = i.InputSimd128Register(1);
+ DCHECK_EQ(dst, lhs);
+
+ // Move rhs only when rhs is strictly less than lhs (mi).
+ __ VFPCompareAndSetFlags(rhs.low(), lhs.low());
+ __ vmov(dst.low(), rhs.low(), mi);
+ __ VFPCompareAndSetFlags(rhs.high(), lhs.high());
+ __ vmov(dst.high(), rhs.high(), mi);
+ break;
+ }
+ case kArmF64x2Pmax: {
+ Simd128Register dst = i.OutputSimd128Register();
+ Simd128Register lhs = i.InputSimd128Register(0);
+ Simd128Register rhs = i.InputSimd128Register(1);
+ DCHECK_EQ(dst, lhs);
+
+ // Move rhs only when rhs is strictly greater than lhs (gt).
+ __ VFPCompareAndSetFlags(rhs.low(), lhs.low());
+ __ vmov(dst.low(), rhs.low(), gt);
+ __ VFPCompareAndSetFlags(rhs.high(), lhs.high());
+ __ vmov(dst.high(), rhs.high(), gt);
+ break;
+ }
case kArmI64x2SplatI32Pair: {
Simd128Register dst = i.OutputSimd128Register();
__ vdup(Neon32, dst, i.InputRegister(0));
@@ -2068,7 +2101,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
}
case kArmI64x2Neg: {
Simd128Register dst = i.OutputSimd128Register();
- __ vmov(dst, static_cast<uint64_t>(0));
+ __ vmov(dst, uint64_t{0});
__ vqsub(NeonS64, dst, dst, i.InputSimd128Register(0));
break;
}
@@ -2220,6 +2253,33 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
i.InputSimd128Register(0));
break;
}
+ case kArmF32x4Pmin: {
+ Simd128Register dst = i.OutputSimd128Register();
+ Simd128Register lhs = i.InputSimd128Register(0);
+ Simd128Register rhs = i.InputSimd128Register(1);
+ DCHECK_NE(dst, lhs);
+ DCHECK_NE(dst, rhs);
+
+ // f32x4.pmin(lhs, rhs)
+ // = v128.bitselect(rhs, lhs, f32x4.lt(rhs, lhs))
+ // = v128.bitselect(rhs, lhs, f32x4.gt(lhs, rhs))
+ __ vcgt(dst, lhs, rhs);
+ __ vbsl(dst, rhs, lhs);
+ break;
+ }
+ case kArmF32x4Pmax: {
+ Simd128Register dst = i.OutputSimd128Register();
+ Simd128Register lhs = i.InputSimd128Register(0);
+ Simd128Register rhs = i.InputSimd128Register(1);
+ DCHECK_NE(dst, lhs);
+ DCHECK_NE(dst, rhs);
+
+ // f32x4.pmax(lhs, rhs)
+ // = v128.bitselect(rhs, lhs, f32x4.gt(rhs, lhs))
+ __ vcgt(dst, rhs, lhs);
+ __ vbsl(dst, rhs, lhs);
+ break;
+ }
case kArmI32x4Splat: {
__ vdup(Neon32, i.OutputSimd128Register(), i.InputRegister(0));
break;
@@ -2361,8 +2421,8 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
__ vshr(NeonS32, tmp2, src, 31);
// Set i-th bit of each lane i. When AND with tmp, the lanes that
// are signed will have i-th bit set, unsigned will be 0.
- __ vmov(mask.low(), Double((uint64_t)0x0000'0002'0000'0001));
- __ vmov(mask.high(), Double((uint64_t)0x0000'0008'0000'0004));
+ __ vmov(mask.low(), Double(uint64_t{0x0000'0002'0000'0001}));
+ __ vmov(mask.high(), Double(uint64_t{0x0000'0008'0000'0004}));
__ vand(tmp2, mask, tmp2);
__ vpadd(Neon32, tmp2.low(), tmp2.low(), tmp2.high());
__ vpadd(Neon32, tmp2.low(), tmp2.low(), kDoubleRegZero);
@@ -2538,8 +2598,8 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
__ vshr(NeonS16, tmp2, src, 15);
// Set i-th bit of each lane i. When AND with tmp, the lanes that
// are signed will have i-th bit set, unsigned will be 0.
- __ vmov(mask.low(), Double((uint64_t)0x0008'0004'0002'0001));
- __ vmov(mask.high(), Double((uint64_t)0x0080'0040'0020'0010));
+ __ vmov(mask.low(), Double(uint64_t{0x0008'0004'0002'0001}));
+ __ vmov(mask.high(), Double(uint64_t{0x0080'0040'0020'0010}));
__ vand(tmp2, mask, tmp2);
__ vpadd(Neon16, tmp2.low(), tmp2.low(), tmp2.high());
__ vpadd(Neon16, tmp2.low(), tmp2.low(), tmp2.low());
@@ -2692,8 +2752,8 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
__ vshr(NeonS8, tmp2, src, 7);
// Set i-th bit of each lane i. When AND with tmp, the lanes that
// are signed will have i-th bit set, unsigned will be 0.
- __ vmov(mask.low(), Double((uint64_t)0x8040'2010'0804'0201));
- __ vmov(mask.high(), Double((uint64_t)0x8040'2010'0804'0201));
+ __ vmov(mask.low(), Double(uint64_t{0x8040'2010'0804'0201}));
+ __ vmov(mask.high(), Double(uint64_t{0x8040'2010'0804'0201}));
__ vand(tmp2, mask, tmp2);
__ vext(mask, tmp2, tmp2, 8);
__ vzip(Neon8, mask, tmp2);
@@ -3028,7 +3088,9 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
__ vrev16(Neon8, i.OutputSimd128Register(), i.InputSimd128Register(0));
break;
}
- case kArmS1x4AnyTrue: {
+ case kArmV32x4AnyTrue:
+ case kArmV16x8AnyTrue:
+ case kArmV8x16AnyTrue: {
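+ // Whether any lane is non-zero does not depend on the lane width, so all
+ // three AnyTrue opcodes share the same reduction.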
const QwNeonRegister& src = i.InputSimd128Register(0);
UseScratchRegisterScope temps(tasm());
DwVfpRegister scratch = temps.AcquireD();
@@ -3039,7 +3101,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
__ mov(i.OutputRegister(), Operand(1), LeaveCC, ne);
break;
}
- case kArmS1x4AllTrue: {
+ case kArmV32x4AllTrue: {
const QwNeonRegister& src = i.InputSimd128Register(0);
UseScratchRegisterScope temps(tasm());
DwVfpRegister scratch = temps.AcquireD();
@@ -3050,19 +3112,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
__ mov(i.OutputRegister(), Operand(1), LeaveCC, ne);
break;
}
- case kArmS1x8AnyTrue: {
- const QwNeonRegister& src = i.InputSimd128Register(0);
- UseScratchRegisterScope temps(tasm());
- DwVfpRegister scratch = temps.AcquireD();
- __ vpmax(NeonU16, scratch, src.low(), src.high());
- __ vpmax(NeonU16, scratch, scratch, scratch);
- __ vpmax(NeonU16, scratch, scratch, scratch);
- __ ExtractLane(i.OutputRegister(), scratch, NeonS16, 0);
- __ cmp(i.OutputRegister(), Operand(0));
- __ mov(i.OutputRegister(), Operand(1), LeaveCC, ne);
- break;
- }
- case kArmS1x8AllTrue: {
+ case kArmV16x8AllTrue: {
const QwNeonRegister& src = i.InputSimd128Register(0);
UseScratchRegisterScope temps(tasm());
DwVfpRegister scratch = temps.AcquireD();
@@ -3074,23 +3124,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
__ mov(i.OutputRegister(), Operand(1), LeaveCC, ne);
break;
}
- case kArmS1x16AnyTrue: {
- const QwNeonRegister& src = i.InputSimd128Register(0);
- UseScratchRegisterScope temps(tasm());
- QwNeonRegister q_scratch = temps.AcquireQ();
- DwVfpRegister d_scratch = q_scratch.low();
- __ vpmax(NeonU8, d_scratch, src.low(), src.high());
- __ vpmax(NeonU8, d_scratch, d_scratch, d_scratch);
- // vtst to detect any bits in the bottom 32 bits of d_scratch.
- // This saves an instruction vs. the naive sequence of vpmax.
- // kDoubleRegZero is not changed, since it is 0.
- __ vtst(Neon32, q_scratch, q_scratch, q_scratch);
- __ ExtractLane(i.OutputRegister(), d_scratch, NeonS32, 0);
- __ cmp(i.OutputRegister(), Operand(0));
- __ mov(i.OutputRegister(), Operand(1), LeaveCC, ne);
- break;
- }
- case kArmS1x16AllTrue: {
+ case kArmV8x16AllTrue: {
const QwNeonRegister& src = i.InputSimd128Register(0);
UseScratchRegisterScope temps(tasm());
DwVfpRegister scratch = temps.AcquireD();
diff --git a/chromium/v8/src/compiler/backend/arm/instruction-codes-arm.h b/chromium/v8/src/compiler/backend/arm/instruction-codes-arm.h
index c6365bf7a50..39ed658fc4b 100644
--- a/chromium/v8/src/compiler/backend/arm/instruction-codes-arm.h
+++ b/chromium/v8/src/compiler/backend/arm/instruction-codes-arm.h
@@ -144,6 +144,8 @@ namespace compiler {
V(ArmF64x2Ne) \
V(ArmF64x2Lt) \
V(ArmF64x2Le) \
+ V(ArmF64x2Pmin) \
+ V(ArmF64x2Pmax) \
V(ArmF32x4Splat) \
V(ArmF32x4ExtractLane) \
V(ArmF32x4ReplaceLane) \
@@ -165,6 +167,8 @@ namespace compiler {
V(ArmF32x4Ne) \
V(ArmF32x4Lt) \
V(ArmF32x4Le) \
+ V(ArmF32x4Pmin) \
+ V(ArmF32x4Pmax) \
V(ArmI64x2SplatI32Pair) \
V(ArmI64x2ReplaceLaneI32Pair) \
V(ArmI64x2Neg) \
@@ -304,12 +308,12 @@ namespace compiler {
V(ArmS8x8Reverse) \
V(ArmS8x4Reverse) \
V(ArmS8x2Reverse) \
- V(ArmS1x4AnyTrue) \
- V(ArmS1x4AllTrue) \
- V(ArmS1x8AnyTrue) \
- V(ArmS1x8AllTrue) \
- V(ArmS1x16AnyTrue) \
- V(ArmS1x16AllTrue) \
+ V(ArmV32x4AnyTrue) \
+ V(ArmV32x4AllTrue) \
+ V(ArmV16x8AnyTrue) \
+ V(ArmV16x8AllTrue) \
+ V(ArmV8x16AnyTrue) \
+ V(ArmV8x16AllTrue) \
V(ArmS8x16LoadSplat) \
V(ArmS16x8LoadSplat) \
V(ArmS32x4LoadSplat) \
diff --git a/chromium/v8/src/compiler/backend/arm/instruction-scheduler-arm.cc b/chromium/v8/src/compiler/backend/arm/instruction-scheduler-arm.cc
index 8c09acd6df8..196aa1ce6c0 100644
--- a/chromium/v8/src/compiler/backend/arm/instruction-scheduler-arm.cc
+++ b/chromium/v8/src/compiler/backend/arm/instruction-scheduler-arm.cc
@@ -124,6 +124,8 @@ int InstructionScheduler::GetTargetInstructionFlags(
case kArmF64x2Ne:
case kArmF64x2Lt:
case kArmF64x2Le:
+ case kArmF64x2Pmin:
+ case kArmF64x2Pmax:
case kArmF32x4Splat:
case kArmF32x4ExtractLane:
case kArmF32x4ReplaceLane:
@@ -145,6 +147,8 @@ int InstructionScheduler::GetTargetInstructionFlags(
case kArmF32x4Ne:
case kArmF32x4Lt:
case kArmF32x4Le:
+ case kArmF32x4Pmin:
+ case kArmF32x4Pmax:
case kArmI64x2SplatI32Pair:
case kArmI64x2ReplaceLaneI32Pair:
case kArmI64x2Neg:
@@ -284,12 +288,12 @@ int InstructionScheduler::GetTargetInstructionFlags(
case kArmS8x8Reverse:
case kArmS8x4Reverse:
case kArmS8x2Reverse:
- case kArmS1x4AnyTrue:
- case kArmS1x4AllTrue:
- case kArmS1x8AnyTrue:
- case kArmS1x8AllTrue:
- case kArmS1x16AnyTrue:
- case kArmS1x16AllTrue:
+ case kArmV32x4AnyTrue:
+ case kArmV32x4AllTrue:
+ case kArmV16x8AnyTrue:
+ case kArmV16x8AllTrue:
+ case kArmV8x16AnyTrue:
+ case kArmV8x16AllTrue:
return kNoOpcodeFlags;
case kArmVldrF32:
diff --git a/chromium/v8/src/compiler/backend/arm/instruction-selector-arm.cc b/chromium/v8/src/compiler/backend/arm/instruction-selector-arm.cc
index 74658697b50..de0e7c4162c 100644
--- a/chromium/v8/src/compiler/backend/arm/instruction-selector-arm.cc
+++ b/chromium/v8/src/compiler/backend/arm/instruction-selector-arm.cc
@@ -1495,7 +1495,10 @@ void InstructionSelector::VisitUint32Mod(Node* node) {
V(Float64RoundTruncate, kArmVrintzF64) \
V(Float64RoundTiesAway, kArmVrintaF64) \
V(Float32RoundTiesEven, kArmVrintnF32) \
- V(Float64RoundTiesEven, kArmVrintnF64)
+ V(Float64RoundTiesEven, kArmVrintnF64) \
+ V(F32x4Ceil, kArmVrintpF32) \
+ V(F32x4Floor, kArmVrintmF32) \
+ V(F32x4Trunc, kArmVrintzF32)
#define RRR_OP_LIST(V) \
V(Int32MulHigh, kArmSmmul) \
@@ -2525,12 +2528,12 @@ void InstructionSelector::VisitWord32AtomicPairCompareExchange(Node* node) {
V(I8x16Neg, kArmI8x16Neg) \
V(I8x16Abs, kArmI8x16Abs) \
V(S128Not, kArmS128Not) \
- V(S1x4AnyTrue, kArmS1x4AnyTrue) \
- V(S1x4AllTrue, kArmS1x4AllTrue) \
- V(S1x8AnyTrue, kArmS1x8AnyTrue) \
- V(S1x8AllTrue, kArmS1x8AllTrue) \
- V(S1x16AnyTrue, kArmS1x16AnyTrue) \
- V(S1x16AllTrue, kArmS1x16AllTrue)
+ V(V32x4AnyTrue, kArmV32x4AnyTrue) \
+ V(V32x4AllTrue, kArmV32x4AllTrue) \
+ V(V16x8AnyTrue, kArmV16x8AnyTrue) \
+ V(V16x8AllTrue, kArmV16x8AllTrue) \
+ V(V8x16AnyTrue, kArmV8x16AnyTrue) \
+ V(V8x16AllTrue, kArmV8x16AllTrue)
#define SIMD_SHIFT_OP_LIST(V) \
V(I64x2Shl, 64) \
@@ -2941,6 +2944,42 @@ void InstructionSelector::VisitI32x4BitMask(Node* node) {
VisitBitMask<kArmI32x4BitMask>(this, node);
}
+namespace {
+void VisitF32x4PminOrPmax(InstructionSelector* selector, ArchOpcode opcode,
+ Node* node) {
+ ArmOperandGenerator g(selector);
+ // Need all unique registers because we first compare the two inputs, then we
+ // need the inputs to remain unchanged for the bitselect later.
+ selector->Emit(opcode, g.DefineAsRegister(node),
+ g.UseUniqueRegister(node->InputAt(0)),
+ g.UseUniqueRegister(node->InputAt(1)));
+}
+
+void VisitF64x2PminOrPMax(InstructionSelector* selector, ArchOpcode opcode,
+ Node* node) {
+ ArmOperandGenerator g(selector);
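+ // The F64x2 codegen rewrites dst lane by lane with conditional vmovs, so
+ // dst must alias the first input.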
+ selector->Emit(opcode, g.DefineSameAsFirst(node),
+ g.UseRegister(node->InputAt(0)),
+ g.UseRegister(node->InputAt(1)));
+}
+} // namespace
+
+void InstructionSelector::VisitF32x4Pmin(Node* node) {
+ VisitF32x4PminOrPmax(this, kArmF32x4Pmin, node);
+}
+
+void InstructionSelector::VisitF32x4Pmax(Node* node) {
+ VisitF32x4PminOrPmax(this, kArmF32x4Pmax, node);
+}
+
+void InstructionSelector::VisitF64x2Pmin(Node* node) {
+ VisitF64x2PminOrPMax(this, kArmF64x2Pmin, node);
+}
+
+void InstructionSelector::VisitF64x2Pmax(Node* node) {
+ VisitF64x2PminOrPMax(this, kArmF64x2Pmax, node);
+}
+
// static
MachineOperatorBuilder::Flags
InstructionSelector::SupportedMachineOperatorFlags() {
diff --git a/chromium/v8/src/compiler/backend/arm64/code-generator-arm64.cc b/chromium/v8/src/compiler/backend/arm64/code-generator-arm64.cc
index 4cf19a5d802..d21440c35b3 100644
--- a/chromium/v8/src/compiler/backend/arm64/code-generator-arm64.cc
+++ b/chromium/v8/src/compiler/backend/arm64/code-generator-arm64.cc
@@ -502,8 +502,9 @@ void EmitMaybePoisonedFPLoad(CodeGenerator* codegen, InstructionCode opcode,
__ asm_imm(i.OutputSimd128Register().format(), \
i.InputSimd128Register(0).format(), i.InputInt##width(1)); \
} else { \
- VRegister tmp = i.TempSimd128Register(0); \
- Register shift = i.TempRegister(1).gp(); \
+ UseScratchRegisterScope temps(tasm()); \
+ VRegister tmp = temps.AcquireQ(); \
+ Register shift = temps.Acquire##gp(); \
constexpr int mask = (1 << width) - 1; \
__ And(shift, i.InputRegister32(1), mask); \
__ Dup(tmp.format(), shift); \
@@ -521,8 +522,9 @@ void EmitMaybePoisonedFPLoad(CodeGenerator* codegen, InstructionCode opcode,
__ asm_imm(i.OutputSimd128Register().format(), \
i.InputSimd128Register(0).format(), i.InputInt##width(1)); \
} else { \
- VRegister tmp = i.TempSimd128Register(0); \
- Register shift = i.TempRegister(1).gp(); \
+ UseScratchRegisterScope temps(tasm()); \
+ VRegister tmp = temps.AcquireQ(); \
+ Register shift = temps.Acquire##gp(); \
constexpr int mask = (1 << width) - 1; \
__ And(shift, i.InputRegister32(1), mask); \
__ Dup(tmp.format(), shift); \
@@ -1901,6 +1903,43 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
}
SIMD_DESTRUCTIVE_BINOP_CASE(kArm64F64x2Qfma, Fmla, 2D);
SIMD_DESTRUCTIVE_BINOP_CASE(kArm64F64x2Qfms, Fmls, 2D);
+ case kArm64F64x2Pmin: {
+ VRegister dst = i.OutputSimd128Register().V2D();
+ VRegister lhs = i.InputSimd128Register(0).V2D();
+ VRegister rhs = i.InputSimd128Register(1).V2D();
+ // f64x2.pmin(lhs, rhs)
+ // = v128.bitselect(rhs, lhs, f64x2.lt(rhs,lhs))
+ // = v128.bitselect(rhs, lhs, f64x2.gt(lhs,rhs))
+ __ Fcmgt(dst, lhs, rhs);
+ __ Bsl(dst.V16B(), rhs.V16B(), lhs.V16B());
+ break;
+ }
+ case kArm64F64x2Pmax: {
+ VRegister dst = i.OutputSimd128Register().V2D();
+ VRegister lhs = i.InputSimd128Register(0).V2D();
+ VRegister rhs = i.InputSimd128Register(1).V2D();
+ // f64x2.pmax(lhs, rhs)
+ // = v128.bitselect(rhs, lhs, f64x2.gt(rhs, lhs))
+ __ Fcmgt(dst, rhs, lhs);
+ __ Bsl(dst.V16B(), rhs.V16B(), lhs.V16B());
+ break;
+ }
+ case kArm64F64x2RoundUp:
+ __ Frintp(i.OutputSimd128Register().V2D(),
+ i.InputSimd128Register(0).V2D());
+ break;
+ case kArm64F64x2RoundDown:
+ __ Frintm(i.OutputSimd128Register().V2D(),
+ i.InputSimd128Register(0).V2D());
+ break;
+ case kArm64F64x2RoundTruncate:
+ __ Frintz(i.OutputSimd128Register().V2D(),
+ i.InputSimd128Register(0).V2D());
+ break;
+ case kArm64F64x2RoundTiesEven:
+ __ Frintn(i.OutputSimd128Register().V2D(),
+ i.InputSimd128Register(0).V2D());
+ break;
case kArm64F32x4Splat: {
__ Dup(i.OutputSimd128Register().V4S(), i.InputSimd128Register(0).S(), 0);
break;
@@ -1953,6 +1992,43 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
}
SIMD_DESTRUCTIVE_BINOP_CASE(kArm64F32x4Qfma, Fmla, 4S);
SIMD_DESTRUCTIVE_BINOP_CASE(kArm64F32x4Qfms, Fmls, 4S);
+ case kArm64F32x4Pmin: {
+ VRegister dst = i.OutputSimd128Register().V4S();
+ VRegister lhs = i.InputSimd128Register(0).V4S();
+ VRegister rhs = i.InputSimd128Register(1).V4S();
+ // f32x4.pmin(lhs, rhs)
+ // = v128.bitselect(rhs, lhs, f32x4.lt(rhs, lhs))
+ // = v128.bitselect(rhs, lhs, f32x4.gt(lhs, rhs))
+ __ Fcmgt(dst, lhs, rhs);
+ __ Bsl(dst.V16B(), rhs.V16B(), lhs.V16B());
+ break;
+ }
+ case kArm64F32x4Pmax: {
+ VRegister dst = i.OutputSimd128Register().V4S();
+ VRegister lhs = i.InputSimd128Register(0).V4S();
+ VRegister rhs = i.InputSimd128Register(1).V4S();
+ // f32x4.pmax(lhs, rhs)
+ // = v128.bitselect(rhs, lhs, f32x4.gt(rhs, lhs))
+ __ Fcmgt(dst, rhs, lhs);
+ __ Bsl(dst.V16B(), rhs.V16B(), lhs.V16B());
+ break;
+ }
+ case kArm64F32x4RoundUp:
+ __ Frintp(i.OutputSimd128Register().V4S(),
+ i.InputSimd128Register(0).V4S());
+ break;
+ case kArm64F32x4RoundDown:
+ __ Frintm(i.OutputSimd128Register().V4S(),
+ i.InputSimd128Register(0).V4S());
+ break;
+ case kArm64F32x4RoundTruncate:
+ __ Frintz(i.OutputSimd128Register().V4S(),
+ i.InputSimd128Register(0).V4S());
+ break;
+ case kArm64F32x4RoundTiesEven:
+ __ Frintn(i.OutputSimd128Register().V4S(),
+ i.InputSimd128Register(0).V4S());
+ break;
case kArm64I64x2Splat: {
__ Dup(i.OutputSimd128Register().V2D(), i.InputRegister64(0));
break;
@@ -2132,6 +2208,17 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
__ Mov(dst.W(), tmp.V4S(), 0);
break;
}
+ case kArm64I32x4DotI16x8S: {
+ UseScratchRegisterScope scope(tasm());
+ VRegister lhs = i.InputSimd128Register(0);
+ VRegister rhs = i.InputSimd128Register(1);
+ VRegister tmp1 = scope.AcquireV(kFormat4S);
+ VRegister tmp2 = scope.AcquireV(kFormat4S);
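+ // Multiply the low and high halves into 32-bit products, then pairwise-add
+ // adjacent products to form each i32 lane of the dot product.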
+ __ Smull(tmp1, lhs.V4H(), rhs.V4H());
+ __ Smull2(tmp2, lhs.V8H(), rhs.V8H());
+ __ Addp(i.OutputSimd128Register().V4S(), tmp1, tmp2);
+ break;
+ }
case kArm64I16x8Splat: {
__ Dup(i.OutputSimd128Register().V8H(), i.InputRegister32(0));
break;
@@ -2480,7 +2567,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
SIMD_UNOP_CASE(kArm64S8x8Reverse, Rev64, 16B);
SIMD_UNOP_CASE(kArm64S8x4Reverse, Rev32, 16B);
SIMD_UNOP_CASE(kArm64S8x2Reverse, Rev16, 16B);
- case kArm64S1x2AllTrue: {
+ case kArm64V64x2AllTrue: {
UseScratchRegisterScope scope(tasm());
VRegister temp1 = scope.AcquireV(kFormat2D);
VRegister temp2 = scope.AcquireV(kFormatS);
@@ -2508,32 +2595,32 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
break;
}
case kArm64I16x8Load8x8S: {
- __ ld1(i.OutputSimd128Register().V8B(), i.MemoryOperand(0));
+ __ Ldr(i.OutputSimd128Register().V8B(), i.MemoryOperand(0));
__ Sxtl(i.OutputSimd128Register().V8H(), i.OutputSimd128Register().V8B());
break;
}
case kArm64I16x8Load8x8U: {
- __ ld1(i.OutputSimd128Register().V8B(), i.MemoryOperand(0));
+ __ Ldr(i.OutputSimd128Register().V8B(), i.MemoryOperand(0));
__ Uxtl(i.OutputSimd128Register().V8H(), i.OutputSimd128Register().V8B());
break;
}
case kArm64I32x4Load16x4S: {
- __ ld1(i.OutputSimd128Register().V4H(), i.MemoryOperand(0));
+ __ Ldr(i.OutputSimd128Register().V4H(), i.MemoryOperand(0));
__ Sxtl(i.OutputSimd128Register().V4S(), i.OutputSimd128Register().V4H());
break;
}
case kArm64I32x4Load16x4U: {
- __ ld1(i.OutputSimd128Register().V4H(), i.MemoryOperand(0));
+ __ Ldr(i.OutputSimd128Register().V4H(), i.MemoryOperand(0));
__ Uxtl(i.OutputSimd128Register().V4S(), i.OutputSimd128Register().V4H());
break;
}
case kArm64I64x2Load32x2S: {
- __ ld1(i.OutputSimd128Register().V2S(), i.MemoryOperand(0));
+ __ Ldr(i.OutputSimd128Register().V2S(), i.MemoryOperand(0));
__ Sxtl(i.OutputSimd128Register().V2D(), i.OutputSimd128Register().V2S());
break;
}
case kArm64I64x2Load32x2U: {
- __ ld1(i.OutputSimd128Register().V2S(), i.MemoryOperand(0));
+ __ Ldr(i.OutputSimd128Register().V2S(), i.MemoryOperand(0));
__ Uxtl(i.OutputSimd128Register().V2D(), i.OutputSimd128Register().V2S());
break;
}
@@ -2548,13 +2635,13 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
break; \
}
// for AnyTrue, the format does not matter, umaxv does not support 2D
- SIMD_REDUCE_OP_CASE(kArm64S1x2AnyTrue, Umaxv, kFormatS, 4S);
- SIMD_REDUCE_OP_CASE(kArm64S1x4AnyTrue, Umaxv, kFormatS, 4S);
- SIMD_REDUCE_OP_CASE(kArm64S1x4AllTrue, Uminv, kFormatS, 4S);
- SIMD_REDUCE_OP_CASE(kArm64S1x8AnyTrue, Umaxv, kFormatH, 8H);
- SIMD_REDUCE_OP_CASE(kArm64S1x8AllTrue, Uminv, kFormatH, 8H);
- SIMD_REDUCE_OP_CASE(kArm64S1x16AnyTrue, Umaxv, kFormatB, 16B);
- SIMD_REDUCE_OP_CASE(kArm64S1x16AllTrue, Uminv, kFormatB, 16B);
+ SIMD_REDUCE_OP_CASE(kArm64V64x2AnyTrue, Umaxv, kFormatS, 4S);
+ SIMD_REDUCE_OP_CASE(kArm64V32x4AnyTrue, Umaxv, kFormatS, 4S);
+ SIMD_REDUCE_OP_CASE(kArm64V32x4AllTrue, Uminv, kFormatS, 4S);
+ SIMD_REDUCE_OP_CASE(kArm64V16x8AnyTrue, Umaxv, kFormatH, 8H);
+ SIMD_REDUCE_OP_CASE(kArm64V16x8AllTrue, Uminv, kFormatH, 8H);
+ SIMD_REDUCE_OP_CASE(kArm64V8x16AnyTrue, Umaxv, kFormatB, 16B);
+ SIMD_REDUCE_OP_CASE(kArm64V8x16AllTrue, Uminv, kFormatB, 16B);
}
return kSuccess;
} // NOLINT(readability/fn_size)
diff --git a/chromium/v8/src/compiler/backend/arm64/instruction-codes-arm64.h b/chromium/v8/src/compiler/backend/arm64/instruction-codes-arm64.h
index a8e2b52c028..41f9d78550e 100644
--- a/chromium/v8/src/compiler/backend/arm64/instruction-codes-arm64.h
+++ b/chromium/v8/src/compiler/backend/arm64/instruction-codes-arm64.h
@@ -186,6 +186,12 @@ namespace compiler {
V(Arm64F64x2Le) \
V(Arm64F64x2Qfma) \
V(Arm64F64x2Qfms) \
+ V(Arm64F64x2Pmin) \
+ V(Arm64F64x2Pmax) \
+ V(Arm64F64x2RoundUp) \
+ V(Arm64F64x2RoundDown) \
+ V(Arm64F64x2RoundTruncate) \
+ V(Arm64F64x2RoundTiesEven) \
V(Arm64F32x4Splat) \
V(Arm64F32x4ExtractLane) \
V(Arm64F32x4ReplaceLane) \
@@ -209,6 +215,12 @@ namespace compiler {
V(Arm64F32x4Le) \
V(Arm64F32x4Qfma) \
V(Arm64F32x4Qfms) \
+ V(Arm64F32x4Pmin) \
+ V(Arm64F32x4Pmax) \
+ V(Arm64F32x4RoundUp) \
+ V(Arm64F32x4RoundDown) \
+ V(Arm64F32x4RoundTruncate) \
+ V(Arm64F32x4RoundTiesEven) \
V(Arm64I64x2Splat) \
V(Arm64I64x2ExtractLane) \
V(Arm64I64x2ReplaceLane) \
@@ -256,6 +268,7 @@ namespace compiler {
V(Arm64I32x4GeU) \
V(Arm64I32x4Abs) \
V(Arm64I32x4BitMask) \
+ V(Arm64I32x4DotI16x8S) \
V(Arm64I16x8Splat) \
V(Arm64I16x8ExtractLaneU) \
V(Arm64I16x8ExtractLaneS) \
@@ -361,14 +374,14 @@ namespace compiler {
V(Arm64S8x8Reverse) \
V(Arm64S8x4Reverse) \
V(Arm64S8x2Reverse) \
- V(Arm64S1x2AnyTrue) \
- V(Arm64S1x2AllTrue) \
- V(Arm64S1x4AnyTrue) \
- V(Arm64S1x4AllTrue) \
- V(Arm64S1x8AnyTrue) \
- V(Arm64S1x8AllTrue) \
- V(Arm64S1x16AnyTrue) \
- V(Arm64S1x16AllTrue) \
+ V(Arm64V64x2AnyTrue) \
+ V(Arm64V64x2AllTrue) \
+ V(Arm64V32x4AnyTrue) \
+ V(Arm64V32x4AllTrue) \
+ V(Arm64V16x8AnyTrue) \
+ V(Arm64V16x8AllTrue) \
+ V(Arm64V8x16AnyTrue) \
+ V(Arm64V8x16AllTrue) \
V(Arm64S8x16LoadSplat) \
V(Arm64S16x8LoadSplat) \
V(Arm64S32x4LoadSplat) \
diff --git a/chromium/v8/src/compiler/backend/arm64/instruction-scheduler-arm64.cc b/chromium/v8/src/compiler/backend/arm64/instruction-scheduler-arm64.cc
index 128ebdac957..3ea84730801 100644
--- a/chromium/v8/src/compiler/backend/arm64/instruction-scheduler-arm64.cc
+++ b/chromium/v8/src/compiler/backend/arm64/instruction-scheduler-arm64.cc
@@ -156,6 +156,12 @@ int InstructionScheduler::GetTargetInstructionFlags(
case kArm64F64x2Le:
case kArm64F64x2Qfma:
case kArm64F64x2Qfms:
+ case kArm64F64x2Pmin:
+ case kArm64F64x2Pmax:
+ case kArm64F64x2RoundUp:
+ case kArm64F64x2RoundDown:
+ case kArm64F64x2RoundTruncate:
+ case kArm64F64x2RoundTiesEven:
case kArm64F32x4Splat:
case kArm64F32x4ExtractLane:
case kArm64F32x4ReplaceLane:
@@ -179,6 +185,12 @@ int InstructionScheduler::GetTargetInstructionFlags(
case kArm64F32x4Le:
case kArm64F32x4Qfma:
case kArm64F32x4Qfms:
+ case kArm64F32x4Pmin:
+ case kArm64F32x4Pmax:
+ case kArm64F32x4RoundUp:
+ case kArm64F32x4RoundDown:
+ case kArm64F32x4RoundTruncate:
+ case kArm64F32x4RoundTiesEven:
case kArm64I64x2Splat:
case kArm64I64x2ExtractLane:
case kArm64I64x2ReplaceLane:
@@ -226,6 +238,7 @@ int InstructionScheduler::GetTargetInstructionFlags(
case kArm64I32x4GeU:
case kArm64I32x4Abs:
case kArm64I32x4BitMask:
+ case kArm64I32x4DotI16x8S:
case kArm64I16x8Splat:
case kArm64I16x8ExtractLaneU:
case kArm64I16x8ExtractLaneS:
@@ -331,14 +344,14 @@ int InstructionScheduler::GetTargetInstructionFlags(
case kArm64S8x8Reverse:
case kArm64S8x4Reverse:
case kArm64S8x2Reverse:
- case kArm64S1x2AnyTrue:
- case kArm64S1x2AllTrue:
- case kArm64S1x4AnyTrue:
- case kArm64S1x4AllTrue:
- case kArm64S1x8AnyTrue:
- case kArm64S1x8AllTrue:
- case kArm64S1x16AnyTrue:
- case kArm64S1x16AllTrue:
+ case kArm64V64x2AnyTrue:
+ case kArm64V64x2AllTrue:
+ case kArm64V32x4AnyTrue:
+ case kArm64V32x4AllTrue:
+ case kArm64V16x8AnyTrue:
+ case kArm64V16x8AllTrue:
+ case kArm64V8x16AnyTrue:
+ case kArm64V8x16AllTrue:
case kArm64TestAndBranch32:
case kArm64TestAndBranch:
case kArm64CompareAndBranch32:
diff --git a/chromium/v8/src/compiler/backend/arm64/instruction-selector-arm64.cc b/chromium/v8/src/compiler/backend/arm64/instruction-selector-arm64.cc
index 06a87a8aab7..2e0d977c3c7 100644
--- a/chromium/v8/src/compiler/backend/arm64/instruction-selector-arm64.cc
+++ b/chromium/v8/src/compiler/backend/arm64/instruction-selector-arm64.cc
@@ -163,13 +163,9 @@ void VisitSimdShiftRRR(InstructionSelector* selector, ArchOpcode opcode,
g.UseImmediate(node->InputAt(1)));
}
} else {
- InstructionOperand temps[] = {g.TempSimd128Register(), g.TempRegister()};
- // We only need a unique register for the first input (src), since in
- // the codegen we use tmp to store the shifts, and then later use it with
- // src. The second input can be the same as the second temp (shift).
selector->Emit(opcode, g.DefineAsRegister(node),
- g.UseUniqueRegister(node->InputAt(0)),
- g.UseRegister(node->InputAt(1)), arraysize(temps), temps);
+ g.UseRegister(node->InputAt(0)),
+ g.UseRegister(node->InputAt(1)));
}
}
@@ -608,18 +604,23 @@ void EmitLoad(InstructionSelector* selector, Node* node, InstructionCode opcode,
void InstructionSelector::VisitLoadTransform(Node* node) {
LoadTransformParameters params = LoadTransformParametersOf(node->op());
InstructionCode opcode = kArchNop;
+ bool require_add = false;
switch (params.transformation) {
case LoadTransformation::kS8x16LoadSplat:
opcode = kArm64S8x16LoadSplat;
+ require_add = true;
break;
case LoadTransformation::kS16x8LoadSplat:
opcode = kArm64S16x8LoadSplat;
+ require_add = true;
break;
case LoadTransformation::kS32x4LoadSplat:
opcode = kArm64S32x4LoadSplat;
+ require_add = true;
break;
case LoadTransformation::kS64x2LoadSplat:
opcode = kArm64S64x2LoadSplat;
+ require_add = true;
break;
case LoadTransformation::kI16x8Load8x8S:
opcode = kArm64I16x8Load8x8S;
@@ -655,13 +656,17 @@ void InstructionSelector::VisitLoadTransform(Node* node) {
inputs[1] = g.UseRegister(index);
outputs[0] = g.DefineAsRegister(node);
- // ld1r uses post-index, so construct address first.
- // TODO(v8:9886) If index can be immediate, use vldr without this add.
- InstructionOperand addr = g.TempRegister();
- Emit(kArm64Add, 1, &addr, 2, inputs);
- inputs[0] = addr;
- inputs[1] = g.TempImmediate(0);
- opcode |= AddressingModeField::encode(kMode_MRI);
+ if (require_add) {
+ // ld1r uses post-index, so construct address first.
+ // TODO(v8:9886) If index can be immediate, use vldr without this add.
+ InstructionOperand addr = g.TempRegister();
+ Emit(kArm64Add, 1, &addr, 2, inputs);
+ inputs[0] = addr;
+ inputs[1] = g.TempImmediate(0);
+ opcode |= AddressingModeField::encode(kMode_MRI);
+ } else {
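+ // The load-extend cases use Ldr, which takes a register-register address
+ // directly, so no explicit add is needed.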
+ opcode |= AddressingModeField::encode(kMode_MRR);
+ }
Emit(opcode, 1, outputs, 2, inputs);
}
@@ -1360,7 +1365,15 @@ void InstructionSelector::VisitWord64Ror(Node* node) {
V(Float64RoundTiesEven, kArm64Float64RoundTiesEven) \
V(Float64ExtractLowWord32, kArm64Float64ExtractLowWord32) \
V(Float64ExtractHighWord32, kArm64Float64ExtractHighWord32) \
- V(Float64SilenceNaN, kArm64Float64SilenceNaN)
+ V(Float64SilenceNaN, kArm64Float64SilenceNaN) \
+ V(F32x4Ceil, kArm64F32x4RoundUp) \
+ V(F32x4Floor, kArm64F32x4RoundDown) \
+ V(F32x4Trunc, kArm64F32x4RoundTruncate) \
+ V(F32x4NearestInt, kArm64F32x4RoundTiesEven) \
+ V(F64x2Ceil, kArm64F64x2RoundUp) \
+ V(F64x2Floor, kArm64F64x2RoundDown) \
+ V(F64x2Trunc, kArm64F64x2RoundTruncate) \
+ V(F64x2NearestInt, kArm64F64x2RoundTiesEven)
#define RRR_OP_LIST(V) \
V(Int32Div, kArm64Idiv32) \
@@ -3184,14 +3197,14 @@ void InstructionSelector::VisitInt64AbsWithOverflow(Node* node) {
V(I8x16Neg, kArm64I8x16Neg) \
V(I8x16Abs, kArm64I8x16Abs) \
V(S128Not, kArm64S128Not) \
- V(S1x2AnyTrue, kArm64S1x2AnyTrue) \
- V(S1x2AllTrue, kArm64S1x2AllTrue) \
- V(S1x4AnyTrue, kArm64S1x4AnyTrue) \
- V(S1x4AllTrue, kArm64S1x4AllTrue) \
- V(S1x8AnyTrue, kArm64S1x8AnyTrue) \
- V(S1x8AllTrue, kArm64S1x8AllTrue) \
- V(S1x16AnyTrue, kArm64S1x16AnyTrue) \
- V(S1x16AllTrue, kArm64S1x16AllTrue)
+ V(V64x2AnyTrue, kArm64V64x2AnyTrue) \
+ V(V64x2AllTrue, kArm64V64x2AllTrue) \
+ V(V32x4AnyTrue, kArm64V32x4AnyTrue) \
+ V(V32x4AllTrue, kArm64V32x4AllTrue) \
+ V(V16x8AnyTrue, kArm64V16x8AnyTrue) \
+ V(V16x8AllTrue, kArm64V16x8AllTrue) \
+ V(V8x16AnyTrue, kArm64V8x16AnyTrue) \
+ V(V8x16AllTrue, kArm64V8x16AllTrue)
#define SIMD_SHIFT_OP_LIST(V) \
V(I64x2Shl, 64) \
@@ -3249,6 +3262,7 @@ void InstructionSelector::VisitInt64AbsWithOverflow(Node* node) {
V(I32x4MaxU, kArm64I32x4MaxU) \
V(I32x4GtU, kArm64I32x4GtU) \
V(I32x4GeU, kArm64I32x4GeU) \
+ V(I32x4DotI16x8S, kArm64I32x4DotI16x8S) \
V(I16x8SConvertI32x4, kArm64I16x8SConvertI32x4) \
V(I16x8AddSaturateS, kArm64I16x8AddSaturateS) \
V(I16x8AddHoriz, kArm64I16x8AddHoriz) \
@@ -3613,6 +3627,34 @@ void InstructionSelector::VisitSignExtendWord32ToInt64(Node* node) {
VisitRR(this, kArm64Sxtw, node);
}
+namespace {
+void VisitPminOrPmax(InstructionSelector* selector, ArchOpcode opcode,
+ Node* node) {
+ Arm64OperandGenerator g(selector);
+ // Need all unique registers because we first compare the two inputs, then we
+ // need the inputs to remain unchanged for the bitselect later.
+ selector->Emit(opcode, g.DefineAsRegister(node),
+ g.UseUniqueRegister(node->InputAt(0)),
+ g.UseUniqueRegister(node->InputAt(1)));
+}
+} // namespace
+
+void InstructionSelector::VisitF32x4Pmin(Node* node) {
+ VisitPminOrPmax(this, kArm64F32x4Pmin, node);
+}
+
+void InstructionSelector::VisitF32x4Pmax(Node* node) {
+ VisitPminOrPmax(this, kArm64F32x4Pmax, node);
+}
+
+void InstructionSelector::VisitF64x2Pmin(Node* node) {
+ VisitPminOrPmax(this, kArm64F64x2Pmin, node);
+}
+
+void InstructionSelector::VisitF64x2Pmax(Node* node) {
+ VisitPminOrPmax(this, kArm64F64x2Pmax, node);
+}
+
// static
MachineOperatorBuilder::Flags
InstructionSelector::SupportedMachineOperatorFlags() {
diff --git a/chromium/v8/src/compiler/backend/code-generator.cc b/chromium/v8/src/compiler/backend/code-generator.cc
index 72c5750035a..83dccf69e82 100644
--- a/chromium/v8/src/compiler/backend/code-generator.cc
+++ b/chromium/v8/src/compiler/backend/code-generator.cc
@@ -55,19 +55,20 @@ CodeGenerator::CodeGenerator(
frame_access_state_(nullptr),
linkage_(linkage),
instructions_(instructions),
- unwinding_info_writer_(zone()),
+ unwinding_info_writer_(codegen_zone),
info_(info),
- labels_(zone()->NewArray<Label>(instructions->InstructionBlockCount())),
+ labels_(
+ codegen_zone->NewArray<Label>(instructions->InstructionBlockCount())),
current_block_(RpoNumber::Invalid()),
start_source_position_(start_source_position),
current_source_position_(SourcePosition::Unknown()),
tasm_(isolate, options, CodeObjectRequired::kNo, std::move(buffer)),
resolver_(this),
- safepoints_(zone()),
- handlers_(zone()),
- deoptimization_exits_(zone()),
- deoptimization_literals_(zone()),
- translations_(zone()),
+ safepoints_(codegen_zone),
+ handlers_(codegen_zone),
+ deoptimization_exits_(codegen_zone),
+ deoptimization_literals_(codegen_zone),
+ translations_(codegen_zone),
max_unoptimized_frame_height_(max_unoptimized_frame_height),
max_pushed_argument_count_(max_pushed_argument_count),
caller_registers_saved_(false),
@@ -77,12 +78,12 @@ CodeGenerator::CodeGenerator(
osr_pc_offset_(-1),
optimized_out_literal_id_(-1),
source_position_table_builder_(
- SourcePositionTableBuilder::RECORD_SOURCE_POSITIONS),
- protected_instructions_(zone()),
+ codegen_zone, SourcePositionTableBuilder::RECORD_SOURCE_POSITIONS),
+ protected_instructions_(codegen_zone),
result_(kSuccess),
poisoning_level_(poisoning_level),
- block_starts_(zone()),
- instr_starts_(zone()) {
+ block_starts_(codegen_zone),
+ instr_starts_(codegen_zone) {
for (int i = 0; i < instructions->InstructionBlockCount(); ++i) {
new (&labels_[i]) Label;
}
@@ -161,7 +162,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleDeoptimizerCall(
DeoptimizeReason deoptimization_reason = exit->reason();
Address deopt_entry =
Deoptimizer::GetDeoptimizationEntry(tasm()->isolate(), deopt_kind);
- if (info()->is_source_positions_enabled()) {
+ if (info()->source_positions()) {
tasm()->RecordDeoptReason(deoptimization_reason, exit->pos(),
deoptimization_id);
}
@@ -191,7 +192,7 @@ void CodeGenerator::AssembleCode() {
// the frame (that is done in AssemblePrologue).
FrameScope frame_scope(tasm(), StackFrame::MANUAL);
- if (info->is_source_positions_enabled()) {
+ if (info->source_positions()) {
AssembleSourcePosition(start_source_position());
}
offsets_info_.code_start_register_check = tasm()->pc_offset();
@@ -242,7 +243,7 @@ void CodeGenerator::AssembleCode() {
unwinding_info_writer_.SetNumberOfInstructionBlocks(
instructions()->InstructionBlockCount());
- if (info->trace_turbo_json_enabled()) {
+ if (info->trace_turbo_json()) {
block_starts_.assign(instructions()->instruction_blocks().size(), -1);
instr_starts_.assign(instructions()->instructions().size(), {});
}
@@ -253,7 +254,7 @@ void CodeGenerator::AssembleCode() {
if (block->ShouldAlign() && !tasm()->jump_optimization_info()) {
tasm()->CodeTargetAlign();
}
- if (info->trace_turbo_json_enabled()) {
+ if (info->trace_turbo_json()) {
block_starts_[block->rpo_number().ToInt()] = tasm()->pc_offset();
}
// Bind a label for a block.
@@ -503,6 +504,7 @@ MaybeHandle<Code> CodeGenerator::FinalizeCode() {
.set_deoptimization_data(deopt_data)
.set_is_turbofanned()
.set_stack_slots(frame()->GetTotalFrameSlotCount())
+ .set_profiler_data(info()->profiler_data())
.TryBuild();
Handle<Code> code;
@@ -721,7 +723,7 @@ RpoNumber CodeGenerator::ComputeBranchInfo(BranchInfo* branch,
CodeGenerator::CodeGenResult CodeGenerator::AssembleInstruction(
int instruction_index, const InstructionBlock* block) {
Instruction* instr = instructions()->InstructionAt(instruction_index);
- if (info()->trace_turbo_json_enabled()) {
+ if (info()->trace_turbo_json()) {
instr_starts_[instruction_index].gap_pc_offset = tasm()->pc_offset();
}
int first_unused_stack_slot;
@@ -741,14 +743,14 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleInstruction(
if (instr->IsJump() && block->must_deconstruct_frame()) {
AssembleDeconstructFrame();
}
- if (info()->trace_turbo_json_enabled()) {
+ if (info()->trace_turbo_json()) {
instr_starts_[instruction_index].arch_instr_pc_offset = tasm()->pc_offset();
}
// Assemble architecture-specific code for the instruction.
CodeGenResult result = AssembleArchInstruction(instr);
if (result != kSuccess) return result;
- if (info()->trace_turbo_json_enabled()) {
+ if (info()->trace_turbo_json()) {
instr_starts_[instruction_index].condition_pc_offset = tasm()->pc_offset();
}
@@ -832,7 +834,7 @@ void CodeGenerator::AssembleSourcePosition(SourcePosition source_position) {
buffer << "-- ";
// Turbolizer only needs the source position, as it can reconstruct
// the inlining stack from other information.
- if (info->trace_turbo_json_enabled() || !tasm()->isolate() ||
+ if (info->trace_turbo_json() || !tasm()->isolate() ||
tasm()->isolate()->concurrent_recompilation_enabled()) {
buffer << source_position;
} else {
@@ -979,7 +981,8 @@ void CodeGenerator::RecordCallPosition(Instruction* instr) {
InstructionOperandConverter i(this, instr);
RpoNumber handler_rpo = i.InputRpo(instr->InputCount() - 1);
DCHECK(instructions()->InstructionBlockAt(handler_rpo)->IsHandler());
- handlers_.push_back({GetLabel(handler_rpo), tasm()->pc_offset()});
+ handlers_.push_back(
+ {GetLabel(handler_rpo), tasm()->pc_offset_for_safepoint()});
}
if (needs_frame_state) {
@@ -989,7 +992,7 @@ void CodeGenerator::RecordCallPosition(Instruction* instr) {
size_t frame_state_offset = 2;
FrameStateDescriptor* descriptor =
GetDeoptimizationEntry(instr, frame_state_offset).descriptor();
- int pc_offset = tasm()->pc_offset();
+ int pc_offset = tasm()->pc_offset_for_safepoint();
BuildTranslation(instr, pc_offset, frame_state_offset,
descriptor->state_combine());
}
@@ -1329,7 +1332,7 @@ void CodeGenerator::InitializeSpeculationPoison() {
if (info()->called_with_code_start_register()) {
tasm()->RecordComment("-- Prologue: generate speculation poison --");
GenerateSpeculationPoisonFromCodeStartRegister();
- if (info()->is_poisoning_register_arguments()) {
+ if (info()->poison_register_arguments()) {
AssembleRegisterArgumentPoisoning();
}
} else {
diff --git a/chromium/v8/src/compiler/backend/ia32/code-generator-ia32.cc b/chromium/v8/src/compiler/backend/ia32/code-generator-ia32.cc
index c673458c753..f5a69eec3ea 100644
--- a/chromium/v8/src/compiler/backend/ia32/code-generator-ia32.cc
+++ b/chromium/v8/src/compiler/backend/ia32/code-generator-ia32.cc
@@ -2032,6 +2032,12 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
__ Maxpd(dst, dst, i.InputSimd128Register(1));
break;
}
+ case kIA32F64x2Round: {
+ RoundingMode const mode =
+ static_cast<RoundingMode>(MiscField::decode(instr->opcode()));
+ __ Roundpd(i.OutputSimd128Register(), i.InputDoubleRegister(0), mode);
+ break;
+ }
case kIA32I64x2SplatI32Pair: {
XMMRegister dst = i.OutputSimd128Register();
__ Pinsrd(dst, i.InputRegister(0), 0);
@@ -2442,6 +2448,12 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
__ Maxps(dst, dst, i.InputSimd128Register(1));
break;
}
+ case kIA32F32x4Round: {
+ RoundingMode const mode =
+ static_cast<RoundingMode>(MiscField::decode(instr->opcode()));
+ __ Roundps(i.OutputSimd128Register(), i.InputDoubleRegister(0), mode);
+ break;
+ }
case kIA32I32x4Splat: {
XMMRegister dst = i.OutputSimd128Register();
__ Movd(dst, i.InputOperand(0));
@@ -2795,6 +2807,11 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
__ Movmskps(i.OutputRegister(), i.InputSimd128Register(0));
break;
}
+ case kIA32I32x4DotI16x8S: {
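+ // pmaddwd multiplies signed 16-bit pairs and adds adjacent 32-bit
+ // products, which matches i32x4.dot_i16x8_s exactly.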
+ __ Pmaddwd(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputSimd128Register(1));
+ break;
+ }
case kIA32I16x8Splat: {
XMMRegister dst = i.OutputSimd128Register();
__ Movd(dst, i.InputOperand(0));
@@ -3687,7 +3704,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
// Out-of-range indices should return 0, add 112 so that any value > 15
// saturates to 128 (top bit set), so pshufb will zero that lane.
- __ Move(mask, (uint32_t)0x70707070);
+ __ Move(mask, uint32_t{0x70707070});
__ Pshufd(mask, mask, 0x0);
__ Paddusb(mask, i.InputSimd128Register(1));
__ Pshufb(dst, mask);
@@ -4094,9 +4111,9 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
__ vpor(dst, dst, kScratchDoubleReg);
break;
}
- case kIA32S1x4AnyTrue:
- case kIA32S1x8AnyTrue:
- case kIA32S1x16AnyTrue: {
+ case kIA32V32x4AnyTrue:
+ case kIA32V16x8AnyTrue:
+ case kIA32V8x16AnyTrue: {
Register dst = i.OutputRegister();
XMMRegister src = i.InputSimd128Register(0);
Register tmp = i.TempRegister(0);
@@ -4110,13 +4127,13 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
// comparison instruction used matters, e.g. given 0xff00, pcmpeqb returns
// 0x0011, pcmpeqw returns 0x0000, ptest will set ZF to 0 and 1
// respectively.
- case kIA32S1x4AllTrue:
+ case kIA32V32x4AllTrue:
ASSEMBLE_SIMD_ALL_TRUE(Pcmpeqd);
break;
- case kIA32S1x8AllTrue:
+ case kIA32V16x8AllTrue:
ASSEMBLE_SIMD_ALL_TRUE(pcmpeqw);
break;
- case kIA32S1x16AllTrue: {
+ case kIA32V8x16AllTrue: {
ASSEMBLE_SIMD_ALL_TRUE(pcmpeqb);
break;
}
diff --git a/chromium/v8/src/compiler/backend/ia32/instruction-codes-ia32.h b/chromium/v8/src/compiler/backend/ia32/instruction-codes-ia32.h
index d347d672021..4c49539c4e9 100644
--- a/chromium/v8/src/compiler/backend/ia32/instruction-codes-ia32.h
+++ b/chromium/v8/src/compiler/backend/ia32/instruction-codes-ia32.h
@@ -136,6 +136,7 @@ namespace compiler {
V(IA32F64x2Le) \
V(IA32F64x2Pmin) \
V(IA32F64x2Pmax) \
+ V(IA32F64x2Round) \
V(IA32I64x2SplatI32Pair) \
V(IA32I64x2ReplaceLaneI32Pair) \
V(IA32I64x2Neg) \
@@ -186,6 +187,7 @@ namespace compiler {
V(AVXF32x4Le) \
V(IA32F32x4Pmin) \
V(IA32F32x4Pmax) \
+ V(IA32F32x4Round) \
V(IA32I32x4Splat) \
V(IA32I32x4ExtractLane) \
V(SSEI32x4ReplaceLane) \
@@ -232,6 +234,7 @@ namespace compiler {
V(AVXI32x4GeU) \
V(IA32I32x4Abs) \
V(IA32I32x4BitMask) \
+ V(IA32I32x4DotI16x8S) \
V(IA32I16x8Splat) \
V(IA32I16x8ExtractLaneU) \
V(IA32I16x8ExtractLaneS) \
@@ -396,12 +399,12 @@ namespace compiler {
V(AVXS8x4Reverse) \
V(SSES8x2Reverse) \
V(AVXS8x2Reverse) \
- V(IA32S1x4AnyTrue) \
- V(IA32S1x4AllTrue) \
- V(IA32S1x8AnyTrue) \
- V(IA32S1x8AllTrue) \
- V(IA32S1x16AnyTrue) \
- V(IA32S1x16AllTrue) \
+ V(IA32V32x4AnyTrue) \
+ V(IA32V32x4AllTrue) \
+ V(IA32V16x8AnyTrue) \
+ V(IA32V16x8AllTrue) \
+ V(IA32V8x16AnyTrue) \
+ V(IA32V8x16AllTrue) \
V(IA32Word32AtomicPairLoad) \
V(IA32Word32AtomicPairStore) \
V(IA32Word32AtomicPairAdd) \
diff --git a/chromium/v8/src/compiler/backend/ia32/instruction-scheduler-ia32.cc b/chromium/v8/src/compiler/backend/ia32/instruction-scheduler-ia32.cc
index 52f0b0356ff..6d0062ba09e 100644
--- a/chromium/v8/src/compiler/backend/ia32/instruction-scheduler-ia32.cc
+++ b/chromium/v8/src/compiler/backend/ia32/instruction-scheduler-ia32.cc
@@ -117,6 +117,7 @@ int InstructionScheduler::GetTargetInstructionFlags(
case kIA32F64x2Le:
case kIA32F64x2Pmin:
case kIA32F64x2Pmax:
+ case kIA32F64x2Round:
case kIA32I64x2SplatI32Pair:
case kIA32I64x2ReplaceLaneI32Pair:
case kIA32I64x2Neg:
@@ -167,6 +168,7 @@ int InstructionScheduler::GetTargetInstructionFlags(
case kAVXF32x4Le:
case kIA32F32x4Pmin:
case kIA32F32x4Pmax:
+ case kIA32F32x4Round:
case kIA32I32x4Splat:
case kIA32I32x4ExtractLane:
case kSSEI32x4ReplaceLane:
@@ -213,6 +215,7 @@ int InstructionScheduler::GetTargetInstructionFlags(
case kAVXI32x4GeU:
case kIA32I32x4Abs:
case kIA32I32x4BitMask:
+ case kIA32I32x4DotI16x8S:
case kIA32I16x8Splat:
case kIA32I16x8ExtractLaneU:
case kIA32I16x8ExtractLaneS:
@@ -367,12 +370,12 @@ int InstructionScheduler::GetTargetInstructionFlags(
case kAVXS8x4Reverse:
case kSSES8x2Reverse:
case kAVXS8x2Reverse:
- case kIA32S1x4AnyTrue:
- case kIA32S1x4AllTrue:
- case kIA32S1x8AnyTrue:
- case kIA32S1x8AllTrue:
- case kIA32S1x16AnyTrue:
- case kIA32S1x16AllTrue:
+ case kIA32V32x4AnyTrue:
+ case kIA32V32x4AllTrue:
+ case kIA32V16x8AnyTrue:
+ case kIA32V16x8AllTrue:
+ case kIA32V8x16AnyTrue:
+ case kIA32V8x16AllTrue:
return (instr->addressing_mode() == kMode_None)
? kNoOpcodeFlags
: kIsLoadOperation | kHasSideEffect;
diff --git a/chromium/v8/src/compiler/backend/ia32/instruction-selector-ia32.cc b/chromium/v8/src/compiler/backend/ia32/instruction-selector-ia32.cc
index c50464f4b86..5ed7c24e6bf 100644
--- a/chromium/v8/src/compiler/backend/ia32/instruction-selector-ia32.cc
+++ b/chromium/v8/src/compiler/backend/ia32/instruction-selector-ia32.cc
@@ -277,6 +277,23 @@ void VisitRRSimd(InstructionSelector* selector, Node* node,
}
}
+// TODO(v8:9198): Like VisitRROFloat, but for SIMD. SSE requires operand1 to be
+// a register as we don't have memory alignment yet. For AVX, memory operands
+// are fine, but can have performance issues if not aligned to 16/32 bytes
+// (based on load size), see SDM Vol 1, chapter 14.9
+void VisitRROSimd(InstructionSelector* selector, Node* node,
+ ArchOpcode avx_opcode, ArchOpcode sse_opcode) {
+ IA32OperandGenerator g(selector);
+ InstructionOperand operand0 = g.UseRegister(node->InputAt(0));
+ if (selector->IsSupported(AVX)) {
+ selector->Emit(avx_opcode, g.DefineAsRegister(node), operand0,
+ g.Use(node->InputAt(1)));
+ } else {
+ selector->Emit(sse_opcode, g.DefineSameAsFirst(node), operand0,
+ g.UseRegister(node->InputAt(1)));
+ }
+}
+
void VisitRRISimd(InstructionSelector* selector, Node* node,
ArchOpcode opcode) {
IA32OperandGenerator g(selector);
@@ -941,7 +958,16 @@ void InstructionSelector::VisitWord32Ror(Node* node) {
V(Float64RoundTruncate, kSSEFloat64Round | MiscField::encode(kRoundToZero)) \
V(Float32RoundTiesEven, \
kSSEFloat32Round | MiscField::encode(kRoundToNearest)) \
- V(Float64RoundTiesEven, kSSEFloat64Round | MiscField::encode(kRoundToNearest))
+ V(Float64RoundTiesEven, \
+ kSSEFloat64Round | MiscField::encode(kRoundToNearest)) \
+ V(F32x4Ceil, kIA32F32x4Round | MiscField::encode(kRoundUp)) \
+ V(F32x4Floor, kIA32F32x4Round | MiscField::encode(kRoundDown)) \
+ V(F32x4Trunc, kIA32F32x4Round | MiscField::encode(kRoundToZero)) \
+ V(F32x4NearestInt, kIA32F32x4Round | MiscField::encode(kRoundToNearest)) \
+ V(F64x2Ceil, kIA32F64x2Round | MiscField::encode(kRoundUp)) \
+ V(F64x2Floor, kIA32F64x2Round | MiscField::encode(kRoundDown)) \
+ V(F64x2Trunc, kIA32F64x2Round | MiscField::encode(kRoundToZero)) \
+ V(F64x2NearestInt, kIA32F64x2Round | MiscField::encode(kRoundToNearest))
#define RRO_FLOAT_OP_LIST(V) \
V(Float32Add, kAVXFloat32Add, kSSEFloat32Add) \
@@ -2100,6 +2126,7 @@ void InstructionSelector::VisitWord32AtomicPairCompareExchange(Node* node) {
#define SIMD_BINOP_UNIFIED_SSE_AVX_LIST(V) \
V(I64x2Add) \
V(I64x2Sub) \
+ V(I32x4DotI16x8S) \
V(I16x8RoundingAverageU) \
V(I8x16RoundingAverageU)
@@ -2131,14 +2158,14 @@ void InstructionSelector::VisitWord32AtomicPairCompareExchange(Node* node) {
V(S128Not)
#define SIMD_ANYTRUE_LIST(V) \
- V(S1x4AnyTrue) \
- V(S1x8AnyTrue) \
- V(S1x16AnyTrue)
+ V(V32x4AnyTrue) \
+ V(V16x8AnyTrue) \
+ V(V8x16AnyTrue)
#define SIMD_ALLTRUE_LIST(V) \
- V(S1x4AllTrue) \
- V(S1x8AllTrue) \
- V(S1x16AllTrue)
+ V(V32x4AllTrue) \
+ V(V16x8AllTrue) \
+ V(V8x16AllTrue)
#define SIMD_SHIFT_OPCODES_UNIFED_SSE_AVX(V) \
V(I64x2Shl) \
@@ -2372,10 +2399,15 @@ SIMD_SHIFT_OPCODES_UNIFED_SSE_AVX(VISIT_SIMD_SHIFT_UNIFIED_SSE_AVX)
#undef VISIT_SIMD_SHIFT_UNIFIED_SSE_AVX
#undef SIMD_SHIFT_OPCODES_UNIFED_SSE_AVX
-#define VISIT_SIMD_UNOP(Opcode) \
- void InstructionSelector::Visit##Opcode(Node* node) { \
- IA32OperandGenerator g(this); \
- Emit(kIA32##Opcode, g.DefineAsRegister(node), g.Use(node->InputAt(0))); \
+// TODO(v8:9198): SSE requires operand0 to be a register as we don't have memory
+// alignment yet. For AVX, memory operands are fine, but can have performance
+// issues if not aligned to 16/32 bytes (based on load size), see SDM Vol 1,
+// chapter 14.9
+#define VISIT_SIMD_UNOP(Opcode) \
+ void InstructionSelector::Visit##Opcode(Node* node) { \
+ IA32OperandGenerator g(this); \
+ Emit(kIA32##Opcode, g.DefineAsRegister(node), \
+ g.UseRegister(node->InputAt(0))); \
}
SIMD_UNOP_LIST(VISIT_SIMD_UNOP)
#undef VISIT_SIMD_UNOP
@@ -2407,23 +2439,23 @@ SIMD_ANYTRUE_LIST(VISIT_SIMD_ANYTRUE)
IA32OperandGenerator g(this); \
InstructionOperand temps[] = {g.TempRegister(), g.TempSimd128Register()}; \
Emit(kIA32##Opcode, g.DefineAsRegister(node), \
- g.UseUnique(node->InputAt(0)), arraysize(temps), temps); \
+ g.UseUniqueRegister(node->InputAt(0)), arraysize(temps), temps); \
}
SIMD_ALLTRUE_LIST(VISIT_SIMD_ALLTRUE)
#undef VISIT_SIMD_ALLTRUE
#undef SIMD_ALLTRUE_LIST
-#define VISIT_SIMD_BINOP(Opcode) \
- void InstructionSelector::Visit##Opcode(Node* node) { \
- VisitRROFloat(this, node, kAVX##Opcode, kSSE##Opcode); \
+#define VISIT_SIMD_BINOP(Opcode) \
+ void InstructionSelector::Visit##Opcode(Node* node) { \
+ VisitRROSimd(this, node, kAVX##Opcode, kSSE##Opcode); \
}
SIMD_BINOP_LIST(VISIT_SIMD_BINOP)
#undef VISIT_SIMD_BINOP
#undef SIMD_BINOP_LIST
-#define VISIT_SIMD_BINOP_UNIFIED_SSE_AVX(Opcode) \
- void InstructionSelector::Visit##Opcode(Node* node) { \
- VisitRROFloat(this, node, kIA32##Opcode, kIA32##Opcode); \
+#define VISIT_SIMD_BINOP_UNIFIED_SSE_AVX(Opcode) \
+ void InstructionSelector::Visit##Opcode(Node* node) { \
+ VisitRROSimd(this, node, kIA32##Opcode, kIA32##Opcode); \
}
SIMD_BINOP_UNIFIED_SSE_AVX_LIST(VISIT_SIMD_BINOP_UNIFIED_SSE_AVX)
#undef VISIT_SIMD_BINOP_UNIFIED_SSE_AVX
diff --git a/chromium/v8/src/compiler/backend/instruction-selector-impl.h b/chromium/v8/src/compiler/backend/instruction-selector-impl.h
index aa7da85e42b..7e1f183fb71 100644
--- a/chromium/v8/src/compiler/backend/instruction-selector-impl.h
+++ b/chromium/v8/src/compiler/backend/instruction-selector-impl.h
@@ -356,6 +356,8 @@ class OperandGenerator {
case MachineRepresentation::kCompressed:
case MachineRepresentation::kCompressedPointer:
return Constant(static_cast<int32_t>(0));
+ case MachineRepresentation::kWord64:
+ return Constant(static_cast<int64_t>(0));
case MachineRepresentation::kFloat64:
return Constant(static_cast<double>(0));
case MachineRepresentation::kFloat32:
diff --git a/chromium/v8/src/compiler/backend/instruction-selector.cc b/chromium/v8/src/compiler/backend/instruction-selector.cc
index c2022b574ee..8ad88b946b4 100644
--- a/chromium/v8/src/compiler/backend/instruction-selector.cc
+++ b/chromium/v8/src/compiler/backend/instruction-selector.cc
@@ -1043,7 +1043,8 @@ void InstructionSelector::InitializeCallBuffer(Node* call, CallBuffer* buffer,
InstructionOperand op = g.UseLocation(*iter, location);
UnallocatedOperand unallocated = UnallocatedOperand::cast(op);
if (unallocated.HasFixedSlotPolicy() && !call_tail) {
- int stack_index = -unallocated.fixed_slot_index() - 1;
+ int stack_index = buffer->descriptor->GetStackIndexFromSlot(
+ unallocated.fixed_slot_index());
// This can insert empty slots before stack_index and will insert enough
// slots after stack_index to store the parameter.
if (static_cast<size_t>(stack_index) >= buffer->pushed_nodes.size()) {
@@ -1888,6 +1889,14 @@ void InstructionSelector::VisitNode(Node* node) {
return MarkAsSimd128(node), VisitF64x2Pmin(node);
case IrOpcode::kF64x2Pmax:
return MarkAsSimd128(node), VisitF64x2Pmax(node);
+ case IrOpcode::kF64x2Ceil:
+ return MarkAsSimd128(node), VisitF64x2Ceil(node);
+ case IrOpcode::kF64x2Floor:
+ return MarkAsSimd128(node), VisitF64x2Floor(node);
+ case IrOpcode::kF64x2Trunc:
+ return MarkAsSimd128(node), VisitF64x2Trunc(node);
+ case IrOpcode::kF64x2NearestInt:
+ return MarkAsSimd128(node), VisitF64x2NearestInt(node);
case IrOpcode::kF32x4Splat:
return MarkAsSimd128(node), VisitF32x4Splat(node);
case IrOpcode::kF32x4ExtractLane:
@@ -1938,6 +1947,14 @@ void InstructionSelector::VisitNode(Node* node) {
return MarkAsSimd128(node), VisitF32x4Pmin(node);
case IrOpcode::kF32x4Pmax:
return MarkAsSimd128(node), VisitF32x4Pmax(node);
+ case IrOpcode::kF32x4Ceil:
+ return MarkAsSimd128(node), VisitF32x4Ceil(node);
+ case IrOpcode::kF32x4Floor:
+ return MarkAsSimd128(node), VisitF32x4Floor(node);
+ case IrOpcode::kF32x4Trunc:
+ return MarkAsSimd128(node), VisitF32x4Trunc(node);
+ case IrOpcode::kF32x4NearestInt:
+ return MarkAsSimd128(node), VisitF32x4NearestInt(node);
case IrOpcode::kI64x2Splat:
return MarkAsSimd128(node), VisitI64x2Splat(node);
case IrOpcode::kI64x2SplatI32Pair:
@@ -2040,6 +2057,8 @@ void InstructionSelector::VisitNode(Node* node) {
return MarkAsSimd128(node), VisitI32x4Abs(node);
case IrOpcode::kI32x4BitMask:
return MarkAsWord32(node), VisitI32x4BitMask(node);
+ case IrOpcode::kI32x4DotI16x8S:
+ return MarkAsSimd128(node), VisitI32x4DotI16x8S(node);
case IrOpcode::kI16x8Splat:
return MarkAsSimd128(node), VisitI16x8Splat(node);
case IrOpcode::kI16x8ExtractLaneU:
@@ -2188,22 +2207,22 @@ void InstructionSelector::VisitNode(Node* node) {
return MarkAsSimd128(node), VisitS8x16Swizzle(node);
case IrOpcode::kS8x16Shuffle:
return MarkAsSimd128(node), VisitS8x16Shuffle(node);
- case IrOpcode::kS1x2AnyTrue:
- return MarkAsWord32(node), VisitS1x2AnyTrue(node);
- case IrOpcode::kS1x2AllTrue:
- return MarkAsWord32(node), VisitS1x2AllTrue(node);
- case IrOpcode::kS1x4AnyTrue:
- return MarkAsWord32(node), VisitS1x4AnyTrue(node);
- case IrOpcode::kS1x4AllTrue:
- return MarkAsWord32(node), VisitS1x4AllTrue(node);
- case IrOpcode::kS1x8AnyTrue:
- return MarkAsWord32(node), VisitS1x8AnyTrue(node);
- case IrOpcode::kS1x8AllTrue:
- return MarkAsWord32(node), VisitS1x8AllTrue(node);
- case IrOpcode::kS1x16AnyTrue:
- return MarkAsWord32(node), VisitS1x16AnyTrue(node);
- case IrOpcode::kS1x16AllTrue:
- return MarkAsWord32(node), VisitS1x16AllTrue(node);
+ case IrOpcode::kV64x2AnyTrue:
+ return MarkAsWord32(node), VisitV64x2AnyTrue(node);
+ case IrOpcode::kV64x2AllTrue:
+ return MarkAsWord32(node), VisitV64x2AllTrue(node);
+ case IrOpcode::kV32x4AnyTrue:
+ return MarkAsWord32(node), VisitV32x4AnyTrue(node);
+ case IrOpcode::kV32x4AllTrue:
+ return MarkAsWord32(node), VisitV32x4AllTrue(node);
+ case IrOpcode::kV16x8AnyTrue:
+ return MarkAsWord32(node), VisitV16x8AnyTrue(node);
+ case IrOpcode::kV16x8AllTrue:
+ return MarkAsWord32(node), VisitV16x8AllTrue(node);
+ case IrOpcode::kV8x16AnyTrue:
+ return MarkAsWord32(node), VisitV8x16AnyTrue(node);
+ case IrOpcode::kV8x16AllTrue:
+ return MarkAsWord32(node), VisitV8x16AllTrue(node);
default:
FATAL("Unexpected operator #%d:%s @ node #%d", node->opcode(),
node->op()->mnemonic(), node->id());
@@ -2638,8 +2657,8 @@ void InstructionSelector::VisitI64x2GtS(Node* node) { UNIMPLEMENTED(); }
void InstructionSelector::VisitI64x2GeS(Node* node) { UNIMPLEMENTED(); }
void InstructionSelector::VisitI64x2GtU(Node* node) { UNIMPLEMENTED(); }
void InstructionSelector::VisitI64x2GeU(Node* node) { UNIMPLEMENTED(); }
-void InstructionSelector::VisitS1x2AnyTrue(Node* node) { UNIMPLEMENTED(); }
-void InstructionSelector::VisitS1x2AllTrue(Node* node) { UNIMPLEMENTED(); }
+void InstructionSelector::VisitV64x2AnyTrue(Node* node) { UNIMPLEMENTED(); }
+void InstructionSelector::VisitV64x2AllTrue(Node* node) { UNIMPLEMENTED(); }
void InstructionSelector::VisitF64x2Qfma(Node* node) { UNIMPLEMENTED(); }
void InstructionSelector::VisitF64x2Qfms(Node* node) { UNIMPLEMENTED(); }
void InstructionSelector::VisitF32x4Qfma(Node* node) { UNIMPLEMENTED(); }
@@ -2651,23 +2670,45 @@ void InstructionSelector::VisitI64x2MinU(Node* node) { UNIMPLEMENTED(); }
void InstructionSelector::VisitI64x2MaxU(Node* node) { UNIMPLEMENTED(); }
#endif // !V8_TARGET_ARCH_X64 && !V8_TARGET_ARCH_S390X
+#if !V8_TARGET_ARCH_ARM64 && !V8_TARGET_ARCH_ARM && !V8_TARGET_ARCH_IA32 && \
+ !V8_TARGET_ARCH_X64 && !V8_TARGET_ARCH_S390X && !V8_TARGET_ARCH_MIPS && \
+ !V8_TARGET_ARCH_MIPS64
// TODO(v8:10308) Bitmask operations are in prototype now, we can remove these
// guards when they go into the proposal.
-#if !V8_TARGET_ARCH_ARM64 && !V8_TARGET_ARCH_ARM && !V8_TARGET_ARCH_IA32 && \
- !V8_TARGET_ARCH_X64
void InstructionSelector::VisitI8x16BitMask(Node* node) { UNIMPLEMENTED(); }
void InstructionSelector::VisitI16x8BitMask(Node* node) { UNIMPLEMENTED(); }
void InstructionSelector::VisitI32x4BitMask(Node* node) { UNIMPLEMENTED(); }
-#endif // !V8_TARGET_ARCH_ARM64 && !V8_TARGET_ARCH_ARM && !V8_TARGET_ARCH_IA32
- // && !V8_TARGET_ARCH_X64
-
// TODO(v8:10501) Prototyping pmin and pmax instructions.
-#if !V8_TARGET_ARCH_X64 && !V8_TARGET_ARCH_IA32
void InstructionSelector::VisitF32x4Pmin(Node* node) { UNIMPLEMENTED(); }
void InstructionSelector::VisitF32x4Pmax(Node* node) { UNIMPLEMENTED(); }
void InstructionSelector::VisitF64x2Pmin(Node* node) { UNIMPLEMENTED(); }
void InstructionSelector::VisitF64x2Pmax(Node* node) { UNIMPLEMENTED(); }
-#endif // !V8_TARGET_ARCH_X64 && !V8_TARGET_ARCH_IA32
+#endif // !V8_TARGET_ARCH_ARM64 && !V8_TARGET_ARCH_ARM && !V8_TARGET_ARCH_IA32
+ // && !V8_TARGET_ARCH_X64 && !V8_TARGET_ARCH_S390X &&
+ // !V8_TARGET_ARCH_MIPS && !V8_TARGET_ARCH_MIPS64
+
+#if !V8_TARGET_ARCH_X64 && !V8_TARGET_ARCH_ARM64 && !V8_TARGET_ARCH_S390X && \
+ !V8_TARGET_ARCH_IA32
+// TODO(v8:10553) Prototyping floating point rounding instructions.
+// TODO(zhin): Temporary, convoluted way to handle unimplemented opcodes on
+// ARM as we are implementing them one at a time.
+#if !V8_TARGET_ARCH_ARM
+void InstructionSelector::VisitF32x4Ceil(Node* node) { UNIMPLEMENTED(); }
+void InstructionSelector::VisitF32x4Floor(Node* node) { UNIMPLEMENTED(); }
+void InstructionSelector::VisitF32x4Trunc(Node* node) { UNIMPLEMENTED(); }
+#endif // !V8_TARGET_ARCH_ARM
+void InstructionSelector::VisitF64x2Ceil(Node* node) { UNIMPLEMENTED(); }
+void InstructionSelector::VisitF64x2Floor(Node* node) { UNIMPLEMENTED(); }
+void InstructionSelector::VisitF64x2Trunc(Node* node) { UNIMPLEMENTED(); }
+void InstructionSelector::VisitF64x2NearestInt(Node* node) { UNIMPLEMENTED(); }
+void InstructionSelector::VisitF32x4NearestInt(Node* node) { UNIMPLEMENTED(); }
+#endif // !V8_TARGET_ARCH_X64 && !V8_TARGET_ARCH_ARM64 && !V8_TARGET_ARCH_S390X
+ // && !V8_TARGET_ARCH_IA32
+
+#if !V8_TARGET_ARCH_X64 && !V8_TARGET_ARCH_IA32 && !V8_TARGET_ARCH_ARM64
+// TODO(v8:10583) Prototype i32x4.dot_i16x8_s
+void InstructionSelector::VisitI32x4DotI16x8S(Node* node) { UNIMPLEMENTED(); }
+#endif // !V8_TARGET_ARCH_X64 && !V8_TARGET_ARCH_IA32 && !V8_TARGET_ARCH_ARM64
void InstructionSelector::VisitFinishRegion(Node* node) { EmitIdentity(node); }
@@ -2808,7 +2849,7 @@ void InstructionSelector::VisitCall(Node* node, BasicBlock* handler) {
switch (call_descriptor->kind()) {
case CallDescriptor::kCallAddress: {
int misc_field = static_cast<int>(call_descriptor->ParameterCount());
-#if defined(_AIX)
+#if ABI_USES_FUNCTION_DESCRIPTORS
// Highest misc_field bit is used on AIX to indicate if a CFunction call
// has function descriptor or not.
if (!call_descriptor->NoFunctionDescriptor()) {
@@ -3038,7 +3079,7 @@ void InstructionSelector::VisitUnreachable(Node* node) {
void InstructionSelector::VisitStaticAssert(Node* node) {
Node* asserted = node->InputAt(0);
- asserted->Print(2);
+ asserted->Print(4);
FATAL("Expected turbofan static assert to hold, but got non-true input!\n");
}
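Note on the VisitCall hunk above: the low bits of misc_field carry the C call's parameter count, and on ABIs with function descriptors the highest flag bit records whether the callee has one; the PPC code generator later in this patch decodes it with kHasFunctionDescriptorBitMask. A minimal sketch of that packing, with the helper names invented here for illustration only:

// Illustrative only: these helpers are not part of V8, they merely restate the
// bit layout implied by the patch. The mask value is passed in, not guessed.
#include <cstdint>

int32_t EncodeMiscField(int32_t parameter_count, bool has_function_descriptor,
                        int32_t kHasFunctionDescriptorBitMask) {
  int32_t misc_field = parameter_count;
  if (has_function_descriptor) misc_field |= kHasFunctionDescriptorBitMask;
  return misc_field;
}

int32_t DecodeParameterCount(int32_t misc_field,
                             int32_t kHasFunctionDescriptorBitMask) {
  // Mirrors "num_parameters = kNumParametersMask & misc_field" in the PPC
  // code generator, where kNumParametersMask = kHasFunctionDescriptorBitMask - 1.
  return (kHasFunctionDescriptorBitMask - 1) & misc_field;
}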
diff --git a/chromium/v8/src/compiler/backend/instruction.h b/chromium/v8/src/compiler/backend/instruction.h
index e189100c346..f40a4198f81 100644
--- a/chromium/v8/src/compiler/backend/instruction.h
+++ b/chromium/v8/src/compiler/backend/instruction.h
@@ -1536,7 +1536,7 @@ class V8_EXPORT_PRIVATE InstructionSequence final
return virtual_register;
}
Constant GetConstant(int virtual_register) const {
- ConstantMap::const_iterator it = constants_.find(virtual_register);
+ auto it = constants_.find(virtual_register);
DCHECK(it != constants_.end());
DCHECK_EQ(virtual_register, it->first);
return it->second;
diff --git a/chromium/v8/src/compiler/backend/mips/code-generator-mips.cc b/chromium/v8/src/compiler/backend/mips/code-generator-mips.cc
index c83a4e28ee1..b9c1eb11d92 100644
--- a/chromium/v8/src/compiler/backend/mips/code-generator-mips.cc
+++ b/chromium/v8/src/compiler/backend/mips/code-generator-mips.cc
@@ -2159,6 +2159,26 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
__ insert_w(dst, i.InputInt8(1) * 2 + 1, kScratchReg);
break;
}
+ case kMipsF64x2Pmin: {
+ CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
+ Simd128Register dst = i.OutputSimd128Register();
+ Simd128Register lhs = i.InputSimd128Register(0);
+ Simd128Register rhs = i.InputSimd128Register(1);
+ // dst = rhs < lhs ? rhs : lhs
+ __ fclt_d(dst, rhs, lhs);
+ __ bsel_v(dst, lhs, rhs);
+ break;
+ }
+ case kMipsF64x2Pmax: {
+ CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
+ Simd128Register dst = i.OutputSimd128Register();
+ Simd128Register lhs = i.InputSimd128Register(0);
+ Simd128Register rhs = i.InputSimd128Register(1);
+ // dst = lhs < rhs ? rhs : lhs
+ __ fclt_d(dst, lhs, rhs);
+ __ bsel_v(dst, lhs, rhs);
+ break;
+ }
case kMipsI64x2Add: {
CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
__ addv_d(i.OutputSimd128Register(), i.InputSimd128Register(0),
@@ -2395,6 +2415,26 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
i.InputSimd128Register(1));
break;
}
+ case kMipsF32x4Pmin: {
+ CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
+ Simd128Register dst = i.OutputSimd128Register();
+ Simd128Register lhs = i.InputSimd128Register(0);
+ Simd128Register rhs = i.InputSimd128Register(1);
+ // dst = rhs < lhs ? rhs : lhs
+ __ fclt_w(dst, rhs, lhs);
+ __ bsel_v(dst, lhs, rhs);
+ break;
+ }
+ case kMipsF32x4Pmax: {
+ CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
+ Simd128Register dst = i.OutputSimd128Register();
+ Simd128Register lhs = i.InputSimd128Register(0);
+ Simd128Register rhs = i.InputSimd128Register(1);
+ // dst = lhs < rhs ? rhs : lhs
+ __ fclt_w(dst, lhs, rhs);
+ __ bsel_v(dst, lhs, rhs);
+ break;
+ }
case kMipsI32x4SConvertF32x4: {
CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
__ ftrunc_s_w(i.OutputSimd128Register(), i.InputSimd128Register(0));
@@ -2442,6 +2482,21 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
kSimd128RegZero);
break;
}
+ case kMipsI32x4BitMask: {
+ CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
+ Register dst = i.OutputRegister();
+ Simd128Register src = i.InputSimd128Register(0);
+ Simd128Register scratch0 = kSimd128RegZero;
+ Simd128Register scratch1 = kSimd128ScratchReg;
+ __ srli_w(scratch0, src, 31);
+ __ srli_d(scratch1, scratch0, 31);
+ __ or_v(scratch0, scratch0, scratch1);
+ __ shf_w(scratch1, scratch0, 0x0E);
+ __ slli_d(scratch1, scratch1, 2);
+ __ or_v(scratch0, scratch0, scratch1);
+ __ copy_u_b(dst, scratch0, 0);
+ break;
+ }
case kMipsI16x8Splat: {
CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
__ fill_h(i.OutputSimd128Register(), i.InputRegister(0));
@@ -2609,6 +2664,23 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
kSimd128RegZero);
break;
}
+ case kMipsI16x8BitMask: {
+ CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
+ Register dst = i.OutputRegister();
+ Simd128Register src = i.InputSimd128Register(0);
+ Simd128Register scratch0 = kSimd128RegZero;
+ Simd128Register scratch1 = kSimd128ScratchReg;
+ __ srli_h(scratch0, src, 15);
+ __ srli_w(scratch1, scratch0, 15);
+ __ or_v(scratch0, scratch0, scratch1);
+ __ srli_d(scratch1, scratch0, 30);
+ __ or_v(scratch0, scratch0, scratch1);
+ __ shf_w(scratch1, scratch0, 0x0E);
+ __ slli_d(scratch1, scratch1, 4);
+ __ or_v(scratch0, scratch0, scratch1);
+ __ copy_u_b(dst, scratch0, 0);
+ break;
+ }
case kMipsI8x16Splat: {
CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
__ fill_b(i.OutputSimd128Register(), i.InputRegister(0));
@@ -2776,6 +2848,24 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
kSimd128RegZero);
break;
}
+ case kMipsI8x16BitMask: {
+ CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
+ Register dst = i.OutputRegister();
+ Simd128Register src = i.InputSimd128Register(0);
+ Simd128Register scratch0 = kSimd128RegZero;
+ Simd128Register scratch1 = kSimd128ScratchReg;
+ __ srli_b(scratch0, src, 7);
+ __ srli_h(scratch1, scratch0, 7);
+ __ or_v(scratch0, scratch0, scratch1);
+ __ srli_w(scratch1, scratch0, 14);
+ __ or_v(scratch0, scratch0, scratch1);
+ __ srli_d(scratch1, scratch0, 28);
+ __ or_v(scratch0, scratch0, scratch1);
+ __ shf_w(scratch1, scratch0, 0x0E);
+ __ ilvev_b(scratch0, scratch1, scratch0);
+ __ copy_u_h(dst, scratch0, 0);
+ break;
+ }
case kMipsS128And: {
CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
__ and_v(i.OutputSimd128Register(), i.InputSimd128Register(0),
@@ -2800,9 +2890,9 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
i.InputSimd128Register(0));
break;
}
- case kMipsS1x4AnyTrue:
- case kMipsS1x8AnyTrue:
- case kMipsS1x16AnyTrue: {
+ case kMipsV32x4AnyTrue:
+ case kMipsV16x8AnyTrue:
+ case kMipsV8x16AnyTrue: {
CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
Register dst = i.OutputRegister();
Label all_false;
@@ -2814,7 +2904,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
__ bind(&all_false);
break;
}
- case kMipsS1x4AllTrue: {
+ case kMipsV32x4AllTrue: {
CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
Register dst = i.OutputRegister();
Label all_true;
@@ -2825,7 +2915,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
__ bind(&all_true);
break;
}
- case kMipsS1x8AllTrue: {
+ case kMipsV16x8AllTrue: {
CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
Register dst = i.OutputRegister();
Label all_true;
@@ -2836,7 +2926,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
__ bind(&all_true);
break;
}
- case kMipsS1x16AllTrue: {
+ case kMipsV8x16AllTrue: {
CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
Register dst = i.OutputRegister();
Label all_true;
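The new kMips*BitMask cases above collect each lane's sign bit into a scalar register by shifting the sign bits to the bottom of progressively wider lanes and OR-folding before the final copy_u_b/copy_u_h. A scalar model of what the i8x16 variant computes (the value, not the MSA method used above):

// Scalar reference: bit n of the result is the most significant bit of lane n.
#include <cstdint>

uint32_t I8x16BitMask(const int8_t lanes[16]) {
  uint32_t mask = 0;
  for (int n = 0; n < 16; n++) {
    if (lanes[n] < 0) mask |= uint32_t{1} << n;
  }
  return mask;
}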
diff --git a/chromium/v8/src/compiler/backend/mips/instruction-codes-mips.h b/chromium/v8/src/compiler/backend/mips/instruction-codes-mips.h
index 0a37dd70683..27418935dd3 100644
--- a/chromium/v8/src/compiler/backend/mips/instruction-codes-mips.h
+++ b/chromium/v8/src/compiler/backend/mips/instruction-codes-mips.h
@@ -155,6 +155,8 @@ namespace compiler {
V(MipsF64x2Ne) \
V(MipsF64x2Lt) \
V(MipsF64x2Le) \
+ V(MipsF64x2Pmin) \
+ V(MipsF64x2Pmax) \
V(MipsI64x2Add) \
V(MipsI64x2Sub) \
V(MipsI64x2Mul) \
@@ -196,6 +198,8 @@ namespace compiler {
V(MipsF32x4Ne) \
V(MipsF32x4Lt) \
V(MipsF32x4Le) \
+ V(MipsF32x4Pmin) \
+ V(MipsF32x4Pmax) \
V(MipsI32x4SConvertF32x4) \
V(MipsI32x4UConvertF32x4) \
V(MipsI32x4Neg) \
@@ -204,6 +208,7 @@ namespace compiler {
V(MipsI32x4GtU) \
V(MipsI32x4GeU) \
V(MipsI32x4Abs) \
+ V(MipsI32x4BitMask) \
V(MipsI16x8Splat) \
V(MipsI16x8ExtractLaneU) \
V(MipsI16x8ExtractLaneS) \
@@ -232,6 +237,7 @@ namespace compiler {
V(MipsI16x8GeU) \
V(MipsI16x8RoundingAverageU) \
V(MipsI16x8Abs) \
+ V(MipsI16x8BitMask) \
V(MipsI8x16Splat) \
V(MipsI8x16ExtractLaneU) \
V(MipsI8x16ExtractLaneS) \
@@ -259,18 +265,19 @@ namespace compiler {
V(MipsI8x16GeU) \
V(MipsI8x16RoundingAverageU) \
V(MipsI8x16Abs) \
+ V(MipsI8x16BitMask) \
V(MipsS128And) \
V(MipsS128Or) \
V(MipsS128Xor) \
V(MipsS128Not) \
V(MipsS128Select) \
V(MipsS128AndNot) \
- V(MipsS1x4AnyTrue) \
- V(MipsS1x4AllTrue) \
- V(MipsS1x8AnyTrue) \
- V(MipsS1x8AllTrue) \
- V(MipsS1x16AnyTrue) \
- V(MipsS1x16AllTrue) \
+ V(MipsV32x4AnyTrue) \
+ V(MipsV32x4AllTrue) \
+ V(MipsV16x8AnyTrue) \
+ V(MipsV16x8AllTrue) \
+ V(MipsV8x16AnyTrue) \
+ V(MipsV8x16AllTrue) \
V(MipsS32x4InterleaveRight) \
V(MipsS32x4InterleaveLeft) \
V(MipsS32x4PackEven) \
diff --git a/chromium/v8/src/compiler/backend/mips/instruction-scheduler-mips.cc b/chromium/v8/src/compiler/backend/mips/instruction-scheduler-mips.cc
index 81bbfbbfb9b..5180a1d4ed0 100644
--- a/chromium/v8/src/compiler/backend/mips/instruction-scheduler-mips.cc
+++ b/chromium/v8/src/compiler/backend/mips/instruction-scheduler-mips.cc
@@ -57,6 +57,8 @@ int InstructionScheduler::GetTargetInstructionFlags(
case kMipsF64x2Splat:
case kMipsF64x2ExtractLane:
case kMipsF64x2ReplaceLane:
+ case kMipsF64x2Pmin:
+ case kMipsF64x2Pmax:
case kMipsI64x2Add:
case kMipsI64x2Sub:
case kMipsI64x2Mul:
@@ -85,6 +87,8 @@ int InstructionScheduler::GetTargetInstructionFlags(
case kMipsF32x4Splat:
case kMipsF32x4Sub:
case kMipsF32x4UConvertI32x4:
+ case kMipsF32x4Pmin:
+ case kMipsF32x4Pmax:
case kMipsFloat32Max:
case kMipsFloat32Min:
case kMipsFloat32RoundDown:
@@ -138,6 +142,7 @@ int InstructionScheduler::GetTargetInstructionFlags(
case kMipsI16x8UConvertI8x16High:
case kMipsI16x8UConvertI8x16Low:
case kMipsI16x8Abs:
+ case kMipsI16x8BitMask:
case kMipsI32x4Add:
case kMipsI32x4AddHoriz:
case kMipsI32x4Eq:
@@ -166,6 +171,7 @@ int InstructionScheduler::GetTargetInstructionFlags(
case kMipsI32x4UConvertI16x8High:
case kMipsI32x4UConvertI16x8Low:
case kMipsI32x4Abs:
+ case kMipsI32x4BitMask:
case kMipsI8x16Add:
case kMipsI8x16AddSaturateS:
case kMipsI8x16AddSaturateU:
@@ -195,6 +201,7 @@ int InstructionScheduler::GetTargetInstructionFlags(
case kMipsI8x16SubSaturateU:
case kMipsI8x16UConvertI16x8:
case kMipsI8x16Abs:
+ case kMipsI8x16BitMask:
case kMipsIns:
case kMipsLsa:
case kMipsMaddD:
@@ -238,12 +245,12 @@ int InstructionScheduler::GetTargetInstructionFlags(
case kMipsS16x8InterleaveRight:
case kMipsS16x8PackEven:
case kMipsS16x8PackOdd:
- case kMipsS1x16AllTrue:
- case kMipsS1x16AnyTrue:
- case kMipsS1x4AllTrue:
- case kMipsS1x4AnyTrue:
- case kMipsS1x8AllTrue:
- case kMipsS1x8AnyTrue:
+ case kMipsV8x16AllTrue:
+ case kMipsV8x16AnyTrue:
+ case kMipsV32x4AllTrue:
+ case kMipsV32x4AnyTrue:
+ case kMipsV16x8AllTrue:
+ case kMipsV16x8AnyTrue:
case kMipsS32x4InterleaveEven:
case kMipsS32x4InterleaveLeft:
case kMipsS32x4InterleaveOdd:
diff --git a/chromium/v8/src/compiler/backend/mips/instruction-selector-mips.cc b/chromium/v8/src/compiler/backend/mips/instruction-selector-mips.cc
index dac94fae272..2785186b827 100644
--- a/chromium/v8/src/compiler/backend/mips/instruction-selector-mips.cc
+++ b/chromium/v8/src/compiler/backend/mips/instruction-selector-mips.cc
@@ -113,6 +113,14 @@ static void VisitRRR(InstructionSelector* selector, ArchOpcode opcode,
g.UseRegister(node->InputAt(1)));
}
+static void VisitUniqueRRR(InstructionSelector* selector, ArchOpcode opcode,
+ Node* node) {
+ MipsOperandGenerator g(selector);
+ selector->Emit(opcode, g.DefineAsRegister(node),
+ g.UseUniqueRegister(node->InputAt(0)),
+ g.UseUniqueRegister(node->InputAt(1)));
+}
+
void VisitRRRR(InstructionSelector* selector, ArchOpcode opcode, Node* node) {
MipsOperandGenerator g(selector);
selector->Emit(
@@ -2111,12 +2119,12 @@ void InstructionSelector::VisitInt64AbsWithOverflow(Node* node) {
V(I16x8UConvertI8x16High, kMipsI16x8UConvertI8x16High) \
V(I8x16Neg, kMipsI8x16Neg) \
V(S128Not, kMipsS128Not) \
- V(S1x4AnyTrue, kMipsS1x4AnyTrue) \
- V(S1x4AllTrue, kMipsS1x4AllTrue) \
- V(S1x8AnyTrue, kMipsS1x8AnyTrue) \
- V(S1x8AllTrue, kMipsS1x8AllTrue) \
- V(S1x16AnyTrue, kMipsS1x16AnyTrue) \
- V(S1x16AllTrue, kMipsS1x16AllTrue)
+ V(V32x4AnyTrue, kMipsV32x4AnyTrue) \
+ V(V32x4AllTrue, kMipsV32x4AllTrue) \
+ V(V16x8AnyTrue, kMipsV16x8AnyTrue) \
+ V(V16x8AllTrue, kMipsV16x8AllTrue) \
+ V(V8x16AnyTrue, kMipsV8x16AnyTrue) \
+ V(V8x16AllTrue, kMipsV8x16AllTrue)
#define SIMD_SHIFT_OP_LIST(V) \
V(I64x2Shl) \
@@ -2172,6 +2180,7 @@ void InstructionSelector::VisitInt64AbsWithOverflow(Node* node) {
V(I32x4GtU, kMipsI32x4GtU) \
V(I32x4GeU, kMipsI32x4GeU) \
V(I32x4Abs, kMipsI32x4Abs) \
+ V(I32x4BitMask, kMipsI32x4BitMask) \
V(I16x8Add, kMipsI16x8Add) \
V(I16x8AddSaturateS, kMipsI16x8AddSaturateS) \
V(I16x8AddSaturateU, kMipsI16x8AddSaturateU) \
@@ -2194,6 +2203,7 @@ void InstructionSelector::VisitInt64AbsWithOverflow(Node* node) {
V(I16x8UConvertI32x4, kMipsI16x8UConvertI32x4) \
V(I16x8RoundingAverageU, kMipsI16x8RoundingAverageU) \
V(I16x8Abs, kMipsI16x8Abs) \
+ V(I16x8BitMask, kMipsI16x8BitMask) \
V(I8x16Add, kMipsI8x16Add) \
V(I8x16AddSaturateS, kMipsI8x16AddSaturateS) \
V(I8x16AddSaturateU, kMipsI8x16AddSaturateU) \
@@ -2215,6 +2225,7 @@ void InstructionSelector::VisitInt64AbsWithOverflow(Node* node) {
V(I8x16SConvertI16x8, kMipsI8x16SConvertI16x8) \
V(I8x16UConvertI16x8, kMipsI8x16UConvertI16x8) \
V(I8x16Abs, kMipsI8x16Abs) \
+ V(I8x16BitMask, kMipsI8x16BitMask) \
V(S128And, kMipsS128And) \
V(S128Or, kMipsS128Or) \
V(S128Xor, kMipsS128Xor) \
@@ -2406,6 +2417,22 @@ void InstructionSelector::VisitSignExtendWord16ToInt32(Node* node) {
Emit(kMipsSeh, g.DefineAsRegister(node), g.UseRegister(node->InputAt(0)));
}
+void InstructionSelector::VisitF32x4Pmin(Node* node) {
+ VisitUniqueRRR(this, kMipsF32x4Pmin, node);
+}
+
+void InstructionSelector::VisitF32x4Pmax(Node* node) {
+ VisitUniqueRRR(this, kMipsF32x4Pmax, node);
+}
+
+void InstructionSelector::VisitF64x2Pmin(Node* node) {
+ VisitUniqueRRR(this, kMipsF64x2Pmin, node);
+}
+
+void InstructionSelector::VisitF64x2Pmax(Node* node) {
+ VisitUniqueRRR(this, kMipsF64x2Pmax, node);
+}
+
// static
MachineOperatorBuilder::Flags
InstructionSelector::SupportedMachineOperatorFlags() {
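The new VisitF32x4Pmin/Pmax and VisitF64x2Pmin/Pmax visitors go through VisitUniqueRRR rather than VisitRRR, presumably because the fclt + bsel lowering in the code generator writes the destination before the inputs are last read, so the inputs must not share a register with the output. For reference, the pseudo-min/max semantics that the "dst = rhs < lhs ? rhs : lhs" comments encode, as a scalar sketch:

// Pseudo-minimum/maximum per lane: unlike IEEE min/max, the first operand is
// returned whenever the comparison is false (which covers NaN inputs), and no
// -0.0 / +0.0 canonicalization is attempted.
double F64x2PminLane(double lhs, double rhs) { return (rhs < lhs) ? rhs : lhs; }
double F64x2PmaxLane(double lhs, double rhs) { return (lhs < rhs) ? rhs : lhs; }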
diff --git a/chromium/v8/src/compiler/backend/mips64/code-generator-mips64.cc b/chromium/v8/src/compiler/backend/mips64/code-generator-mips64.cc
index 197167c01cd..9acd6459de5 100644
--- a/chromium/v8/src/compiler/backend/mips64/code-generator-mips64.cc
+++ b/chromium/v8/src/compiler/backend/mips64/code-generator-mips64.cc
@@ -2265,6 +2265,26 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
i.InputInt8(1));
break;
}
+ case kMips64F64x2Pmin: {
+ CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
+ Simd128Register dst = i.OutputSimd128Register();
+ Simd128Register lhs = i.InputSimd128Register(0);
+ Simd128Register rhs = i.InputSimd128Register(1);
+ // dst = rhs < lhs ? rhs : lhs
+ __ fclt_d(dst, rhs, lhs);
+ __ bsel_v(dst, lhs, rhs);
+ break;
+ }
+ case kMips64F64x2Pmax: {
+ CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
+ Simd128Register dst = i.OutputSimd128Register();
+ Simd128Register lhs = i.InputSimd128Register(0);
+ Simd128Register rhs = i.InputSimd128Register(1);
+ // dst = lhs < rhs ? rhs : lhs
+ __ fclt_d(dst, lhs, rhs);
+ __ bsel_v(dst, lhs, rhs);
+ break;
+ }
case kMips64I64x2ReplaceLane: {
CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
Simd128Register src = i.InputSimd128Register(0);
@@ -2581,6 +2601,26 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
i.InputSimd128Register(1));
break;
}
+ case kMips64F32x4Pmin: {
+ CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
+ Simd128Register dst = i.OutputSimd128Register();
+ Simd128Register lhs = i.InputSimd128Register(0);
+ Simd128Register rhs = i.InputSimd128Register(1);
+ // dst = rhs < lhs ? rhs : lhs
+ __ fclt_w(dst, rhs, lhs);
+ __ bsel_v(dst, lhs, rhs);
+ break;
+ }
+ case kMips64F32x4Pmax: {
+ CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
+ Simd128Register dst = i.OutputSimd128Register();
+ Simd128Register lhs = i.InputSimd128Register(0);
+ Simd128Register rhs = i.InputSimd128Register(1);
+ // dst = lhs < rhs ? rhs : lhs
+ __ fclt_w(dst, lhs, rhs);
+ __ bsel_v(dst, lhs, rhs);
+ break;
+ }
case kMips64I32x4SConvertF32x4: {
CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
__ ftrunc_s_w(i.OutputSimd128Register(), i.InputSimd128Register(0));
@@ -2634,6 +2674,21 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
kSimd128RegZero);
break;
}
+ case kMips64I32x4BitMask: {
+ CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
+ Register dst = i.OutputRegister();
+ Simd128Register src = i.InputSimd128Register(0);
+ Simd128Register scratch0 = kSimd128RegZero;
+ Simd128Register scratch1 = kSimd128ScratchReg;
+ __ srli_w(scratch0, src, 31);
+ __ srli_d(scratch1, scratch0, 31);
+ __ or_v(scratch0, scratch0, scratch1);
+ __ shf_w(scratch1, scratch0, 0x0E);
+ __ slli_d(scratch1, scratch1, 2);
+ __ or_v(scratch0, scratch0, scratch1);
+ __ copy_u_b(dst, scratch0, 0);
+ break;
+ }
case kMips64I16x8Splat: {
CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
__ fill_h(i.OutputSimd128Register(), i.InputRegister(0));
@@ -2820,6 +2875,23 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
kSimd128RegZero);
break;
}
+ case kMips64I16x8BitMask: {
+ CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
+ Register dst = i.OutputRegister();
+ Simd128Register src = i.InputSimd128Register(0);
+ Simd128Register scratch0 = kSimd128RegZero;
+ Simd128Register scratch1 = kSimd128ScratchReg;
+ __ srli_h(scratch0, src, 15);
+ __ srli_w(scratch1, scratch0, 15);
+ __ or_v(scratch0, scratch0, scratch1);
+ __ srli_d(scratch1, scratch0, 30);
+ __ or_v(scratch0, scratch0, scratch1);
+ __ shf_w(scratch1, scratch0, 0x0E);
+ __ slli_d(scratch1, scratch1, 4);
+ __ or_v(scratch0, scratch0, scratch1);
+ __ copy_u_b(dst, scratch0, 0);
+ break;
+ }
case kMips64I8x16Splat: {
CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
__ fill_b(i.OutputSimd128Register(), i.InputRegister(0));
@@ -3006,6 +3078,24 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
kSimd128RegZero);
break;
}
+ case kMips64I8x16BitMask: {
+ CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
+ Register dst = i.OutputRegister();
+ Simd128Register src = i.InputSimd128Register(0);
+ Simd128Register scratch0 = kSimd128RegZero;
+ Simd128Register scratch1 = kSimd128ScratchReg;
+ __ srli_b(scratch0, src, 7);
+ __ srli_h(scratch1, scratch0, 7);
+ __ or_v(scratch0, scratch0, scratch1);
+ __ srli_w(scratch1, scratch0, 14);
+ __ or_v(scratch0, scratch0, scratch1);
+ __ srli_d(scratch1, scratch0, 28);
+ __ or_v(scratch0, scratch0, scratch1);
+ __ shf_w(scratch1, scratch0, 0x0E);
+ __ ilvev_b(scratch0, scratch1, scratch0);
+ __ copy_u_h(dst, scratch0, 0);
+ break;
+ }
case kMips64S128And: {
CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
__ and_v(i.OutputSimd128Register(), i.InputSimd128Register(0),
@@ -3030,9 +3120,9 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
i.InputSimd128Register(0));
break;
}
- case kMips64S1x4AnyTrue:
- case kMips64S1x8AnyTrue:
- case kMips64S1x16AnyTrue: {
+ case kMips64V32x4AnyTrue:
+ case kMips64V16x8AnyTrue:
+ case kMips64V8x16AnyTrue: {
CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
Register dst = i.OutputRegister();
Label all_false;
@@ -3043,7 +3133,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
__ bind(&all_false);
break;
}
- case kMips64S1x4AllTrue: {
+ case kMips64V32x4AllTrue: {
CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
Register dst = i.OutputRegister();
Label all_true;
@@ -3054,7 +3144,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
__ bind(&all_true);
break;
}
- case kMips64S1x8AllTrue: {
+ case kMips64V16x8AllTrue: {
CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
Register dst = i.OutputRegister();
Label all_true;
@@ -3065,7 +3155,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
__ bind(&all_true);
break;
}
- case kMips64S1x16AllTrue: {
+ case kMips64V8x16AllTrue: {
CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
Register dst = i.OutputRegister();
Label all_true;
diff --git a/chromium/v8/src/compiler/backend/mips64/instruction-codes-mips64.h b/chromium/v8/src/compiler/backend/mips64/instruction-codes-mips64.h
index 9303b4572f3..0c42c059ea5 100644
--- a/chromium/v8/src/compiler/backend/mips64/instruction-codes-mips64.h
+++ b/chromium/v8/src/compiler/backend/mips64/instruction-codes-mips64.h
@@ -203,6 +203,8 @@ namespace compiler {
V(Mips64F64x2Splat) \
V(Mips64F64x2ExtractLane) \
V(Mips64F64x2ReplaceLane) \
+ V(Mips64F64x2Pmin) \
+ V(Mips64F64x2Pmax) \
V(Mips64I64x2Splat) \
V(Mips64I64x2ExtractLane) \
V(Mips64I64x2ReplaceLane) \
@@ -229,6 +231,8 @@ namespace compiler {
V(Mips64F32x4Ne) \
V(Mips64F32x4Lt) \
V(Mips64F32x4Le) \
+ V(Mips64F32x4Pmin) \
+ V(Mips64F32x4Pmax) \
V(Mips64I32x4SConvertF32x4) \
V(Mips64I32x4UConvertF32x4) \
V(Mips64I32x4Neg) \
@@ -237,6 +241,7 @@ namespace compiler {
V(Mips64I32x4GtU) \
V(Mips64I32x4GeU) \
V(Mips64I32x4Abs) \
+ V(Mips64I32x4BitMask) \
V(Mips64I16x8Splat) \
V(Mips64I16x8ExtractLaneU) \
V(Mips64I16x8ExtractLaneS) \
@@ -265,6 +270,7 @@ namespace compiler {
V(Mips64I16x8GeU) \
V(Mips64I16x8RoundingAverageU) \
V(Mips64I16x8Abs) \
+ V(Mips64I16x8BitMask) \
V(Mips64I8x16Splat) \
V(Mips64I8x16ExtractLaneU) \
V(Mips64I8x16ExtractLaneS) \
@@ -292,18 +298,19 @@ namespace compiler {
V(Mips64I8x16GeU) \
V(Mips64I8x16RoundingAverageU) \
V(Mips64I8x16Abs) \
+ V(Mips64I8x16BitMask) \
V(Mips64S128And) \
V(Mips64S128Or) \
V(Mips64S128Xor) \
V(Mips64S128Not) \
V(Mips64S128Select) \
V(Mips64S128AndNot) \
- V(Mips64S1x4AnyTrue) \
- V(Mips64S1x4AllTrue) \
- V(Mips64S1x8AnyTrue) \
- V(Mips64S1x8AllTrue) \
- V(Mips64S1x16AnyTrue) \
- V(Mips64S1x16AllTrue) \
+ V(Mips64V32x4AnyTrue) \
+ V(Mips64V32x4AllTrue) \
+ V(Mips64V16x8AnyTrue) \
+ V(Mips64V16x8AllTrue) \
+ V(Mips64V8x16AnyTrue) \
+ V(Mips64V8x16AllTrue) \
V(Mips64S32x4InterleaveRight) \
V(Mips64S32x4InterleaveLeft) \
V(Mips64S32x4PackEven) \
diff --git a/chromium/v8/src/compiler/backend/mips64/instruction-scheduler-mips64.cc b/chromium/v8/src/compiler/backend/mips64/instruction-scheduler-mips64.cc
index 81fc3b2ca9a..2f8a2722015 100644
--- a/chromium/v8/src/compiler/backend/mips64/instruction-scheduler-mips64.cc
+++ b/chromium/v8/src/compiler/backend/mips64/instruction-scheduler-mips64.cc
@@ -82,6 +82,8 @@ int InstructionScheduler::GetTargetInstructionFlags(
case kMips64F64x2Ne:
case kMips64F64x2Lt:
case kMips64F64x2Le:
+ case kMips64F64x2Pmin:
+ case kMips64F64x2Pmax:
case kMips64I64x2Splat:
case kMips64I64x2ExtractLane:
case kMips64I64x2ReplaceLane:
@@ -113,6 +115,8 @@ int InstructionScheduler::GetTargetInstructionFlags(
case kMips64F32x4Splat:
case kMips64F32x4Sub:
case kMips64F32x4UConvertI32x4:
+ case kMips64F32x4Pmin:
+ case kMips64F32x4Pmax:
case kMips64F64x2Splat:
case kMips64F64x2ExtractLane:
case kMips64F64x2ReplaceLane:
@@ -171,6 +175,7 @@ int InstructionScheduler::GetTargetInstructionFlags(
case kMips64I16x8UConvertI8x16Low:
case kMips64I16x8RoundingAverageU:
case kMips64I16x8Abs:
+ case kMips64I16x8BitMask:
case kMips64I32x4Add:
case kMips64I32x4AddHoriz:
case kMips64I32x4Eq:
@@ -199,6 +204,7 @@ int InstructionScheduler::GetTargetInstructionFlags(
case kMips64I32x4UConvertI16x8High:
case kMips64I32x4UConvertI16x8Low:
case kMips64I32x4Abs:
+ case kMips64I32x4BitMask:
case kMips64I8x16Add:
case kMips64I8x16AddSaturateS:
case kMips64I8x16AddSaturateU:
@@ -226,6 +232,7 @@ int InstructionScheduler::GetTargetInstructionFlags(
case kMips64I8x16SubSaturateU:
case kMips64I8x16RoundingAverageU:
case kMips64I8x16Abs:
+ case kMips64I8x16BitMask:
case kMips64Ins:
case kMips64Lsa:
case kMips64MaxD:
@@ -265,12 +272,12 @@ int InstructionScheduler::GetTargetInstructionFlags(
case kMips64S16x8PackOdd:
case kMips64S16x2Reverse:
case kMips64S16x4Reverse:
- case kMips64S1x16AllTrue:
- case kMips64S1x16AnyTrue:
- case kMips64S1x4AllTrue:
- case kMips64S1x4AnyTrue:
- case kMips64S1x8AllTrue:
- case kMips64S1x8AnyTrue:
+ case kMips64V8x16AllTrue:
+ case kMips64V8x16AnyTrue:
+ case kMips64V32x4AllTrue:
+ case kMips64V32x4AnyTrue:
+ case kMips64V16x8AllTrue:
+ case kMips64V16x8AnyTrue:
case kMips64S32x4InterleaveEven:
case kMips64S32x4InterleaveOdd:
case kMips64S32x4InterleaveLeft:
diff --git a/chromium/v8/src/compiler/backend/mips64/instruction-selector-mips64.cc b/chromium/v8/src/compiler/backend/mips64/instruction-selector-mips64.cc
index 719a916b6a5..2c9c8d439b6 100644
--- a/chromium/v8/src/compiler/backend/mips64/instruction-selector-mips64.cc
+++ b/chromium/v8/src/compiler/backend/mips64/instruction-selector-mips64.cc
@@ -163,6 +163,14 @@ static void VisitRRR(InstructionSelector* selector, ArchOpcode opcode,
g.UseRegister(node->InputAt(1)));
}
+static void VisitUniqueRRR(InstructionSelector* selector, ArchOpcode opcode,
+ Node* node) {
+ Mips64OperandGenerator g(selector);
+ selector->Emit(opcode, g.DefineAsRegister(node),
+ g.UseUniqueRegister(node->InputAt(0)),
+ g.UseUniqueRegister(node->InputAt(1)));
+}
+
void VisitRRRR(InstructionSelector* selector, ArchOpcode opcode, Node* node) {
Mips64OperandGenerator g(selector);
selector->Emit(
@@ -2778,21 +2786,24 @@ void InstructionSelector::VisitInt64AbsWithOverflow(Node* node) {
V(I32x4UConvertI16x8Low, kMips64I32x4UConvertI16x8Low) \
V(I32x4UConvertI16x8High, kMips64I32x4UConvertI16x8High) \
V(I32x4Abs, kMips64I32x4Abs) \
+ V(I32x4BitMask, kMips64I32x4BitMask) \
V(I16x8Neg, kMips64I16x8Neg) \
V(I16x8SConvertI8x16Low, kMips64I16x8SConvertI8x16Low) \
V(I16x8SConvertI8x16High, kMips64I16x8SConvertI8x16High) \
V(I16x8UConvertI8x16Low, kMips64I16x8UConvertI8x16Low) \
V(I16x8UConvertI8x16High, kMips64I16x8UConvertI8x16High) \
V(I16x8Abs, kMips64I16x8Abs) \
+ V(I16x8BitMask, kMips64I16x8BitMask) \
V(I8x16Neg, kMips64I8x16Neg) \
V(I8x16Abs, kMips64I8x16Abs) \
+ V(I8x16BitMask, kMips64I8x16BitMask) \
V(S128Not, kMips64S128Not) \
- V(S1x4AnyTrue, kMips64S1x4AnyTrue) \
- V(S1x4AllTrue, kMips64S1x4AllTrue) \
- V(S1x8AnyTrue, kMips64S1x8AnyTrue) \
- V(S1x8AllTrue, kMips64S1x8AllTrue) \
- V(S1x16AnyTrue, kMips64S1x16AnyTrue) \
- V(S1x16AllTrue, kMips64S1x16AllTrue)
+ V(V32x4AnyTrue, kMips64V32x4AnyTrue) \
+ V(V32x4AllTrue, kMips64V32x4AllTrue) \
+ V(V16x8AnyTrue, kMips64V16x8AnyTrue) \
+ V(V16x8AllTrue, kMips64V16x8AllTrue) \
+ V(V8x16AnyTrue, kMips64V8x16AnyTrue) \
+ V(V8x16AllTrue, kMips64V8x16AllTrue)
#define SIMD_SHIFT_OP_LIST(V) \
V(I64x2Shl) \
@@ -3099,6 +3110,22 @@ void InstructionSelector::VisitSignExtendWord32ToInt64(Node* node) {
g.TempImmediate(0));
}
+void InstructionSelector::VisitF32x4Pmin(Node* node) {
+ VisitUniqueRRR(this, kMips64F32x4Pmin, node);
+}
+
+void InstructionSelector::VisitF32x4Pmax(Node* node) {
+ VisitUniqueRRR(this, kMips64F32x4Pmax, node);
+}
+
+void InstructionSelector::VisitF64x2Pmin(Node* node) {
+ VisitUniqueRRR(this, kMips64F64x2Pmin, node);
+}
+
+void InstructionSelector::VisitF64x2Pmax(Node* node) {
+ VisitUniqueRRR(this, kMips64F64x2Pmax, node);
+}
+
// static
MachineOperatorBuilder::Flags
InstructionSelector::SupportedMachineOperatorFlags() {
diff --git a/chromium/v8/src/compiler/backend/ppc/code-generator-ppc.cc b/chromium/v8/src/compiler/backend/ppc/code-generator-ppc.cc
index b7fece3f72d..56c5003d2e8 100644
--- a/chromium/v8/src/compiler/backend/ppc/code-generator-ppc.cc
+++ b/chromium/v8/src/compiler/backend/ppc/code-generator-ppc.cc
@@ -1039,7 +1039,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
linkage()->GetIncomingDescriptor()->IsWasmCapiFunction();
int offset = (FLAG_enable_embedded_constant_pool ? 20 : 23) * kInstrSize;
-#if defined(_AIX)
+#if ABI_USES_FUNCTION_DESCRIPTORS
// AIX/PPC64BE Linux uses a function descriptor
int kNumParametersMask = kHasFunctionDescriptorBitMask - 1;
num_parameters = kNumParametersMask & misc_field;
@@ -2164,6 +2164,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
__ vspltb(kScratchDoubleReg, kScratchDoubleReg, Operand(7));
__ vsro(dst, dst, kScratchDoubleReg);
// reload
+ __ vxor(kScratchDoubleReg, kScratchDoubleReg, kScratchDoubleReg);
__ mtvsrd(kScratchDoubleReg, r0);
__ vor(dst, dst, kScratchDoubleReg);
break;
@@ -2186,6 +2187,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
__ vspltb(kScratchDoubleReg, kScratchDoubleReg, Operand(7));
__ vsro(dst, dst, kScratchDoubleReg);
// reload
+ __ vxor(kScratchDoubleReg, kScratchDoubleReg, kScratchDoubleReg);
__ mtvsrd(kScratchDoubleReg, src);
__ vor(dst, dst, kScratchDoubleReg);
break;
@@ -2208,46 +2210,709 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
__ vspltb(dst, dst, Operand(7));
break;
}
+#define SHIFT_TO_CORRECT_LANE(starting_lane_number, lane_input, \
+ lane_width_in_bytes, input_register) \
+  int shift_bits = abs(lane_input - starting_lane_number) * \
+ lane_width_in_bytes * kBitsPerByte; \
+ if (shift_bits > 0) { \
+ __ li(ip, Operand(shift_bits)); \
+ __ mtvsrd(kScratchDoubleReg, ip); \
+ __ vspltb(kScratchDoubleReg, kScratchDoubleReg, Operand(7)); \
+    if (lane_input < starting_lane_number) { \
+ __ vsro(kScratchDoubleReg, input_register, kScratchDoubleReg); \
+ } else { \
+      DCHECK(lane_input > starting_lane_number); \
+ __ vslo(kScratchDoubleReg, input_register, kScratchDoubleReg); \
+ } \
+ input_register = kScratchDoubleReg; \
+ }
case kPPC_F64x2ExtractLane: {
- __ mfvsrd(kScratchReg, i.InputSimd128Register(0));
+ int32_t lane = 1 - i.InputInt8(1);
+ Simd128Register src = i.InputSimd128Register(0);
+ SHIFT_TO_CORRECT_LANE(0, lane, 8, src);
+ __ mfvsrd(kScratchReg, src);
__ MovInt64ToDouble(i.OutputDoubleRegister(), kScratchReg);
break;
}
case kPPC_F32x4ExtractLane: {
- __ mfvsrwz(kScratchReg, i.InputSimd128Register(0));
+ int32_t lane = 3 - i.InputInt8(1);
+ Simd128Register src = i.InputSimd128Register(0);
+ SHIFT_TO_CORRECT_LANE(1, lane, 4, src)
+ __ mfvsrwz(kScratchReg, src);
__ MovIntToFloat(i.OutputDoubleRegister(), kScratchReg);
break;
}
case kPPC_I64x2ExtractLane: {
- __ mfvsrd(i.OutputRegister(), i.InputSimd128Register(0));
+ int32_t lane = 1 - i.InputInt8(1);
+ Simd128Register src = i.InputSimd128Register(0);
+ SHIFT_TO_CORRECT_LANE(0, lane, 8, src)
+ __ mfvsrd(i.OutputRegister(), src);
break;
}
case kPPC_I32x4ExtractLane: {
- __ mfvsrwz(i.OutputRegister(), i.InputSimd128Register(0));
+ int32_t lane = 3 - i.InputInt8(1);
+ Simd128Register src = i.InputSimd128Register(0);
+ SHIFT_TO_CORRECT_LANE(1, lane, 4, src)
+ __ mfvsrwz(i.OutputRegister(), src);
break;
}
case kPPC_I16x8ExtractLaneU: {
- __ mfvsrwz(r0, i.InputSimd128Register(0));
+ int32_t lane = 7 - i.InputInt8(1);
+ Simd128Register src = i.InputSimd128Register(0);
+ SHIFT_TO_CORRECT_LANE(2, lane, 2, src)
+ __ mfvsrwz(r0, src);
__ li(ip, Operand(16));
__ srd(i.OutputRegister(), r0, ip);
break;
}
case kPPC_I16x8ExtractLaneS: {
- __ mfvsrwz(kScratchReg, i.InputSimd128Register(0));
+ int32_t lane = 7 - i.InputInt8(1);
+ Simd128Register src = i.InputSimd128Register(0);
+ SHIFT_TO_CORRECT_LANE(2, lane, 2, src)
+ __ mfvsrwz(kScratchReg, src);
__ sradi(i.OutputRegister(), kScratchReg, 16);
break;
}
case kPPC_I8x16ExtractLaneU: {
- __ mfvsrwz(r0, i.InputSimd128Register(0));
+ int32_t lane = 15 - i.InputInt8(1);
+ Simd128Register src = i.InputSimd128Register(0);
+ SHIFT_TO_CORRECT_LANE(4, lane, 1, src)
+ __ mfvsrwz(r0, src);
__ li(ip, Operand(24));
__ srd(i.OutputRegister(), r0, ip);
break;
}
case kPPC_I8x16ExtractLaneS: {
- __ mfvsrwz(kScratchReg, i.InputSimd128Register(0));
+ int32_t lane = 15 - i.InputInt8(1);
+ Simd128Register src = i.InputSimd128Register(0);
+ SHIFT_TO_CORRECT_LANE(4, lane, 1, src)
+ __ mfvsrwz(kScratchReg, src);
__ sradi(i.OutputRegister(), kScratchReg, 24);
break;
}
+#undef SHIFT_TO_CORRECT_LANE
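The extract-lane cases above first remap the wasm lane index for the reversed element order on big-endian PPC (for example "lane = 3 - i.InputInt8(1)" for f32x4), then use SHIFT_TO_CORRECT_LANE to rotate the wanted element into the position that mfvsrd/mfvsrwz read from. Ignoring the register shuffling, the net effect is an indexed read; a scalar sketch under that element-order assumption:

// Assumes hardware elements sit in reversed lane order, which is what the
// "3 - lane" remapping above accounts for.
float F32x4ExtractLane(const float hw_elements[4], int wasm_lane) {
  return hw_elements[3 - wasm_lane];
}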
+#define GENERATE_REPLACE_LANE_MASK(lane, replacement_value_byte_lane, \
+ lane_width_in_bytes) \
+ uint64_t mask = 0; \
+ for (int i = 0, j = 0; i <= kSimd128Size - 1; i++) { \
+ mask <<= kBitsPerByte; \
+ if (i >= lane * lane_width_in_bytes && \
+ i < lane * lane_width_in_bytes + lane_width_in_bytes) { \
+ mask |= replacement_value_byte_lane + j; \
+ j++; \
+ } else { \
+ mask |= i; \
+ } \
+ if (i == (kSimd128Size / 2) - 1) { \
+ __ mov(r0, Operand(mask)); \
+ mask = 0; \
+ } else if (i >= kSimd128Size - 1) { \
+ __ mov(ip, Operand(mask)); \
+ } \
+ } \
+ /* Need to maintain 16 byte alignment for lvx */ \
+ __ addi(sp, sp, Operand(-24)); \
+ __ StoreP(ip, MemOperand(sp, 0)); \
+ __ StoreP(r0, MemOperand(sp, 8)); \
+ __ li(r0, Operand(0)); \
+ __ lvx(kScratchDoubleReg, MemOperand(sp, r0)); \
+ __ addi(sp, sp, Operand(24));
+ case kPPC_F64x2ReplaceLane: {
+ Simd128Register src = i.InputSimd128Register(0);
+ Simd128Register dst = i.OutputSimd128Register();
+ int32_t lane = 1 - i.InputInt8(1);
+ constexpr int replacement_value_byte_lane = 16;
+ constexpr int lane_width_in_bytes = 8;
+ GENERATE_REPLACE_LANE_MASK(lane, replacement_value_byte_lane,
+ lane_width_in_bytes)
+ __ MovDoubleToInt64(r0, i.InputDoubleRegister(2));
+ __ mtvsrd(dst, r0);
+ __ vperm(dst, src, dst, kScratchDoubleReg);
+ break;
+ }
+ case kPPC_F32x4ReplaceLane: {
+ Simd128Register src = i.InputSimd128Register(0);
+ Simd128Register dst = i.OutputSimd128Register();
+ int32_t lane = 3 - i.InputInt8(1);
+ constexpr int replacement_value_byte_lane = 20;
+ constexpr int lane_width_in_bytes = 4;
+ GENERATE_REPLACE_LANE_MASK(lane, replacement_value_byte_lane,
+ lane_width_in_bytes)
+ __ MovFloatToInt(kScratchReg, i.InputDoubleRegister(2));
+ __ mtvsrd(dst, kScratchReg);
+ __ vperm(dst, src, dst, kScratchDoubleReg);
+ break;
+ }
+ case kPPC_I64x2ReplaceLane: {
+ Simd128Register src = i.InputSimd128Register(0);
+ Simd128Register dst = i.OutputSimd128Register();
+ int32_t lane = 1 - i.InputInt8(1);
+ constexpr int replacement_value_byte_lane = 16;
+ constexpr int lane_width_in_bytes = 8;
+ GENERATE_REPLACE_LANE_MASK(lane, replacement_value_byte_lane,
+ lane_width_in_bytes)
+ __ mtvsrd(dst, i.InputRegister(2));
+ __ vperm(dst, src, dst, kScratchDoubleReg);
+ break;
+ }
+ case kPPC_I32x4ReplaceLane: {
+ Simd128Register src = i.InputSimd128Register(0);
+ Simd128Register dst = i.OutputSimd128Register();
+ int32_t lane = 3 - i.InputInt8(1);
+ constexpr int replacement_value_byte_lane = 20;
+ constexpr int lane_width_in_bytes = 4;
+ GENERATE_REPLACE_LANE_MASK(lane, replacement_value_byte_lane,
+ lane_width_in_bytes)
+ __ mtvsrd(dst, i.InputRegister(2));
+ __ vperm(dst, src, dst, kScratchDoubleReg);
+ break;
+ }
+ case kPPC_I16x8ReplaceLane: {
+ Simd128Register src = i.InputSimd128Register(0);
+ Simd128Register dst = i.OutputSimd128Register();
+ int32_t lane = 7 - i.InputInt8(1);
+ constexpr int replacement_value_byte_lane = 22;
+ constexpr int lane_width_in_bytes = 2;
+ GENERATE_REPLACE_LANE_MASK(lane, replacement_value_byte_lane,
+ lane_width_in_bytes)
+ __ mtvsrd(dst, i.InputRegister(2));
+ __ vperm(dst, src, dst, kScratchDoubleReg);
+ break;
+ }
+ case kPPC_I8x16ReplaceLane: {
+ Simd128Register src = i.InputSimd128Register(0);
+ Simd128Register dst = i.OutputSimd128Register();
+ int32_t lane = 15 - i.InputInt8(1);
+ constexpr int replacement_value_byte_lane = 23;
+ constexpr int lane_width_in_bytes = 1;
+ GENERATE_REPLACE_LANE_MASK(lane, replacement_value_byte_lane,
+ lane_width_in_bytes)
+ __ mtvsrd(dst, i.InputRegister(2));
+ __ vperm(dst, src, dst, kScratchDoubleReg);
+ break;
+ }
+#undef GENERATE_REPLACE_LANE_MASK
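GENERATE_REPLACE_LANE_MASK builds, via the stack and lvx, the byte-selection control that vperm consumes: the bytes covering the target lane select from the second source (indices 16 and up, the vector holding the replacement value), while every other byte keeps the corresponding byte of the original vector (indices 0 to 15). A host-side sketch of the same mask construction, for illustration only:

// Rebuilds the 16-byte vperm control that the macro stores to the stack; the
// helper and its use of std::array are illustrative, not part of V8.
#include <array>
#include <cstdint>

std::array<uint8_t, 16> ReplaceLanePermMask(int lane, int lane_width_in_bytes,
                                            int replacement_value_byte_lane) {
  std::array<uint8_t, 16> mask;
  for (int i = 0, j = 0; i < 16; i++) {
    if (i >= lane * lane_width_in_bytes &&
        i < (lane + 1) * lane_width_in_bytes) {
      mask[i] = static_cast<uint8_t>(replacement_value_byte_lane + j++);
    } else {
      mask[i] = static_cast<uint8_t>(i);
    }
  }
  return mask;
}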
+ case kPPC_F64x2Add: {
+ __ xvadddp(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputSimd128Register(1));
+ break;
+ }
+ case kPPC_F64x2Sub: {
+ __ xvsubdp(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputSimd128Register(1));
+ break;
+ }
+ case kPPC_F64x2Mul: {
+ __ xvmuldp(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputSimd128Register(1));
+ break;
+ }
+ case kPPC_F32x4Add: {
+ __ vaddfp(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputSimd128Register(1));
+ break;
+ }
+ case kPPC_F32x4AddHoriz: {
+ Simd128Register src0 = i.InputSimd128Register(0);
+ Simd128Register src1 = i.InputSimd128Register(1);
+ Simd128Register dst = i.OutputSimd128Register();
+ Simd128Register tempFPReg1 = i.ToSimd128Register(instr->TempAt(0));
+ Simd128Register tempFPReg2 = i.ToSimd128Register(instr->TempAt(1));
+ constexpr int shift_bits = 32;
+ // generate first operand
+ __ vpkudum(dst, src1, src0);
+ // generate second operand
+ __ li(ip, Operand(shift_bits));
+ __ mtvsrd(tempFPReg2, ip);
+ __ vspltb(tempFPReg2, tempFPReg2, Operand(7));
+ __ vsro(tempFPReg1, src0, tempFPReg2);
+ __ vsro(tempFPReg2, src1, tempFPReg2);
+ __ vpkudum(kScratchDoubleReg, tempFPReg2, tempFPReg1);
+ // add the operands
+ __ vaddfp(dst, kScratchDoubleReg, dst);
+ break;
+ }
+ case kPPC_F32x4Sub: {
+ __ vsubfp(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputSimd128Register(1));
+ break;
+ }
+ case kPPC_F32x4Mul: {
+ __ xvmulsp(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputSimd128Register(1));
+ break;
+ }
+ case kPPC_I64x2Add: {
+ __ vaddudm(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputSimd128Register(1));
+ break;
+ }
+ case kPPC_I64x2Sub: {
+ __ vsubudm(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputSimd128Register(1));
+ break;
+ }
+ case kPPC_I64x2Mul: {
+ // Need to maintain 16 byte alignment for stvx and lvx.
+ __ addi(sp, sp, Operand(-40));
+ __ li(r0, Operand(0));
+ __ stvx(i.InputSimd128Register(0), MemOperand(sp, r0));
+ __ li(r0, Operand(16));
+ __ stvx(i.InputSimd128Register(1), MemOperand(sp, r0));
+ for (int i = 0; i < 2; i++) {
+ __ LoadP(r0, MemOperand(sp, kBitsPerByte * i));
+ __ LoadP(ip, MemOperand(sp, (kBitsPerByte * i) + kSimd128Size));
+ __ mulld(r0, r0, ip);
+ __ StoreP(r0, MemOperand(sp, i * kBitsPerByte));
+ }
+ __ li(r0, Operand(0));
+ __ lvx(i.OutputSimd128Register(), MemOperand(sp, r0));
+ __ addi(sp, sp, Operand(40));
+ break;
+ }
+ case kPPC_I32x4Add: {
+ __ vadduwm(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputSimd128Register(1));
+ break;
+ }
+ case kPPC_I32x4AddHoriz: {
+ Simd128Register src0 = i.InputSimd128Register(0);
+ Simd128Register src1 = i.InputSimd128Register(1);
+ Simd128Register dst = i.OutputSimd128Register();
+ __ vxor(kScratchDoubleReg, kScratchDoubleReg, kScratchDoubleReg);
+ __ vsum2sws(dst, src0, kScratchDoubleReg);
+ __ vsum2sws(kScratchDoubleReg, src1, kScratchDoubleReg);
+ __ vpkudum(dst, kScratchDoubleReg, dst);
+ break;
+ }
+ case kPPC_I32x4Sub: {
+ __ vsubuwm(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputSimd128Register(1));
+ break;
+ }
+ case kPPC_I32x4Mul: {
+ __ vmuluwm(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputSimd128Register(1));
+ break;
+ }
+ case kPPC_I16x8Add: {
+ __ vadduhm(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputSimd128Register(1));
+ break;
+ }
+ case kPPC_I16x8AddHoriz: {
+ Simd128Register src0 = i.InputSimd128Register(0);
+ Simd128Register src1 = i.InputSimd128Register(1);
+ Simd128Register dst = i.OutputSimd128Register();
+ __ vxor(kScratchDoubleReg, kScratchDoubleReg, kScratchDoubleReg);
+ __ vsum4shs(dst, src0, kScratchDoubleReg);
+ __ vsum4shs(kScratchDoubleReg, src1, kScratchDoubleReg);
+ __ vpkuwus(dst, kScratchDoubleReg, dst);
+ break;
+ }
+ case kPPC_I16x8Sub: {
+ __ vsubuhm(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputSimd128Register(1));
+ break;
+ }
+ case kPPC_I16x8Mul: {
+ __ vxor(kScratchDoubleReg, kScratchDoubleReg, kScratchDoubleReg);
+ __ vmladduhm(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputSimd128Register(1), kScratchDoubleReg);
+ break;
+ }
+ case kPPC_I8x16Add: {
+ __ vaddubm(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputSimd128Register(1));
+ break;
+ }
+ case kPPC_I8x16Sub: {
+ __ vsububm(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputSimd128Register(1));
+ break;
+ }
+ case kPPC_I8x16Mul: {
+ __ vmuleub(kScratchDoubleReg, i.InputSimd128Register(0),
+ i.InputSimd128Register(1));
+ __ vmuloub(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputSimd128Register(1));
+ __ vpkuhum(i.OutputSimd128Register(), kScratchDoubleReg,
+ i.OutputSimd128Register());
+ break;
+ }
+ case kPPC_I64x2MinS: {
+ __ vminsd(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputSimd128Register(1));
+ break;
+ }
+ case kPPC_I32x4MinS: {
+ __ vminsw(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputSimd128Register(1));
+ break;
+ }
+ case kPPC_I64x2MinU: {
+ __ vminud(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputSimd128Register(1));
+ break;
+ }
+ case kPPC_I32x4MinU: {
+ __ vminuw(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputSimd128Register(1));
+ break;
+ }
+ case kPPC_I16x8MinS: {
+ __ vminsh(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputSimd128Register(1));
+ break;
+ }
+ case kPPC_I16x8MinU: {
+ __ vminuh(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputSimd128Register(1));
+ break;
+ }
+ case kPPC_I8x16MinS: {
+ __ vminsb(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputSimd128Register(1));
+ break;
+ }
+ case kPPC_I8x16MinU: {
+ __ vminub(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputSimd128Register(1));
+ break;
+ }
+ case kPPC_I64x2MaxS: {
+ __ vmaxsd(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputSimd128Register(1));
+ break;
+ }
+ case kPPC_I32x4MaxS: {
+ __ vmaxsw(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputSimd128Register(1));
+ break;
+ }
+ case kPPC_I64x2MaxU: {
+ __ vmaxud(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputSimd128Register(1));
+ break;
+ }
+ case kPPC_I32x4MaxU: {
+ __ vmaxuw(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputSimd128Register(1));
+ break;
+ }
+ case kPPC_I16x8MaxS: {
+ __ vmaxsh(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputSimd128Register(1));
+ break;
+ }
+ case kPPC_I16x8MaxU: {
+ __ vmaxuh(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputSimd128Register(1));
+ break;
+ }
+ case kPPC_I8x16MaxS: {
+ __ vmaxsb(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputSimd128Register(1));
+ break;
+ }
+ case kPPC_I8x16MaxU: {
+ __ vmaxub(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputSimd128Register(1));
+ break;
+ }
+ case kPPC_F64x2Eq: {
+ __ xvcmpeqdp(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputSimd128Register(1));
+ break;
+ }
+ case kPPC_F64x2Ne: {
+ __ xvcmpeqdp(kScratchDoubleReg, i.InputSimd128Register(0),
+ i.InputSimd128Register(1));
+ __ vnor(i.OutputSimd128Register(), kScratchDoubleReg, kScratchDoubleReg);
+ break;
+ }
+ case kPPC_F64x2Le: {
+ __ xvcmpgedp(i.OutputSimd128Register(), i.InputSimd128Register(1),
+ i.InputSimd128Register(0));
+ break;
+ }
+ case kPPC_F64x2Lt: {
+ __ xvcmpgtdp(i.OutputSimd128Register(), i.InputSimd128Register(1),
+ i.InputSimd128Register(0));
+ break;
+ }
+ case kPPC_F32x4Eq: {
+ __ xvcmpeqsp(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputSimd128Register(1));
+ break;
+ }
+ case kPPC_I64x2Eq: {
+ __ vcmpequd(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputSimd128Register(1));
+ break;
+ }
+ case kPPC_I32x4Eq: {
+ __ vcmpequw(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputSimd128Register(1));
+ break;
+ }
+ case kPPC_I16x8Eq: {
+ __ vcmpequh(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputSimd128Register(1));
+ break;
+ }
+ case kPPC_I8x16Eq: {
+ __ vcmpequb(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputSimd128Register(1));
+ break;
+ }
+ case kPPC_F32x4Ne: {
+ __ xvcmpeqsp(kScratchDoubleReg, i.InputSimd128Register(0),
+ i.InputSimd128Register(1));
+ __ vnor(i.OutputSimd128Register(), kScratchDoubleReg, kScratchDoubleReg);
+ break;
+ }
+ case kPPC_I64x2Ne: {
+ __ vcmpequd(kScratchDoubleReg, i.InputSimd128Register(0),
+ i.InputSimd128Register(1));
+ __ vnor(i.OutputSimd128Register(), kScratchDoubleReg, kScratchDoubleReg);
+ break;
+ }
+ case kPPC_I32x4Ne: {
+ __ vcmpequw(kScratchDoubleReg, i.InputSimd128Register(0),
+ i.InputSimd128Register(1));
+ __ vnor(i.OutputSimd128Register(), kScratchDoubleReg, kScratchDoubleReg);
+ break;
+ }
+ case kPPC_I16x8Ne: {
+ __ vcmpequh(kScratchDoubleReg, i.InputSimd128Register(0),
+ i.InputSimd128Register(1));
+ __ vnor(i.OutputSimd128Register(), kScratchDoubleReg, kScratchDoubleReg);
+ break;
+ }
+ case kPPC_I8x16Ne: {
+ __ vcmpequb(kScratchDoubleReg, i.InputSimd128Register(0),
+ i.InputSimd128Register(1));
+ __ vnor(i.OutputSimd128Register(), kScratchDoubleReg, kScratchDoubleReg);
+ break;
+ }
+ case kPPC_F32x4Lt: {
+ __ xvcmpgtsp(i.OutputSimd128Register(), i.InputSimd128Register(1),
+ i.InputSimd128Register(0));
+ break;
+ }
+ case kPPC_F32x4Le: {
+ __ xvcmpgesp(i.OutputSimd128Register(), i.InputSimd128Register(1),
+ i.InputSimd128Register(0));
+ break;
+ }
+ case kPPC_I64x2GtS: {
+ __ vcmpgtsd(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputSimd128Register(1));
+ break;
+ }
+ case kPPC_I32x4GtS: {
+ __ vcmpgtsw(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputSimd128Register(1));
+ break;
+ }
+ case kPPC_I64x2GeS: {
+ __ vcmpequd(kScratchDoubleReg, i.InputSimd128Register(0),
+ i.InputSimd128Register(1));
+ __ vcmpgtsd(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputSimd128Register(1));
+ __ vor(i.OutputSimd128Register(), i.OutputSimd128Register(),
+ kScratchDoubleReg);
+ break;
+ }
+ case kPPC_I32x4GeS: {
+ __ vcmpequw(kScratchDoubleReg, i.InputSimd128Register(0),
+ i.InputSimd128Register(1));
+ __ vcmpgtsw(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputSimd128Register(1));
+ __ vor(i.OutputSimd128Register(), i.OutputSimd128Register(),
+ kScratchDoubleReg);
+ break;
+ }
+ case kPPC_I64x2GtU: {
+ __ vcmpgtud(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputSimd128Register(1));
+ break;
+ }
+ case kPPC_I32x4GtU: {
+ __ vcmpgtuw(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputSimd128Register(1));
+ break;
+ }
+ case kPPC_I64x2GeU: {
+ __ vcmpequd(kScratchDoubleReg, i.InputSimd128Register(0),
+ i.InputSimd128Register(1));
+ __ vcmpgtud(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputSimd128Register(1));
+ __ vor(i.OutputSimd128Register(), i.OutputSimd128Register(),
+ kScratchDoubleReg);
+ break;
+ }
+ case kPPC_I32x4GeU: {
+ __ vcmpequw(kScratchDoubleReg, i.InputSimd128Register(0),
+ i.InputSimd128Register(1));
+ __ vcmpgtuw(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputSimd128Register(1));
+ __ vor(i.OutputSimd128Register(), i.OutputSimd128Register(),
+ kScratchDoubleReg);
+ break;
+ }
+ case kPPC_I16x8GtS: {
+ __ vcmpgtsh(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputSimd128Register(1));
+ break;
+ }
+ case kPPC_I16x8GeS: {
+ __ vcmpequh(kScratchDoubleReg, i.InputSimd128Register(0),
+ i.InputSimd128Register(1));
+ __ vcmpgtsh(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputSimd128Register(1));
+ __ vor(i.OutputSimd128Register(), i.OutputSimd128Register(),
+ kScratchDoubleReg);
+ break;
+ }
+ case kPPC_I16x8GtU: {
+ __ vcmpgtuh(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputSimd128Register(1));
+ break;
+ }
+ case kPPC_I16x8GeU: {
+ __ vcmpequh(kScratchDoubleReg, i.InputSimd128Register(0),
+ i.InputSimd128Register(1));
+ __ vcmpgtuh(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputSimd128Register(1));
+ __ vor(i.OutputSimd128Register(), i.OutputSimd128Register(),
+ kScratchDoubleReg);
+ break;
+ }
+ case kPPC_I8x16GtS: {
+ __ vcmpgtsb(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputSimd128Register(1));
+ break;
+ }
+ case kPPC_I8x16GeS: {
+ __ vcmpequb(kScratchDoubleReg, i.InputSimd128Register(0),
+ i.InputSimd128Register(1));
+ __ vcmpgtsb(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputSimd128Register(1));
+ __ vor(i.OutputSimd128Register(), i.OutputSimd128Register(),
+ kScratchDoubleReg);
+ break;
+ }
+ case kPPC_I8x16GtU: {
+ __ vcmpgtub(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputSimd128Register(1));
+ break;
+ }
+ case kPPC_I8x16GeU: {
+ __ vcmpequb(kScratchDoubleReg, i.InputSimd128Register(0),
+ i.InputSimd128Register(1));
+ __ vcmpgtub(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputSimd128Register(1));
+ __ vor(i.OutputSimd128Register(), i.OutputSimd128Register(),
+ kScratchDoubleReg);
+ break;
+ }
+#define VECTOR_SHIFT(op) \
+ { \
+ __ mtvsrd(kScratchDoubleReg, i.InputRegister(1)); \
+ __ vspltb(kScratchDoubleReg, kScratchDoubleReg, Operand(7)); \
+ __ op(i.OutputSimd128Register(), i.InputSimd128Register(0), \
+ kScratchDoubleReg); \
+ }
+ case kPPC_I64x2Shl: {
+ VECTOR_SHIFT(vsld)
+ break;
+ }
+ case kPPC_I64x2ShrS: {
+ VECTOR_SHIFT(vsrad)
+ break;
+ }
+ case kPPC_I64x2ShrU: {
+ VECTOR_SHIFT(vsrd)
+ break;
+ }
+ case kPPC_I32x4Shl: {
+ VECTOR_SHIFT(vslw)
+ break;
+ }
+ case kPPC_I32x4ShrS: {
+ VECTOR_SHIFT(vsraw)
+ break;
+ }
+ case kPPC_I32x4ShrU: {
+ VECTOR_SHIFT(vsrw)
+ break;
+ }
+ case kPPC_I16x8Shl: {
+ VECTOR_SHIFT(vslh)
+ break;
+ }
+ case kPPC_I16x8ShrS: {
+ VECTOR_SHIFT(vsrah)
+ break;
+ }
+ case kPPC_I16x8ShrU: {
+ VECTOR_SHIFT(vsrh)
+ break;
+ }
+ case kPPC_I8x16Shl: {
+ VECTOR_SHIFT(vslb)
+ break;
+ }
+ case kPPC_I8x16ShrS: {
+ VECTOR_SHIFT(vsrab)
+ break;
+ }
+ case kPPC_I8x16ShrU: {
+ VECTOR_SHIFT(vsrb)
+ break;
+ }
+#undef VECTOR_SHIFT
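VECTOR_SHIFT broadcasts the scalar shift count into every byte lane (mtvsrd followed by vspltb) so that the element-wise Altivec shift can take its count from each lane; the hardware only looks at the low bits of that count. A scalar model of one instantiation, with the implicit modulo written out:

// Scalar model of kPPC_I32x4Shl after VECTOR_SHIFT(vslw): every lane shifts by
// the same amount, taken modulo the lane width in bits.
#include <cstdint>

void I32x4Shl(uint32_t lanes[4], uint32_t shift) {
  for (int n = 0; n < 4; n++) lanes[n] <<= (shift & 31);
}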
+ case kPPC_S128And: {
+ Simd128Register dst = i.OutputSimd128Register();
+ Simd128Register src = i.InputSimd128Register(1);
+ __ vand(dst, i.InputSimd128Register(0), src);
+ break;
+ }
+ case kPPC_S128Or: {
+ Simd128Register dst = i.OutputSimd128Register();
+ Simd128Register src = i.InputSimd128Register(1);
+ __ vor(dst, i.InputSimd128Register(0), src);
+ break;
+ }
+ case kPPC_S128Xor: {
+ Simd128Register dst = i.OutputSimd128Register();
+ Simd128Register src = i.InputSimd128Register(1);
+ __ vxor(dst, i.InputSimd128Register(0), src);
+ break;
+ }
+ case kPPC_S128Zero: {
+ Simd128Register dst = i.OutputSimd128Register();
+ __ vxor(dst, dst, dst);
+ break;
+ }
+ case kPPC_S128Not: {
+ Simd128Register dst = i.OutputSimd128Register();
+ Simd128Register src = i.InputSimd128Register(1);
+ __ vnor(dst, i.InputSimd128Register(0), src);
+ break;
+ }
+ case kPPC_S128Select: {
+ Simd128Register dst = i.OutputSimd128Register();
+ Simd128Register mask = i.InputSimd128Register(0);
+ Simd128Register src1 = i.InputSimd128Register(1);
+ Simd128Register src2 = i.InputSimd128Register(2);
+ __ vsel(dst, src2, src1, mask);
+ break;
+ }
case kPPC_StoreCompressTagged: {
ASSEMBLE_STORE_INTEGER(StoreTaggedField, StoreTaggedFieldX);
break;
diff --git a/chromium/v8/src/compiler/backend/ppc/instruction-codes-ppc.h b/chromium/v8/src/compiler/backend/ppc/instruction-codes-ppc.h
index 4f6aeced6da..fdffc5f0963 100644
--- a/chromium/v8/src/compiler/backend/ppc/instruction-codes-ppc.h
+++ b/chromium/v8/src/compiler/backend/ppc/instruction-codes-ppc.h
@@ -192,18 +192,111 @@ namespace compiler {
V(PPC_AtomicXorInt64) \
V(PPC_F64x2Splat) \
V(PPC_F64x2ExtractLane) \
+ V(PPC_F64x2ReplaceLane) \
+ V(PPC_F64x2Add) \
+ V(PPC_F64x2Sub) \
+ V(PPC_F64x2Mul) \
+ V(PPC_F64x2Eq) \
+ V(PPC_F64x2Ne) \
+ V(PPC_F64x2Le) \
+ V(PPC_F64x2Lt) \
V(PPC_F32x4Splat) \
V(PPC_F32x4ExtractLane) \
+ V(PPC_F32x4ReplaceLane) \
+ V(PPC_F32x4Add) \
+ V(PPC_F32x4AddHoriz) \
+ V(PPC_F32x4Sub) \
+ V(PPC_F32x4Mul) \
+ V(PPC_F32x4Eq) \
+ V(PPC_F32x4Ne) \
+ V(PPC_F32x4Lt) \
+ V(PPC_F32x4Le) \
V(PPC_I64x2Splat) \
V(PPC_I64x2ExtractLane) \
+ V(PPC_I64x2ReplaceLane) \
+ V(PPC_I64x2Add) \
+ V(PPC_I64x2Sub) \
+ V(PPC_I64x2Mul) \
+ V(PPC_I64x2MinS) \
+ V(PPC_I64x2MinU) \
+ V(PPC_I64x2MaxS) \
+ V(PPC_I64x2MaxU) \
+ V(PPC_I64x2Eq) \
+ V(PPC_I64x2Ne) \
+ V(PPC_I64x2GtS) \
+ V(PPC_I64x2GtU) \
+ V(PPC_I64x2GeU) \
+ V(PPC_I64x2GeS) \
+ V(PPC_I64x2Shl) \
+ V(PPC_I64x2ShrS) \
+ V(PPC_I64x2ShrU) \
V(PPC_I32x4Splat) \
V(PPC_I32x4ExtractLane) \
+ V(PPC_I32x4ReplaceLane) \
+ V(PPC_I32x4Add) \
+ V(PPC_I32x4AddHoriz) \
+ V(PPC_I32x4Sub) \
+ V(PPC_I32x4Mul) \
+ V(PPC_I32x4MinS) \
+ V(PPC_I32x4MinU) \
+ V(PPC_I32x4MaxS) \
+ V(PPC_I32x4MaxU) \
+ V(PPC_I32x4Eq) \
+ V(PPC_I32x4Ne) \
+ V(PPC_I32x4GtS) \
+ V(PPC_I32x4GeS) \
+ V(PPC_I32x4GtU) \
+ V(PPC_I32x4GeU) \
+ V(PPC_I32x4Shl) \
+ V(PPC_I32x4ShrS) \
+ V(PPC_I32x4ShrU) \
V(PPC_I16x8Splat) \
V(PPC_I16x8ExtractLaneU) \
V(PPC_I16x8ExtractLaneS) \
+ V(PPC_I16x8ReplaceLane) \
+ V(PPC_I16x8Add) \
+ V(PPC_I16x8AddHoriz) \
+ V(PPC_I16x8Sub) \
+ V(PPC_I16x8Mul) \
+ V(PPC_I16x8MinS) \
+ V(PPC_I16x8MinU) \
+ V(PPC_I16x8MaxS) \
+ V(PPC_I16x8MaxU) \
+ V(PPC_I16x8Eq) \
+ V(PPC_I16x8Ne) \
+ V(PPC_I16x8GtS) \
+ V(PPC_I16x8GeS) \
+ V(PPC_I16x8GtU) \
+ V(PPC_I16x8GeU) \
+ V(PPC_I16x8Shl) \
+ V(PPC_I16x8ShrS) \
+ V(PPC_I16x8ShrU) \
V(PPC_I8x16Splat) \
V(PPC_I8x16ExtractLaneU) \
V(PPC_I8x16ExtractLaneS) \
+ V(PPC_I8x16ReplaceLane) \
+ V(PPC_I8x16Add) \
+ V(PPC_I8x16Sub) \
+ V(PPC_I8x16Mul) \
+ V(PPC_I8x16MinS) \
+ V(PPC_I8x16MinU) \
+ V(PPC_I8x16MaxS) \
+ V(PPC_I8x16MaxU) \
+ V(PPC_I8x16Eq) \
+ V(PPC_I8x16Ne) \
+ V(PPC_I8x16GtS) \
+ V(PPC_I8x16GeS) \
+ V(PPC_I8x16GtU) \
+ V(PPC_I8x16GeU) \
+ V(PPC_I8x16Shl) \
+ V(PPC_I8x16ShrS) \
+ V(PPC_I8x16ShrU) \
+ V(PPC_S128And) \
+ V(PPC_S128Or) \
+ V(PPC_S128Xor) \
+ V(PPC_S128Zero) \
+ V(PPC_S128Not) \
+ V(PPC_S128Select) \
V(PPC_StoreCompressTagged) \
V(PPC_LoadDecompressTaggedSigned) \
V(PPC_LoadDecompressTaggedPointer) \
diff --git a/chromium/v8/src/compiler/backend/ppc/instruction-scheduler-ppc.cc b/chromium/v8/src/compiler/backend/ppc/instruction-scheduler-ppc.cc
index 68d0aaedc4b..b1d124432ef 100644
--- a/chromium/v8/src/compiler/backend/ppc/instruction-scheduler-ppc.cc
+++ b/chromium/v8/src/compiler/backend/ppc/instruction-scheduler-ppc.cc
@@ -115,18 +115,111 @@ int InstructionScheduler::GetTargetInstructionFlags(
case kPPC_CompressAny:
case kPPC_F64x2Splat:
case kPPC_F64x2ExtractLane:
+ case kPPC_F64x2ReplaceLane:
+ case kPPC_F64x2Add:
+ case kPPC_F64x2Sub:
+ case kPPC_F64x2Mul:
+ case kPPC_F64x2Eq:
+ case kPPC_F64x2Ne:
+ case kPPC_F64x2Le:
+ case kPPC_F64x2Lt:
case kPPC_F32x4Splat:
case kPPC_F32x4ExtractLane:
+ case kPPC_F32x4ReplaceLane:
+ case kPPC_F32x4Add:
+ case kPPC_F32x4AddHoriz:
+ case kPPC_F32x4Sub:
+ case kPPC_F32x4Mul:
+ case kPPC_F32x4Eq:
+ case kPPC_F32x4Ne:
+ case kPPC_F32x4Lt:
+ case kPPC_F32x4Le:
case kPPC_I64x2Splat:
case kPPC_I64x2ExtractLane:
+ case kPPC_I64x2ReplaceLane:
+ case kPPC_I64x2Add:
+ case kPPC_I64x2Sub:
+ case kPPC_I64x2Mul:
+ case kPPC_I64x2MinS:
+ case kPPC_I64x2MinU:
+ case kPPC_I64x2MaxS:
+ case kPPC_I64x2MaxU:
+ case kPPC_I64x2Eq:
+ case kPPC_I64x2Ne:
+ case kPPC_I64x2GtS:
+ case kPPC_I64x2GtU:
+ case kPPC_I64x2GeU:
+ case kPPC_I64x2GeS:
+ case kPPC_I64x2Shl:
+ case kPPC_I64x2ShrS:
+ case kPPC_I64x2ShrU:
case kPPC_I32x4Splat:
case kPPC_I32x4ExtractLane:
+ case kPPC_I32x4ReplaceLane:
+ case kPPC_I32x4Add:
+ case kPPC_I32x4AddHoriz:
+ case kPPC_I32x4Sub:
+ case kPPC_I32x4Mul:
+ case kPPC_I32x4MinS:
+ case kPPC_I32x4MinU:
+ case kPPC_I32x4MaxS:
+ case kPPC_I32x4MaxU:
+ case kPPC_I32x4Eq:
+ case kPPC_I32x4Ne:
+ case kPPC_I32x4GtS:
+ case kPPC_I32x4GeS:
+ case kPPC_I32x4GtU:
+ case kPPC_I32x4GeU:
+ case kPPC_I32x4Shl:
+ case kPPC_I32x4ShrS:
+ case kPPC_I32x4ShrU:
case kPPC_I16x8Splat:
case kPPC_I16x8ExtractLaneU:
case kPPC_I16x8ExtractLaneS:
+ case kPPC_I16x8ReplaceLane:
+ case kPPC_I16x8Add:
+ case kPPC_I16x8AddHoriz:
+ case kPPC_I16x8Sub:
+ case kPPC_I16x8Mul:
+ case kPPC_I16x8MinS:
+ case kPPC_I16x8MinU:
+ case kPPC_I16x8MaxS:
+ case kPPC_I16x8MaxU:
+ case kPPC_I16x8Eq:
+ case kPPC_I16x8Ne:
+ case kPPC_I16x8GtS:
+ case kPPC_I16x8GeS:
+ case kPPC_I16x8GtU:
+ case kPPC_I16x8GeU:
+ case kPPC_I16x8Shl:
+ case kPPC_I16x8ShrS:
+ case kPPC_I16x8ShrU:
case kPPC_I8x16Splat:
case kPPC_I8x16ExtractLaneU:
case kPPC_I8x16ExtractLaneS:
+ case kPPC_I8x16ReplaceLane:
+ case kPPC_I8x16Add:
+ case kPPC_I8x16Sub:
+ case kPPC_I8x16Mul:
+ case kPPC_I8x16MinS:
+ case kPPC_I8x16MinU:
+ case kPPC_I8x16MaxS:
+ case kPPC_I8x16MaxU:
+ case kPPC_I8x16Eq:
+ case kPPC_I8x16Ne:
+ case kPPC_I8x16GtS:
+ case kPPC_I8x16GeS:
+ case kPPC_I8x16GtU:
+ case kPPC_I8x16GeU:
+ case kPPC_I8x16Shl:
+ case kPPC_I8x16ShrS:
+ case kPPC_I8x16ShrU:
+ case kPPC_S128And:
+ case kPPC_S128Or:
+ case kPPC_S128Xor:
+ case kPPC_S128Zero:
+ case kPPC_S128Not:
+ case kPPC_S128Select:
return kNoOpcodeFlags;
case kPPC_LoadWordS8:
diff --git a/chromium/v8/src/compiler/backend/ppc/instruction-selector-ppc.cc b/chromium/v8/src/compiler/backend/ppc/instruction-selector-ppc.cc
index 1598fbad041..d5ec475a808 100644
--- a/chromium/v8/src/compiler/backend/ppc/instruction-selector-ppc.cc
+++ b/chromium/v8/src/compiler/backend/ppc/instruction-selector-ppc.cc
@@ -2127,6 +2127,86 @@ void InstructionSelector::VisitInt64AbsWithOverflow(Node* node) {
V(I16x8) \
V(I8x16)
+#define SIMD_BINOP_LIST(V) \
+ V(F64x2Add) \
+ V(F64x2Sub) \
+ V(F64x2Mul) \
+ V(F64x2Eq) \
+ V(F64x2Ne) \
+ V(F64x2Le) \
+ V(F64x2Lt) \
+ V(F32x4Add) \
+ V(F32x4AddHoriz) \
+ V(F32x4Sub) \
+ V(F32x4Mul) \
+ V(F32x4Eq) \
+ V(F32x4Ne) \
+ V(F32x4Lt) \
+ V(F32x4Le) \
+ V(I64x2Add) \
+ V(I64x2Sub) \
+ V(I64x2Mul) \
+ V(I32x4Add) \
+ V(I32x4AddHoriz) \
+ V(I32x4Sub) \
+ V(I32x4Mul) \
+ V(I32x4MinS) \
+ V(I32x4MinU) \
+ V(I32x4MaxS) \
+ V(I32x4MaxU) \
+ V(I32x4Eq) \
+ V(I32x4Ne) \
+ V(I32x4GtS) \
+ V(I32x4GeS) \
+ V(I32x4GtU) \
+ V(I32x4GeU) \
+ V(I16x8Add) \
+ V(I16x8AddHoriz) \
+ V(I16x8Sub) \
+ V(I16x8Mul) \
+ V(I16x8MinS) \
+ V(I16x8MinU) \
+ V(I16x8MaxS) \
+ V(I16x8MaxU) \
+ V(I16x8Eq) \
+ V(I16x8Ne) \
+ V(I16x8GtS) \
+ V(I16x8GeS) \
+ V(I16x8GtU) \
+ V(I16x8GeU) \
+ V(I8x16Add) \
+ V(I8x16Sub) \
+ V(I8x16Mul) \
+ V(I8x16MinS) \
+ V(I8x16MinU) \
+ V(I8x16MaxS) \
+ V(I8x16MaxU) \
+ V(I8x16Eq) \
+ V(I8x16Ne) \
+ V(I8x16GtS) \
+ V(I8x16GeS) \
+ V(I8x16GtU) \
+ V(I8x16GeU) \
+ V(S128And) \
+ V(S128Or) \
+ V(S128Xor)
+
+#define SIMD_UNOP_LIST(V) V(S128Not)
+
+#define SIMD_SHIFT_LIST(V) \
+ V(I64x2Shl) \
+ V(I64x2ShrS) \
+ V(I64x2ShrU) \
+ V(I32x4Shl) \
+ V(I32x4ShrS) \
+ V(I32x4ShrU) \
+ V(I16x8Shl) \
+ V(I16x8ShrS) \
+ V(I16x8ShrU) \
+ V(I8x16Shl) \
+ V(I8x16ShrS) \
+ V(I8x16ShrU)
+
#define SIMD_VISIT_SPLAT(Type) \
void InstructionSelector::Visit##Type##Splat(Node* node) { \
PPCOperandGenerator g(this); \
@@ -2135,7 +2215,6 @@ void InstructionSelector::VisitInt64AbsWithOverflow(Node* node) {
}
SIMD_TYPES(SIMD_VISIT_SPLAT)
#undef SIMD_VISIT_SPLAT
-#undef SIMD_TYPES
#define SIMD_VISIT_EXTRACT_LANE(Type, Sign) \
void InstructionSelector::Visit##Type##ExtractLane##Sign(Node* node) { \
@@ -2153,72 +2232,74 @@ SIMD_VISIT_EXTRACT_LANE(I8x16, U)
SIMD_VISIT_EXTRACT_LANE(I8x16, S)
#undef SIMD_VISIT_EXTRACT_LANE
-void InstructionSelector::VisitI32x4ReplaceLane(Node* node) { UNIMPLEMENTED(); }
-
-void InstructionSelector::VisitI32x4Add(Node* node) { UNIMPLEMENTED(); }
-
-void InstructionSelector::VisitI32x4Sub(Node* node) { UNIMPLEMENTED(); }
-
-void InstructionSelector::VisitI32x4Shl(Node* node) { UNIMPLEMENTED(); }
-
-void InstructionSelector::VisitI32x4ShrS(Node* node) { UNIMPLEMENTED(); }
-
-void InstructionSelector::VisitI32x4Mul(Node* node) { UNIMPLEMENTED(); }
-
-void InstructionSelector::VisitI32x4MaxS(Node* node) { UNIMPLEMENTED(); }
-
-void InstructionSelector::VisitI32x4MinS(Node* node) { UNIMPLEMENTED(); }
-
-void InstructionSelector::VisitI32x4Eq(Node* node) { UNIMPLEMENTED(); }
-
-void InstructionSelector::VisitI32x4Ne(Node* node) { UNIMPLEMENTED(); }
-
-void InstructionSelector::VisitI32x4MinU(Node* node) { UNIMPLEMENTED(); }
+#define SIMD_VISIT_REPLACE_LANE(Type) \
+ void InstructionSelector::Visit##Type##ReplaceLane(Node* node) { \
+ PPCOperandGenerator g(this); \
+ int32_t lane = OpParameter<int32_t>(node->op()); \
+ Emit(kPPC_##Type##ReplaceLane, g.DefineAsRegister(node), \
+ g.UseUniqueRegister(node->InputAt(0)), g.UseImmediate(lane), \
+ g.UseUniqueRegister(node->InputAt(1))); \
+ }
+SIMD_TYPES(SIMD_VISIT_REPLACE_LANE)
+#undef SIMD_VISIT_REPLACE_LANE
+
+#define SIMD_VISIT_BINOP(Opcode) \
+ void InstructionSelector::Visit##Opcode(Node* node) { \
+ PPCOperandGenerator g(this); \
+ InstructionOperand temps[] = {g.TempSimd128Register(), \
+ g.TempSimd128Register()}; \
+ Emit(kPPC_##Opcode, g.DefineAsRegister(node), \
+ g.UseUniqueRegister(node->InputAt(0)), \
+ g.UseUniqueRegister(node->InputAt(1)), arraysize(temps), temps); \
+ }
+SIMD_BINOP_LIST(SIMD_VISIT_BINOP)
+#undef SIMD_VISIT_BINOP
+#undef SIMD_BINOP_LIST
+
+#define SIMD_VISIT_UNOP(Opcode) \
+ void InstructionSelector::Visit##Opcode(Node* node) { \
+ PPCOperandGenerator g(this); \
+ Emit(kPPC_##Opcode, g.DefineAsRegister(node), \
+ g.UseRegister(node->InputAt(0))); \
+ }
+SIMD_UNOP_LIST(SIMD_VISIT_UNOP)
+#undef SIMD_VISIT_UNOP
+#undef SIMD_UNOP_LIST
+
+#define SIMD_VISIT_SHIFT(Opcode) \
+ void InstructionSelector::Visit##Opcode(Node* node) { \
+ PPCOperandGenerator g(this); \
+ Emit(kPPC_##Opcode, g.DefineAsRegister(node), \
+ g.UseUniqueRegister(node->InputAt(0)), \
+ g.UseUniqueRegister(node->InputAt(1))); \
+ }
+SIMD_SHIFT_LIST(SIMD_VISIT_SHIFT)
+#undef SIMD_VISIT_SHIFT
+#undef SIMD_SHIFT_LIST
+#undef SIMD_TYPES
-void InstructionSelector::VisitI32x4MaxU(Node* node) { UNIMPLEMENTED(); }
+void InstructionSelector::VisitS128Zero(Node* node) {
+ PPCOperandGenerator g(this);
+ Emit(kPPC_S128Zero, g.DefineAsRegister(node));
+}
-void InstructionSelector::VisitI32x4ShrU(Node* node) { UNIMPLEMENTED(); }
+void InstructionSelector::VisitS128Select(Node* node) {
+ PPCOperandGenerator g(this);
+ Emit(kPPC_S128Select, g.DefineAsRegister(node),
+ g.UseRegister(node->InputAt(0)), g.UseRegister(node->InputAt(1)),
+ g.UseRegister(node->InputAt(2)));
+}
void InstructionSelector::VisitI32x4Neg(Node* node) { UNIMPLEMENTED(); }
-void InstructionSelector::VisitI32x4GtS(Node* node) { UNIMPLEMENTED(); }
-
-void InstructionSelector::VisitI32x4GeS(Node* node) { UNIMPLEMENTED(); }
-
-void InstructionSelector::VisitI32x4GtU(Node* node) { UNIMPLEMENTED(); }
-
-void InstructionSelector::VisitI32x4GeU(Node* node) { UNIMPLEMENTED(); }
-
-void InstructionSelector::VisitI16x8ReplaceLane(Node* node) { UNIMPLEMENTED(); }
-
-void InstructionSelector::VisitI16x8Shl(Node* node) { UNIMPLEMENTED(); }
-
-void InstructionSelector::VisitI16x8ShrS(Node* node) { UNIMPLEMENTED(); }
-
-void InstructionSelector::VisitI16x8ShrU(Node* node) { UNIMPLEMENTED(); }
-
-void InstructionSelector::VisitI16x8Add(Node* node) { UNIMPLEMENTED(); }
-
void InstructionSelector::VisitI16x8AddSaturateS(Node* node) {
UNIMPLEMENTED();
}
-void InstructionSelector::VisitI16x8Sub(Node* node) { UNIMPLEMENTED(); }
-
void InstructionSelector::VisitI16x8SubSaturateS(Node* node) {
UNIMPLEMENTED();
}
-void InstructionSelector::VisitI16x8Mul(Node* node) { UNIMPLEMENTED(); }
-
-void InstructionSelector::VisitI16x8MinS(Node* node) { UNIMPLEMENTED(); }
-
-void InstructionSelector::VisitI16x8MaxS(Node* node) { UNIMPLEMENTED(); }
-
-void InstructionSelector::VisitI16x8Eq(Node* node) { UNIMPLEMENTED(); }
-
-void InstructionSelector::VisitI16x8Ne(Node* node) { UNIMPLEMENTED(); }
-
void InstructionSelector::VisitI16x8AddSaturateU(Node* node) {
UNIMPLEMENTED();
}
@@ -2227,20 +2308,8 @@ void InstructionSelector::VisitI16x8SubSaturateU(Node* node) {
UNIMPLEMENTED();
}
-void InstructionSelector::VisitI16x8MinU(Node* node) { UNIMPLEMENTED(); }
-
-void InstructionSelector::VisitI16x8MaxU(Node* node) { UNIMPLEMENTED(); }
-
void InstructionSelector::VisitI16x8Neg(Node* node) { UNIMPLEMENTED(); }
-void InstructionSelector::VisitI16x8GtS(Node* node) { UNIMPLEMENTED(); }
-
-void InstructionSelector::VisitI16x8GeS(Node* node) { UNIMPLEMENTED(); }
-
-void InstructionSelector::VisitI16x8GtU(Node* node) { UNIMPLEMENTED(); }
-
-void InstructionSelector::VisitI16x8GeU(Node* node) { UNIMPLEMENTED(); }
-
void InstructionSelector::VisitI16x8RoundingAverageU(Node* node) {
UNIMPLEMENTED();
}
@@ -2251,32 +2320,14 @@ void InstructionSelector::VisitI8x16RoundingAverageU(Node* node) {
void InstructionSelector::VisitI8x16Neg(Node* node) { UNIMPLEMENTED(); }
-void InstructionSelector::VisitI8x16ReplaceLane(Node* node) { UNIMPLEMENTED(); }
-
-void InstructionSelector::VisitI8x16Add(Node* node) { UNIMPLEMENTED(); }
-
void InstructionSelector::VisitI8x16AddSaturateS(Node* node) {
UNIMPLEMENTED();
}
-void InstructionSelector::VisitI8x16Sub(Node* node) { UNIMPLEMENTED(); }
-
void InstructionSelector::VisitI8x16SubSaturateS(Node* node) {
UNIMPLEMENTED();
}
-void InstructionSelector::VisitI8x16MinS(Node* node) { UNIMPLEMENTED(); }
-
-void InstructionSelector::VisitI8x16MaxS(Node* node) { UNIMPLEMENTED(); }
-
-void InstructionSelector::VisitI8x16Eq(Node* node) { UNIMPLEMENTED(); }
-
-void InstructionSelector::VisitI8x16Ne(Node* node) { UNIMPLEMENTED(); }
-
-void InstructionSelector::VisitI8x16GtS(Node* node) { UNIMPLEMENTED(); }
-
-void InstructionSelector::VisitI8x16GeS(Node* node) { UNIMPLEMENTED(); }
-
void InstructionSelector::VisitI8x16AddSaturateU(Node* node) {
UNIMPLEMENTED();
}
@@ -2285,36 +2336,8 @@ void InstructionSelector::VisitI8x16SubSaturateU(Node* node) {
UNIMPLEMENTED();
}
-void InstructionSelector::VisitI8x16MinU(Node* node) { UNIMPLEMENTED(); }
-
-void InstructionSelector::VisitI8x16MaxU(Node* node) { UNIMPLEMENTED(); }
-
-void InstructionSelector::VisitI8x16GtU(Node* node) { UNIMPLEMENTED(); }
-
-void InstructionSelector::VisitI8x16GeU(Node* node) { UNIMPLEMENTED(); }
-
-void InstructionSelector::VisitS128And(Node* node) { UNIMPLEMENTED(); }
-
-void InstructionSelector::VisitS128Or(Node* node) { UNIMPLEMENTED(); }
-
-void InstructionSelector::VisitS128Xor(Node* node) { UNIMPLEMENTED(); }
-
-void InstructionSelector::VisitS128Not(Node* node) { UNIMPLEMENTED(); }
-
void InstructionSelector::VisitS128AndNot(Node* node) { UNIMPLEMENTED(); }
-void InstructionSelector::VisitS128Zero(Node* node) { UNIMPLEMENTED(); }
-
-void InstructionSelector::VisitF32x4Eq(Node* node) { UNIMPLEMENTED(); }
-
-void InstructionSelector::VisitF32x4Ne(Node* node) { UNIMPLEMENTED(); }
-
-void InstructionSelector::VisitF32x4Lt(Node* node) { UNIMPLEMENTED(); }
-
-void InstructionSelector::VisitF32x4Le(Node* node) { UNIMPLEMENTED(); }
-
-void InstructionSelector::VisitF32x4ReplaceLane(Node* node) { UNIMPLEMENTED(); }
-
void InstructionSelector::EmitPrepareResults(
ZoneVector<PushParameter>* results, const CallDescriptor* call_descriptor,
Node* node) {
@@ -2338,12 +2361,6 @@ void InstructionSelector::EmitPrepareResults(
}
}
-void InstructionSelector::VisitF32x4Add(Node* node) { UNIMPLEMENTED(); }
-
-void InstructionSelector::VisitF32x4Sub(Node* node) { UNIMPLEMENTED(); }
-
-void InstructionSelector::VisitF32x4Mul(Node* node) { UNIMPLEMENTED(); }
-
void InstructionSelector::VisitF32x4Sqrt(Node* node) { UNIMPLEMENTED(); }
void InstructionSelector::VisitF32x4Div(Node* node) { UNIMPLEMENTED(); }
@@ -2352,8 +2369,6 @@ void InstructionSelector::VisitF32x4Min(Node* node) { UNIMPLEMENTED(); }
void InstructionSelector::VisitF32x4Max(Node* node) { UNIMPLEMENTED(); }
-void InstructionSelector::VisitS128Select(Node* node) { UNIMPLEMENTED(); }
-
void InstructionSelector::VisitF32x4Neg(Node* node) { UNIMPLEMENTED(); }
void InstructionSelector::VisitF32x4Abs(Node* node) { UNIMPLEMENTED(); }
@@ -2364,10 +2379,6 @@ void InstructionSelector::VisitF32x4RecipSqrtApprox(Node* node) {
void InstructionSelector::VisitF32x4RecipApprox(Node* node) { UNIMPLEMENTED(); }
-void InstructionSelector::VisitF32x4AddHoriz(Node* node) { UNIMPLEMENTED(); }
-void InstructionSelector::VisitI32x4AddHoriz(Node* node) { UNIMPLEMENTED(); }
-void InstructionSelector::VisitI16x8AddHoriz(Node* node) { UNIMPLEMENTED(); }
-
void InstructionSelector::VisitF32x4SConvertI32x4(Node* node) {
UNIMPLEMENTED();
}
@@ -2431,68 +2442,32 @@ void InstructionSelector::VisitI8x16UConvertI16x8(Node* node) {
UNIMPLEMENTED();
}
-void InstructionSelector::VisitS1x4AnyTrue(Node* node) { UNIMPLEMENTED(); }
-
-void InstructionSelector::VisitS1x4AllTrue(Node* node) { UNIMPLEMENTED(); }
-
-void InstructionSelector::VisitS1x8AnyTrue(Node* node) { UNIMPLEMENTED(); }
+void InstructionSelector::VisitV32x4AnyTrue(Node* node) { UNIMPLEMENTED(); }
-void InstructionSelector::VisitS1x8AllTrue(Node* node) { UNIMPLEMENTED(); }
+void InstructionSelector::VisitV32x4AllTrue(Node* node) { UNIMPLEMENTED(); }
-void InstructionSelector::VisitS1x16AnyTrue(Node* node) { UNIMPLEMENTED(); }
+void InstructionSelector::VisitV16x8AnyTrue(Node* node) { UNIMPLEMENTED(); }
-void InstructionSelector::VisitS1x16AllTrue(Node* node) { UNIMPLEMENTED(); }
+void InstructionSelector::VisitV16x8AllTrue(Node* node) { UNIMPLEMENTED(); }
-void InstructionSelector::VisitI8x16Shl(Node* node) { UNIMPLEMENTED(); }
+void InstructionSelector::VisitV8x16AnyTrue(Node* node) { UNIMPLEMENTED(); }
-void InstructionSelector::VisitI8x16ShrS(Node* node) { UNIMPLEMENTED(); }
-
-void InstructionSelector::VisitI8x16ShrU(Node* node) { UNIMPLEMENTED(); }
-
-void InstructionSelector::VisitI8x16Mul(Node* node) { UNIMPLEMENTED(); }
+void InstructionSelector::VisitV8x16AllTrue(Node* node) { UNIMPLEMENTED(); }
void InstructionSelector::VisitS8x16Shuffle(Node* node) { UNIMPLEMENTED(); }
void InstructionSelector::VisitS8x16Swizzle(Node* node) { UNIMPLEMENTED(); }
-void InstructionSelector::VisitF64x2ReplaceLane(Node* node) { UNIMPLEMENTED(); }
-
void InstructionSelector::VisitF64x2Abs(Node* node) { UNIMPLEMENTED(); }
void InstructionSelector::VisitF64x2Neg(Node* node) { UNIMPLEMENTED(); }
void InstructionSelector::VisitF64x2Sqrt(Node* node) { UNIMPLEMENTED(); }
-void InstructionSelector::VisitF64x2Add(Node* node) { UNIMPLEMENTED(); }
-
-void InstructionSelector::VisitF64x2Sub(Node* node) { UNIMPLEMENTED(); }
-
-void InstructionSelector::VisitF64x2Mul(Node* node) { UNIMPLEMENTED(); }
-
void InstructionSelector::VisitF64x2Div(Node* node) { UNIMPLEMENTED(); }
-void InstructionSelector::VisitF64x2Eq(Node* node) { UNIMPLEMENTED(); }
-
-void InstructionSelector::VisitF64x2Ne(Node* node) { UNIMPLEMENTED(); }
-
-void InstructionSelector::VisitF64x2Lt(Node* node) { UNIMPLEMENTED(); }
-
-void InstructionSelector::VisitF64x2Le(Node* node) { UNIMPLEMENTED(); }
-
void InstructionSelector::VisitI64x2Neg(Node* node) { UNIMPLEMENTED(); }
-void InstructionSelector::VisitI64x2Add(Node* node) { UNIMPLEMENTED(); }
-
-void InstructionSelector::VisitI64x2Sub(Node* node) { UNIMPLEMENTED(); }
-
-void InstructionSelector::VisitI64x2Shl(Node* node) { UNIMPLEMENTED(); }
-
-void InstructionSelector::VisitI64x2ShrS(Node* node) { UNIMPLEMENTED(); }
-
-void InstructionSelector::VisitI64x2ShrU(Node* node) { UNIMPLEMENTED(); }
-
-void InstructionSelector::VisitI64x2Mul(Node* node) { UNIMPLEMENTED(); }
-
void InstructionSelector::VisitF64x2Min(Node* node) { UNIMPLEMENTED(); }
void InstructionSelector::VisitF64x2Max(Node* node) { UNIMPLEMENTED(); }
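The new PPC selector code leans on the X-macro pattern: one list macro (SIMD_BINOP_LIST and friends) is expanded by several VISIT macros to stamp out one Visit function per opcode. A minimal self-contained sketch of the pattern, with made-up names and printf bodies standing in for the Emit calls:

#include <cstdio>

// Hypothetical opcode list, in the same X-macro style as SIMD_BINOP_LIST.
#define DEMO_BINOP_LIST(V) \
  V(I32x4Add)              \
  V(I32x4Sub)              \
  V(I32x4Mul)

// Stamp out one visitor per entry, mirroring SIMD_VISIT_BINOP.
#define DEMO_VISIT_BINOP(Name)                        \
  void Visit##Name(int lhs, int rhs) {                \
    std::printf(#Name ": lhs=%d rhs=%d\n", lhs, rhs); \
  }
DEMO_BINOP_LIST(DEMO_VISIT_BINOP)
#undef DEMO_VISIT_BINOP
#undef DEMO_BINOP_LIST

int main() {
  VisitI32x4Add(1, 2);  // each call hits a distinct generated function
  VisitI32x4Sub(3, 4);
  VisitI32x4Mul(5, 6);
  return 0;
}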
diff --git a/chromium/v8/src/compiler/backend/register-allocator.cc b/chromium/v8/src/compiler/backend/register-allocator.cc
index 8b74ef68b14..aab47722044 100644
--- a/chromium/v8/src/compiler/backend/register-allocator.cc
+++ b/chromium/v8/src/compiler/backend/register-allocator.cc
@@ -391,8 +391,8 @@ LiveRange::LiveRange(int relative_id, MachineRepresentation rep,
next_(nullptr),
current_interval_(nullptr),
last_processed_use_(nullptr),
- current_hint_position_(nullptr),
- splitting_pointer_(nullptr) {
+ splitting_pointer_(nullptr),
+ current_hint_position_(nullptr) {
DCHECK(AllocatedOperand::IsSupportedRepresentation(rep));
bits_ = AssignedRegisterField::encode(kUnassignedRegister) |
RepresentationField::encode(rep) |
@@ -473,11 +473,41 @@ RegisterKind LiveRange::kind() const {
return IsFloatingPoint(representation()) ? FP_REGISTERS : GENERAL_REGISTERS;
}
-UsePosition* LiveRange::FirstHintPosition(int* register_index) const {
- for (UsePosition* pos = first_pos_; pos != nullptr; pos = pos->next()) {
- if (pos->HintRegister(register_index)) return pos;
+UsePosition* LiveRange::FirstHintPosition(int* register_index) {
+ if (!first_pos_) return nullptr;
+ if (current_hint_position_) {
+ if (current_hint_position_->pos() < first_pos_->pos()) {
+ current_hint_position_ = first_pos_;
+ }
+ if (current_hint_position_->pos() > End()) {
+ current_hint_position_ = nullptr;
+ }
}
- return nullptr;
+ bool needs_revisit = false;
+ UsePosition* pos = current_hint_position_;
+ for (; pos != nullptr; pos = pos->next()) {
+ if (pos->HintRegister(register_index)) {
+ break;
+ }
+ // Phi and use position hints can be assigned during allocation which
+ // would invalidate the cached hint position. Make sure we revisit them.
+ needs_revisit = needs_revisit ||
+ pos->hint_type() == UsePositionHintType::kPhi ||
+ pos->hint_type() == UsePositionHintType::kUsePos;
+ }
+ if (!needs_revisit) {
+ current_hint_position_ = pos;
+ }
+#ifdef DEBUG
+ UsePosition* pos_check = first_pos_;
+ for (; pos_check != nullptr; pos_check = pos_check->next()) {
+ if (pos_check->HasHint()) {
+ break;
+ }
+ }
+ CHECK_EQ(pos, pos_check);
+#endif
+ return pos;
}
UsePosition* LiveRange::NextUsePosition(LifetimePosition start) const {
@@ -684,6 +714,7 @@ UsePosition* LiveRange::DetachAt(LifetimePosition position, LiveRange* result,
first_pos_ = nullptr;
}
result->first_pos_ = use_after;
+ result->current_hint_position_ = current_hint_position_;
// Discard cached iteration state. It might be pointing
// to the use that no longer belongs to this live range.
@@ -693,6 +724,7 @@ UsePosition* LiveRange::DetachAt(LifetimePosition position, LiveRange* result,
if (connect_hints == ConnectHints && use_before != nullptr &&
use_after != nullptr) {
use_after->SetHint(use_before);
+ result->current_hint_position_ = use_after;
}
#ifdef DEBUG
VerifyChildStructure();
@@ -2660,6 +2692,7 @@ void LiveRangeBuilder::BuildLiveRanges() {
pos->set_type(new_type, true);
}
}
+ range->ResetCurrentHintPosition();
}
for (auto preassigned : data()->preassigned_slot_ranges()) {
TopLevelLiveRange* range = preassigned.first;
@@ -3493,7 +3526,7 @@ void LinearScanAllocator::ComputeStateFromManyPredecessors(
// Choose the live ranges from the majority.
const size_t majority =
(current_block->PredecessorCount() + 2 - deferred_blocks) / 2;
- bool taken_registers[RegisterConfiguration::kMaxRegisters] = {0};
+ bool taken_registers[RegisterConfiguration::kMaxRegisters] = {false};
auto assign_to_live = [this, counts, majority](
std::function<bool(TopLevelLiveRange*)> filter,
RangeWithRegisterSet* to_be_live,
diff --git a/chromium/v8/src/compiler/backend/register-allocator.h b/chromium/v8/src/compiler/backend/register-allocator.h
index f890bd868b7..85a9cf12170 100644
--- a/chromium/v8/src/compiler/backend/register-allocator.h
+++ b/chromium/v8/src/compiler/backend/register-allocator.h
@@ -618,14 +618,14 @@ class V8_EXPORT_PRIVATE LiveRange : public NON_EXPORTED_BASE(ZoneObject) {
LiveRange* SplitAt(LifetimePosition position, Zone* zone);
// Returns nullptr when no register is hinted, otherwise sets register_index.
- UsePosition* FirstHintPosition(int* register_index) const;
- UsePosition* FirstHintPosition() const {
+ // Uses {current_hint_position_} as a cache, and tries to update it.
+ UsePosition* FirstHintPosition(int* register_index);
+ UsePosition* FirstHintPosition() {
int register_index;
return FirstHintPosition(&register_index);
}
UsePosition* current_hint_position() const {
- DCHECK(current_hint_position_ == FirstHintPosition());
return current_hint_position_;
}
@@ -656,6 +656,7 @@ class V8_EXPORT_PRIVATE LiveRange : public NON_EXPORTED_BASE(ZoneObject) {
const InstructionOperand& spill_op);
void SetUseHints(int register_index);
void UnsetUseHints() { SetUseHints(kUnassignedRegister); }
+ void ResetCurrentHintPosition() { current_hint_position_ = first_pos_; }
void Print(const RegisterConfiguration* config, bool with_children) const;
void Print(bool with_children) const;
@@ -701,10 +702,10 @@ class V8_EXPORT_PRIVATE LiveRange : public NON_EXPORTED_BASE(ZoneObject) {
mutable UseInterval* current_interval_;
// This is used as a cache, it doesn't affect correctness.
mutable UsePosition* last_processed_use_;
- // This is used as a cache, it's invalid outside of BuildLiveRanges.
- mutable UsePosition* current_hint_position_;
// Cache the last position splintering stopped at.
mutable UsePosition* splitting_pointer_;
+ // This is used as a cache in BuildLiveRanges and during register allocation.
+ UsePosition* current_hint_position_;
LiveRangeBundle* bundle_ = nullptr;
// Next interval start, relative to the current linear scan position.
LifetimePosition next_start_;
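The register-allocator change above lets FirstHintPosition start its scan at the cached current_hint_position_ instead of always walking from first_pos_, invalidating the cache when it falls outside the live range and declining to commit it while phi/use-position hints may still change. A simplified standalone sketch of that caching idea (hypothetical UsePos/Range types, not the real V8 classes):

#include <cassert>

// Stand-in for V8's UsePosition list: a singly linked list where some nodes
// carry a register hint.
struct UsePos {
  int pos;            // lifetime position
  bool has_hint;      // does this use hint a register?
  int hint_register;  // valid only if has_hint
  UsePos* next;
};

struct Range {
  UsePos* first;
  int end;
  UsePos* cached_hint;  // mirrors current_hint_position_

  // Returns the first hinted use, starting the scan from the cache when the
  // cache still points inside [first->pos, end]. The real code additionally
  // skips the cache update while hints may still be assigned.
  UsePos* FirstHintPosition(int* register_index) {
    if (first == nullptr) return nullptr;
    if (cached_hint != nullptr) {
      if (cached_hint->pos < first->pos) cached_hint = first;
      if (cached_hint->pos > end) cached_hint = nullptr;
    }
    UsePos* p = cached_hint ? cached_hint : first;
    for (; p != nullptr; p = p->next) {
      if (p->has_hint) break;
    }
    cached_hint = p;
    if (p != nullptr) *register_index = p->hint_register;
    return p;
  }
};

int main() {
  UsePos c{30, true, 5, nullptr};
  UsePos b{20, false, 0, &c};
  UsePos a{10, false, 0, &b};
  Range r{&a, 100, nullptr};
  int reg = -1;
  UsePos* hint = r.FirstHintPosition(&reg);
  assert(hint == &c && reg == 5);
  // The second call starts from the cached node instead of rescanning from a.
  hint = r.FirstHintPosition(&reg);
  assert(hint == &c);
  return 0;
}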
diff --git a/chromium/v8/src/compiler/backend/s390/code-generator-s390.cc b/chromium/v8/src/compiler/backend/s390/code-generator-s390.cc
index cb79373b425..bef8e7c15aa 100644
--- a/chromium/v8/src/compiler/backend/s390/code-generator-s390.cc
+++ b/chromium/v8/src/compiler/backend/s390/code-generator-s390.cc
@@ -3853,10 +3853,10 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
break;
}
// vector boolean unops
- case kS390_S1x2AnyTrue:
- case kS390_S1x4AnyTrue:
- case kS390_S1x8AnyTrue:
- case kS390_S1x16AnyTrue: {
+ case kS390_V64x2AnyTrue:
+ case kS390_V32x4AnyTrue:
+ case kS390_V16x8AnyTrue:
+ case kS390_V8x16AnyTrue: {
Simd128Register src = i.InputSimd128Register(0);
Register dst = i.OutputRegister();
Register temp = i.TempRegister(0);
@@ -3879,19 +3879,19 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
__ vtm(kScratchDoubleReg, kScratchDoubleReg, Condition(0), Condition(0), \
Condition(0)); \
__ locgr(Condition(8), dst, temp);
- case kS390_S1x2AllTrue: {
+ case kS390_V64x2AllTrue: {
SIMD_ALL_TRUE(3)
break;
}
- case kS390_S1x4AllTrue: {
+ case kS390_V32x4AllTrue: {
SIMD_ALL_TRUE(2)
break;
}
- case kS390_S1x8AllTrue: {
+ case kS390_V16x8AllTrue: {
SIMD_ALL_TRUE(1)
break;
}
- case kS390_S1x16AllTrue: {
+ case kS390_V8x16AllTrue: {
SIMD_ALL_TRUE(0)
break;
}
@@ -4154,10 +4154,10 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
for (int i = 0, j = 0; i < 2; i++, j = +2) {
#ifdef V8_TARGET_BIG_ENDIAN
__ lgfi(i < 1 ? ip : r0, Operand(k8x16_indices[j + 1]));
- __ aih(i < 1 ? ip : r0, Operand(k8x16_indices[j]));
+ __ iihf(i < 1 ? ip : r0, Operand(k8x16_indices[j]));
#else
__ lgfi(i < 1 ? ip : r0, Operand(k8x16_indices[j]));
- __ aih(i < 1 ? ip : r0, Operand(k8x16_indices[j + 1]));
+ __ iihf(i < 1 ? ip : r0, Operand(k8x16_indices[j + 1]));
#endif
}
__ vlvgp(kScratchDoubleReg, ip, r0);
@@ -4185,6 +4185,119 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
#endif
break;
}
+ case kS390_I32x4BitMask: {
+#ifdef V8_TARGET_BIG_ENDIAN
+ __ lgfi(kScratchReg, Operand(0x204060));
+ __ iihf(kScratchReg, Operand(0x80808080)); // Zeroing the high bits.
+#else
+ __ lgfi(kScratchReg, Operand(0x80808080));
+ __ iihf(kScratchReg, Operand(0x60402000));
+#endif
+ __ vlvg(kScratchDoubleReg, kScratchReg, MemOperand(r0, 1), Condition(3));
+ __ vbperm(kScratchDoubleReg, i.InputSimd128Register(0), kScratchDoubleReg,
+ Condition(0), Condition(0), Condition(0));
+ __ vlgv(i.OutputRegister(), kScratchDoubleReg, MemOperand(r0, 7),
+ Condition(0));
+ break;
+ }
+ case kS390_I16x8BitMask: {
+#ifdef V8_TARGET_BIG_ENDIAN
+ __ lgfi(kScratchReg, Operand(0x40506070));
+ __ iihf(kScratchReg, Operand(0x102030));
+#else
+ __ lgfi(kScratchReg, Operand(0x30201000));
+ __ iihf(kScratchReg, Operand(0x70605040));
+#endif
+ __ vlvg(kScratchDoubleReg, kScratchReg, MemOperand(r0, 1), Condition(3));
+ __ vbperm(kScratchDoubleReg, i.InputSimd128Register(0), kScratchDoubleReg,
+ Condition(0), Condition(0), Condition(0));
+ __ vlgv(i.OutputRegister(), kScratchDoubleReg, MemOperand(r0, 7),
+ Condition(0));
+ break;
+ }
+ case kS390_I8x16BitMask: {
+#ifdef V8_TARGET_BIG_ENDIAN
+ __ lgfi(r0, Operand(0x60687078));
+ __ iihf(r0, Operand(0x40485058));
+ __ lgfi(ip, Operand(0x20283038));
+ __ iihf(ip, Operand(0x81018));
+#else
+ __ lgfi(ip, Operand(0x58504840));
+ __ iihf(ip, Operand(0x78706860));
+ __ lgfi(r0, Operand(0x18100800));
+ __ iihf(r0, Operand(0x38302820));
+#endif
+ __ vlvgp(kScratchDoubleReg, ip, r0);
+ __ vbperm(kScratchDoubleReg, i.InputSimd128Register(0), kScratchDoubleReg,
+ Condition(0), Condition(0), Condition(0));
+ __ vlgv(i.OutputRegister(), kScratchDoubleReg, MemOperand(r0, 3),
+ Condition(1));
+ break;
+ }
+ case kS390_F32x4Pmin: {
+ __ vfmin(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputSimd128Register(1), Condition(3), Condition(0),
+ Condition(2));
+ break;
+ }
+ case kS390_F32x4Pmax: {
+ __ vfmax(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputSimd128Register(1), Condition(3), Condition(0),
+ Condition(2));
+ break;
+ }
+ case kS390_F64x2Pmin: {
+ __ vfmin(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputSimd128Register(1), Condition(3), Condition(0),
+ Condition(3));
+ break;
+ }
+ case kS390_F64x2Pmax: {
+ __ vfmax(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputSimd128Register(1), Condition(3), Condition(0),
+ Condition(3));
+ break;
+ }
+ case kS390_F64x2Ceil: {
+ __ vfi(i.OutputSimd128Register(), i.InputSimd128Register(0), Condition(6),
+ Condition(0), Condition(3));
+ break;
+ }
+ case kS390_F64x2Floor: {
+ __ vfi(i.OutputSimd128Register(), i.InputSimd128Register(0), Condition(7),
+ Condition(0), Condition(3));
+ break;
+ }
+ case kS390_F64x2Trunc: {
+ __ vfi(i.OutputSimd128Register(), i.InputSimd128Register(0), Condition(5),
+ Condition(0), Condition(3));
+ break;
+ }
+ case kS390_F64x2NearestInt: {
+ __ vfi(i.OutputSimd128Register(), i.InputSimd128Register(0), Condition(4),
+ Condition(0), Condition(3));
+ break;
+ }
+ case kS390_F32x4Ceil: {
+ __ vfi(i.OutputSimd128Register(), i.InputSimd128Register(0), Condition(6),
+ Condition(0), Condition(2));
+ break;
+ }
+ case kS390_F32x4Floor: {
+ __ vfi(i.OutputSimd128Register(), i.InputSimd128Register(0), Condition(7),
+ Condition(0), Condition(2));
+ break;
+ }
+ case kS390_F32x4Trunc: {
+ __ vfi(i.OutputSimd128Register(), i.InputSimd128Register(0), Condition(5),
+ Condition(0), Condition(2));
+ break;
+ }
+ case kS390_F32x4NearestInt: {
+ __ vfi(i.OutputSimd128Register(), i.InputSimd128Register(0), Condition(4),
+ Condition(0), Condition(2));
+ break;
+ }
case kS390_StoreCompressTagged: {
CHECK(!instr->HasOutput());
size_t index = 0;
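The new kS390_I32x4BitMask / I16x8BitMask / I8x16BitMask cases gather the most significant bit of every lane into a small integer, using vbperm with a per-lane bit-index constant. Semantically this is the operation below, shown as a plain scalar loop rather than vector code:

#include <cassert>
#include <cstdint>

// Scalar model of i32x4.bitmask: bit i of the result is the sign bit (MSB)
// of lane i. The vbperm-based sequence above computes the same thing with a
// single permute-and-extract on the vector unit.
uint32_t I32x4BitMask(const int32_t lanes[4]) {
  uint32_t mask = 0;
  for (int i = 0; i < 4; ++i) {
    uint32_t msb = static_cast<uint32_t>(lanes[i]) >> 31;  // sign bit of lane i
    mask |= msb << i;
  }
  return mask;
}

int main() {
  const int32_t v[4] = {-1, 2, -3, 4};  // lanes 0 and 2 are negative
  assert(I32x4BitMask(v) == 0x5u);
  return 0;
}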
diff --git a/chromium/v8/src/compiler/backend/s390/instruction-codes-s390.h b/chromium/v8/src/compiler/backend/s390/instruction-codes-s390.h
index 6101b22166c..f588e854265 100644
--- a/chromium/v8/src/compiler/backend/s390/instruction-codes-s390.h
+++ b/chromium/v8/src/compiler/backend/s390/instruction-codes-s390.h
@@ -215,6 +215,12 @@ namespace compiler {
V(S390_F64x2ExtractLane) \
V(S390_F64x2Qfma) \
V(S390_F64x2Qfms) \
+ V(S390_F64x2Pmin) \
+ V(S390_F64x2Pmax) \
+ V(S390_F64x2Ceil) \
+ V(S390_F64x2Floor) \
+ V(S390_F64x2Trunc) \
+ V(S390_F64x2NearestInt) \
V(S390_F32x4Splat) \
V(S390_F32x4ExtractLane) \
V(S390_F32x4ReplaceLane) \
@@ -238,6 +244,12 @@ namespace compiler {
V(S390_F32x4Max) \
V(S390_F32x4Qfma) \
V(S390_F32x4Qfms) \
+ V(S390_F32x4Pmin) \
+ V(S390_F32x4Pmax) \
+ V(S390_F32x4Ceil) \
+ V(S390_F32x4Floor) \
+ V(S390_F32x4Trunc) \
+ V(S390_F32x4NearestInt) \
V(S390_I64x2Neg) \
V(S390_I64x2Add) \
V(S390_I64x2Sub) \
@@ -286,6 +298,7 @@ namespace compiler {
V(S390_I32x4UConvertI16x8Low) \
V(S390_I32x4UConvertI16x8High) \
V(S390_I32x4Abs) \
+ V(S390_I32x4BitMask) \
V(S390_I16x8Splat) \
V(S390_I16x8ExtractLaneU) \
V(S390_I16x8ExtractLaneS) \
@@ -320,6 +333,7 @@ namespace compiler {
V(S390_I16x8SubSaturateU) \
V(S390_I16x8RoundingAverageU) \
V(S390_I16x8Abs) \
+ V(S390_I16x8BitMask) \
V(S390_I8x16Splat) \
V(S390_I8x16ExtractLaneU) \
V(S390_I8x16ExtractLaneS) \
@@ -349,16 +363,17 @@ namespace compiler {
V(S390_I8x16SubSaturateU) \
V(S390_I8x16RoundingAverageU) \
V(S390_I8x16Abs) \
+ V(S390_I8x16BitMask) \
V(S390_S8x16Shuffle) \
V(S390_S8x16Swizzle) \
- V(S390_S1x2AnyTrue) \
- V(S390_S1x4AnyTrue) \
- V(S390_S1x8AnyTrue) \
- V(S390_S1x16AnyTrue) \
- V(S390_S1x2AllTrue) \
- V(S390_S1x4AllTrue) \
- V(S390_S1x8AllTrue) \
- V(S390_S1x16AllTrue) \
+ V(S390_V64x2AnyTrue) \
+ V(S390_V32x4AnyTrue) \
+ V(S390_V16x8AnyTrue) \
+ V(S390_V8x16AnyTrue) \
+ V(S390_V64x2AllTrue) \
+ V(S390_V32x4AllTrue) \
+ V(S390_V16x8AllTrue) \
+ V(S390_V8x16AllTrue) \
V(S390_S128And) \
V(S390_S128Or) \
V(S390_S128Xor) \
diff --git a/chromium/v8/src/compiler/backend/s390/instruction-scheduler-s390.cc b/chromium/v8/src/compiler/backend/s390/instruction-scheduler-s390.cc
index 502ce229f50..775590a863d 100644
--- a/chromium/v8/src/compiler/backend/s390/instruction-scheduler-s390.cc
+++ b/chromium/v8/src/compiler/backend/s390/instruction-scheduler-s390.cc
@@ -161,6 +161,12 @@ int InstructionScheduler::GetTargetInstructionFlags(
case kS390_F64x2ExtractLane:
case kS390_F64x2Qfma:
case kS390_F64x2Qfms:
+ case kS390_F64x2Pmin:
+ case kS390_F64x2Pmax:
+ case kS390_F64x2Ceil:
+ case kS390_F64x2Floor:
+ case kS390_F64x2Trunc:
+ case kS390_F64x2NearestInt:
case kS390_F32x4Splat:
case kS390_F32x4ExtractLane:
case kS390_F32x4ReplaceLane:
@@ -184,6 +190,12 @@ int InstructionScheduler::GetTargetInstructionFlags(
case kS390_F32x4Max:
case kS390_F32x4Qfma:
case kS390_F32x4Qfms:
+ case kS390_F32x4Pmin:
+ case kS390_F32x4Pmax:
+ case kS390_F32x4Ceil:
+ case kS390_F32x4Floor:
+ case kS390_F32x4Trunc:
+ case kS390_F32x4NearestInt:
case kS390_I64x2Neg:
case kS390_I64x2Add:
case kS390_I64x2Sub:
@@ -232,6 +244,7 @@ int InstructionScheduler::GetTargetInstructionFlags(
case kS390_I32x4UConvertI16x8Low:
case kS390_I32x4UConvertI16x8High:
case kS390_I32x4Abs:
+ case kS390_I32x4BitMask:
case kS390_I16x8Splat:
case kS390_I16x8ExtractLaneU:
case kS390_I16x8ExtractLaneS:
@@ -266,6 +279,7 @@ int InstructionScheduler::GetTargetInstructionFlags(
case kS390_I16x8SubSaturateU:
case kS390_I16x8RoundingAverageU:
case kS390_I16x8Abs:
+ case kS390_I16x8BitMask:
case kS390_I8x16Splat:
case kS390_I8x16ExtractLaneU:
case kS390_I8x16ExtractLaneS:
@@ -295,16 +309,17 @@ int InstructionScheduler::GetTargetInstructionFlags(
case kS390_I8x16SubSaturateU:
case kS390_I8x16RoundingAverageU:
case kS390_I8x16Abs:
+ case kS390_I8x16BitMask:
case kS390_S8x16Shuffle:
case kS390_S8x16Swizzle:
- case kS390_S1x2AnyTrue:
- case kS390_S1x4AnyTrue:
- case kS390_S1x8AnyTrue:
- case kS390_S1x16AnyTrue:
- case kS390_S1x2AllTrue:
- case kS390_S1x4AllTrue:
- case kS390_S1x8AllTrue:
- case kS390_S1x16AllTrue:
+ case kS390_V64x2AnyTrue:
+ case kS390_V32x4AnyTrue:
+ case kS390_V16x8AnyTrue:
+ case kS390_V8x16AnyTrue:
+ case kS390_V64x2AllTrue:
+ case kS390_V32x4AllTrue:
+ case kS390_V16x8AllTrue:
+ case kS390_V8x16AllTrue:
case kS390_S128And:
case kS390_S128Or:
case kS390_S128Xor:
diff --git a/chromium/v8/src/compiler/backend/s390/instruction-selector-s390.cc b/chromium/v8/src/compiler/backend/s390/instruction-selector-s390.cc
index 515e8dd127b..39089f346ed 100644
--- a/chromium/v8/src/compiler/backend/s390/instruction-selector-s390.cc
+++ b/chromium/v8/src/compiler/backend/s390/instruction-selector-s390.cc
@@ -2635,11 +2635,19 @@ void InstructionSelector::VisitWord64AtomicStore(Node* node) {
V(F64x2Abs) \
V(F64x2Neg) \
V(F64x2Sqrt) \
+ V(F64x2Ceil) \
+ V(F64x2Floor) \
+ V(F64x2Trunc) \
+ V(F64x2NearestInt) \
V(F32x4Abs) \
V(F32x4Neg) \
V(F32x4RecipApprox) \
V(F32x4RecipSqrtApprox) \
V(F32x4Sqrt) \
+ V(F32x4Ceil) \
+ V(F32x4Floor) \
+ V(F32x4Trunc) \
+ V(F32x4NearestInt) \
V(I64x2Neg) \
V(I16x8Abs) \
V(I32x4Neg) \
@@ -2672,14 +2680,14 @@ void InstructionSelector::VisitWord64AtomicStore(Node* node) {
V(I8x16ShrU)
#define SIMD_BOOL_LIST(V) \
- V(S1x2AnyTrue) \
- V(S1x4AnyTrue) \
- V(S1x8AnyTrue) \
- V(S1x16AnyTrue) \
- V(S1x2AllTrue) \
- V(S1x4AllTrue) \
- V(S1x8AllTrue) \
- V(S1x16AllTrue)
+ V(V64x2AnyTrue) \
+ V(V32x4AnyTrue) \
+ V(V16x8AnyTrue) \
+ V(V8x16AnyTrue) \
+ V(V64x2AllTrue) \
+ V(V32x4AllTrue) \
+ V(V16x8AllTrue) \
+ V(V8x16AllTrue)
#define SIMD_CONVERSION_LIST(V) \
V(I32x4SConvertF32x4) \
@@ -2794,6 +2802,29 @@ SIMD_VISIT_QFMOP(F64x2Qfms)
SIMD_VISIT_QFMOP(F32x4Qfma)
SIMD_VISIT_QFMOP(F32x4Qfms)
#undef SIMD_VISIT_QFMOP
+
+#define SIMD_VISIT_BITMASK(Opcode) \
+ void InstructionSelector::Visit##Opcode(Node* node) { \
+ S390OperandGenerator g(this); \
+ Emit(kS390_##Opcode, g.DefineAsRegister(node), \
+ g.UseUniqueRegister(node->InputAt(0))); \
+ }
+SIMD_VISIT_BITMASK(I8x16BitMask)
+SIMD_VISIT_BITMASK(I16x8BitMask)
+SIMD_VISIT_BITMASK(I32x4BitMask)
+#undef SIMD_VISIT_BITMASK
+
+#define SIMD_VISIT_PMIN_MAX(Type) \
+ void InstructionSelector::Visit##Type(Node* node) { \
+ S390OperandGenerator g(this); \
+ Emit(kS390_##Type, g.DefineAsRegister(node), \
+ g.UseRegister(node->InputAt(0)), g.UseRegister(node->InputAt(1))); \
+ }
+SIMD_VISIT_PMIN_MAX(F64x2Pmin)
+SIMD_VISIT_PMIN_MAX(F32x4Pmin)
+SIMD_VISIT_PMIN_MAX(F64x2Pmax)
+SIMD_VISIT_PMIN_MAX(F32x4Pmax)
+#undef SIMD_VISIT_PMIN_MAX
#undef SIMD_TYPES
void InstructionSelector::VisitS8x16Shuffle(Node* node) {
diff --git a/chromium/v8/src/compiler/backend/x64/code-generator-x64.cc b/chromium/v8/src/compiler/backend/x64/code-generator-x64.cc
index 4f99ad49ba8..110a478c543 100644
--- a/chromium/v8/src/compiler/backend/x64/code-generator-x64.cc
+++ b/chromium/v8/src/compiler/backend/x64/code-generator-x64.cc
@@ -194,6 +194,94 @@ class OutOfLineLoadFloat64NaN final : public OutOfLineCode {
XMMRegister const result_;
};
+class OutOfLineF32x4Min final : public OutOfLineCode {
+ public:
+ OutOfLineF32x4Min(CodeGenerator* gen, XMMRegister result, XMMRegister error)
+ : OutOfLineCode(gen), result_(result), error_(error) {}
+
+ void Generate() final {
+ // |result| is the partial result, |kScratchDoubleReg| is the error.
+ // propagate -0's and NaNs (possibly non-canonical) from the error.
+ __ Orps(error_, result_);
+ // Canonicalize NaNs by quieting and clearing the payload.
+ __ Cmpps(result_, error_, int8_t{3});
+ __ Orps(error_, result_);
+ __ Psrld(result_, byte{10});
+ __ Andnps(result_, error_);
+ }
+
+ private:
+ XMMRegister const result_;
+ XMMRegister const error_;
+};
+
+class OutOfLineF64x2Min final : public OutOfLineCode {
+ public:
+ OutOfLineF64x2Min(CodeGenerator* gen, XMMRegister result, XMMRegister error)
+ : OutOfLineCode(gen), result_(result), error_(error) {}
+
+ void Generate() final {
+ // |result| is the partial result, |kScratchDoubleReg| is the error.
+ // propagate -0's and NaNs (possibly non-canonical) from the error.
+ __ Orpd(error_, result_);
+ // Canonicalize NaNs by quieting and clearing the payload.
+ __ Cmppd(result_, error_, int8_t{3});
+ __ Orpd(error_, result_);
+ __ Psrlq(result_, 13);
+ __ Andnpd(result_, error_);
+ }
+
+ private:
+ XMMRegister const result_;
+ XMMRegister const error_;
+};
+
+class OutOfLineF32x4Max final : public OutOfLineCode {
+ public:
+ OutOfLineF32x4Max(CodeGenerator* gen, XMMRegister result, XMMRegister error)
+ : OutOfLineCode(gen), result_(result), error_(error) {}
+
+ void Generate() final {
+ // |result| is the partial result, |kScratchDoubleReg| is the error.
+ // Propagate NaNs (possibly non-canonical).
+ __ Orps(result_, error_);
+ // Propagate sign errors and (subtle) quiet NaNs.
+ __ Subps(result_, error_);
+ // Canonicalize NaNs by clearing the payload. Sign is non-deterministic.
+ __ Cmpps(error_, result_, int8_t{3});
+ __ Psrld(error_, byte{10});
+ __ Andnps(error_, result_);
+ __ Movaps(result_, error_);
+ }
+
+ private:
+ XMMRegister const result_;
+ XMMRegister const error_;
+};
+
+class OutOfLineF64x2Max final : public OutOfLineCode {
+ public:
+ OutOfLineF64x2Max(CodeGenerator* gen, XMMRegister result, XMMRegister error)
+ : OutOfLineCode(gen), result_(result), error_(error) {}
+
+ void Generate() final {
+ // |result| is the partial result, |kScratchDoubleReg| is the error.
+ // Propagate NaNs (possibly non-canonical).
+ __ Orpd(result_, error_);
+ // Propagate sign errors and (subtle) quiet NaNs.
+ __ Subpd(result_, error_);
+ // Canonicalize NaNs by clearing the payload. Sign is non-deterministic.
+ __ Cmppd(error_, result_, int8_t{3});
+ __ Psrlq(error_, byte{13});
+ __ Andnpd(error_, result_);
+ __ Movapd(result_, error_);
+ }
+
+ private:
+ XMMRegister const result_;
+ XMMRegister const error_;
+};
+
class OutOfLineTruncateDoubleToI final : public OutOfLineCode {
public:
OutOfLineTruncateDoubleToI(CodeGenerator* gen, Register result,
@@ -2328,18 +2416,18 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
XMMRegister src1 = i.InputSimd128Register(1),
dst = i.OutputSimd128Register();
DCHECK_EQ(dst, i.InputSimd128Register(0));
- // The minpd instruction doesn't propagate NaNs and +0's in its first
-      // operand. Perform minpd in both orders, merge the results, and adjust.
+ // The minpd instruction doesn't propagate NaNs and -0's in its first
+ // operand. Perform minpd in both orders and compare results. Handle the
+ // unlikely case of discrepancies out of line.
__ Movapd(kScratchDoubleReg, src1);
__ Minpd(kScratchDoubleReg, dst);
__ Minpd(dst, src1);
- // propagate -0's and NaNs, which may be non-canonical.
- __ Orpd(kScratchDoubleReg, dst);
- // Canonicalize NaNs by quieting and clearing the payload.
- __ Cmppd(dst, kScratchDoubleReg, int8_t{3});
- __ Orpd(kScratchDoubleReg, dst);
- __ Psrlq(dst, 13);
- __ Andnpd(dst, kScratchDoubleReg);
+ // Most likely there is no difference and we're done.
+ __ Xorpd(kScratchDoubleReg, dst);
+ __ Ptest(kScratchDoubleReg, kScratchDoubleReg);
+ auto ool = new (zone()) OutOfLineF64x2Min(this, dst, kScratchDoubleReg);
+ __ j(not_zero, ool->entry());
+ __ bind(ool->exit());
break;
}
case kX64F64x2Max: {
@@ -2347,20 +2435,17 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
dst = i.OutputSimd128Register();
DCHECK_EQ(dst, i.InputSimd128Register(0));
// The maxpd instruction doesn't propagate NaNs and +0's in its first
-      // operand. Perform maxpd in both orders, merge the results, and adjust.
+ // operand. Perform maxpd in both orders and compare results. Handle the
+ // unlikely case of discrepancies out of line.
__ Movapd(kScratchDoubleReg, src1);
__ Maxpd(kScratchDoubleReg, dst);
__ Maxpd(dst, src1);
- // Find discrepancies.
- __ Xorpd(dst, kScratchDoubleReg);
- // Propagate NaNs, which may be non-canonical.
- __ Orpd(kScratchDoubleReg, dst);
- // Propagate sign discrepancy and (subtle) quiet NaNs.
- __ Subpd(kScratchDoubleReg, dst);
- // Canonicalize NaNs by clearing the payload. Sign is non-deterministic.
- __ Cmppd(dst, kScratchDoubleReg, int8_t{3});
- __ Psrlq(dst, 13);
- __ Andnpd(dst, kScratchDoubleReg);
+ // Most likely there is no difference and we're done.
+ __ Xorpd(kScratchDoubleReg, dst);
+ __ Ptest(kScratchDoubleReg, kScratchDoubleReg);
+ auto ool = new (zone()) OutOfLineF64x2Max(this, dst, kScratchDoubleReg);
+ __ j(not_zero, ool->entry());
+ __ bind(ool->exit());
break;
}
case kX64F64x2Eq: {
@@ -2524,18 +2609,18 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
XMMRegister src1 = i.InputSimd128Register(1),
dst = i.OutputSimd128Register();
DCHECK_EQ(dst, i.InputSimd128Register(0));
- // The minps instruction doesn't propagate NaNs and +0's in its first
-      // operand. Perform minps in both orders, merge the results, and adjust.
+ // The minps instruction doesn't propagate NaNs and -0's in its first
+ // operand. Perform minps in both orders and compare results. Handle the
+ // unlikely case of discrepancies out of line.
__ Movaps(kScratchDoubleReg, src1);
__ Minps(kScratchDoubleReg, dst);
__ Minps(dst, src1);
- // propagate -0's and NaNs, which may be non-canonical.
- __ Orps(kScratchDoubleReg, dst);
- // Canonicalize NaNs by quieting and clearing the payload.
- __ Cmpps(dst, kScratchDoubleReg, int8_t{3});
- __ Orps(kScratchDoubleReg, dst);
- __ Psrld(dst, byte{10});
- __ Andnps(dst, kScratchDoubleReg);
+ // Most likely there is no difference and we're done.
+ __ Xorps(kScratchDoubleReg, dst);
+ __ Ptest(kScratchDoubleReg, kScratchDoubleReg);
+ auto ool = new (zone()) OutOfLineF32x4Min(this, dst, kScratchDoubleReg);
+ __ j(not_zero, ool->entry());
+ __ bind(ool->exit());
break;
}
case kX64F32x4Max: {
@@ -2543,20 +2628,17 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
dst = i.OutputSimd128Register();
DCHECK_EQ(dst, i.InputSimd128Register(0));
// The maxps instruction doesn't propagate NaNs and +0's in its first
-      // operand. Perform maxps in both orders, merge the results, and adjust.
+ // operand. Perform maxps in both orders and compare results. Handle the
+ // unlikely case of discrepancies out of line.
__ Movaps(kScratchDoubleReg, src1);
__ Maxps(kScratchDoubleReg, dst);
__ Maxps(dst, src1);
- // Find discrepancies.
- __ Xorps(dst, kScratchDoubleReg);
- // Propagate NaNs, which may be non-canonical.
- __ Orps(kScratchDoubleReg, dst);
- // Propagate sign discrepancy and (subtle) quiet NaNs.
- __ Subps(kScratchDoubleReg, dst);
- // Canonicalize NaNs by clearing the payload. Sign is non-deterministic.
- __ Cmpps(dst, kScratchDoubleReg, int8_t{3});
- __ Psrld(dst, byte{10});
- __ Andnps(dst, kScratchDoubleReg);
+ // Most likely there is no difference and we're done.
+ __ Xorps(kScratchDoubleReg, dst);
+ __ Ptest(kScratchDoubleReg, kScratchDoubleReg);
+ auto ool = new (zone()) OutOfLineF32x4Max(this, dst, kScratchDoubleReg);
+ __ j(not_zero, ool->entry());
+ __ bind(ool->exit());
break;
}
case kX64F32x4Eq: {
@@ -2619,6 +2701,18 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
__ Maxps(dst, i.InputSimd128Register(1));
break;
}
+ case kX64F32x4Round: {
+ RoundingMode const mode =
+ static_cast<RoundingMode>(MiscField::decode(instr->opcode()));
+ __ Roundps(i.OutputSimd128Register(), i.InputSimd128Register(0), mode);
+ break;
+ }
+ case kX64F64x2Round: {
+ RoundingMode const mode =
+ static_cast<RoundingMode>(MiscField::decode(instr->opcode()));
+ __ Roundpd(i.OutputSimd128Register(), i.InputSimd128Register(0), mode);
+ break;
+ }
case kX64F64x2Pmin: {
XMMRegister dst = i.OutputSimd128Register();
DCHECK_EQ(dst, i.InputSimd128Register(0));
@@ -3093,6 +3187,10 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
__ Movmskps(i.OutputRegister(), i.InputSimd128Register(0));
break;
}
+ case kX64I32x4DotI16x8S: {
+ __ Pmaddwd(i.OutputSimd128Register(), i.InputSimd128Register(1));
+ break;
+ }
case kX64S128Zero: {
XMMRegister dst = i.OutputSimd128Register();
__ Xorps(dst, dst);
@@ -3926,10 +4024,10 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
__ Por(dst, kScratchDoubleReg);
break;
}
- case kX64S1x2AnyTrue:
- case kX64S1x4AnyTrue:
- case kX64S1x8AnyTrue:
- case kX64S1x16AnyTrue: {
+ case kX64V64x2AnyTrue:
+ case kX64V32x4AnyTrue:
+ case kX64V16x8AnyTrue:
+ case kX64V8x16AnyTrue: {
Register dst = i.OutputRegister();
XMMRegister src = i.InputSimd128Register(0);
@@ -3942,19 +4040,19 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
// comparison instruction used matters, e.g. given 0xff00, pcmpeqb returns
// 0x0011, pcmpeqw returns 0x0000, ptest will set ZF to 0 and 1
// respectively.
- case kX64S1x2AllTrue: {
+ case kX64V64x2AllTrue: {
ASSEMBLE_SIMD_ALL_TRUE(Pcmpeqq);
break;
}
- case kX64S1x4AllTrue: {
+ case kX64V32x4AllTrue: {
ASSEMBLE_SIMD_ALL_TRUE(Pcmpeqd);
break;
}
- case kX64S1x8AllTrue: {
+ case kX64V16x8AllTrue: {
ASSEMBLE_SIMD_ALL_TRUE(Pcmpeqw);
break;
}
- case kX64S1x16AllTrue: {
+ case kX64V8x16AllTrue: {
ASSEMBLE_SIMD_ALL_TRUE(Pcmpeqb);
break;
}
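The F64x2Min/Max and F32x4Min/Max rewrite keeps the common case inline (run minpd/maxpd in both operand orders, then xor and ptest to detect a discrepancy) and moves the NaN/-0 fixup into the new out-of-line classes. The per-lane semantics both paths must preserve are the usual Wasm SIMD ones; a scalar sketch for one f64 min lane, offered only as a rough reference:

#include <cassert>
#include <cmath>
#include <limits>

// Scalar model of one f64x2.min lane (assumed Wasm SIMD semantics): any NaN
// input yields a quiet NaN, and -0.0 counts as smaller than +0.0. Bare minpd
// does not propagate a NaN or -0.0 sitting in its first operand (it simply
// returns the second operand), which is why the generated code runs it in
// both orders and only falls back to the out-of-line fixup when the two
// results disagree.
double WasmF64MinLane(double a, double b) {
  if (std::isnan(a) || std::isnan(b)) {
    return std::numeric_limits<double>::quiet_NaN();
  }
  if (a == 0.0 && b == 0.0) {        // compare equal even if the signs differ
    return std::signbit(a) ? a : b;  // prefer -0.0
  }
  return a < b ? a : b;
}

int main() {
  assert(std::isnan(WasmF64MinLane(1.0, std::nan(""))));
  assert(std::signbit(WasmF64MinLane(0.0, -0.0)));  // -0.0 wins
  assert(WasmF64MinLane(2.0, 3.0) == 2.0);
  return 0;
}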
diff --git a/chromium/v8/src/compiler/backend/x64/instruction-codes-x64.h b/chromium/v8/src/compiler/backend/x64/instruction-codes-x64.h
index 745f5c6cb25..ed7d2060f59 100644
--- a/chromium/v8/src/compiler/backend/x64/instruction-codes-x64.h
+++ b/chromium/v8/src/compiler/backend/x64/instruction-codes-x64.h
@@ -174,6 +174,7 @@ namespace compiler {
V(X64F64x2Qfms) \
V(X64F64x2Pmin) \
V(X64F64x2Pmax) \
+ V(X64F64x2Round) \
V(X64F32x4Splat) \
V(X64F32x4ExtractLane) \
V(X64F32x4ReplaceLane) \
@@ -199,6 +200,7 @@ namespace compiler {
V(X64F32x4Qfms) \
V(X64F32x4Pmin) \
V(X64F32x4Pmax) \
+ V(X64F32x4Round) \
V(X64I64x2Splat) \
V(X64I64x2ExtractLane) \
V(X64I64x2ReplaceLane) \
@@ -248,6 +250,7 @@ namespace compiler {
V(X64I32x4GeU) \
V(X64I32x4Abs) \
V(X64I32x4BitMask) \
+ V(X64I32x4DotI16x8S) \
V(X64I16x8Splat) \
V(X64I16x8ExtractLaneU) \
V(X64I16x8ExtractLaneS) \
@@ -357,14 +360,14 @@ namespace compiler {
V(X64S8x8Reverse) \
V(X64S8x4Reverse) \
V(X64S8x2Reverse) \
- V(X64S1x2AnyTrue) \
- V(X64S1x2AllTrue) \
- V(X64S1x4AnyTrue) \
- V(X64S1x4AllTrue) \
- V(X64S1x8AnyTrue) \
- V(X64S1x8AllTrue) \
- V(X64S1x16AnyTrue) \
- V(X64S1x16AllTrue) \
+ V(X64V64x2AnyTrue) \
+ V(X64V64x2AllTrue) \
+ V(X64V32x4AnyTrue) \
+ V(X64V32x4AllTrue) \
+ V(X64V16x8AnyTrue) \
+ V(X64V16x8AllTrue) \
+ V(X64V8x16AnyTrue) \
+ V(X64V8x16AllTrue) \
V(X64Word64AtomicLoadUint8) \
V(X64Word64AtomicLoadUint16) \
V(X64Word64AtomicLoadUint32) \
diff --git a/chromium/v8/src/compiler/backend/x64/instruction-scheduler-x64.cc b/chromium/v8/src/compiler/backend/x64/instruction-scheduler-x64.cc
index d2c1d14855c..395c4a4e9c7 100644
--- a/chromium/v8/src/compiler/backend/x64/instruction-scheduler-x64.cc
+++ b/chromium/v8/src/compiler/backend/x64/instruction-scheduler-x64.cc
@@ -146,6 +146,7 @@ int InstructionScheduler::GetTargetInstructionFlags(
case kX64F64x2Qfms:
case kX64F64x2Pmin:
case kX64F64x2Pmax:
+ case kX64F64x2Round:
case kX64F32x4Splat:
case kX64F32x4ExtractLane:
case kX64F32x4ReplaceLane:
@@ -171,6 +172,7 @@ int InstructionScheduler::GetTargetInstructionFlags(
case kX64F32x4Qfms:
case kX64F32x4Pmin:
case kX64F32x4Pmax:
+ case kX64F32x4Round:
case kX64I64x2Splat:
case kX64I64x2ExtractLane:
case kX64I64x2ReplaceLane:
@@ -220,6 +222,7 @@ int InstructionScheduler::GetTargetInstructionFlags(
case kX64I32x4GeU:
case kX64I32x4Abs:
case kX64I32x4BitMask:
+ case kX64I32x4DotI16x8S:
case kX64I16x8Splat:
case kX64I16x8ExtractLaneU:
case kX64I16x8ExtractLaneS:
@@ -292,12 +295,12 @@ int InstructionScheduler::GetTargetInstructionFlags(
case kX64S128Select:
case kX64S128Zero:
case kX64S128AndNot:
- case kX64S1x2AnyTrue:
- case kX64S1x2AllTrue:
- case kX64S1x4AnyTrue:
- case kX64S1x4AllTrue:
- case kX64S1x8AnyTrue:
- case kX64S1x8AllTrue:
+ case kX64V64x2AnyTrue:
+ case kX64V64x2AllTrue:
+ case kX64V32x4AnyTrue:
+ case kX64V32x4AllTrue:
+ case kX64V16x8AnyTrue:
+ case kX64V16x8AllTrue:
case kX64S8x16Swizzle:
case kX64S8x16Shuffle:
case kX64S32x4Swizzle:
@@ -325,8 +328,8 @@ int InstructionScheduler::GetTargetInstructionFlags(
case kX64S8x8Reverse:
case kX64S8x4Reverse:
case kX64S8x2Reverse:
- case kX64S1x16AnyTrue:
- case kX64S1x16AllTrue:
+ case kX64V8x16AnyTrue:
+ case kX64V8x16AllTrue:
return (instr->addressing_mode() == kMode_None)
? kNoOpcodeFlags
: kIsLoadOperation | kHasSideEffect;
diff --git a/chromium/v8/src/compiler/backend/x64/instruction-selector-x64.cc b/chromium/v8/src/compiler/backend/x64/instruction-selector-x64.cc
index dd3f556937d..ab669864954 100644
--- a/chromium/v8/src/compiler/backend/x64/instruction-selector-x64.cc
+++ b/chromium/v8/src/compiler/backend/x64/instruction-selector-x64.cc
@@ -1461,7 +1461,16 @@ void VisitFloatUnop(InstructionSelector* selector, Node* node, Node* input,
V(Float64RoundTruncate, kSSEFloat64Round | MiscField::encode(kRoundToZero)) \
V(Float32RoundTiesEven, \
kSSEFloat32Round | MiscField::encode(kRoundToNearest)) \
- V(Float64RoundTiesEven, kSSEFloat64Round | MiscField::encode(kRoundToNearest))
+ V(Float64RoundTiesEven, \
+ kSSEFloat64Round | MiscField::encode(kRoundToNearest)) \
+ V(F32x4Ceil, kX64F32x4Round | MiscField::encode(kRoundUp)) \
+ V(F32x4Floor, kX64F32x4Round | MiscField::encode(kRoundDown)) \
+ V(F32x4Trunc, kX64F32x4Round | MiscField::encode(kRoundToZero)) \
+ V(F32x4NearestInt, kX64F32x4Round | MiscField::encode(kRoundToNearest)) \
+ V(F64x2Ceil, kX64F64x2Round | MiscField::encode(kRoundUp)) \
+ V(F64x2Floor, kX64F64x2Round | MiscField::encode(kRoundDown)) \
+ V(F64x2Trunc, kX64F64x2Round | MiscField::encode(kRoundToZero)) \
+ V(F64x2NearestInt, kX64F64x2Round | MiscField::encode(kRoundToNearest))
#define RO_VISITOR(Name, opcode) \
void InstructionSelector::Visit##Name(Node* node) { \
@@ -1898,16 +1907,33 @@ void VisitWord32EqualImpl(InstructionSelector* selector, Node* node,
X64OperandGenerator g(selector);
const RootsTable& roots_table = selector->isolate()->roots_table();
RootIndex root_index;
- CompressedHeapObjectBinopMatcher m(node);
- if (m.right().HasValue() &&
- roots_table.IsRootHandle(m.right().Value(), &root_index)) {
+ Node* left = nullptr;
+ Handle<HeapObject> right;
+ // HeapConstants and CompressedHeapConstants can be treated the same when
+ // using them as an input to a 32-bit comparison. Check whether either is
+ // present.
+ {
+ CompressedHeapObjectBinopMatcher m(node);
+ if (m.right().HasValue()) {
+ left = m.left().node();
+ right = m.right().Value();
+ } else {
+ HeapObjectBinopMatcher m2(node);
+ if (m2.right().HasValue()) {
+ left = m2.left().node();
+ right = m2.right().Value();
+ }
+ }
+ }
+ if (!right.is_null() && roots_table.IsRootHandle(right, &root_index)) {
+ DCHECK_NE(left, nullptr);
InstructionCode opcode =
kX64Cmp32 | AddressingModeField::encode(kMode_Root);
return VisitCompare(
selector, opcode,
g.TempImmediate(
TurboAssemblerBase::RootRegisterOffsetForRootIndex(root_index)),
- g.UseRegister(m.left().node()), cont);
+ g.UseRegister(left), cont);
}
}
VisitWordCompare(selector, node, kX64Cmp32, cont);
@@ -2674,6 +2700,7 @@ VISIT_ATOMIC_BINOP(Xor)
V(I32x4MinU) \
V(I32x4MaxU) \
V(I32x4GeU) \
+ V(I32x4DotI16x8S) \
V(I16x8SConvertI32x4) \
V(I16x8Add) \
V(I16x8AddSaturateS) \
@@ -2766,16 +2793,16 @@ VISIT_ATOMIC_BINOP(Xor)
V(I8x16ShrU)
#define SIMD_ANYTRUE_LIST(V) \
- V(S1x2AnyTrue) \
- V(S1x4AnyTrue) \
- V(S1x8AnyTrue) \
- V(S1x16AnyTrue)
+ V(V64x2AnyTrue) \
+ V(V32x4AnyTrue) \
+ V(V16x8AnyTrue) \
+ V(V8x16AnyTrue)
#define SIMD_ALLTRUE_LIST(V) \
- V(S1x2AllTrue) \
- V(S1x4AllTrue) \
- V(S1x8AllTrue) \
- V(S1x16AllTrue)
+ V(V64x2AllTrue) \
+ V(V32x4AllTrue) \
+ V(V16x8AllTrue) \
+ V(V8x16AllTrue)
void InstructionSelector::VisitS128Zero(Node* node) {
X64OperandGenerator g(this);
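The selector maps F32x4Ceil/Floor/Trunc/NearestInt (and the F64x2 variants) onto a single kX64F32x4Round / kX64F64x2Round opcode with the rounding mode packed into MiscField. A self-contained sketch of that packing scheme, using a made-up field layout rather than V8's actual BitField template:

#include <cassert>
#include <cstdint>

// Hypothetical layout: low 16 bits hold the opcode, the next bits hold a
// rounding mode, mirroring how MiscField::encode() is OR'ed into the
// InstructionCode above. The mode numbering here is illustrative only.
enum RoundingMode : uint32_t { kRoundToNearest, kRoundDown, kRoundUp, kRoundToZero };

constexpr uint32_t kOpcodeMask = 0xFFFFu;
constexpr uint32_t kMiscShift = 16;

constexpr uint32_t Encode(uint32_t opcode, RoundingMode mode) {
  return opcode | (static_cast<uint32_t>(mode) << kMiscShift);
}
constexpr uint32_t DecodeOpcode(uint32_t code) { return code & kOpcodeMask; }
constexpr RoundingMode DecodeMode(uint32_t code) {
  return static_cast<RoundingMode>(code >> kMiscShift);
}

int main() {
  constexpr uint32_t kF32x4Round = 0x0123u;  // made-up opcode number
  uint32_t ceil_code = Encode(kF32x4Round, kRoundUp);       // F32x4Ceil
  uint32_t trunc_code = Encode(kF32x4Round, kRoundToZero);  // F32x4Trunc
  assert(DecodeOpcode(ceil_code) == kF32x4Round && DecodeMode(ceil_code) == kRoundUp);
  assert(DecodeOpcode(trunc_code) == kF32x4Round && DecodeMode(trunc_code) == kRoundToZero);
  return 0;
}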
diff --git a/chromium/v8/src/compiler/basic-block-instrumentor.cc b/chromium/v8/src/compiler/basic-block-instrumentor.cc
index c2548b77267..ca6a60b7827 100644
--- a/chromium/v8/src/compiler/basic-block-instrumentor.cc
+++ b/chromium/v8/src/compiler/basic-block-instrumentor.cc
@@ -37,16 +37,21 @@ static NodeVector::iterator FindInsertionPoint(BasicBlock* block) {
return i;
}
+static const Operator* IntPtrConstant(CommonOperatorBuilder* common,
+ intptr_t value) {
+ return kSystemPointerSize == 8
+ ? common->Int64Constant(value)
+ : common->Int32Constant(static_cast<int32_t>(value));
+}
// TODO(dcarney): need to mark code as non-serializable.
static const Operator* PointerConstant(CommonOperatorBuilder* common,
- intptr_t ptr) {
- return kSystemPointerSize == 8
- ? common->Int64Constant(ptr)
- : common->Int32Constant(static_cast<int32_t>(ptr));
+ const void* ptr) {
+ intptr_t ptr_as_int = reinterpret_cast<intptr_t>(ptr);
+ return IntPtrConstant(common, ptr_as_int);
}
-BasicBlockProfiler::Data* BasicBlockInstrumentor::Instrument(
+BasicBlockProfilerData* BasicBlockInstrumentor::Instrument(
OptimizedCompilationInfo* info, Graph* graph, Schedule* schedule,
Isolate* isolate) {
// Basic block profiling disables concurrent compilation, so handle deref is
@@ -54,41 +59,68 @@ BasicBlockProfiler::Data* BasicBlockInstrumentor::Instrument(
AllowHandleDereference allow_handle_dereference;
// Skip the exit block in profiles, since the register allocator can't handle
// it and entry into it means falling off the end of the function anyway.
- size_t n_blocks = static_cast<size_t>(schedule->RpoBlockCount()) - 1;
- BasicBlockProfiler::Data* data = BasicBlockProfiler::Get()->NewData(n_blocks);
+ size_t n_blocks = schedule->RpoBlockCount() - 1;
+ BasicBlockProfilerData* data = BasicBlockProfiler::Get()->NewData(n_blocks);
// Set the function name.
data->SetFunctionName(info->GetDebugName());
// Capture the schedule string before instrumentation.
- {
+ if (FLAG_turbo_profiling_verbose) {
std::ostringstream os;
os << *schedule;
- data->SetSchedule(&os);
+ data->SetSchedule(os);
}
+ // Check whether we should write counts to a JS heap object or to the
+ // BasicBlockProfilerData directly. The JS heap object is only used for
+ // builtins.
+ bool on_heap_counters = isolate && isolate->IsGeneratingEmbeddedBuiltins();
// Add the increment instructions to the start of every block.
CommonOperatorBuilder common(graph->zone());
- Node* zero = graph->NewNode(common.Int32Constant(0));
- Node* one = graph->NewNode(common.Int32Constant(1));
MachineOperatorBuilder machine(graph->zone());
+ Node* counters_array = nullptr;
+ if (on_heap_counters) {
+ // Allocation is disallowed here, so rather than referring to an actual
+ // counters array, create a reference to a special marker object. This
+ // object will get fixed up later in the constants table (see
+ // PatchBasicBlockCountersReference). An important and subtle point: we
+ // cannot use the root handle basic_block_counters_marker_handle() and must
+ // create a new separate handle. Otherwise
+ // TurboAssemblerBase::IndirectLoadConstant would helpfully emit a
+ // root-relative load rather than putting this value in the constants table
+ // where we expect it to be for patching.
+ counters_array = graph->NewNode(common.HeapConstant(Handle<HeapObject>::New(
+ ReadOnlyRoots(isolate).basic_block_counters_marker(), isolate)));
+ } else {
+ counters_array = graph->NewNode(PointerConstant(&common, data->counts()));
+ }
+ Node* one = graph->NewNode(common.Int32Constant(1));
BasicBlockVector* blocks = schedule->rpo_order();
size_t block_number = 0;
for (BasicBlockVector::iterator it = blocks->begin(); block_number < n_blocks;
++it, ++block_number) {
BasicBlock* block = (*it);
data->SetBlockRpoNumber(block_number, block->rpo_number());
- // TODO(dcarney): wire effect and control deps for load and store.
+ // It is unnecessary to wire effect and control deps for load and store
+ // since this happens after scheduling.
// Construct increment operation.
- Node* base = graph->NewNode(
- PointerConstant(&common, data->GetCounterAddress(block_number)));
- Node* load = graph->NewNode(machine.Load(MachineType::Uint32()), base, zero,
- graph->start(), graph->start());
+ int offset_to_counter_value = static_cast<int>(block_number) * kInt32Size;
+ if (on_heap_counters) {
+ offset_to_counter_value += ByteArray::kHeaderSize - kHeapObjectTag;
+ }
+ Node* offset_to_counter =
+ graph->NewNode(IntPtrConstant(&common, offset_to_counter_value));
+ Node* load =
+ graph->NewNode(machine.Load(MachineType::Uint32()), counters_array,
+ offset_to_counter, graph->start(), graph->start());
Node* inc = graph->NewNode(machine.Int32Add(), load, one);
- Node* store =
- graph->NewNode(machine.Store(StoreRepresentation(
- MachineRepresentation::kWord32, kNoWriteBarrier)),
- base, zero, inc, graph->start(), graph->start());
+ Node* store = graph->NewNode(
+ machine.Store(StoreRepresentation(MachineRepresentation::kWord32,
+ kNoWriteBarrier)),
+ counters_array, offset_to_counter, inc, graph->start(), graph->start());
// Insert the new nodes.
static const int kArraySize = 6;
- Node* to_insert[kArraySize] = {zero, one, base, load, inc, store};
+ Node* to_insert[kArraySize] = {counters_array, one, offset_to_counter,
+ load, inc, store};
+ // The first two Nodes are constant across all blocks.
int insertion_start = block_number == 0 ? 0 : 2;
NodeVector::iterator insertion_point = FindInsertionPoint(block);
block->InsertNodes(insertion_point, &to_insert[insertion_start],
diff --git a/chromium/v8/src/compiler/basic-block-instrumentor.h b/chromium/v8/src/compiler/basic-block-instrumentor.h
index c8bc94c16bf..e63a2cac5d9 100644
--- a/chromium/v8/src/compiler/basic-block-instrumentor.h
+++ b/chromium/v8/src/compiler/basic-block-instrumentor.h
@@ -20,9 +20,9 @@ class Schedule;
class BasicBlockInstrumentor : public AllStatic {
public:
- static BasicBlockProfiler::Data* Instrument(OptimizedCompilationInfo* info,
- Graph* graph, Schedule* schedule,
- Isolate* isolate);
+ static BasicBlockProfilerData* Instrument(OptimizedCompilationInfo* info,
+ Graph* graph, Schedule* schedule,
+ Isolate* isolate);
};
} // namespace compiler
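The instrumentor changes above emit, per basic block, a load/add-one/store against a shared counters-array node plus a per-block byte offset (shifted by the ByteArray header when the counters live on the JS heap). Stripped of the graph-building machinery, the emitted sequence amounts to the following sketch; the header constant is made up:

#include <cassert>
#include <cstdint>
#include <cstring>

constexpr int kInt32Size = 4;
constexpr int kByteArrayHeaderSize = 8;  // hypothetical; on-heap counters only

// What the emitted Load / Int32Add / Store per block boils down to.
void IncrementBlockCounter(uint8_t* counters_array, int block_number,
                           bool on_heap_counters) {
  int offset = block_number * kInt32Size;
  if (on_heap_counters) offset += kByteArrayHeaderSize;
  uint32_t value;
  std::memcpy(&value, counters_array + offset, sizeof(value));  // load
  value += 1;                                                   // add one
  std::memcpy(counters_array + offset, &value, sizeof(value));  // store
}

int main() {
  uint8_t counters[4 * kInt32Size] = {};
  IncrementBlockCounter(counters, 2, /*on_heap_counters=*/false);
  IncrementBlockCounter(counters, 2, /*on_heap_counters=*/false);
  uint32_t v;
  std::memcpy(&v, counters + 2 * kInt32Size, sizeof(v));
  assert(v == 2);
  return 0;
}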
diff --git a/chromium/v8/src/compiler/bytecode-graph-builder.cc b/chromium/v8/src/compiler/bytecode-graph-builder.cc
index b59b5a1b844..93aaca2512e 100644
--- a/chromium/v8/src/compiler/bytecode-graph-builder.cc
+++ b/chromium/v8/src/compiler/bytecode-graph-builder.cc
@@ -63,12 +63,30 @@ class BytecodeGraphBuilder {
// Get or create the node that represents the outer function closure.
Node* GetFunctionClosure();
+ bool native_context_independent() const {
+ return native_context_independent_;
+ }
+
+ // The node representing the current feedback vector is generated once prior
+ // to visiting bytecodes, and is later passed as input to other nodes that
+ // may need it.
+ // TODO(jgruber): Remove feedback_vector() and rename feedback_vector_node()
+ // to feedback_vector() once all uses of the direct heap object reference
+ // have been replaced with a Node* reference.
+ void CreateFeedbackVectorNode();
+ Node* BuildLoadFeedbackVector();
+ Node* feedback_vector_node() const {
+ DCHECK_NOT_NULL(feedback_vector_node_);
+ return feedback_vector_node_;
+ }
+
// Builder for loading the a native context field.
Node* BuildLoadNativeContextField(int index);
// Helper function for creating a feedback source containing type feedback
// vector and a feedback slot.
FeedbackSource CreateFeedbackSource(int slot_id);
+ FeedbackSource CreateFeedbackSource(FeedbackSlot slot);
void set_environment(Environment* env) { environment_ = env; }
const Environment* environment() const { return environment_; }
@@ -191,6 +209,7 @@ class BytecodeGraphBuilder {
void BuildUnaryOp(const Operator* op);
void BuildBinaryOp(const Operator* op);
void BuildBinaryOpWithImmediate(const Operator* op);
+ void BuildInstanceOf(const Operator* op);
void BuildCompareOp(const Operator* op);
void BuildDelete(LanguageMode language_mode);
void BuildCastOperator(const Operator* op);
@@ -243,14 +262,6 @@ class BytecodeGraphBuilder {
Environment* CheckContextExtensionAtDepth(Environment* slow_environment,
uint32_t depth);
- // Helper function to create binary operation hint from the recorded
- // type feedback.
- BinaryOperationHint GetBinaryOperationHint(int operand_index);
-
- // Helper function to create compare operation hint from the recorded
- // type feedback.
- CompareOperationHint GetCompareOperationHint();
-
// Helper function to create for-in mode from the recorded type feedback.
ForInMode GetForInMode(int operand_index);
@@ -423,6 +434,9 @@ class BytecodeGraphBuilder {
int input_buffer_size_;
Node** input_buffer_;
+ const bool native_context_independent_;
+ Node* feedback_vector_node_;
+
// Optimization to only create checkpoints when the current position in the
// control-flow is not effect-dominated by another checkpoint already. All
// operations that do not have observable side-effects can be re-evaluated.
@@ -443,10 +457,11 @@ class BytecodeGraphBuilder {
TickCounter* const tick_counter_;
- static int const kBinaryOperationHintIndex = 1;
- static int const kCountOperationHintIndex = 0;
- static int const kBinaryOperationSmiHintIndex = 1;
- static int const kUnaryOperationHintIndex = 0;
+ static constexpr int kBinaryOperationHintIndex = 1;
+ static constexpr int kBinaryOperationSmiHintIndex = 1;
+ static constexpr int kCompareOperationHintIndex = 1;
+ static constexpr int kCountOperationHintIndex = 0;
+ static constexpr int kUnaryOperationHintIndex = 0;
DISALLOW_COPY_AND_ASSIGN(BytecodeGraphBuilder);
};
@@ -984,6 +999,9 @@ BytecodeGraphBuilder::BytecodeGraphBuilder(
current_exception_handler_(0),
input_buffer_size_(0),
input_buffer_(nullptr),
+ native_context_independent_(
+ flags & BytecodeGraphBuilderFlag::kNativeContextIndependent),
+ feedback_vector_node_(nullptr),
needs_eager_checkpoint_(true),
exit_controls_(local_zone),
state_values_cache_(jsgraph),
@@ -1014,6 +1032,36 @@ Node* BytecodeGraphBuilder::GetFunctionClosure() {
return function_closure_.get();
}
+void BytecodeGraphBuilder::CreateFeedbackVectorNode() {
+ DCHECK_NULL(feedback_vector_node_);
+ feedback_vector_node_ = native_context_independent()
+ ? BuildLoadFeedbackVector()
+ : jsgraph()->Constant(feedback_vector());
+}
+
+Node* BytecodeGraphBuilder::BuildLoadFeedbackVector() {
+ DCHECK(native_context_independent());
+ DCHECK_NULL(feedback_vector_node_);
+
+ // The feedback vector must exist and remain live while the generated code
+ // lives. Specifically that means it must be created when NCI code is
+ // installed, and must not be flushed.
+
+ Environment* env = environment();
+ Node* control = env->GetControlDependency();
+ Node* effect = env->GetEffectDependency();
+
+ Node* feedback_cell = effect = graph()->NewNode(
+ simplified()->LoadField(AccessBuilder::ForJSFunctionFeedbackCell()),
+ GetFunctionClosure(), effect, control);
+ Node* vector = effect = graph()->NewNode(
+ simplified()->LoadField(AccessBuilder::ForFeedbackCellValue()),
+ feedback_cell, effect, control);
+
+ env->UpdateEffectDependency(effect);
+ return vector;
+}
+
Node* BytecodeGraphBuilder::BuildLoadNativeContextField(int index) {
Node* result = NewNode(javascript()->LoadContext(0, index, true));
NodeProperties::ReplaceContextInput(result,
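The comments in the two hunks above describe the scheme: feedback_vector_node_ is built exactly once, before any bytecode is visited, either as a heap constant (the usual case) or, for native-context-independent code, via chained field loads closure -> feedback cell -> vector value. A rough standalone model of that create-once/cache discipline, with stand-in types rather than V8's (not part of the patch):

#include <cassert>

struct Node { int id; };

// Minimal sketch of the pattern behind CreateFeedbackVectorNode(): the node
// is created a single time up front, and every later consumer reads the
// cached pointer as an extra operator input.
class FeedbackVectorCache {
 public:
  void Create(bool native_context_independent, Node* constant_vector,
              Node* loaded_vector) {
    assert(node_ == nullptr);  // mirrors DCHECK_NULL(feedback_vector_node_)
    node_ = native_context_independent ? loaded_vector : constant_vector;
  }
  Node* node() const {
    assert(node_ != nullptr);  // mirrors DCHECK_NOT_NULL(feedback_vector_node_)
    return node_;
  }

 private:
  Node* node_ = nullptr;
};

int main() {
  Node constant{0}, loaded{1};
  FeedbackVectorCache cache;
  cache.Create(/*native_context_independent=*/true, &constant, &loaded);
  return cache.node() == &loaded ? 0 : 1;
}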
@@ -1022,7 +1070,10 @@ Node* BytecodeGraphBuilder::BuildLoadNativeContextField(int index) {
}
FeedbackSource BytecodeGraphBuilder::CreateFeedbackSource(int slot_id) {
- FeedbackSlot slot = FeedbackVector::ToSlot(slot_id);
+ return CreateFeedbackSource(FeedbackVector::ToSlot(slot_id));
+}
+
+FeedbackSource BytecodeGraphBuilder::CreateFeedbackSource(FeedbackSlot slot) {
return FeedbackSource(feedback_vector(), slot);
}
@@ -1042,6 +1093,7 @@ void BytecodeGraphBuilder::CreateGraph() {
graph()->start());
set_environment(&env);
+ CreateFeedbackVectorNode();
VisitBytecodes();
// Finish the basic structure of the graph.
@@ -2179,8 +2231,7 @@ void BytecodeGraphBuilder::VisitCreateObjectLiteral() {
}
void BytecodeGraphBuilder::VisitCreateEmptyObjectLiteral() {
- Node* literal =
- NewNode(javascript()->CreateEmptyLiteralObject(), GetFunctionClosure());
+ Node* literal = NewNode(javascript()->CreateEmptyLiteralObject());
environment()->BindAccumulator(literal);
}
@@ -2210,10 +2261,7 @@ void BytecodeGraphBuilder::VisitGetTemplateObject() {
Node* const* BytecodeGraphBuilder::GetCallArgumentsFromRegisters(
Node* callee, Node* receiver, interpreter::Register first_arg,
int arg_count) {
- // The arity of the Call node -- includes the callee, receiver and function
- // arguments.
- int arity = 2 + arg_count;
-
+ int arity = kTargetAndReceiver + arg_count;
Node** all = local_zone()->NewArray<Node*>(static_cast<size_t>(arity));
all[0] = callee;
@@ -2222,7 +2270,7 @@ Node* const* BytecodeGraphBuilder::GetCallArgumentsFromRegisters(
// The function arguments are in consecutive registers.
int arg_base = first_arg.index();
for (int i = 0; i < arg_count; ++i) {
- all[2 + i] =
+ all[kTargetAndReceiver + i] =
environment()->LookupRegister(interpreter::Register(arg_base + i));
}
@@ -2247,7 +2295,8 @@ Node* BytecodeGraphBuilder::ProcessCallArguments(const Operator* call_op,
Node* const* call_args = GetCallArgumentsFromRegisters(callee, receiver_node,
first_arg, arg_count);
- return ProcessCallArguments(call_op, call_args, 2 + arg_count);
+ return ProcessCallArguments(call_op, call_args,
+ kTargetAndReceiver + arg_count);
}
void BytecodeGraphBuilder::BuildCall(ConvertReceiverMode receiver_mode,
@@ -2318,8 +2367,8 @@ void BytecodeGraphBuilder::BuildCallVarArgs(ConvertReceiverMode receiver_mode) {
: static_cast<int>(reg_count) - 1;
Node* const* call_args =
ProcessCallVarArgs(receiver_mode, callee, first_reg, arg_count);
- BuildCall(receiver_mode, call_args, static_cast<size_t>(2 + arg_count),
- slot_id);
+ BuildCall(receiver_mode, call_args,
+ static_cast<size_t>(kTargetAndReceiver + arg_count), slot_id);
}
void BytecodeGraphBuilder::VisitCallAnyReceiver() {
@@ -2341,9 +2390,7 @@ void BytecodeGraphBuilder::VisitCallNoFeedback() {
// The receiver is the first register, followed by the arguments in the
// consecutive registers.
int arg_count = static_cast<int>(reg_count) - 1;
- // The arity of the Call node -- includes the callee, receiver and function
- // arguments.
- int arity = 2 + arg_count;
+ int arity = kTargetAndReceiver + arg_count;
// Setting call frequency to a value less than min_inlining frequency to
// prevent inlining of one-shot call node.
@@ -2459,7 +2506,7 @@ void BytecodeGraphBuilder::VisitCallWithSpread() {
node = lowering.value();
} else {
DCHECK(!lowering.Changed());
- node = ProcessCallArguments(op, args, 2 + arg_count);
+ node = ProcessCallArguments(op, args, kTargetAndReceiver + arg_count);
}
environment()->BindAccumulator(node, Environment::kAttachFrameState);
}
@@ -2472,10 +2519,11 @@ void BytecodeGraphBuilder::VisitCallJSRuntime() {
size_t reg_count = bytecode_iterator().GetRegisterCountOperand(2);
int arg_count = static_cast<int>(reg_count);
- const Operator* call = javascript()->Call(2 + arg_count);
+ const Operator* call = javascript()->Call(kTargetAndReceiver + arg_count);
Node* const* call_args = ProcessCallVarArgs(
ConvertReceiverMode::kNullOrUndefined, callee, first_reg, arg_count);
- Node* value = ProcessCallArguments(call, call_args, 2 + arg_count);
+ Node* value =
+ ProcessCallArguments(call, call_args, kTargetAndReceiver + arg_count);
environment()->BindAccumulator(value, Environment::kAttachFrameState);
}
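The call-related hunks above replace the literal 2 (target plus receiver for calls, target plus new.target for constructs) with named constants. Assuming kTargetAndReceiver and kTargetAndNewTarget are both defined as 2 elsewhere in the compiler (their definitions are not part of this diff), the arithmetic is unchanged; a small self-check of that accounting:

#include <cassert>

// Assumed values; the real constants are defined outside this patch.
constexpr int kTargetAndReceiver = 2;   // callee + receiver
constexpr int kTargetAndNewTarget = 2;  // callee + new.target

int CallArity(int arg_count) { return kTargetAndReceiver + arg_count; }
int ConstructArity(int arg_count) { return kTargetAndNewTarget + arg_count; }

int main() {
  // f.call(recv, a, b): 2 explicit arguments, 4 value inputs on the Call node.
  assert(CallArity(2) == 4);
  // new C(a, b, c): 3 arguments, 5 value inputs on the Construct node.
  assert(ConstructArity(3) == 5);
}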
@@ -2532,8 +2580,7 @@ void BytecodeGraphBuilder::VisitCallRuntimeForPair() {
Node* const* BytecodeGraphBuilder::GetConstructArgumentsFromRegister(
Node* target, Node* new_target, interpreter::Register first_arg,
int arg_count) {
- // arity is args + callee and new target.
- int arity = arg_count + 2;
+ int arity = kTargetAndNewTarget + arg_count;
Node** all = local_zone()->NewArray<Node*>(static_cast<size_t>(arity));
all[0] = target;
int first_arg_index = first_arg.index();
@@ -2563,9 +2610,10 @@ void BytecodeGraphBuilder::VisitConstruct() {
Node* callee = environment()->LookupRegister(callee_reg);
CallFrequency frequency = ComputeCallFrequency(slot_id);
- const Operator* op = javascript()->Construct(
- static_cast<uint32_t>(reg_count + 2), frequency, feedback);
- int arg_count = static_cast<int>(reg_count);
+ const uint32_t arg_count = static_cast<uint32_t>(reg_count);
+ const uint32_t arg_count_with_extra_args = kTargetAndNewTarget + arg_count;
+ const Operator* op =
+ javascript()->Construct(arg_count_with_extra_args, frequency, feedback);
Node* const* args = GetConstructArgumentsFromRegister(callee, new_target,
first_reg, arg_count);
JSTypeHintLowering::LoweringResult lowering = TryBuildSimplifiedConstruct(
@@ -2577,7 +2625,7 @@ void BytecodeGraphBuilder::VisitConstruct() {
node = lowering.value();
} else {
DCHECK(!lowering.Changed());
- node = ProcessConstructArguments(op, args, 2 + arg_count);
+ node = ProcessConstructArguments(op, args, arg_count_with_extra_args);
}
environment()->BindAccumulator(node, Environment::kAttachFrameState);
}
@@ -2594,9 +2642,10 @@ void BytecodeGraphBuilder::VisitConstructWithSpread() {
Node* callee = environment()->LookupRegister(callee_reg);
CallFrequency frequency = ComputeCallFrequency(slot_id);
+ const uint32_t arg_count = static_cast<uint32_t>(reg_count);
+ const uint32_t arg_count_with_extra_args = kTargetAndNewTarget + arg_count;
const Operator* op = javascript()->ConstructWithSpread(
- static_cast<uint32_t>(reg_count + 2), frequency, feedback);
- int arg_count = static_cast<int>(reg_count);
+ arg_count_with_extra_args, frequency, feedback);
Node* const* args = GetConstructArgumentsFromRegister(callee, new_target,
first_reg, arg_count);
JSTypeHintLowering::LoweringResult lowering = TryBuildSimplifiedConstruct(
@@ -2608,7 +2657,7 @@ void BytecodeGraphBuilder::VisitConstructWithSpread() {
node = lowering.value();
} else {
DCHECK(!lowering.Changed());
- node = ProcessConstructArguments(op, args, 2 + arg_count);
+ node = ProcessConstructArguments(op, args, arg_count_with_extra_args);
}
environment()->BindAccumulator(node, Environment::kAttachFrameState);
}
@@ -2711,6 +2760,7 @@ void BytecodeGraphBuilder::VisitThrowSuperAlreadyCalledIfNotHole() {
}
void BytecodeGraphBuilder::BuildUnaryOp(const Operator* op) {
+ DCHECK(JSOperator::IsUnaryWithFeedback(op->opcode()));
PrepareEagerCheckpoint();
Node* operand = environment()->LookupAccumulator();
@@ -2725,13 +2775,14 @@ void BytecodeGraphBuilder::BuildUnaryOp(const Operator* op) {
node = lowering.value();
} else {
DCHECK(!lowering.Changed());
- node = NewNode(op, operand);
+ node = NewNode(op, operand, feedback_vector_node());
}
environment()->BindAccumulator(node, Environment::kAttachFrameState);
}
void BytecodeGraphBuilder::BuildBinaryOp(const Operator* op) {
+ DCHECK(JSOperator::IsBinaryWithFeedback(op->opcode()));
PrepareEagerCheckpoint();
Node* left =
environment()->LookupRegister(bytecode_iterator().GetRegisterOperand(0));
@@ -2748,29 +2799,12 @@ void BytecodeGraphBuilder::BuildBinaryOp(const Operator* op) {
node = lowering.value();
} else {
DCHECK(!lowering.Changed());
- node = NewNode(op, left, right);
+ node = NewNode(op, left, right, feedback_vector_node());
}
environment()->BindAccumulator(node, Environment::kAttachFrameState);
}
-// Helper function to create binary operation hint from the recorded type
-// feedback.
-BinaryOperationHint BytecodeGraphBuilder::GetBinaryOperationHint(
- int operand_index) {
- FeedbackSlot slot = bytecode_iterator().GetSlotOperand(operand_index);
- FeedbackSource source(feedback_vector(), slot);
- return broker()->GetFeedbackForBinaryOperation(source);
-}
-
-// Helper function to create compare operation hint from the recorded type
-// feedback.
-CompareOperationHint BytecodeGraphBuilder::GetCompareOperationHint() {
- FeedbackSlot slot = bytecode_iterator().GetSlotOperand(1);
- FeedbackSource source(feedback_vector(), slot);
- return broker()->GetFeedbackForCompareOperation(source);
-}
-
// Helper function to create for-in mode from the recorded type feedback.
ForInMode BytecodeGraphBuilder::GetForInMode(int operand_index) {
FeedbackSlot slot = bytecode_iterator().GetSlotOperand(operand_index);
@@ -2810,69 +2844,103 @@ SpeculationMode BytecodeGraphBuilder::GetSpeculationMode(int slot_id) const {
}
void BytecodeGraphBuilder::VisitBitwiseNot() {
- BuildUnaryOp(javascript()->BitwiseNot());
+ FeedbackSource feedback = CreateFeedbackSource(
+ bytecode_iterator().GetSlotOperand(kUnaryOperationHintIndex));
+ BuildUnaryOp(javascript()->BitwiseNot(feedback));
}
void BytecodeGraphBuilder::VisitDec() {
- BuildUnaryOp(javascript()->Decrement());
+ FeedbackSource feedback = CreateFeedbackSource(
+ bytecode_iterator().GetSlotOperand(kUnaryOperationHintIndex));
+ BuildUnaryOp(javascript()->Decrement(feedback));
}
void BytecodeGraphBuilder::VisitInc() {
- BuildUnaryOp(javascript()->Increment());
+ FeedbackSource feedback = CreateFeedbackSource(
+ bytecode_iterator().GetSlotOperand(kUnaryOperationHintIndex));
+ BuildUnaryOp(javascript()->Increment(feedback));
}
void BytecodeGraphBuilder::VisitNegate() {
- BuildUnaryOp(javascript()->Negate());
+ FeedbackSource feedback = CreateFeedbackSource(
+ bytecode_iterator().GetSlotOperand(kUnaryOperationHintIndex));
+ BuildUnaryOp(javascript()->Negate(feedback));
}
void BytecodeGraphBuilder::VisitAdd() {
- BuildBinaryOp(
- javascript()->Add(GetBinaryOperationHint(kBinaryOperationHintIndex)));
+ FeedbackSource feedback = CreateFeedbackSource(
+ bytecode_iterator().GetSlotOperand(kBinaryOperationHintIndex));
+ BuildBinaryOp(javascript()->Add(feedback));
}
void BytecodeGraphBuilder::VisitSub() {
- BuildBinaryOp(javascript()->Subtract());
+ FeedbackSource feedback = CreateFeedbackSource(
+ bytecode_iterator().GetSlotOperand(kBinaryOperationHintIndex));
+ BuildBinaryOp(javascript()->Subtract(feedback));
}
void BytecodeGraphBuilder::VisitMul() {
- BuildBinaryOp(javascript()->Multiply());
+ FeedbackSource feedback = CreateFeedbackSource(
+ bytecode_iterator().GetSlotOperand(kBinaryOperationHintIndex));
+ BuildBinaryOp(javascript()->Multiply(feedback));
}
-void BytecodeGraphBuilder::VisitDiv() { BuildBinaryOp(javascript()->Divide()); }
+void BytecodeGraphBuilder::VisitDiv() {
+ FeedbackSource feedback = CreateFeedbackSource(
+ bytecode_iterator().GetSlotOperand(kBinaryOperationHintIndex));
+ BuildBinaryOp(javascript()->Divide(feedback));
+}
void BytecodeGraphBuilder::VisitMod() {
- BuildBinaryOp(javascript()->Modulus());
+ FeedbackSource feedback = CreateFeedbackSource(
+ bytecode_iterator().GetSlotOperand(kBinaryOperationHintIndex));
+ BuildBinaryOp(javascript()->Modulus(feedback));
}
void BytecodeGraphBuilder::VisitExp() {
- BuildBinaryOp(javascript()->Exponentiate());
+ FeedbackSource feedback = CreateFeedbackSource(
+ bytecode_iterator().GetSlotOperand(kBinaryOperationHintIndex));
+ BuildBinaryOp(javascript()->Exponentiate(feedback));
}
void BytecodeGraphBuilder::VisitBitwiseOr() {
- BuildBinaryOp(javascript()->BitwiseOr());
+ FeedbackSource feedback = CreateFeedbackSource(
+ bytecode_iterator().GetSlotOperand(kBinaryOperationHintIndex));
+ BuildBinaryOp(javascript()->BitwiseOr(feedback));
}
void BytecodeGraphBuilder::VisitBitwiseXor() {
- BuildBinaryOp(javascript()->BitwiseXor());
+ FeedbackSource feedback = CreateFeedbackSource(
+ bytecode_iterator().GetSlotOperand(kBinaryOperationHintIndex));
+ BuildBinaryOp(javascript()->BitwiseXor(feedback));
}
void BytecodeGraphBuilder::VisitBitwiseAnd() {
- BuildBinaryOp(javascript()->BitwiseAnd());
+ FeedbackSource feedback = CreateFeedbackSource(
+ bytecode_iterator().GetSlotOperand(kBinaryOperationHintIndex));
+ BuildBinaryOp(javascript()->BitwiseAnd(feedback));
}
void BytecodeGraphBuilder::VisitShiftLeft() {
- BuildBinaryOp(javascript()->ShiftLeft());
+ FeedbackSource feedback = CreateFeedbackSource(
+ bytecode_iterator().GetSlotOperand(kBinaryOperationHintIndex));
+ BuildBinaryOp(javascript()->ShiftLeft(feedback));
}
void BytecodeGraphBuilder::VisitShiftRight() {
- BuildBinaryOp(javascript()->ShiftRight());
+ FeedbackSource feedback = CreateFeedbackSource(
+ bytecode_iterator().GetSlotOperand(kBinaryOperationHintIndex));
+ BuildBinaryOp(javascript()->ShiftRight(feedback));
}
void BytecodeGraphBuilder::VisitShiftRightLogical() {
- BuildBinaryOp(javascript()->ShiftRightLogical());
+ FeedbackSource feedback = CreateFeedbackSource(
+ bytecode_iterator().GetSlotOperand(kBinaryOperationHintIndex));
+ BuildBinaryOp(javascript()->ShiftRightLogical(feedback));
}
void BytecodeGraphBuilder::BuildBinaryOpWithImmediate(const Operator* op) {
+ DCHECK(JSOperator::IsBinaryWithFeedback(op->opcode()));
PrepareEagerCheckpoint();
Node* left = environment()->LookupAccumulator();
Node* right = jsgraph()->Constant(bytecode_iterator().GetImmediateOperand(0));
@@ -2888,58 +2956,81 @@ void BytecodeGraphBuilder::BuildBinaryOpWithImmediate(const Operator* op) {
node = lowering.value();
} else {
DCHECK(!lowering.Changed());
- node = NewNode(op, left, right);
+ node = NewNode(op, left, right, feedback_vector_node());
}
environment()->BindAccumulator(node, Environment::kAttachFrameState);
}
void BytecodeGraphBuilder::VisitAddSmi() {
- BuildBinaryOpWithImmediate(
- javascript()->Add(GetBinaryOperationHint(kBinaryOperationSmiHintIndex)));
+ FeedbackSource feedback = CreateFeedbackSource(
+ bytecode_iterator().GetSlotOperand(kBinaryOperationSmiHintIndex));
+ BuildBinaryOpWithImmediate(javascript()->Add(feedback));
}
void BytecodeGraphBuilder::VisitSubSmi() {
- BuildBinaryOpWithImmediate(javascript()->Subtract());
+ FeedbackSource feedback = CreateFeedbackSource(
+ bytecode_iterator().GetSlotOperand(kBinaryOperationSmiHintIndex));
+ BuildBinaryOpWithImmediate(javascript()->Subtract(feedback));
}
void BytecodeGraphBuilder::VisitMulSmi() {
- BuildBinaryOpWithImmediate(javascript()->Multiply());
+ FeedbackSource feedback = CreateFeedbackSource(
+ bytecode_iterator().GetSlotOperand(kBinaryOperationSmiHintIndex));
+ BuildBinaryOpWithImmediate(javascript()->Multiply(feedback));
}
void BytecodeGraphBuilder::VisitDivSmi() {
- BuildBinaryOpWithImmediate(javascript()->Divide());
+ FeedbackSource feedback = CreateFeedbackSource(
+ bytecode_iterator().GetSlotOperand(kBinaryOperationSmiHintIndex));
+ BuildBinaryOpWithImmediate(javascript()->Divide(feedback));
}
void BytecodeGraphBuilder::VisitModSmi() {
- BuildBinaryOpWithImmediate(javascript()->Modulus());
+ FeedbackSource feedback = CreateFeedbackSource(
+ bytecode_iterator().GetSlotOperand(kBinaryOperationSmiHintIndex));
+ BuildBinaryOpWithImmediate(javascript()->Modulus(feedback));
}
void BytecodeGraphBuilder::VisitExpSmi() {
- BuildBinaryOpWithImmediate(javascript()->Exponentiate());
+ FeedbackSource feedback = CreateFeedbackSource(
+ bytecode_iterator().GetSlotOperand(kBinaryOperationSmiHintIndex));
+ BuildBinaryOpWithImmediate(javascript()->Exponentiate(feedback));
}
void BytecodeGraphBuilder::VisitBitwiseOrSmi() {
- BuildBinaryOpWithImmediate(javascript()->BitwiseOr());
+ FeedbackSource feedback = CreateFeedbackSource(
+ bytecode_iterator().GetSlotOperand(kBinaryOperationSmiHintIndex));
+ BuildBinaryOpWithImmediate(javascript()->BitwiseOr(feedback));
}
void BytecodeGraphBuilder::VisitBitwiseXorSmi() {
- BuildBinaryOpWithImmediate(javascript()->BitwiseXor());
+ FeedbackSource feedback = CreateFeedbackSource(
+ bytecode_iterator().GetSlotOperand(kBinaryOperationSmiHintIndex));
+ BuildBinaryOpWithImmediate(javascript()->BitwiseXor(feedback));
}
void BytecodeGraphBuilder::VisitBitwiseAndSmi() {
- BuildBinaryOpWithImmediate(javascript()->BitwiseAnd());
+ FeedbackSource feedback = CreateFeedbackSource(
+ bytecode_iterator().GetSlotOperand(kBinaryOperationSmiHintIndex));
+ BuildBinaryOpWithImmediate(javascript()->BitwiseAnd(feedback));
}
void BytecodeGraphBuilder::VisitShiftLeftSmi() {
- BuildBinaryOpWithImmediate(javascript()->ShiftLeft());
+ FeedbackSource feedback = CreateFeedbackSource(
+ bytecode_iterator().GetSlotOperand(kBinaryOperationSmiHintIndex));
+ BuildBinaryOpWithImmediate(javascript()->ShiftLeft(feedback));
}
void BytecodeGraphBuilder::VisitShiftRightSmi() {
- BuildBinaryOpWithImmediate(javascript()->ShiftRight());
+ FeedbackSource feedback = CreateFeedbackSource(
+ bytecode_iterator().GetSlotOperand(kBinaryOperationSmiHintIndex));
+ BuildBinaryOpWithImmediate(javascript()->ShiftRight(feedback));
}
void BytecodeGraphBuilder::VisitShiftRightLogicalSmi() {
- BuildBinaryOpWithImmediate(javascript()->ShiftRightLogical());
+ FeedbackSource feedback = CreateFeedbackSource(
+ bytecode_iterator().GetSlotOperand(kBinaryOperationSmiHintIndex));
+ BuildBinaryOpWithImmediate(javascript()->ShiftRightLogical(feedback));
}
void BytecodeGraphBuilder::VisitLogicalNot() {
@@ -2986,7 +3077,9 @@ void BytecodeGraphBuilder::VisitGetSuperConstructor() {
Environment::kAttachFrameState);
}
-void BytecodeGraphBuilder::BuildCompareOp(const Operator* op) {
+void BytecodeGraphBuilder::BuildInstanceOf(const Operator* op) {
+ // TODO(jgruber, v8:8888): Treat InstanceOf like other compare ops.
+ DCHECK_EQ(op->opcode(), IrOpcode::kJSInstanceOf);
PrepareEagerCheckpoint();
Node* left =
environment()->LookupRegister(bytecode_iterator().GetRegisterOperand(0));
@@ -3007,28 +3100,62 @@ void BytecodeGraphBuilder::BuildCompareOp(const Operator* op) {
environment()->BindAccumulator(node, Environment::kAttachFrameState);
}
+void BytecodeGraphBuilder::BuildCompareOp(const Operator* op) {
+ DCHECK(JSOperator::IsBinaryWithFeedback(op->opcode()));
+ PrepareEagerCheckpoint();
+ Node* left =
+ environment()->LookupRegister(bytecode_iterator().GetRegisterOperand(0));
+ Node* right = environment()->LookupAccumulator();
+
+ FeedbackSlot slot = bytecode_iterator().GetSlotOperand(1);
+ JSTypeHintLowering::LoweringResult lowering =
+ TryBuildSimplifiedBinaryOp(op, left, right, slot);
+ if (lowering.IsExit()) return;
+
+ Node* node = nullptr;
+ if (lowering.IsSideEffectFree()) {
+ node = lowering.value();
+ } else {
+ DCHECK(!lowering.Changed());
+ node = NewNode(op, left, right, feedback_vector_node());
+ }
+ environment()->BindAccumulator(node, Environment::kAttachFrameState);
+}
+
void BytecodeGraphBuilder::VisitTestEqual() {
- BuildCompareOp(javascript()->Equal(GetCompareOperationHint()));
+ FeedbackSource feedback = CreateFeedbackSource(
+ bytecode_iterator().GetSlotOperand(kCompareOperationHintIndex));
+ BuildCompareOp(javascript()->Equal(feedback));
}
void BytecodeGraphBuilder::VisitTestEqualStrict() {
- BuildCompareOp(javascript()->StrictEqual(GetCompareOperationHint()));
+ FeedbackSource feedback = CreateFeedbackSource(
+ bytecode_iterator().GetSlotOperand(kCompareOperationHintIndex));
+ BuildCompareOp(javascript()->StrictEqual(feedback));
}
void BytecodeGraphBuilder::VisitTestLessThan() {
- BuildCompareOp(javascript()->LessThan(GetCompareOperationHint()));
+ FeedbackSource feedback = CreateFeedbackSource(
+ bytecode_iterator().GetSlotOperand(kCompareOperationHintIndex));
+ BuildCompareOp(javascript()->LessThan(feedback));
}
void BytecodeGraphBuilder::VisitTestGreaterThan() {
- BuildCompareOp(javascript()->GreaterThan(GetCompareOperationHint()));
+ FeedbackSource feedback = CreateFeedbackSource(
+ bytecode_iterator().GetSlotOperand(kCompareOperationHintIndex));
+ BuildCompareOp(javascript()->GreaterThan(feedback));
}
void BytecodeGraphBuilder::VisitTestLessThanOrEqual() {
- BuildCompareOp(javascript()->LessThanOrEqual(GetCompareOperationHint()));
+ FeedbackSource feedback = CreateFeedbackSource(
+ bytecode_iterator().GetSlotOperand(kCompareOperationHintIndex));
+ BuildCompareOp(javascript()->LessThanOrEqual(feedback));
}
void BytecodeGraphBuilder::VisitTestGreaterThanOrEqual() {
- BuildCompareOp(javascript()->GreaterThanOrEqual(GetCompareOperationHint()));
+ FeedbackSource feedback = CreateFeedbackSource(
+ bytecode_iterator().GetSlotOperand(kCompareOperationHintIndex));
+ BuildCompareOp(javascript()->GreaterThanOrEqual(feedback));
}
void BytecodeGraphBuilder::VisitTestReferenceEqual() {
@@ -3052,7 +3179,7 @@ void BytecodeGraphBuilder::VisitTestIn() {
void BytecodeGraphBuilder::VisitTestInstanceOf() {
int const slot_index = bytecode_iterator().GetIndexOperand(1);
- BuildCompareOp(javascript()->InstanceOf(CreateFeedbackSource(slot_index)));
+ BuildInstanceOf(javascript()->InstanceOf(CreateFeedbackSource(slot_index)));
}
void BytecodeGraphBuilder::VisitTestUndetectable() {
diff --git a/chromium/v8/src/compiler/bytecode-graph-builder.h b/chromium/v8/src/compiler/bytecode-graph-builder.h
index 03e900c214e..1667a4d57d2 100644
--- a/chromium/v8/src/compiler/bytecode-graph-builder.h
+++ b/chromium/v8/src/compiler/bytecode-graph-builder.h
@@ -33,6 +33,7 @@ enum class BytecodeGraphBuilderFlag : uint8_t {
// bytecode analysis.
kAnalyzeEnvironmentLiveness = 1 << 1,
kBailoutOnUninitialized = 1 << 2,
+ kNativeContextIndependent = 1 << 3,
};
using BytecodeGraphBuilderFlags = base::Flags<BytecodeGraphBuilderFlag>;
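kNativeContextIndependent is a new bit in BytecodeGraphBuilderFlag, tested in the builder's constructor with a plain bitwise AND. The real code wraps the enum in base::Flags; the sketch below reduces it to an ordinary uint8_t mask and lists only the enumerators visible in this hunk (illustrative, not part of the patch):

#include <cstdint>
#include <cstdio>

enum BytecodeGraphBuilderFlag : uint8_t {
  kAnalyzeEnvironmentLiveness = 1 << 1,
  kBailoutOnUninitialized = 1 << 2,
  kNativeContextIndependent = 1 << 3,  // the bit added by this patch
};

int main() {
  uint8_t flags = kAnalyzeEnvironmentLiveness | kNativeContextIndependent;
  bool nci = (flags & kNativeContextIndependent) != 0;
  std::printf("native_context_independent_ = %s\n", nci ? "true" : "false");
}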
diff --git a/chromium/v8/src/compiler/code-assembler.cc b/chromium/v8/src/compiler/code-assembler.cc
index 035d64144f6..44177c16b5b 100644
--- a/chromium/v8/src/compiler/code-assembler.cc
+++ b/chromium/v8/src/compiler/code-assembler.cc
@@ -1027,11 +1027,7 @@ Node* CodeAssembler::CallJSStubImpl(const CallInterfaceDescriptor& descriptor,
inputs.Add(new_target);
}
inputs.Add(arity);
-#ifdef V8_REVERSE_JSARGS
- for (auto arg : base::Reversed(args)) inputs.Add(arg);
-#else
for (auto arg : args) inputs.Add(arg);
-#endif
if (descriptor.HasContextParameter()) {
inputs.Add(context);
}
@@ -1393,6 +1389,7 @@ void CodeAssemblerLabel::MergeVariables() {
}
// If the following asserts, then you've jumped to a label without a bound
// variable along that path that expects to merge its value into a phi.
+ // This can also occur if a label is bound that is never jumped to.
DCHECK(variable_phis_.find(var) == variable_phis_.end() ||
count == merge_count_);
USE(count);
diff --git a/chromium/v8/src/compiler/code-assembler.h b/chromium/v8/src/compiler/code-assembler.h
index d9d81cfe30c..de15e05497d 100644
--- a/chromium/v8/src/compiler/code-assembler.h
+++ b/chromium/v8/src/compiler/code-assembler.h
@@ -73,10 +73,9 @@ class PromiseFulfillReactionJobTask;
class PromiseReaction;
class PromiseReactionJobTask;
class PromiseRejectReactionJobTask;
-class WasmDebugInfo;
class Zone;
#define MAKE_FORWARD_DECLARATION(Name) class Name;
-TORQUE_INTERNAL_CLASS_LIST(MAKE_FORWARD_DECLARATION)
+TORQUE_DEFINED_CLASS_LIST(MAKE_FORWARD_DECLARATION)
#undef MAKE_FORWARD_DECLARATION
template <typename T>
diff --git a/chromium/v8/src/compiler/effect-control-linearizer.cc b/chromium/v8/src/compiler/effect-control-linearizer.cc
index 20391eacce6..65bb2eaf053 100644
--- a/chromium/v8/src/compiler/effect-control-linearizer.cc
+++ b/chromium/v8/src/compiler/effect-control-linearizer.cc
@@ -2700,6 +2700,20 @@ Node* EffectControlLinearizer::BuildCheckedHeapNumberOrOddballToFloat64(
check_number, frame_state);
break;
}
+ case CheckTaggedInputMode::kNumberOrBoolean: {
+ auto check_done = __ MakeLabel();
+
+ __ GotoIf(check_number, &check_done);
+ __ DeoptimizeIfNot(DeoptimizeReason::kNotANumberOrBoolean, feedback,
+ __ TaggedEqual(value_map, __ BooleanMapConstant()),
+ frame_state);
+ STATIC_ASSERT_FIELD_OFFSETS_EQUAL(HeapNumber::kValueOffset,
+ Oddball::kToNumberRawOffset);
+ __ Goto(&check_done);
+
+ __ Bind(&check_done);
+ break;
+ }
case CheckTaggedInputMode::kNumberOrOddball: {
auto check_done = __ MakeLabel();
@@ -3756,7 +3770,7 @@ Node* EffectControlLinearizer::LowerDeadValue(Node* node) {
Node* unreachable = __ Unreachable();
NodeProperties::ReplaceValueInput(node, unreachable, 0);
}
- return node;
+ return gasm()->AddNode(node);
}
Node* EffectControlLinearizer::LowerStringToNumber(Node* node) {
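The new kNumberOrBoolean case deoptimizes when the input is neither a HeapNumber nor the boolean oddball; the STATIC_ASSERT records why the subsequent value load works for both: HeapNumber::kValueOffset and Oddball::kToNumberRawOffset alias, so a boolean's numeric value sits at the same offset as a heap number's. A simplified standalone model of just the control flow, with maps collapsed to an enum and the deopt reduced to a return value (not V8 code):

#include <cstdio>

enum class InstanceMap { kHeapNumber, kBoolean, kOther };

// Returns true if execution may continue (number or boolean); false models
// DeoptimizeIfNot(DeoptimizeReason::kNotANumberOrBoolean, ...).
bool CheckNumberOrBoolean(InstanceMap value_map) {
  if (value_map == InstanceMap::kHeapNumber) return true;  // GotoIf(check_number)
  return value_map == InstanceMap::kBoolean;  // TaggedEqual(map, BooleanMap)
}

int main() {
  std::printf("%d %d %d\n", CheckNumberOrBoolean(InstanceMap::kHeapNumber),
              CheckNumberOrBoolean(InstanceMap::kBoolean),
              CheckNumberOrBoolean(InstanceMap::kOther));
}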
diff --git a/chromium/v8/src/compiler/globals.h b/chromium/v8/src/compiler/globals.h
index a8d8a47c59c..fe96783c23d 100644
--- a/chromium/v8/src/compiler/globals.h
+++ b/chromium/v8/src/compiler/globals.h
@@ -6,11 +6,24 @@
#define V8_COMPILER_GLOBALS_H_
#include "src/common/globals.h"
+#include "src/flags/flags.h"
namespace v8 {
namespace internal {
namespace compiler {
+// The nci flag is currently used to experiment with feedback collection in
+// optimized code produced by generic lowering.
+// Considerations:
+// - Should we increment the call count? https://crbug.com/v8/10524
+// - Is feedback already megamorphic in all these cases?
+//
+// TODO(jgruber): Remove once we've made a decision whether to collect feedback
+// unconditionally.
+inline bool CollectFeedbackInGenericLowering() {
+ return FLAG_turbo_collect_feedback_in_generic_lowering;
+}
+
enum class StackCheckKind {
kJSFunctionEntry = 0,
kJSIterationBody,
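CollectFeedbackInGenericLowering() is a thin inline wrapper over a runtime flag, so call sites read as intent rather than as a raw FLAG_ lookup. A minimal sketch of the same pattern, with a plain global boolean standing in for the real FLAG_turbo_collect_feedback_in_generic_lowering (not part of the patch):

#include <cstdio>

// Stand-in for the --turbo-collect-feedback-in-generic-lowering flag.
static bool flag_turbo_collect_feedback_in_generic_lowering = false;

inline bool CollectFeedbackInGenericLowering() {
  return flag_turbo_collect_feedback_in_generic_lowering;
}

int main() {
  if (CollectFeedbackInGenericLowering()) {
    std::puts("generic lowering will emit feedback-collecting builtins");
  } else {
    std::puts("generic lowering keeps the feedback-free fast path");
  }
}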
diff --git a/chromium/v8/src/compiler/graph-assembler.cc b/chromium/v8/src/compiler/graph-assembler.cc
index 6057f1ce649..c25930150ed 100644
--- a/chromium/v8/src/compiler/graph-assembler.cc
+++ b/chromium/v8/src/compiler/graph-assembler.cc
@@ -32,7 +32,6 @@ class GraphAssembler::BasicBlockUpdater {
void AddBranch(Node* branch, BasicBlock* tblock, BasicBlock* fblock);
void AddGoto(BasicBlock* to);
void AddGoto(BasicBlock* from, BasicBlock* to);
- void AddThrow(Node* node);
void StartBlock(BasicBlock* block);
BasicBlock* Finalize(BasicBlock* original);
@@ -268,92 +267,6 @@ void GraphAssembler::BasicBlockUpdater::AddGoto(BasicBlock* from,
current_block_ = nullptr;
}
-void GraphAssembler::BasicBlockUpdater::RemoveSuccessorsFromSchedule() {
- ZoneSet<BasicBlock*> blocks(temp_zone());
- ZoneQueue<BasicBlock*> worklist(temp_zone());
-
- for (SuccessorInfo succ : saved_successors_) {
- BasicBlock* block = succ.block;
- block->predecessors().erase(block->predecessors().begin() + succ.index);
- blocks.insert(block);
- worklist.push(block);
- }
- saved_successors_.clear();
-
- // Walk through blocks until we get to the end node, then remove the path from
- // end, clearing their successors / predecessors.
- // This works because the unreachable paths form self-contained control flow
- // that doesn't re-merge with reachable control flow (checked below) and
- // DeadCodeElimination::ReduceEffectPhi preventing Unreachable from going into
- // an effect-phi. We would need to extend this if we need the ability to mark
- // control flow as unreachable later in the pipeline.
- while (!worklist.empty()) {
- BasicBlock* current = worklist.front();
- worklist.pop();
-
- for (BasicBlock* successor : current->successors()) {
- // Remove the block from sucessors predecessors.
- ZoneVector<BasicBlock*>& predecessors = successor->predecessors();
- auto it = std::find(predecessors.begin(), predecessors.end(), current);
- DCHECK_EQ(*it, current);
- predecessors.erase(it);
-
- if (successor == schedule_->end()) {
- // If we have reached the end block, remove this block's control input
- // from the end node's control inputs.
- DCHECK_EQ(current->SuccessorCount(), 1);
- NodeProperties::RemoveControlFromEnd(graph_, common_,
- current->control_input());
- } else {
- // Otherwise, add successor to worklist if it's not already been seen.
- if (blocks.insert(successor).second) {
- worklist.push(successor);
- }
- }
- }
- current->ClearSuccessors();
- }
-
-#ifdef DEBUG
- // Ensure that the set of blocks being removed from the schedule are self
- // contained, i.e., all predecessors have been removed from these blocks.
- for (BasicBlock* block : blocks) {
- CHECK_EQ(block->PredecessorCount(), 0);
- CHECK_EQ(block->SuccessorCount(), 0);
- }
-#endif
-}
-
-void GraphAssembler::BasicBlockUpdater::AddThrow(Node* node) {
- if (state_ == kUnchanged) {
- CopyForChange();
- }
-
- // Clear original successors and replace the block's original control and
- // control input to the throw, since this block is now connected directly to
- // the end.
- if (original_control_input_ != nullptr) {
- NodeProperties::ReplaceUses(original_control_input_, node, nullptr, node);
- original_control_input_->Kill();
- }
- original_control_input_ = node;
- original_control_ = BasicBlock::kThrow;
-
- bool already_connected_to_end =
- saved_successors_.size() == 1 &&
- saved_successors_[0].block == schedule_->end();
- if (!already_connected_to_end) {
- // Remove all successor blocks from the schedule.
- RemoveSuccessorsFromSchedule();
-
- // Update current block's successor withend.
- DCHECK(saved_successors_.empty());
- size_t index = schedule_->end()->predecessors().size();
- schedule_->end()->AddPredecessor(current_block_);
- saved_successors_.push_back({schedule_->end(), index});
- }
-}
-
void GraphAssembler::BasicBlockUpdater::UpdateSuccessors(BasicBlock* block) {
for (SuccessorInfo succ : saved_successors_) {
(succ.block->predecessors())[succ.index] = block;
@@ -716,6 +629,11 @@ Node* GraphAssembler::Unreachable() {
graph()->NewNode(common()->Unreachable(), effect(), control()));
}
+TNode<RawPtrT> GraphAssembler::StackSlot(int size, int alignment) {
+ return AddNode<RawPtrT>(
+ graph()->NewNode(machine()->StackSlot(size, alignment)));
+}
+
Node* GraphAssembler::Store(StoreRepresentation rep, Node* object, Node* offset,
Node* value) {
return AddNode(graph()->NewNode(machine()->Store(rep), object, offset, value,
@@ -906,11 +824,15 @@ BasicBlock* GraphAssembler::FinalizeCurrentBlock(BasicBlock* block) {
void GraphAssembler::ConnectUnreachableToEnd() {
DCHECK_EQ(effect()->opcode(), IrOpcode::kUnreachable);
- Node* throw_node = graph()->NewNode(common()->Throw(), effect(), control());
- NodeProperties::MergeControlToEnd(graph(), common(), throw_node);
- effect_ = control_ = mcgraph()->Dead();
- if (block_updater_) {
- block_updater_->AddThrow(throw_node);
+ // When maintaining the schedule we can't easily rewire the successor blocks
+ // to disconnect them from the graph, so we just leave the unreachable nodes
+ // in the schedule.
+ // TODO(9684): Add a scheduled dead-code elimination phase to remove all the
+ // subsequent unreachable code from the schedule.
+ if (!block_updater_) {
+ Node* throw_node = graph()->NewNode(common()->Throw(), effect(), control());
+ NodeProperties::MergeControlToEnd(graph(), common(), throw_node);
+ effect_ = control_ = mcgraph()->Dead();
}
}
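With a block updater present, the assembler can no longer splice unreachable successors out of an existing schedule (AddThrow and RemoveSuccessorsFromSchedule are deleted above), so the Throw-to-End rewiring now happens only in the unscheduled case. A toy model of that policy split, with all names as stand-ins (not part of the patch):

#include <cstdio>

enum class DeadCodePolicy { kRewireToEndNow, kLeaveForLaterPass };

// Mirrors the new guard in ConnectUnreachableToEnd(): scheduled graphs keep
// their unreachable nodes until a later cleanup phase (TODO 9684 above).
DeadCodePolicy PolicyFor(bool maintaining_schedule) {
  return maintaining_schedule ? DeadCodePolicy::kLeaveForLaterPass
                              : DeadCodePolicy::kRewireToEndNow;
}

int main() {
  std::printf("scheduled graph: %s\n",
              PolicyFor(true) == DeadCodePolicy::kLeaveForLaterPass
                  ? "keep unreachable nodes"
                  : "throw to End");
  std::printf("unscheduled graph: %s\n",
              PolicyFor(false) == DeadCodePolicy::kRewireToEndNow
                  ? "throw to End"
                  : "keep unreachable nodes");
}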
diff --git a/chromium/v8/src/compiler/graph-assembler.h b/chromium/v8/src/compiler/graph-assembler.h
index f57c732912b..9b0b5b42c11 100644
--- a/chromium/v8/src/compiler/graph-assembler.h
+++ b/chromium/v8/src/compiler/graph-assembler.h
@@ -133,16 +133,6 @@ class GraphAssembler;
// Wrapper classes for special node/edge types (effect, control, frame states)
// that otherwise don't fit into the type system.
-class NodeWrapper {
- public:
- explicit constexpr NodeWrapper(Node* node) : node_(node) {}
- operator Node*() const { return node_; }
- Node* operator->() const { return node_; }
-
- private:
- Node* node_;
-};
-
class Effect : public NodeWrapper {
public:
explicit constexpr Effect(Node* node) : NodeWrapper(node) {
@@ -313,6 +303,8 @@ class V8_EXPORT_PRIVATE GraphAssembler {
Node* TypeGuard(Type type, Node* value);
Node* Checkpoint(FrameState frame_state);
+ TNode<RawPtrT> StackSlot(int size, int alignment);
+
Node* Store(StoreRepresentation rep, Node* object, Node* offset, Node* value);
Node* Store(StoreRepresentation rep, Node* object, int offset, Node* value);
Node* Load(MachineType type, Node* object, Node* offset);
diff --git a/chromium/v8/src/compiler/graph-visualizer.cc b/chromium/v8/src/compiler/graph-visualizer.cc
index 86e3da9d27a..4b327ca285e 100644
--- a/chromium/v8/src/compiler/graph-visualizer.cc
+++ b/chromium/v8/src/compiler/graph-visualizer.cc
@@ -964,6 +964,118 @@ void PrintScheduledGraph(std::ostream& os, const Schedule* schedule) {
} // namespace
+std::ostream& operator<<(std::ostream& os,
+ const LiveRangeAsJSON& live_range_json) {
+ const LiveRange& range = live_range_json.range_;
+ os << "{\"id\":" << range.relative_id() << ",\"type\":";
+ if (range.HasRegisterAssigned()) {
+ const InstructionOperand op = range.GetAssignedOperand();
+ os << "\"assigned\",\"op\":"
+ << InstructionOperandAsJSON{&op, &(live_range_json.code_)};
+ } else if (range.spilled() && !range.TopLevel()->HasNoSpillType()) {
+ const TopLevelLiveRange* top = range.TopLevel();
+ if (top->HasSpillOperand()) {
+ os << "\"assigned\",\"op\":"
+ << InstructionOperandAsJSON{top->GetSpillOperand(),
+ &(live_range_json.code_)};
+ } else {
+ int index = top->GetSpillRange()->assigned_slot();
+ os << "\"spilled\",\"op\":";
+ if (IsFloatingPoint(top->representation())) {
+ os << "\"fp_stack:" << index << "\"";
+ } else {
+ os << "\"stack:" << index << "\"";
+ }
+ }
+ } else {
+ os << "\"none\"";
+ }
+
+ os << ",\"intervals\":[";
+ bool first = true;
+ for (const UseInterval* interval = range.first_interval();
+ interval != nullptr; interval = interval->next()) {
+ if (first) {
+ first = false;
+ } else {
+ os << ",";
+ }
+ os << "[" << interval->start().value() << "," << interval->end().value()
+ << "]";
+ }
+
+ os << "],\"uses\":[";
+ first = true;
+ for (UsePosition* current_pos = range.first_pos(); current_pos != nullptr;
+ current_pos = current_pos->next()) {
+ if (first) {
+ first = false;
+ } else {
+ os << ",";
+ }
+ os << current_pos->pos().value();
+ }
+
+ os << "]}";
+ return os;
+}
+
+std::ostream& operator<<(
+ std::ostream& os,
+ const TopLevelLiveRangeAsJSON& top_level_live_range_json) {
+ int vreg = top_level_live_range_json.range_.vreg();
+ bool first = true;
+ os << "\"" << (vreg > 0 ? vreg : -vreg) << "\":{ \"child_ranges\":[";
+ for (const LiveRange* child = &(top_level_live_range_json.range_);
+ child != nullptr; child = child->next()) {
+ if (!top_level_live_range_json.range_.IsEmpty()) {
+ if (first) {
+ first = false;
+ } else {
+ os << ",";
+ }
+ os << LiveRangeAsJSON{*child, top_level_live_range_json.code_};
+ }
+ }
+ os << "]";
+ if (top_level_live_range_json.range_.IsFixed()) {
+ os << ", \"is_deferred\": "
+ << (top_level_live_range_json.range_.IsDeferredFixed() ? "true"
+ : "false");
+ }
+ os << "}";
+ return os;
+}
+
+void PrintTopLevelLiveRanges(std::ostream& os,
+ const ZoneVector<TopLevelLiveRange*> ranges,
+ const InstructionSequence& code) {
+ bool first = true;
+ os << "{";
+ for (const TopLevelLiveRange* range : ranges) {
+ if (range != nullptr && !range->IsEmpty()) {
+ if (first) {
+ first = false;
+ } else {
+ os << ",";
+ }
+ os << TopLevelLiveRangeAsJSON{*range, code};
+ }
+ }
+ os << "}";
+}
+
+std::ostream& operator<<(std::ostream& os,
+ const RegisterAllocationDataAsJSON& ac) {
+ os << "\"fixed_double_live_ranges\": ";
+ PrintTopLevelLiveRanges(os, ac.data_.fixed_double_live_ranges(), ac.code_);
+ os << ",\"fixed_live_ranges\": ";
+ PrintTopLevelLiveRanges(os, ac.data_.fixed_live_ranges(), ac.code_);
+ os << ",\"live_ranges\": ";
+ PrintTopLevelLiveRanges(os, ac.data_.live_ranges(), ac.code_);
+ return os;
+}
+
std::ostream& operator<<(std::ostream& os, const AsScheduledGraph& scheduled) {
PrintScheduledGraph(os, scheduled.schedule);
return os;
@@ -1121,8 +1233,11 @@ std::ostream& operator<<(std::ostream& os, const InstructionAsJSON& i_json) {
bool first = true;
for (MoveOperands* move : *pm) {
if (move->IsEliminated()) continue;
- if (!first) os << ",";
- first = false;
+ if (first) {
+ first = false;
+ } else {
+ os << ",";
+ }
os << "[" << InstructionOperandAsJSON{&move->destination(), i_json.code_}
<< "," << InstructionOperandAsJSON{&move->source(), i_json.code_}
<< "]";
@@ -1228,7 +1343,7 @@ std::ostream& operator<<(std::ostream& os, const InstructionBlockAsJSON& b) {
std::ostream& operator<<(std::ostream& os, const InstructionSequenceAsJSON& s) {
const InstructionSequence* code = s.sequence_;
- os << "\"blocks\": [";
+ os << "[";
bool need_comma = false;
for (int i = 0; i < code->InstructionBlockCount(); i++) {
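Both the new live-range printers and the touched ParallelMove printer rely on the same "first element sets a flag, later elements print a leading comma" idiom to emit JSON arrays without a trailing separator. A standalone reduction of that idiom to a plain ostream, without any instruction types (illustrative only):

#include <iostream>
#include <utility>
#include <vector>

// Emits e.g. [[0,4],[6,9]] -- commas only *between* elements, matching the
// interval and use-position emission in LiveRangeAsJSON's operator<<.
void PrintIntervalsAsJSON(std::ostream& os,
                          const std::vector<std::pair<int, int>>& intervals) {
  os << "[";
  bool first = true;
  for (const auto& interval : intervals) {
    if (first) {
      first = false;
    } else {
      os << ",";
    }
    os << "[" << interval.first << "," << interval.second << "]";
  }
  os << "]";
}

int main() {
  PrintIntervalsAsJSON(std::cout, {{0, 4}, {6, 9}});
  std::cout << "\n";
}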
diff --git a/chromium/v8/src/compiler/graph-visualizer.h b/chromium/v8/src/compiler/graph-visualizer.h
index 05f522b6bca..55859330157 100644
--- a/chromium/v8/src/compiler/graph-visualizer.h
+++ b/chromium/v8/src/compiler/graph-visualizer.h
@@ -22,6 +22,8 @@ class SourcePosition;
namespace compiler {
class Graph;
+class LiveRange;
+class TopLevelLiveRange;
class Instruction;
class InstructionBlock;
class InstructionOperand;
@@ -155,6 +157,30 @@ std::ostream& operator<<(std::ostream& os, const AsC1V& ac);
std::ostream& operator<<(std::ostream& os,
const AsC1VRegisterAllocationData& ac);
+struct LiveRangeAsJSON {
+ const LiveRange& range_;
+ const InstructionSequence& code_;
+};
+
+std::ostream& operator<<(std::ostream& os,
+ const LiveRangeAsJSON& live_range_json);
+
+struct TopLevelLiveRangeAsJSON {
+ const TopLevelLiveRange& range_;
+ const InstructionSequence& code_;
+};
+
+std::ostream& operator<<(
+ std::ostream& os, const TopLevelLiveRangeAsJSON& top_level_live_range_json);
+
+struct RegisterAllocationDataAsJSON {
+ const RegisterAllocationData& data_;
+ const InstructionSequence& code_;
+};
+
+std::ostream& operator<<(std::ostream& os,
+ const RegisterAllocationDataAsJSON& ac);
+
struct InstructionOperandAsJSON {
const InstructionOperand* op_;
const InstructionSequence* code_;
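The header additions follow the existing AsJSON convention: a tiny aggregate that pairs a reference to the data with the InstructionSequence it needs for printing, plus a free operator<< that does the formatting, so call sites can write os << RegisterAllocationDataAsJSON{data, code}. A generic sketch of that wrapper-plus-operator idiom with made-up payload types (not V8's):

#include <iostream>

// Hypothetical payload standing in for RegisterAllocationData / LiveRange.
struct Payload { int value; };

// The wrapper owns nothing; it only bundles the data with the extra context
// the printer needs, mirroring the *_AsJSON structs declared above.
struct PayloadAsJSON {
  const Payload& data_;
  const char* context_;
};

std::ostream& operator<<(std::ostream& os, const PayloadAsJSON& p) {
  return os << "{\"" << p.context_ << "\":" << p.data_.value << "}";
}

int main() {
  Payload payload{42};
  std::cout << PayloadAsJSON{payload, "live_ranges"} << "\n";  // {"live_ranges":42}
}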
diff --git a/chromium/v8/src/compiler/js-call-reducer.cc b/chromium/v8/src/compiler/js-call-reducer.cc
index 947f54c4109..a2f9aaeb6ff 100644
--- a/chromium/v8/src/compiler/js-call-reducer.cc
+++ b/chromium/v8/src/compiler/js-call-reducer.cc
@@ -65,7 +65,6 @@ class JSCallReducerAssembler : public JSGraphAssembler {
outermost_catch_scope_.set_has_handler(has_handler);
outermost_catch_scope_.set_gasm(this);
}
- virtual ~JSCallReducerAssembler() {}
TNode<Object> ReduceMathUnary(const Operator* op);
TNode<Object> ReduceMathBinary(const Operator* op);
@@ -793,11 +792,7 @@ class PromiseBuiltinReducerAssembler : public JSCallReducerAssembler {
int ConstructArity() const {
DCHECK_EQ(IrOpcode::kJSConstruct, node_ptr()->opcode());
ConstructParameters const& p = ConstructParametersOf(node_ptr()->op());
- static constexpr int kTarget = 1; // The first input.
- static constexpr int kNewTarget = 1; // The last input.
- static constexpr int kExtraArgs = kTarget + kNewTarget;
- DCHECK_GE(p.arity(), kExtraArgs);
- return static_cast<int>(p.arity() - kExtraArgs);
+ return p.arity_without_implicit_args();
}
TNode<Object> NewTargetInput() const {
@@ -846,7 +841,8 @@ class PromiseBuiltinReducerAssembler : public JSCallReducerAssembler {
FeedbackSource no_feedback_source{};
MayThrow(_ {
return AddNode<Object>(graph()->NewNode(
- javascript()->Call(4, p.frequency(), no_feedback_source,
+ javascript()->Call(2 + kTargetAndReceiver, p.frequency(),
+ no_feedback_source,
ConvertReceiverMode::kNullOrUndefined),
executor, UndefinedConstant(), resolve, reject, ContextInput(),
frame_state, effect(), control()));
@@ -859,7 +855,8 @@ class PromiseBuiltinReducerAssembler : public JSCallReducerAssembler {
FeedbackSource no_feedback_source{};
MayThrow(_ {
return AddNode<Object>(graph()->NewNode(
- javascript()->Call(3, p.frequency(), no_feedback_source,
+ javascript()->Call(1 + kTargetAndReceiver, p.frequency(),
+ no_feedback_source,
ConvertReceiverMode::kNullOrUndefined),
reject, UndefinedConstant(), exception, ContextInput(), frame_state,
effect(), control()));
@@ -1012,7 +1009,7 @@ TNode<Object> JSCallReducerAssembler::JSCall3(
CallParameters const& p = CallParametersOf(node_ptr()->op());
return MayThrow(_ {
return AddNode<Object>(graph()->NewNode(
- javascript()->Call(5, p.frequency(), p.feedback(),
+ javascript()->Call(3 + kTargetAndReceiver, p.frequency(), p.feedback(),
ConvertReceiverMode::kAny, p.speculation_mode(),
CallFeedbackRelation::kUnrelated),
function, this_arg, arg0, arg1, arg2, ContextInput(), frame_state,
@@ -1027,7 +1024,7 @@ TNode<Object> JSCallReducerAssembler::JSCall4(
CallParameters const& p = CallParametersOf(node_ptr()->op());
return MayThrow(_ {
return AddNode<Object>(graph()->NewNode(
- javascript()->Call(6, p.frequency(), p.feedback(),
+ javascript()->Call(4 + kTargetAndReceiver, p.frequency(), p.feedback(),
ConvertReceiverMode::kAny, p.speculation_mode(),
CallFeedbackRelation::kUnrelated),
function, this_arg, arg0, arg1, arg2, arg3, ContextInput(), frame_state,
@@ -2340,8 +2337,7 @@ Reduction JSCallReducer::ReduceArrayConstructor(Node* node) {
CallParameters const& p = CallParametersOf(node->op());
// Turn the {node} into a {JSCreateArray} call.
- DCHECK_LE(2u, p.arity());
- size_t const arity = p.arity() - 2;
+ size_t const arity = p.arity_without_implicit_args();
NodeProperties::ReplaceValueInput(node, target, 0);
NodeProperties::ReplaceValueInput(node, target, 1);
NodeProperties::ChangeOp(
@@ -2355,9 +2351,9 @@ Reduction JSCallReducer::ReduceBooleanConstructor(Node* node) {
CallParameters const& p = CallParametersOf(node->op());
// Replace the {node} with a proper {ToBoolean} operator.
- DCHECK_LE(2u, p.arity());
- Node* value = (p.arity() == 2) ? jsgraph()->UndefinedConstant()
- : NodeProperties::GetValueInput(node, 2);
+ Node* value = (p.arity_without_implicit_args() == 0)
+ ? jsgraph()->UndefinedConstant()
+ : NodeProperties::GetValueInput(node, 2);
value = graph()->NewNode(simplified()->ToBoolean(), value);
ReplaceWithValue(node, value);
return Replace(value);
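These reducer hunks replace the scattered "p.arity() - 2" arithmetic with p.arity_without_implicit_args(). Assuming the new accessor simply subtracts the implicit target-and-receiver (for calls) or target-and-new.target (for constructs) inputs from the stored arity, exactly as the replaced expressions did, the equivalence can be spelled out with a stub (illustrative, not the real CallParameters):

#include <cassert>

constexpr int kTargetAndReceiver = 2;  // assumed value, defined outside this diff

// Stub with the accounting the reducer relies on: arity() counts the implicit
// target and receiver inputs, arity_without_implicit_args() does not.
struct CallParametersStub {
  int arity_;  // as stored on the JSCall operator
  int arity() const { return arity_; }
  int arity_without_implicit_args() const { return arity_ - kTargetAndReceiver; }
};

int main() {
  // f.call(recv, a, b): two explicit arguments, four value inputs.
  CallParametersStub p{2 + kTargetAndReceiver};
  assert(p.arity_without_implicit_args() == 2);               // new spelling
  assert(p.arity() - 2 == p.arity_without_implicit_args());   // old spelling
}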
@@ -2367,9 +2363,8 @@ Reduction JSCallReducer::ReduceBooleanConstructor(Node* node) {
Reduction JSCallReducer::ReduceObjectConstructor(Node* node) {
DCHECK_EQ(IrOpcode::kJSCall, node->opcode());
CallParameters const& p = CallParametersOf(node->op());
- if (p.arity() < 3) return NoChange();
- Node* value = (p.arity() >= 3) ? NodeProperties::GetValueInput(node, 2)
- : jsgraph()->UndefinedConstant();
+ if (p.arity_without_implicit_args() < 1) return NoChange();
+ Node* value = NodeProperties::GetValueInput(node, 2);
Node* effect = NodeProperties::GetEffectInput(node);
// We can fold away the Object(x) call if |x| is definitely not a primitive.
@@ -2394,15 +2389,14 @@ Reduction JSCallReducer::ReduceFunctionPrototypeApply(Node* node) {
DCHECK_EQ(IrOpcode::kJSCall, node->opcode());
CallParameters const& p = CallParametersOf(node->op());
- size_t arity = p.arity();
- DCHECK_LE(2u, arity);
+ size_t arity = p.arity_without_implicit_args();
ConvertReceiverMode convert_mode = ConvertReceiverMode::kAny;
- if (arity == 2) {
+ if (arity == 0) {
// Neither thisArg nor argArray was provided.
convert_mode = ConvertReceiverMode::kNullOrUndefined;
node->ReplaceInput(0, node->InputAt(1));
node->ReplaceInput(1, jsgraph()->UndefinedConstant());
- } else if (arity == 3) {
+ } else if (arity == 1) {
// The argArray was not provided, just remove the {target}.
node->RemoveInput(0);
--arity;
@@ -2423,7 +2417,7 @@ Reduction JSCallReducer::ReduceFunctionPrototypeApply(Node* node) {
node->ReplaceInput(0, target);
node->ReplaceInput(1, this_argument);
node->ReplaceInput(2, arguments_list);
- while (arity-- > 3) node->RemoveInput(3);
+ while (arity-- > 1) node->RemoveInput(3);
// Morph the {node} to a {JSCallWithArrayLike}.
NodeProperties::ChangeOp(
@@ -2465,9 +2459,9 @@ Reduction JSCallReducer::ReduceFunctionPrototypeApply(Node* node) {
Node* effect1 = effect;
Node* control1 =
graph()->NewNode(common()->Merge(2), if_null, if_undefined);
- Node* value1 = effect1 = control1 =
- graph()->NewNode(javascript()->Call(2), target, this_argument,
- context, frame_state, effect1, control1);
+ Node* value1 = effect1 = control1 = graph()->NewNode(
+ javascript()->Call(0 + kTargetAndReceiver), target, this_argument,
+ context, frame_state, effect1, control1);
// Rewire potential exception edges.
Node* if_exception = nullptr;
@@ -2504,8 +2498,8 @@ Reduction JSCallReducer::ReduceFunctionPrototypeApply(Node* node) {
}
// Change {node} to the new {JSCall} operator.
NodeProperties::ChangeOp(
- node, javascript()->Call(arity, p.frequency(), p.feedback(), convert_mode,
- p.speculation_mode(),
+ node, javascript()->Call(arity + kTargetAndReceiver, p.frequency(),
+ p.feedback(), convert_mode, p.speculation_mode(),
CallFeedbackRelation::kUnrelated));
// Try to further reduce the JSCall {node}.
return Changed(node).FollowedBy(ReduceJSCall(node));
@@ -2625,17 +2619,19 @@ Reduction JSCallReducer::ReduceFunctionPrototypeBind(Node* node) {
control, p.feedback());
// Replace the {node} with a JSCreateBoundFunction.
- int const arity = std::max(0, node->op()->ValueInputCount() - 3);
- int const input_count = 2 + arity + 3;
+ static constexpr int kContextEffectAndControl = 3;
+ int const arity =
+ std::max(0, node->op()->ValueInputCount() - kContextEffectAndControl);
+ int const input_count = kTargetAndReceiver + arity + kContextEffectAndControl;
Node** inputs = graph()->zone()->NewArray<Node*>(input_count);
inputs[0] = receiver;
inputs[1] = bound_this;
for (int i = 0; i < arity; ++i) {
- inputs[2 + i] = NodeProperties::GetValueInput(node, 3 + i);
+ inputs[kTargetAndReceiver + i] = NodeProperties::GetValueInput(node, 3 + i);
}
- inputs[2 + arity + 0] = context;
- inputs[2 + arity + 1] = effect;
- inputs[2 + arity + 2] = control;
+ inputs[kTargetAndReceiver + arity + 0] = context;
+ inputs[kTargetAndReceiver + arity + 1] = effect;
+ inputs[kTargetAndReceiver + arity + 2] = control;
Node* value = effect =
graph()->NewNode(javascript()->CreateBoundFunction(arity, map.object()),
input_count, inputs);
@@ -2675,10 +2671,9 @@ Reduction JSCallReducer::ReduceFunctionPrototypeCall(Node* node) {
// Remove the target from {node} and use the receiver as target instead, and
// the thisArg becomes the new target. If thisArg was not provided, insert
// undefined instead.
- size_t arity = p.arity();
- DCHECK_LE(2u, arity);
+ size_t arity = p.arity_without_implicit_args();
ConvertReceiverMode convert_mode;
- if (arity == 2) {
+ if (arity == 0) {
// The thisArg was not provided, use undefined as receiver.
convert_mode = ConvertReceiverMode::kNullOrUndefined;
node->ReplaceInput(0, node->InputAt(1));
@@ -2690,8 +2685,8 @@ Reduction JSCallReducer::ReduceFunctionPrototypeCall(Node* node) {
--arity;
}
NodeProperties::ChangeOp(
- node, javascript()->Call(arity, p.frequency(), p.feedback(), convert_mode,
- p.speculation_mode(),
+ node, javascript()->Call(arity + kTargetAndReceiver, p.frequency(),
+ p.feedback(), convert_mode, p.speculation_mode(),
CallFeedbackRelation::kUnrelated));
// Try to further reduce the JSCall {node}.
return Changed(node).FollowedBy(ReduceJSCall(node));
@@ -2780,7 +2775,7 @@ Reduction JSCallReducer::ReduceObjectGetPrototypeOf(Node* node) {
Reduction JSCallReducer::ReduceObjectIs(Node* node) {
DCHECK_EQ(IrOpcode::kJSCall, node->opcode());
CallParameters const& params = CallParametersOf(node->op());
- int const argc = static_cast<int>(params.arity() - 2);
+ int const argc = params.arity_without_implicit_args();
Node* lhs = (argc >= 1) ? NodeProperties::GetValueInput(node, 2)
: jsgraph()->UndefinedConstant();
Node* rhs = (argc >= 2) ? NodeProperties::GetValueInput(node, 3)
@@ -2801,7 +2796,7 @@ Reduction JSCallReducer::ReduceObjectPrototypeGetProto(Node* node) {
Reduction JSCallReducer::ReduceObjectPrototypeHasOwnProperty(Node* node) {
DCHECK_EQ(IrOpcode::kJSCall, node->opcode());
CallParameters const& params = CallParametersOf(node->op());
- int const argc = static_cast<int>(params.arity() - 2);
+ int const argc = params.arity_without_implicit_args();
Node* receiver = NodeProperties::GetValueInput(node, 1);
Node* name = (argc >= 1) ? NodeProperties::GetValueInput(node, 2)
: jsgraph()->UndefinedConstant();
@@ -2911,8 +2906,7 @@ Reduction JSCallReducer::ReduceObjectPrototypeIsPrototypeOf(Node* node) {
Reduction JSCallReducer::ReduceReflectApply(Node* node) {
DCHECK_EQ(IrOpcode::kJSCall, node->opcode());
CallParameters const& p = CallParametersOf(node->op());
- int arity = static_cast<int>(p.arity() - 2);
- DCHECK_LE(0, arity);
+ int arity = p.arity_without_implicit_args();
// Massage value inputs appropriately.
node->RemoveInput(0);
node->RemoveInput(0);
@@ -2933,8 +2927,7 @@ Reduction JSCallReducer::ReduceReflectApply(Node* node) {
Reduction JSCallReducer::ReduceReflectConstruct(Node* node) {
DCHECK_EQ(IrOpcode::kJSCall, node->opcode());
CallParameters const& p = CallParametersOf(node->op());
- int arity = static_cast<int>(p.arity() - 2);
- DCHECK_LE(0, arity);
+ int arity = p.arity_without_implicit_args();
// Massage value inputs appropriately.
node->RemoveInput(0);
node->RemoveInput(0);
@@ -2947,8 +2940,8 @@ Reduction JSCallReducer::ReduceReflectConstruct(Node* node) {
while (arity-- > 3) {
node->RemoveInput(arity);
}
- NodeProperties::ChangeOp(node,
- javascript()->ConstructWithArrayLike(p.frequency()));
+ NodeProperties::ChangeOp(
+ node, javascript()->ConstructWithArrayLike(p.frequency(), p.feedback()));
return Changed(node).FollowedBy(ReduceJSConstructWithArrayLike(node));
}
@@ -2988,7 +2981,7 @@ Reduction JSCallReducer::ReduceObjectCreate(Node* node) {
Reduction JSCallReducer::ReduceReflectGet(Node* node) {
DCHECK_EQ(IrOpcode::kJSCall, node->opcode());
CallParameters const& p = CallParametersOf(node->op());
- int arity = static_cast<int>(p.arity() - 2);
+ int arity = p.arity_without_implicit_args();
if (arity != 2) return NoChange();
Node* target = NodeProperties::GetValueInput(node, 2);
Node* key = NodeProperties::GetValueInput(node, 3);
@@ -3063,8 +3056,7 @@ Reduction JSCallReducer::ReduceReflectGet(Node* node) {
Reduction JSCallReducer::ReduceReflectHas(Node* node) {
DCHECK_EQ(IrOpcode::kJSCall, node->opcode());
CallParameters const& p = CallParametersOf(node->op());
- int arity = static_cast<int>(p.arity() - 2);
- DCHECK_LE(0, arity);
+ int arity = p.arity_without_implicit_args();
Node* target = (arity >= 1) ? NodeProperties::GetValueInput(node, 2)
: jsgraph()->UndefinedConstant();
Node* key = (arity >= 2) ? NodeProperties::GetValueInput(node, 3)
@@ -3403,7 +3395,7 @@ Reduction JSCallReducer::ReduceCallApiFunction(
DCHECK_EQ(IrOpcode::kJSCall, node->opcode());
CallParameters const& p = CallParametersOf(node->op());
- int const argc = static_cast<int>(p.arity()) - 2;
+ int const argc = p.arity_without_implicit_args();
Node* target = NodeProperties::GetValueInput(node, 0);
Node* global_proxy =
jsgraph()->Constant(native_context().global_proxy_object());
@@ -3491,10 +3483,14 @@ Reduction JSCallReducer::ReduceCallApiFunction(
function_template_info.LookupHolderOfExpectedType(receiver_map);
if (api_holder.lookup != holder_i.lookup) return inference.NoChange();
- if (!(api_holder.holder.has_value() && holder_i.holder.has_value()))
- return inference.NoChange();
- if (!api_holder.holder->equals(*holder_i.holder))
- return inference.NoChange();
+ DCHECK(holder_i.lookup == CallOptimization::kHolderFound ||
+ holder_i.lookup == CallOptimization::kHolderIsReceiver);
+ if (holder_i.lookup == CallOptimization::kHolderFound) {
+ DCHECK(api_holder.holder.has_value() && holder_i.holder.has_value());
+ if (!api_holder.holder->equals(*holder_i.holder)) {
+ return inference.NoChange();
+ }
+ }
CHECK(receiver_map.IsJSReceiverMap());
CHECK(!receiver_map.is_access_check_needed() ||
@@ -3677,14 +3673,14 @@ Reduction JSCallReducer::ReduceCallOrConstructWithArrayLikeOrSpread(
case IrOpcode::kJSCallWithSpread: {
// Ignore uses as spread input to calls with spread.
CallParameters p = CallParametersOf(user->op());
- int const arity = static_cast<int>(p.arity() - 1);
- if (user->InputAt(arity) == arguments_list) continue;
+ int const arity = p.arity_without_implicit_args();
+ if (user->InputAt(arity + 1) == arguments_list) continue;
break;
}
case IrOpcode::kJSConstructWithSpread: {
// Ignore uses as spread input to construct with spread.
ConstructParameters p = ConstructParametersOf(user->op());
- int const arity = static_cast<int>(p.arity() - 2);
+ int const arity = p.arity_without_implicit_args();
if (user->InputAt(arity) == arguments_list) continue;
break;
}
@@ -3775,7 +3771,8 @@ Reduction JSCallReducer::ReduceCallOrConstructWithArrayLikeOrSpread(
return Changed(node).FollowedBy(ReduceJSCall(node));
} else {
NodeProperties::ChangeOp(
- node, javascript()->Construct(arity + 2, frequency, feedback));
+ node, javascript()->Construct(arity + kTargetAndNewTarget, frequency,
+ feedback));
Node* new_target = NodeProperties::GetValueInput(node, arity + 1);
Node* frame_state = NodeProperties::GetFrameStateInput(node);
Node* context = NodeProperties::GetContextInput(node);
@@ -3875,8 +3872,7 @@ Reduction JSCallReducer::ReduceJSCall(Node* node) {
Node* target = NodeProperties::GetValueInput(node, 0);
Node* control = NodeProperties::GetControlInput(node);
Node* effect = NodeProperties::GetEffectInput(node);
- size_t arity = p.arity();
- DCHECK_LE(2u, arity);
+ size_t arity = p.arity_without_implicit_args();
// Try to specialize JSCall {node}s with constant {target}s.
HeapObjectMatcher m(target);
@@ -3923,9 +3919,10 @@ Reduction JSCallReducer::ReduceJSCall(Node* node) {
}
NodeProperties::ChangeOp(
- node, javascript()->Call(arity, p.frequency(), p.feedback(),
- convert_mode, p.speculation_mode(),
- CallFeedbackRelation::kUnrelated));
+ node,
+ javascript()->Call(arity + kTargetAndReceiver, p.frequency(),
+ p.feedback(), convert_mode, p.speculation_mode(),
+ CallFeedbackRelation::kUnrelated));
// Try to further reduce the JSCall {node}.
return Changed(node).FollowedBy(ReduceJSCall(node));
@@ -3976,9 +3973,10 @@ Reduction JSCallReducer::ReduceJSCall(Node* node) {
? ConvertReceiverMode::kAny
: ConvertReceiverMode::kNotNullOrUndefined;
NodeProperties::ChangeOp(
- node, javascript()->Call(arity, p.frequency(), p.feedback(),
- convert_mode, p.speculation_mode(),
- CallFeedbackRelation::kUnrelated));
+ node,
+ javascript()->Call(arity + kTargetAndReceiver, p.frequency(),
+ p.feedback(), convert_mode, p.speculation_mode(),
+ CallFeedbackRelation::kUnrelated));
// Try to further reduce the JSCall {node}.
return Changed(node).FollowedBy(ReduceJSCall(node));
@@ -4416,30 +4414,29 @@ Reduction JSCallReducer::ReduceJSCall(Node* node,
Reduction JSCallReducer::ReduceJSCallWithArrayLike(Node* node) {
DCHECK_EQ(IrOpcode::kJSCallWithArrayLike, node->opcode());
const CallParameters& p = CallParametersOf(node->op());
- int arity = static_cast<int>(p.arity());
- DCHECK_EQ(arity, 2);
+ int arity = p.arity_without_implicit_args();
+ DCHECK_EQ(arity, 0);
return ReduceCallOrConstructWithArrayLikeOrSpread(
- node, arity, p.frequency(), p.feedback(), p.speculation_mode(),
- p.feedback_relation());
+ node, arity + kTargetAndReceiver, p.frequency(), p.feedback(),
+ p.speculation_mode(), p.feedback_relation());
}
Reduction JSCallReducer::ReduceJSCallWithSpread(Node* node) {
DCHECK_EQ(IrOpcode::kJSCallWithSpread, node->opcode());
CallParameters const& p = CallParametersOf(node->op());
- DCHECK_LE(3u, p.arity());
- int arity = static_cast<int>(p.arity() - 1);
+ int arity = p.arity_without_implicit_args();
+ DCHECK_GE(p.arity(), 1);
CallFrequency frequency = p.frequency();
FeedbackSource feedback = p.feedback();
return ReduceCallOrConstructWithArrayLikeOrSpread(
- node, arity, frequency, feedback, p.speculation_mode(),
- p.feedback_relation());
+ node, arity + kTargetAndReceiver - 1, frequency, feedback,
+ p.speculation_mode(), p.feedback_relation());
}
Reduction JSCallReducer::ReduceJSConstruct(Node* node) {
DCHECK_EQ(IrOpcode::kJSConstruct, node->opcode());
ConstructParameters const& p = ConstructParametersOf(node->op());
- DCHECK_LE(2u, p.arity());
- int arity = static_cast<int>(p.arity() - 2);
+ int arity = p.arity_without_implicit_args();
Node* target = NodeProperties::GetValueInput(node, 0);
Node* new_target = NodeProperties::GetValueInput(node, arity + 1);
Node* effect = NodeProperties::GetEffectInput(node);
@@ -4615,8 +4612,8 @@ Reduction JSCallReducer::ReduceJSConstruct(Node* node) {
// Update the JSConstruct operator on {node}.
NodeProperties::ChangeOp(
- node,
- javascript()->Construct(arity + 2, p.frequency(), FeedbackSource()));
+ node, javascript()->Construct(arity + kTargetAndNewTarget,
+ p.frequency(), FeedbackSource()));
// Try to further reduce the JSConstruct {node}.
return Changed(node).FollowedBy(ReduceJSConstruct(node));
@@ -4655,8 +4652,8 @@ Reduction JSCallReducer::ReduceJSConstruct(Node* node) {
// Update the JSConstruct operator on {node}.
NodeProperties::ChangeOp(
- node,
- javascript()->Construct(arity + 2, p.frequency(), FeedbackSource()));
+ node, javascript()->Construct(arity + kTargetAndNewTarget,
+ p.frequency(), FeedbackSource()));
// Try to further reduce the JSConstruct {node}.
return Changed(node).FollowedBy(ReduceJSConstruct(node));
@@ -4835,17 +4832,19 @@ Reduction JSCallReducer::ReduceStringPrototypeSubstr(Node* node) {
Reduction JSCallReducer::ReduceJSConstructWithArrayLike(Node* node) {
DCHECK_EQ(IrOpcode::kJSConstructWithArrayLike, node->opcode());
- CallFrequency frequency = CallFrequencyOf(node->op());
+ ConstructParameters const& p = ConstructParametersOf(node->op());
+ const int arity = p.arity_without_implicit_args();
+ DCHECK_EQ(arity, 1);
return ReduceCallOrConstructWithArrayLikeOrSpread(
- node, 1, frequency, FeedbackSource(),
+ node, arity, p.frequency(), p.feedback(),
SpeculationMode::kDisallowSpeculation, CallFeedbackRelation::kRelated);
}
Reduction JSCallReducer::ReduceJSConstructWithSpread(Node* node) {
DCHECK_EQ(IrOpcode::kJSConstructWithSpread, node->opcode());
ConstructParameters const& p = ConstructParametersOf(node->op());
- DCHECK_LE(3u, p.arity());
- int arity = static_cast<int>(p.arity() - 2);
+ int arity = p.arity_without_implicit_args();
+ DCHECK_LE(1u, arity);
CallFrequency frequency = p.frequency();
FeedbackSource feedback = p.feedback();
return ReduceCallOrConstructWithArrayLikeOrSpread(
@@ -6094,6 +6093,10 @@ Reduction JSCallReducer::ReduceStringFromCodePoint(Node* node) {
}
Reduction JSCallReducer::ReduceStringPrototypeIterator(Node* node) {
+ // TODO(jgruber): We could reduce here when generating native context
+ // independent code, if LowerJSCreateStringIterator were implemented in
+ // generic lowering.
+ if (broker()->is_native_context_independent()) return NoChange();
CallParameters const& p = CallParametersOf(node->op());
if (p.speculation_mode() == SpeculationMode::kDisallowSpeculation) {
return NoChange();
@@ -6219,6 +6222,11 @@ Reduction JSCallReducer::ReduceStringPrototypeConcat(Node* node) {
}
Reduction JSCallReducer::ReducePromiseConstructor(Node* node) {
+ // TODO(jgruber): We could reduce here when generating native context
+ // independent code, if LowerJSCreatePromise were implemented in generic
+ // lowering.
+ if (broker()->is_native_context_independent()) return NoChange();
+
DisallowHeapAccessIf no_heap_access(should_disallow_heap_access());
PromiseBuiltinReducerAssembler a(jsgraph(), temp_zone(), node, broker());
@@ -6261,7 +6269,7 @@ Reduction JSCallReducer::ReducePromisePrototypeCatch(Node* node) {
if (p.speculation_mode() == SpeculationMode::kDisallowSpeculation) {
return NoChange();
}
- int arity = static_cast<int>(p.arity() - 2);
+ int arity = p.arity_without_implicit_args();
Node* receiver = NodeProperties::GetValueInput(node, 1);
Node* effect = NodeProperties::GetEffectInput(node);
Node* control = NodeProperties::GetControlInput(node);
@@ -6285,10 +6293,10 @@ Reduction JSCallReducer::ReducePromisePrototypeCatch(Node* node) {
node->InsertInput(graph()->zone(), 2, jsgraph()->UndefinedConstant());
}
NodeProperties::ChangeOp(
- node, javascript()->Call(2 + arity, p.frequency(), p.feedback(),
- ConvertReceiverMode::kNotNullOrUndefined,
- p.speculation_mode(),
- CallFeedbackRelation::kUnrelated));
+ node, javascript()->Call(
+ arity + kTargetAndReceiver, p.frequency(), p.feedback(),
+ ConvertReceiverMode::kNotNullOrUndefined, p.speculation_mode(),
+ CallFeedbackRelation::kUnrelated));
return Changed(node).FollowedBy(ReducePromisePrototypeThen(node));
}
@@ -6309,7 +6317,7 @@ Reduction JSCallReducer::ReducePromisePrototypeFinally(Node* node) {
DCHECK_EQ(IrOpcode::kJSCall, node->opcode());
CallParameters const& p = CallParametersOf(node->op());
- int arity = static_cast<int>(p.arity() - 2);
+ int arity = p.arity_without_implicit_args();
Node* receiver = NodeProperties::GetValueInput(node, 1);
Node* on_finally = arity >= 1 ? NodeProperties::GetValueInput(node, 2)
: jsgraph()->UndefinedConstant();
@@ -6414,10 +6422,10 @@ Reduction JSCallReducer::ReducePromisePrototypeFinally(Node* node) {
node->ReplaceInput(2, then_finally);
node->ReplaceInput(3, catch_finally);
NodeProperties::ChangeOp(
- node, javascript()->Call(2 + arity, p.frequency(), p.feedback(),
- ConvertReceiverMode::kNotNullOrUndefined,
- p.speculation_mode(),
- CallFeedbackRelation::kUnrelated));
+ node, javascript()->Call(
+ arity + kTargetAndReceiver, p.frequency(), p.feedback(),
+ ConvertReceiverMode::kNotNullOrUndefined, p.speculation_mode(),
+ CallFeedbackRelation::kUnrelated));
return Changed(node).FollowedBy(ReducePromisePrototypeThen(node));
}
@@ -6525,7 +6533,7 @@ Reduction JSCallReducer::ReduceTypedArrayConstructor(
Node* node, const SharedFunctionInfoRef& shared) {
DCHECK_EQ(IrOpcode::kJSConstruct, node->opcode());
ConstructParameters const& p = ConstructParametersOf(node->op());
- int arity = static_cast<int>(p.arity() - 2);
+ int arity = p.arity_without_implicit_args();
Node* target = NodeProperties::GetValueInput(node, 0);
Node* arg1 = (arity >= 1) ? NodeProperties::GetValueInput(node, 1)
: jsgraph()->UndefinedConstant();
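The js-call-reducer.cc hunks above all lean on one arity convention: CallParameters::arity() counts the implicit target and receiver inputs of a JSCall (and ConstructParameters::arity() counts target and new_target of a JSConstruct), while arity_without_implicit_args() strips them off. Below is a minimal sketch of that convention, assuming both constants equal 2 as the replaced "arity + 2" expressions imply; it is an illustration, not the real js-operator.h code.

    // Sketch only: the arity bookkeeping implied by the hunks above.
    // kTargetAndReceiver / kTargetAndNewTarget are assumed to be 2, matching
    // the "arity + 2" expressions they replace; not the real declarations.
    #include <cstdint>

    constexpr int kTargetAndReceiver = 2;   // JSCall: target + receiver
    constexpr int kTargetAndNewTarget = 2;  // JSConstruct: target + new_target

    struct CallParametersSketch {
      uint32_t arity_;  // includes the implicit inputs
      int arity_without_implicit_args() const {
        // What the old call sites computed by hand as "p.arity() - 2".
        return static_cast<int>(arity_) - kTargetAndReceiver;
      }
    };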
diff --git a/chromium/v8/src/compiler/js-create-lowering.cc b/chromium/v8/src/compiler/js-create-lowering.cc
index d0059030d50..9674a436adb 100644
--- a/chromium/v8/src/compiler/js-create-lowering.cc
+++ b/chromium/v8/src/compiler/js-create-lowering.cc
@@ -28,6 +28,7 @@
#include "src/objects/js-regexp-inl.h"
#include "src/objects/objects-inl.h"
#include "src/objects/template-objects.h"
+#include "torque-generated/exported-class-definitions-tq.h"
namespace v8 {
namespace internal {
@@ -1507,16 +1508,15 @@ Node* JSCreateLowering::AllocateAliasedArguments(
// Actually allocate the backing store.
AllocationBuilder a(jsgraph(), arguments, control);
- a.AllocateArray(mapped_count + 2,
- MapRef(broker(), factory()->sloppy_arguments_elements_map()));
- a.Store(AccessBuilder::ForFixedArrayElement(), jsgraph()->Constant(0),
- context);
- a.Store(AccessBuilder::ForFixedArrayElement(), jsgraph()->Constant(1),
- arguments);
+ a.AllocateSloppyArgumentElements(
+ mapped_count,
+ MapRef(broker(), factory()->sloppy_arguments_elements_map()));
+ a.Store(AccessBuilder::ForSloppyArgumentsElementsContext(), context);
+ a.Store(AccessBuilder::ForSloppyArgumentsElementsArguments(), arguments);
for (int i = 0; i < mapped_count; ++i) {
int idx = shared.context_header_size() + parameter_count - 1 - i;
- a.Store(AccessBuilder::ForFixedArrayElement(), jsgraph()->Constant(i + 2),
- jsgraph()->Constant(idx));
+ a.Store(AccessBuilder::ForSloppyArgumentsElementsMappedEntry(),
+ jsgraph()->Constant(i), jsgraph()->Constant(idx));
}
return a.Finish();
}
@@ -1553,12 +1553,11 @@ Node* JSCreateLowering::AllocateAliasedArguments(
// Actually allocate the backing store.
AllocationBuilder a(jsgraph(), effect, control);
- a.AllocateArray(mapped_count + 2,
- MapRef(broker(), factory()->sloppy_arguments_elements_map()));
- a.Store(AccessBuilder::ForFixedArrayElement(), jsgraph()->Constant(0),
- context);
- a.Store(AccessBuilder::ForFixedArrayElement(), jsgraph()->Constant(1),
- arguments);
+ a.AllocateSloppyArgumentElements(
+ mapped_count,
+ MapRef(broker(), factory()->sloppy_arguments_elements_map()));
+ a.Store(AccessBuilder::ForSloppyArgumentsElementsContext(), context);
+ a.Store(AccessBuilder::ForSloppyArgumentsElementsArguments(), arguments);
for (int i = 0; i < mapped_count; ++i) {
int idx = shared.context_header_size() + parameter_count - 1 - i;
Node* value = graph()->NewNode(
@@ -1566,8 +1565,8 @@ Node* JSCreateLowering::AllocateAliasedArguments(
graph()->NewNode(simplified()->NumberLessThan(), jsgraph()->Constant(i),
arguments_length),
jsgraph()->Constant(idx), jsgraph()->TheHoleConstant());
- a.Store(AccessBuilder::ForFixedArrayElement(), jsgraph()->Constant(i + 2),
- value);
+ a.Store(AccessBuilder::ForSloppyArgumentsElementsMappedEntry(),
+ jsgraph()->Constant(i), value);
}
return a.Finish();
}
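The js-create-lowering.cc hunks switch the aliased-arguments backing store from a raw FixedArray, where slot 0 held the context, slot 1 the arguments array, and mapped entry i lived at index i + 2, to a dedicated SloppyArgumentsElements object with named accessors and mapped entries indexed from 0. A sketch of the index translation follows (hypothetical helpers, not V8 code).

    // Sketch only: the layout change implied by the AccessBuilder swaps above.
    constexpr int kOldHeaderSlots = 2;  // context + arguments in the old layout

    constexpr int OldMappedEntrySlot(int i) {
      return i + kOldHeaderSlots;  // old: ForFixedArrayElement() at i + 2
    }
    constexpr int NewMappedEntryIndex(int i) {
      return i;  // new: ForSloppyArgumentsElementsMappedEntry() at i
    }
    // Accordingly, AllocateSloppyArgumentElements(mapped_count, map) replaces
    // AllocateArray(mapped_count + 2, map).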
diff --git a/chromium/v8/src/compiler/js-generic-lowering.cc b/chromium/v8/src/compiler/js-generic-lowering.cc
index cedb5bc42d5..8dbb64fe662 100644
--- a/chromium/v8/src/compiler/js-generic-lowering.cc
+++ b/chromium/v8/src/compiler/js-generic-lowering.cc
@@ -18,6 +18,7 @@
#include "src/objects/feedback-cell.h"
#include "src/objects/feedback-vector.h"
#include "src/objects/scope-info.h"
+#include "src/objects/template-objects-inl.h"
namespace v8 {
namespace internal {
@@ -42,10 +43,10 @@ JSGenericLowering::~JSGenericLowering() = default;
Reduction JSGenericLowering::Reduce(Node* node) {
switch (node->opcode()) {
-#define DECLARE_CASE(x) \
- case IrOpcode::k##x: \
- Lower##x(node); \
- break;
+#define DECLARE_CASE(x, ...) \
+ case IrOpcode::k##x: \
+ Lower##x(node); \
+ break;
JS_OP_LIST(DECLARE_CASE)
#undef DECLARE_CASE
default:
@@ -55,34 +56,11 @@ Reduction JSGenericLowering::Reduce(Node* node) {
return Changed(node);
}
-#define REPLACE_STUB_CALL(Name) \
- void JSGenericLowering::LowerJS##Name(Node* node) { \
- CallDescriptor::Flags flags = FrameStateFlagForCall(node); \
- Callable callable = Builtins::CallableFor(isolate(), Builtins::k##Name); \
- ReplaceWithStubCall(node, callable, flags); \
+#define REPLACE_STUB_CALL(Name) \
+ void JSGenericLowering::LowerJS##Name(Node* node) { \
+ ReplaceWithBuiltinCall(node, Builtins::k##Name); \
}
-REPLACE_STUB_CALL(Add)
-REPLACE_STUB_CALL(Subtract)
-REPLACE_STUB_CALL(Multiply)
-REPLACE_STUB_CALL(Divide)
-REPLACE_STUB_CALL(Modulus)
-REPLACE_STUB_CALL(Exponentiate)
-REPLACE_STUB_CALL(BitwiseAnd)
-REPLACE_STUB_CALL(BitwiseOr)
-REPLACE_STUB_CALL(BitwiseXor)
-REPLACE_STUB_CALL(ShiftLeft)
-REPLACE_STUB_CALL(ShiftRight)
-REPLACE_STUB_CALL(ShiftRightLogical)
-REPLACE_STUB_CALL(LessThan)
-REPLACE_STUB_CALL(LessThanOrEqual)
-REPLACE_STUB_CALL(GreaterThan)
-REPLACE_STUB_CALL(GreaterThanOrEqual)
-REPLACE_STUB_CALL(BitwiseNot)
-REPLACE_STUB_CALL(Decrement)
-REPLACE_STUB_CALL(Increment)
-REPLACE_STUB_CALL(Negate)
REPLACE_STUB_CALL(HasProperty)
-REPLACE_STUB_CALL(Equal)
REPLACE_STUB_CALL(ToLength)
REPLACE_STUB_CALL(ToNumber)
REPLACE_STUB_CALL(ToNumberConvertBigInt)
@@ -101,16 +79,21 @@ REPLACE_STUB_CALL(RejectPromise)
REPLACE_STUB_CALL(ResolvePromise)
#undef REPLACE_STUB_CALL
-void JSGenericLowering::ReplaceWithStubCall(Node* node,
- Callable callable,
- CallDescriptor::Flags flags) {
- ReplaceWithStubCall(node, callable, flags, node->op()->properties());
+void JSGenericLowering::ReplaceWithBuiltinCall(Node* node,
+ Builtins::Name builtin) {
+ CallDescriptor::Flags flags = FrameStateFlagForCall(node);
+ Callable callable = Builtins::CallableFor(isolate(), builtin);
+ ReplaceWithBuiltinCall(node, callable, flags);
}
-void JSGenericLowering::ReplaceWithStubCall(Node* node,
- Callable callable,
- CallDescriptor::Flags flags,
- Operator::Properties properties) {
+void JSGenericLowering::ReplaceWithBuiltinCall(Node* node, Callable callable,
+ CallDescriptor::Flags flags) {
+ ReplaceWithBuiltinCall(node, callable, flags, node->op()->properties());
+}
+
+void JSGenericLowering::ReplaceWithBuiltinCall(
+ Node* node, Callable callable, CallDescriptor::Flags flags,
+ Operator::Properties properties) {
const CallInterfaceDescriptor& descriptor = callable.descriptor();
auto call_descriptor = Linkage::GetStubCallDescriptor(
zone(), descriptor, descriptor.GetStackParameterCount(), flags,
@@ -120,7 +103,6 @@ void JSGenericLowering::ReplaceWithStubCall(Node* node,
NodeProperties::ChangeOp(node, common()->Call(call_descriptor));
}
-
void JSGenericLowering::ReplaceWithRuntimeCall(Node* node,
Runtime::FunctionId f,
int nargs_override) {
@@ -138,13 +120,114 @@ void JSGenericLowering::ReplaceWithRuntimeCall(Node* node,
NodeProperties::ChangeOp(node, common()->Call(call_descriptor));
}
+void JSGenericLowering::ReplaceUnaryOpWithBuiltinCall(
+ Node* node, Builtins::Name builtin_without_feedback,
+ Builtins::Name builtin_with_feedback) {
+ DCHECK(JSOperator::IsUnaryWithFeedback(node->opcode()));
+ const FeedbackParameter& p = FeedbackParameterOf(node->op());
+ if (CollectFeedbackInGenericLowering() && p.feedback().IsValid()) {
+ Callable callable = Builtins::CallableFor(isolate(), builtin_with_feedback);
+ Node* slot = jsgraph()->UintPtrConstant(p.feedback().slot.ToInt());
+ const CallInterfaceDescriptor& descriptor = callable.descriptor();
+ CallDescriptor::Flags flags = FrameStateFlagForCall(node);
+ auto call_descriptor = Linkage::GetStubCallDescriptor(
+ zone(), descriptor, descriptor.GetStackParameterCount(), flags,
+ node->op()->properties());
+ Node* stub_code = jsgraph()->HeapConstant(callable.code());
+ STATIC_ASSERT(JSUnaryOpNode::ValueIndex() == 0);
+ STATIC_ASSERT(JSUnaryOpNode::FeedbackVectorIndex() == 1);
+ DCHECK_EQ(node->op()->ValueInputCount(), 2);
+ node->InsertInput(zone(), 0, stub_code);
+ node->InsertInput(zone(), 2, slot);
+ NodeProperties::ChangeOp(node, common()->Call(call_descriptor));
+ } else {
+ node->RemoveInput(JSUnaryOpNode::FeedbackVectorIndex());
+ ReplaceWithBuiltinCall(node, builtin_without_feedback);
+ }
+}
+
+#define DEF_UNARY_LOWERING(Name) \
+ void JSGenericLowering::LowerJS##Name(Node* node) { \
+ ReplaceUnaryOpWithBuiltinCall(node, Builtins::k##Name, \
+ Builtins::k##Name##_WithFeedback); \
+ }
+DEF_UNARY_LOWERING(BitwiseNot)
+DEF_UNARY_LOWERING(Decrement)
+DEF_UNARY_LOWERING(Increment)
+DEF_UNARY_LOWERING(Negate)
+#undef DEF_UNARY_LOWERING
+
+void JSGenericLowering::ReplaceBinaryOpWithBuiltinCall(
+ Node* node, Builtins::Name builtin_without_feedback,
+ Builtins::Name builtin_with_feedback) {
+ DCHECK(JSOperator::IsBinaryWithFeedback(node->opcode()));
+ Builtins::Name builtin_id;
+ const FeedbackParameter& p = FeedbackParameterOf(node->op());
+ if (CollectFeedbackInGenericLowering() && p.feedback().IsValid()) {
+ Node* slot = jsgraph()->UintPtrConstant(p.feedback().slot.ToInt());
+ STATIC_ASSERT(JSBinaryOpNode::LeftIndex() == 0);
+ STATIC_ASSERT(JSBinaryOpNode::RightIndex() == 1);
+ STATIC_ASSERT(JSBinaryOpNode::FeedbackVectorIndex() == 2);
+ DCHECK_EQ(node->op()->ValueInputCount(), 3);
+ node->InsertInput(zone(), 2, slot);
+ builtin_id = builtin_with_feedback;
+ } else {
+ node->RemoveInput(JSBinaryOpNode::FeedbackVectorIndex());
+ builtin_id = builtin_without_feedback;
+ }
+
+ ReplaceWithBuiltinCall(node, builtin_id);
+}
+
+#define DEF_BINARY_LOWERING(Name) \
+ void JSGenericLowering::LowerJS##Name(Node* node) { \
+ ReplaceBinaryOpWithBuiltinCall(node, Builtins::k##Name, \
+ Builtins::k##Name##_WithFeedback); \
+ }
+// Binary ops.
+DEF_BINARY_LOWERING(Add)
+DEF_BINARY_LOWERING(BitwiseAnd)
+DEF_BINARY_LOWERING(BitwiseOr)
+DEF_BINARY_LOWERING(BitwiseXor)
+DEF_BINARY_LOWERING(Divide)
+DEF_BINARY_LOWERING(Exponentiate)
+DEF_BINARY_LOWERING(Modulus)
+DEF_BINARY_LOWERING(Multiply)
+DEF_BINARY_LOWERING(ShiftLeft)
+DEF_BINARY_LOWERING(ShiftRight)
+DEF_BINARY_LOWERING(ShiftRightLogical)
+DEF_BINARY_LOWERING(Subtract)
+// Compare ops.
+DEF_BINARY_LOWERING(Equal)
+DEF_BINARY_LOWERING(GreaterThan)
+DEF_BINARY_LOWERING(GreaterThanOrEqual)
+DEF_BINARY_LOWERING(LessThan)
+DEF_BINARY_LOWERING(LessThanOrEqual)
+#undef DEF_BINARY_LOWERING
+
void JSGenericLowering::LowerJSStrictEqual(Node* node) {
// The === operator doesn't need the current context.
NodeProperties::ReplaceContextInput(node, jsgraph()->NoContextConstant());
- Callable callable = Builtins::CallableFor(isolate(), Builtins::kStrictEqual);
- node->RemoveInput(4); // control
- ReplaceWithStubCall(node, callable, CallDescriptor::kNoFlags,
- Operator::kEliminatable);
+ node->RemoveInput(NodeProperties::FirstControlIndex(node));
+
+ Builtins::Name builtin_id;
+ const FeedbackParameter& p = FeedbackParameterOf(node->op());
+ if (CollectFeedbackInGenericLowering() && p.feedback().IsValid()) {
+ Node* slot = jsgraph()->UintPtrConstant(p.feedback().slot.ToInt());
+ STATIC_ASSERT(JSStrictEqualNode::LeftIndex() == 0);
+ STATIC_ASSERT(JSStrictEqualNode::RightIndex() == 1);
+ STATIC_ASSERT(JSStrictEqualNode::FeedbackVectorIndex() == 2);
+ DCHECK_EQ(node->op()->ValueInputCount(), 3);
+ node->InsertInput(zone(), 2, slot);
+ builtin_id = Builtins::kStrictEqual_WithFeedback;
+ } else {
+ node->RemoveInput(JSStrictEqualNode::FeedbackVectorIndex());
+ builtin_id = Builtins::kStrictEqual;
+ }
+
+ Callable callable = Builtins::CallableFor(isolate(), builtin_id);
+ ReplaceWithBuiltinCall(node, callable, CallDescriptor::kNoFlags,
+ Operator::kEliminatable);
}
namespace {
@@ -164,57 +247,49 @@ bool ShouldUseMegamorphicLoadBuiltin(FeedbackSource const& source,
} // namespace
void JSGenericLowering::LowerJSLoadProperty(Node* node) {
- CallDescriptor::Flags flags = FrameStateFlagForCall(node);
const PropertyAccess& p = PropertyAccessOf(node->op());
Node* frame_state = NodeProperties::GetFrameStateInput(node);
Node* outer_state = frame_state->InputAt(kFrameStateOuterStateInput);
node->InsertInput(zone(), 2,
jsgraph()->TaggedIndexConstant(p.feedback().index()));
if (outer_state->opcode() != IrOpcode::kFrameState) {
- Callable callable = Builtins::CallableFor(
- isolate(), ShouldUseMegamorphicLoadBuiltin(p.feedback(), broker())
- ? Builtins::kKeyedLoadICTrampoline_Megamorphic
- : Builtins::kKeyedLoadICTrampoline);
- ReplaceWithStubCall(node, callable, flags);
+ ReplaceWithBuiltinCall(
+ node, ShouldUseMegamorphicLoadBuiltin(p.feedback(), broker())
+ ? Builtins::kKeyedLoadICTrampoline_Megamorphic
+ : Builtins::kKeyedLoadICTrampoline);
} else {
- Callable callable = Builtins::CallableFor(
- isolate(), ShouldUseMegamorphicLoadBuiltin(p.feedback(), broker())
- ? Builtins::kKeyedLoadIC_Megamorphic
- : Builtins::kKeyedLoadIC);
Node* vector = jsgraph()->HeapConstant(p.feedback().vector);
node->InsertInput(zone(), 3, vector);
- ReplaceWithStubCall(node, callable, flags);
+ ReplaceWithBuiltinCall(
+ node, ShouldUseMegamorphicLoadBuiltin(p.feedback(), broker())
+ ? Builtins::kKeyedLoadIC_Megamorphic
+ : Builtins::kKeyedLoadIC);
}
}
void JSGenericLowering::LowerJSLoadNamed(Node* node) {
- CallDescriptor::Flags flags = FrameStateFlagForCall(node);
NamedAccess const& p = NamedAccessOf(node->op());
Node* frame_state = NodeProperties::GetFrameStateInput(node);
Node* outer_state = frame_state->InputAt(kFrameStateOuterStateInput);
node->InsertInput(zone(), 1, jsgraph()->HeapConstant(p.name()));
if (!p.feedback().IsValid()) {
- Callable callable =
- Builtins::CallableFor(isolate(), Builtins::kGetProperty);
- ReplaceWithStubCall(node, callable, flags);
+ ReplaceWithBuiltinCall(node, Builtins::kGetProperty);
return;
}
node->InsertInput(zone(), 2,
jsgraph()->TaggedIndexConstant(p.feedback().index()));
if (outer_state->opcode() != IrOpcode::kFrameState) {
- Callable callable = Builtins::CallableFor(
- isolate(), ShouldUseMegamorphicLoadBuiltin(p.feedback(), broker())
- ? Builtins::kLoadICTrampoline_Megamorphic
- : Builtins::kLoadICTrampoline);
- ReplaceWithStubCall(node, callable, flags);
+ ReplaceWithBuiltinCall(
+ node, ShouldUseMegamorphicLoadBuiltin(p.feedback(), broker())
+ ? Builtins::kLoadICTrampoline_Megamorphic
+ : Builtins::kLoadICTrampoline);
} else {
- Callable callable = Builtins::CallableFor(
- isolate(), ShouldUseMegamorphicLoadBuiltin(p.feedback(), broker())
- ? Builtins::kLoadIC_Megamorphic
- : Builtins::kLoadIC);
Node* vector = jsgraph()->HeapConstant(p.feedback().vector);
node->InsertInput(zone(), 3, vector);
- ReplaceWithStubCall(node, callable, flags);
+ ReplaceWithBuiltinCall(
+ node, ShouldUseMegamorphicLoadBuiltin(p.feedback(), broker())
+ ? Builtins::kLoadIC_Megamorphic
+ : Builtins::kLoadIC);
}
}
@@ -228,50 +303,56 @@ void JSGenericLowering::LowerJSLoadGlobal(Node* node) {
jsgraph()->TaggedIndexConstant(p.feedback().index()));
if (outer_state->opcode() != IrOpcode::kFrameState) {
Callable callable = CodeFactory::LoadGlobalIC(isolate(), p.typeof_mode());
- ReplaceWithStubCall(node, callable, flags);
+ ReplaceWithBuiltinCall(node, callable, flags);
} else {
Callable callable =
CodeFactory::LoadGlobalICInOptimizedCode(isolate(), p.typeof_mode());
Node* vector = jsgraph()->HeapConstant(p.feedback().vector);
node->InsertInput(zone(), 2, vector);
- ReplaceWithStubCall(node, callable, flags);
+ ReplaceWithBuiltinCall(node, callable, flags);
}
}
void JSGenericLowering::LowerJSGetIterator(Node* node) {
// TODO(v8:9625): Currently, the GetIterator operator is desugared in the
// native context specialization phase. Thus, the following generic lowering
- // would never be reachable. We can add a check in native context
- // specialization to avoid desugaring the GetIterator operator when in the
- // case of megamorphic feedback and here, add a call to the
- // 'GetIteratorWithFeedback' builtin. This would reduce the size of the
- // compiled code as it would insert 1 call to the builtin instead of 2 calls
- // resulting from the generic lowering of the LoadNamed and Call operators.
- UNREACHABLE();
+ // is not reachable unless that phase is disabled (e.g. for
+ // native-context-independent code).
+ // We can add a check in native context specialization to avoid desugaring
+ // the GetIterator operator when feedback is megamorphic. This would reduce
+ // the size of the compiled code as it would insert 1 call to the builtin
+ // instead of 2 calls resulting from the generic lowering of the LoadNamed
+ // and Call operators.
+
+ GetIteratorParameters const& p = GetIteratorParametersOf(node->op());
+ Node* load_slot =
+ jsgraph()->TaggedIndexConstant(p.loadFeedback().slot.ToInt());
+ Node* call_slot =
+ jsgraph()->TaggedIndexConstant(p.callFeedback().slot.ToInt());
+ Node* feedback = jsgraph()->HeapConstant(p.callFeedback().vector);
+ node->InsertInput(zone(), 1, load_slot);
+ node->InsertInput(zone(), 2, call_slot);
+ node->InsertInput(zone(), 3, feedback);
+
+ ReplaceWithBuiltinCall(node, Builtins::kGetIteratorWithFeedback);
}
void JSGenericLowering::LowerJSStoreProperty(Node* node) {
- CallDescriptor::Flags flags = FrameStateFlagForCall(node);
PropertyAccess const& p = PropertyAccessOf(node->op());
Node* frame_state = NodeProperties::GetFrameStateInput(node);
Node* outer_state = frame_state->InputAt(kFrameStateOuterStateInput);
node->InsertInput(zone(), 3,
jsgraph()->TaggedIndexConstant(p.feedback().index()));
if (outer_state->opcode() != IrOpcode::kFrameState) {
- Callable callable =
- Builtins::CallableFor(isolate(), Builtins::kKeyedStoreICTrampoline);
- ReplaceWithStubCall(node, callable, flags);
+ ReplaceWithBuiltinCall(node, Builtins::kKeyedStoreICTrampoline);
} else {
- Callable callable =
- Builtins::CallableFor(isolate(), Builtins::kKeyedStoreIC);
Node* vector = jsgraph()->HeapConstant(p.feedback().vector);
node->InsertInput(zone(), 4, vector);
- ReplaceWithStubCall(node, callable, flags);
+ ReplaceWithBuiltinCall(node, Builtins::kKeyedStoreIC);
}
}
void JSGenericLowering::LowerJSStoreNamed(Node* node) {
- CallDescriptor::Flags flags = FrameStateFlagForCall(node);
NamedAccess const& p = NamedAccessOf(node->op());
Node* frame_state = NodeProperties::GetFrameStateInput(node);
Node* outer_state = frame_state->InputAt(kFrameStateOuterStateInput);
@@ -283,14 +364,11 @@ void JSGenericLowering::LowerJSStoreNamed(Node* node) {
node->InsertInput(zone(), 3,
jsgraph()->TaggedIndexConstant(p.feedback().index()));
if (outer_state->opcode() != IrOpcode::kFrameState) {
- Callable callable =
- Builtins::CallableFor(isolate(), Builtins::kStoreICTrampoline);
- ReplaceWithStubCall(node, callable, flags);
+ ReplaceWithBuiltinCall(node, Builtins::kStoreICTrampoline);
} else {
- Callable callable = Builtins::CallableFor(isolate(), Builtins::kStoreIC);
Node* vector = jsgraph()->HeapConstant(p.feedback().vector);
node->InsertInput(zone(), 4, vector);
- ReplaceWithStubCall(node, callable, flags);
+ ReplaceWithBuiltinCall(node, Builtins::kStoreIC);
}
}
@@ -304,17 +382,16 @@ void JSGenericLowering::LowerJSStoreNamedOwn(Node* node) {
jsgraph()->TaggedIndexConstant(p.feedback().index()));
if (outer_state->opcode() != IrOpcode::kFrameState) {
Callable callable = CodeFactory::StoreOwnIC(isolate());
- ReplaceWithStubCall(node, callable, flags);
+ ReplaceWithBuiltinCall(node, callable, flags);
} else {
Callable callable = CodeFactory::StoreOwnICInOptimizedCode(isolate());
Node* vector = jsgraph()->HeapConstant(p.feedback().vector);
node->InsertInput(zone(), 4, vector);
- ReplaceWithStubCall(node, callable, flags);
+ ReplaceWithBuiltinCall(node, callable, flags);
}
}
void JSGenericLowering::LowerJSStoreGlobal(Node* node) {
- CallDescriptor::Flags flags = FrameStateFlagForCall(node);
const StoreGlobalParameters& p = StoreGlobalParametersOf(node->op());
Node* frame_state = NodeProperties::GetFrameStateInput(node);
Node* outer_state = frame_state->InputAt(kFrameStateOuterStateInput);
@@ -322,15 +399,11 @@ void JSGenericLowering::LowerJSStoreGlobal(Node* node) {
node->InsertInput(zone(), 2,
jsgraph()->TaggedIndexConstant(p.feedback().index()));
if (outer_state->opcode() != IrOpcode::kFrameState) {
- Callable callable =
- Builtins::CallableFor(isolate(), Builtins::kStoreGlobalICTrampoline);
- ReplaceWithStubCall(node, callable, flags);
+ ReplaceWithBuiltinCall(node, Builtins::kStoreGlobalICTrampoline);
} else {
- Callable callable =
- Builtins::CallableFor(isolate(), Builtins::kStoreGlobalIC);
Node* vector = jsgraph()->HeapConstant(p.feedback().vector);
node->InsertInput(zone(), 3, vector);
- ReplaceWithStubCall(node, callable, flags);
+ ReplaceWithBuiltinCall(node, Builtins::kStoreGlobalIC);
}
}
@@ -344,29 +417,20 @@ void JSGenericLowering::LowerJSStoreDataPropertyInLiteral(Node* node) {
}
void JSGenericLowering::LowerJSStoreInArrayLiteral(Node* node) {
- Callable callable =
- Builtins::CallableFor(isolate(), Builtins::kStoreInArrayLiteralIC);
- CallDescriptor::Flags flags = FrameStateFlagForCall(node);
FeedbackParameter const& p = FeedbackParameterOf(node->op());
RelaxControls(node);
node->InsertInput(zone(), 3,
jsgraph()->TaggedIndexConstant(p.feedback().index()));
node->InsertInput(zone(), 4, jsgraph()->HeapConstant(p.feedback().vector));
- ReplaceWithStubCall(node, callable, flags);
+ ReplaceWithBuiltinCall(node, Builtins::kStoreInArrayLiteralIC);
}
void JSGenericLowering::LowerJSDeleteProperty(Node* node) {
- CallDescriptor::Flags flags = FrameStateFlagForCall(node);
- Callable callable =
- Builtins::CallableFor(isolate(), Builtins::kDeleteProperty);
- ReplaceWithStubCall(node, callable, flags);
+ ReplaceWithBuiltinCall(node, Builtins::kDeleteProperty);
}
void JSGenericLowering::LowerJSGetSuperConstructor(Node* node) {
- CallDescriptor::Flags flags = FrameStateFlagForCall(node);
- Callable callable =
- Builtins::CallableFor(isolate(), Builtins::kGetSuperConstructor);
- ReplaceWithStubCall(node, callable, flags);
+ ReplaceWithBuiltinCall(node, Builtins::kGetSuperConstructor);
}
void JSGenericLowering::LowerJSHasInPrototypeChain(Node* node) {
@@ -374,16 +438,12 @@ void JSGenericLowering::LowerJSHasInPrototypeChain(Node* node) {
}
void JSGenericLowering::LowerJSInstanceOf(Node* node) {
- CallDescriptor::Flags flags = FrameStateFlagForCall(node);
- Callable callable = Builtins::CallableFor(isolate(), Builtins::kInstanceOf);
- ReplaceWithStubCall(node, callable, flags);
+ // TODO(jgruber, v8:8888): Collect feedback.
+ ReplaceWithBuiltinCall(node, Builtins::kInstanceOf);
}
void JSGenericLowering::LowerJSOrdinaryHasInstance(Node* node) {
- CallDescriptor::Flags flags = FrameStateFlagForCall(node);
- Callable callable =
- Builtins::CallableFor(isolate(), Builtins::kOrdinaryHasInstance);
- ReplaceWithStubCall(node, callable, flags);
+ ReplaceWithBuiltinCall(node, Builtins::kOrdinaryHasInstance);
}
void JSGenericLowering::LowerJSHasContextExtension(Node* node) {
@@ -401,10 +461,7 @@ void JSGenericLowering::LowerJSStoreContext(Node* node) {
void JSGenericLowering::LowerJSCreate(Node* node) {
- CallDescriptor::Flags flags = FrameStateFlagForCall(node);
- Callable callable =
- Builtins::CallableFor(isolate(), Builtins::kFastNewObject);
- ReplaceWithStubCall(node, callable, flags);
+ ReplaceWithBuiltinCall(node, Builtins::kFastNewObject);
}
@@ -465,23 +522,15 @@ void JSGenericLowering::LowerJSObjectIsArray(Node* node) {
}
void JSGenericLowering::LowerJSCreateObject(Node* node) {
- CallDescriptor::Flags flags = FrameStateFlagForCall(node);
- Callable callable = Builtins::CallableFor(
- isolate(), Builtins::kCreateObjectWithoutProperties);
- ReplaceWithStubCall(node, callable, flags);
+ ReplaceWithBuiltinCall(node, Builtins::kCreateObjectWithoutProperties);
}
void JSGenericLowering::LowerJSParseInt(Node* node) {
- CallDescriptor::Flags flags = FrameStateFlagForCall(node);
- Callable callable = Builtins::CallableFor(isolate(), Builtins::kParseInt);
- ReplaceWithStubCall(node, callable, flags);
+ ReplaceWithBuiltinCall(node, Builtins::kParseInt);
}
void JSGenericLowering::LowerJSRegExpTest(Node* node) {
- CallDescriptor::Flags flags = FrameStateFlagForCall(node);
- Callable callable =
- Builtins::CallableFor(isolate(), Builtins::kRegExpPrototypeTestFast);
- ReplaceWithStubCall(node, callable, flags);
+ ReplaceWithBuiltinCall(node, Builtins::kRegExpPrototypeTestFast);
}
void JSGenericLowering::LowerJSCreateClosure(Node* node) {
@@ -493,10 +542,7 @@ void JSGenericLowering::LowerJSCreateClosure(Node* node) {
// Use the FastNewClosure builtin only for functions allocated in new space.
if (p.allocation() == AllocationType::kYoung) {
- Callable callable =
- Builtins::CallableFor(isolate(), Builtins::kFastNewClosure);
- CallDescriptor::Flags flags = FrameStateFlagForCall(node);
- ReplaceWithStubCall(node, callable, flags);
+ ReplaceWithBuiltinCall(node, Builtins::kFastNewClosure);
} else {
ReplaceWithRuntimeCall(node, Runtime::kNewClosure_Tenured);
}
@@ -516,7 +562,7 @@ void JSGenericLowering::LowerJSCreateFunctionContext(Node* node) {
CodeFactory::FastNewFunctionContext(isolate(), scope_type);
node->InsertInput(zone(), 0, jsgraph()->HeapConstant(scope_info));
node->InsertInput(zone(), 1, jsgraph()->Int32Constant(slot_count));
- ReplaceWithStubCall(node, callable, flags);
+ ReplaceWithBuiltinCall(node, callable, flags);
} else {
node->InsertInput(zone(), 0, jsgraph()->HeapConstant(scope_info));
ReplaceWithRuntimeCall(node, Runtime::kNewFunctionContext);
@@ -524,15 +570,12 @@ void JSGenericLowering::LowerJSCreateFunctionContext(Node* node) {
}
void JSGenericLowering::LowerJSCreateGeneratorObject(Node* node) {
- CallDescriptor::Flags flags = FrameStateFlagForCall(node);
- Callable callable =
- Builtins::CallableFor(isolate(), Builtins::kCreateGeneratorObject);
node->RemoveInput(4); // control
- ReplaceWithStubCall(node, callable, flags);
+ ReplaceWithBuiltinCall(node, Builtins::kCreateGeneratorObject);
}
void JSGenericLowering::LowerJSCreateIterResultObject(Node* node) {
- UNREACHABLE(); // Eliminated in typed lowering.
+ ReplaceWithBuiltinCall(node, Builtins::kCreateIterResultObject);
}
void JSGenericLowering::LowerJSCreateStringIterator(Node* node) {
@@ -548,15 +591,11 @@ void JSGenericLowering::LowerJSCreatePromise(Node* node) {
}
void JSGenericLowering::LowerJSCreateTypedArray(Node* node) {
- CallDescriptor::Flags flags = FrameStateFlagForCall(node);
- Callable callable =
- Builtins::CallableFor(isolate(), Builtins::kCreateTypedArray);
- ReplaceWithStubCall(node, callable, flags);
+ ReplaceWithBuiltinCall(node, Builtins::kCreateTypedArray);
}
void JSGenericLowering::LowerJSCreateLiteralArray(Node* node) {
CreateLiteralParameters const& p = CreateLiteralParametersOf(node->op());
- CallDescriptor::Flags flags = FrameStateFlagForCall(node);
node->InsertInput(zone(), 0, jsgraph()->HeapConstant(p.feedback().vector));
node->InsertInput(zone(), 1,
jsgraph()->TaggedIndexConstant(p.feedback().index()));
@@ -566,9 +605,7 @@ void JSGenericLowering::LowerJSCreateLiteralArray(Node* node) {
// without properties up to the number of elements that the stubs can handle.
if ((p.flags() & AggregateLiteral::kIsShallow) != 0 &&
p.length() < ConstructorBuiltins::kMaximumClonedShallowArrayElements) {
- Callable callable =
- Builtins::CallableFor(isolate(), Builtins::kCreateShallowArrayLiteral);
- ReplaceWithStubCall(node, callable, flags);
+ ReplaceWithBuiltinCall(node, Builtins::kCreateShallowArrayLiteral);
} else {
node->InsertInput(zone(), 3, jsgraph()->SmiConstant(p.flags()));
ReplaceWithRuntimeCall(node, Runtime::kCreateArrayLiteral);
@@ -576,31 +613,36 @@ void JSGenericLowering::LowerJSCreateLiteralArray(Node* node) {
}
void JSGenericLowering::LowerJSGetTemplateObject(Node* node) {
- UNREACHABLE(); // Eliminated in native context specialization.
+ GetTemplateObjectParameters const& p =
+ GetTemplateObjectParametersOf(node->op());
+ SharedFunctionInfoRef shared(broker(), p.shared());
+ TemplateObjectDescriptionRef description(broker(), p.description());
+
+ node->InsertInput(zone(), 0, jsgraph()->Constant(shared));
+ node->InsertInput(zone(), 1, jsgraph()->Constant(description));
+ node->InsertInput(zone(), 2,
+ jsgraph()->UintPtrConstant(p.feedback().index()));
+ node->InsertInput(zone(), 3, jsgraph()->HeapConstant(p.feedback().vector));
+ node->RemoveInput(6); // control
+
+ ReplaceWithBuiltinCall(node, Builtins::kGetTemplateObject);
}
void JSGenericLowering::LowerJSCreateEmptyLiteralArray(Node* node) {
- CallDescriptor::Flags flags = FrameStateFlagForCall(node);
FeedbackParameter const& p = FeedbackParameterOf(node->op());
node->InsertInput(zone(), 0, jsgraph()->HeapConstant(p.feedback().vector));
node->InsertInput(zone(), 1,
jsgraph()->TaggedIndexConstant(p.feedback().index()));
node->RemoveInput(4); // control
- Callable callable =
- Builtins::CallableFor(isolate(), Builtins::kCreateEmptyArrayLiteral);
- ReplaceWithStubCall(node, callable, flags);
+ ReplaceWithBuiltinCall(node, Builtins::kCreateEmptyArrayLiteral);
}
void JSGenericLowering::LowerJSCreateArrayFromIterable(Node* node) {
- CallDescriptor::Flags flags = FrameStateFlagForCall(node);
- Callable callable = Builtins::CallableFor(
- isolate(), Builtins::kIterableToListWithSymbolLookup);
- ReplaceWithStubCall(node, callable, flags);
+ ReplaceWithBuiltinCall(node, Builtins::kIterableToListWithSymbolLookup);
}
void JSGenericLowering::LowerJSCreateLiteralObject(Node* node) {
CreateLiteralParameters const& p = CreateLiteralParametersOf(node->op());
- CallDescriptor::Flags flags = FrameStateFlagForCall(node);
node->InsertInput(zone(), 0, jsgraph()->HeapConstant(p.feedback().vector));
node->InsertInput(zone(), 1,
jsgraph()->TaggedIndexConstant(p.feedback().index()));
@@ -612,9 +654,7 @@ void JSGenericLowering::LowerJSCreateLiteralObject(Node* node) {
if ((p.flags() & AggregateLiteral::kIsShallow) != 0 &&
p.length() <=
ConstructorBuiltins::kMaximumClonedShallowObjectProperties) {
- Callable callable =
- Builtins::CallableFor(isolate(), Builtins::kCreateShallowObjectLiteral);
- ReplaceWithStubCall(node, callable, flags);
+ ReplaceWithBuiltinCall(node, Builtins::kCreateShallowObjectLiteral);
} else {
ReplaceWithRuntimeCall(node, Runtime::kCreateObjectLiteral);
}
@@ -622,40 +662,38 @@ void JSGenericLowering::LowerJSCreateLiteralObject(Node* node) {
void JSGenericLowering::LowerJSCloneObject(Node* node) {
CloneObjectParameters const& p = CloneObjectParametersOf(node->op());
- CallDescriptor::Flags flags = FrameStateFlagForCall(node);
- Callable callable =
- Builtins::CallableFor(isolate(), Builtins::kCloneObjectIC);
node->InsertInput(zone(), 1, jsgraph()->SmiConstant(p.flags()));
node->InsertInput(zone(), 2,
jsgraph()->TaggedIndexConstant(p.feedback().index()));
node->InsertInput(zone(), 3, jsgraph()->HeapConstant(p.feedback().vector));
- ReplaceWithStubCall(node, callable, flags);
+ ReplaceWithBuiltinCall(node, Builtins::kCloneObjectIC);
}
void JSGenericLowering::LowerJSCreateEmptyLiteralObject(Node* node) {
- UNREACHABLE(); // Eliminated in typed lowering.
+ ReplaceWithBuiltinCall(node, Builtins::kCreateEmptyLiteralObject);
}
void JSGenericLowering::LowerJSCreateLiteralRegExp(Node* node) {
CreateLiteralParameters const& p = CreateLiteralParametersOf(node->op());
- CallDescriptor::Flags flags = FrameStateFlagForCall(node);
- Callable callable =
- Builtins::CallableFor(isolate(), Builtins::kCreateRegExpLiteral);
node->InsertInput(zone(), 0, jsgraph()->HeapConstant(p.feedback().vector));
node->InsertInput(zone(), 1,
jsgraph()->TaggedIndexConstant(p.feedback().index()));
node->InsertInput(zone(), 2, jsgraph()->HeapConstant(p.constant()));
node->InsertInput(zone(), 3, jsgraph()->SmiConstant(p.flags()));
- ReplaceWithStubCall(node, callable, flags);
+ ReplaceWithBuiltinCall(node, Builtins::kCreateRegExpLiteral);
}
void JSGenericLowering::LowerJSCreateCatchContext(Node* node) {
- UNREACHABLE(); // Eliminated in typed lowering.
+ Handle<ScopeInfo> scope_info = ScopeInfoOf(node->op());
+ node->InsertInput(zone(), 1, jsgraph()->HeapConstant(scope_info));
+ ReplaceWithRuntimeCall(node, Runtime::kPushCatchContext);
}
void JSGenericLowering::LowerJSCreateWithContext(Node* node) {
- UNREACHABLE(); // Eliminated in typed lowering.
+ Handle<ScopeInfo> scope_info = ScopeInfoOf(node->op());
+ node->InsertInput(zone(), 1, jsgraph()->HeapConstant(scope_info));
+ ReplaceWithRuntimeCall(node, Runtime::kPushWithContext);
}
void JSGenericLowering::LowerJSCreateBlockContext(Node* node) {
@@ -688,64 +726,178 @@ void JSGenericLowering::LowerJSConstructForwardVarargs(Node* node) {
void JSGenericLowering::LowerJSConstruct(Node* node) {
ConstructParameters const& p = ConstructParametersOf(node->op());
- int const arg_count = static_cast<int>(p.arity() - 2);
+ int const arg_count = p.arity_without_implicit_args();
CallDescriptor::Flags flags = FrameStateFlagForCall(node);
- Callable callable = CodeFactory::Construct(isolate());
- auto call_descriptor = Linkage::GetStubCallDescriptor(
- zone(), callable.descriptor(), arg_count + 1, flags);
- Node* stub_code = jsgraph()->HeapConstant(callable.code());
- Node* stub_arity = jsgraph()->Int32Constant(arg_count);
- Node* new_target = node->InputAt(arg_count + 1);
- Node* receiver = jsgraph()->UndefinedConstant();
- node->RemoveInput(arg_count + 1); // Drop new target.
- node->InsertInput(zone(), 0, stub_code);
- node->InsertInput(zone(), 2, new_target);
- node->InsertInput(zone(), 3, stub_arity);
- node->InsertInput(zone(), 4, receiver);
- NodeProperties::ChangeOp(node, common()->Call(call_descriptor));
+
+ // TODO(jgruber): Understand and document how stack_argument_count is
+ // calculated. I've made some educated guesses below but they should be
+ // verified and documented in other lowerings as well.
+ static constexpr int kReceiver = 1;
+ static constexpr int kMaybeFeedbackVector = 1;
+
+ if (CollectFeedbackInGenericLowering() && p.feedback().IsValid()) {
+ const int stack_argument_count =
+ arg_count + kReceiver + kMaybeFeedbackVector;
+ Callable callable =
+ Builtins::CallableFor(isolate(), Builtins::kConstruct_WithFeedback);
+ auto call_descriptor = Linkage::GetStubCallDescriptor(
+ zone(), callable.descriptor(), stack_argument_count, flags);
+ Node* feedback_vector = jsgraph()->HeapConstant(p.feedback().vector);
+ Node* stub_code = jsgraph()->HeapConstant(callable.code());
+ Node* new_target = node->InputAt(arg_count + 1);
+ Node* stub_arity = jsgraph()->Int32Constant(arg_count);
+ Node* slot = jsgraph()->Int32Constant(p.feedback().index());
+ Node* receiver = jsgraph()->UndefinedConstant();
+ node->RemoveInput(arg_count + 1); // Drop new target.
+ // Register argument inputs are followed by stack argument inputs (such as
+ // feedback_vector). Both are listed in ascending order. Note that
+ // the receiver is implicitly placed on the stack and is thus inserted
+ // between explicitly-specified register and stack arguments.
+ // TODO(jgruber): Implement a simpler way to specify these mutations.
+ node->InsertInput(zone(), arg_count + 1, feedback_vector);
+ node->InsertInput(zone(), 0, stub_code);
+ node->InsertInput(zone(), 2, new_target);
+ node->InsertInput(zone(), 3, stub_arity);
+ node->InsertInput(zone(), 4, slot);
+ node->InsertInput(zone(), 5, receiver);
+ NodeProperties::ChangeOp(node, common()->Call(call_descriptor));
+ } else {
+ const int stack_argument_count = arg_count + kReceiver;
+ Callable callable = Builtins::CallableFor(isolate(), Builtins::kConstruct);
+ auto call_descriptor = Linkage::GetStubCallDescriptor(
+ zone(), callable.descriptor(), stack_argument_count, flags);
+ Node* stub_code = jsgraph()->HeapConstant(callable.code());
+ Node* stub_arity = jsgraph()->Int32Constant(arg_count);
+ Node* new_target = node->InputAt(arg_count + 1);
+ Node* receiver = jsgraph()->UndefinedConstant();
+ node->RemoveInput(arg_count + 1); // Drop new target.
+ node->InsertInput(zone(), 0, stub_code);
+ node->InsertInput(zone(), 2, new_target);
+ node->InsertInput(zone(), 3, stub_arity);
+ node->InsertInput(zone(), 4, receiver);
+ NodeProperties::ChangeOp(node, common()->Call(call_descriptor));
+ }
}
void JSGenericLowering::LowerJSConstructWithArrayLike(Node* node) {
- Callable callable =
- Builtins::CallableFor(isolate(), Builtins::kConstructWithArrayLike);
+ ConstructParameters const& p = ConstructParametersOf(node->op());
CallDescriptor::Flags flags = FrameStateFlagForCall(node);
- auto call_descriptor =
- Linkage::GetStubCallDescriptor(zone(), callable.descriptor(), 1, flags);
- Node* stub_code = jsgraph()->HeapConstant(callable.code());
- Node* receiver = jsgraph()->UndefinedConstant();
- Node* arguments_list = node->InputAt(1);
- Node* new_target = node->InputAt(2);
- node->InsertInput(zone(), 0, stub_code);
- node->ReplaceInput(2, new_target);
- node->ReplaceInput(3, arguments_list);
- node->InsertInput(zone(), 4, receiver);
- NodeProperties::ChangeOp(node, common()->Call(call_descriptor));
+ const int arg_count = p.arity_without_implicit_args();
+ DCHECK_EQ(arg_count, 1);
+
+ static constexpr int kReceiver = 1;
+ static constexpr int kArgumentList = 1;
+ static constexpr int kMaybeFeedbackVector = 1;
+
+ if (CollectFeedbackInGenericLowering() && p.feedback().IsValid()) {
+ const int stack_argument_count =
+ arg_count - kArgumentList + kReceiver + kMaybeFeedbackVector;
+ Callable callable = Builtins::CallableFor(
+ isolate(), Builtins::kConstructWithArrayLike_WithFeedback);
+ auto call_descriptor = Linkage::GetStubCallDescriptor(
+ zone(), callable.descriptor(), stack_argument_count, flags);
+ Node* stub_code = jsgraph()->HeapConstant(callable.code());
+ Node* receiver = jsgraph()->UndefinedConstant();
+ Node* arguments_list = node->InputAt(1);
+ Node* new_target = node->InputAt(2);
+ Node* feedback_vector = jsgraph()->HeapConstant(p.feedback().vector);
+ Node* slot = jsgraph()->Int32Constant(p.feedback().index());
+
+ node->InsertInput(zone(), 0, stub_code);
+ node->ReplaceInput(2, new_target);
+ node->ReplaceInput(3, arguments_list);
+ // Register argument inputs are followed by stack argument inputs (such as
+ // feedback_vector). Both are listed in ascending order. Note that
+ // the receiver is implicitly placed on the stack and is thus inserted
+ // between explicitly-specified register and stack arguments.
+ // TODO(jgruber): Implement a simpler way to specify these mutations.
+ node->InsertInput(zone(), 4, slot);
+ node->InsertInput(zone(), 5, receiver);
+ node->InsertInput(zone(), 6, feedback_vector);
+
+ NodeProperties::ChangeOp(node, common()->Call(call_descriptor));
+ } else {
+ const int stack_argument_count = arg_count - kArgumentList + kReceiver;
+ Callable callable =
+ Builtins::CallableFor(isolate(), Builtins::kConstructWithArrayLike);
+ auto call_descriptor = Linkage::GetStubCallDescriptor(
+ zone(), callable.descriptor(), stack_argument_count, flags);
+ Node* stub_code = jsgraph()->HeapConstant(callable.code());
+ Node* receiver = jsgraph()->UndefinedConstant();
+ Node* arguments_list = node->InputAt(1);
+ Node* new_target = node->InputAt(2);
+ node->InsertInput(zone(), 0, stub_code);
+ node->ReplaceInput(2, new_target);
+ node->ReplaceInput(3, arguments_list);
+ node->InsertInput(zone(), 4, receiver);
+ NodeProperties::ChangeOp(node, common()->Call(call_descriptor));
+ }
}
void JSGenericLowering::LowerJSConstructWithSpread(Node* node) {
ConstructParameters const& p = ConstructParametersOf(node->op());
- int const arg_count = static_cast<int>(p.arity() - 2);
+ int const arg_count = p.arity_without_implicit_args();
int const spread_index = arg_count;
int const new_target_index = arg_count + 1;
CallDescriptor::Flags flags = FrameStateFlagForCall(node);
- Callable callable = CodeFactory::ConstructWithSpread(isolate());
- auto call_descriptor = Linkage::GetStubCallDescriptor(
- zone(), callable.descriptor(), arg_count, flags);
- Node* stub_code = jsgraph()->HeapConstant(callable.code());
- Node* stack_arg_count = jsgraph()->Int32Constant(arg_count - 1);
- Node* new_target = node->InputAt(new_target_index);
- Node* spread = node->InputAt(spread_index);
- Node* receiver = jsgraph()->UndefinedConstant();
- DCHECK(new_target_index > spread_index);
- node->RemoveInput(new_target_index); // Drop new target.
- node->RemoveInput(spread_index);
- node->InsertInput(zone(), 0, stub_code);
- node->InsertInput(zone(), 2, new_target);
- node->InsertInput(zone(), 3, stack_arg_count);
- node->InsertInput(zone(), 4, spread);
- node->InsertInput(zone(), 5, receiver);
- NodeProperties::ChangeOp(node, common()->Call(call_descriptor));
+ static constexpr int kReceiver = 1;
+ static constexpr int kTheSpread = 1; // Included in `arg_count`.
+ static constexpr int kMaybeFeedbackVector = 1;
+
+ if (CollectFeedbackInGenericLowering() && p.feedback().IsValid()) {
+ const int stack_argument_count =
+ arg_count + kReceiver + kMaybeFeedbackVector;
+ Callable callable = Builtins::CallableFor(
+ isolate(), Builtins::kConstructWithSpread_WithFeedback);
+ auto call_descriptor = Linkage::GetStubCallDescriptor(
+ zone(), callable.descriptor(), stack_argument_count, flags);
+ Node* stub_code = jsgraph()->HeapConstant(callable.code());
+ Node* feedback_vector = jsgraph()->HeapConstant(p.feedback().vector);
+ Node* slot = jsgraph()->Int32Constant(p.feedback().index());
+
+ // The single available register is needed for `slot`, thus `spread` remains
+ // on the stack here.
+ Node* stub_arity = jsgraph()->Int32Constant(arg_count - kTheSpread);
+ Node* new_target = node->InputAt(new_target_index);
+ Node* receiver = jsgraph()->UndefinedConstant();
+ node->RemoveInput(new_target_index);
+
+ // Register argument inputs are followed by stack argument inputs (such as
+ // feedback_vector). Both are listed in ascending order. Note that
+ // the receiver is implicitly placed on the stack and is thus inserted
+ // between explicitly-specified register and stack arguments.
+ // TODO(jgruber): Implement a simpler way to specify these mutations.
+ node->InsertInput(zone(), arg_count + 1, feedback_vector);
+ node->InsertInput(zone(), 0, stub_code);
+ node->InsertInput(zone(), 2, new_target);
+ node->InsertInput(zone(), 3, stub_arity);
+ node->InsertInput(zone(), 4, slot);
+ node->InsertInput(zone(), 5, receiver);
+ NodeProperties::ChangeOp(node, common()->Call(call_descriptor));
+ } else {
+ const int stack_argument_count = arg_count + kReceiver - kTheSpread;
+ Callable callable = CodeFactory::ConstructWithSpread(isolate());
+ auto call_descriptor = Linkage::GetStubCallDescriptor(
+ zone(), callable.descriptor(), stack_argument_count, flags);
+ Node* stub_code = jsgraph()->HeapConstant(callable.code());
+
+ // We pass the spread in a register, not on the stack.
+ Node* stub_arity = jsgraph()->Int32Constant(arg_count - kTheSpread);
+ Node* new_target = node->InputAt(new_target_index);
+ Node* spread = node->InputAt(spread_index);
+ Node* receiver = jsgraph()->UndefinedConstant();
+ DCHECK(new_target_index > spread_index);
+ node->RemoveInput(new_target_index);
+ node->RemoveInput(spread_index);
+
+ node->InsertInput(zone(), 0, stub_code);
+ node->InsertInput(zone(), 2, new_target);
+ node->InsertInput(zone(), 3, stub_arity);
+ node->InsertInput(zone(), 4, spread);
+ node->InsertInput(zone(), 5, receiver);
+ NodeProperties::ChangeOp(node, common()->Call(call_descriptor));
+ }
}
void JSGenericLowering::LowerJSCallForwardVarargs(Node* node) {
@@ -766,49 +918,126 @@ void JSGenericLowering::LowerJSCallForwardVarargs(Node* node) {
void JSGenericLowering::LowerJSCall(Node* node) {
CallParameters const& p = CallParametersOf(node->op());
- int const arg_count = static_cast<int>(p.arity() - 2);
+ int const arg_count = p.arity_without_implicit_args();
ConvertReceiverMode const mode = p.convert_mode();
- Callable callable = CodeFactory::Call(isolate(), mode);
- CallDescriptor::Flags flags = FrameStateFlagForCall(node);
- auto call_descriptor = Linkage::GetStubCallDescriptor(
- zone(), callable.descriptor(), arg_count + 1, flags);
- Node* stub_code = jsgraph()->HeapConstant(callable.code());
- Node* stub_arity = jsgraph()->Int32Constant(arg_count);
- node->InsertInput(zone(), 0, stub_code);
- node->InsertInput(zone(), 2, stub_arity);
- NodeProperties::ChangeOp(node, common()->Call(call_descriptor));
+
+ if (CollectFeedbackInGenericLowering() && p.feedback().IsValid()) {
+ Callable callable = CodeFactory::Call_WithFeedback(isolate(), mode);
+ CallDescriptor::Flags flags = FrameStateFlagForCall(node);
+ auto call_descriptor = Linkage::GetStubCallDescriptor(
+ zone(), callable.descriptor(), arg_count + 1, flags);
+ Node* stub_code = jsgraph()->HeapConstant(callable.code());
+ Node* stub_arity = jsgraph()->Int32Constant(arg_count);
+ Node* feedback_vector = jsgraph()->HeapConstant(p.feedback().vector);
+ Node* slot = jsgraph()->Int32Constant(p.feedback().index());
+ node->InsertInput(zone(), 0, stub_code);
+ node->InsertInput(zone(), 2, stub_arity);
+ node->InsertInput(zone(), 3, slot);
+ node->InsertInput(zone(), 4, feedback_vector);
+ NodeProperties::ChangeOp(node, common()->Call(call_descriptor));
+ } else {
+ Callable callable = CodeFactory::Call(isolate(), mode);
+ CallDescriptor::Flags flags = FrameStateFlagForCall(node);
+ auto call_descriptor = Linkage::GetStubCallDescriptor(
+ zone(), callable.descriptor(), arg_count + 1, flags);
+ Node* stub_code = jsgraph()->HeapConstant(callable.code());
+ Node* stub_arity = jsgraph()->Int32Constant(arg_count);
+ node->InsertInput(zone(), 0, stub_code);
+ node->InsertInput(zone(), 2, stub_arity);
+ NodeProperties::ChangeOp(node, common()->Call(call_descriptor));
+ }
}
void JSGenericLowering::LowerJSCallWithArrayLike(Node* node) {
- Callable callable = CodeFactory::CallWithArrayLike(isolate());
+ CallParameters const& p = CallParametersOf(node->op());
+ const int arg_count = p.arity_without_implicit_args();
CallDescriptor::Flags flags = FrameStateFlagForCall(node);
- auto call_descriptor =
- Linkage::GetStubCallDescriptor(zone(), callable.descriptor(), 1, flags);
- Node* stub_code = jsgraph()->HeapConstant(callable.code());
- Node* receiver = node->InputAt(1);
- Node* arguments_list = node->InputAt(2);
- node->InsertInput(zone(), 0, stub_code);
- node->ReplaceInput(3, receiver);
- node->ReplaceInput(2, arguments_list);
- NodeProperties::ChangeOp(node, common()->Call(call_descriptor));
+
+ DCHECK_EQ(arg_count, 0);
+ static constexpr int kReceiver = 1;
+
+ if (CollectFeedbackInGenericLowering() && p.feedback().IsValid()) {
+ Callable callable = Builtins::CallableFor(
+ isolate(), Builtins::kCallWithArrayLike_WithFeedback);
+ auto call_descriptor = Linkage::GetStubCallDescriptor(
+ zone(), callable.descriptor(), arg_count + kReceiver, flags);
+ Node* stub_code = jsgraph()->HeapConstant(callable.code());
+ Node* receiver = node->InputAt(1);
+ Node* arguments_list = node->InputAt(2);
+ Node* feedback_vector = jsgraph()->HeapConstant(p.feedback().vector);
+ Node* slot = jsgraph()->Int32Constant(p.feedback().index());
+ node->InsertInput(zone(), 0, stub_code);
+ node->ReplaceInput(2, arguments_list);
+ node->ReplaceInput(3, receiver);
+ node->InsertInput(zone(), 3, slot);
+ node->InsertInput(zone(), 4, feedback_vector);
+ NodeProperties::ChangeOp(node, common()->Call(call_descriptor));
+ } else {
+ Callable callable = CodeFactory::CallWithArrayLike(isolate());
+ auto call_descriptor = Linkage::GetStubCallDescriptor(
+ zone(), callable.descriptor(), arg_count + kReceiver, flags);
+ Node* stub_code = jsgraph()->HeapConstant(callable.code());
+ Node* receiver = node->InputAt(1);
+ Node* arguments_list = node->InputAt(2);
+ node->InsertInput(zone(), 0, stub_code);
+ node->ReplaceInput(2, arguments_list);
+ node->ReplaceInput(3, receiver);
+ NodeProperties::ChangeOp(node, common()->Call(call_descriptor));
+ }
}
void JSGenericLowering::LowerJSCallWithSpread(Node* node) {
CallParameters const& p = CallParametersOf(node->op());
- int const arg_count = static_cast<int>(p.arity() - 2);
- int const spread_index = static_cast<int>(p.arity() + 1);
+ int const arg_count = p.arity_without_implicit_args();
+ int const spread_index = arg_count + 1;
CallDescriptor::Flags flags = FrameStateFlagForCall(node);
- Callable callable = CodeFactory::CallWithSpread(isolate());
- auto call_descriptor = Linkage::GetStubCallDescriptor(
- zone(), callable.descriptor(), arg_count, flags);
- Node* stub_code = jsgraph()->HeapConstant(callable.code());
- // We pass the spread in a register, not on the stack.
- Node* stack_arg_count = jsgraph()->Int32Constant(arg_count - 1);
- node->InsertInput(zone(), 0, stub_code);
- node->InsertInput(zone(), 2, stack_arg_count);
- node->InsertInput(zone(), 3, node->InputAt(spread_index));
- node->RemoveInput(spread_index + 1);
- NodeProperties::ChangeOp(node, common()->Call(call_descriptor));
+
+ static constexpr int kTheSpread = 1;
+ static constexpr int kMaybeFeedbackVector = 1;
+
+ if (CollectFeedbackInGenericLowering() && p.feedback().IsValid()) {
+ const int stack_argument_count = arg_count + kMaybeFeedbackVector;
+ Callable callable = Builtins::CallableFor(
+ isolate(), Builtins::kCallWithSpread_WithFeedback);
+ auto call_descriptor = Linkage::GetStubCallDescriptor(
+ zone(), callable.descriptor(), stack_argument_count, flags);
+ Node* stub_code = jsgraph()->HeapConstant(callable.code());
+ Node* feedback_vector = jsgraph()->HeapConstant(p.feedback().vector);
+ Node* slot = jsgraph()->Int32Constant(p.feedback().index());
+
+ // We pass the spread in a register, not on the stack.
+ Node* stub_arity = jsgraph()->Int32Constant(arg_count - kTheSpread);
+ Node* spread = node->InputAt(spread_index);
+ node->RemoveInput(spread_index);
+
+ // Register argument inputs are followed by stack argument inputs (such as
+ // feedback_vector). Both are listed in ascending order. Note that
+ // the receiver is implicitly placed on the stack and is thus inserted
+ // between explicitly-specified register and stack arguments.
+ // TODO(jgruber): Implement a simpler way to specify these mutations.
+ node->InsertInput(zone(), arg_count + 1, feedback_vector);
+ node->InsertInput(zone(), 0, stub_code);
+ node->InsertInput(zone(), 2, stub_arity);
+ node->InsertInput(zone(), 3, spread);
+ node->InsertInput(zone(), 4, slot);
+ NodeProperties::ChangeOp(node, common()->Call(call_descriptor));
+ } else {
+ const int stack_argument_count = arg_count;
+ Callable callable = CodeFactory::CallWithSpread(isolate());
+ auto call_descriptor = Linkage::GetStubCallDescriptor(
+ zone(), callable.descriptor(), stack_argument_count, flags);
+ Node* stub_code = jsgraph()->HeapConstant(callable.code());
+
+ // We pass the spread in a register, not on the stack.
+ Node* stub_arity = jsgraph()->Int32Constant(arg_count - kTheSpread);
+ Node* spread = node->InputAt(spread_index);
+ node->RemoveInput(spread_index);
+
+ node->InsertInput(zone(), 0, stub_code);
+ node->InsertInput(zone(), 2, stub_arity);
+ node->InsertInput(zone(), 3, spread);
+ NodeProperties::ChangeOp(node, common()->Call(call_descriptor));
+ }
}
void JSGenericLowering::LowerJSCallRuntime(Node* node) {
@@ -932,9 +1161,7 @@ void JSGenericLowering::LowerJSStackCheck(Node* node) {
}
void JSGenericLowering::LowerJSDebugger(Node* node) {
- CallDescriptor::Flags flags = FrameStateFlagForCall(node);
- Callable callable = CodeFactory::HandleDebuggerStatement(isolate());
- ReplaceWithStubCall(node, callable, flags);
+ ReplaceWithBuiltinCall(node, Builtins::kHandleDebuggerStatement);
}
Zone* JSGenericLowering::zone() const { return graph()->zone(); }
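
The feedback branch of LowerJSCallWithArrayLike above rewires the node's inputs through a chain of InsertInput/ReplaceInput calls whose net ordering is easy to lose track of. A minimal sketch that replays the same sequence on a std::vector of strings (stand-ins for graph nodes; context, frame-state, effect and control inputs omitted) and checks the final layout:

#include <cassert>
#include <string>
#include <vector>

int main() {
  using Inputs = std::vector<std::string>;
  // Value inputs of JSCallWithArrayLike before lowering.
  Inputs in = {"target", "receiver", "arguments_list"};

  std::string receiver = in[1];
  std::string arguments_list = in[2];
  in.insert(in.begin() + 0, "stub_code");        // InsertInput(0, stub_code)
  in[2] = arguments_list;                        // ReplaceInput(2, arguments_list)
  in[3] = receiver;                              // ReplaceInput(3, receiver)
  in.insert(in.begin() + 3, "slot");             // InsertInput(3, slot)
  in.insert(in.begin() + 4, "feedback_vector");  // InsertInput(4, feedback_vector)

  Inputs expected = {"stub_code", "target",          "arguments_list",
                     "slot",      "feedback_vector", "receiver"};
  assert(in == expected);  // the receiver ends up as the stack argument
  return 0;
}

The spread variant above follows the same bookkeeping, except that the spread is hoisted into a register and the feedback vector is appended after the explicit arguments as an extra stack argument.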
diff --git a/chromium/v8/src/compiler/js-generic-lowering.h b/chromium/v8/src/compiler/js-generic-lowering.h
index 2a4ac808b1a..2addadffab1 100644
--- a/chromium/v8/src/compiler/js-generic-lowering.h
+++ b/chromium/v8/src/compiler/js-generic-lowering.h
@@ -31,17 +31,27 @@ class JSGenericLowering final : public AdvancedReducer {
Reduction Reduce(Node* node) final;
protected:
-#define DECLARE_LOWER(x) void Lower##x(Node* node);
+#define DECLARE_LOWER(x, ...) void Lower##x(Node* node);
// Dispatched depending on opcode.
JS_OP_LIST(DECLARE_LOWER)
#undef DECLARE_LOWER
// Helpers to replace existing nodes with a generic call.
- void ReplaceWithStubCall(Node* node, Callable c, CallDescriptor::Flags flags);
- void ReplaceWithStubCall(Node* node, Callable c, CallDescriptor::Flags flags,
- Operator::Properties properties);
+ void ReplaceWithBuiltinCall(Node* node, Builtins::Name builtin);
+ void ReplaceWithBuiltinCall(Node* node, Callable c,
+ CallDescriptor::Flags flags);
+ void ReplaceWithBuiltinCall(Node* node, Callable c,
+ CallDescriptor::Flags flags,
+ Operator::Properties properties);
void ReplaceWithRuntimeCall(Node* node, Runtime::FunctionId f, int args = -1);
+ void ReplaceUnaryOpWithBuiltinCall(Node* node,
+ Builtins::Name builtin_without_feedback,
+ Builtins::Name builtin_with_feedback);
+ void ReplaceBinaryOpWithBuiltinCall(Node* node,
+ Builtins::Name builtin_without_feedback,
+ Builtins::Name builtin_with_feedback);
+
Zone* zone() const;
Isolate* isolate() const;
JSGraph* jsgraph() const { return jsgraph_; }
diff --git a/chromium/v8/src/compiler/js-heap-broker.cc b/chromium/v8/src/compiler/js-heap-broker.cc
index 8ff520921f2..47bc291c8d3 100644
--- a/chromium/v8/src/compiler/js-heap-broker.cc
+++ b/chromium/v8/src/compiler/js-heap-broker.cc
@@ -2385,7 +2385,8 @@ base::Optional<ObjectRef> ContextRef::get(int index,
}
JSHeapBroker::JSHeapBroker(Isolate* isolate, Zone* broker_zone,
- bool tracing_enabled, bool is_concurrent_inlining)
+ bool tracing_enabled, bool is_concurrent_inlining,
+ bool is_native_context_independent)
: isolate_(isolate),
zone_(broker_zone),
refs_(new (zone())
@@ -2394,6 +2395,7 @@ JSHeapBroker::JSHeapBroker(Isolate* isolate, Zone* broker_zone,
array_and_object_prototypes_(zone()),
tracing_enabled_(tracing_enabled),
is_concurrent_inlining_(is_concurrent_inlining),
+ is_native_context_independent_(is_native_context_independent),
feedback_(zone()),
bytecode_analyses_(zone()),
property_access_infos_(zone()),
@@ -2407,9 +2409,11 @@ JSHeapBroker::JSHeapBroker(Isolate* isolate, Zone* broker_zone,
TRACE(this, "Constructing heap broker");
}
-std::ostream& JSHeapBroker::Trace() const {
- return trace_out_ << "[" << this << "] "
- << std::string(trace_indentation_ * 2, ' ');
+std::string JSHeapBroker::Trace() const {
+ std::ostringstream oss;
+ oss << "[" << this << "] ";
+ for (unsigned i = 0; i < trace_indentation_ * 2; ++i) oss.put(' ');
+ return oss.str();
}
void JSHeapBroker::StopSerializing() {
diff --git a/chromium/v8/src/compiler/js-heap-broker.h b/chromium/v8/src/compiler/js-heap-broker.h
index 424da1df55b..b3e256d6864 100644
--- a/chromium/v8/src/compiler/js-heap-broker.h
+++ b/chromium/v8/src/compiler/js-heap-broker.h
@@ -33,20 +33,20 @@ std::ostream& operator<<(std::ostream& os, const ObjectRef& ref);
#define TRACE_BROKER(broker, x) \
do { \
if (broker->tracing_enabled() && FLAG_trace_heap_broker_verbose) \
- broker->Trace() << x << '\n'; \
+ StdoutStream{} << broker->Trace() << x << '\n'; \
} while (false)
#define TRACE_BROKER_MEMORY(broker, x) \
do { \
if (broker->tracing_enabled() && FLAG_trace_heap_broker_memory) \
- broker->Trace() << x << std::endl; \
+ StdoutStream{} << broker->Trace() << x << std::endl; \
} while (false)
-#define TRACE_BROKER_MISSING(broker, x) \
- do { \
- if (broker->tracing_enabled()) \
- broker->Trace() << "Missing " << x << " (" << __FILE__ << ":" \
- << __LINE__ << ")" << std::endl; \
+#define TRACE_BROKER_MISSING(broker, x) \
+ do { \
+ if (broker->tracing_enabled()) \
+ StdoutStream{} << broker->Trace() << "Missing " << x << " (" << __FILE__ \
+ << ":" << __LINE__ << ")" << std::endl; \
} while (false)
struct PropertyAccessTarget {
@@ -74,7 +74,13 @@ struct PropertyAccessTarget {
class V8_EXPORT_PRIVATE JSHeapBroker {
public:
JSHeapBroker(Isolate* isolate, Zone* broker_zone, bool tracing_enabled,
- bool is_concurrent_inlining);
+ bool is_concurrent_inlining, bool is_native_context_independent);
+
+ // For use only in tests, sets default values for some arguments. Avoids
+ // churn when new flags are added.
+ JSHeapBroker(Isolate* isolate, Zone* broker_zone)
+ : JSHeapBroker(isolate, broker_zone, FLAG_trace_heap_broker, false,
+ false) {}
// The compilation target's native context. We need the setter because at
// broker construction time we don't yet have the canonical handle.
@@ -89,6 +95,9 @@ class V8_EXPORT_PRIVATE JSHeapBroker {
Zone* zone() const { return zone_; }
bool tracing_enabled() const { return tracing_enabled_; }
bool is_concurrent_inlining() const { return is_concurrent_inlining_; }
+ bool is_native_context_independent() const {
+ return is_native_context_independent_;
+ }
enum BrokerMode { kDisabled, kSerializing, kSerialized, kRetired };
BrokerMode mode() const { return mode_; }
@@ -193,7 +202,7 @@ class V8_EXPORT_PRIVATE JSHeapBroker {
bool IsSerializedForCompilation(const SharedFunctionInfoRef& shared,
const FeedbackVectorRef& feedback) const;
- std::ostream& Trace() const;
+ std::string Trace() const;
void IncrementTracingIndentation();
void DecrementTracingIndentation();
@@ -242,7 +251,7 @@ class V8_EXPORT_PRIVATE JSHeapBroker {
BrokerMode mode_ = kDisabled;
bool const tracing_enabled_;
bool const is_concurrent_inlining_;
- mutable StdoutStream trace_out_;
+ bool const is_native_context_independent_;
unsigned trace_indentation_ = 0;
PerIsolateCompilerCache* compiler_cache_ = nullptr;
ZoneUnorderedMap<FeedbackSource, ProcessedFeedback const*,
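
With the Trace() change above, the broker hands out only a prefix string and each TRACE_BROKER* macro streams it through a short-lived StdoutStream, which is what allows the mutable trace_out_ member to be removed from the header. A minimal sketch of the same pattern, with std::cout standing in for V8's StdoutStream:

#include <iostream>
#include <sstream>
#include <string>

class Broker {
 public:
  // Builds "[<this>] " plus two spaces per indentation level, mirroring the
  // new JSHeapBroker::Trace().
  std::string Trace() const {
    std::ostringstream oss;
    oss << "[" << this << "] ";
    for (unsigned i = 0; i < indentation_ * 2; ++i) oss.put(' ');
    return oss.str();
  }
  void Indent() { ++indentation_; }

 private:
  unsigned indentation_ = 0;
};

// Call sites own the stream; the broker only supplies the prefix string.
#define TRACE_BROKER(broker, x) \
  do { std::cout << (broker).Trace() << x << '\n'; } while (false)

int main() {
  Broker broker;
  TRACE_BROKER(broker, "Constructing heap broker");
  broker.Indent();
  TRACE_BROKER(broker, "nested " << 42);
  return 0;
}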
diff --git a/chromium/v8/src/compiler/js-heap-copy-reducer.cc b/chromium/v8/src/compiler/js-heap-copy-reducer.cc
index 820928ec8ca..689732eea15 100644
--- a/chromium/v8/src/compiler/js-heap-copy-reducer.cc
+++ b/chromium/v8/src/compiler/js-heap-copy-reducer.cc
@@ -85,6 +85,50 @@ Reduction JSHeapCopyReducer::Reduce(Node* node) {
}
break;
}
+ /* Unary ops. */
+ case IrOpcode::kJSBitwiseNot:
+ case IrOpcode::kJSDecrement:
+ case IrOpcode::kJSIncrement:
+ case IrOpcode::kJSNegate: {
+ FeedbackParameter const& p = FeedbackParameterOf(node->op());
+ if (p.feedback().IsValid()) {
+ // Unary ops are treated as binary ops with respect to feedback.
+ broker()->ProcessFeedbackForBinaryOperation(p.feedback());
+ }
+ break;
+ }
+ /* Binary ops. */
+ case IrOpcode::kJSAdd:
+ case IrOpcode::kJSSubtract:
+ case IrOpcode::kJSMultiply:
+ case IrOpcode::kJSDivide:
+ case IrOpcode::kJSModulus:
+ case IrOpcode::kJSExponentiate:
+ case IrOpcode::kJSBitwiseOr:
+ case IrOpcode::kJSBitwiseXor:
+ case IrOpcode::kJSBitwiseAnd:
+ case IrOpcode::kJSShiftLeft:
+ case IrOpcode::kJSShiftRight:
+ case IrOpcode::kJSShiftRightLogical: {
+ FeedbackParameter const& p = FeedbackParameterOf(node->op());
+ if (p.feedback().IsValid()) {
+ broker()->ProcessFeedbackForBinaryOperation(p.feedback());
+ }
+ break;
+ }
+ /* Compare ops. */
+ case IrOpcode::kJSEqual:
+ case IrOpcode::kJSGreaterThan:
+ case IrOpcode::kJSGreaterThanOrEqual:
+ case IrOpcode::kJSLessThan:
+ case IrOpcode::kJSLessThanOrEqual:
+ case IrOpcode::kJSStrictEqual: {
+ FeedbackParameter const& p = FeedbackParameterOf(node->op());
+ if (p.feedback().IsValid()) {
+ broker()->ProcessFeedbackForCompareOperation(p.feedback());
+ }
+ break;
+ }
case IrOpcode::kJSCreateFunctionContext: {
CreateFunctionContextParameters const& p =
CreateFunctionContextParametersOf(node->op());
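
The new cases above group opcodes by the kind of feedback they carry, and, as the comment notes, unary ops reuse binary-operation feedback. A tiny classification sketch of that grouping, using hypothetical enums rather than the real IrOpcode and broker types:

#include <cassert>

enum class Op { kBitwiseNot, kAdd, kEqual, kOther };
enum class FeedbackKind { kBinaryOperation, kCompareOperation, kNone };

// Unary ops collect binary-op feedback, so they land in the same bucket.
FeedbackKind FeedbackKindFor(Op op) {
  switch (op) {
    case Op::kBitwiseNot:  // unary, treated as a binary op for feedback
    case Op::kAdd:
      return FeedbackKind::kBinaryOperation;
    case Op::kEqual:
      return FeedbackKind::kCompareOperation;
    default:
      return FeedbackKind::kNone;
  }
}

int main() {
  assert(FeedbackKindFor(Op::kBitwiseNot) == FeedbackKind::kBinaryOperation);
  assert(FeedbackKindFor(Op::kEqual) == FeedbackKind::kCompareOperation);
  return 0;
}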
diff --git a/chromium/v8/src/compiler/js-inlining.cc b/chromium/v8/src/compiler/js-inlining.cc
index 16a6fb2f0f5..64ed0ed0893 100644
--- a/chromium/v8/src/compiler/js-inlining.cc
+++ b/chromium/v8/src/compiler/js-inlining.cc
@@ -429,8 +429,7 @@ Reduction JSInliner::ReduceJSCall(Node* node) {
// always hold true.
CHECK(shared_info->is_compiled());
- if (!broker()->is_concurrent_inlining() &&
- info_->is_source_positions_enabled()) {
+ if (!broker()->is_concurrent_inlining() && info_->source_positions()) {
SharedFunctionInfo::EnsureSourcePositionsAvailable(isolate(),
shared_info->object());
}
@@ -462,10 +461,10 @@ Reduction JSInliner::ReduceJSCall(Node* node) {
Graph::SubgraphScope scope(graph());
BytecodeGraphBuilderFlags flags(
BytecodeGraphBuilderFlag::kSkipFirstStackCheck);
- if (info_->is_analyze_environment_liveness()) {
+ if (info_->analyze_environment_liveness()) {
flags |= BytecodeGraphBuilderFlag::kAnalyzeEnvironmentLiveness;
}
- if (info_->is_bailout_on_uninitialized()) {
+ if (info_->bailout_on_uninitialized()) {
flags |= BytecodeGraphBuilderFlag::kBailoutOnUninitialized;
}
{
diff --git a/chromium/v8/src/compiler/js-native-context-specialization.cc b/chromium/v8/src/compiler/js-native-context-specialization.cc
index 3283ebd0efc..73b10435e27 100644
--- a/chromium/v8/src/compiler/js-native-context-specialization.cc
+++ b/chromium/v8/src/compiler/js-native-context-specialization.cc
@@ -488,7 +488,8 @@ Reduction JSNativeContextSpecialization::ReduceJSInstanceOf(Node* node) {
node->ReplaceInput(4, continuation_frame_state);
node->ReplaceInput(5, effect);
NodeProperties::ChangeOp(
- node, javascript()->Call(3, CallFrequency(), FeedbackSource(),
+ node, javascript()->Call(1 + kTargetAndReceiver, CallFrequency(),
+ FeedbackSource(),
ConvertReceiverMode::kNotNullOrUndefined));
// Rewire the value uses of {node} to ToBoolean conversion of the result.
@@ -1428,10 +1429,10 @@ Reduction JSNativeContextSpecialization::ReduceJSGetIterator(Node* node) {
SpeculationMode mode = feedback.IsInsufficient()
? SpeculationMode::kDisallowSpeculation
: feedback.AsCall().speculation_mode();
- const Operator* call_op =
- javascript()->Call(2, CallFrequency(), p.callFeedback(),
- ConvertReceiverMode::kNotNullOrUndefined, mode,
- CallFeedbackRelation::kRelated);
+ const Operator* call_op = javascript()->Call(
+ 0 + kTargetAndReceiver, CallFrequency(), p.callFeedback(),
+ ConvertReceiverMode::kNotNullOrUndefined, mode,
+ CallFeedbackRelation::kRelated);
Node* call_property = graph()->NewNode(call_op, load_property, receiver,
context, frame_state, effect, control);
@@ -2048,7 +2049,8 @@ Node* JSNativeContextSpecialization::InlinePropertyGetterCall(
Node* value;
if (constant.IsJSFunction()) {
value = *effect = *control = graph()->NewNode(
- jsgraph()->javascript()->Call(2, CallFrequency(), FeedbackSource(),
+ jsgraph()->javascript()->Call(0 + kTargetAndReceiver, CallFrequency(),
+ FeedbackSource(),
ConvertReceiverMode::kNotNullOrUndefined),
target, receiver, context, frame_state, *effect, *control);
} else {
@@ -2085,7 +2087,8 @@ void JSNativeContextSpecialization::InlinePropertySetterCall(
// Introduce the call to the setter function.
if (constant.IsJSFunction()) {
*effect = *control = graph()->NewNode(
- jsgraph()->javascript()->Call(3, CallFrequency(), FeedbackSource(),
+ jsgraph()->javascript()->Call(1 + kTargetAndReceiver, CallFrequency(),
+ FeedbackSource(),
ConvertReceiverMode::kNotNullOrUndefined),
target, receiver, value, context, frame_state, *effect, *control);
} else {
diff --git a/chromium/v8/src/compiler/js-operator.cc b/chromium/v8/src/compiler/js-operator.cc
index 45e144094b9..b152569ae1f 100644
--- a/chromium/v8/src/compiler/js-operator.cc
+++ b/chromium/v8/src/compiler/js-operator.cc
@@ -7,7 +7,6 @@
#include <limits>
#include "src/base/lazy-instance.h"
-#include "src/compiler/opcodes.h"
#include "src/compiler/operator.h"
#include "src/handles/handles-inl.h"
#include "src/objects/objects-inl.h"
@@ -17,16 +16,22 @@ namespace v8 {
namespace internal {
namespace compiler {
+namespace {
+
+// Returns properties for the given binary op.
+constexpr Operator::Properties BinopProperties(Operator::Opcode opcode) {
+ CONSTEXPR_DCHECK(JSOperator::IsBinaryWithFeedback(opcode));
+ return opcode == IrOpcode::kJSStrictEqual ? Operator::kPure
+ : Operator::kNoProperties;
+}
+
+} // namespace
+
std::ostream& operator<<(std::ostream& os, CallFrequency const& f) {
if (f.IsUnknown()) return os << "unknown";
return os << f.value();
}
-CallFrequency CallFrequencyOf(Operator const* op) {
- DCHECK_EQ(op->opcode(), IrOpcode::kJSConstructWithArrayLike);
- return OpParameter<CallFrequency>(op);
-}
-
std::ostream& operator<<(std::ostream& os,
ConstructForwardVarargsParameters const& p) {
return os << p.arity() << ", " << p.start_index();
@@ -60,6 +65,7 @@ std::ostream& operator<<(std::ostream& os, ConstructParameters const& p) {
ConstructParameters const& ConstructParametersOf(Operator const* op) {
DCHECK(op->opcode() == IrOpcode::kJSConstruct ||
+ op->opcode() == IrOpcode::kJSConstructWithArrayLike ||
op->opcode() == IrOpcode::kJSConstructWithSpread);
return OpParameter<ConstructParameters>(op);
}
@@ -230,7 +236,9 @@ std::ostream& operator<<(std::ostream& os, FeedbackParameter const& p) {
}
FeedbackParameter const& FeedbackParameterOf(const Operator* op) {
- DCHECK(op->opcode() == IrOpcode::kJSCreateEmptyLiteralArray ||
+ DCHECK(JSOperator::IsUnaryWithFeedback(op->opcode()) ||
+ JSOperator::IsBinaryWithFeedback(op->opcode()) ||
+ op->opcode() == IrOpcode::kJSCreateEmptyLiteralArray ||
op->opcode() == IrOpcode::kJSInstanceOf ||
op->opcode() == IrOpcode::kJSStoreDataPropertyInLiteral ||
op->opcode() == IrOpcode::kJSStoreInArrayLiteral);
@@ -636,37 +644,7 @@ ForInMode ForInModeOf(Operator const* op) {
return OpParameter<ForInMode>(op);
}
-BinaryOperationHint BinaryOperationHintOf(const Operator* op) {
- DCHECK_EQ(IrOpcode::kJSAdd, op->opcode());
- return OpParameter<BinaryOperationHint>(op);
-}
-
-CompareOperationHint CompareOperationHintOf(const Operator* op) {
- DCHECK(op->opcode() == IrOpcode::kJSEqual ||
- op->opcode() == IrOpcode::kJSStrictEqual ||
- op->opcode() == IrOpcode::kJSLessThan ||
- op->opcode() == IrOpcode::kJSGreaterThan ||
- op->opcode() == IrOpcode::kJSLessThanOrEqual ||
- op->opcode() == IrOpcode::kJSGreaterThanOrEqual);
- return OpParameter<CompareOperationHint>(op);
-}
-
#define CACHED_OP_LIST(V) \
- V(BitwiseOr, Operator::kNoProperties, 2, 1) \
- V(BitwiseXor, Operator::kNoProperties, 2, 1) \
- V(BitwiseAnd, Operator::kNoProperties, 2, 1) \
- V(ShiftLeft, Operator::kNoProperties, 2, 1) \
- V(ShiftRight, Operator::kNoProperties, 2, 1) \
- V(ShiftRightLogical, Operator::kNoProperties, 2, 1) \
- V(Subtract, Operator::kNoProperties, 2, 1) \
- V(Multiply, Operator::kNoProperties, 2, 1) \
- V(Divide, Operator::kNoProperties, 2, 1) \
- V(Modulus, Operator::kNoProperties, 2, 1) \
- V(Exponentiate, Operator::kNoProperties, 2, 1) \
- V(BitwiseNot, Operator::kNoProperties, 1, 1) \
- V(Decrement, Operator::kNoProperties, 1, 1) \
- V(Increment, Operator::kNoProperties, 1, 1) \
- V(Negate, Operator::kNoProperties, 1, 1) \
V(ToLength, Operator::kNoProperties, 1, 1) \
V(ToName, Operator::kNoProperties, 1, 1) \
V(ToNumber, Operator::kNoProperties, 1, 1) \
@@ -703,16 +681,6 @@ CompareOperationHint CompareOperationHintOf(const Operator* op) {
V(ParseInt, Operator::kNoProperties, 2, 1) \
V(RegExpTest, Operator::kNoProperties, 2, 1)
-#define BINARY_OP_LIST(V) V(Add)
-
-#define COMPARE_OP_LIST(V) \
- V(Equal, Operator::kNoProperties) \
- V(StrictEqual, Operator::kPure) \
- V(LessThan, Operator::kNoProperties) \
- V(GreaterThan, Operator::kNoProperties) \
- V(LessThanOrEqual, Operator::kNoProperties) \
- V(GreaterThanOrEqual, Operator::kNoProperties)
-
struct JSOperatorGlobalCache final {
#define CACHED_OP(Name, properties, value_input_count, value_output_count) \
struct Name##Operator final : public Operator { \
@@ -726,55 +694,6 @@ struct JSOperatorGlobalCache final {
Name##Operator k##Name##Operator;
CACHED_OP_LIST(CACHED_OP)
#undef CACHED_OP
-
-#define BINARY_OP(Name) \
- template <BinaryOperationHint kHint> \
- struct Name##Operator final : public Operator1<BinaryOperationHint> { \
- Name##Operator() \
- : Operator1<BinaryOperationHint>(IrOpcode::kJS##Name, \
- Operator::kNoProperties, "JS" #Name, \
- 2, 1, 1, 1, 1, 2, kHint) {} \
- }; \
- Name##Operator<BinaryOperationHint::kNone> k##Name##NoneOperator; \
- Name##Operator<BinaryOperationHint::kSignedSmall> \
- k##Name##SignedSmallOperator; \
- Name##Operator<BinaryOperationHint::kSignedSmallInputs> \
- k##Name##SignedSmallInputsOperator; \
- Name##Operator<BinaryOperationHint::kSigned32> k##Name##Signed32Operator; \
- Name##Operator<BinaryOperationHint::kNumber> k##Name##NumberOperator; \
- Name##Operator<BinaryOperationHint::kNumberOrOddball> \
- k##Name##NumberOrOddballOperator; \
- Name##Operator<BinaryOperationHint::kString> k##Name##StringOperator; \
- Name##Operator<BinaryOperationHint::kBigInt> k##Name##BigIntOperator; \
- Name##Operator<BinaryOperationHint::kAny> k##Name##AnyOperator;
- BINARY_OP_LIST(BINARY_OP)
-#undef BINARY_OP
-
-#define COMPARE_OP(Name, properties) \
- template <CompareOperationHint kHint> \
- struct Name##Operator final : public Operator1<CompareOperationHint> { \
- Name##Operator() \
- : Operator1<CompareOperationHint>( \
- IrOpcode::kJS##Name, properties, "JS" #Name, 2, 1, 1, 1, 1, \
- Operator::ZeroIfNoThrow(properties), kHint) {} \
- }; \
- Name##Operator<CompareOperationHint::kNone> k##Name##NoneOperator; \
- Name##Operator<CompareOperationHint::kSignedSmall> \
- k##Name##SignedSmallOperator; \
- Name##Operator<CompareOperationHint::kNumber> k##Name##NumberOperator; \
- Name##Operator<CompareOperationHint::kNumberOrOddball> \
- k##Name##NumberOrOddballOperator; \
- Name##Operator<CompareOperationHint::kInternalizedString> \
- k##Name##InternalizedStringOperator; \
- Name##Operator<CompareOperationHint::kString> k##Name##StringOperator; \
- Name##Operator<CompareOperationHint::kSymbol> k##Name##SymbolOperator; \
- Name##Operator<CompareOperationHint::kBigInt> k##Name##BigIntOperator; \
- Name##Operator<CompareOperationHint::kReceiver> k##Name##ReceiverOperator; \
- Name##Operator<CompareOperationHint::kReceiverOrNullOrUndefined> \
- k##Name##ReceiverOrNullOrUndefinedOperator; \
- Name##Operator<CompareOperationHint::kAny> k##Name##AnyOperator;
- COMPARE_OP_LIST(COMPARE_OP)
-#undef COMPARE_OP
};
namespace {
@@ -791,65 +710,26 @@ JSOperatorBuilder::JSOperatorBuilder(Zone* zone)
CACHED_OP_LIST(CACHED_OP)
#undef CACHED_OP
-#define BINARY_OP(Name) \
- const Operator* JSOperatorBuilder::Name(BinaryOperationHint hint) { \
- switch (hint) { \
- case BinaryOperationHint::kNone: \
- return &cache_.k##Name##NoneOperator; \
- case BinaryOperationHint::kSignedSmall: \
- return &cache_.k##Name##SignedSmallOperator; \
- case BinaryOperationHint::kSignedSmallInputs: \
- return &cache_.k##Name##SignedSmallInputsOperator; \
- case BinaryOperationHint::kSigned32: \
- return &cache_.k##Name##Signed32Operator; \
- case BinaryOperationHint::kNumber: \
- return &cache_.k##Name##NumberOperator; \
- case BinaryOperationHint::kNumberOrOddball: \
- return &cache_.k##Name##NumberOrOddballOperator; \
- case BinaryOperationHint::kString: \
- return &cache_.k##Name##StringOperator; \
- case BinaryOperationHint::kBigInt: \
- return &cache_.k##Name##BigIntOperator; \
- case BinaryOperationHint::kAny: \
- return &cache_.k##Name##AnyOperator; \
- } \
- UNREACHABLE(); \
- return nullptr; \
+#define UNARY_OP(JSName, Name) \
+ const Operator* JSOperatorBuilder::Name(FeedbackSource const& feedback) { \
+ FeedbackParameter parameters(feedback); \
+ return new (zone()) Operator1<FeedbackParameter>( \
+ IrOpcode::k##JSName, Operator::kNoProperties, #JSName, 2, 1, 1, 1, 1, \
+ 2, parameters); \
}
-BINARY_OP_LIST(BINARY_OP)
-#undef BINARY_OP
-
-#define COMPARE_OP(Name, ...) \
- const Operator* JSOperatorBuilder::Name(CompareOperationHint hint) { \
- switch (hint) { \
- case CompareOperationHint::kNone: \
- return &cache_.k##Name##NoneOperator; \
- case CompareOperationHint::kSignedSmall: \
- return &cache_.k##Name##SignedSmallOperator; \
- case CompareOperationHint::kNumber: \
- return &cache_.k##Name##NumberOperator; \
- case CompareOperationHint::kNumberOrOddball: \
- return &cache_.k##Name##NumberOrOddballOperator; \
- case CompareOperationHint::kInternalizedString: \
- return &cache_.k##Name##InternalizedStringOperator; \
- case CompareOperationHint::kString: \
- return &cache_.k##Name##StringOperator; \
- case CompareOperationHint::kSymbol: \
- return &cache_.k##Name##SymbolOperator; \
- case CompareOperationHint::kBigInt: \
- return &cache_.k##Name##BigIntOperator; \
- case CompareOperationHint::kReceiver: \
- return &cache_.k##Name##ReceiverOperator; \
- case CompareOperationHint::kReceiverOrNullOrUndefined: \
- return &cache_.k##Name##ReceiverOrNullOrUndefinedOperator; \
- case CompareOperationHint::kAny: \
- return &cache_.k##Name##AnyOperator; \
- } \
- UNREACHABLE(); \
- return nullptr; \
+JS_UNOP_WITH_FEEDBACK(UNARY_OP)
+#undef UNARY_OP
+
+#define BINARY_OP(JSName, Name) \
+ const Operator* JSOperatorBuilder::Name(FeedbackSource const& feedback) { \
+ static constexpr auto kProperties = BinopProperties(IrOpcode::k##JSName); \
+ FeedbackParameter parameters(feedback); \
+ return new (zone()) Operator1<FeedbackParameter>( \
+ IrOpcode::k##JSName, kProperties, #JSName, 3, 1, 1, 1, 1, \
+ Operator::ZeroIfNoThrow(kProperties), parameters); \
}
-COMPARE_OP_LIST(COMPARE_OP)
-#undef COMPARE_OP
+JS_BINOP_WITH_FEEDBACK(BINARY_OP)
+#undef BINARY_OP
const Operator* JSOperatorBuilder::StoreDataPropertyInLiteral(
const FeedbackSource& feedback) {
@@ -972,13 +852,15 @@ const Operator* JSOperatorBuilder::Construct(uint32_t arity,
}
const Operator* JSOperatorBuilder::ConstructWithArrayLike(
- CallFrequency const& frequency) {
- return new (zone()) Operator1<CallFrequency>( // --
- IrOpcode::kJSConstructWithArrayLike, // opcode
- Operator::kNoProperties, // properties
- "JSConstructWithArrayLike", // name
- 3, 1, 1, 1, 1, 2, // counts
- frequency); // parameter
+ CallFrequency const& frequency, FeedbackSource const& feedback) {
+ static constexpr uint32_t arity = 3;
+ ConstructParameters parameters(arity, frequency, feedback);
+ return new (zone()) Operator1<ConstructParameters>( // --
+ IrOpcode::kJSConstructWithArrayLike, // opcode
+ Operator::kNoProperties, // properties
+ "JSConstructWithArrayLike", // name
+ parameters.arity(), 1, 1, 1, 1, 2, // counts
+ parameters); // parameter
}
const Operator* JSOperatorBuilder::ConstructWithSpread(
@@ -1359,7 +1241,7 @@ const Operator* JSOperatorBuilder::CreateEmptyLiteralObject() {
IrOpcode::kJSCreateEmptyLiteralObject, // opcode
Operator::kNoProperties, // properties
"JSCreateEmptyLiteralObject", // name
- 1, 1, 1, 1, 1, 2); // counts
+ 0, 1, 1, 1, 1, 2); // counts
}
const Operator* JSOperatorBuilder::CreateLiteralRegExp(
@@ -1420,9 +1302,7 @@ Handle<ScopeInfo> ScopeInfoOf(const Operator* op) {
return OpParameter<Handle<ScopeInfo>>(op);
}
-#undef BINARY_OP_LIST
#undef CACHED_OP_LIST
-#undef COMPARE_OP_LIST
} // namespace compiler
} // namespace internal
diff --git a/chromium/v8/src/compiler/js-operator.h b/chromium/v8/src/compiler/js-operator.h
index 1f9230d22b6..ad9365b4b59 100644
--- a/chromium/v8/src/compiler/js-operator.h
+++ b/chromium/v8/src/compiler/js-operator.h
@@ -8,6 +8,8 @@
#include "src/base/compiler-specific.h"
#include "src/compiler/feedback-source.h"
#include "src/compiler/globals.h"
+#include "src/compiler/node.h"
+#include "src/compiler/opcodes.h"
#include "src/handles/maybe-handles.h"
#include "src/objects/type-hints.h"
#include "src/runtime/runtime.h"
@@ -27,6 +29,77 @@ namespace compiler {
class Operator;
struct JSOperatorGlobalCache;
+// Macro lists.
+#define JS_UNOP_WITH_FEEDBACK(V) \
+ JS_BITWISE_UNOP_LIST(V) \
+ JS_ARITH_UNOP_LIST(V)
+
+#define JS_BINOP_WITH_FEEDBACK(V) \
+ JS_ARITH_BINOP_LIST(V) \
+ JS_BITWISE_BINOP_LIST(V) \
+ JS_COMPARE_BINOP_LIST(V)
+
+// Predicates.
+class JSOperator final : public AllStatic {
+ public:
+ static constexpr bool IsUnaryWithFeedback(Operator::Opcode opcode) {
+#define CASE(Name, ...) \
+ case IrOpcode::k##Name: \
+ return true;
+ switch (opcode) {
+ JS_UNOP_WITH_FEEDBACK(CASE);
+ default:
+ return false;
+ }
+#undef CASE
+ return false;
+ }
+
+ static constexpr bool IsBinaryWithFeedback(Operator::Opcode opcode) {
+#define CASE(Name, ...) \
+ case IrOpcode::k##Name: \
+ return true;
+ switch (opcode) {
+ JS_BINOP_WITH_FEEDBACK(CASE);
+ default:
+ return false;
+ }
+#undef CASE
+ return false;
+ }
+};
+
+// Node wrappers.
+
+class JSUnaryOpNode final : public NodeWrapper {
+ public:
+ explicit constexpr JSUnaryOpNode(Node* node) : NodeWrapper(node) {
+ CONSTEXPR_DCHECK(JSOperator::IsUnaryWithFeedback(node->opcode()));
+ }
+
+ static constexpr int ValueIndex() { return 0; }
+ static constexpr int FeedbackVectorIndex() { return 1; }
+};
+
+#define V(JSName, ...) using JSName##Node = JSUnaryOpNode;
+JS_UNOP_WITH_FEEDBACK(V)
+#undef V
+
+class JSBinaryOpNode final : public NodeWrapper {
+ public:
+ explicit constexpr JSBinaryOpNode(Node* node) : NodeWrapper(node) {
+ CONSTEXPR_DCHECK(JSOperator::IsBinaryWithFeedback(node->opcode()));
+ }
+
+ static constexpr int LeftIndex() { return 0; }
+ static constexpr int RightIndex() { return 1; }
+ static constexpr int FeedbackVectorIndex() { return 2; }
+};
+
+#define V(JSName, ...) using JSName##Node = JSBinaryOpNode;
+JS_BINOP_WITH_FEEDBACK(V)
+#undef V
+
// Defines the frequency a given Call/Construct site was executed. For some
// call sites the frequency is not known.
class CallFrequency final {
@@ -60,8 +133,6 @@ class CallFrequency final {
std::ostream& operator<<(std::ostream&, CallFrequency const&);
-CallFrequency CallFrequencyOf(Operator const* op) V8_WARN_UNUSED_RESULT;
-
// Defines the flags for a JavaScript call forwarding parameters. This
// is used as parameter by JSConstructForwardVarargs operators.
class ConstructForwardVarargsParameters final {
@@ -97,15 +168,32 @@ std::ostream& operator<<(std::ostream&,
ConstructForwardVarargsParameters const& ConstructForwardVarargsParametersOf(
Operator const*) V8_WARN_UNUSED_RESULT;
-// Defines the arity and the feedback for a JavaScript constructor call. This is
-// used as a parameter by JSConstruct and JSConstructWithSpread operators.
+// Part of ConstructParameters::arity.
+static constexpr int kTargetAndNewTarget = 2;
+
+// Defines the arity (parameters plus the target and new target) and the
+// feedback for a JavaScript constructor call. This is used as a parameter by
+// JSConstruct, JSConstructWithArrayLike, and JSConstructWithSpread operators.
class ConstructParameters final {
public:
ConstructParameters(uint32_t arity, CallFrequency const& frequency,
FeedbackSource const& feedback)
- : arity_(arity), frequency_(frequency), feedback_(feedback) {}
+ : arity_(arity), frequency_(frequency), feedback_(feedback) {
+ DCHECK_GE(arity, kTargetAndNewTarget);
+ DCHECK(is_int32(arity));
+ }
+ // TODO(jgruber): Consider removing `arity()` and just storing the arity
+ // without extra args in ConstructParameters. Every spot that creates
+  // ConstructParameters artificially adds the extra args. Every spot that uses
+ // ConstructParameters artificially subtracts the extra args.
+ // We keep them for now for consistency with other spots
+ // that expect `arity()` to include extra args.
uint32_t arity() const { return arity_; }
+ int arity_without_implicit_args() const {
+ return static_cast<int>(arity_ - kTargetAndNewTarget);
+ }
+
CallFrequency const& frequency() const { return frequency_; }
FeedbackSource const& feedback() const { return feedback_; }
@@ -158,8 +246,12 @@ std::ostream& operator<<(std::ostream&, CallForwardVarargsParameters const&);
CallForwardVarargsParameters const& CallForwardVarargsParametersOf(
Operator const*) V8_WARN_UNUSED_RESULT;
-// Defines the arity and the call flags for a JavaScript function call. This is
-// used as a parameter by JSCall and JSCallWithSpread operators.
+// Part of CallParameters::arity.
+static constexpr int kTargetAndReceiver = 2;
+
+// Defines the arity (parameters plus the target and receiver) and the call
+// flags for a JavaScript function call. This is used as a parameter by JSCall,
+// JSCallWithArrayLike and JSCallWithSpread operators.
class CallParameters final {
public:
CallParameters(size_t arity, CallFrequency const& frequency,
@@ -178,9 +270,17 @@ class CallParameters final {
feedback.IsValid());
DCHECK_IMPLIES(!feedback.IsValid(),
feedback_relation == CallFeedbackRelation::kUnrelated);
+ DCHECK_GE(arity, kTargetAndReceiver);
+ DCHECK(is_int32(arity));
}
+ // TODO(jgruber): Consider removing `arity()` and just storing the arity
+ // without extra args in CallParameters.
size_t arity() const { return ArityField::decode(bit_field_); }
+ int arity_without_implicit_args() const {
+ return static_cast<int>(arity() - kTargetAndReceiver);
+ }
+
CallFrequency const& frequency() const { return frequency_; }
ConvertReceiverMode convert_mode() const {
return ConvertReceiverModeField::decode(bit_field_);
@@ -733,10 +833,6 @@ std::ostream& operator<<(std::ostream&, ForInMode);
ForInMode ForInModeOf(Operator const* op) V8_WARN_UNUSED_RESULT;
-BinaryOperationHint BinaryOperationHintOf(const Operator* op);
-
-CompareOperationHint CompareOperationHintOf(const Operator* op);
-
int RegisterCountOf(Operator const* op) V8_WARN_UNUSED_RESULT;
int GeneratorStoreValueCountOf(const Operator* op) V8_WARN_UNUSED_RESULT;
@@ -752,30 +848,30 @@ class V8_EXPORT_PRIVATE JSOperatorBuilder final
public:
explicit JSOperatorBuilder(Zone* zone);
- const Operator* Equal(CompareOperationHint hint);
- const Operator* StrictEqual(CompareOperationHint hint);
- const Operator* LessThan(CompareOperationHint hint);
- const Operator* GreaterThan(CompareOperationHint hint);
- const Operator* LessThanOrEqual(CompareOperationHint hint);
- const Operator* GreaterThanOrEqual(CompareOperationHint hint);
-
- const Operator* BitwiseOr();
- const Operator* BitwiseXor();
- const Operator* BitwiseAnd();
- const Operator* ShiftLeft();
- const Operator* ShiftRight();
- const Operator* ShiftRightLogical();
- const Operator* Add(BinaryOperationHint hint);
- const Operator* Subtract();
- const Operator* Multiply();
- const Operator* Divide();
- const Operator* Modulus();
- const Operator* Exponentiate();
-
- const Operator* BitwiseNot();
- const Operator* Decrement();
- const Operator* Increment();
- const Operator* Negate();
+ const Operator* Equal(FeedbackSource const& feedback);
+ const Operator* StrictEqual(FeedbackSource const& feedback);
+ const Operator* LessThan(FeedbackSource const& feedback);
+ const Operator* GreaterThan(FeedbackSource const& feedback);
+ const Operator* LessThanOrEqual(FeedbackSource const& feedback);
+ const Operator* GreaterThanOrEqual(FeedbackSource const& feedback);
+
+ const Operator* BitwiseOr(FeedbackSource const& feedback);
+ const Operator* BitwiseXor(FeedbackSource const& feedback);
+ const Operator* BitwiseAnd(FeedbackSource const& feedback);
+ const Operator* ShiftLeft(FeedbackSource const& feedback);
+ const Operator* ShiftRight(FeedbackSource const& feedback);
+ const Operator* ShiftRightLogical(FeedbackSource const& feedback);
+ const Operator* Add(FeedbackSource const& feedback);
+ const Operator* Subtract(FeedbackSource const& feedback);
+ const Operator* Multiply(FeedbackSource const& feedback);
+ const Operator* Divide(FeedbackSource const& feedback);
+ const Operator* Modulus(FeedbackSource const& feedback);
+ const Operator* Exponentiate(FeedbackSource const& feedback);
+
+ const Operator* BitwiseNot(FeedbackSource const& feedback);
+ const Operator* Decrement(FeedbackSource const& feedback);
+ const Operator* Increment(FeedbackSource const& feedback);
+ const Operator* Negate(FeedbackSource const& feedback);
const Operator* ToLength();
const Operator* ToName();
@@ -849,7 +945,8 @@ class V8_EXPORT_PRIVATE JSOperatorBuilder final
const Operator* Construct(uint32_t arity,
CallFrequency const& frequency = CallFrequency(),
FeedbackSource const& feedback = FeedbackSource());
- const Operator* ConstructWithArrayLike(CallFrequency const& frequency);
+ const Operator* ConstructWithArrayLike(CallFrequency const& frequency,
+ FeedbackSource const& feedback);
const Operator* ConstructWithSpread(
uint32_t arity, CallFrequency const& frequency = CallFrequency(),
FeedbackSource const& feedback = FeedbackSource());
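
The arity bookkeeping introduced here is subtle: arity() still includes the implicit target/receiver (or target/new-target) pair, and arity_without_implicit_args() strips it again, which is why call sites now read javascript()->Call(1 + kTargetAndReceiver, ...). A small self-contained check of that arithmetic, with the two constants copied from the header:

#include <cassert>
#include <cstdint>

constexpr int kTargetAndReceiver = 2;   // JSCall* operators
constexpr int kTargetAndNewTarget = 2;  // JSConstruct* operators

// arity() stores parameters plus the implicit args; the helper strips them.
constexpr int ArityWithoutImplicitArgs(uint32_t arity, int implicit) {
  return static_cast<int>(arity) - implicit;
}

int main() {
  // A call f(a, b): two explicit arguments, arity() == 2 + target + receiver.
  uint32_t call_arity = 2 + kTargetAndReceiver;
  assert(ArityWithoutImplicitArgs(call_arity, kTargetAndReceiver) == 2);

  // new C(x): one explicit argument, arity() == 1 + target + new_target.
  uint32_t construct_arity = 1 + kTargetAndNewTarget;
  assert(ArityWithoutImplicitArgs(construct_arity, kTargetAndNewTarget) == 1);
  return 0;
}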
diff --git a/chromium/v8/src/compiler/js-type-hint-lowering.cc b/chromium/v8/src/compiler/js-type-hint-lowering.cc
index 5c9a287bccc..808c59a65e2 100644
--- a/chromium/v8/src/compiler/js-type-hint-lowering.cc
+++ b/chromium/v8/src/compiler/js-type-hint-lowering.cc
@@ -97,6 +97,9 @@ class JSSpeculativeBinopBuilder final {
case CompareOperationHint::kNumber:
*hint = NumberOperationHint::kNumber;
return true;
+ case CompareOperationHint::kNumberOrBoolean:
+ *hint = NumberOperationHint::kNumberOrBoolean;
+ return true;
case CompareOperationHint::kNumberOrOddball:
*hint = NumberOperationHint::kNumberOrOddball;
return true;
@@ -282,31 +285,33 @@ JSTypeHintLowering::LoweringResult JSTypeHintLowering::ReduceUnaryOperation(
return LoweringResult::Exit(node);
}
+ // Note: Unary and binary operations collect the same kind of feedback.
+ FeedbackSource feedback(feedback_vector(), slot);
+
Node* node;
switch (op->opcode()) {
case IrOpcode::kJSBitwiseNot: {
// Lower to a speculative xor with -1 if we have some kind of Number
// feedback.
- JSSpeculativeBinopBuilder b(this, jsgraph()->javascript()->BitwiseXor(),
- operand, jsgraph()->SmiConstant(-1), effect,
- control, slot);
+ JSSpeculativeBinopBuilder b(
+ this, jsgraph()->javascript()->BitwiseXor(feedback), operand,
+ jsgraph()->SmiConstant(-1), effect, control, slot);
node = b.TryBuildNumberBinop();
break;
}
case IrOpcode::kJSDecrement: {
// Lower to a speculative subtraction of 1 if we have some kind of Number
// feedback.
- JSSpeculativeBinopBuilder b(this, jsgraph()->javascript()->Subtract(),
- operand, jsgraph()->SmiConstant(1), effect,
- control, slot);
+ JSSpeculativeBinopBuilder b(
+ this, jsgraph()->javascript()->Subtract(feedback), operand,
+ jsgraph()->SmiConstant(1), effect, control, slot);
node = b.TryBuildNumberBinop();
break;
}
case IrOpcode::kJSIncrement: {
// Lower to a speculative addition of 1 if we have some kind of Number
// feedback.
- BinaryOperationHint hint = BinaryOperationHint::kAny; // Dummy.
- JSSpeculativeBinopBuilder b(this, jsgraph()->javascript()->Add(hint),
+ JSSpeculativeBinopBuilder b(this, jsgraph()->javascript()->Add(feedback),
operand, jsgraph()->SmiConstant(1), effect,
control, slot);
node = b.TryBuildNumberBinop();
@@ -315,9 +320,9 @@ JSTypeHintLowering::LoweringResult JSTypeHintLowering::ReduceUnaryOperation(
case IrOpcode::kJSNegate: {
// Lower to a speculative multiplication with -1 if we have some kind of
// Number feedback.
- JSSpeculativeBinopBuilder b(this, jsgraph()->javascript()->Multiply(),
- operand, jsgraph()->SmiConstant(-1), effect,
- control, slot);
+ JSSpeculativeBinopBuilder b(
+ this, jsgraph()->javascript()->Multiply(feedback), operand,
+ jsgraph()->SmiConstant(-1), effect, control, slot);
node = b.TryBuildNumberBinop();
if (!node) {
if (GetBinaryOperationHint(slot) == BinaryOperationHint::kBigInt) {
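
ReduceUnaryOperation above lowers each unary op to a speculative binop with a constant second operand, now threading the feedback source through. The identities it relies on are ordinary arithmetic and can be sanity-checked in isolation; plain ints stand in here for the speculative Number case:

#include <cassert>

int BitwiseNot(int x) { return x ^ -1; }  // JSBitwiseNot -> x ^ -1
int Decrement(int x) { return x - 1; }    // JSDecrement  -> x - 1
int Increment(int x) { return x + 1; }    // JSIncrement  -> x + 1
int Negate(int x) { return x * -1; }      // JSNegate     -> x * -1

int main() {
  assert(BitwiseNot(5) == ~5);
  assert(Decrement(5) == 4);
  assert(Increment(5) == 6);
  assert(Negate(5) == -5);
  return 0;
}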
diff --git a/chromium/v8/src/compiler/js-type-hint-lowering.h b/chromium/v8/src/compiler/js-type-hint-lowering.h
index 303e2f8dcfa..256858c1c69 100644
--- a/chromium/v8/src/compiler/js-type-hint-lowering.h
+++ b/chromium/v8/src/compiler/js-type-hint-lowering.h
@@ -72,6 +72,7 @@ class JSTypeHintLowering {
Node* control) {
DCHECK_NOT_NULL(effect);
DCHECK_NOT_NULL(control);
+ DCHECK(value->op()->HasProperty(Operator::kNoThrow));
return LoweringResult(LoweringResultKind::kSideEffectFree, value, effect,
control);
}
diff --git a/chromium/v8/src/compiler/js-typed-lowering.cc b/chromium/v8/src/compiler/js-typed-lowering.cc
index 69ca3e62e7a..8e03fc2f435 100644
--- a/chromium/v8/src/compiler/js-typed-lowering.cc
+++ b/chromium/v8/src/compiler/js-typed-lowering.cc
@@ -38,13 +38,16 @@ class JSBinopReduction final {
bool GetCompareNumberOperationHint(NumberOperationHint* hint) {
DCHECK_EQ(1, node_->op()->EffectOutputCount());
- switch (CompareOperationHintOf(node_->op())) {
+ switch (GetCompareOperationHint(node_)) {
case CompareOperationHint::kSignedSmall:
*hint = NumberOperationHint::kSignedSmall;
return true;
case CompareOperationHint::kNumber:
*hint = NumberOperationHint::kNumber;
return true;
+ case CompareOperationHint::kNumberOrBoolean:
+ *hint = NumberOperationHint::kNumberOrBoolean;
+ return true;
case CompareOperationHint::kNumberOrOddball:
*hint = NumberOperationHint::kNumberOrOddball;
return true;
@@ -63,36 +66,34 @@ class JSBinopReduction final {
bool IsInternalizedStringCompareOperation() {
DCHECK_EQ(1, node_->op()->EffectOutputCount());
- return (CompareOperationHintOf(node_->op()) ==
+ return (GetCompareOperationHint(node_) ==
CompareOperationHint::kInternalizedString) &&
BothInputsMaybe(Type::InternalizedString());
}
bool IsReceiverCompareOperation() {
DCHECK_EQ(1, node_->op()->EffectOutputCount());
- return (CompareOperationHintOf(node_->op()) ==
+ return (GetCompareOperationHint(node_) ==
CompareOperationHint::kReceiver) &&
BothInputsMaybe(Type::Receiver());
}
bool IsReceiverOrNullOrUndefinedCompareOperation() {
DCHECK_EQ(1, node_->op()->EffectOutputCount());
- return (CompareOperationHintOf(node_->op()) ==
+ return (GetCompareOperationHint(node_) ==
CompareOperationHint::kReceiverOrNullOrUndefined) &&
BothInputsMaybe(Type::ReceiverOrNullOrUndefined());
}
bool IsStringCompareOperation() {
DCHECK_EQ(1, node_->op()->EffectOutputCount());
- return (CompareOperationHintOf(node_->op()) ==
- CompareOperationHint::kString) &&
+ return (GetCompareOperationHint(node_) == CompareOperationHint::kString) &&
BothInputsMaybe(Type::String());
}
bool IsSymbolCompareOperation() {
DCHECK_EQ(1, node_->op()->EffectOutputCount());
- return (CompareOperationHintOf(node_->op()) ==
- CompareOperationHint::kSymbol) &&
+ return (GetCompareOperationHint(node_) == CompareOperationHint::kSymbol) &&
BothInputsMaybe(Type::Symbol());
}
@@ -103,7 +104,7 @@ class JSBinopReduction final {
DCHECK_EQ(IrOpcode::kJSAdd, node_->opcode());
DCHECK(OneInputIs(Type::String()));
if (BothInputsAre(Type::String()) ||
- BinaryOperationHintOf(node_->op()) == BinaryOperationHint::kString) {
+ GetBinaryOperationHint(node_) == BinaryOperationHint::kString) {
HeapObjectBinopMatcher m(node_);
JSHeapBroker* broker = lowering_->broker();
if (m.right().HasValue() && m.right().Ref(broker).IsString()) {
@@ -269,6 +270,10 @@ class JSBinopReduction final {
}
// Remove the inputs corresponding to context, effect, and control.
NodeProperties::RemoveNonValueInputs(node_);
+ // Remove the feedback vector input, if applicable.
+ if (JSOperator::IsBinaryWithFeedback(node_->opcode())) {
+ node_->RemoveInput(JSBinaryOpNode::FeedbackVectorIndex());
+ }
// Finally, update the operator to the new one.
NodeProperties::ChangeOp(node_, op);
@@ -292,7 +297,6 @@ class JSBinopReduction final {
DCHECK_EQ(1, node_->op()->EffectInputCount());
DCHECK_EQ(1, node_->op()->EffectOutputCount());
DCHECK_EQ(1, node_->op()->ControlInputCount());
- DCHECK_EQ(2, node_->op()->ValueInputCount());
// Reconnect the control output to bypass the IfSuccess node and
// possibly disconnect from the IfException node.
@@ -304,6 +308,11 @@ class JSBinopReduction final {
}
node_->RemoveInput(NodeProperties::FirstContextIndex(node_));
+ // Remove the feedback vector input, if applicable.
+ if (JSOperator::IsBinaryWithFeedback(node_->opcode())) {
+ node_->RemoveInput(JSBinaryOpNode::FeedbackVectorIndex());
+ }
+ // Finally, update the operator to the new one.
NodeProperties::ChangeOp(node_, op);
// Update the type to number.
@@ -366,6 +375,11 @@ class JSBinopReduction final {
return !left_type().Maybe(t) && !right_type().Maybe(t);
}
+ BinaryOperationHint GetBinaryOperationHint(Node* node) const {
+ const FeedbackParameter& p = FeedbackParameterOf(node->op());
+ return lowering_->broker()->GetFeedbackForBinaryOperation(p.feedback());
+ }
+
Node* effect() { return NodeProperties::GetEffectInput(node_); }
Node* control() { return NodeProperties::GetControlInput(node_); }
Node* context() { return NodeProperties::GetContextInput(node_); }
@@ -414,6 +428,11 @@ class JSBinopReduction final {
return node;
}
+ CompareOperationHint GetCompareOperationHint(Node* node) const {
+ const FeedbackParameter& p = FeedbackParameterOf(node->op());
+ return lowering_->broker()->GetFeedbackForCompareOperation(p.feedback());
+ }
+
void update_effect(Node* effect) {
NodeProperties::ReplaceEffectInput(node_, effect);
}
@@ -443,8 +462,9 @@ Reduction JSTypedLowering::ReduceJSBitwiseNot(Node* node) {
Type input_type = NodeProperties::GetType(input);
if (input_type.Is(Type::PlainPrimitive())) {
// JSBitwiseNot(x) => NumberBitwiseXor(ToInt32(x), -1)
+ const FeedbackParameter& p = FeedbackParameterOf(node->op());
node->InsertInput(graph()->zone(), 1, jsgraph()->SmiConstant(-1));
- NodeProperties::ChangeOp(node, javascript()->BitwiseXor());
+ NodeProperties::ChangeOp(node, javascript()->BitwiseXor(p.feedback()));
JSBinopReduction r(this, node);
r.ConvertInputsToNumber();
r.ConvertInputsToUI32(kSigned, kSigned);
@@ -458,8 +478,9 @@ Reduction JSTypedLowering::ReduceJSDecrement(Node* node) {
Type input_type = NodeProperties::GetType(input);
if (input_type.Is(Type::PlainPrimitive())) {
// JSDecrement(x) => NumberSubtract(ToNumber(x), 1)
+ const FeedbackParameter& p = FeedbackParameterOf(node->op());
node->InsertInput(graph()->zone(), 1, jsgraph()->OneConstant());
- NodeProperties::ChangeOp(node, javascript()->Subtract());
+ NodeProperties::ChangeOp(node, javascript()->Subtract(p.feedback()));
JSBinopReduction r(this, node);
r.ConvertInputsToNumber();
DCHECK_EQ(simplified()->NumberSubtract(), r.NumberOp());
@@ -473,9 +494,9 @@ Reduction JSTypedLowering::ReduceJSIncrement(Node* node) {
Type input_type = NodeProperties::GetType(input);
if (input_type.Is(Type::PlainPrimitive())) {
// JSIncrement(x) => NumberAdd(ToNumber(x), 1)
+ const FeedbackParameter& p = FeedbackParameterOf(node->op());
node->InsertInput(graph()->zone(), 1, jsgraph()->OneConstant());
- BinaryOperationHint hint = BinaryOperationHint::kAny; // Dummy.
- NodeProperties::ChangeOp(node, javascript()->Add(hint));
+ NodeProperties::ChangeOp(node, javascript()->Add(p.feedback()));
JSBinopReduction r(this, node);
r.ConvertInputsToNumber();
DCHECK_EQ(simplified()->NumberAdd(), r.NumberOp());
@@ -489,8 +510,9 @@ Reduction JSTypedLowering::ReduceJSNegate(Node* node) {
Type input_type = NodeProperties::GetType(input);
if (input_type.Is(Type::PlainPrimitive())) {
// JSNegate(x) => NumberMultiply(ToNumber(x), -1)
+ const FeedbackParameter& p = FeedbackParameterOf(node->op());
node->InsertInput(graph()->zone(), 1, jsgraph()->SmiConstant(-1));
- NodeProperties::ChangeOp(node, javascript()->Multiply());
+ NodeProperties::ChangeOp(node, javascript()->Multiply(p.feedback()));
JSBinopReduction r(this, node);
r.ConvertInputsToNumber();
return r.ChangeToPureOperator(r.NumberOp(), Type::Number());
@@ -527,7 +549,7 @@ Reduction JSTypedLowering::ReduceJSAdd(Node* node) {
}
// Always bake in String feedback into the graph.
- if (BinaryOperationHintOf(node->op()) == BinaryOperationHint::kString) {
+ if (r.GetBinaryOperationHint(node) == BinaryOperationHint::kString) {
r.CheckInputsToString();
}
@@ -630,7 +652,7 @@ Reduction JSTypedLowering::ReduceJSAdd(Node* node) {
}
// We never get here when we had String feedback.
- DCHECK_NE(BinaryOperationHint::kString, BinaryOperationHintOf(node->op()));
+ DCHECK_NE(BinaryOperationHint::kString, r.GetBinaryOperationHint(node));
if (r.OneInputIs(Type::String())) {
StringAddFlags flags = STRING_ADD_CHECK_NONE;
if (!r.LeftInputIs(Type::String())) {
@@ -654,6 +676,7 @@ Reduction JSTypedLowering::ReduceJSAdd(Node* node) {
callable.descriptor().GetStackParameterCount(),
CallDescriptor::kNeedsFrameState, properties);
DCHECK_EQ(1, OperatorProperties::GetFrameStateInputCount(node->op()));
+ node->RemoveInput(JSAddNode::FeedbackVectorIndex());
node->InsertInput(graph()->zone(), 0,
jsgraph()->HeapConstant(callable.code()));
NodeProperties::ChangeOp(node, common()->Call(call_descriptor));
@@ -887,7 +910,14 @@ Reduction JSTypedLowering::ReduceJSStrictEqual(Node* node) {
if (r.BothInputsAre(Type::Signed32()) ||
r.BothInputsAre(Type::Unsigned32())) {
return r.ChangeToPureOperator(simplified()->NumberEqual());
- } else if (r.GetCompareNumberOperationHint(&hint)) {
+ } else if (r.GetCompareNumberOperationHint(&hint) &&
+ hint != NumberOperationHint::kNumberOrOddball &&
+ hint != NumberOperationHint::kNumberOrBoolean) {
+ // SpeculativeNumberEqual performs implicit conversion of oddballs to
+    // numbers, so we must not generate it for strict equality with the
+    // respective hint.
+ DCHECK(hint == NumberOperationHint::kNumber ||
+ hint == NumberOperationHint::kSignedSmall);
return r.ChangeToSpeculativeOperator(
simplified()->SpeculativeNumberEqual(hint), Type::Boolean());
} else if (r.BothInputsAre(Type::Number())) {
@@ -1463,17 +1493,34 @@ void ReduceBuiltin(JSGraph* jsgraph, Node* node, int builtin_index, int arity,
CallDescriptor::Flags flags) {
// Patch {node} to a direct CEntry call.
//
+ // When V8_REVERSE_JSARGS is set:
+ // ----------- A r g u m e n t s -----------
+ // -- 0: CEntry
+ // --- Stack args ---
+ // -- 1: new_target
+ // -- 2: target
+ // -- 3: argc, including the receiver and implicit args (Smi)
+ // -- 4: padding
+ // -- 5: receiver
+ // -- [6, 6 + n[: the n actual arguments passed to the builtin
+ // --- Register args ---
+ // -- 6 + n: the C entry point
+ // -- 6 + n + 1: argc (Int32)
+ // -----------------------------------
+ //
+ // Otherwise:
// ----------- A r g u m e n t s -----------
// -- 0: CEntry
// --- Stack args ---
// -- 1: receiver
// -- [2, 2 + n[: the n actual arguments passed to the builtin
- // -- 2 + n: argc, including the receiver and implicit args (Smi)
- // -- 2 + n + 1: target
- // -- 2 + n + 2: new_target
+ // -- 2 + n: padding
+ // -- 2 + n + 1: argc, including the receiver and implicit args (Smi)
+ // -- 2 + n + 2: target
+ // -- 2 + n + 3: new_target
// --- Register args ---
- // -- 2 + n + 3: the C entry point
- // -- 2 + n + 4: argc (Int32)
+ // -- 2 + n + 4: the C entry point
+ // -- 2 + n + 5: argc (Int32)
// -----------------------------------
// The logic contained here is mirrored in Builtins::Generate_Adaptor.
@@ -1496,6 +1543,25 @@ void ReduceBuiltin(JSGraph* jsgraph, Node* node, int builtin_index, int arity,
node->ReplaceInput(0, stub);
Zone* zone = jsgraph->zone();
+ const int argc = arity + BuiltinArguments::kNumExtraArgsWithReceiver;
+ Node* argc_node = jsgraph->Constant(argc);
+
+ static const int kStubAndReceiver = 2;
+#ifdef V8_REVERSE_JSARGS
+ node->InsertInput(zone, 1, new_target);
+ node->InsertInput(zone, 2, target);
+ node->InsertInput(zone, 3, argc_node);
+ node->InsertInput(zone, 4, jsgraph->PaddingConstant());
+
+ if (is_construct) {
+ // Unify representations between construct and call nodes.
+ // Remove new target and add receiver as a stack parameter.
+ Node* receiver = jsgraph->UndefinedConstant();
+ node->RemoveInput(argc);
+ node->InsertInput(zone, 5, receiver);
+ }
+ int cursor = arity + kStubAndReceiver + BuiltinArguments::kNumExtraArgs;
+#else
if (is_construct) {
// Unify representations between construct and call nodes.
// Remove new target and add receiver as a stack parameter.
@@ -1504,15 +1570,12 @@ void ReduceBuiltin(JSGraph* jsgraph, Node* node, int builtin_index, int arity,
node->InsertInput(zone, 1, receiver);
}
- const int argc = arity + BuiltinArguments::kNumExtraArgsWithReceiver;
- Node* argc_node = jsgraph->Constant(argc);
-
- static const int kStubAndReceiver = 2;
int cursor = arity + kStubAndReceiver;
node->InsertInput(zone, cursor++, jsgraph->PaddingConstant());
node->InsertInput(zone, cursor++, argc_node);
node->InsertInput(zone, cursor++, target);
node->InsertInput(zone, cursor++, new_target);
+#endif
Address entry = Builtins::CppEntryOf(builtin_index);
ExternalReference entry_ref = ExternalReference::Create(entry);
@@ -1525,7 +1588,8 @@ void ReduceBuiltin(JSGraph* jsgraph, Node* node, int builtin_index, int arity,
const char* debug_name = Builtins::name(builtin_index);
Operator::Properties properties = node->op()->properties();
auto call_descriptor = Linkage::GetCEntryStubCallDescriptor(
- zone, kReturnCount, argc, debug_name, properties, flags);
+ zone, kReturnCount, argc, debug_name, properties, flags,
+ StackArgumentOrder::kJS);
NodeProperties::ChangeOp(node, jsgraph->common()->Call(call_descriptor));
}
@@ -1577,8 +1641,7 @@ Reduction JSTypedLowering::ReduceJSConstructForwardVarargs(Node* node) {
Reduction JSTypedLowering::ReduceJSConstruct(Node* node) {
DCHECK_EQ(IrOpcode::kJSConstruct, node->opcode());
ConstructParameters const& p = ConstructParametersOf(node->op());
- DCHECK_LE(2u, p.arity());
- int const arity = static_cast<int>(p.arity() - 2);
+ int const arity = p.arity_without_implicit_args();
Node* target = NodeProperties::GetValueInput(node, 0);
Type target_type = NodeProperties::GetType(target);
Node* new_target = NodeProperties::GetValueInput(node, arity + 1);
@@ -1649,7 +1712,7 @@ Reduction JSTypedLowering::ReduceJSCallForwardVarargs(Node* node) {
Reduction JSTypedLowering::ReduceJSCall(Node* node) {
DCHECK_EQ(IrOpcode::kJSCall, node->opcode());
CallParameters const& p = CallParametersOf(node->op());
- int arity = static_cast<int>(p.arity() - 2);
+ int arity = p.arity_without_implicit_args();
ConvertReceiverMode convert_mode = p.convert_mode();
Node* target = NodeProperties::GetValueInput(node, 0);
Type target_type = NodeProperties::GetType(target);
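
The two layout comments in ReduceBuiltin above encode a fair amount of index arithmetic. A small stand-alone model (strings in place of graph nodes; the extra args new_target, target, argc, padding and receiver are spelled out explicitly as assumptions mirroring BuiltinArguments) makes the positions of the register arguments easy to verify in both orderings:

#include <cassert>
#include <string>
#include <vector>

// Builds the patched input list for n explicit builtin arguments.
std::vector<std::string> BuildInputs(int n, bool reverse_jsargs) {
  std::vector<std::string> in = {"CEntry"};
  std::vector<std::string> args;
  for (int i = 0; i < n; ++i) args.push_back("arg" + std::to_string(i));
  if (reverse_jsargs) {
    in.insert(in.end(), {"new_target", "target", "argc", "padding", "receiver"});
    in.insert(in.end(), args.begin(), args.end());
  } else {
    in.push_back("receiver");
    in.insert(in.end(), args.begin(), args.end());
    in.insert(in.end(), {"padding", "argc", "target", "new_target"});
  }
  // Register args come last in either case.
  in.push_back("c_entry_point");
  in.push_back("argc_int32");
  return in;
}

int main() {
  const int n = 3;
  auto reversed = BuildInputs(n, true);
  auto classic = BuildInputs(n, false);
  // "-- 6 + n: the C entry point" in the V8_REVERSE_JSARGS comment.
  assert(reversed[6 + n] == "c_entry_point");
  // "-- 2 + n + 4: the C entry point" in the old layout.
  assert(classic[2 + n + 4] == "c_entry_point");
  return 0;
}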
diff --git a/chromium/v8/src/compiler/linkage.cc b/chromium/v8/src/compiler/linkage.cc
index e16290f2a11..08ab34a892e 100644
--- a/chromium/v8/src/compiler/linkage.cc
+++ b/chromium/v8/src/compiler/linkage.cc
@@ -180,7 +180,7 @@ bool Linkage::NeedsFrameStateInput(Runtime::FunctionId function) {
switch (function) {
// Most runtime functions need a FrameState. A few chosen ones that we know
// not to call into arbitrary JavaScript, not to throw, and not to lazily
- // deoptimize are whitelisted here and can be called without a FrameState.
+ // deoptimize are allowlisted here and can be called without a FrameState.
case Runtime::kAbort:
case Runtime::kAllocateInOldGeneration:
case Runtime::kCreateIterResultObject:
@@ -218,7 +218,7 @@ bool Linkage::NeedsFrameStateInput(Runtime::FunctionId function) {
break;
}
- // For safety, default to needing a FrameState unless whitelisted.
+ // For safety, default to needing a FrameState unless allowlisted.
return true;
}
@@ -253,7 +253,7 @@ CallDescriptor* Linkage::GetRuntimeCallDescriptor(
CallDescriptor* Linkage::GetCEntryStubCallDescriptor(
Zone* zone, int return_count, int js_parameter_count,
const char* debug_name, Operator::Properties properties,
- CallDescriptor::Flags flags) {
+ CallDescriptor::Flags flags, StackArgumentOrder stack_order) {
const size_t function_count = 1;
const size_t num_args_count = 1;
const size_t context_count = 1;
@@ -305,7 +305,8 @@ CallDescriptor* Linkage::GetCEntryStubCallDescriptor(
kNoCalleeSaved, // callee-saved
kNoCalleeSaved, // callee-saved fp
flags, // flags
- debug_name); // debug name
+ debug_name, // debug name
+ stack_order); // stack order
}
CallDescriptor* Linkage::GetJSCallDescriptor(Zone* zone, bool is_osr,
@@ -325,7 +326,11 @@ CallDescriptor* Linkage::GetJSCallDescriptor(Zone* zone, bool is_osr,
// All parameters to JS calls go on the stack.
for (int i = 0; i < js_parameter_count; i++) {
+#ifdef V8_REVERSE_JSARGS
+ int spill_slot_index = -i - 1;
+#else
int spill_slot_index = i - js_parameter_count;
+#endif
locations.AddParam(LinkageLocation::ForCallerFrameSlot(
spill_slot_index, MachineType::AnyTagged()));
}
@@ -358,7 +363,8 @@ CallDescriptor* Linkage::GetJSCallDescriptor(Zone* zone, bool is_osr,
kNoCalleeSaved, // callee-saved
kNoCalleeSaved, // callee-saved fp
flags, // flags
- "js-call");
+ "js-call", // debug name
+ StackArgumentOrder::kJS); // stack order
}
// TODO(turbofan): cache call descriptors for code stub calls.
@@ -458,6 +464,7 @@ CallDescriptor* Linkage::GetStubCallDescriptor(
kNoCalleeSaved, // callee-saved fp
CallDescriptor::kCanUseRoots | flags, // flags
descriptor.DebugName(), // debug name
+ descriptor.GetStackArgumentOrder(), // stack order
descriptor.allocatable_registers());
}
diff --git a/chromium/v8/src/compiler/linkage.h b/chromium/v8/src/compiler/linkage.h
index b55f3cdcb7c..346e9bda0cc 100644
--- a/chromium/v8/src/compiler/linkage.h
+++ b/chromium/v8/src/compiler/linkage.h
@@ -237,6 +237,7 @@ class V8_EXPORT_PRIVATE CallDescriptor final
RegList callee_saved_registers,
RegList callee_saved_fp_registers, Flags flags,
const char* debug_name = "",
+ StackArgumentOrder stack_order = StackArgumentOrder::kDefault,
const RegList allocatable_registers = 0,
size_t stack_return_count = 0)
: kind_(kind),
@@ -250,6 +251,7 @@ class V8_EXPORT_PRIVATE CallDescriptor final
callee_saved_fp_registers_(callee_saved_fp_registers),
allocatable_registers_(allocatable_registers),
flags_(flags),
+ stack_order_(stack_order),
debug_name_(debug_name) {}
// Returns the kind of this call.
@@ -292,6 +294,19 @@ class V8_EXPORT_PRIVATE CallDescriptor final
return stack_param_count_;
}
+ int GetStackIndexFromSlot(int slot_index) const {
+#ifdef V8_REVERSE_JSARGS
+ switch (GetStackArgumentOrder()) {
+ case StackArgumentOrder::kDefault:
+ return -slot_index - 1;
+ case StackArgumentOrder::kJS:
+ return slot_index + static_cast<int>(StackParameterCount());
+ }
+#else
+ return -slot_index - 1;
+#endif
+ }
+
// The total number of inputs to this call, which includes the target,
// receiver, context, etc.
// TODO(titzer): this should input the framestate input too.
@@ -338,6 +353,8 @@ class V8_EXPORT_PRIVATE CallDescriptor final
return location_sig_->GetParam(index).GetType();
}
+ StackArgumentOrder GetStackArgumentOrder() const { return stack_order_; }
+
// Operator properties describe how this call can be optimized, if at all.
Operator::Properties properties() const { return properties_; }
@@ -391,6 +408,7 @@ class V8_EXPORT_PRIVATE CallDescriptor final
// register allocator to use.
const RegList allocatable_registers_;
const Flags flags_;
+ const StackArgumentOrder stack_order_;
const char* const debug_name_;
const CFunctionInfo* c_function_info_ = nullptr;
@@ -438,7 +456,8 @@ class V8_EXPORT_PRIVATE Linkage : public NON_EXPORTED_BASE(ZoneObject) {
static CallDescriptor* GetCEntryStubCallDescriptor(
Zone* zone, int return_count, int js_parameter_count,
const char* debug_name, Operator::Properties properties,
- CallDescriptor::Flags flags);
+ CallDescriptor::Flags flags,
+ StackArgumentOrder stack_order = StackArgumentOrder::kDefault);
static CallDescriptor* GetStubCallDescriptor(
Zone* zone, const CallInterfaceDescriptor& descriptor,
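
GetStackIndexFromSlot above folds the argument-order handling into one place. A compact model of the mapping, with the V8_REVERSE_JSARGS #ifdef turned into an explicit bool parameter purely for illustration (an adaptation, not the real signature):

#include <cassert>

enum class StackArgumentOrder { kDefault, kJS };

// With reversed JS arguments, kJS descriptors map slot i to
// i + StackParameterCount(); kDefault (and builds without
// V8_REVERSE_JSARGS) keep the old -i - 1 scheme.
int GetStackIndexFromSlot(bool reverse_jsargs, StackArgumentOrder order,
                          int slot_index, int stack_param_count) {
  if (!reverse_jsargs) return -slot_index - 1;
  switch (order) {
    case StackArgumentOrder::kDefault:
      return -slot_index - 1;
    case StackArgumentOrder::kJS:
      return slot_index + stack_param_count;
  }
  return 0;  // unreachable
}

int main() {
  const int stack_params = 3;
  // Old scheme: slots 0,1,2 -> -1,-2,-3.
  assert(GetStackIndexFromSlot(false, StackArgumentOrder::kJS, 0, stack_params) == -1);
  // Reversed JS args: slots 0,1,2 -> 3,4,5.
  assert(GetStackIndexFromSlot(true, StackArgumentOrder::kJS, 2, stack_params) == 5);
  assert(GetStackIndexFromSlot(true, StackArgumentOrder::kDefault, 2, stack_params) == -3);
  return 0;
}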
diff --git a/chromium/v8/src/compiler/load-elimination.h b/chromium/v8/src/compiler/load-elimination.h
index b97fd7b8834..fb5aee23aa0 100644
--- a/chromium/v8/src/compiler/load-elimination.h
+++ b/chromium/v8/src/compiler/load-elimination.h
@@ -228,8 +228,6 @@ class V8_EXPORT_PRIVATE LoadElimination final
class AbstractState final : public ZoneObject {
public:
- AbstractState() {}
-
bool Equals(AbstractState const* that) const;
void Merge(AbstractState const* that, Zone* zone);
diff --git a/chromium/v8/src/compiler/machine-graph-verifier.cc b/chromium/v8/src/compiler/machine-graph-verifier.cc
index 8b318d1430b..5eeb5dc2486 100644
--- a/chromium/v8/src/compiler/machine-graph-verifier.cc
+++ b/chromium/v8/src/compiler/machine-graph-verifier.cc
@@ -735,23 +735,6 @@ class MachineRepresentationChecker {
}
}
- void CheckValueInputIsCompressed(Node const* node, int index) {
- Node const* input = node->InputAt(index);
- switch (inferrer_->GetRepresentation(input)) {
- case MachineRepresentation::kCompressed:
- case MachineRepresentation::kCompressedPointer:
- return;
- default:
- break;
- }
- std::ostringstream str;
- str << "TypeError: node #" << node->id() << ":" << *node->op()
- << " uses node #" << input->id() << ":" << *input->op()
- << " which doesn't have a compressed representation.";
- PrintDebugHelp(str, node);
- FATAL("%s", str.str().c_str());
- }
-
void CheckValueInputIsTagged(Node const* node, int index) {
Node const* input = node->InputAt(index);
switch (inferrer_->GetRepresentation(input)) {
@@ -985,35 +968,6 @@ class MachineRepresentationChecker {
}
}
- bool Intersect(MachineRepresentation lhs, MachineRepresentation rhs) {
- return (GetRepresentationProperties(lhs) &
- GetRepresentationProperties(rhs)) != 0;
- }
-
- enum RepresentationProperties { kIsPointer = 1, kIsTagged = 2 };
-
- int GetRepresentationProperties(MachineRepresentation representation) {
- switch (representation) {
- case MachineRepresentation::kTagged:
- case MachineRepresentation::kTaggedPointer:
- return kIsPointer | kIsTagged;
- case MachineRepresentation::kTaggedSigned:
- return kIsTagged;
- case MachineRepresentation::kWord32:
- return MachineRepresentation::kWord32 ==
- MachineType::PointerRepresentation()
- ? kIsPointer
- : 0;
- case MachineRepresentation::kWord64:
- return MachineRepresentation::kWord64 ==
- MachineType::PointerRepresentation()
- ? kIsPointer
- : 0;
- default:
- return 0;
- }
- }
-
bool IsCompatible(MachineRepresentation expected,
MachineRepresentation actual) {
switch (expected) {
diff --git a/chromium/v8/src/compiler/machine-graph.cc b/chromium/v8/src/compiler/machine-graph.cc
index 0a00392f4b2..34464cfb052 100644
--- a/chromium/v8/src/compiler/machine-graph.cc
+++ b/chromium/v8/src/compiler/machine-graph.cc
@@ -32,6 +32,11 @@ Node* MachineGraph::IntPtrConstant(intptr_t value) {
: Int64Constant(static_cast<int64_t>(value));
}
+Node* MachineGraph::UintPtrConstant(uintptr_t value) {
+ return machine()->Is32() ? Uint32Constant(static_cast<uint32_t>(value))
+ : Uint64Constant(static_cast<uint64_t>(value));
+}
+
Node* MachineGraph::TaggedIndexConstant(intptr_t value) {
int32_t value32 = static_cast<int32_t>(value);
Node** loc = cache_.FindTaggedIndexConstant(value32);
diff --git a/chromium/v8/src/compiler/machine-graph.h b/chromium/v8/src/compiler/machine-graph.h
index 9eb5998dfc7..87175847f54 100644
--- a/chromium/v8/src/compiler/machine-graph.h
+++ b/chromium/v8/src/compiler/machine-graph.h
@@ -43,6 +43,7 @@ class V8_EXPORT_PRIVATE MachineGraph : public NON_EXPORTED_BASE(ZoneObject) {
// TODO(turbofan): Code using Int32Constant/Int64Constant to store pointer
// constants is probably not serializable.
Node* IntPtrConstant(intptr_t value);
+ Node* UintPtrConstant(uintptr_t value);
Node* TaggedIndexConstant(intptr_t value);
diff --git a/chromium/v8/src/compiler/machine-operator-reducer.cc b/chromium/v8/src/compiler/machine-operator-reducer.cc
index 1b600291691..127c7681099 100644
--- a/chromium/v8/src/compiler/machine-operator-reducer.cc
+++ b/chromium/v8/src/compiler/machine-operator-reducer.cc
@@ -311,35 +311,7 @@ Reduction MachineOperatorReducer::Reduce(Node* node) {
break;
}
case IrOpcode::kWord32Equal: {
- Int32BinopMatcher m(node);
- if (m.IsFoldable()) { // K == K => K
- return ReplaceBool(m.left().Value() == m.right().Value());
- }
- if (m.left().IsInt32Sub() && m.right().Is(0)) { // x - y == 0 => x == y
- Int32BinopMatcher msub(m.left().node());
- node->ReplaceInput(0, msub.left().node());
- node->ReplaceInput(1, msub.right().node());
- return Changed(node);
- }
- // TODO(turbofan): fold HeapConstant, ExternalReference, pointer compares
- if (m.LeftEqualsRight()) return ReplaceBool(true); // x == x => true
- if (m.right().HasValue()) {
- base::Optional<std::pair<Node*, uint32_t>> replacements;
- if (m.left().IsTruncateInt64ToInt32()) {
- replacements = ReduceWord32EqualForConstantRhs<Word64Adapter>(
- NodeProperties::GetValueInput(m.left().node(), 0),
- static_cast<uint32_t>(m.right().Value()));
- } else {
- replacements = ReduceWord32EqualForConstantRhs<Word32Adapter>(
- m.left().node(), static_cast<uint32_t>(m.right().Value()));
- }
- if (replacements) {
- node->ReplaceInput(0, replacements->first);
- node->ReplaceInput(1, Uint32Constant(replacements->second));
- return Changed(node);
- }
- }
- break;
+ return ReduceWord32Equal(node);
}
case IrOpcode::kWord64Equal: {
Int64BinopMatcher m(node);
@@ -1623,9 +1595,117 @@ Reduction MachineOperatorReducer::ReduceWordNAnd(Node* node) {
return NoChange();
}
+namespace {
+
+// Represents an operation of the form `(source & mask) == masked_value`.
+struct BitfieldCheck {
+ Node* source;
+ uint32_t mask;
+ uint32_t masked_value;
+ bool truncate_from_64_bit;
+
+ static base::Optional<BitfieldCheck> Detect(Node* node) {
+ // There are two patterns to check for here:
+ // 1. Single-bit checks: `(val >> shift) & 1`, where:
+ // - the shift may be omitted, and/or
+ // - the result may be truncated from 64 to 32
+ // 2. Equality checks: `(val & mask) == expected`, where:
+ // - val may be truncated from 64 to 32 before masking (see
+ // ReduceWord32EqualForConstantRhs)
+ if (node->opcode() == IrOpcode::kWord32Equal) {
+ Uint32BinopMatcher eq(node);
+ if (eq.left().IsWord32And()) {
+ Uint32BinopMatcher mand(eq.left().node());
+ if (mand.right().HasValue()) {
+ BitfieldCheck result{mand.left().node(), mand.right().Value(),
+ eq.right().Value(), false};
+ if (mand.left().IsTruncateInt64ToInt32()) {
+ result.truncate_from_64_bit = true;
+ result.source =
+ NodeProperties::GetValueInput(mand.left().node(), 0);
+ }
+ return result;
+ }
+ }
+ } else {
+ if (node->opcode() == IrOpcode::kTruncateInt64ToInt32) {
+ return TryDetectShiftAndMaskOneBit<Word64Adapter>(
+ NodeProperties::GetValueInput(node, 0));
+ } else {
+ return TryDetectShiftAndMaskOneBit<Word32Adapter>(node);
+ }
+ }
+ return {};
+ }
+
+ base::Optional<BitfieldCheck> TryCombine(const BitfieldCheck& other) {
+ if (source != other.source ||
+ truncate_from_64_bit != other.truncate_from_64_bit)
+ return {};
+ uint32_t overlapping_bits = mask & other.mask;
+ // It would be kind of strange to have any overlapping bits, but they can be
+ // allowed as long as they don't require opposite values in the same
+ // positions.
+ if ((masked_value & overlapping_bits) !=
+ (other.masked_value & overlapping_bits))
+ return {};
+ return BitfieldCheck{source, mask | other.mask,
+ masked_value | other.masked_value,
+ truncate_from_64_bit};
+ }
+
+ private:
+ template <typename WordNAdapter>
+ static base::Optional<BitfieldCheck> TryDetectShiftAndMaskOneBit(Node* node) {
+ // Look for the pattern `(val >> shift) & 1`. The shift may be omitted.
+ if (WordNAdapter::IsWordNAnd(NodeMatcher(node))) {
+ typename WordNAdapter::IntNBinopMatcher mand(node);
+ if (mand.right().HasValue() && mand.right().Value() == 1) {
+ if (WordNAdapter::IsWordNShr(mand.left()) ||
+ WordNAdapter::IsWordNSar(mand.left())) {
+ typename WordNAdapter::UintNBinopMatcher shift(mand.left().node());
+ if (shift.right().HasValue() && shift.right().Value() < 32u) {
+ uint32_t mask = 1 << shift.right().Value();
+ return BitfieldCheck{shift.left().node(), mask, mask,
+ WordNAdapter::WORD_SIZE == 64};
+ }
+ }
+ return BitfieldCheck{mand.left().node(), 1, 1,
+ WordNAdapter::WORD_SIZE == 64};
+ }
+ }
+ return {};
+ }
+};
+
+} // namespace
+
Reduction MachineOperatorReducer::ReduceWord32And(Node* node) {
DCHECK_EQ(IrOpcode::kWord32And, node->opcode());
- return ReduceWordNAnd<Word32Adapter>(node);
+ Reduction reduction = ReduceWordNAnd<Word32Adapter>(node);
+ if (reduction.Changed()) {
+ return reduction;
+ }
+
+ // Attempt to detect multiple bitfield checks from the same bitfield struct
+ // and fold them into a single check.
+ Int32BinopMatcher m(node);
+ if (auto right_bitfield = BitfieldCheck::Detect(m.right().node())) {
+ if (auto left_bitfield = BitfieldCheck::Detect(m.left().node())) {
+ if (auto combined_bitfield = left_bitfield->TryCombine(*right_bitfield)) {
+ Node* source = combined_bitfield->source;
+ if (combined_bitfield->truncate_from_64_bit) {
+ source = TruncateInt64ToInt32(source);
+ }
+ node->ReplaceInput(0, Word32And(source, combined_bitfield->mask));
+ node->ReplaceInput(1, Int32Constant(combined_bitfield->masked_value));
+ NodeProperties::ChangeOp(node, machine()->Word32Equal());
+ return Changed(node).FollowedBy(ReduceWord32Equal(node));
+ }
+ }
+ }
+
+ return NoChange();
}
Reduction MachineOperatorReducer::ReduceWord64And(Node* node) {
@@ -1756,6 +1836,39 @@ Reduction MachineOperatorReducer::ReduceWord64Xor(Node* node) {
return ReduceWordNXor<Word64Adapter>(node);
}
+Reduction MachineOperatorReducer::ReduceWord32Equal(Node* node) {
+ Int32BinopMatcher m(node);
+ if (m.IsFoldable()) { // K == K => K
+ return ReplaceBool(m.left().Value() == m.right().Value());
+ }
+ if (m.left().IsInt32Sub() && m.right().Is(0)) { // x - y == 0 => x == y
+ Int32BinopMatcher msub(m.left().node());
+ node->ReplaceInput(0, msub.left().node());
+ node->ReplaceInput(1, msub.right().node());
+ return Changed(node);
+ }
+ // TODO(turbofan): fold HeapConstant, ExternalReference, pointer compares
+ if (m.LeftEqualsRight()) return ReplaceBool(true); // x == x => true
+ if (m.right().HasValue()) {
+ base::Optional<std::pair<Node*, uint32_t>> replacements;
+ if (m.left().IsTruncateInt64ToInt32()) {
+ replacements = ReduceWord32EqualForConstantRhs<Word64Adapter>(
+ NodeProperties::GetValueInput(m.left().node(), 0),
+ static_cast<uint32_t>(m.right().Value()));
+ } else {
+ replacements = ReduceWord32EqualForConstantRhs<Word32Adapter>(
+ m.left().node(), static_cast<uint32_t>(m.right().Value()));
+ }
+ if (replacements) {
+ node->ReplaceInput(0, replacements->first);
+ node->ReplaceInput(1, Uint32Constant(replacements->second));
+ return Changed(node);
+ }
+ }
+
+ return NoChange();
+}
+
Reduction MachineOperatorReducer::ReduceFloat64InsertLowWord32(Node* node) {
DCHECK_EQ(IrOpcode::kFloat64InsertLowWord32, node->opcode());
Float64Matcher mlhs(node->InputAt(0));
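
For context on the BitfieldCheck change above, here is a standalone sketch (not V8 code) of the value-level folding that ReduceWord32And now performs on the IR: two bitfield checks against the same source combine into a single masked compare as long as their fixed bits do not conflict. BitCheck and Combine are hypothetical names mirroring BitfieldCheck::TryCombine.

#include <cassert>
#include <cstdint>
#include <optional>

struct BitCheck {
  uint32_t mask;
  uint32_t masked_value;  // expected value of (source & mask)
};

// Combine `(x & a.mask) == a.masked_value && (x & b.mask) == b.masked_value`
// into one check, rejecting the case where overlapping bits are required to
// take opposite values.
std::optional<BitCheck> Combine(BitCheck a, BitCheck b) {
  uint32_t overlap = a.mask & b.mask;
  if ((a.masked_value & overlap) != (b.masked_value & overlap)) {
    return std::nullopt;
  }
  return BitCheck{a.mask | b.mask, a.masked_value | b.masked_value};
}

int main() {
  // `(x >> 2) & 1` together with `x & 1` becomes `(x & 0b101) == 0b101`.
  auto combined = Combine(BitCheck{0b100, 0b100}, BitCheck{0b001, 0b001});
  assert(combined && combined->mask == 0b101 && combined->masked_value == 0b101);

  for (uint32_t x = 0; x < 16; ++x) {
    bool separate = ((x & 0b100) == 0b100) && ((x & 0b001) == 0b001);
    bool folded = (x & combined->mask) == combined->masked_value;
    assert(separate == folded);  // the fold preserves semantics
  }
}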
diff --git a/chromium/v8/src/compiler/machine-operator-reducer.h b/chromium/v8/src/compiler/machine-operator-reducer.h
index 7970daefce9..9f12f818374 100644
--- a/chromium/v8/src/compiler/machine-operator-reducer.h
+++ b/chromium/v8/src/compiler/machine-operator-reducer.h
@@ -109,6 +109,7 @@ class V8_EXPORT_PRIVATE MachineOperatorReducer final
Reduction ReduceWord64Or(Node* node);
Reduction ReduceWord32Xor(Node* node);
Reduction ReduceWord64Xor(Node* node);
+ Reduction ReduceWord32Equal(Node* node);
Reduction ReduceFloat64InsertLowWord32(Node* node);
Reduction ReduceFloat64InsertHighWord32(Node* node);
Reduction ReduceFloat64Compare(Node* node);
diff --git a/chromium/v8/src/compiler/machine-operator.cc b/chromium/v8/src/compiler/machine-operator.cc
index 9a985eb5fa4..ed180283fed 100644
--- a/chromium/v8/src/compiler/machine-operator.cc
+++ b/chromium/v8/src/compiler/machine-operator.cc
@@ -339,6 +339,10 @@ ShiftKind ShiftKindOf(Operator const* op) {
V(F64x2Qfms, Operator::kNoProperties, 3, 0, 1) \
V(F64x2Pmin, Operator::kNoProperties, 2, 0, 1) \
V(F64x2Pmax, Operator::kNoProperties, 2, 0, 1) \
+ V(F64x2Ceil, Operator::kNoProperties, 1, 0, 1) \
+ V(F64x2Floor, Operator::kNoProperties, 1, 0, 1) \
+ V(F64x2Trunc, Operator::kNoProperties, 1, 0, 1) \
+ V(F64x2NearestInt, Operator::kNoProperties, 1, 0, 1) \
V(F32x4Splat, Operator::kNoProperties, 1, 0, 1) \
V(F32x4SConvertI32x4, Operator::kNoProperties, 1, 0, 1) \
V(F32x4UConvertI32x4, Operator::kNoProperties, 1, 0, 1) \
@@ -362,6 +366,10 @@ ShiftKind ShiftKindOf(Operator const* op) {
V(F32x4Qfms, Operator::kNoProperties, 3, 0, 1) \
V(F32x4Pmin, Operator::kNoProperties, 2, 0, 1) \
V(F32x4Pmax, Operator::kNoProperties, 2, 0, 1) \
+ V(F32x4Ceil, Operator::kNoProperties, 1, 0, 1) \
+ V(F32x4Floor, Operator::kNoProperties, 1, 0, 1) \
+ V(F32x4Trunc, Operator::kNoProperties, 1, 0, 1) \
+ V(F32x4NearestInt, Operator::kNoProperties, 1, 0, 1) \
V(I64x2Splat, Operator::kNoProperties, 1, 0, 1) \
V(I64x2SplatI32Pair, Operator::kNoProperties, 2, 0, 1) \
V(I64x2Neg, Operator::kNoProperties, 1, 0, 1) \
@@ -408,6 +416,7 @@ ShiftKind ShiftKindOf(Operator const* op) {
V(I32x4GeU, Operator::kNoProperties, 2, 0, 1) \
V(I32x4Abs, Operator::kNoProperties, 1, 0, 1) \
V(I32x4BitMask, Operator::kNoProperties, 1, 0, 1) \
+ V(I32x4DotI16x8S, Operator::kCommutative, 2, 0, 1) \
V(I16x8Splat, Operator::kNoProperties, 1, 0, 1) \
V(I16x8SConvertI8x16Low, Operator::kNoProperties, 1, 0, 1) \
V(I16x8SConvertI8x16High, Operator::kNoProperties, 1, 0, 1) \
@@ -476,14 +485,14 @@ ShiftKind ShiftKindOf(Operator const* op) {
V(S128Not, Operator::kNoProperties, 1, 0, 1) \
V(S128Select, Operator::kNoProperties, 3, 0, 1) \
V(S128AndNot, Operator::kNoProperties, 2, 0, 1) \
- V(S1x2AnyTrue, Operator::kNoProperties, 1, 0, 1) \
- V(S1x2AllTrue, Operator::kNoProperties, 1, 0, 1) \
- V(S1x4AnyTrue, Operator::kNoProperties, 1, 0, 1) \
- V(S1x4AllTrue, Operator::kNoProperties, 1, 0, 1) \
- V(S1x8AnyTrue, Operator::kNoProperties, 1, 0, 1) \
- V(S1x8AllTrue, Operator::kNoProperties, 1, 0, 1) \
- V(S1x16AnyTrue, Operator::kNoProperties, 1, 0, 1) \
- V(S1x16AllTrue, Operator::kNoProperties, 1, 0, 1) \
+ V(V64x2AnyTrue, Operator::kNoProperties, 1, 0, 1) \
+ V(V64x2AllTrue, Operator::kNoProperties, 1, 0, 1) \
+ V(V32x4AnyTrue, Operator::kNoProperties, 1, 0, 1) \
+ V(V32x4AllTrue, Operator::kNoProperties, 1, 0, 1) \
+ V(V16x8AnyTrue, Operator::kNoProperties, 1, 0, 1) \
+ V(V16x8AllTrue, Operator::kNoProperties, 1, 0, 1) \
+ V(V8x16AnyTrue, Operator::kNoProperties, 1, 0, 1) \
+ V(V8x16AllTrue, Operator::kNoProperties, 1, 0, 1) \
V(S8x16Swizzle, Operator::kNoProperties, 2, 0, 1)
// The format is:
diff --git a/chromium/v8/src/compiler/machine-operator.h b/chromium/v8/src/compiler/machine-operator.h
index aa4f2dcf2ca..f013337478e 100644
--- a/chromium/v8/src/compiler/machine-operator.h
+++ b/chromium/v8/src/compiler/machine-operator.h
@@ -576,6 +576,10 @@ class V8_EXPORT_PRIVATE MachineOperatorBuilder final
const Operator* F64x2Qfms();
const Operator* F64x2Pmin();
const Operator* F64x2Pmax();
+ const Operator* F64x2Ceil();
+ const Operator* F64x2Floor();
+ const Operator* F64x2Trunc();
+ const Operator* F64x2NearestInt();
const Operator* F32x4Splat();
const Operator* F32x4ExtractLane(int32_t);
@@ -602,6 +606,10 @@ class V8_EXPORT_PRIVATE MachineOperatorBuilder final
const Operator* F32x4Qfms();
const Operator* F32x4Pmin();
const Operator* F32x4Pmax();
+ const Operator* F32x4Ceil();
+ const Operator* F32x4Floor();
+ const Operator* F32x4Trunc();
+ const Operator* F32x4NearestInt();
const Operator* I64x2Splat();
const Operator* I64x2SplatI32Pair();
@@ -656,6 +664,7 @@ class V8_EXPORT_PRIVATE MachineOperatorBuilder final
const Operator* I32x4GeU();
const Operator* I32x4Abs();
const Operator* I32x4BitMask();
+ const Operator* I32x4DotI16x8S();
const Operator* I16x8Splat();
const Operator* I16x8ExtractLaneU(int32_t);
@@ -740,14 +749,14 @@ class V8_EXPORT_PRIVATE MachineOperatorBuilder final
const Operator* S8x16Swizzle();
const Operator* S8x16Shuffle(const uint8_t shuffle[16]);
- const Operator* S1x2AnyTrue();
- const Operator* S1x2AllTrue();
- const Operator* S1x4AnyTrue();
- const Operator* S1x4AllTrue();
- const Operator* S1x8AnyTrue();
- const Operator* S1x8AllTrue();
- const Operator* S1x16AnyTrue();
- const Operator* S1x16AllTrue();
+ const Operator* V64x2AnyTrue();
+ const Operator* V64x2AllTrue();
+ const Operator* V32x4AnyTrue();
+ const Operator* V32x4AllTrue();
+ const Operator* V16x8AnyTrue();
+ const Operator* V16x8AllTrue();
+ const Operator* V8x16AnyTrue();
+ const Operator* V8x16AllTrue();
// load [base + index]
const Operator* Load(LoadRepresentation rep);
diff --git a/chromium/v8/src/compiler/memory-lowering.h b/chromium/v8/src/compiler/memory-lowering.h
index 45015e98bbf..1c6ef8a3722 100644
--- a/chromium/v8/src/compiler/memory-lowering.h
+++ b/chromium/v8/src/compiler/memory-lowering.h
@@ -78,7 +78,6 @@ class MemoryLowering final : public Reducer {
WriteBarrierAssertFailedCallback callback = [](Node*, Node*, const char*,
Zone*) { UNREACHABLE(); },
const char* function_debug_name = nullptr);
- ~MemoryLowering() = default;
const char* reducer_name() const override { return "MemoryReducer"; }
diff --git a/chromium/v8/src/compiler/node-matchers.h b/chromium/v8/src/compiler/node-matchers.h
index cf0df2d6360..bd93b545e12 100644
--- a/chromium/v8/src/compiler/node-matchers.h
+++ b/chromium/v8/src/compiler/node-matchers.h
@@ -39,7 +39,7 @@ struct NodeMatcher {
bool IsComparison() const;
-#define DEFINE_IS_OPCODE(Opcode) \
+#define DEFINE_IS_OPCODE(Opcode, ...) \
bool Is##Opcode() const { return opcode() == IrOpcode::k##Opcode; }
ALL_OP_LIST(DEFINE_IS_OPCODE)
#undef DEFINE_IS_OPCODE
diff --git a/chromium/v8/src/compiler/node.h b/chromium/v8/src/compiler/node.h
index 8072bab46eb..add4116dac9 100644
--- a/chromium/v8/src/compiler/node.h
+++ b/chromium/v8/src/compiler/node.h
@@ -303,6 +303,16 @@ Node** Node::OutOfLineInputs::inputs() {
std::ostream& operator<<(std::ostream& os, const Node& n);
+// Base class for node wrappers.
+class NodeWrapper {
+ public:
+ explicit constexpr NodeWrapper(Node* node) : node_(node) {}
+ operator Node*() const { return node_; }
+ Node* operator->() const { return node_; }
+
+ private:
+ Node* node_;
+};
// Typedefs to shorten commonly used Node containers.
using NodeDeque = ZoneDeque<Node*>;
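
The NodeWrapper base class added above is intended for typed views over nodes. The sketch below (not V8 code; FakeNode and CallNodeView are hypothetical) shows the usage pattern it enables: a derived wrapper adds type-specific checks while the implicit conversion and operator-> keep existing Node*-based call sites working unchanged.

#include <cassert>

struct FakeNode {
  int opcode;
};

class NodeWrapperSketch {
 public:
  explicit constexpr NodeWrapperSketch(FakeNode* node) : node_(node) {}
  operator FakeNode*() const { return node_; }  // implicit decay to pointer
  FakeNode* operator->() const { return node_; }

 private:
  FakeNode* node_;
};

// A typed view over one kind of node.
class CallNodeView : public NodeWrapperSketch {
 public:
  explicit CallNodeView(FakeNode* node) : NodeWrapperSketch(node) {
    assert(node->opcode == 7);  // pretend 7 is the call opcode
  }
};

void TakesRawPointer(FakeNode*) {}

int main() {
  FakeNode n{7};
  CallNodeView call(&n);
  TakesRawPointer(call);      // implicit conversion to FakeNode*
  assert(call->opcode == 7);  // operator-> forwards to the wrapped node
}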
diff --git a/chromium/v8/src/compiler/opcodes.cc b/chromium/v8/src/compiler/opcodes.cc
index c465422d346..3cd464d6b1b 100644
--- a/chromium/v8/src/compiler/opcodes.cc
+++ b/chromium/v8/src/compiler/opcodes.cc
@@ -16,7 +16,7 @@ namespace compiler {
namespace {
char const* const kMnemonics[] = {
-#define DECLARE_MNEMONIC(x) #x,
+#define DECLARE_MNEMONIC(x, ...) #x,
ALL_OP_LIST(DECLARE_MNEMONIC)
#undef DECLARE_MNEMONIC
"UnknownOpcode"};
diff --git a/chromium/v8/src/compiler/opcodes.h b/chromium/v8/src/compiler/opcodes.h
index f3b3ff8c8eb..9db2a912a2a 100644
--- a/chromium/v8/src/compiler/opcodes.h
+++ b/chromium/v8/src/compiler/opcodes.h
@@ -85,29 +85,30 @@
V(StaticAssert)
// Opcodes for JavaScript operators.
-#define JS_COMPARE_BINOP_LIST(V) \
- V(JSEqual) \
- V(JSStrictEqual) \
- V(JSLessThan) \
- V(JSGreaterThan) \
- V(JSLessThanOrEqual) \
- V(JSGreaterThanOrEqual)
+// Arguments are JSName (the name with a 'JS' prefix), and Name.
+#define JS_COMPARE_BINOP_LIST(V) \
+ V(JSEqual, Equal) \
+ V(JSStrictEqual, StrictEqual) \
+ V(JSLessThan, LessThan) \
+ V(JSGreaterThan, GreaterThan) \
+ V(JSLessThanOrEqual, LessThanOrEqual) \
+ V(JSGreaterThanOrEqual, GreaterThanOrEqual)
#define JS_BITWISE_BINOP_LIST(V) \
- V(JSBitwiseOr) \
- V(JSBitwiseXor) \
- V(JSBitwiseAnd) \
- V(JSShiftLeft) \
- V(JSShiftRight) \
- V(JSShiftRightLogical)
+ V(JSBitwiseOr, BitwiseOr) \
+ V(JSBitwiseXor, BitwiseXor) \
+ V(JSBitwiseAnd, BitwiseAnd) \
+ V(JSShiftLeft, ShiftLeft) \
+ V(JSShiftRight, ShiftRight) \
+ V(JSShiftRightLogical, ShiftRightLogical)
#define JS_ARITH_BINOP_LIST(V) \
- V(JSAdd) \
- V(JSSubtract) \
- V(JSMultiply) \
- V(JSDivide) \
- V(JSModulus) \
- V(JSExponentiate)
+ V(JSAdd, Add) \
+ V(JSSubtract, Subtract) \
+ V(JSMultiply, Multiply) \
+ V(JSDivide, Divide) \
+ V(JSModulus, Modulus) \
+ V(JSExponentiate, Exponentiate)
#define JS_SIMPLE_BINOP_LIST(V) \
JS_COMPARE_BINOP_LIST(V) \
@@ -127,12 +128,18 @@
V(JSToString) \
V(JSParseInt)
+#define JS_BITWISE_UNOP_LIST(V) \
+ V(JSBitwiseNot, BitwiseNot) \
+ V(JSNegate, Negate)
+
+#define JS_ARITH_UNOP_LIST(V) \
+ V(JSDecrement, Decrement) \
+ V(JSIncrement, Increment)
+
#define JS_SIMPLE_UNOP_LIST(V) \
- JS_CONVERSION_UNOP_LIST(V) \
- V(JSBitwiseNot) \
- V(JSDecrement) \
- V(JSIncrement) \
- V(JSNegate)
+ JS_ARITH_UNOP_LIST(V) \
+ JS_BITWISE_UNOP_LIST(V) \
+ JS_CONVERSION_UNOP_LIST(V)
#define JS_CREATE_OP_LIST(V) \
V(JSCloneObject) \
@@ -765,6 +772,10 @@
V(F64x2Qfms) \
V(F64x2Pmin) \
V(F64x2Pmax) \
+ V(F64x2Ceil) \
+ V(F64x2Floor) \
+ V(F64x2Trunc) \
+ V(F64x2NearestInt) \
V(F32x4Splat) \
V(F32x4ExtractLane) \
V(F32x4ReplaceLane) \
@@ -792,6 +803,10 @@
V(F32x4Qfms) \
V(F32x4Pmin) \
V(F32x4Pmax) \
+ V(F32x4Ceil) \
+ V(F32x4Floor) \
+ V(F32x4Trunc) \
+ V(F32x4NearestInt) \
V(I64x2Splat) \
V(I64x2SplatI32Pair) \
V(I64x2ExtractLane) \
@@ -847,6 +862,7 @@
V(I32x4GeU) \
V(I32x4Abs) \
V(I32x4BitMask) \
+ V(I32x4DotI16x8S) \
V(I16x8Splat) \
V(I16x8ExtractLaneU) \
V(I16x8ExtractLaneS) \
@@ -931,14 +947,14 @@
V(S128AndNot) \
V(S8x16Swizzle) \
V(S8x16Shuffle) \
- V(S1x2AnyTrue) \
- V(S1x2AllTrue) \
- V(S1x4AnyTrue) \
- V(S1x4AllTrue) \
- V(S1x8AnyTrue) \
- V(S1x8AllTrue) \
- V(S1x16AnyTrue) \
- V(S1x16AllTrue) \
+ V(V64x2AnyTrue) \
+ V(V64x2AllTrue) \
+ V(V32x4AnyTrue) \
+ V(V32x4AllTrue) \
+ V(V16x8AnyTrue) \
+ V(V16x8AllTrue) \
+ V(V8x16AnyTrue) \
+ V(V8x16AllTrue) \
V(LoadTransform)
#define VALUE_OP_LIST(V) \
@@ -962,12 +978,12 @@ namespace compiler {
class V8_EXPORT_PRIVATE IrOpcode {
public:
enum Value {
-#define DECLARE_OPCODE(x) k##x,
+#define DECLARE_OPCODE(x, ...) k##x,
ALL_OP_LIST(DECLARE_OPCODE)
#undef DECLARE_OPCODE
- kLast = -1
-#define COUNT_OPCODE(x) +1
- ALL_OP_LIST(COUNT_OPCODE)
+ kLast = -1
+#define COUNT_OPCODE(...) +1
+ ALL_OP_LIST(COUNT_OPCODE)
#undef COUNT_OPCODE
};
@@ -991,7 +1007,16 @@ class V8_EXPORT_PRIVATE IrOpcode {
// Returns true if opcode for constant operator.
static bool IsConstantOpcode(Value value) {
- return kInt32Constant <= value && value <= kRelocatableInt64Constant;
+#define CASE(Name) \
+ case k##Name: \
+ return true;
+ switch (value) {
+ CONSTANT_OP_LIST(CASE);
+ default:
+ return false;
+ }
+#undef CASE
+ UNREACHABLE();
}
static bool IsPhiOpcode(Value value) {
@@ -1006,8 +1031,9 @@ class V8_EXPORT_PRIVATE IrOpcode {
return kIfTrue <= value && value <= kIfDefault;
}
- // Returns true if opcode terminates control flow in a graph (i.e. respective
- // nodes are expected to have control uses by the graphs {End} node only).
+ // Returns true if opcode terminates control flow in a graph (i.e.
+  // respective nodes are expected to have control uses by the graph's {End}
+ // node only).
static bool IsGraphTerminator(Value value) {
return value == kDeoptimize || value == kReturn || value == kTailCall ||
value == kTerminate || value == kThrow;
@@ -1020,9 +1046,18 @@ class V8_EXPORT_PRIVATE IrOpcode {
// Returns true if opcode for comparison operator.
static bool IsComparisonOpcode(Value value) {
- return (kJSEqual <= value && value <= kJSGreaterThanOrEqual) ||
- (kNumberEqual <= value && value <= kStringLessThanOrEqual) ||
- (kWord32Equal <= value && value <= kFloat64LessThanOrEqual);
+#define CASE(Name, ...) \
+ case k##Name: \
+ return true;
+ switch (value) {
+ JS_COMPARE_BINOP_LIST(CASE);
+ SIMPLIFIED_COMPARE_BINOP_LIST(CASE);
+ MACHINE_COMPARE_BINOP_LIST(CASE);
+ default:
+ return false;
+ }
+#undef CASE
+ UNREACHABLE();
}
static bool IsContextChainExtendingOpcode(Value value) {
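
The opcodes.cc/opcodes.h changes above switch the opcode lists to a variadic X-macro style: entries may now carry extra arguments (e.g. V(JSEqual, Equal)), consumers that only need the first column discard the rest via `...`, and predicates such as IsConstantOpcode/IsComparisonOpcode are generated from sub-lists instead of relying on enum ranges. A minimal standalone sketch of the pattern (not V8 code; all names below are hypothetical):

#include <cstdio>

#define MY_COMPARE_LIST(V) \
  V(JSEqual, Equal)        \
  V(JSLessThan, LessThan)

#define MY_ARITH_LIST(V) \
  V(JSAdd, Add)          \
  V(JSSubtract, Subtract)

#define MY_OP_LIST(V) \
  MY_COMPARE_LIST(V)  \
  MY_ARITH_LIST(V)

// Enum and mnemonic table use only the first column; `...` absorbs the rest.
#define DECLARE_OPCODE(x, ...) k##x,
enum Opcode { MY_OP_LIST(DECLARE_OPCODE) };
#undef DECLARE_OPCODE

#define DECLARE_MNEMONIC(x, ...) #x,
static const char* const kMnemonics[] = {MY_OP_LIST(DECLARE_MNEMONIC)};
#undef DECLARE_MNEMONIC

// A membership predicate generated from one sub-list, replacing a
// hand-maintained "first <= value && value <= last" range check.
bool IsComparison(Opcode op) {
#define CASE(Name, ...) \
  case k##Name:         \
    return true;
  switch (op) {
    MY_COMPARE_LIST(CASE)
    default:
      return false;
  }
#undef CASE
}

int main() {
  std::printf("%s: %d\n", kMnemonics[kJSEqual], IsComparison(kJSEqual));  // 1
  std::printf("%s: %d\n", kMnemonics[kJSAdd], IsComparison(kJSAdd));      // 0
}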
diff --git a/chromium/v8/src/compiler/operator-properties.cc b/chromium/v8/src/compiler/operator-properties.cc
index a4892cdb2a2..bf0f724a993 100644
--- a/chromium/v8/src/compiler/operator-properties.cc
+++ b/chromium/v8/src/compiler/operator-properties.cc
@@ -24,7 +24,7 @@ bool OperatorProperties::NeedsExactContext(const Operator* op) {
DCHECK(HasContextInput(op));
IrOpcode::Value const opcode = static_cast<IrOpcode::Value>(op->opcode());
switch (opcode) {
-#define CASE(Name) case IrOpcode::k##Name:
+#define CASE(Name, ...) case IrOpcode::k##Name:
// Binary/unary operators, calls and constructor calls only
// need the context to generate exceptions or lookup fields
// on the native context, so passing any context is fine.
diff --git a/chromium/v8/src/compiler/pipeline-statistics.cc b/chromium/v8/src/compiler/pipeline-statistics.cc
index 5e2c7feffd1..7989cfacfb6 100644
--- a/chromium/v8/src/compiler/pipeline-statistics.cc
+++ b/chromium/v8/src/compiler/pipeline-statistics.cc
@@ -18,10 +18,10 @@ namespace compiler {
namespace {
// We log detailed phase information about the pipeline
-// in both the v8.turbofan and the v8.wasm categories.
+// in both the v8.turbofan and the v8.wasm.detailed categories.
constexpr const char kTraceCategory[] = // --
TRACE_DISABLED_BY_DEFAULT("v8.turbofan") "," // --
- TRACE_DISABLED_BY_DEFAULT("v8.wasm");
+ TRACE_DISABLED_BY_DEFAULT("v8.wasm.detailed");
} // namespace
diff --git a/chromium/v8/src/compiler/pipeline.cc b/chromium/v8/src/compiler/pipeline.cc
index ee6609cfa69..6f3b8923764 100644
--- a/chromium/v8/src/compiler/pipeline.cc
+++ b/chromium/v8/src/compiler/pipeline.cc
@@ -150,9 +150,9 @@ class PipelineData {
instruction_zone_(instruction_zone_scope_.zone()),
codegen_zone_scope_(zone_stats_, kCodegenZoneName),
codegen_zone_(codegen_zone_scope_.zone()),
- broker_(new JSHeapBroker(isolate_, info_->zone(),
- info_->trace_heap_broker_enabled(),
- is_concurrent_inlining)),
+ broker_(new JSHeapBroker(
+ isolate_, info_->zone(), info_->trace_heap_broker(),
+ is_concurrent_inlining, info->native_context_independent())),
register_allocation_zone_scope_(zone_stats_,
kRegisterAllocationZoneName),
register_allocation_zone_(register_allocation_zone_scope_.zone()),
@@ -160,9 +160,9 @@ class PipelineData {
PhaseScope scope(pipeline_statistics, "V8.TFInitPipelineData");
graph_ = new (graph_zone_) Graph(graph_zone_);
source_positions_ = new (graph_zone_) SourcePositionTable(graph_);
- node_origins_ = info->trace_turbo_json_enabled()
- ? new (graph_zone_) NodeOriginTable(graph_)
- : nullptr;
+ node_origins_ = info->trace_turbo_json() ? new (graph_zone_)
+ NodeOriginTable(graph_)
+ : nullptr;
simplified_ = new (graph_zone_) SimplifiedOperatorBuilder(graph_zone_);
machine_ = new (graph_zone_) MachineOperatorBuilder(
graph_zone_, MachineType::PointerRepresentation(),
@@ -349,11 +349,6 @@ class PipelineData {
return register_allocation_data_;
}
- BasicBlockProfiler::Data* profiler_data() const { return profiler_data_; }
- void set_profiler_data(BasicBlockProfiler::Data* profiler_data) {
- profiler_data_ = profiler_data;
- }
-
std::string const& source_position_output() const {
return source_position_output_;
}
@@ -370,7 +365,7 @@ class PipelineData {
}
void ChooseSpecializationContext() {
- if (info()->is_function_context_specializing()) {
+ if (info()->function_context_specializing()) {
DCHECK(info()->has_context());
specialization_context_ =
Just(OuterContext(handle(info()->context(), isolate()), 0));
@@ -599,9 +594,6 @@ class PipelineData {
Zone* register_allocation_zone_;
RegisterAllocationData* register_allocation_data_ = nullptr;
- // Basic block profiling support.
- BasicBlockProfiler::Data* profiler_data_ = nullptr;
-
// Source position output for --trace-turbo.
std::string source_position_output_;
@@ -680,9 +672,9 @@ void PrintFunctionSource(OptimizedCompilationInfo* info, Isolate* isolate,
Handle<Script> script(Script::cast(shared->script()), isolate);
if (!script->source().IsUndefined(isolate)) {
- CodeTracer::Scope tracing_scope(isolate->GetCodeTracer());
+ CodeTracer::StreamScope tracing_scope(isolate->GetCodeTracer());
Object source_name = script->name();
- OFStream os(tracing_scope.file());
+ auto& os = tracing_scope.stream();
os << "--- FUNCTION SOURCE (";
if (source_name.IsString()) {
os << String::cast(source_name).ToCString().get() << ":";
@@ -711,8 +703,8 @@ void PrintFunctionSource(OptimizedCompilationInfo* info, Isolate* isolate,
void PrintInlinedFunctionInfo(
OptimizedCompilationInfo* info, Isolate* isolate, int source_id,
int inlining_id, const OptimizedCompilationInfo::InlinedFunctionHolder& h) {
- CodeTracer::Scope tracing_scope(isolate->GetCodeTracer());
- OFStream os(tracing_scope.file());
+ CodeTracer::StreamScope tracing_scope(isolate->GetCodeTracer());
+ auto& os = tracing_scope.stream();
os << "INLINE (" << h.shared_info->DebugName().ToCString().get() << ") id{"
<< info->optimization_id() << "," << source_id << "} AS " << inlining_id
<< " AT ";
@@ -753,8 +745,8 @@ void PrintCode(Isolate* isolate, Handle<Code> code,
info->shared_info()->PassesFilter(FLAG_print_opt_code_filter));
if (print_code) {
std::unique_ptr<char[]> debug_name = info->GetDebugName();
- CodeTracer::Scope tracing_scope(isolate->GetCodeTracer());
- OFStream os(tracing_scope.file());
+ CodeTracer::StreamScope tracing_scope(isolate->GetCodeTracer());
+ auto& os = tracing_scope.stream();
// Print the source code if available.
bool print_source = code->kind() == Code::OPTIMIZED_FUNCTION;
@@ -795,7 +787,7 @@ void PrintCode(Isolate* isolate, Handle<Code> code,
void TraceScheduleAndVerify(OptimizedCompilationInfo* info, PipelineData* data,
Schedule* schedule, const char* phase_name) {
- if (info->trace_turbo_json_enabled()) {
+ if (info->trace_turbo_json()) {
AllowHandleDereference allow_deref;
TurboJsonFile json_of(info, std::ios_base::app);
json_of << "{\"name\":\"" << phase_name << "\",\"type\":\"schedule\""
@@ -808,11 +800,12 @@ void TraceScheduleAndVerify(OptimizedCompilationInfo* info, PipelineData* data,
}
json_of << "\"},\n";
}
- if (info->trace_turbo_graph_enabled() || FLAG_trace_turbo_scheduler) {
+ if (info->trace_turbo_graph() || FLAG_trace_turbo_scheduler) {
AllowHandleDereference allow_deref;
- CodeTracer::Scope tracing_scope(data->GetCodeTracer());
- OFStream os(tracing_scope.file());
- os << "-- Schedule --------------------------------------\n" << *schedule;
+ CodeTracer::StreamScope tracing_scope(data->GetCodeTracer());
+ tracing_scope.stream()
+ << "-- Schedule --------------------------------------\n"
+ << *schedule;
}
if (FLAG_turbo_verify) ScheduleVerifier::Run(schedule);
@@ -865,13 +858,13 @@ class NodeOriginsWrapper final : public Reducer {
void AddReducer(PipelineData* data, GraphReducer* graph_reducer,
Reducer* reducer) {
- if (data->info()->is_source_positions_enabled()) {
+ if (data->info()->source_positions()) {
void* const buffer = data->graph_zone()->New(sizeof(SourcePositionWrapper));
SourcePositionWrapper* const wrapper =
new (buffer) SourcePositionWrapper(reducer, data->source_positions());
reducer = wrapper;
}
- if (data->info()->trace_turbo_json_enabled()) {
+ if (data->info()->trace_turbo_json()) {
void* const buffer = data->graph_zone()->New(sizeof(NodeOriginsWrapper));
NodeOriginsWrapper* const wrapper =
new (buffer) NodeOriginsWrapper(reducer, data->node_origins());
@@ -919,7 +912,7 @@ PipelineStatistics* CreatePipelineStatistics(Handle<Script> script,
pipeline_statistics->BeginPhaseKind("V8.TFInitializing");
}
- if (info->trace_turbo_json_enabled()) {
+ if (info->trace_turbo_json()) {
TurboJsonFile json_of(info, std::ios_base::trunc);
json_of << "{\"function\" : ";
JsonPrintFunctionSource(json_of, -1, info->GetDebugName(), script, isolate,
@@ -937,15 +930,15 @@ PipelineStatistics* CreatePipelineStatistics(
PipelineStatistics* pipeline_statistics = nullptr;
bool tracing_enabled;
- TRACE_EVENT_CATEGORY_GROUP_ENABLED(TRACE_DISABLED_BY_DEFAULT("v8.wasm"),
- &tracing_enabled);
+ TRACE_EVENT_CATEGORY_GROUP_ENABLED(
+ TRACE_DISABLED_BY_DEFAULT("v8.wasm.detailed"), &tracing_enabled);
if (tracing_enabled || FLAG_turbo_stats_wasm) {
pipeline_statistics = new PipelineStatistics(
info, wasm_engine->GetOrCreateTurboStatistics(), zone_stats);
pipeline_statistics->BeginPhaseKind("V8.WasmInitializing");
}
- if (info->trace_turbo_json_enabled()) {
+ if (info->trace_turbo_json()) {
TurboJsonFile json_of(info, std::ios_base::trunc);
std::unique_ptr<char[]> function_name = info->GetDebugName();
json_of << "{\"function\":\"" << function_name.get() << "\", \"source\":\"";
@@ -1015,7 +1008,8 @@ PipelineCompilationJob::PipelineCompilationJob(
zone_(function->GetIsolate()->allocator(),
kPipelineCompilationJobZoneName),
zone_stats_(function->GetIsolate()->allocator()),
- compilation_info_(&zone_, function->GetIsolate(), shared_info, function),
+ compilation_info_(&zone_, function->GetIsolate(), shared_info, function,
+ FLAG_turbo_nci),
pipeline_statistics_(CreatePipelineStatistics(
handle(Script::cast(shared_info->script()), isolate),
compilation_info(), function->GetIsolate(), &zone_stats_)),
@@ -1027,7 +1021,7 @@ PipelineCompilationJob::PipelineCompilationJob(
compilation_info_.SetOptimizingForOsr(osr_offset, osr_frame);
}
-PipelineCompilationJob::~PipelineCompilationJob() {}
+PipelineCompilationJob::~PipelineCompilationJob() = default;
namespace {
// Ensure that the RuntimeStats table is set on the PipelineData for
@@ -1058,14 +1052,15 @@ PipelineCompilationJob::Status PipelineCompilationJob::PrepareJobImpl(
return AbortOptimization(BailoutReason::kFunctionTooBig);
}
- if (!FLAG_always_opt) {
- compilation_info()->MarkAsBailoutOnUninitialized();
+ if (!FLAG_always_opt && !compilation_info()->native_context_independent()) {
+ compilation_info()->set_bailout_on_uninitialized();
}
if (FLAG_turbo_loop_peeling) {
- compilation_info()->MarkAsLoopPeelingEnabled();
+ compilation_info()->set_loop_peeling();
}
- if (FLAG_turbo_inlining) {
- compilation_info()->MarkAsInliningEnabled();
+ if (FLAG_turbo_inlining &&
+ !compilation_info()->native_context_independent()) {
+ compilation_info()->set_inlining();
}
// This is the bottleneck for computing and setting poisoning level in the
@@ -1080,7 +1075,7 @@ PipelineCompilationJob::Status PipelineCompilationJob::PrepareJobImpl(
compilation_info()->SetPoisoningMitigationLevel(load_poisoning);
if (FLAG_turbo_allocation_folding) {
- compilation_info()->MarkAsAllocationFoldingEnabled();
+ compilation_info()->set_allocation_folding();
}
// Determine whether to specialize the code for the function's context.
@@ -1091,11 +1086,11 @@ PipelineCompilationJob::Status PipelineCompilationJob::PrepareJobImpl(
if (compilation_info()->closure()->raw_feedback_cell().map() ==
ReadOnlyRoots(isolate).one_closure_cell_map() &&
!compilation_info()->is_osr()) {
- compilation_info()->MarkAsFunctionContextSpecializing();
+ compilation_info()->set_function_context_specializing();
data_.ChooseSpecializationContext();
}
- if (compilation_info()->is_source_positions_enabled()) {
+ if (compilation_info()->source_positions()) {
SharedFunctionInfo::EnsureSourcePositionsAvailable(
isolate, compilation_info()->shared_info());
}
@@ -1269,20 +1264,20 @@ CompilationJob::Status WasmHeapStubCompilationJob::ExecuteJobImpl(
&info_, wasm_engine_->GetOrCreateTurboStatistics(), &zone_stats_));
pipeline_statistics->BeginPhaseKind("V8.WasmStubCodegen");
}
- if (info_.trace_turbo_json_enabled() || info_.trace_turbo_graph_enabled()) {
- CodeTracer::Scope tracing_scope(data_.GetCodeTracer());
- OFStream os(tracing_scope.file());
- os << "---------------------------------------------------\n"
- << "Begin compiling method " << info_.GetDebugName().get()
- << " using TurboFan" << std::endl;
+ if (info_.trace_turbo_json() || info_.trace_turbo_graph()) {
+ CodeTracer::StreamScope tracing_scope(data_.GetCodeTracer());
+ tracing_scope.stream()
+ << "---------------------------------------------------\n"
+ << "Begin compiling method " << info_.GetDebugName().get()
+ << " using TurboFan" << std::endl;
}
- if (info_.trace_turbo_graph_enabled()) { // Simple textual RPO.
+ if (info_.trace_turbo_graph()) { // Simple textual RPO.
StdoutStream{} << "-- wasm stub " << Code::Kind2String(info_.code_kind())
<< " graph -- " << std::endl
<< AsRPO(*data_.graph());
}
- if (info_.trace_turbo_json_enabled()) {
+ if (info_.trace_turbo_json()) {
TurboJsonFile json_of(&info_, std::ios_base::trunc);
json_of << "{\"function\":\"" << info_.GetDebugName().get()
<< "\", \"source\":\"\",\n\"phases\":[";
@@ -1306,9 +1301,9 @@ CompilationJob::Status WasmHeapStubCompilationJob::FinalizeJobImpl(
info_.SetCode(code);
#ifdef ENABLE_DISASSEMBLER
if (FLAG_print_opt_code) {
- CodeTracer::Scope tracing_scope(isolate->GetCodeTracer());
- OFStream os(tracing_scope.file());
- code->Disassemble(compilation_info()->GetDebugName().get(), os, isolate);
+ CodeTracer::StreamScope tracing_scope(isolate->GetCodeTracer());
+ code->Disassemble(compilation_info()->GetDebugName().get(),
+ tracing_scope.stream(), isolate);
}
#endif
return SUCCEEDED;
@@ -1341,12 +1336,15 @@ struct GraphBuilderPhase {
void Run(PipelineData* data, Zone* temp_zone) {
BytecodeGraphBuilderFlags flags;
- if (data->info()->is_analyze_environment_liveness()) {
+ if (data->info()->analyze_environment_liveness()) {
flags |= BytecodeGraphBuilderFlag::kAnalyzeEnvironmentLiveness;
}
- if (data->info()->is_bailout_on_uninitialized()) {
+ if (data->info()->bailout_on_uninitialized()) {
flags |= BytecodeGraphBuilderFlag::kBailoutOnUninitialized;
}
+ if (data->info()->native_context_independent()) {
+ flags |= BytecodeGraphBuilderFlag::kNativeContextIndependent;
+ }
JSFunctionRef closure(data->broker(), data->info()->closure());
CallFrequency frequency(1.0f);
@@ -1372,7 +1370,7 @@ struct InliningPhase {
data->broker(), data->common(),
data->machine(), temp_zone);
JSCallReducer::Flags call_reducer_flags = JSCallReducer::kNoFlags;
- if (data->info()->is_bailout_on_uninitialized()) {
+ if (data->info()->bailout_on_uninitialized()) {
call_reducer_flags |= JSCallReducer::kBailoutOnUninitialized;
}
JSCallReducer call_reducer(&graph_reducer, data->jsgraph(), data->broker(),
@@ -1381,12 +1379,12 @@ struct InliningPhase {
JSContextSpecialization context_specialization(
&graph_reducer, data->jsgraph(), data->broker(),
data->specialization_context(),
- data->info()->is_function_context_specializing()
+ data->info()->function_context_specializing()
? data->info()->closure()
: MaybeHandle<JSFunction>());
JSNativeContextSpecialization::Flags flags =
JSNativeContextSpecialization::kNoFlags;
- if (data->info()->is_bailout_on_uninitialized()) {
+ if (data->info()->bailout_on_uninitialized()) {
flags |= JSNativeContextSpecialization::kBailoutOnUninitialized;
}
// Passing the OptimizedCompilationInfo's shared zone here as
@@ -1404,11 +1402,13 @@ struct InliningPhase {
AddReducer(data, &graph_reducer, &dead_code_elimination);
AddReducer(data, &graph_reducer, &checkpoint_elimination);
AddReducer(data, &graph_reducer, &common_reducer);
- AddReducer(data, &graph_reducer, &native_context_specialization);
- AddReducer(data, &graph_reducer, &context_specialization);
+ if (!data->info()->native_context_independent()) {
+ AddReducer(data, &graph_reducer, &native_context_specialization);
+ AddReducer(data, &graph_reducer, &context_specialization);
+ }
AddReducer(data, &graph_reducer, &intrinsic_lowering);
AddReducer(data, &graph_reducer, &call_reducer);
- if (data->info()->is_inlining_enabled()) {
+ if (data->info()->inlining()) {
AddReducer(data, &graph_reducer, &inlining);
}
graph_reducer.ReduceGraph();
@@ -1497,17 +1497,17 @@ struct SerializationPhase {
void Run(PipelineData* data, Zone* temp_zone) {
SerializerForBackgroundCompilationFlags flags;
- if (data->info()->is_bailout_on_uninitialized()) {
+ if (data->info()->bailout_on_uninitialized()) {
flags |= SerializerForBackgroundCompilationFlag::kBailoutOnUninitialized;
}
- if (data->info()->is_source_positions_enabled()) {
+ if (data->info()->source_positions()) {
flags |= SerializerForBackgroundCompilationFlag::kCollectSourcePositions;
}
- if (data->info()->is_analyze_environment_liveness()) {
+ if (data->info()->analyze_environment_liveness()) {
flags |=
SerializerForBackgroundCompilationFlag::kAnalyzeEnvironmentLiveness;
}
- if (data->info()->is_inlining_enabled()) {
+ if (data->info()->inlining()) {
flags |= SerializerForBackgroundCompilationFlag::kEnableTurboInlining;
}
RunSerializerForBackgroundCompilation(
@@ -1545,7 +1545,9 @@ struct TypedLoweringPhase {
data->broker(), data->common(),
data->machine(), temp_zone);
AddReducer(data, &graph_reducer, &dead_code_elimination);
- AddReducer(data, &graph_reducer, &create_lowering);
+ if (!data->info()->native_context_independent()) {
+ AddReducer(data, &graph_reducer, &create_lowering);
+ }
AddReducer(data, &graph_reducer, &constant_folding_reducer);
AddReducer(data, &graph_reducer, &typed_lowering);
AddReducer(data, &graph_reducer, &typed_optimization);
@@ -1805,7 +1807,7 @@ struct MemoryOptimizationPhase {
// Optimize allocations and load/store operations.
MemoryOptimizer optimizer(
data->jsgraph(), temp_zone, data->info()->GetPoisoningMitigationLevel(),
- data->info()->is_allocation_folding_enabled()
+ data->info()->allocation_folding()
? MemoryLowering::AllocationFolding::kDoAllocationFolding
: MemoryLowering::AllocationFolding::kDontAllocationFolding,
data->debug_name(), &data->info()->tick_counter());
@@ -1997,8 +1999,8 @@ struct ComputeSchedulePhase {
void Run(PipelineData* data, Zone* temp_zone) {
Schedule* schedule = Scheduler::ComputeSchedule(
temp_zone, data->graph(),
- data->info()->is_splitting_enabled() ? Scheduler::kSplitNodes
- : Scheduler::kNoFlags,
+ data->info()->splitting() ? Scheduler::kSplitNodes
+ : Scheduler::kNoFlags,
&data->info()->tick_counter());
data->set_schedule(schedule);
}
@@ -2043,13 +2045,13 @@ struct InstructionSelectionPhase {
InstructionSelector selector(
temp_zone, data->graph()->NodeCount(), linkage, data->sequence(),
data->schedule(), data->source_positions(), data->frame(),
- data->info()->switch_jump_table_enabled()
+ data->info()->switch_jump_table()
? InstructionSelector::kEnableSwitchJumpTable
: InstructionSelector::kDisableSwitchJumpTable,
&data->info()->tick_counter(),
data->address_of_max_unoptimized_frame_height(),
data->address_of_max_pushed_argument_count(),
- data->info()->is_source_positions_enabled()
+ data->info()->source_positions()
? InstructionSelector::kAllSourcePositions
: InstructionSelector::kCallSourcePositions,
InstructionSelector::SupportedFeatures(),
@@ -2060,13 +2062,13 @@ struct InstructionSelectionPhase {
? InstructionSelector::kEnableRootsRelativeAddressing
: InstructionSelector::kDisableRootsRelativeAddressing,
data->info()->GetPoisoningMitigationLevel(),
- data->info()->trace_turbo_json_enabled()
+ data->info()->trace_turbo_json()
? InstructionSelector::kEnableTraceTurboJson
: InstructionSelector::kDisableTraceTurboJson);
if (!selector.SelectInstructions()) {
data->set_compilation_failed();
}
- if (data->info()->trace_turbo_json_enabled()) {
+ if (data->info()->trace_turbo_json()) {
TurboJsonFile json_of(data->info(), std::ios_base::app);
json_of << "{\"name\":\"" << phase_name()
<< "\",\"type\":\"instructions\""
@@ -2283,7 +2285,7 @@ struct PrintGraphPhase {
OptimizedCompilationInfo* info = data->info();
Graph* graph = data->graph();
- if (info->trace_turbo_json_enabled()) { // Print JSON.
+ if (info->trace_turbo_json()) { // Print JSON.
AllowHandleDereference allow_deref;
TurboJsonFile json_of(info, std::ios_base::app);
@@ -2292,7 +2294,7 @@ struct PrintGraphPhase {
<< "},\n";
}
- if (info->trace_turbo_scheduled_enabled()) {
+ if (info->trace_turbo_scheduled()) {
AccountingAllocator allocator;
Schedule* schedule = data->schedule();
if (schedule == nullptr) {
@@ -2302,16 +2304,16 @@ struct PrintGraphPhase {
}
AllowHandleDereference allow_deref;
- CodeTracer::Scope tracing_scope(data->GetCodeTracer());
- OFStream os(tracing_scope.file());
- os << "-- Graph after " << phase << " -- " << std::endl;
- os << AsScheduledGraph(schedule);
- } else if (info->trace_turbo_graph_enabled()) { // Simple textual RPO.
+ CodeTracer::StreamScope tracing_scope(data->GetCodeTracer());
+ tracing_scope.stream()
+ << "-- Graph after " << phase << " -- " << std::endl
+ << AsScheduledGraph(schedule);
+ } else if (info->trace_turbo_graph()) { // Simple textual RPO.
AllowHandleDereference allow_deref;
- CodeTracer::Scope tracing_scope(data->GetCodeTracer());
- OFStream os(tracing_scope.file());
- os << "-- Graph after " << phase << " -- " << std::endl;
- os << AsRPO(*graph);
+ CodeTracer::StreamScope tracing_scope(data->GetCodeTracer());
+ tracing_scope.stream()
+ << "-- Graph after " << phase << " -- " << std::endl
+ << AsRPO(*graph);
}
}
};
@@ -2345,8 +2347,7 @@ struct VerifyGraphPhase {
#undef DECL_PIPELINE_PHASE_CONSTANTS_HELPER
void PipelineImpl::RunPrintAndVerify(const char* phase, bool untyped) {
- if (info()->trace_turbo_json_enabled() ||
- info()->trace_turbo_graph_enabled()) {
+ if (info()->trace_turbo_json() || info()->trace_turbo_graph()) {
Run<PrintGraphPhase>(phase);
}
if (FLAG_turbo_verify) {
@@ -2359,21 +2360,20 @@ void PipelineImpl::Serialize() {
data->BeginPhaseKind("V8.TFBrokerInitAndSerialization");
- if (info()->trace_turbo_json_enabled() ||
- info()->trace_turbo_graph_enabled()) {
- CodeTracer::Scope tracing_scope(data->GetCodeTracer());
- OFStream os(tracing_scope.file());
- os << "---------------------------------------------------\n"
- << "Begin compiling method " << info()->GetDebugName().get()
- << " using TurboFan" << std::endl;
+ if (info()->trace_turbo_json() || info()->trace_turbo_graph()) {
+ CodeTracer::StreamScope tracing_scope(data->GetCodeTracer());
+ tracing_scope.stream()
+ << "---------------------------------------------------\n"
+ << "Begin compiling method " << info()->GetDebugName().get()
+ << " using TurboFan" << std::endl;
}
- if (info()->trace_turbo_json_enabled()) {
+ if (info()->trace_turbo_json()) {
TurboCfgFile tcf(isolate());
tcf << AsC1VCompilation(info());
}
data->source_positions()->AddDecorator();
- if (data->info()->trace_turbo_json_enabled()) {
+ if (data->info()->trace_turbo_json()) {
data->node_origins()->AddDecorator();
}
@@ -2442,7 +2442,7 @@ bool PipelineImpl::OptimizeGraph(Linkage* linkage) {
Run<TypedLoweringPhase>();
RunPrintAndVerify(TypedLoweringPhase::phase_name());
- if (data->info()->is_loop_peeling_enabled()) {
+ if (data->info()->loop_peeling()) {
Run<LoopPeelingPhase>();
RunPrintAndVerify(LoopPeelingPhase::phase_name(), true);
} else {
@@ -2531,7 +2531,7 @@ bool PipelineImpl::OptimizeGraph(Linkage* linkage) {
RunPrintAndVerify(DecompressionOptimizationPhase::phase_name(), true);
data->source_positions()->RemoveDecorator();
- if (data->info()->trace_turbo_json_enabled()) {
+ if (data->info()->trace_turbo_json()) {
data->node_origins()->RemoveDecorator();
}
@@ -2598,7 +2598,7 @@ bool PipelineImpl::OptimizeGraphForMidTier(Linkage* linkage) {
RunPrintAndVerify(ScheduledMachineLoweringPhase::phase_name(), true);
data->source_positions()->RemoveDecorator();
- if (data->info()->trace_turbo_json_enabled()) {
+ if (data->info()->trace_turbo_json()) {
data->node_origins()->RemoveDecorator();
}
@@ -2621,8 +2621,9 @@ MaybeHandle<Code> Pipeline::GenerateCodeForCodeStub(
ZoneStats zone_stats(isolate->allocator());
NodeOriginTable node_origins(graph);
JumpOptimizationInfo jump_opt;
- bool should_optimize_jumps =
- isolate->serializer_enabled() && FLAG_turbo_rewrite_far_jumps;
+ bool should_optimize_jumps = isolate->serializer_enabled() &&
+ FLAG_turbo_rewrite_far_jumps &&
+ !FLAG_turbo_profiling;
PipelineData data(&zone_stats, &info, isolate, isolate->allocator(), graph,
jsgraph, nullptr, source_positions, &node_origins,
should_optimize_jumps ? &jump_opt : nullptr, options);
@@ -2639,12 +2640,12 @@ MaybeHandle<Code> Pipeline::GenerateCodeForCodeStub(
PipelineImpl pipeline(&data);
- if (info.trace_turbo_json_enabled() || info.trace_turbo_graph_enabled()) {
- CodeTracer::Scope tracing_scope(data.GetCodeTracer());
- OFStream os(tracing_scope.file());
- os << "---------------------------------------------------\n"
- << "Begin compiling " << debug_name << " using TurboFan" << std::endl;
- if (info.trace_turbo_json_enabled()) {
+ if (info.trace_turbo_json() || info.trace_turbo_graph()) {
+ CodeTracer::StreamScope tracing_scope(data.GetCodeTracer());
+ tracing_scope.stream()
+ << "---------------------------------------------------\n"
+ << "Begin compiling " << debug_name << " using TurboFan" << std::endl;
+ if (info.trace_turbo_json()) {
TurboJsonFile json_of(&info, std::ios_base::trunc);
json_of << "{\"function\" : ";
JsonPrintFunctionSource(json_of, -1, info.GetDebugName(),
@@ -2741,21 +2742,21 @@ wasm::WasmCompilationResult Pipeline::GenerateCodeForWasmNativeStub(
PipelineImpl pipeline(&data);
- if (info.trace_turbo_json_enabled() || info.trace_turbo_graph_enabled()) {
- CodeTracer::Scope tracing_scope(data.GetCodeTracer());
- OFStream os(tracing_scope.file());
- os << "---------------------------------------------------\n"
- << "Begin compiling method " << info.GetDebugName().get()
- << " using TurboFan" << std::endl;
+ if (info.trace_turbo_json() || info.trace_turbo_graph()) {
+ CodeTracer::StreamScope tracing_scope(data.GetCodeTracer());
+ tracing_scope.stream()
+ << "---------------------------------------------------\n"
+ << "Begin compiling method " << info.GetDebugName().get()
+ << " using TurboFan" << std::endl;
}
- if (info.trace_turbo_graph_enabled()) { // Simple textual RPO.
+ if (info.trace_turbo_graph()) { // Simple textual RPO.
StdoutStream{} << "-- wasm stub " << Code::Kind2String(kind) << " graph -- "
<< std::endl
<< AsRPO(*graph);
}
- if (info.trace_turbo_json_enabled()) {
+ if (info.trace_turbo_json()) {
TurboJsonFile json_of(&info, std::ios_base::trunc);
json_of << "{\"function\":\"" << info.GetDebugName().get()
<< "\", \"source\":\"\",\n\"phases\":[";
@@ -2783,7 +2784,7 @@ wasm::WasmCompilationResult Pipeline::GenerateCodeForWasmNativeStub(
DCHECK(result.succeeded());
- if (info.trace_turbo_json_enabled()) {
+ if (info.trace_turbo_json()) {
TurboJsonFile json_of(&info, std::ios_base::app);
json_of << "{\"name\":\"disassembly\",\"type\":\"disassembly\""
<< BlockStartsAsJSON{&code_generator->block_starts()}
@@ -2802,12 +2803,12 @@ wasm::WasmCompilationResult Pipeline::GenerateCodeForWasmNativeStub(
json_of << "\n}";
}
- if (info.trace_turbo_json_enabled() || info.trace_turbo_graph_enabled()) {
- CodeTracer::Scope tracing_scope(data.GetCodeTracer());
- OFStream os(tracing_scope.file());
- os << "---------------------------------------------------\n"
- << "Finished compiling method " << info.GetDebugName().get()
- << " using TurboFan" << std::endl;
+ if (info.trace_turbo_json() || info.trace_turbo_graph()) {
+ CodeTracer::StreamScope tracing_scope(data.GetCodeTracer());
+ tracing_scope.stream()
+ << "---------------------------------------------------\n"
+ << "Finished compiling method " << info.GetDebugName().get()
+ << " using TurboFan" << std::endl;
}
return result;
@@ -2862,7 +2863,7 @@ MaybeHandle<Code> Pipeline::GenerateCodeForTesting(
PipelineImpl pipeline(&data);
- if (info->trace_turbo_json_enabled()) {
+ if (info->trace_turbo_json()) {
TurboJsonFile json_of(info, std::ios_base::trunc);
json_of << "{\"function\":\"" << info->GetDebugName().get()
<< "\", \"source\":\"\",\n\"phases\":[";
@@ -2915,13 +2916,12 @@ void Pipeline::GenerateCodeForWasmFunction(
PipelineImpl pipeline(&data);
- if (data.info()->trace_turbo_json_enabled() ||
- data.info()->trace_turbo_graph_enabled()) {
- CodeTracer::Scope tracing_scope(data.GetCodeTracer());
- OFStream os(tracing_scope.file());
- os << "---------------------------------------------------\n"
- << "Begin compiling method " << data.info()->GetDebugName().get()
- << " using TurboFan" << std::endl;
+ if (data.info()->trace_turbo_json() || data.info()->trace_turbo_graph()) {
+ CodeTracer::StreamScope tracing_scope(data.GetCodeTracer());
+ tracing_scope.stream()
+ << "---------------------------------------------------\n"
+ << "Begin compiling method " << data.info()->GetDebugName().get()
+ << " using TurboFan" << std::endl;
}
pipeline.RunPrintAndVerify("V8.WasmMachineCode", true);
@@ -2929,7 +2929,7 @@ void Pipeline::GenerateCodeForWasmFunction(
data.BeginPhaseKind("V8.WasmOptimization");
const bool is_asm_js = is_asmjs_module(module);
if (FLAG_turbo_splitting && !is_asm_js) {
- data.info()->MarkAsSplittingEnabled();
+ data.info()->set_splitting();
}
if (FLAG_wasm_opt || is_asm_js) {
PipelineRunScope scope(&data, "V8.WasmFullOptimization",
@@ -2987,7 +2987,7 @@ void Pipeline::GenerateCodeForWasmFunction(
code_generator->GetProtectedInstructionsData();
result->result_tier = wasm::ExecutionTier::kTurbofan;
- if (data.info()->trace_turbo_json_enabled()) {
+ if (data.info()->trace_turbo_json()) {
TurboJsonFile json_of(data.info(), std::ios_base::app);
json_of << "{\"name\":\"disassembly\",\"type\":\"disassembly\""
<< BlockStartsAsJSON{&code_generator->block_starts()}
@@ -3006,13 +3006,12 @@ void Pipeline::GenerateCodeForWasmFunction(
json_of << "\n}";
}
- if (data.info()->trace_turbo_json_enabled() ||
- data.info()->trace_turbo_graph_enabled()) {
- CodeTracer::Scope tracing_scope(data.GetCodeTracer());
- OFStream os(tracing_scope.file());
- os << "---------------------------------------------------\n"
- << "Finished compiling method " << data.info()->GetDebugName().get()
- << " using TurboFan" << std::endl;
+ if (data.info()->trace_turbo_json() || data.info()->trace_turbo_graph()) {
+ CodeTracer::StreamScope tracing_scope(data.GetCodeTracer());
+ tracing_scope.stream()
+ << "---------------------------------------------------\n"
+ << "Finished compiling method " << data.info()->GetDebugName().get()
+ << " using TurboFan" << std::endl;
}
DCHECK(result->succeeded());
@@ -3054,7 +3053,7 @@ bool PipelineImpl::SelectInstructions(Linkage* linkage) {
DCHECK_NOT_NULL(data->schedule());
if (FLAG_turbo_profiling) {
- data->set_profiler_data(BasicBlockInstrumentor::Instrument(
+ data->info()->set_profiler_data(BasicBlockInstrumentor::Instrument(
info(), data->graph(), data->schedule(), data->isolate()));
}
@@ -3074,15 +3073,16 @@ bool PipelineImpl::SelectInstructions(Linkage* linkage) {
if (verify_stub_graph) {
if (FLAG_trace_verify_csa) {
AllowHandleDereference allow_deref;
- CodeTracer::Scope tracing_scope(data->GetCodeTracer());
- OFStream os(tracing_scope.file());
- os << "--------------------------------------------------\n"
- << "--- Verifying " << data->debug_name() << " generated by TurboFan\n"
- << "--------------------------------------------------\n"
- << *data->schedule()
- << "--------------------------------------------------\n"
- << "--- End of " << data->debug_name() << " generated by TurboFan\n"
- << "--------------------------------------------------\n";
+ CodeTracer::StreamScope tracing_scope(data->GetCodeTracer());
+ tracing_scope.stream()
+ << "--------------------------------------------------\n"
+ << "--- Verifying " << data->debug_name()
+ << " generated by TurboFan\n"
+ << "--------------------------------------------------\n"
+ << *data->schedule()
+ << "--------------------------------------------------\n"
+ << "--- End of " << data->debug_name() << " generated by TurboFan\n"
+ << "--------------------------------------------------\n";
}
Zone temp_zone(data->allocator(), kMachineGraphVerifierZoneName);
MachineGraphVerifier::Run(
@@ -3102,14 +3102,14 @@ bool PipelineImpl::SelectInstructions(Linkage* linkage) {
return false;
}
- if (info()->trace_turbo_json_enabled() && !data->MayHaveUnverifiableGraph()) {
+ if (info()->trace_turbo_json() && !data->MayHaveUnverifiableGraph()) {
AllowHandleDereference allow_deref;
TurboCfgFile tcf(isolate());
tcf << AsC1V("CodeGen", data->schedule(), data->source_positions(),
data->sequence());
}
- if (info()->trace_turbo_json_enabled()) {
+ if (info()->trace_turbo_json()) {
std::ostringstream source_position_output;
// Output source position information before the graph is deleted.
if (data_->source_positions() != nullptr) {
@@ -3244,7 +3244,7 @@ void PipelineImpl::AssembleCode(Linkage* linkage,
data->InitializeCodeGenerator(linkage, std::move(buffer));
Run<AssembleCodePhase>();
- if (data->info()->trace_turbo_json_enabled()) {
+ if (data->info()->trace_turbo_json()) {
TurboJsonFile json_of(data->info(), std::ios_base::app);
json_of << "{\"name\":\"code generation\""
<< ", \"type\":\"instructions\""
@@ -3271,18 +3271,10 @@ MaybeHandle<Code> PipelineImpl::FinalizeCode(bool retire_broker) {
return maybe_code;
}
- if (data->profiler_data()) {
-#ifdef ENABLE_DISASSEMBLER
- std::ostringstream os;
- code->Disassemble(nullptr, os, isolate());
- data->profiler_data()->SetCode(&os);
-#endif // ENABLE_DISASSEMBLER
- }
-
info()->SetCode(code);
PrintCode(isolate(), code, info());
- if (info()->trace_turbo_json_enabled()) {
+ if (info()->trace_turbo_json()) {
TurboJsonFile json_of(info(), std::ios_base::app);
json_of << "{\"name\":\"disassembly\",\"type\":\"disassembly\""
@@ -3302,13 +3294,12 @@ MaybeHandle<Code> PipelineImpl::FinalizeCode(bool retire_broker) {
JsonPrintAllSourceWithPositions(json_of, data->info(), isolate());
json_of << "\n}";
}
- if (info()->trace_turbo_json_enabled() ||
- info()->trace_turbo_graph_enabled()) {
- CodeTracer::Scope tracing_scope(data->GetCodeTracer());
- OFStream os(tracing_scope.file());
- os << "---------------------------------------------------\n"
- << "Finished compiling method " << info()->GetDebugName().get()
- << " using TurboFan" << std::endl;
+ if (info()->trace_turbo_json() || info()->trace_turbo_graph()) {
+ CodeTracer::StreamScope tracing_scope(data->GetCodeTracer());
+ tracing_scope.stream()
+ << "---------------------------------------------------\n"
+ << "Finished compiling method " << info()->GetDebugName().get()
+ << " using TurboFan" << std::endl;
}
data->EndPhaseKind();
return code;
@@ -3342,19 +3333,22 @@ namespace {
void TraceSequence(OptimizedCompilationInfo* info, PipelineData* data,
const char* phase_name) {
- if (info->trace_turbo_json_enabled()) {
+ if (info->trace_turbo_json()) {
AllowHandleDereference allow_deref;
TurboJsonFile json_of(info, std::ios_base::app);
- json_of << "{\"name\":\"" << phase_name << "\",\"type\":\"sequence\",";
- json_of << InstructionSequenceAsJSON{data->sequence()};
- json_of << "},\n";
- }
- if (info->trace_turbo_graph_enabled()) {
+ json_of << "{\"name\":\"" << phase_name << "\",\"type\":\"sequence\""
+ << ",\"blocks\":" << InstructionSequenceAsJSON{data->sequence()}
+ << ",\"register_allocation\":{"
+ << RegisterAllocationDataAsJSON{*(data->register_allocation_data()),
+ *(data->sequence())}
+ << "}},\n";
+ }
+ if (info->trace_turbo_graph()) {
AllowHandleDereference allow_deref;
- CodeTracer::Scope tracing_scope(data->GetCodeTracer());
- OFStream os(tracing_scope.file());
- os << "----- Instruction sequence " << phase_name << " -----\n"
- << *data->sequence();
+ CodeTracer::StreamScope tracing_scope(data->GetCodeTracer());
+ tracing_scope.stream() << "----- Instruction sequence " << phase_name
+ << " -----\n"
+ << *data->sequence();
}
}
@@ -3381,13 +3375,13 @@ void PipelineImpl::AllocateRegisters(const RegisterConfiguration* config,
#endif
RegisterAllocationFlags flags;
- if (data->info()->is_turbo_control_flow_aware_allocation()) {
+ if (data->info()->turbo_control_flow_aware_allocation()) {
flags |= RegisterAllocationFlag::kTurboControlFlowAwareAllocation;
}
- if (data->info()->is_turbo_preprocess_ranges()) {
+ if (data->info()->turbo_preprocess_ranges()) {
flags |= RegisterAllocationFlag::kTurboPreprocessRanges;
}
- if (data->info()->trace_turbo_allocation_enabled()) {
+ if (data->info()->trace_turbo_allocation()) {
flags |= RegisterAllocationFlag::kTraceAllocation;
}
data->InitializeRegisterAllocationData(config, call_descriptor, flags);
@@ -3405,16 +3399,15 @@ void PipelineImpl::AllocateRegisters(const RegisterConfiguration* config,
->RangesDefinedInDeferredStayInDeferred());
}
- if (info()->trace_turbo_json_enabled() && !data->MayHaveUnverifiableGraph()) {
+ if (info()->trace_turbo_json() && !data->MayHaveUnverifiableGraph()) {
TurboCfgFile tcf(isolate());
tcf << AsC1VRegisterAllocationData("PreAllocation",
data->register_allocation_data());
}
- if (info()->is_turbo_preprocess_ranges()) {
+ if (info()->turbo_preprocess_ranges()) {
Run<SplinterLiveRangesPhase>();
- if (info()->trace_turbo_json_enabled() &&
- !data->MayHaveUnverifiableGraph()) {
+ if (info()->trace_turbo_json() && !data->MayHaveUnverifiableGraph()) {
TurboCfgFile tcf(isolate());
tcf << AsC1VRegisterAllocationData("PostSplinter",
data->register_allocation_data());
@@ -3427,7 +3420,7 @@ void PipelineImpl::AllocateRegisters(const RegisterConfiguration* config,
Run<AllocateFPRegistersPhase<LinearScanAllocator>>();
}
- if (info()->is_turbo_preprocess_ranges()) {
+ if (info()->turbo_preprocess_ranges()) {
Run<MergeSplintersPhase>();
}
@@ -3459,7 +3452,7 @@ void PipelineImpl::AllocateRegisters(const RegisterConfiguration* config,
verifier->VerifyGapMoves();
}
- if (info()->trace_turbo_json_enabled() && !data->MayHaveUnverifiableGraph()) {
+ if (info()->trace_turbo_json() && !data->MayHaveUnverifiableGraph()) {
TurboCfgFile tcf(isolate());
tcf << AsC1VRegisterAllocationData("CodeGen",
data->register_allocation_data());
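
Editorial note: the PipelineImpl hunks above replace the ad-hoc CodeTracer::Scope plus OFStream pairs with a single CodeTracer::StreamScope whose stream() is written to directly. Below is a minimal standalone sketch of that RAII "scope owns the stream" pattern, using hypothetical names (TraceScope, turbo-trace.log) rather than V8's real types.

#include <fstream>
#include <iostream>
#include <ostream>
#include <string>

// TraceScope is a hypothetical stand-in for CodeTracer::StreamScope: the
// scope owns the output stream for its lifetime, and call sites write
// through stream() instead of constructing their own stream object.
class TraceScope {
 public:
  explicit TraceScope(const std::string& path)
      : file_(path, std::ios_base::app),
        out_(file_.is_open() ? static_cast<std::ostream*>(&file_)
                             : &std::cout) {}
  std::ostream& stream() { return *out_; }

 private:
  std::ofstream file_;
  std::ostream* out_;
};

int main() {
  TraceScope tracing_scope("turbo-trace.log");  // hypothetical trace file
  tracing_scope.stream()
      << "---------------------------------------------------\n"
      << "Finished compiling method demo using TurboFan" << std::endl;
  return 0;
}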
diff --git a/chromium/v8/src/compiler/representation-change.cc b/chromium/v8/src/compiler/representation-change.cc
index 7077f7d643f..5967d1005e5 100644
--- a/chromium/v8/src/compiler/representation-change.cc
+++ b/chromium/v8/src/compiler/representation-change.cc
@@ -11,6 +11,7 @@
#include "src/compiler/js-heap-broker.h"
#include "src/compiler/machine-operator.h"
#include "src/compiler/node-matchers.h"
+#include "src/compiler/simplified-operator.h"
#include "src/compiler/type-cache.h"
#include "src/heap/factory-inl.h"
@@ -210,6 +211,7 @@ Node* RepresentationChanger::GetRepresentationFor(
return GetFloat32RepresentationFor(node, output_rep, output_type,
use_info.truncation());
case MachineRepresentation::kFloat64:
+ DCHECK_NE(TypeCheckKind::kBigInt, use_info.type_check());
return GetFloat64RepresentationFor(node, output_rep, output_type,
use_node, use_info);
case MachineRepresentation::kBit:
@@ -402,7 +404,22 @@ Node* RepresentationChanger::GetTaggedPointerRepresentationFor(
return jsgraph()->graph()->NewNode(
jsgraph()->common()->DeadValue(MachineRepresentation::kTaggedPointer),
node);
- } else if (output_rep == MachineRepresentation::kBit) {
+ }
+
+ if (use_info.type_check() == TypeCheckKind::kBigInt &&
+ !output_type.Is(Type::BigInt())) {
+ // BigInt checks can only be performed on tagged representations. Note that
+ // a corresponding check is inserted down below.
+ if (!CanBeTaggedPointer(output_rep)) {
+ Node* unreachable =
+ InsertUnconditionalDeopt(use_node, DeoptimizeReason::kNotABigInt);
+ return jsgraph()->graph()->NewNode(
+ jsgraph()->common()->DeadValue(MachineRepresentation::kTaggedPointer),
+ unreachable);
+ }
+ }
+
+ if (output_rep == MachineRepresentation::kBit) {
if (output_type.Is(Type::Boolean())) {
op = simplified()->ChangeBitToTagged();
} else {
@@ -427,7 +444,8 @@ Node* RepresentationChanger::GetTaggedPointerRepresentationFor(
op = machine()->ChangeInt64ToFloat64();
node = jsgraph()->graph()->NewNode(op, node);
op = simplified()->ChangeFloat64ToTaggedPointer();
- } else if (output_type.Is(Type::BigInt())) {
+ } else if (output_type.Is(Type::BigInt()) &&
+ use_info.type_check() == TypeCheckKind::kBigInt) {
op = simplified()->ChangeUint64ToBigInt();
} else {
return TypeError(node, output_rep, output_type,
@@ -662,6 +680,7 @@ Node* RepresentationChanger::GetFloat64RepresentationFor(
switch (use_info.type_check()) {
case TypeCheckKind::kNone:
case TypeCheckKind::kNumber:
+ case TypeCheckKind::kNumberOrBoolean:
case TypeCheckKind::kNumberOrOddball:
return jsgraph()->Float64Constant(m.Value());
case TypeCheckKind::kBigInt:
@@ -695,6 +714,7 @@ Node* RepresentationChanger::GetFloat64RepresentationFor(
} else if (output_rep == MachineRepresentation::kBit) {
CHECK(output_type.Is(Type::Boolean()));
if (use_info.truncation().TruncatesOddballAndBigIntToNumber() ||
+ use_info.type_check() == TypeCheckKind::kNumberOrBoolean ||
use_info.type_check() == TypeCheckKind::kNumberOrOddball) {
op = machine()->ChangeUint32ToFloat64();
} else {
@@ -707,9 +727,16 @@ Node* RepresentationChanger::GetFloat64RepresentationFor(
}
} else if (IsAnyTagged(output_rep)) {
if (output_type.Is(Type::Undefined())) {
- return jsgraph()->Float64Constant(
- std::numeric_limits<double>::quiet_NaN());
-
+ if (use_info.type_check() == TypeCheckKind::kNumberOrBoolean) {
+ Node* unreachable = InsertUnconditionalDeopt(
+ use_node, DeoptimizeReason::kNotANumberOrBoolean);
+ return jsgraph()->graph()->NewNode(
+ jsgraph()->common()->DeadValue(MachineRepresentation::kFloat64),
+ unreachable);
+ } else {
+ return jsgraph()->Float64Constant(
+ std::numeric_limits<double>::quiet_NaN());
+ }
} else if (output_rep == MachineRepresentation::kTaggedSigned) {
node = InsertChangeTaggedSignedToInt32(node);
op = machine()->ChangeInt32ToFloat64();
@@ -732,6 +759,9 @@ Node* RepresentationChanger::GetFloat64RepresentationFor(
!output_type.Maybe(Type::BooleanOrNullOrNumber()))) {
op = simplified()->CheckedTaggedToFloat64(CheckTaggedInputMode::kNumber,
use_info.feedback());
+ } else if (use_info.type_check() == TypeCheckKind::kNumberOrBoolean) {
+ op = simplified()->CheckedTaggedToFloat64(
+ CheckTaggedInputMode::kNumberOrBoolean, use_info.feedback());
} else if (use_info.type_check() == TypeCheckKind::kNumberOrOddball) {
op = simplified()->CheckedTaggedToFloat64(
CheckTaggedInputMode::kNumberOrOddball, use_info.feedback());
@@ -1045,12 +1075,14 @@ Node* RepresentationChanger::GetWord64RepresentationFor(
case IrOpcode::kFloat64Constant:
UNREACHABLE();
case IrOpcode::kNumberConstant: {
- double const fv = OpParameter<double>(node->op());
- using limits = std::numeric_limits<int64_t>;
- if (fv <= limits::max() && fv >= limits::min()) {
- int64_t const iv = static_cast<int64_t>(fv);
- if (static_cast<double>(iv) == fv) {
- return jsgraph()->Int64Constant(iv);
+ if (use_info.type_check() != TypeCheckKind::kBigInt) {
+ double const fv = OpParameter<double>(node->op());
+ using limits = std::numeric_limits<int64_t>;
+ if (fv <= limits::max() && fv >= limits::min()) {
+ int64_t const iv = static_cast<int64_t>(fv);
+ if (static_cast<double>(iv) == fv) {
+ return jsgraph()->Int64Constant(iv);
+ }
}
}
break;
@@ -1069,6 +1101,19 @@ Node* RepresentationChanger::GetWord64RepresentationFor(
break;
}
+ if (use_info.type_check() == TypeCheckKind::kBigInt) {
+ // BigInts are only represented as tagged pointer and word64.
+ if (!CanBeTaggedPointer(output_rep) &&
+ output_rep != MachineRepresentation::kWord64) {
+ DCHECK(!output_type.Is(Type::BigInt()));
+ Node* unreachable =
+ InsertUnconditionalDeopt(use_node, DeoptimizeReason::kNotABigInt);
+ return jsgraph()->graph()->NewNode(
+ jsgraph()->common()->DeadValue(MachineRepresentation::kWord64),
+ unreachable);
+ }
+ }
+
// Select the correct X -> Word64 operator.
const Operator* op;
if (output_type.Is(Type::None())) {
@@ -1079,6 +1124,7 @@ Node* RepresentationChanger::GetWord64RepresentationFor(
CHECK(output_type.Is(Type::Boolean()));
CHECK_NE(use_info.type_check(), TypeCheckKind::kNone);
CHECK_NE(use_info.type_check(), TypeCheckKind::kNumberOrOddball);
+ CHECK_NE(use_info.type_check(), TypeCheckKind::kBigInt);
Node* unreachable =
InsertUnconditionalDeopt(use_node, DeoptimizeReason::kNotASmi);
return jsgraph()->graph()->NewNode(
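
Editorial note: the kNumberConstant case above only folds a double constant to an Int64Constant when no BigInt check is requested and the value round-trips through int64_t exactly. A small self-contained illustration of that round-trip test follows; the helper name is hypothetical, and the upper bound is tightened to 2^63 so the cast is always defined.

#include <cstdint>
#include <iostream>
#include <limits>
#include <optional>

// Returns the int64 value if the double folds losslessly, otherwise empty.
std::optional<int64_t> TryFoldToInt64(double fv) {
  using limits = std::numeric_limits<int64_t>;
  if (fv >= static_cast<double>(limits::min()) &&
      fv < 9223372036854775808.0 /* 2^63, keeps the cast in range */) {
    int64_t const iv = static_cast<int64_t>(fv);
    if (static_cast<double>(iv) == fv) return iv;  // exact round-trip
  }
  return std::nullopt;
}

int main() {
  std::cout << TryFoldToInt64(42.0).has_value() << "\n";    // 1: folds
  std::cout << TryFoldToInt64(0.5).has_value() << "\n";     // 0: fractional
  std::cout << TryFoldToInt64(9.3e18).has_value() << "\n";  // 0: out of range
  return 0;
}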
diff --git a/chromium/v8/src/compiler/representation-change.h b/chromium/v8/src/compiler/representation-change.h
index 78fa1fbe9dc..3654b089fca 100644
--- a/chromium/v8/src/compiler/representation-change.h
+++ b/chromium/v8/src/compiler/representation-change.h
@@ -119,6 +119,7 @@ enum class TypeCheckKind : uint8_t {
kSigned32,
kSigned64,
kNumber,
+ kNumberOrBoolean,
kNumberOrOddball,
kHeapObject,
kBigInt,
@@ -137,6 +138,8 @@ inline std::ostream& operator<<(std::ostream& os, TypeCheckKind type_check) {
return os << "Signed64";
case TypeCheckKind::kNumber:
return os << "Number";
+ case TypeCheckKind::kNumberOrBoolean:
+ return os << "NumberOrBoolean";
case TypeCheckKind::kNumberOrOddball:
return os << "NumberOrOddball";
case TypeCheckKind::kHeapObject:
@@ -266,6 +269,12 @@ class UseInfo {
return UseInfo(MachineRepresentation::kWord32, Truncation::Word32(),
TypeCheckKind::kNumber, feedback);
}
+ static UseInfo CheckedNumberOrBooleanAsFloat64(
+ IdentifyZeros identify_zeros, const FeedbackSource& feedback) {
+ return UseInfo(MachineRepresentation::kFloat64,
+ Truncation::Any(identify_zeros),
+ TypeCheckKind::kNumberOrBoolean, feedback);
+ }
static UseInfo CheckedNumberOrOddballAsFloat64(
IdentifyZeros identify_zeros, const FeedbackSource& feedback) {
return UseInfo(MachineRepresentation::kFloat64,
diff --git a/chromium/v8/src/compiler/schedule.cc b/chromium/v8/src/compiler/schedule.cc
index cc3243cb2e1..1b0caa7567f 100644
--- a/chromium/v8/src/compiler/schedule.cc
+++ b/chromium/v8/src/compiler/schedule.cc
@@ -228,7 +228,7 @@ namespace {
bool IsPotentiallyThrowingCall(IrOpcode::Value opcode) {
switch (opcode) {
-#define BUILD_BLOCK_JS_CASE(Name) case IrOpcode::k##Name:
+#define BUILD_BLOCK_JS_CASE(Name, ...) case IrOpcode::k##Name:
JS_OP_LIST(BUILD_BLOCK_JS_CASE)
#undef BUILD_BLOCK_JS_CASE
case IrOpcode::kCall:
diff --git a/chromium/v8/src/compiler/scheduler.cc b/chromium/v8/src/compiler/scheduler.cc
index 0b0a5484117..ddd97f3e1e0 100644
--- a/chromium/v8/src/compiler/scheduler.cc
+++ b/chromium/v8/src/compiler/scheduler.cc
@@ -354,7 +354,7 @@ class CFGBuilder : public ZoneObject {
case IrOpcode::kSwitch:
BuildBlocksForSuccessors(node);
break;
-#define BUILD_BLOCK_JS_CASE(Name) case IrOpcode::k##Name:
+#define BUILD_BLOCK_JS_CASE(Name, ...) case IrOpcode::k##Name:
JS_OP_LIST(BUILD_BLOCK_JS_CASE)
// JS opcodes are just like calls => fall through.
#undef BUILD_BLOCK_JS_CASE
@@ -398,7 +398,7 @@ class CFGBuilder : public ZoneObject {
scheduler_->UpdatePlacement(node, Scheduler::kFixed);
ConnectThrow(node);
break;
-#define CONNECT_BLOCK_JS_CASE(Name) case IrOpcode::k##Name:
+#define CONNECT_BLOCK_JS_CASE(Name, ...) case IrOpcode::k##Name:
JS_OP_LIST(CONNECT_BLOCK_JS_CASE)
// JS opcodes are just like calls => fall through.
#undef CONNECT_BLOCK_JS_CASE
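
Editorial note: the schedule.cc and scheduler.cc hunks (and several later ones in typer.cc and simplified-lowering.cc) change case-generating macros from (Name) to (Name, ...) because the opcode lists now pass extra per-entry arguments that these expansions do not need. A toy X-macro sketch of the idea; DEMO_OP_LIST and its arities are made up and are not V8's JS_OP_LIST.

#include <iostream>

// Each list entry now carries an extra argument (here: an arity).
#define DEMO_OP_LIST(V) \
  V(Add, 2)             \
  V(Negate, 1)

enum class Opcode {
// This expansion only needs the name, so it swallows the rest via "...".
#define DECLARE_ENUM(Name, ...) k##Name,
  DEMO_OP_LIST(DECLARE_ENUM)
#undef DECLARE_ENUM
};

int Arity(Opcode op) {
  switch (op) {
// This expansion does use the extra argument.
#define ARITY_CASE(Name, arity) \
  case Opcode::k##Name:         \
    return arity;
    DEMO_OP_LIST(ARITY_CASE)
#undef ARITY_CASE
  }
  return -1;
}

int main() {
  std::cout << Arity(Opcode::kAdd) << "\n";  // prints 2
  return 0;
}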
diff --git a/chromium/v8/src/compiler/simd-scalar-lowering.cc b/chromium/v8/src/compiler/simd-scalar-lowering.cc
index 21d34b21d25..82ccc463261 100644
--- a/chromium/v8/src/compiler/simd-scalar-lowering.cc
+++ b/chromium/v8/src/compiler/simd-scalar-lowering.cc
@@ -142,12 +142,13 @@ void SimdScalarLowering::LowerGraph() {
V(S128Or) \
V(S128Xor) \
V(S128Not) \
- V(S1x4AnyTrue) \
- V(S1x4AllTrue) \
- V(S1x8AnyTrue) \
- V(S1x8AllTrue) \
- V(S1x16AnyTrue) \
- V(S1x16AllTrue)
+ V(V32x4AnyTrue) \
+ V(V32x4AllTrue) \
+ V(V16x8AnyTrue) \
+ V(V16x8AllTrue) \
+ V(V8x16AnyTrue) \
+ V(V8x16AllTrue) \
+ V(I32x4BitMask)
#define FOREACH_FLOAT64X2_OPCODE(V) V(F64x2Splat)
@@ -212,7 +213,8 @@ void SimdScalarLowering::LowerGraph() {
V(I16x8LtU) \
V(I16x8LeU) \
V(I16x8RoundingAverageU) \
- V(I16x8Abs)
+ V(I16x8Abs) \
+ V(I16x8BitMask)
#define FOREACH_INT8X16_OPCODE(V) \
V(I8x16Splat) \
@@ -245,7 +247,8 @@ void SimdScalarLowering::LowerGraph() {
V(S8x16Swizzle) \
V(S8x16Shuffle) \
V(I8x16RoundingAverageU) \
- V(I8x16Abs)
+ V(I8x16Abs) \
+ V(I8x16BitMask)
MachineType SimdScalarLowering::MachineTypeFrom(SimdType simdType) {
switch (simdType) {
@@ -1025,6 +1028,44 @@ void SimdScalarLowering::LowerNotEqual(Node* node, SimdType input_rep_type,
ReplaceNode(node, rep_node, num_lanes);
}
+void SimdScalarLowering::LowerBitMaskOp(Node* node, SimdType rep_type,
+ int msb_index) {
+ Node** reps = GetReplacementsWithType(node->InputAt(0), rep_type);
+ int num_lanes = NumLanes(rep_type);
+ Node** rep_node = zone()->NewArray<Node*>(1);
+ Node* result = mcgraph_->Int32Constant(0);
+ uint32_t mask = 1 << msb_index;
+
+ for (int i = 0; i < num_lanes; ++i) {
+ // Lane i should end up at bit i in the final result.
+ // +-----------------------------------------------------------------+
+ // | | msb_index | (i < msb_index) | (i > msb_index) |
+ // +-------+-----------+----------------------+----------------------+
+ // | i8x16 | 7 | msb >> (msb_index-i) | msb << (i-msb_index) |
+ // | i16x8 | 15 | msb >> (msb_index-i) | n/a |
+ // | i32x4 | 31 | msb >> (msb_index-i) | n/a |
+ // +-------+-----------+----------------------+----------------------+
+ Node* msb = Mask(reps[i], mask);
+
+ if (i < msb_index) {
+ int shift = msb_index - i;
+ Node* shifted = graph()->NewNode(machine()->Word32Shr(), msb,
+ mcgraph_->Int32Constant(shift));
+ result = graph()->NewNode(machine()->Word32Or(), shifted, result);
+ } else if (i > msb_index) {
+ int shift = i - msb_index;
+ Node* shifted = graph()->NewNode(machine()->Word32Shl(), msb,
+ mcgraph_->Int32Constant(shift));
+ result = graph()->NewNode(machine()->Word32Or(), shifted, result);
+ } else {
+ result = graph()->NewNode(machine()->Word32Or(), msb, result);
+ }
+ }
+
+ rep_node[0] = result;
+ ReplaceNode(node, rep_node, 1);
+}
+
void SimdScalarLowering::LowerNode(Node* node) {
SimdType rep_type = ReplacementType(node);
int num_lanes = NumLanes(rep_type);
@@ -1627,12 +1668,12 @@ void SimdScalarLowering::LowerNode(Node* node) {
ReplaceNode(node, rep_node, 16);
break;
}
- case IrOpcode::kS1x4AnyTrue:
- case IrOpcode::kS1x4AllTrue:
- case IrOpcode::kS1x8AnyTrue:
- case IrOpcode::kS1x8AllTrue:
- case IrOpcode::kS1x16AnyTrue:
- case IrOpcode::kS1x16AllTrue: {
+ case IrOpcode::kV32x4AnyTrue:
+ case IrOpcode::kV32x4AllTrue:
+ case IrOpcode::kV16x8AnyTrue:
+ case IrOpcode::kV16x8AllTrue:
+ case IrOpcode::kV8x16AnyTrue:
+ case IrOpcode::kV8x16AllTrue: {
DCHECK_EQ(1, node->InputCount());
SimdType input_rep_type = ReplacementType(node->InputAt(0));
Node** rep;
@@ -1649,18 +1690,18 @@ void SimdScalarLowering::LowerNode(Node* node) {
Node* true_node = mcgraph_->Int32Constant(1);
Node* false_node = mcgraph_->Int32Constant(0);
Node* tmp_result = false_node;
- if (node->opcode() == IrOpcode::kS1x4AllTrue ||
- node->opcode() == IrOpcode::kS1x8AllTrue ||
- node->opcode() == IrOpcode::kS1x16AllTrue) {
+ if (node->opcode() == IrOpcode::kV32x4AllTrue ||
+ node->opcode() == IrOpcode::kV16x8AllTrue ||
+ node->opcode() == IrOpcode::kV8x16AllTrue) {
tmp_result = true_node;
}
for (int i = 0; i < input_num_lanes; ++i) {
Diamond is_false(
graph(), common(),
graph()->NewNode(machine()->Word32Equal(), rep[i], false_node));
- if (node->opcode() == IrOpcode::kS1x4AllTrue ||
- node->opcode() == IrOpcode::kS1x8AllTrue ||
- node->opcode() == IrOpcode::kS1x16AllTrue) {
+ if (node->opcode() == IrOpcode::kV32x4AllTrue ||
+ node->opcode() == IrOpcode::kV16x8AllTrue ||
+ node->opcode() == IrOpcode::kV8x16AllTrue) {
tmp_result = is_false.Phi(MachineRepresentation::kWord32, false_node,
tmp_result);
} else {
@@ -1675,6 +1716,18 @@ void SimdScalarLowering::LowerNode(Node* node) {
ReplaceNode(node, rep_node, num_lanes);
break;
}
+ case IrOpcode::kI8x16BitMask: {
+ LowerBitMaskOp(node, rep_type, 7);
+ break;
+ }
+ case IrOpcode::kI16x8BitMask: {
+ LowerBitMaskOp(node, rep_type, 15);
+ break;
+ }
+ case IrOpcode::kI32x4BitMask: {
+ LowerBitMaskOp(node, rep_type, 31);
+ break;
+ }
case IrOpcode::kI8x16RoundingAverageU:
case IrOpcode::kI16x8RoundingAverageU: {
DCHECK_EQ(2, node->InputCount());
@@ -1707,7 +1760,7 @@ bool SimdScalarLowering::DefaultLowering(Node* node) {
something_changed = true;
node->ReplaceInput(i, GetReplacements(input)[0]);
}
- if (HasReplacement(1, input)) {
+ if (ReplacementCount(input) > 1 && HasReplacement(1, input)) {
something_changed = true;
for (int j = 1; j < ReplacementCount(input); ++j) {
node->InsertInput(zone(), i + j, GetReplacements(input)[j]);
diff --git a/chromium/v8/src/compiler/simd-scalar-lowering.h b/chromium/v8/src/compiler/simd-scalar-lowering.h
index d91e6285f4e..a852f94c7c7 100644
--- a/chromium/v8/src/compiler/simd-scalar-lowering.h
+++ b/chromium/v8/src/compiler/simd-scalar-lowering.h
@@ -110,6 +110,7 @@ class SimdScalarLowering {
Node* BuildF64Trunc(Node* input);
void LowerNotEqual(Node* node, SimdType input_rep_type, const Operator* op);
MachineType MachineTypeFrom(SimdType simdType);
+ void LowerBitMaskOp(Node* node, SimdType rep_type, int msb_index);
MachineGraph* const mcgraph_;
NodeMarker<State> state_;
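
Editorial note: LowerBitMaskOp, added in the simd-scalar-lowering.cc hunks above, scalarizes the new BitMask opcodes by masking out each lane's sign bit and shifting it into bit i of a single word32 result, per the table in its comment. Below is a standalone scalar model of that computation over plain arrays instead of TurboFan nodes; the sample lane values are illustrative only.

#include <cstdint>
#include <iostream>

// Collects the bit at msb_index of each lane into bit i of the result.
uint32_t BitMask(const uint32_t* lanes, int num_lanes, int msb_index) {
  const uint32_t mask = 1u << msb_index;
  uint32_t result = 0;
  for (int i = 0; i < num_lanes; ++i) {
    uint32_t msb = lanes[i] & mask;  // isolate the lane's sign bit
    if (i < msb_index) {
      result |= msb >> (msb_index - i);
    } else if (i > msb_index) {  // only reachable for the i8x16 case
      result |= msb << (i - msb_index);
    } else {
      result |= msb;
    }
  }
  return result;
}

int main() {
  // i32x4 example: lanes 0 and 2 have their MSB set -> bitmask 0b0101.
  uint32_t lanes[4] = {0x80000000u, 0x1u, 0xFFFFFFFFu, 0x0u};
  std::cout << BitMask(lanes, 4, 31) << "\n";  // prints 5
  return 0;
}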
diff --git a/chromium/v8/src/compiler/simplified-lowering.cc b/chromium/v8/src/compiler/simplified-lowering.cc
index d00acefc39c..dd297d0b121 100644
--- a/chromium/v8/src/compiler/simplified-lowering.cc
+++ b/chromium/v8/src/compiler/simplified-lowering.cc
@@ -105,6 +105,9 @@ UseInfo CheckedUseInfoAsWord32FromHint(
return UseInfo::CheckedSigned32AsWord32(identify_zeros, feedback);
case NumberOperationHint::kNumber:
return UseInfo::CheckedNumberAsWord32(feedback);
+ case NumberOperationHint::kNumberOrBoolean:
+ // Not used currently.
+ UNREACHABLE();
case NumberOperationHint::kNumberOrOddball:
return UseInfo::CheckedNumberOrOddballAsWord32(feedback);
}
@@ -122,6 +125,8 @@ UseInfo CheckedUseInfoAsFloat64FromHint(
UNREACHABLE();
case NumberOperationHint::kNumber:
return UseInfo::CheckedNumberAsFloat64(identify_zeros, feedback);
+ case NumberOperationHint::kNumberOrBoolean:
+ return UseInfo::CheckedNumberOrBooleanAsFloat64(identify_zeros, feedback);
case NumberOperationHint::kNumberOrOddball:
return UseInfo::CheckedNumberOrOddballAsFloat64(identify_zeros, feedback);
}
@@ -178,10 +183,16 @@ void ReplaceEffectControlUses(Node* node, Node* effect, Node* control) {
}
bool CanOverflowSigned32(const Operator* op, Type left, Type right,
- Zone* type_zone) {
- // We assume the inputs are checked Signed32 (or known statically
- // to be Signed32). Technically, the inputs could also be minus zero, but
- // that cannot cause overflow.
+ TypeCache const* type_cache, Zone* type_zone) {
+ // We assume the inputs are checked Signed32 (or known statically to be
+ // Signed32). Technically, the inputs could also be minus zero, which we treat
+ // as 0 for the purpose of this function.
+ if (left.Maybe(Type::MinusZero())) {
+ left = Type::Union(left, type_cache->kSingletonZero, type_zone);
+ }
+ if (right.Maybe(Type::MinusZero())) {
+ right = Type::Union(right, type_cache->kSingletonZero, type_zone);
+ }
left = Type::Intersect(left, Type::Signed32(), type_zone);
right = Type::Intersect(right, Type::Signed32(), type_zone);
if (left.IsNone() || right.IsNone()) return false;
@@ -291,11 +302,10 @@ class RepresentationSelector {
#ifdef DEBUG
node_input_use_infos_(count_, InputUseInfos(zone), zone),
#endif
- nodes_(zone),
replacements_(zone),
changer_(changer),
- queue_(zone),
- typing_stack_(zone),
+ revisit_queue_(zone),
+ traversal_nodes_(zone),
source_positions_(source_positions),
node_origins_(node_origins),
type_cache_(TypeCache::Get()),
@@ -303,90 +313,6 @@ class RepresentationSelector {
tick_counter_(tick_counter) {
}
- // Forward propagation of types from type feedback.
- void RunTypePropagationPhase() {
- // Run type propagation.
- TRACE("--{Type propagation phase}--\n");
- ResetNodeInfoState();
-
- DCHECK(typing_stack_.empty());
- typing_stack_.push({graph()->end(), 0});
- GetInfo(graph()->end())->set_pushed();
- while (!typing_stack_.empty()) {
- NodeState& current = typing_stack_.top();
-
- // If there is an unvisited input, push it and continue.
- bool pushed_unvisited = false;
- while (current.input_index < current.node->InputCount()) {
- Node* input = current.node->InputAt(current.input_index);
- NodeInfo* input_info = GetInfo(input);
- current.input_index++;
- if (input_info->unvisited()) {
- input_info->set_pushed();
- typing_stack_.push({input, 0});
- pushed_unvisited = true;
- break;
- } else if (input_info->pushed()) {
- // If we had already pushed (and not visited) an input, it means that
- // the current node will be visited before one of its inputs. If this
- // happens, the current node might need to be revisited.
- MarkAsPossibleRevisit(current.node, input);
- }
- }
- if (pushed_unvisited) continue;
-
- // Process the top of the stack.
- Node* node = current.node;
- typing_stack_.pop();
- NodeInfo* info = GetInfo(node);
- info->set_visited();
- bool updated = UpdateFeedbackType(node);
- TRACE(" visit #%d: %s\n", node->id(), node->op()->mnemonic());
- VisitNode<RETYPE>(node, info->truncation(), nullptr);
- TRACE(" ==> output ");
- PrintOutputInfo(info);
- TRACE("\n");
- if (updated) {
- auto it = might_need_revisit_.find(node);
- if (it == might_need_revisit_.end()) continue;
-
- for (Node* const user : it->second) {
- if (GetInfo(user)->visited()) {
- TRACE(" QUEUEING #%d: %s\n", user->id(), user->op()->mnemonic());
- GetInfo(user)->set_queued();
- queue_.push(user);
- }
- }
- }
- }
-
- // Process the revisit queue.
- while (!queue_.empty()) {
- Node* node = queue_.front();
- queue_.pop();
- NodeInfo* info = GetInfo(node);
- info->set_visited();
- bool updated = UpdateFeedbackType(node);
- TRACE(" revisit #%d: %s\n", node->id(), node->op()->mnemonic());
- VisitNode<RETYPE>(node, info->truncation(), nullptr);
- TRACE(" ==> output ");
- PrintOutputInfo(info);
- TRACE("\n");
- if (updated) {
- // Here we need to check all uses since we can't easily know which nodes
- // will need to be revisited due to having an input which was a
- // revisited node.
- for (Node* const user : node->uses()) {
- if (GetInfo(user)->visited()) {
- TRACE(" QUEUEING #%d: %s\n", user->id(), user->op()->mnemonic());
- GetInfo(user)->set_queued();
- queue_.push(user);
- }
- }
- }
- }
- }
-
void ResetNodeInfoState() {
// Clean up for the next phase.
for (NodeInfo& info : info_) {
@@ -421,10 +347,6 @@ class RepresentationSelector {
bool UpdateFeedbackType(Node* node) {
if (node->op()->ValueOutputCount() == 0) return false;
- NodeInfo* info = GetInfo(node);
- Type type = info->feedback_type();
- Type new_type = type;
-
// For any non-phi node just wait until we get all inputs typed. We only
// allow untyped inputs for phi nodes because phis are the only places
// where cycles need to be broken.
@@ -436,6 +358,10 @@ class RepresentationSelector {
}
}
+ NodeInfo* info = GetInfo(node);
+ Type type = info->feedback_type();
+ Type new_type = NodeProperties::GetType(node);
+
// We preload these values here to avoid increasing the binary size too
// much, which happens if we inline the calls into the macros below.
Type input0_type;
@@ -604,33 +530,140 @@ class RepresentationSelector {
graph_zone());
}
- // Backward propagation of truncations.
- void RunTruncationPropagationPhase() {
- // Run propagation phase to a fixpoint.
- TRACE("--{Propagation phase}--\n");
- EnqueueInitial(jsgraph_->graph()->end());
- // Process nodes from the queue until it is empty.
- while (!queue_.empty()) {
- Node* node = queue_.front();
+ // Generates a pre-order traversal of the nodes, starting with End.
+ void GenerateTraversal() {
+ ZoneStack<NodeState> stack(zone_);
+
+ stack.push({graph()->end(), 0});
+ GetInfo(graph()->end())->set_pushed();
+ while (!stack.empty()) {
+ NodeState& current = stack.top();
+ Node* node = current.node;
+
+ // If there is an unvisited input, push it and continue with that node.
+ bool pushed_unvisited = false;
+ while (current.input_index < node->InputCount()) {
+ Node* input = node->InputAt(current.input_index);
+ NodeInfo* input_info = GetInfo(input);
+ current.input_index++;
+ if (input_info->unvisited()) {
+ input_info->set_pushed();
+ stack.push({input, 0});
+ pushed_unvisited = true;
+ break;
+ } else if (input_info->pushed()) {
+ // Optimization for the Retype phase.
+ // If we had already pushed (and not visited) an input, it means that
+ // the current node will be visited in the Retype phase before one of
+ // its inputs. If this happens, the current node might need to be
+ // revisited.
+ MarkAsPossibleRevisit(node, input);
+ }
+ }
+
+ if (pushed_unvisited) continue;
+
+ stack.pop();
NodeInfo* info = GetInfo(node);
- queue_.pop();
info->set_visited();
- TRACE(" visit #%d: %s (trunc: %s)\n", node->id(), node->op()->mnemonic(),
- info->truncation().description());
- VisitNode<PROPAGATE>(node, info->truncation(), nullptr);
+
+ // Generate the traversal
+ traversal_nodes_.push_back(node);
}
}
- void Run(SimplifiedLowering* lowering) {
- RunTruncationPropagationPhase();
+ void PushNodeToRevisitIfVisited(Node* node) {
+ NodeInfo* info = GetInfo(node);
+ if (info->visited()) {
+ TRACE(" QUEUEING #%d: %s\n", node->id(), node->op()->mnemonic());
+ info->set_queued();
+ revisit_queue_.push(node);
+ }
+ }
- RunTypePropagationPhase();
+ // Tries to update the feedback type of the node, as well as setting its
+ // machine representation (in VisitNode). Returns true iff updating the
+ // feedback type is successful.
+ bool RetypeNode(Node* node) {
+ NodeInfo* info = GetInfo(node);
+ info->set_visited();
+ bool updated = UpdateFeedbackType(node);
+ TRACE(" visit #%d: %s\n", node->id(), node->op()->mnemonic());
+ VisitNode<RETYPE>(node, info->truncation(), nullptr);
+ TRACE(" ==> output %s\n", MachineReprToString(info->representation()));
+ return updated;
+ }
- // Run lowering and change insertion phase.
- TRACE("--{Simplified lowering phase}--\n");
- // Process nodes from the collected {nodes_} vector.
- for (NodeVector::iterator i = nodes_.begin(); i != nodes_.end(); ++i) {
- Node* node = *i;
+ // Visits the node and marks it as visited. Inside of VisitNode, we might
+ // change the truncation of one of our inputs (see EnqueueInput<PROPAGATE> for
+ // this). If we change the truncation of an already visited node, we will add
+ // it to the revisit queue.
+ void PropagateTruncation(Node* node) {
+ NodeInfo* info = GetInfo(node);
+ info->set_visited();
+ TRACE(" visit #%d: %s (trunc: %s)\n", node->id(), node->op()->mnemonic(),
+ info->truncation().description());
+ VisitNode<PROPAGATE>(node, info->truncation(), nullptr);
+ }
+
+ // Backward propagation of truncations to a fixpoint.
+ void RunPropagatePhase() {
+ TRACE("--{Propagate phase}--\n");
+ ResetNodeInfoState();
+ DCHECK(revisit_queue_.empty());
+
+ // Process nodes in reverse post order, with End as the root.
+ for (auto it = traversal_nodes_.crbegin(); it != traversal_nodes_.crend();
+ ++it) {
+ PropagateTruncation(*it);
+
+ while (!revisit_queue_.empty()) {
+ Node* node = revisit_queue_.front();
+ revisit_queue_.pop();
+ PropagateTruncation(node);
+ }
+ }
+ }
+
+ // Forward propagation of types from type feedback to a fixpoint.
+ void RunRetypePhase() {
+ TRACE("--{Retype phase}--\n");
+ ResetNodeInfoState();
+ DCHECK(revisit_queue_.empty());
+
+ for (auto it = traversal_nodes_.cbegin(); it != traversal_nodes_.cend();
+ ++it) {
+ Node* node = *it;
+ if (!RetypeNode(node)) continue;
+
+ auto revisit_it = might_need_revisit_.find(node);
+ if (revisit_it == might_need_revisit_.end()) continue;
+
+ for (Node* const user : revisit_it->second) {
+ PushNodeToRevisitIfVisited(user);
+ }
+
+ // Process the revisit queue.
+ while (!revisit_queue_.empty()) {
+ Node* revisit_node = revisit_queue_.front();
+ revisit_queue_.pop();
+ if (!RetypeNode(revisit_node)) continue;
+ // Here we need to check all uses since we can't easily know which
+ // nodes will need to be revisited due to having an input which was
+ // a revisited node.
+ for (Node* const user : revisit_node->uses()) {
+ PushNodeToRevisitIfVisited(user);
+ }
+ }
+ }
+ }
+
+ // Lowering and change insertion phase.
+ void RunLowerPhase(SimplifiedLowering* lowering) {
+ TRACE("--{Lower phase}--\n");
+ for (auto it = traversal_nodes_.cbegin(); it != traversal_nodes_.cend();
+ ++it) {
+ Node* node = *it;
NodeInfo* info = GetInfo(node);
TRACE(" visit #%d: %s\n", node->id(), node->op()->mnemonic());
// Reuse {VisitNode()} so the representation rules are in one place.
@@ -656,11 +689,11 @@ class RepresentationSelector {
}
}
- void EnqueueInitial(Node* node) {
- NodeInfo* info = GetInfo(node);
- info->set_queued();
- nodes_.push_back(node);
- queue_.push(node);
+ void Run(SimplifiedLowering* lowering) {
+ GenerateTraversal();
+ RunPropagatePhase();
+ RunRetypePhase();
+ RunLowerPhase(lowering);
}
// Just assert for Retype and Lower. Propagate specialized below.
@@ -793,10 +826,10 @@ class RepresentationSelector {
// it takes the input from the input node {TypeOf(node->InputAt(index))}.
void ConvertInput(Node* node, int index, UseInfo use,
Type input_type = Type::Invalid()) {
- Node* input = node->InputAt(index);
// In the change phase, insert a change before the use if necessary.
if (use.representation() == MachineRepresentation::kNone)
return; // No input requirement on the use.
+ Node* input = node->InputAt(index);
DCHECK_NOT_NULL(input);
NodeInfo* input_info = GetInfo(input);
MachineRepresentation input_rep = input_info->representation();
@@ -805,16 +838,15 @@ class RepresentationSelector {
// Output representation doesn't match usage.
TRACE(" change: #%d:%s(@%d #%d:%s) ", node->id(), node->op()->mnemonic(),
index, input->id(), input->op()->mnemonic());
- TRACE(" from ");
- PrintOutputInfo(input_info);
- TRACE(" to ");
- PrintUseInfo(use);
- TRACE("\n");
+ TRACE("from %s to %s:%s\n",
+ MachineReprToString(input_info->representation()),
+ MachineReprToString(use.representation()),
+ use.truncation().description());
if (input_type.IsInvalid()) {
input_type = TypeOf(input);
}
- Node* n = changer_->GetRepresentationFor(
- input, input_info->representation(), input_type, node, use);
+ Node* n = changer_->GetRepresentationFor(input, input_rep, input_type,
+ node, use);
node->ReplaceInput(index, n);
}
}
@@ -854,18 +886,16 @@ class RepresentationSelector {
template <Phase T>
void VisitReturn(Node* node) {
- int tagged_limit = node->op()->ValueInputCount() +
- OperatorProperties::GetContextInputCount(node->op()) +
- OperatorProperties::GetFrameStateInputCount(node->op());
+ int first_effect_index = NodeProperties::FirstEffectIndex(node);
// Visit integer slot count to pop
ProcessInput<T>(node, 0, UseInfo::TruncatingWord32());
// Visit value, context and frame state inputs as tagged.
- for (int i = 1; i < tagged_limit; i++) {
+ for (int i = 1; i < first_effect_index; i++) {
ProcessInput<T>(node, i, UseInfo::AnyTagged());
}
// Only enqueue other inputs (effects, control).
- for (int i = tagged_limit; i < node->InputCount(); i++) {
+ for (int i = first_effect_index; i < node->InputCount(); i++) {
EnqueueInput<T>(node, i);
}
}
@@ -873,13 +903,11 @@ class RepresentationSelector {
// Helper for an unused node.
template <Phase T>
void VisitUnused(Node* node) {
- int value_count = node->op()->ValueInputCount() +
- OperatorProperties::GetContextInputCount(node->op()) +
- OperatorProperties::GetFrameStateInputCount(node->op());
- for (int i = 0; i < value_count; i++) {
+ int first_effect_index = NodeProperties::FirstEffectIndex(node);
+ for (int i = 0; i < first_effect_index; i++) {
ProcessInput<T>(node, i, UseInfo::None());
}
- ProcessRemainingInputs<T>(node, value_count);
+ ProcessRemainingInputs<T>(node, first_effect_index);
if (lower<T>()) Kill(node);
}
@@ -1083,19 +1111,27 @@ class RepresentationSelector {
auto call_descriptor = CallDescriptorOf(node->op());
int params = static_cast<int>(call_descriptor->ParameterCount());
int value_input_count = node->op()->ValueInputCount();
- // Propagate representation information from call descriptor.
- for (int i = 0; i < value_input_count; i++) {
- if (i == 0) {
- // The target of the call.
- ProcessInput<T>(node, i, UseInfo::Any());
- } else if ((i - 1) < params) {
- ProcessInput<T>(node, i,
- TruncatingUseInfoFromRepresentation(
- call_descriptor->GetInputType(i).representation()));
- } else {
- ProcessInput<T>(node, i, UseInfo::AnyTagged());
- }
+
+ DCHECK_GT(value_input_count, 0);
+ DCHECK_GE(value_input_count, params);
+
+ // The target of the call.
+ ProcessInput<T>(node, 0, UseInfo::Any());
+
+ // For the parameters (indexes [1, ..., params]), propagate representation
+ // information from call descriptor.
+ for (int i = 1; i <= params; i++) {
+ ProcessInput<T>(node, i,
+ TruncatingUseInfoFromRepresentation(
+ call_descriptor->GetInputType(i).representation()));
}
+
+ // Rest of the value inputs.
+ for (int i = params + 1; i < value_input_count; i++) {
+ ProcessInput<T>(node, i, UseInfo::AnyTagged());
+ }
+
+ // Effect and Control.
ProcessRemainingInputs<T>(node, value_input_count);
if (call_descriptor->ReturnCount() > 0) {
@@ -1457,7 +1493,8 @@ class RepresentationSelector {
if (lower<T>()) {
if (truncation.IsUsedAsWord32() ||
!CanOverflowSigned32(node->op(), left_feedback_type,
- right_feedback_type, graph_zone())) {
+ right_feedback_type, type_cache_,
+ graph_zone())) {
ChangeToPureOp(node, Int32Op(node));
} else {
@@ -1789,9 +1826,8 @@ class RepresentationSelector {
// Note: We must not do this for constants, as they are cached and we
// would thus kill the cached {node} during lowering (i.e. replace all
// uses with Dead), but at that point some node lowering might have
- // already taken the constant {node} from the cache (while it was in
- // a sane state still) and we would afterwards replace that use with
- // Dead as well.
+ // already taken the constant {node} from the cache (while it was not
+ // yet killed) and we would afterwards replace that use with Dead as well.
if (node->op()->ValueInputCount() > 0 &&
node->op()->HasProperty(Operator::kPure) && truncation.IsUnused()) {
return VisitUnused<T>(node);
@@ -2059,6 +2095,7 @@ class RepresentationSelector {
// hint with Oddball feedback here.
DCHECK_NE(IrOpcode::kSpeculativeNumberEqual, node->opcode());
V8_FALLTHROUGH;
+ case NumberOperationHint::kNumberOrBoolean:
case NumberOperationHint::kNumber:
VisitBinop<T>(node,
CheckedUseInfoAsFloat64FromHint(
@@ -2806,6 +2843,11 @@ class RepresentationSelector {
return;
}
case IrOpcode::kSpeculativeBigIntAdd: {
+ // TODO(nicohartmann@, chromium:1073440): There should be special
+ // handling for truncation.IsUnused() that correctly propagates deadness,
+ // but preserves type checking which may throw exceptions. Until this
+ // is fully supported, we lower to int64 operations but keep pushing
+ // type constraints.
if (truncation.IsUsedAsWord64()) {
VisitBinop<T>(
node, UseInfo::CheckedBigIntTruncatingWord64(FeedbackSource{}),
@@ -3278,6 +3320,7 @@ class RepresentationSelector {
MachineRepresentation::kWord32, Type::Signed32());
break;
case NumberOperationHint::kNumber:
+ case NumberOperationHint::kNumberOrBoolean:
case NumberOperationHint::kNumberOrOddball:
VisitUnop<T>(
node, CheckedUseInfoAsFloat64FromHint(p.hint(), p.feedback()),
@@ -3681,7 +3724,7 @@ class RepresentationSelector {
case IrOpcode::kUnreachable:
case IrOpcode::kRuntimeAbort:
// All JavaScript operators except JSToNumber have uniform handling.
-#define OPCODE_CASE(name) case IrOpcode::k##name:
+#define OPCODE_CASE(name, ...) case IrOpcode::k##name:
JS_SIMPLE_BINOP_LIST(OPCODE_CASE)
JS_OBJECT_OP_LIST(OPCODE_CASE)
JS_CONTEXT_OP_LIST(OPCODE_CASE)
@@ -3757,31 +3800,6 @@ class RepresentationSelector {
node->NullAllInputs(); // The {node} is now dead.
}
- void PrintOutputInfo(NodeInfo* info) {
- if (FLAG_trace_representation) {
- StdoutStream{} << info->representation();
- }
- }
-
- void PrintRepresentation(MachineRepresentation rep) {
- if (FLAG_trace_representation) {
- StdoutStream{} << rep;
- }
- }
-
- void PrintTruncation(Truncation truncation) {
- if (FLAG_trace_representation) {
- StdoutStream{} << truncation.description() << std::endl;
- }
- }
-
- void PrintUseInfo(UseInfo info) {
- if (FLAG_trace_representation) {
- StdoutStream{} << info.representation() << ":"
- << info.truncation().description();
- }
- }
-
private:
JSGraph* jsgraph_;
Zone* zone_; // Temporary zone.
@@ -3793,16 +3811,15 @@ class RepresentationSelector {
ZoneVector<InputUseInfos> node_input_use_infos_; // Debug information about
// requirements on inputs.
#endif // DEBUG
- NodeVector nodes_; // collected nodes
NodeVector replacements_; // replacements to be done after lowering
RepresentationChanger* changer_; // for inserting representation changes
- ZoneQueue<Node*> queue_; // queue for traversing the graph
+ ZoneQueue<Node*> revisit_queue_; // Queue for revisiting nodes.
struct NodeState {
Node* node;
int input_index;
};
- ZoneStack<NodeState> typing_stack_; // stack for graph typing.
+ NodeVector traversal_nodes_; // Order in which to traverse the nodes.
// TODO(danno): RepresentationSelector shouldn't know anything about the
// source positions table, but must for now since there currently is no other
// way to pass down source position information to nodes created during
@@ -3825,8 +3842,7 @@ class RepresentationSelector {
// Template specializations
// Enqueue {use_node}'s {index} input if the {use_info} contains new information
-// for that input node. Add the input to {nodes_} if this is the first time it's
-// been visited.
+// for that input node.
template <>
void RepresentationSelector::EnqueueInput<PROPAGATE>(Node* use_node, int index,
UseInfo use_info) {
@@ -3838,28 +3854,21 @@ void RepresentationSelector::EnqueueInput<PROPAGATE>(Node* use_node, int index,
use_info);
#endif // DEBUG
if (info->unvisited()) {
- // First visit of this node.
- info->set_queued();
- nodes_.push_back(node);
- queue_.push(node);
- TRACE(" initial #%i: ", node->id());
info->AddUse(use_info);
- PrintTruncation(info->truncation());
+ TRACE(" initial #%i: %s\n", node->id(), info->truncation().description());
return;
}
- TRACE(" queue #%i?: ", node->id());
- PrintTruncation(info->truncation());
+ TRACE(" queue #%i?: %s\n", node->id(), info->truncation().description());
if (info->AddUse(use_info)) {
// New usage information for the node is available.
if (!info->queued()) {
DCHECK(info->visited());
- queue_.push(node);
+ revisit_queue_.push(node);
info->set_queued();
- TRACE(" added: ");
+ TRACE(" added: %s\n", info->truncation().description());
} else {
- TRACE(" inqueue: ");
+ TRACE(" inqueue: %s\n", info->truncation().description());
}
- PrintTruncation(info->truncation());
}
}
@@ -3918,15 +3927,12 @@ void RepresentationSelector::ProcessInput<LOWER>(Node* node, int index,
template <>
void RepresentationSelector::ProcessRemainingInputs<PROPAGATE>(Node* node,
int index) {
- DCHECK_GE(index, NodeProperties::PastValueIndex(node));
DCHECK_GE(index, NodeProperties::PastContextIndex(node));
+
+ // Enqueue other inputs (effects, control).
for (int i = std::max(index, NodeProperties::FirstEffectIndex(node));
- i < NodeProperties::PastEffectIndex(node); ++i) {
- EnqueueInput<PROPAGATE>(node, i); // Effect inputs: just visit
- }
- for (int i = std::max(index, NodeProperties::FirstControlIndex(node));
- i < NodeProperties::PastControlIndex(node); ++i) {
- EnqueueInput<PROPAGATE>(node, i); // Control inputs: just visit
+ i < node->InputCount(); ++i) {
+ EnqueueInput<PROPAGATE>(node, i);
}
}
@@ -3936,26 +3942,22 @@ void RepresentationSelector::ProcessRemainingInputs<PROPAGATE>(Node* node,
// values {kTypeAny}.
template <>
void RepresentationSelector::VisitInputs<PROPAGATE>(Node* node) {
- int tagged_count = node->op()->ValueInputCount() +
- OperatorProperties::GetContextInputCount(node->op()) +
- OperatorProperties::GetFrameStateInputCount(node->op());
+ int first_effect_index = NodeProperties::FirstEffectIndex(node);
// Visit value, context and frame state inputs as tagged.
- for (int i = 0; i < tagged_count; i++) {
+ for (int i = 0; i < first_effect_index; i++) {
ProcessInput<PROPAGATE>(node, i, UseInfo::AnyTagged());
}
// Only enqueue other inputs (effects, control).
- for (int i = tagged_count; i < node->InputCount(); i++) {
+ for (int i = first_effect_index; i < node->InputCount(); i++) {
EnqueueInput<PROPAGATE>(node, i);
}
}
template <>
void RepresentationSelector::VisitInputs<LOWER>(Node* node) {
- int tagged_count = node->op()->ValueInputCount() +
- OperatorProperties::GetContextInputCount(node->op()) +
- OperatorProperties::GetFrameStateInputCount(node->op());
+ int first_effect_index = NodeProperties::FirstEffectIndex(node);
// Visit value, context and frame state inputs as tagged.
- for (int i = 0; i < tagged_count; i++) {
+ for (int i = 0; i < first_effect_index; i++) {
ProcessInput<LOWER>(node, i, UseInfo::AnyTagged());
}
}
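
Editorial note: the CanOverflowSigned32 change above widens a possibly-minus-zero input to include 0 before deciding whether a checked int32 add can be lowered to a pure Int32Add. The following is a rough range-arithmetic model of that decision for addition, using a hypothetical Range struct rather than V8's Type lattice (the real function also dispatches on the operator).

#include <algorithm>
#include <cstdint>
#include <iostream>
#include <limits>

struct Range {
  double min, max;        // inclusive bounds of the integer range
  bool maybe_minus_zero;  // the value might additionally be -0
};

// True if left + right might leave the int32 range once -0 is treated as 0.
bool CanOverflowSigned32Add(Range left, Range right) {
  if (left.maybe_minus_zero) {
    left.min = std::min(left.min, 0.0);
    left.max = std::max(left.max, 0.0);
  }
  if (right.maybe_minus_zero) {
    right.min = std::min(right.min, 0.0);
    right.max = std::max(right.max, 0.0);
  }
  constexpr double kMin = std::numeric_limits<int32_t>::min();
  constexpr double kMax = std::numeric_limits<int32_t>::max();
  return left.min + right.min < kMin || left.max + right.max > kMax;
}

int main() {
  std::cout << CanOverflowSigned32Add({0.0, 100.0, true},
                                      {0.0, 100.0, false}) << "\n";  // 0
  std::cout << CanOverflowSigned32Add({0.0, 2e9, false},
                                      {0.0, 2e9, false}) << "\n";    // 1
  return 0;
}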
diff --git a/chromium/v8/src/compiler/simplified-operator.cc b/chromium/v8/src/compiler/simplified-operator.cc
index 1be2bed0013..ff5d69cd669 100644
--- a/chromium/v8/src/compiler/simplified-operator.cc
+++ b/chromium/v8/src/compiler/simplified-operator.cc
@@ -49,7 +49,7 @@ bool operator==(ConstFieldInfo const& lhs, ConstFieldInfo const& rhs) {
}
size_t hash_value(ConstFieldInfo const& const_field_info) {
- return (size_t)const_field_info.owner_map.address();
+ return static_cast<size_t>(const_field_info.owner_map.address());
}
bool operator==(FieldAccess const& lhs, FieldAccess const& rhs) {
@@ -304,6 +304,8 @@ std::ostream& operator<<(std::ostream& os, CheckTaggedInputMode mode) {
switch (mode) {
case CheckTaggedInputMode::kNumber:
return os << "Number";
+ case CheckTaggedInputMode::kNumberOrBoolean:
+ return os << "NumberOrBoolean";
case CheckTaggedInputMode::kNumberOrOddball:
return os << "NumberOrOddball";
}
@@ -532,6 +534,8 @@ std::ostream& operator<<(std::ostream& os, NumberOperationHint hint) {
return os << "Signed32";
case NumberOperationHint::kNumber:
return os << "Number";
+ case NumberOperationHint::kNumberOrBoolean:
+ return os << "NumberOrBoolean";
case NumberOperationHint::kNumberOrOddball:
return os << "NumberOrOddball";
}
@@ -1045,6 +1049,8 @@ struct SimplifiedOperatorGlobalCache final {
};
CheckedTaggedToFloat64Operator<CheckTaggedInputMode::kNumber>
kCheckedTaggedToFloat64NumberOperator;
+ CheckedTaggedToFloat64Operator<CheckTaggedInputMode::kNumberOrBoolean>
+ kCheckedTaggedToFloat64NumberOrBooleanOperator;
CheckedTaggedToFloat64Operator<CheckTaggedInputMode::kNumberOrOddball>
kCheckedTaggedToFloat64NumberOrOddballOperator;
@@ -1157,6 +1163,8 @@ struct SimplifiedOperatorGlobalCache final {
k##Name##NumberOrOddballOperator;
SPECULATIVE_NUMBER_BINOP_LIST(SPECULATIVE_NUMBER_BINOP)
#undef SPECULATIVE_NUMBER_BINOP
+ SpeculativeNumberEqualOperator<NumberOperationHint::kNumberOrBoolean>
+ kSpeculativeNumberEqualNumberOrBooleanOperator;
template <NumberOperationHint kHint>
struct SpeculativeToNumberOperator final
@@ -1402,6 +1410,8 @@ const Operator* SimplifiedOperatorBuilder::CheckedTaggedToFloat64(
switch (mode) {
case CheckTaggedInputMode::kNumber:
return &cache_.kCheckedTaggedToFloat64NumberOperator;
+ case CheckTaggedInputMode::kNumberOrBoolean:
+ return &cache_.kCheckedTaggedToFloat64NumberOrBooleanOperator;
case CheckTaggedInputMode::kNumberOrOddball:
return &cache_.kCheckedTaggedToFloat64NumberOrOddballOperator;
}
@@ -1418,6 +1428,9 @@ const Operator* SimplifiedOperatorBuilder::CheckedTruncateTaggedToWord32(
switch (mode) {
case CheckTaggedInputMode::kNumber:
return &cache_.kCheckedTruncateTaggedToWord32NumberOperator;
+ case CheckTaggedInputMode::kNumberOrBoolean:
+ // Not used currently.
+ UNREACHABLE();
case CheckTaggedInputMode::kNumberOrOddball:
return &cache_.kCheckedTruncateTaggedToWord32NumberOrOddballOperator;
}
@@ -1541,6 +1554,9 @@ const Operator* SimplifiedOperatorBuilder::SpeculativeToNumber(
return &cache_.kSpeculativeToNumberSigned32Operator;
case NumberOperationHint::kNumber:
return &cache_.kSpeculativeToNumberNumberOperator;
+ case NumberOperationHint::kNumberOrBoolean:
+ // Not used currently.
+ UNREACHABLE();
case NumberOperationHint::kNumberOrOddball:
return &cache_.kSpeculativeToNumberNumberOrOddballOperator;
}
@@ -1778,14 +1794,38 @@ const Operator* SimplifiedOperatorBuilder::AllocateRaw(
return &cache_.k##Name##Signed32Operator; \
case NumberOperationHint::kNumber: \
return &cache_.k##Name##NumberOperator; \
+ case NumberOperationHint::kNumberOrBoolean: \
+ /* Not used currently. */ \
+ UNREACHABLE(); \
case NumberOperationHint::kNumberOrOddball: \
return &cache_.k##Name##NumberOrOddballOperator; \
} \
UNREACHABLE(); \
return nullptr; \
}
-SPECULATIVE_NUMBER_BINOP_LIST(SPECULATIVE_NUMBER_BINOP)
+SIMPLIFIED_SPECULATIVE_NUMBER_BINOP_LIST(SPECULATIVE_NUMBER_BINOP)
+SPECULATIVE_NUMBER_BINOP(SpeculativeNumberLessThan)
+SPECULATIVE_NUMBER_BINOP(SpeculativeNumberLessThanOrEqual)
#undef SPECULATIVE_NUMBER_BINOP
+const Operator* SimplifiedOperatorBuilder::SpeculativeNumberEqual(
+ NumberOperationHint hint) {
+ switch (hint) {
+ case NumberOperationHint::kSignedSmall:
+ return &cache_.kSpeculativeNumberEqualSignedSmallOperator;
+ case NumberOperationHint::kSignedSmallInputs:
+ return &cache_.kSpeculativeNumberEqualSignedSmallInputsOperator;
+ case NumberOperationHint::kSigned32:
+ return &cache_.kSpeculativeNumberEqualSigned32Operator;
+ case NumberOperationHint::kNumber:
+ return &cache_.kSpeculativeNumberEqualNumberOperator;
+ case NumberOperationHint::kNumberOrBoolean:
+ return &cache_.kSpeculativeNumberEqualNumberOrBooleanOperator;
+ case NumberOperationHint::kNumberOrOddball:
+ return &cache_.kSpeculativeNumberEqualNumberOrOddballOperator;
+ }
+ UNREACHABLE();
+ return nullptr;
+}
#define ACCESS_OP_LIST(V) \
V(LoadField, FieldAccess, Operator::kNoWrite, 1, 1, 1) \
diff --git a/chromium/v8/src/compiler/simplified-operator.h b/chromium/v8/src/compiler/simplified-operator.h
index df2516646b2..649aea0d0b9 100644
--- a/chromium/v8/src/compiler/simplified-operator.h
+++ b/chromium/v8/src/compiler/simplified-operator.h
@@ -312,6 +312,7 @@ Handle<FeedbackCell> FeedbackCellOf(const Operator* op);
enum class CheckTaggedInputMode : uint8_t {
kNumber,
+ kNumberOrBoolean,
kNumberOrOddball,
};
@@ -507,6 +508,7 @@ enum class NumberOperationHint : uint8_t {
kSignedSmallInputs, // Inputs were Smi, output was Number.
kSigned32, // Inputs were Signed32, output was Number.
kNumber, // Inputs were Number, output was Number.
+ kNumberOrBoolean, // Inputs were Number or Boolean, output was Number.
kNumberOrOddball, // Inputs were Number or Oddball, output was Number.
};
diff --git a/chromium/v8/src/compiler/typed-optimization.cc b/chromium/v8/src/compiler/typed-optimization.cc
index c8c422f66bf..abc88c4b8ed 100644
--- a/chromium/v8/src/compiler/typed-optimization.cc
+++ b/chromium/v8/src/compiler/typed-optimization.cc
@@ -181,7 +181,7 @@ Reduction TypedOptimization::ReduceMaybeGrowFastElements(Node* node) {
simplified()->CheckBounds(FeedbackSource{},
CheckBoundsFlag::kAbortOnOutOfBounds),
index, length, effect, control);
- ReplaceWithValue(node, elements);
+ ReplaceWithValue(node, elements, check_bounds);
return Replace(check_bounds);
}
diff --git a/chromium/v8/src/compiler/typer.cc b/chromium/v8/src/compiler/typer.cc
index 6d53531f1cb..94e2c9a1e06 100644
--- a/chromium/v8/src/compiler/typer.cc
+++ b/chromium/v8/src/compiler/typer.cc
@@ -61,8 +61,7 @@ class Typer::Visitor : public Reducer {
explicit Visitor(Typer* typer, LoopVariableOptimizer* induction_vars)
: typer_(typer),
induction_vars_(induction_vars),
- weakened_nodes_(typer->zone()),
- remembered_types_(typer->zone()) {}
+ weakened_nodes_(typer->zone()) {}
const char* reducer_name() const override { return "Typer"; }
@@ -73,8 +72,8 @@ class Typer::Visitor : public Reducer {
Type TypeNode(Node* node) {
switch (node->opcode()) {
-#define DECLARE_UNARY_CASE(x) \
- case IrOpcode::k##x: \
+#define DECLARE_UNARY_CASE(x, ...) \
+ case IrOpcode::k##x: \
return Type##x(Operand(node, 0));
JS_SIMPLE_UNOP_LIST(DECLARE_UNARY_CASE)
SIMPLIFIED_NUMBER_UNOP_LIST(DECLARE_UNARY_CASE)
@@ -82,8 +81,8 @@ class Typer::Visitor : public Reducer {
SIMPLIFIED_SPECULATIVE_NUMBER_UNOP_LIST(DECLARE_UNARY_CASE)
SIMPLIFIED_SPECULATIVE_BIGINT_UNOP_LIST(DECLARE_UNARY_CASE)
#undef DECLARE_UNARY_CASE
-#define DECLARE_BINARY_CASE(x) \
- case IrOpcode::k##x: \
+#define DECLARE_BINARY_CASE(x, ...) \
+ case IrOpcode::k##x: \
return Type##x(Operand(node, 0), Operand(node, 1));
JS_SIMPLE_BINOP_LIST(DECLARE_BINARY_CASE)
SIMPLIFIED_NUMBER_BINOP_LIST(DECLARE_BINARY_CASE)
@@ -91,8 +90,8 @@ class Typer::Visitor : public Reducer {
SIMPLIFIED_SPECULATIVE_NUMBER_BINOP_LIST(DECLARE_BINARY_CASE)
SIMPLIFIED_SPECULATIVE_BIGINT_BINOP_LIST(DECLARE_BINARY_CASE)
#undef DECLARE_BINARY_CASE
-#define DECLARE_OTHER_CASE(x) \
- case IrOpcode::k##x: \
+#define DECLARE_OTHER_CASE(x, ...) \
+ case IrOpcode::k##x: \
return Type##x(node);
DECLARE_OTHER_CASE(Start)
DECLARE_OTHER_CASE(IfException)
@@ -103,7 +102,7 @@ class Typer::Visitor : public Reducer {
JS_CONTEXT_OP_LIST(DECLARE_OTHER_CASE)
JS_OTHER_OP_LIST(DECLARE_OTHER_CASE)
#undef DECLARE_OTHER_CASE
-#define DECLARE_IMPOSSIBLE_CASE(x) case IrOpcode::k##x:
+#define DECLARE_IMPOSSIBLE_CASE(x, ...) case IrOpcode::k##x:
DECLARE_IMPOSSIBLE_CASE(Loop)
DECLARE_IMPOSSIBLE_CASE(Branch)
DECLARE_IMPOSSIBLE_CASE(IfTrue)
@@ -141,10 +140,8 @@ class Typer::Visitor : public Reducer {
Typer* typer_;
LoopVariableOptimizer* induction_vars_;
ZoneSet<NodeId> weakened_nodes_;
- // TODO(tebbi): remove once chromium:906567 is resolved.
- ZoneUnorderedMap<std::pair<Node*, int>, Type> remembered_types_;
-#define DECLARE_METHOD(x) inline Type Type##x(Node* node);
+#define DECLARE_METHOD(x, ...) inline Type Type##x(Node* node);
DECLARE_METHOD(Start)
DECLARE_METHOD(IfException)
COMMON_OP_LIST(DECLARE_METHOD)
@@ -154,7 +151,7 @@ class Typer::Visitor : public Reducer {
JS_CONTEXT_OP_LIST(DECLARE_METHOD)
JS_OTHER_OP_LIST(DECLARE_METHOD)
#undef DECLARE_METHOD
-#define DECLARE_METHOD(x) inline Type Type##x(Type input);
+#define DECLARE_METHOD(x, ...) inline Type Type##x(Type input);
JS_SIMPLE_UNOP_LIST(DECLARE_METHOD)
#undef DECLARE_METHOD
@@ -232,13 +229,13 @@ class Typer::Visitor : public Reducer {
SIMPLIFIED_SPECULATIVE_NUMBER_BINOP_LIST(DECLARE_METHOD)
SIMPLIFIED_SPECULATIVE_BIGINT_BINOP_LIST(DECLARE_METHOD)
#undef DECLARE_METHOD
-#define DECLARE_METHOD(Name) \
+#define DECLARE_METHOD(Name, ...) \
inline Type Type##Name(Type left, Type right) { \
return TypeBinaryOp(left, right, Name##Typer); \
}
JS_SIMPLE_BINOP_LIST(DECLARE_METHOD)
#undef DECLARE_METHOD
-#define DECLARE_METHOD(Name) \
+#define DECLARE_METHOD(Name, ...) \
inline Type Type##Name(Type left, Type right) { \
return TypeBinaryOp(left, right, Name); \
}
@@ -247,7 +244,7 @@ class Typer::Visitor : public Reducer {
SIMPLIFIED_SPECULATIVE_NUMBER_BINOP_LIST(DECLARE_METHOD)
SIMPLIFIED_SPECULATIVE_BIGINT_BINOP_LIST(DECLARE_METHOD)
#undef DECLARE_METHOD
-#define DECLARE_METHOD(Name) \
+#define DECLARE_METHOD(Name, ...) \
inline Type Type##Name(Type input) { return TypeUnaryOp(input, Name); }
SIMPLIFIED_NUMBER_UNOP_LIST(DECLARE_METHOD)
SIMPLIFIED_BIGINT_UNOP_LIST(DECLARE_METHOD)
@@ -274,7 +271,7 @@ class Typer::Visitor : public Reducer {
static ComparisonOutcome JSCompareTyper(Type, Type, Typer*);
static ComparisonOutcome NumberCompareTyper(Type, Type, Typer*);
-#define DECLARE_METHOD(x) static Type x##Typer(Type, Type, Typer*);
+#define DECLARE_METHOD(x, ...) static Type x##Typer(Type, Type, Typer*);
JS_SIMPLE_BINOP_LIST(DECLARE_METHOD)
#undef DECLARE_METHOD
@@ -303,49 +300,9 @@ class Typer::Visitor : public Reducer {
AllowHandleDereference allow;
std::ostringstream ostream;
node->Print(ostream);
-
- if (V8_UNLIKELY(node->opcode() == IrOpcode::kNumberAdd)) {
- ostream << "Previous UpdateType run (inputs first):";
- for (int i = 0; i < 3; ++i) {
- ostream << " ";
- if (remembered_types_[{node, i}].IsInvalid()) {
- ostream << "untyped";
- } else {
- remembered_types_[{node, i}].PrintTo(ostream);
- }
- }
-
- ostream << "\nCurrent (output) type: ";
- previous.PrintTo(ostream);
-
- ostream << "\nThis UpdateType run (inputs first):";
- for (int i = 0; i < 2; ++i) {
- ostream << " ";
- Node* input = NodeProperties::GetValueInput(node, i);
- if (NodeProperties::IsTyped(input)) {
- NodeProperties::GetType(input).PrintTo(ostream);
- } else {
- ostream << "untyped";
- }
- }
- ostream << " ";
- current.PrintTo(ostream);
- ostream << "\n";
- }
-
FATAL("UpdateType error for node %s", ostream.str().c_str());
}
- if (V8_UNLIKELY(node->opcode() == IrOpcode::kNumberAdd)) {
- for (int i = 0; i < 2; ++i) {
- Node* input = NodeProperties::GetValueInput(node, i);
- remembered_types_[{node, i}] = NodeProperties::IsTyped(input)
- ? NodeProperties::GetType(input)
- : Type::Invalid();
- }
- remembered_types_[{node, 2}] = current;
- }
-
NodeProperties::SetType(node, current);
if (!current.Is(previous)) {
// If something changed, revisit all uses.
@@ -353,16 +310,6 @@ class Typer::Visitor : public Reducer {
}
return NoChange();
} else {
- if (V8_UNLIKELY(node->opcode() == IrOpcode::kNumberAdd)) {
- for (int i = 0; i < 2; ++i) {
- Node* input = NodeProperties::GetValueInput(node, i);
- remembered_types_[{node, i}] = NodeProperties::IsTyped(input)
- ? NodeProperties::GetType(input)
- : Type::Invalid();
- }
- remembered_types_[{node, 2}] = current;
- }
-
// No previous type, simply update the type.
NodeProperties::SetType(node, current);
return Changed(node);
diff --git a/chromium/v8/src/compiler/types.cc b/chromium/v8/src/compiler/types.cc
index 47280becbd9..c32ae4cd923 100644
--- a/chromium/v8/src/compiler/types.cc
+++ b/chromium/v8/src/compiler/types.cc
@@ -224,7 +224,6 @@ Type::bitset BitsetType::Lub(const MapRefLike& map) {
case JS_ASYNC_FUNCTION_OBJECT_TYPE:
case JS_ASYNC_GENERATOR_OBJECT_TYPE:
case JS_MODULE_NAMESPACE_TYPE:
- case JS_AGGREGATE_ERROR_TYPE:
case JS_ARRAY_BUFFER_TYPE:
case JS_ARRAY_ITERATOR_TYPE:
case JS_REG_EXP_TYPE:
@@ -358,7 +357,6 @@ Type::bitset BitsetType::Lub(const MapRefLike& map) {
case ENUM_CACHE_TYPE:
case WASM_CAPI_FUNCTION_DATA_TYPE:
case WASM_INDIRECT_FUNCTION_TABLE_TYPE:
- case WASM_DEBUG_INFO_TYPE:
case WASM_EXCEPTION_TAG_TYPE:
case WASM_EXPORTED_FUNCTION_DATA_TYPE:
case WASM_JS_FUNCTION_DATA_TYPE:
@@ -372,7 +370,7 @@ Type::bitset BitsetType::Lub(const MapRefLike& map) {
case PROMISE_REJECT_REACTION_JOB_TASK_TYPE:
case PROMISE_RESOLVE_THENABLE_JOB_TASK_TYPE:
#define MAKE_TORQUE_CLASS_TYPE(INSTANCE_TYPE, Name, name) case INSTANCE_TYPE:
- TORQUE_INTERNAL_INSTANCE_TYPE_LIST(MAKE_TORQUE_CLASS_TYPE)
+ TORQUE_DEFINED_INSTANCE_TYPE_LIST(MAKE_TORQUE_CLASS_TYPE)
#undef MAKE_TORQUE_CLASS_TYPE
UNREACHABLE();
}
diff --git a/chromium/v8/src/compiler/wasm-compiler.cc b/chromium/v8/src/compiler/wasm-compiler.cc
index ac7a681336a..12ce3d32558 100644
--- a/chromium/v8/src/compiler/wasm-compiler.cc
+++ b/chromium/v8/src/compiler/wasm-compiler.cc
@@ -55,7 +55,7 @@
#include "src/wasm/wasm-linkage.h"
#include "src/wasm/wasm-module.h"
#include "src/wasm/wasm-objects-inl.h"
-#include "src/wasm/wasm-opcodes.h"
+#include "src/wasm/wasm-opcodes-inl.h"
namespace v8 {
namespace internal {
@@ -2116,16 +2116,14 @@ Node* WasmGraphBuilder::Throw(uint32_t exception_index,
values_array, &index,
graph()->NewNode(m->I32x4ExtractLane(3), value));
break;
- case wasm::ValueType::kAnyRef:
- case wasm::ValueType::kFuncRef:
- case wasm::ValueType::kNullRef:
- case wasm::ValueType::kExnRef:
case wasm::ValueType::kRef:
case wasm::ValueType::kOptRef:
- case wasm::ValueType::kEqRef:
STORE_FIXED_ARRAY_SLOT_ANY(values_array, index, value);
++index;
break;
+ case wasm::ValueType::kRtt: // TODO(7748): Implement.
+ case wasm::ValueType::kI8:
+ case wasm::ValueType::kI16:
case wasm::ValueType::kStmt:
case wasm::ValueType::kBottom:
UNREACHABLE();
@@ -2214,7 +2212,7 @@ Node* WasmGraphBuilder::LoadExceptionTagFromTable(uint32_t exception_index) {
Node* WasmGraphBuilder::GetExceptionTag(Node* except_obj,
wasm::WasmCodePosition position) {
- TrapIfTrue(wasm::kTrapBrOnExnNullRef, gasm_->WordEqual(RefNull(), except_obj),
+ TrapIfTrue(wasm::kTrapBrOnExnNull, gasm_->WordEqual(RefNull(), except_obj),
position);
return CALL_BUILTIN(
WasmGetOwnProperty, except_obj,
@@ -2269,16 +2267,14 @@ Node* WasmGraphBuilder::GetExceptionValues(Node* except_obj,
mcgraph()->machine()->I32x4ReplaceLane(3), value,
BuildDecodeException32BitValue(values_array, &index));
break;
- case wasm::ValueType::kAnyRef:
- case wasm::ValueType::kFuncRef:
- case wasm::ValueType::kNullRef:
- case wasm::ValueType::kExnRef:
case wasm::ValueType::kRef:
case wasm::ValueType::kOptRef:
- case wasm::ValueType::kEqRef:
value = LOAD_FIXED_ARRAY_SLOT_ANY(values_array, index);
++index;
break;
+ case wasm::ValueType::kRtt: // TODO(7748): Implement.
+ case wasm::ValueType::kI8:
+ case wasm::ValueType::kI16:
case wasm::ValueType::kStmt:
case wasm::ValueType::kBottom:
UNREACHABLE();
@@ -3257,7 +3253,7 @@ void WasmGraphBuilder::GetGlobalBaseAndOffset(MachineType mem_type,
}
}
-void WasmGraphBuilder::GetBaseAndOffsetForImportedMutableAnyRefGlobal(
+void WasmGraphBuilder::GetBaseAndOffsetForImportedMutableExternRefGlobal(
const wasm::WasmGlobal& global, Node** base, Node** offset) {
// Load the base from the ImportedMutableGlobalsBuffer of the instance.
Node* buffers = LOAD_INSTANCE_FIELD(ImportedMutableGlobalsBuffers,
@@ -3358,11 +3354,11 @@ Node* WasmGraphBuilder::BuildCallToRuntime(Runtime::FunctionId f,
Node* WasmGraphBuilder::GlobalGet(uint32_t index) {
const wasm::WasmGlobal& global = env_->module->globals[index];
- if (global.type.IsReferenceType()) {
+ if (global.type.is_reference_type()) {
if (global.mutability && global.imported) {
Node* base = nullptr;
Node* offset = nullptr;
- GetBaseAndOffsetForImportedMutableAnyRefGlobal(global, &base, &offset);
+ GetBaseAndOffsetForImportedMutableExternRefGlobal(global, &base, &offset);
return gasm_->Load(MachineType::AnyTagged(), base, offset);
}
Node* globals_buffer =
@@ -3387,11 +3383,11 @@ Node* WasmGraphBuilder::GlobalGet(uint32_t index) {
Node* WasmGraphBuilder::GlobalSet(uint32_t index, Node* val) {
const wasm::WasmGlobal& global = env_->module->globals[index];
- if (global.type.IsReferenceType()) {
+ if (global.type.is_reference_type()) {
if (global.mutability && global.imported) {
Node* base = nullptr;
Node* offset = nullptr;
- GetBaseAndOffsetForImportedMutableAnyRefGlobal(global, &base, &offset);
+ GetBaseAndOffsetForImportedMutableExternRefGlobal(global, &base, &offset);
return STORE_RAW_NODE_OFFSET(
base, offset, val, MachineRepresentation::kTagged, kFullWriteBarrier);
@@ -3497,7 +3493,7 @@ Node* WasmGraphBuilder::BoundsCheckMem(uint8_t access_size, Node* index,
return index;
}
- if (!base::IsInBounds(offset, access_size, env_->max_memory_size)) {
+ if (!base::IsInBounds<uint64_t>(offset, access_size, env_->max_memory_size)) {
// The access will be out of bounds, even for the largest memory.
TrapIfEq32(wasm::kTrapMemOutOfBounds, Int32Constant(0), 0, position);
return mcgraph()->IntPtrConstant(0);
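
The change above pins base::IsInBounds to an explicit uint64_t instantiation so the offset/access-size comparison happens in 64-bit arithmetic (the arguments have differing integer widths, so deduction alone would not guarantee that). A minimal sketch of why the width matters; the exact shape of base::IsInBounds is an assumption and the values are illustrative:

#include <cstdint>

// Assumed shape of base::IsInBounds: the access [index, index + size) must
// satisfy index + size <= max, without overflowing.
template <typename T>
bool IsInBoundsSketch(T index, T size, T max) {
  return size <= max && index <= max - size;
}

// IsInBoundsSketch<uint64_t>(0xFFFFFFF0ull, 0x20, 0x10000) is false, whereas a
// 32-bit "index + size" would wrap to 0x10 and look in-bounds to a naive
// comparison against max.
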
@@ -3613,20 +3609,43 @@ const Operator* WasmGraphBuilder::GetSafeStoreOperator(int offset,
return mcgraph()->machine()->UnalignedStore(store_rep);
}
+Node* WasmGraphBuilder::TraceFunctionEntry(wasm::WasmCodePosition position) {
+ Node* call = BuildCallToRuntime(Runtime::kWasmTraceEnter, nullptr, 0);
+ SetSourcePosition(call, position);
+ return call;
+}
+
+Node* WasmGraphBuilder::TraceFunctionExit(Vector<Node*> vals,
+ wasm::WasmCodePosition position) {
+ Node* info = gasm_->IntPtrConstant(0);
+ size_t num_returns = vals.size();
+ if (num_returns == 1) {
+ wasm::ValueType return_type = sig_->GetReturn(0);
+ MachineRepresentation rep = return_type.machine_representation();
+ int size = ElementSizeInBytes(rep);
+ info = gasm_->StackSlot(size, size);
+
+ gasm_->Store(StoreRepresentation(rep, kNoWriteBarrier), info,
+ gasm_->Int32Constant(0), vals[0]);
+ }
+
+ Node* call = BuildCallToRuntime(Runtime::kWasmTraceExit, &info, 1);
+ SetSourcePosition(call, position);
+ return call;
+}
+
Node* WasmGraphBuilder::TraceMemoryOperation(bool is_store,
MachineRepresentation rep,
Node* index, uint32_t offset,
wasm::WasmCodePosition position) {
int kAlign = 4; // Ensure that the LSB is 0, such that this looks like a Smi.
- Node* info = graph()->NewNode(
- mcgraph()->machine()->StackSlot(sizeof(wasm::MemoryTracingInfo), kAlign));
+ TNode<RawPtrT> info =
+ gasm_->StackSlot(sizeof(wasm::MemoryTracingInfo), kAlign);
- Node* address = graph()->NewNode(mcgraph()->machine()->Int32Add(),
- Int32Constant(offset), index);
+ Node* address = gasm_->Int32Add(Int32Constant(offset), index);
auto store = [&](int offset, MachineRepresentation rep, Node* data) {
- SetEffect(graph()->NewNode(
- mcgraph()->machine()->Store(StoreRepresentation(rep, kNoWriteBarrier)),
- info, mcgraph()->Int32Constant(offset), data, effect(), control()));
+ gasm_->Store(StoreRepresentation(rep, kNoWriteBarrier), info,
+ gasm_->Int32Constant(offset), data);
};
// Store address, is_store, and mem_rep.
store(offsetof(wasm::MemoryTracingInfo, address),
@@ -3638,7 +3657,9 @@ Node* WasmGraphBuilder::TraceMemoryOperation(bool is_store,
MachineRepresentation::kWord8,
mcgraph()->Int32Constant(static_cast<int>(rep)));
- Node* call = BuildCallToRuntime(Runtime::kWasmTraceMemory, &info, 1);
+ Node* args[] = {info};
+ Node* call =
+ BuildCallToRuntime(Runtime::kWasmTraceMemory, args, arraysize(args));
SetSourcePosition(call, position);
return call;
}
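
For reference, the record that TraceMemoryOperation assembles in the stack slot and hands to Runtime::kWasmTraceMemory has roughly the shape below; the field names follow the offsetof() calls above, while the exact field widths are assumptions:

#include <cstdint>

// Hypothetical mirror of wasm::MemoryTracingInfo (names taken from the
// offsetof() calls above; widths assumed).
struct MemoryTracingInfoSketch {
  uint32_t address;  // offset + index of the traced access
  uint8_t is_store;  // 1 for stores, 0 for loads
  uint8_t mem_rep;   // MachineRepresentation of the access, as a small integer
};
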
@@ -3699,31 +3720,66 @@ LoadKind GetLoadKind(MachineGraph* mcgraph, MachineType memtype,
// TODO(miladfar): Remove SIM once V8_TARGET_BIG_ENDIAN includes the Sim.
#if defined(V8_TARGET_BIG_ENDIAN) || defined(V8_TARGET_ARCH_S390_LE_SIM)
Node* WasmGraphBuilder::LoadTransformBigEndian(
- MachineType memtype, wasm::LoadTransformationKind transform, Node* value) {
+ wasm::ValueType type, MachineType memtype,
+ wasm::LoadTransformationKind transform, Node* index, uint32_t offset,
+ uint32_t alignment, wasm::WasmCodePosition position) {
+#define LOAD_EXTEND(num_lanes, bytes_per_load, replace_lane) \
+ result = graph()->NewNode(mcgraph()->machine()->S128Zero()); \
+ Node* values[num_lanes]; \
+ for (int i = 0; i < num_lanes; i++) { \
+ values[i] = LoadMem(type, memtype, index, offset + i * bytes_per_load, \
+ alignment, position); \
+ if (memtype.IsSigned()) { \
+ /* sign extend */ \
+ values[i] = graph()->NewNode(mcgraph()->machine()->ChangeInt32ToInt64(), \
+ values[i]); \
+ } else { \
+ /* zero extend */ \
+ values[i] = graph()->NewNode( \
+ mcgraph()->machine()->ChangeUint32ToUint64(), values[i]); \
+ } \
+ } \
+ for (int lane = 0; lane < num_lanes; lane++) { \
+ result = graph()->NewNode(mcgraph()->machine()->replace_lane(lane), \
+ result, values[lane]); \
+ }
Node* result;
LoadTransformation transformation = GetLoadTransformation(memtype, transform);
switch (transformation) {
case LoadTransformation::kS8x16LoadSplat: {
- result = graph()->NewNode(mcgraph()->machine()->I8x16Splat(), value);
+ result = LoadMem(type, memtype, index, offset, alignment, position);
+ result = graph()->NewNode(mcgraph()->machine()->I8x16Splat(), result);
break;
}
case LoadTransformation::kI16x8Load8x8S:
- case LoadTransformation::kI16x8Load8x8U:
+ case LoadTransformation::kI16x8Load8x8U: {
+ LOAD_EXTEND(8, 1, I16x8ReplaceLane)
+ break;
+ }
case LoadTransformation::kS16x8LoadSplat: {
- result = graph()->NewNode(mcgraph()->machine()->I16x8Splat(), value);
+ result = LoadMem(type, memtype, index, offset, alignment, position);
+ result = graph()->NewNode(mcgraph()->machine()->I16x8Splat(), result);
break;
}
case LoadTransformation::kI32x4Load16x4S:
- case LoadTransformation::kI32x4Load16x4U:
+ case LoadTransformation::kI32x4Load16x4U: {
+ LOAD_EXTEND(4, 2, I32x4ReplaceLane)
+ break;
+ }
case LoadTransformation::kS32x4LoadSplat: {
- result = graph()->NewNode(mcgraph()->machine()->I32x4Splat(), value);
+ result = LoadMem(type, memtype, index, offset, alignment, position);
+ result = graph()->NewNode(mcgraph()->machine()->I32x4Splat(), result);
break;
}
case LoadTransformation::kI64x2Load32x2S:
- case LoadTransformation::kI64x2Load32x2U:
+ case LoadTransformation::kI64x2Load32x2U: {
+ LOAD_EXTEND(2, 4, I64x2ReplaceLane)
+ break;
+ }
case LoadTransformation::kS64x2LoadSplat: {
- result = graph()->NewNode(mcgraph()->machine()->I64x2Splat(), value);
+ result = LoadMem(type, memtype, index, offset, alignment, position);
+ result = graph()->NewNode(mcgraph()->machine()->I64x2Splat(), result);
break;
}
default:
@@ -3731,6 +3787,7 @@ Node* WasmGraphBuilder::LoadTransformBigEndian(
}
return result;
+#undef LOAD_EXTEND
}
#endif
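
The LOAD_EXTEND helper above emulates the SIMD load-extend instructions with scalar loads plus lane replacement, since (as noted in LoadTransform below) a single wide load on big-endian targets would first need its bytes reversed. As a scalar model, not part of the patch, i16x8.load8x8_s behaves like this:

#include <array>
#include <cstdint>

// Scalar model of i16x8.load8x8_s: eight signed bytes are loaded and each is
// sign-extended into a 16-bit lane.
std::array<int16_t, 8> Load8x8S(const int8_t* mem) {
  std::array<int16_t, 8> lanes{};
  for (int i = 0; i < 8; ++i) {
    lanes[i] = static_cast<int16_t>(mem[i]);  // sign extension, one lane each
  }
  return lanes;
}
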
@@ -3749,8 +3806,8 @@ Node* WasmGraphBuilder::LoadTransform(wasm::ValueType type, MachineType memtype,
// LoadTransform cannot efficiently be executed on BE machines as a
// single operation since loaded bytes need to be reversed first,
// therefore we divide them into separate "load" and "operation" nodes.
- load = LoadMem(type, memtype, index, offset, alignment, position);
- load = LoadTransformBigEndian(memtype, transform, load);
+ load = LoadTransformBigEndian(type, memtype, transform, index, offset,
+ alignment, position);
USE(GetLoadKind);
#else
// Wasm semantics throw on OOB. Introduce explicit bounds check and
@@ -3983,6 +4040,24 @@ Node* WasmGraphBuilder::BuildAsmjsStoreMem(MachineType type, Node* index,
return val;
}
+Node* WasmGraphBuilder::BuildF32x4Ceil(Node* input) {
+ MachineType type = MachineType::Simd128();
+ ExternalReference ref = ExternalReference::wasm_f32x4_ceil();
+ return BuildCFuncInstruction(ref, type, input);
+}
+
+Node* WasmGraphBuilder::BuildF32x4Floor(Node* input) {
+ MachineType type = MachineType::Simd128();
+ ExternalReference ref = ExternalReference::wasm_f32x4_floor();
+ return BuildCFuncInstruction(ref, type, input);
+}
+
+Node* WasmGraphBuilder::BuildF32x4Trunc(Node* input) {
+ MachineType type = MachineType::Simd128();
+ ExternalReference ref = ExternalReference::wasm_f32x4_trunc();
+ return BuildCFuncInstruction(ref, type, input);
+}
+
void WasmGraphBuilder::PrintDebugName(Node* node) {
PrintF("#%d:%s", node->id(), node->op()->mnemonic());
}
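
BuildF32x4Ceil/Floor/Trunc route the new SIMD rounding opcodes through C functions via external references whenever the machine lacks the matching scalar rounding instruction (see the Float32RoundUp/RoundDown/RoundTruncate checks in SimdOp below). What such a fallback computes is simply lane-wise rounding on a 128-bit value passed through memory; the runtime-side implementation sketched here is an assumption:

#include <cmath>
#include <cstring>

// Assumed behaviour of the wasm_f32x4_ceil external reference: lane-wise
// std::ceil over four f32 lanes, in place on a 16-byte buffer.
void F32x4CeilSketch(void* simd128) {
  float lanes[4];
  std::memcpy(lanes, simd128, sizeof(lanes));
  for (float& lane : lanes) lane = std::ceil(lane);
  std::memcpy(simd128, lanes, sizeof(lanes));
}
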
@@ -4143,6 +4218,15 @@ Node* WasmGraphBuilder::SimdOp(wasm::WasmOpcode opcode, Node* const* inputs) {
case wasm::kExprF64x2Pmax:
return graph()->NewNode(mcgraph()->machine()->F64x2Pmax(), inputs[0],
inputs[1]);
+ case wasm::kExprF64x2Ceil:
+ return graph()->NewNode(mcgraph()->machine()->F64x2Ceil(), inputs[0]);
+ case wasm::kExprF64x2Floor:
+ return graph()->NewNode(mcgraph()->machine()->F64x2Floor(), inputs[0]);
+ case wasm::kExprF64x2Trunc:
+ return graph()->NewNode(mcgraph()->machine()->F64x2Trunc(), inputs[0]);
+ case wasm::kExprF64x2NearestInt:
+ return graph()->NewNode(mcgraph()->machine()->F64x2NearestInt(),
+ inputs[0]);
case wasm::kExprF32x4Splat:
return graph()->NewNode(mcgraph()->machine()->F32x4Splat(), inputs[0]);
case wasm::kExprF32x4SConvertI32x4:
@@ -4214,6 +4298,25 @@ Node* WasmGraphBuilder::SimdOp(wasm::WasmOpcode opcode, Node* const* inputs) {
case wasm::kExprF32x4Pmax:
return graph()->NewNode(mcgraph()->machine()->F32x4Pmax(), inputs[0],
inputs[1]);
+ case wasm::kExprF32x4Ceil:
+ // Architecture support for F32x4Ceil and Float32RoundUp is the same.
+ if (!mcgraph()->machine()->Float32RoundUp().IsSupported())
+ return BuildF32x4Ceil(inputs[0]);
+ return graph()->NewNode(mcgraph()->machine()->F32x4Ceil(), inputs[0]);
+ case wasm::kExprF32x4Floor:
+ // Architecture support for F32x4Floor and Float32RoundDown is the same.
+ if (!mcgraph()->machine()->Float32RoundDown().IsSupported())
+ return BuildF32x4Floor(inputs[0]);
+ return graph()->NewNode(mcgraph()->machine()->F32x4Floor(), inputs[0]);
+ case wasm::kExprF32x4Trunc:
+ // Architecture support for F32x4Trunc and Float32RoundTruncate is the
+ // same.
+ if (!mcgraph()->machine()->Float32RoundTruncate().IsSupported())
+ return BuildF32x4Trunc(inputs[0]);
+ return graph()->NewNode(mcgraph()->machine()->F32x4Trunc(), inputs[0]);
+ case wasm::kExprF32x4NearestInt:
+ return graph()->NewNode(mcgraph()->machine()->F32x4NearestInt(),
+ inputs[0]);
case wasm::kExprI64x2Splat:
return graph()->NewNode(mcgraph()->machine()->I64x2Splat(), inputs[0]);
case wasm::kExprI64x2Neg:
@@ -4367,6 +4470,9 @@ Node* WasmGraphBuilder::SimdOp(wasm::WasmOpcode opcode, Node* const* inputs) {
return graph()->NewNode(mcgraph()->machine()->I32x4Abs(), inputs[0]);
case wasm::kExprI32x4BitMask:
return graph()->NewNode(mcgraph()->machine()->I32x4BitMask(), inputs[0]);
+ case wasm::kExprI32x4DotI16x8S:
+ return graph()->NewNode(mcgraph()->machine()->I32x4DotI16x8S(), inputs[0],
+ inputs[1]);
case wasm::kExprI16x8Splat:
return graph()->NewNode(mcgraph()->machine()->I16x8Splat(), inputs[0]);
case wasm::kExprI16x8SConvertI8x16Low:
@@ -4577,22 +4683,22 @@ Node* WasmGraphBuilder::SimdOp(wasm::WasmOpcode opcode, Node* const* inputs) {
case wasm::kExprS128AndNot:
return graph()->NewNode(mcgraph()->machine()->S128AndNot(), inputs[0],
inputs[1]);
- case wasm::kExprS1x2AnyTrue:
- return graph()->NewNode(mcgraph()->machine()->S1x2AnyTrue(), inputs[0]);
- case wasm::kExprS1x2AllTrue:
- return graph()->NewNode(mcgraph()->machine()->S1x2AllTrue(), inputs[0]);
- case wasm::kExprS1x4AnyTrue:
- return graph()->NewNode(mcgraph()->machine()->S1x4AnyTrue(), inputs[0]);
- case wasm::kExprS1x4AllTrue:
- return graph()->NewNode(mcgraph()->machine()->S1x4AllTrue(), inputs[0]);
- case wasm::kExprS1x8AnyTrue:
- return graph()->NewNode(mcgraph()->machine()->S1x8AnyTrue(), inputs[0]);
- case wasm::kExprS1x8AllTrue:
- return graph()->NewNode(mcgraph()->machine()->S1x8AllTrue(), inputs[0]);
- case wasm::kExprS1x16AnyTrue:
- return graph()->NewNode(mcgraph()->machine()->S1x16AnyTrue(), inputs[0]);
- case wasm::kExprS1x16AllTrue:
- return graph()->NewNode(mcgraph()->machine()->S1x16AllTrue(), inputs[0]);
+ case wasm::kExprV64x2AnyTrue:
+ return graph()->NewNode(mcgraph()->machine()->V64x2AnyTrue(), inputs[0]);
+ case wasm::kExprV64x2AllTrue:
+ return graph()->NewNode(mcgraph()->machine()->V64x2AllTrue(), inputs[0]);
+ case wasm::kExprV32x4AnyTrue:
+ return graph()->NewNode(mcgraph()->machine()->V32x4AnyTrue(), inputs[0]);
+ case wasm::kExprV32x4AllTrue:
+ return graph()->NewNode(mcgraph()->machine()->V32x4AllTrue(), inputs[0]);
+ case wasm::kExprV16x8AnyTrue:
+ return graph()->NewNode(mcgraph()->machine()->V16x8AnyTrue(), inputs[0]);
+ case wasm::kExprV16x8AllTrue:
+ return graph()->NewNode(mcgraph()->machine()->V16x8AllTrue(), inputs[0]);
+ case wasm::kExprV8x16AnyTrue:
+ return graph()->NewNode(mcgraph()->machine()->V8x16AnyTrue(), inputs[0]);
+ case wasm::kExprV8x16AllTrue:
+ return graph()->NewNode(mcgraph()->machine()->V8x16AllTrue(), inputs[0]);
case wasm::kExprS8x16Swizzle:
return graph()->NewNode(mcgraph()->machine()->S8x16Swizzle(), inputs[0],
inputs[1]);
@@ -5042,9 +5148,10 @@ Node* WasmGraphBuilder::TableFill(uint32_t table_index, Node* start,
namespace {
-MachineType FieldType(const wasm::StructType* type, uint32_t field_index) {
+MachineType FieldType(const wasm::StructType* type, uint32_t field_index,
+ bool is_signed) {
return MachineType::TypeForRepresentation(
- type->field(field_index).machine_representation());
+ type->field(field_index).machine_representation(), is_signed);
}
Node* FieldOffset(MachineGraph* graph, const wasm::StructType* type,
@@ -5054,19 +5161,43 @@ Node* FieldOffset(MachineGraph* graph, const wasm::StructType* type,
return graph->IntPtrConstant(offset);
}
+// It's guaranteed that struct/array fields are aligned to min(field_size,
+// kTaggedSize), with the latter being 4 or 8 depending on platform and
+// pointer compression. So on our most common configurations, 8-byte types
+// must use unaligned loads/stores.
+Node* LoadWithTaggedAlignment(WasmGraphAssembler* gasm, MachineType type,
+ Node* base, Node* offset) {
+ if (ElementSizeInBytes(type.representation()) > kTaggedSize) {
+ return gasm->LoadUnaligned(type, base, offset);
+ } else {
+ return gasm->Load(type, base, offset);
+ }
+}
+
+// Same alignment considerations as above.
+Node* StoreWithTaggedAlignment(WasmGraphAssembler* gasm, Node* base,
+ Node* offset, Node* value,
+ wasm::ValueType type) {
+ MachineRepresentation rep = type.machine_representation();
+ if (ElementSizeInBytes(rep) > kTaggedSize) {
+ return gasm->StoreUnaligned(rep, base, offset, value);
+ } else {
+ WriteBarrierKind write_barrier =
+ type.is_reference_type() ? kPointerWriteBarrier : kNoWriteBarrier;
+ StoreRepresentation store_rep(rep, write_barrier);
+ return gasm->Store(store_rep, base, offset, value);
+ }
+}
+
// Set a field of a struct, without checking if the struct is null.
// Helper method for StructNew and StructSet.
Node* StoreStructFieldUnchecked(MachineGraph* graph, WasmGraphAssembler* gasm,
Node* struct_object,
const wasm::StructType* type,
uint32_t field_index, Node* value) {
- WriteBarrierKind write_barrier = type->field(field_index).IsReferenceType()
- ? kPointerWriteBarrier
- : kNoWriteBarrier;
- StoreRepresentation rep(type->field(field_index).machine_representation(),
- write_barrier);
- Node* offset = FieldOffset(graph, type, field_index);
- return gasm->Store(rep, struct_object, offset, value);
+ return StoreWithTaggedAlignment(gasm, struct_object,
+ FieldOffset(graph, type, field_index), value,
+ type->field(field_index));
}
Node* ArrayElementOffset(GraphAssembler* gasm, Node* index,
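
The two helpers above encode the alignment rule stated in their comment: a struct or array field is only guaranteed to be aligned to min(field size, kTaggedSize), so any field wider than a tagged slot must go through the unaligned machine operators. A standalone restatement of that decision (kTaggedSize passed in explicitly for illustration):

#include <algorithm>

// Mirrors the size check in LoadWithTaggedAlignment / StoreWithTaggedAlignment.
bool NeedsUnalignedAccess(int field_size_in_bytes, int tagged_size) {
  int guaranteed_alignment = std::min(field_size_in_bytes, tagged_size);
  // e.g. an i64/f64 field with 4-byte tagged slots (pointer compression) is
  // only guaranteed 4-byte alignment, so the 8-byte access may be unaligned.
  return field_size_in_bytes > guaranteed_alignment;
}
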
@@ -5130,10 +5261,6 @@ Node* WasmGraphBuilder::ArrayNew(uint32_t array_index,
graph()->NewNode(mcgraph()->common()->NumberConstant(
element_type.element_size_bytes())),
LOAD_INSTANCE_FIELD(NativeContext, MachineType::TaggedPointer()));
- WriteBarrierKind write_barrier =
- element_type.IsReferenceType() ? kPointerWriteBarrier : kNoWriteBarrier;
- StoreRepresentation rep(element_type.machine_representation(), write_barrier);
-
auto loop = gasm_->MakeLoopLabel(MachineRepresentation::kWord32);
auto done = gasm_->MakeLabel();
Node* start_offset =
@@ -5153,7 +5280,8 @@ Node* WasmGraphBuilder::ArrayNew(uint32_t array_index,
Node* offset = loop.PhiAt(0);
Node* check = gasm_->Uint32LessThan(offset, end_offset);
gasm_->GotoIfNot(check, &done);
- gasm_->Store(rep, a, offset, initial_value);
+ StoreWithTaggedAlignment(gasm_.get(), a, offset, initial_value,
+ type->element_type());
offset = gasm_->Int32Add(offset, element_size);
gasm_->Goto(&loop, offset);
}
@@ -5161,17 +5289,35 @@ Node* WasmGraphBuilder::ArrayNew(uint32_t array_index,
return a;
}
+Node* WasmGraphBuilder::RttCanon(uint32_t type_index) {
+ // This logic is duplicated from module-instantiate.cc.
+ // TODO(jkummerow): Find a nicer solution.
+ int map_index = 0;
+ const std::vector<uint8_t>& type_kinds = env_->module->type_kinds;
+ for (uint32_t i = 0; i < type_index; i++) {
+ if (type_kinds[i] == wasm::kWasmStructTypeCode ||
+ type_kinds[i] == wasm::kWasmArrayTypeCode) {
+ map_index++;
+ }
+ }
+ Node* maps_list =
+ LOAD_INSTANCE_FIELD(ManagedObjectMaps, MachineType::TaggedPointer());
+ return LOAD_FIXED_ARRAY_SLOT_PTR(maps_list, type_index);
+}
+
Node* WasmGraphBuilder::StructGet(Node* struct_object,
const wasm::StructType* struct_type,
uint32_t field_index, CheckForNull null_check,
+ bool is_signed,
wasm::WasmCodePosition position) {
if (null_check == kWithNullCheck) {
TrapIfTrue(wasm::kTrapNullDereference,
gasm_->WordEqual(struct_object, RefNull()), position);
}
- MachineType machine_type = FieldType(struct_type, field_index);
+ MachineType machine_type = FieldType(struct_type, field_index, is_signed);
Node* offset = FieldOffset(mcgraph(), struct_type, field_index);
- return gasm_->Load(machine_type, struct_object, offset);
+ return LoadWithTaggedAlignment(gasm_.get(), machine_type, struct_object,
+ offset);
}
Node* WasmGraphBuilder::StructSet(Node* struct_object,
@@ -5196,14 +5342,16 @@ void WasmGraphBuilder::BoundsCheck(Node* array, Node* index,
Node* WasmGraphBuilder::ArrayGet(Node* array_object,
const wasm::ArrayType* type, Node* index,
+ bool is_signed,
wasm::WasmCodePosition position) {
TrapIfTrue(wasm::kTrapNullDereference,
gasm_->WordEqual(array_object, RefNull()), position);
BoundsCheck(array_object, index, position);
MachineType machine_type = MachineType::TypeForRepresentation(
- type->element_type().machine_representation());
+ type->element_type().machine_representation(), is_signed);
Node* offset = ArrayElementOffset(gasm_.get(), index, type->element_type());
- return gasm_->Load(machine_type, array_object, offset);
+ return LoadWithTaggedAlignment(gasm_.get(), machine_type, array_object,
+ offset);
}
Node* WasmGraphBuilder::ArraySet(Node* array_object,
@@ -5212,13 +5360,9 @@ Node* WasmGraphBuilder::ArraySet(Node* array_object,
TrapIfTrue(wasm::kTrapNullDereference,
gasm_->WordEqual(array_object, RefNull()), position);
BoundsCheck(array_object, index, position);
- WriteBarrierKind write_barrier = type->element_type().IsReferenceType()
- ? kPointerWriteBarrier
- : kNoWriteBarrier;
- StoreRepresentation rep(type->element_type().machine_representation(),
- write_barrier);
Node* offset = ArrayElementOffset(gasm_.get(), index, type->element_type());
- return gasm_->Store(rep, array_object, offset, value);
+ return StoreWithTaggedAlignment(gasm_.get(), array_object, offset, value,
+ type->element_type());
}
Node* WasmGraphBuilder::ArrayLen(Node* array_object,
@@ -5460,17 +5604,16 @@ class WasmWrapperGraphBuilder : public WasmGraphBuilder {
return BuildChangeFloat32ToNumber(node);
case wasm::ValueType::kF64:
return BuildChangeFloat64ToNumber(node);
- case wasm::ValueType::kAnyRef:
- case wasm::ValueType::kFuncRef:
- case wasm::ValueType::kNullRef:
- case wasm::ValueType::kExnRef:
- return node;
case wasm::ValueType::kRef:
case wasm::ValueType::kOptRef:
- case wasm::ValueType::kEqRef:
- // TODO(7748): Implement properly. For now, we just expose the raw
- // object for testing.
+ case wasm::ValueType::kRtt:
+ // TODO(7748): Implement properly for arrays and structs, figure
+ // out what to do for RTTs.
+ // For now, we just expose the raw object for testing.
return node;
+ case wasm::ValueType::kI8:
+ case wasm::ValueType::kI16:
+ UNIMPLEMENTED();
case wasm::ValueType::kStmt:
case wasm::ValueType::kBottom:
UNREACHABLE();
@@ -5521,49 +5664,35 @@ class WasmWrapperGraphBuilder : public WasmGraphBuilder {
Node* FromJS(Node* input, Node* js_context, wasm::ValueType type) {
switch (type.kind()) {
- case wasm::ValueType::kAnyRef:
- case wasm::ValueType::kExnRef:
- return input;
-
- case wasm::ValueType::kNullRef: {
- Node* check = graph()->NewNode(mcgraph()->machine()->WordEqual(), input,
- RefNull());
-
- Diamond null_check(graph(), mcgraph()->common(), check,
- BranchHint::kTrue);
- null_check.Chain(control());
- SetControl(null_check.if_false);
-
- Node* old_effect = effect();
- BuildCallToRuntimeWithContext(Runtime::kWasmThrowTypeError, js_context,
- nullptr, 0);
-
- SetEffectControl(null_check.EffectPhi(old_effect, effect()),
- null_check.merge);
-
- return input;
- }
-
- case wasm::ValueType::kFuncRef: {
- Node* check =
- BuildChangeSmiToInt32(SetEffect(BuildCallToRuntimeWithContext(
- Runtime::kWasmIsValidFuncRefValue, js_context, &input, 1)));
-
- Diamond type_check(graph(), mcgraph()->common(), check,
- BranchHint::kTrue);
- type_check.Chain(control());
- SetControl(type_check.if_false);
-
- Node* old_effect = effect();
- BuildCallToRuntimeWithContext(Runtime::kWasmThrowTypeError, js_context,
- nullptr, 0);
-
- SetEffectControl(type_check.EffectPhi(old_effect, effect()),
- type_check.merge);
-
- return input;
+ case wasm::ValueType::kRef:
+ case wasm::ValueType::kOptRef: {
+ switch (type.heap_type()) {
+ case wasm::kHeapExtern:
+ case wasm::kHeapExn:
+ return input;
+ case wasm::kHeapFunc: {
+ Node* check =
+ BuildChangeSmiToInt32(SetEffect(BuildCallToRuntimeWithContext(
+ Runtime::kWasmIsValidFuncRefValue, js_context, &input, 1)));
+
+ Diamond type_check(graph(), mcgraph()->common(), check,
+ BranchHint::kTrue);
+ type_check.Chain(control());
+ SetControl(type_check.if_false);
+
+ Node* old_effect = effect();
+ BuildCallToRuntimeWithContext(Runtime::kWasmThrowTypeError,
+ js_context, nullptr, 0);
+
+ SetEffectControl(type_check.EffectPhi(old_effect, effect()),
+ type_check.merge);
+
+ return input;
+ }
+ default:
+ UNREACHABLE();
+ }
}
-
case wasm::ValueType::kF32:
return graph()->NewNode(
mcgraph()->machine()->TruncateFloat64ToFloat32(),
@@ -5580,7 +5709,12 @@ class WasmWrapperGraphBuilder : public WasmGraphBuilder {
DCHECK(enabled_features_.has_bigint());
return BuildChangeBigIntToInt64(input, js_context);
- default:
+ case wasm::ValueType::kRtt: // TODO(7748): Implement.
+ case wasm::ValueType::kS128:
+ case wasm::ValueType::kI8:
+ case wasm::ValueType::kI16:
+ case wasm::ValueType::kBottom:
+ case wasm::ValueType::kStmt:
UNREACHABLE();
break;
}
@@ -6471,8 +6605,8 @@ wasm::WasmCompilationResult CompileWasmMathIntrinsic(
const wasm::FunctionSig* sig) {
DCHECK_EQ(1, sig->return_count());
- TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.wasm"),
- "CompileWasmMathIntrinsic");
+ TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.wasm.detailed"),
+ "wasm.CompileWasmMathIntrinsic");
Zone zone(wasm_engine->allocator(), ZONE_NAME);
@@ -6545,8 +6679,8 @@ wasm::WasmCompilationResult CompileWasmImportCallWrapper(
return CompileWasmMathIntrinsic(wasm_engine, kind, sig);
}
- TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.wasm"),
- "CompileWasmImportCallWrapper");
+ TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.wasm.detailed"),
+ "wasm.CompileWasmImportCallWrapper");
//----------------------------------------------------------------------------
// Create the Graph
//----------------------------------------------------------------------------
@@ -6588,7 +6722,8 @@ wasm::WasmCode* CompileWasmCapiCallWrapper(wasm::WasmEngine* wasm_engine,
wasm::NativeModule* native_module,
const wasm::FunctionSig* sig,
Address address) {
- TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.wasm"), "CompileWasmCapiFunction");
+ TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.wasm.detailed"),
+ "wasm.CompileWasmCapiFunction");
Zone zone(wasm_engine->allocator(), ZONE_NAME);
@@ -6683,8 +6818,7 @@ MaybeHandle<Code> CompileJSToJSWrapper(Isolate* isolate,
return code;
}
-MaybeHandle<Code> CompileCWasmEntry(Isolate* isolate,
- const wasm::FunctionSig* sig) {
+Handle<Code> CompileCWasmEntry(Isolate* isolate, const wasm::FunctionSig* sig) {
std::unique_ptr<Zone> zone =
std::make_unique<Zone>(isolate->allocator(), ZONE_NAME);
Graph* graph = new (zone.get()) Graph(zone.get());
@@ -6729,14 +6863,11 @@ MaybeHandle<Code> CompileCWasmEntry(Isolate* isolate,
Code::C_WASM_ENTRY, std::move(name_buffer),
AssemblerOptions::Default(isolate)));
- if (job->ExecuteJob(isolate->counters()->runtime_call_stats()) ==
- CompilationJob::FAILED ||
- job->FinalizeJob(isolate) == CompilationJob::FAILED) {
- return {};
- }
- Handle<Code> code = job->compilation_info()->code();
+ CHECK_NE(job->ExecuteJob(isolate->counters()->runtime_call_stats()),
+ CompilationJob::FAILED);
+ CHECK_NE(job->FinalizeJob(isolate), CompilationJob::FAILED);
- return code;
+ return job->compilation_info()->code();
}
namespace {
@@ -6799,9 +6930,9 @@ wasm::WasmCompilationResult ExecuteTurbofanWasmCompilation(
wasm::WasmEngine* wasm_engine, wasm::CompilationEnv* env,
const wasm::FunctionBody& func_body, int func_index, Counters* counters,
wasm::WasmFeatures* detected) {
- TRACE_EVENT2(TRACE_DISABLED_BY_DEFAULT("v8.wasm"),
- "ExecuteTurbofanCompilation", "func_index", func_index,
- "body_size", func_body.end - func_body.start);
+ TRACE_EVENT2(TRACE_DISABLED_BY_DEFAULT("v8.wasm.detailed"),
+ "wasm.CompileTopTier", "func_index", func_index, "body_size",
+ func_body.end - func_body.start);
Zone zone(wasm_engine->allocator(), ZONE_NAME);
MachineGraph* mcgraph = new (&zone) MachineGraph(
new (&zone) Graph(&zone), new (&zone) CommonOperatorBuilder(&zone),
@@ -6813,18 +6944,17 @@ wasm::WasmCompilationResult ExecuteTurbofanWasmCompilation(
OptimizedCompilationInfo info(GetDebugName(&zone, func_index), &zone,
Code::WASM_FUNCTION);
if (env->runtime_exception_support) {
- info.SetWasmRuntimeExceptionSupport();
+ info.set_wasm_runtime_exception_support();
}
- if (info.trace_turbo_json_enabled()) {
+ if (info.trace_turbo_json()) {
TurboCfgFile tcf;
tcf << AsC1VCompilation(&info);
}
- NodeOriginTable* node_origins = info.trace_turbo_json_enabled()
- ? new (&zone)
- NodeOriginTable(mcgraph->graph())
- : nullptr;
+ NodeOriginTable* node_origins =
+ info.trace_turbo_json() ? new (&zone) NodeOriginTable(mcgraph->graph())
+ : nullptr;
SourcePositionTable* source_positions =
new (mcgraph->zone()) SourcePositionTable(mcgraph->graph());
if (!BuildGraphForWasmFunction(wasm_engine->allocator(), env, func_body,
@@ -6976,18 +7106,19 @@ CallDescriptor* GetWasmCallDescriptor(
CallDescriptor::Flags flags =
use_retpoline ? CallDescriptor::kRetpoline : CallDescriptor::kNoFlags;
- return new (zone) CallDescriptor( // --
- descriptor_kind, // kind
- target_type, // target MachineType
- target_loc, // target location
- locations.Build(), // location_sig
- parameter_slots, // stack_parameter_count
- compiler::Operator::kNoProperties, // properties
- kCalleeSaveRegisters, // callee-saved registers
- kCalleeSaveFPRegisters, // callee-saved fp regs
- flags, // flags
- "wasm-call", // debug name
- 0, // allocatable registers
+ return new (zone) CallDescriptor( // --
+ descriptor_kind, // kind
+ target_type, // target MachineType
+ target_loc, // target location
+ locations.Build(), // location_sig
+ parameter_slots, // stack_parameter_count
+ compiler::Operator::kNoProperties, // properties
+ kCalleeSaveRegisters, // callee-saved registers
+ kCalleeSaveFPRegisters, // callee-saved fp regs
+ flags, // flags
+ "wasm-call", // debug name
+ StackArgumentOrder::kDefault, // order of the arguments in the stack
+ 0, // allocatable registers
rets.NumStackSlots() - parameter_slots); // stack_return_count
}
@@ -7065,6 +7196,7 @@ CallDescriptor* ReplaceTypeInCallDescriptorWith(
call_descriptor->CalleeSavedFPRegisters(), // callee-saved fp regs
call_descriptor->flags(), // flags
call_descriptor->debug_name(), // debug name
+ call_descriptor->GetStackArgumentOrder(), // stack order
call_descriptor->AllocatableRegisters(), // allocatable registers
rets.NumStackSlots() - params.NumStackSlots()); // stack_return_count
}
diff --git a/chromium/v8/src/compiler/wasm-compiler.h b/chromium/v8/src/compiler/wasm-compiler.h
index 6d662e674d8..d72c2bcab5f 100644
--- a/chromium/v8/src/compiler/wasm-compiler.h
+++ b/chromium/v8/src/compiler/wasm-compiler.h
@@ -138,7 +138,8 @@ enum CWasmEntryParameters {
// Compiles a stub with C++ linkage, to be called from Execution::CallWasm,
// which knows how to feed it its parameters.
-MaybeHandle<Code> CompileCWasmEntry(Isolate*, const wasm::FunctionSig*);
+V8_EXPORT_PRIVATE Handle<Code> CompileCWasmEntry(Isolate*,
+ const wasm::FunctionSig*);
// Values from the instance object are cached between Wasm-level function calls.
// This struct allows the SSA environment handling this cache to be defined
@@ -250,6 +251,10 @@ class WasmGraphBuilder {
Node* arr[] = {fst, more...};
return Return(ArrayVector(arr));
}
+
+ Node* TraceFunctionEntry(wasm::WasmCodePosition position);
+ Node* TraceFunctionExit(Vector<Node*> vals, wasm::WasmCodePosition position);
+
Node* Trap(wasm::TrapReason reason, wasm::WasmCodePosition position);
Node* CallDirect(uint32_t index, Vector<Node*> args, Vector<Node*> rets,
@@ -284,9 +289,10 @@ class WasmGraphBuilder {
uint32_t offset, uint32_t alignment,
wasm::WasmCodePosition position);
#if defined(V8_TARGET_BIG_ENDIAN) || defined(V8_TARGET_ARCH_S390_LE_SIM)
- Node* LoadTransformBigEndian(MachineType memtype,
+ Node* LoadTransformBigEndian(wasm::ValueType type, MachineType memtype,
wasm::LoadTransformationKind transform,
- Node* value);
+ Node* index, uint32_t offset, uint32_t alignment,
+ wasm::WasmCodePosition position);
#endif
Node* LoadTransform(wasm::ValueType type, MachineType memtype,
wasm::LoadTransformationKind transform, Node* index,
@@ -316,7 +322,7 @@ class WasmGraphBuilder {
void GetGlobalBaseAndOffset(MachineType mem_type, const wasm::WasmGlobal&,
Node** base_node, Node** offset_node);
- void GetBaseAndOffsetForImportedMutableAnyRefGlobal(
+ void GetBaseAndOffsetForImportedMutableExternRefGlobal(
const wasm::WasmGlobal& global, Node** base, Node** offset);
// Utilities to manipulate sets of instance cache nodes.
@@ -378,7 +384,7 @@ class WasmGraphBuilder {
Node* StructNew(uint32_t struct_index, const wasm::StructType* type,
Vector<Node*> fields);
Node* StructGet(Node* struct_object, const wasm::StructType* struct_type,
- uint32_t field_index, CheckForNull null_check,
+ uint32_t field_index, CheckForNull null_check, bool is_signed,
wasm::WasmCodePosition position);
Node* StructSet(Node* struct_object, const wasm::StructType* struct_type,
uint32_t field_index, Node* value, CheckForNull null_check,
@@ -387,10 +393,11 @@ class WasmGraphBuilder {
Node* length, Node* initial_value);
void BoundsCheck(Node* array, Node* index, wasm::WasmCodePosition position);
Node* ArrayGet(Node* array_object, const wasm::ArrayType* type, Node* index,
- wasm::WasmCodePosition position);
+ bool is_signed, wasm::WasmCodePosition position);
Node* ArraySet(Node* array_object, const wasm::ArrayType* type, Node* index,
Node* value, wasm::WasmCodePosition position);
Node* ArrayLen(Node* array_object, wasm::WasmCodePosition position);
+ Node* RttCanon(uint32_t type_index);
bool has_simd() const { return has_simd_; }
@@ -547,6 +554,11 @@ class WasmGraphBuilder {
Node* BuildAsmjsLoadMem(MachineType type, Node* index);
Node* BuildAsmjsStoreMem(MachineType type, Node* index, Node* val);
+ // Wasm SIMD.
+ Node* BuildF32x4Ceil(Node* input);
+ Node* BuildF32x4Floor(Node* input);
+ Node* BuildF32x4Trunc(Node* input);
+
void BuildEncodeException32BitValue(Node* values_array, uint32_t* index,
Node* value);
Node* BuildDecodeException32BitValue(Node* values_array, uint32_t* index);
diff --git a/chromium/v8/src/d8/cov.cc b/chromium/v8/src/d8/cov.cc
new file mode 100644
index 00000000000..47e2af599c0
--- /dev/null
+++ b/chromium/v8/src/d8/cov.cc
@@ -0,0 +1,74 @@
+// Copyright 2020 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/d8/cov.h"
+
+#include <fcntl.h>
+#include <inttypes.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <sys/mman.h>
+#include <sys/stat.h>
+#include <sys/wait.h>
+#include <unistd.h>
+
+#define SHM_SIZE 0x100000
+#define MAX_EDGES ((SHM_SIZE - 4) * 8)
+
+struct shmem_data {
+ uint32_t num_edges;
+ unsigned char edges[];
+};
+
+struct shmem_data* shmem;
+
+uint32_t *__edges_start, *__edges_stop;
+void __sanitizer_cov_reset_edgeguards() {
+ uint32_t N = 0;
+ for (uint32_t* x = __edges_start; x < __edges_stop && N < MAX_EDGES; x++)
+ *x = ++N;
+}
+
+extern "C" void __sanitizer_cov_trace_pc_guard_init(uint32_t* start,
+ uint32_t* stop) {
+ // Map the shared memory region
+ const char* shm_key = getenv("SHM_ID");
+ if (!shm_key) {
+ puts("[COV] no shared memory bitmap available, skipping");
+ shmem = (struct shmem_data*)malloc(SHM_SIZE);
+ } else {
+ int fd = shm_open(shm_key, O_RDWR, S_IREAD | S_IWRITE);
+ if (fd <= -1) {
+ fprintf(stderr, "[COV] Failed to open shared memory region\n");
+ _exit(-1);
+ }
+
+ shmem = (struct shmem_data*)mmap(0, SHM_SIZE, PROT_READ | PROT_WRITE,
+ MAP_SHARED, fd, 0);
+ if (shmem == MAP_FAILED) {
+ fprintf(stderr, "[COV] Failed to mmap shared memory region\n");
+ _exit(-1);
+ }
+ }
+
+ __edges_start = start;
+ __edges_stop = stop;
+ __sanitizer_cov_reset_edgeguards();
+
+ shmem->num_edges = static_cast<uint32_t>(stop - start);
+ printf("[COV] edge counters initialized. Shared memory: %s with %u edges\n",
+ shm_key, shmem->num_edges);
+}
+
+extern "C" void __sanitizer_cov_trace_pc_guard(uint32_t* guard) {
+ // There's a small race condition here: if this function executes in two
+ // threads for the same edge at the same time, the first thread might disable
+ // the edge (by setting the guard to zero) before the second thread fetches
+ // the guard value (and thus the index). However, our instrumentation ignores
+ // the first edge (see libcoverage.c) and so the race is unproblematic.
+ uint32_t index = *guard;
+ shmem->edges[index / 8] |= 1 << (index % 8);
+ *guard = 0;
+}
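
The guard callback above records every executed edge as one bit in the shared bitmap. On the consumer side (Fuzzilli, or anything else attached to the same shared memory region), checking whether edge i was hit is the mirror-image bit test; the sketch below assumes the same shmem_data layout:

#include <cstdint>

// Same layout as shmem_data above: a 32-bit edge count followed by the
// bit-per-edge array.
struct ShmemDataSketch {
  uint32_t num_edges;
  unsigned char edges[];
};

// Returns true if __sanitizer_cov_trace_pc_guard marked edge `index` as hit.
bool EdgeWasHit(const ShmemDataSketch* shmem, uint32_t index) {
  if (index >= shmem->num_edges) return false;
  return (shmem->edges[index / 8] >> (index % 8)) & 1;
}
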
diff --git a/chromium/v8/src/d8/cov.h b/chromium/v8/src/d8/cov.h
new file mode 100644
index 00000000000..d2d26752337
--- /dev/null
+++ b/chromium/v8/src/d8/cov.h
@@ -0,0 +1,15 @@
+// Copyright 2020 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_D8_COV_H_
+#define V8_D8_COV_H_
+
+// This file defines the coverage-handling functions needed by the Fuzzilli
+// fuzzer. It communicates the coverage bitmap with Fuzzilli through shared
+// memory.
+// https://clang.llvm.org/docs/SanitizerCoverage.html
+
+void __sanitizer_cov_reset_edgeguards();
+
+#endif // V8_D8_COV_H_
diff --git a/chromium/v8/src/d8/d8-platforms.cc b/chromium/v8/src/d8/d8-platforms.cc
index 5ea43b14760..8f528e2d43c 100644
--- a/chromium/v8/src/d8/d8-platforms.cc
+++ b/chromium/v8/src/d8/d8-platforms.cc
@@ -102,7 +102,7 @@ class DelayedTasksPlatform : public Platform {
DCHECK_NOT_NULL(platform_);
}
- ~DelayedTasksPlatform() {
+ ~DelayedTasksPlatform() override {
// When the platform shuts down, all task runners must be freed.
DCHECK_EQ(0, delayed_task_runners_.size());
}
diff --git a/chromium/v8/src/d8/d8-posix.cc b/chromium/v8/src/d8/d8-posix.cc
index 1c9f506641c..ba6356e4479 100644
--- a/chromium/v8/src/d8/d8-posix.cc
+++ b/chromium/v8/src/d8/d8-posix.cc
@@ -761,20 +761,18 @@ char* Shell::ReadCharsFromTcpPort(const char* name, int* size_out) {
void Shell::AddOSMethods(Isolate* isolate, Local<ObjectTemplate> os_templ) {
if (options.enable_os_system) {
- os_templ->Set(String::NewFromUtf8Literal(isolate, "system"),
- FunctionTemplate::New(isolate, System));
+ os_templ->Set(isolate, "system", FunctionTemplate::New(isolate, System));
}
- os_templ->Set(String::NewFromUtf8Literal(isolate, "chdir"),
+ os_templ->Set(isolate, "chdir",
FunctionTemplate::New(isolate, ChangeDirectory));
- os_templ->Set(String::NewFromUtf8Literal(isolate, "setenv"),
+ os_templ->Set(isolate, "setenv",
FunctionTemplate::New(isolate, SetEnvironment));
- os_templ->Set(String::NewFromUtf8Literal(isolate, "unsetenv"),
+ os_templ->Set(isolate, "unsetenv",
FunctionTemplate::New(isolate, UnsetEnvironment));
- os_templ->Set(String::NewFromUtf8Literal(isolate, "umask"),
- FunctionTemplate::New(isolate, SetUMask));
- os_templ->Set(String::NewFromUtf8Literal(isolate, "mkdirp"),
+ os_templ->Set(isolate, "umask", FunctionTemplate::New(isolate, SetUMask));
+ os_templ->Set(isolate, "mkdirp",
FunctionTemplate::New(isolate, MakeDirectory));
- os_templ->Set(String::NewFromUtf8Literal(isolate, "rmdir"),
+ os_templ->Set(isolate, "rmdir",
FunctionTemplate::New(isolate, RemoveDirectory));
}
diff --git a/chromium/v8/src/d8/d8.cc b/chromium/v8/src/d8/d8.cc
index fe1bb58e4a9..117df1cc526 100644
--- a/chromium/v8/src/d8/d8.cc
+++ b/chromium/v8/src/d8/d8.cc
@@ -12,6 +12,7 @@
#include <iomanip>
#include <iterator>
#include <string>
+#include <tuple>
#include <unordered_map>
#include <utility>
#include <vector>
@@ -35,7 +36,6 @@
#include "src/d8/d8.h"
#include "src/debug/debug-interface.h"
#include "src/deoptimizer/deoptimizer.h"
-#include "src/diagnostics/basic-block-profiler.h"
#include "src/execution/vm-state-inl.h"
#include "src/handles/maybe-handles.h"
#include "src/init/v8.h"
@@ -55,6 +55,10 @@
#include "src/utils/utils.h"
#include "src/wasm/wasm-engine.h"
+#ifdef V8_FUZZILLI
+#include "src/d8/cov.h"
+#endif // V8_FUZZILLI
+
#ifdef V8_USE_PERFETTO
#include "perfetto/tracing.h"
#endif // V8_USE_PERFETTO
@@ -92,6 +96,19 @@ namespace {
const int kMB = 1024 * 1024;
+#ifdef V8_FUZZILLI
+// REPRL = read-eval-print-loop
+// These file descriptors are opened by Fuzzilli when it uses fork & execve to
+// run V8.
+#define REPRL_CRFD 100  // Control read file descriptor
+#define REPRL_CWFD 101  // Control write file descriptor
+#define REPRL_DRFD 102  // Data read file descriptor
+#define REPRL_DWFD 103  // Data write file descriptor
+bool fuzzilli_reprl = true;
+#else
+bool fuzzilli_reprl = false;
+#endif // V8_FUZZILLI
+
const int kMaxSerializerMemoryUsage =
1 * kMB; // Arbitrary maximum for testing.
@@ -417,7 +434,7 @@ static platform::tracing::TraceConfig* CreateTraceConfigFromJSON(
class ExternalOwningOneByteStringResource
: public String::ExternalOneByteStringResource {
public:
- ExternalOwningOneByteStringResource() {}
+ ExternalOwningOneByteStringResource() = default;
ExternalOwningOneByteStringResource(
std::unique_ptr<base::OS::MemoryMappedFile> file)
: file_(std::move(file)) {}
@@ -444,9 +461,11 @@ std::unordered_set<std::shared_ptr<Worker>> Shell::running_workers_;
std::atomic<bool> Shell::script_executed_{false};
base::LazyMutex Shell::isolate_status_lock_;
std::map<v8::Isolate*, bool> Shell::isolate_status_;
+std::map<v8::Isolate*, int> Shell::isolate_running_streaming_tasks_;
base::LazyMutex Shell::cached_code_mutex_;
std::map<std::string, std::unique_ptr<ScriptCompiler::CachedData>>
Shell::cached_code_map_;
+std::atomic<int> Shell::unhandled_promise_rejections_{0};
Global<Context> Shell::evaluation_context_;
ArrayBuffer::Allocator* Shell::array_buffer_allocator;
@@ -486,6 +505,61 @@ void Shell::StoreInCodeCache(Isolate* isolate, Local<Value> source,
ScriptCompiler::CachedData::BufferOwned));
}
+// Dummy external source stream which returns the whole source in one go.
+// TODO(leszeks): Also test chunking the data.
+class DummySourceStream : public v8::ScriptCompiler::ExternalSourceStream {
+ public:
+ explicit DummySourceStream(Local<String> source) : done_(false) {
+ source_buffer_ = Utils::OpenHandle(*source)->ToCString(
+ i::ALLOW_NULLS, i::FAST_STRING_TRAVERSAL, &source_length_);
+ }
+
+ size_t GetMoreData(const uint8_t** src) override {
+ if (done_) {
+ return 0;
+ }
+ *src = reinterpret_cast<uint8_t*>(source_buffer_.release());
+ done_ = true;
+
+ return source_length_;
+ }
+
+ private:
+ int source_length_;
+ std::unique_ptr<char[]> source_buffer_;
+ bool done_;
+};
+
+class StreamingCompileTask final : public v8::Task {
+ public:
+ StreamingCompileTask(Isolate* isolate,
+ v8::ScriptCompiler::StreamedSource* streamed_source)
+ : isolate_(isolate),
+ script_streaming_task_(v8::ScriptCompiler::StartStreamingScript(
+ isolate, streamed_source)) {
+ Shell::NotifyStartStreamingTask(isolate_);
+ }
+
+ void Run() override {
+ script_streaming_task_->Run();
+    // Signal that the task has finished, using the foreground task runner to
+    // wake the message loop.
+ Shell::PostForegroundTask(isolate_, std::make_unique<FinishTask>(isolate_));
+ }
+
+ private:
+ class FinishTask final : public v8::Task {
+ public:
+ explicit FinishTask(Isolate* isolate) : isolate_(isolate) {}
+ void Run() final { Shell::NotifyFinishStreamingTask(isolate_); }
+ Isolate* isolate_;
+ };
+
+ Isolate* isolate_;
+ std::unique_ptr<v8::ScriptCompiler::ScriptStreamingTask>
+ script_streaming_task_;
+};
+
// Executes a string within the current v8 context.
bool Shell::ExecuteString(Isolate* isolate, Local<String> source,
Local<Value> name, PrintResult print_result,
@@ -512,7 +586,12 @@ bool Shell::ExecuteString(Isolate* isolate, Local<String> source,
i::Handle<i::Script> script = parse_info.CreateScript(
i_isolate, str, i::kNullMaybeHandle, ScriptOriginOptions());
- if (!i::parsing::ParseProgram(&parse_info, script, i_isolate)) {
+ if (!i::parsing::ParseProgram(&parse_info, script, i_isolate,
+ i::parsing::ReportStatisticsMode::kYes)) {
+ parse_info.pending_error_handler()->PrepareErrors(
+ i_isolate, parse_info.ast_value_factory());
+ parse_info.pending_error_handler()->ReportErrors(i_isolate, script);
+
fprintf(stderr, "Failed parsing\n");
return false;
}
@@ -547,6 +626,19 @@ bool Shell::ExecuteString(Isolate* isolate, Local<String> source,
maybe_script = ScriptCompiler::Compile(
context, &script_source, ScriptCompiler::kNoCompileOptions);
}
+ } else if (options.streaming_compile) {
+ v8::ScriptCompiler::StreamedSource streamed_source(
+ std::make_unique<DummySourceStream>(source),
+ v8::ScriptCompiler::StreamedSource::UTF8);
+
+ PostBlockingBackgroundTask(
+ std::make_unique<StreamingCompileTask>(isolate, &streamed_source));
+
+ // Pump the loop until the streaming task completes.
+ Shell::CompleteMessageLoop(isolate);
+
+ maybe_script =
+ ScriptCompiler::Compile(context, &streamed_source, source, origin);
} else {
ScriptCompiler::Source script_source(source, origin);
maybe_script = ScriptCompiler::Compile(context, &script_source,
@@ -575,7 +667,10 @@ bool Shell::ExecuteString(Isolate* isolate, Local<String> source,
StoreInCodeCache(isolate, source, cached_data);
delete cached_data;
}
- if (process_message_queue && !EmptyMessageQueues(isolate)) success = false;
+ if (process_message_queue) {
+ if (!EmptyMessageQueues(isolate)) success = false;
+ if (!HandleUnhandledPromiseRejections(isolate)) success = false;
+ }
data->realm_current_ = data->realm_switch_;
}
Local<Value> result;
@@ -1071,6 +1166,45 @@ MaybeLocal<Context> PerIsolateData::GetTimeoutContext() {
return result;
}
+void PerIsolateData::RemoveUnhandledPromise(Local<Promise> promise) {
+ // Remove handled promises from the list
+ DCHECK_EQ(promise->GetIsolate(), isolate_);
+ for (auto it = unhandled_promises_.begin(); it != unhandled_promises_.end();
+ ++it) {
+ v8::Local<v8::Promise> unhandled_promise = std::get<0>(*it).Get(isolate_);
+ if (unhandled_promise == promise) {
+ unhandled_promises_.erase(it--);
+ }
+ }
+}
+
+void PerIsolateData::AddUnhandledPromise(Local<Promise> promise,
+ Local<Message> message,
+ Local<Value> exception) {
+ DCHECK_EQ(promise->GetIsolate(), isolate_);
+ unhandled_promises_.push_back(
+ std::make_tuple(v8::Global<v8::Promise>(isolate_, promise),
+ v8::Global<v8::Message>(isolate_, message),
+ v8::Global<v8::Value>(isolate_, exception)));
+}
+
+size_t PerIsolateData::GetUnhandledPromiseCount() {
+ return unhandled_promises_.size();
+}
+
+int PerIsolateData::HandleUnhandledPromiseRejections() {
+ int unhandled_promises_count = 0;
+ v8::HandleScope scope(isolate_);
+ for (auto& tuple : unhandled_promises_) {
+ Local<v8::Message> message = std::get<1>(tuple).Get(isolate_);
+ Local<v8::Value> value = std::get<2>(tuple).Get(isolate_);
+ Shell::ReportException(isolate_, message, value);
+ unhandled_promises_count++;
+ }
+ unhandled_promises_.clear();
+ return unhandled_promises_count;
+}
+
PerIsolateData::RealmScope::RealmScope(PerIsolateData* data) : data_(data) {
data_->realm_count_ = 1;
data_->realm_current_ = 0;
@@ -1326,8 +1460,10 @@ void Shell::RealmEval(const v8::FunctionCallbackInfo<v8::Value>& args) {
Throw(args.GetIsolate(), "Invalid argument");
return;
}
+ ScriptOrigin origin(String::NewFromUtf8Literal(isolate, "(d8)",
+ NewStringType::kInternalized));
ScriptCompiler::Source script_source(
- args[1]->ToString(isolate->GetCurrentContext()).ToLocalChecked());
+ args[1]->ToString(isolate->GetCurrentContext()).ToLocalChecked(), origin);
Local<UnboundScript> script;
if (!ScriptCompiler::CompileUnboundScript(isolate, &script_source)
.ToLocal(&script)) {
@@ -1694,6 +1830,57 @@ void Shell::Version(const v8::FunctionCallbackInfo<v8::Value>& args) {
.ToLocalChecked());
}
+#ifdef V8_FUZZILLI
+
+// We have to assume that the fuzzer will be able to call this function e.g. by
+// enumerating the properties of the global object and eval'ing them. As such
+// this function is implemented in a way that requires passing a magic value
+// as the first argument (the idea being that the fuzzer won't be able to
+// generate this value), which then also acts as a selector for the operation
+// to perform.
+void Shell::Fuzzilli(const v8::FunctionCallbackInfo<v8::Value>& args) {
+ HandleScope handle_scope(args.GetIsolate());
+
+ String::Utf8Value operation(args.GetIsolate(), args[0]);
+ if (*operation == nullptr) {
+ return;
+ }
+
+ if (strcmp(*operation, "FUZZILLI_CRASH") == 0) {
+ auto arg = args[1]
+ ->Int32Value(args.GetIsolate()->GetCurrentContext())
+ .FromMaybe(0);
+ switch (arg) {
+ case 0:
+ V8_IMMEDIATE_CRASH();
+ break;
+ case 1:
+ CHECK(0);
+ break;
+ default:
+ DCHECK(false);
+ break;
+ }
+ } else if (strcmp(*operation, "FUZZILLI_PRINT") == 0) {
+ static FILE* fzliout = fdopen(REPRL_DWFD, "w");
+ if (!fzliout) {
+ fprintf(
+ stderr,
+ "Fuzzer output channel not available, printing to stdout instead\n");
+ fzliout = stdout;
+ }
+
+ String::Utf8Value string(args.GetIsolate(), args[1]);
+ if (*string == nullptr) {
+ return;
+ }
+ fprintf(fzliout, "%s\n", *string);
+ fflush(fzliout);
+ }
+}
+
+#endif // V8_FUZZILLI
+
void Shell::ReportException(Isolate* isolate, Local<v8::Message> message,
Local<v8::Value> exception_obj) {
HandleScope handle_scope(isolate);
@@ -1958,6 +2145,13 @@ Local<ObjectTemplate> Shell::CreateGlobalTemplate(Isolate* isolate) {
AddOSMethods(isolate, os_templ);
global_template->Set(isolate, "os", os_templ);
+#ifdef V8_FUZZILLI
+ global_template->Set(
+ String::NewFromUtf8(isolate, "fuzzilli", NewStringType::kNormal)
+ .ToLocalChecked(),
+ FunctionTemplate::New(isolate, Fuzzilli), PropertyAttribute::DontEnum);
+#endif // V8_FUZZILLI
+
if (i::FLAG_expose_async_hooks) {
Local<ObjectTemplate> async_hooks_templ = ObjectTemplate::New(isolate);
async_hooks_templ->Set(
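
Once registered (with DontEnum) on the global template above, calls from a fuzzed script take the form fuzzilli("FUZZILLI_CRASH", n) to exercise the crash/CHECK/DCHECK paths and fuzzilli("FUZZILLI_PRINT", value) to write through the REPRL data channel; the string selectors correspond to the strcmp checks in Shell::Fuzzilli, and the argument values here are illustrative only.
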
@@ -2009,8 +2203,50 @@ static void PrintMessageCallback(Local<Message> message, Local<Value> error) {
printf("%s:%i: %s\n", filename_string, linenum, msg_string);
}
+void Shell::PromiseRejectCallback(v8::PromiseRejectMessage data) {
+ if (options.ignore_unhandled_promises) return;
+ if (data.GetEvent() == v8::kPromiseRejectAfterResolved ||
+ data.GetEvent() == v8::kPromiseResolveAfterResolved) {
+ // Ignore reject/resolve after resolved.
+ return;
+ }
+ v8::Local<v8::Promise> promise = data.GetPromise();
+ v8::Isolate* isolate = promise->GetIsolate();
+ PerIsolateData* isolate_data = PerIsolateData::Get(isolate);
+
+ if (data.GetEvent() == v8::kPromiseHandlerAddedAfterReject) {
+ isolate_data->RemoveUnhandledPromise(promise);
+ return;
+ }
+
+ i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(isolate);
+ bool capture_exceptions =
+ i_isolate->get_capture_stack_trace_for_uncaught_exceptions();
+ isolate->SetCaptureStackTraceForUncaughtExceptions(true);
+ v8::Local<Value> exception = data.GetValue();
+ v8::Local<Message> message;
+ // Assume that all objects are stack-traces.
+ if (exception->IsObject()) {
+ message = v8::Exception::CreateMessage(isolate, exception);
+ }
+ if (!exception->IsNativeError() &&
+ (message.IsEmpty() || message->GetStackTrace().IsEmpty())) {
+ // If there is no real Error object, manually throw and catch a stack trace.
+ v8::TryCatch try_catch(isolate);
+ try_catch.SetVerbose(true);
+ isolate->ThrowException(v8::Exception::Error(
+ v8::String::NewFromUtf8Literal(isolate, "Unhandled Promise.")));
+ message = try_catch.Message();
+ exception = try_catch.Exception();
+ }
+ isolate->SetCaptureStackTraceForUncaughtExceptions(capture_exceptions);
+
+ isolate_data->AddUnhandledPromise(promise, message, exception);
+}
+
void Shell::Initialize(Isolate* isolate, D8Console* console,
bool isOnMainThread) {
+ isolate->SetPromiseRejectCallback(PromiseRejectCallback);
if (isOnMainThread) {
// Set up counters
if (i::FLAG_map_counters[0] != '\0') {
@@ -2029,6 +2265,19 @@ void Shell::Initialize(Isolate* isolate, D8Console* console,
isolate->SetHostInitializeImportMetaObjectCallback(
Shell::HostInitializeImportMetaObject);
+#ifdef V8_FUZZILLI
+ // Let the parent process (Fuzzilli) know we are ready.
+ char helo[] = "HELO";
+ if (write(REPRL_CWFD, helo, 4) != 4 || read(REPRL_CRFD, helo, 4) != 4) {
+ fuzzilli_reprl = false;
+ }
+
+ if (memcmp(helo, "HELO", 4) != 0) {
+ fprintf(stderr, "Invalid response from parent\n");
+ _exit(-1);
+ }
+#endif // V8_FUZZILLI
+
debug::SetConsoleDelegate(isolate, console);
}
@@ -2167,11 +2416,6 @@ void Shell::WriteLcovData(v8::Isolate* isolate, const char* file) {
}
void Shell::OnExit(v8::Isolate* isolate) {
- // Dump basic block profiling data.
- if (i::FLAG_turbo_profiling) {
- i::BasicBlockProfiler* profiler = i::BasicBlockProfiler::Get();
- i::StdoutStream{} << *profiler;
- }
isolate->Dispose();
if (i::FLAG_dump_counters || i::FLAG_dump_counters_nvp) {
@@ -2518,6 +2762,36 @@ bool ends_with(const char* input, const char* suffix) {
bool SourceGroup::Execute(Isolate* isolate) {
bool success = true;
+#ifdef V8_FUZZILLI
+ HandleScope handle_scope(isolate);
+ Local<String> file_name =
+ String::NewFromUtf8(isolate, "fuzzcode.js", NewStringType::kNormal)
+ .ToLocalChecked();
+
+ size_t script_size;
+ CHECK_EQ(read(REPRL_CRFD, &script_size, 8), 8);
+ char* buffer = new char[script_size + 1];
+ char* ptr = buffer;
+ size_t remaining = script_size;
+ while (remaining > 0) {
+ ssize_t rv = read(REPRL_DRFD, ptr, remaining);
+ CHECK_GE(rv, 0);
+ remaining -= rv;
+ ptr += rv;
+ }
+ buffer[script_size] = 0;
+
+ Local<String> source =
+ String::NewFromUtf8(isolate, buffer, NewStringType::kNormal)
+ .ToLocalChecked();
+ delete[] buffer;
+ Shell::set_script_executed();
+ if (!Shell::ExecuteString(isolate, source, file_name, Shell::kNoPrintResult,
+ Shell::kReportExceptions,
+ Shell::kNoProcessMessageQueue)) {
+ return false;
+ }
+#endif // V8_FUZZILLI
for (int i = begin_offset_; i < end_offset_; ++i) {
const char* arg = argv_[i];
if (strcmp(arg, "-e") == 0 && i + 1 < end_offset_) {
@@ -2780,9 +3054,7 @@ void Worker::ExecuteInThread() {
in_semaphore_.Wait();
std::unique_ptr<SerializationData> data;
if (!in_queue_.Dequeue(&data)) continue;
- if (!data) {
- break;
- }
+ if (!data) break;
v8::TryCatch try_catch(isolate);
try_catch.SetVerbose(true);
HandleScope scope(isolate);
@@ -2795,6 +3067,7 @@ void Worker::ExecuteInThread() {
USE(result);
}
}
+ // TODO(cbruni): Check for unhandled promises here.
}
}
}
@@ -2895,6 +3168,9 @@ bool Shell::SetOptions(int argc, char* argv[]) {
// Ignore any -f flags for compatibility with other stand-alone
// JavaScript engines.
continue;
+ } else if (strcmp(argv[i], "--ignore-unhandled-promises") == 0) {
+ options.ignore_unhandled_promises = true;
+ argv[i] = nullptr;
} else if (strcmp(argv[i], "--isolate") == 0) {
options.num_isolates++;
} else if (strcmp(argv[i], "--throws") == 0) {
@@ -2935,6 +3211,13 @@ bool Shell::SetOptions(int argc, char* argv[]) {
return false;
}
argv[i] = nullptr;
+ } else if (strcmp(argv[i], "--streaming-compile") == 0) {
+ options.streaming_compile = true;
+ argv[i] = nullptr;
+ } else if ((strcmp(argv[i], "--no-streaming-compile") == 0) ||
+ (strcmp(argv[i], "--nostreaming-compile") == 0)) {
+ options.streaming_compile = false;
+ argv[i] = nullptr;
} else if (strcmp(argv[i], "--enable-tracing") == 0) {
options.trace_enabled = true;
argv[i] = nullptr;
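
With the flag parsed here, the streaming path added earlier (DummySourceStream plus StreamingCompileTask) can be exercised end to end, e.g. d8 --streaming-compile test.js (file name illustrative): compilation starts on a background worker thread, CompleteMessageLoop keeps pumping while isolate_running_streaming_tasks_ is non-zero, and the final ScriptCompiler::Compile call consumes the streamed source.
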
@@ -3045,6 +3328,7 @@ int Shell::RunMain(Isolate* isolate, bool last_run) {
PerIsolateData::RealmScope realm_scope(PerIsolateData::Get(isolate));
if (!options.isolate_sources[0].Execute(isolate)) success = false;
if (!CompleteMessageLoop(isolate)) success = false;
+ if (!HandleUnhandledPromiseRejections(isolate)) success = false;
}
if (!use_existing_context) {
DisposeModuleEmbedderData(context);
@@ -3072,6 +3356,11 @@ int Shell::RunMain(Isolate* isolate, bool last_run) {
}
}
WaitForRunningWorkers();
+ if (Shell::unhandled_promise_rejections_.load() > 0) {
+ printf("%i pending unhandled Promise rejection(s) detected.\n",
+ Shell::unhandled_promise_rejections_.load());
+ success = false;
+ }
// In order to finish successfully, success must be != expected_to_throw.
return success == Shell::options.expected_to_throw ? 1 : 0;
}
@@ -3093,11 +3382,20 @@ void Shell::CollectGarbage(Isolate* isolate) {
void Shell::SetWaitUntilDone(Isolate* isolate, bool value) {
base::MutexGuard guard(isolate_status_lock_.Pointer());
- if (isolate_status_.count(isolate) == 0) {
- isolate_status_.insert(std::make_pair(isolate, value));
- } else {
- isolate_status_[isolate] = value;
- }
+ isolate_status_[isolate] = value;
+}
+
+void Shell::NotifyStartStreamingTask(Isolate* isolate) {
+ DCHECK(options.streaming_compile);
+ base::MutexGuard guard(isolate_status_lock_.Pointer());
+ ++isolate_running_streaming_tasks_[isolate];
+}
+
+void Shell::NotifyFinishStreamingTask(Isolate* isolate) {
+ DCHECK(options.streaming_compile);
+ base::MutexGuard guard(isolate_status_lock_.Pointer());
+ --isolate_running_streaming_tasks_[isolate];
+ DCHECK_GE(isolate_running_streaming_tasks_[isolate], 0);
}
namespace {
@@ -3163,7 +3461,8 @@ bool Shell::CompleteMessageLoop(Isolate* isolate) {
i::wasm::WasmEngine* wasm_engine = i_isolate->wasm_engine();
bool should_wait = (options.wait_for_wasm &&
wasm_engine->HasRunningCompileJob(i_isolate)) ||
- isolate_status_[isolate];
+ isolate_status_[isolate] ||
+ isolate_running_streaming_tasks_[isolate] > 0;
return should_wait ? platform::MessageLoopBehavior::kWaitForWork
: platform::MessageLoopBehavior::kDoNotWait;
};
@@ -3175,6 +3474,24 @@ bool Shell::EmptyMessageQueues(Isolate* isolate) {
isolate, []() { return platform::MessageLoopBehavior::kDoNotWait; });
}
+void Shell::PostForegroundTask(Isolate* isolate, std::unique_ptr<Task> task) {
+ g_default_platform->GetForegroundTaskRunner(isolate)->PostTask(
+ std::move(task));
+}
+
+void Shell::PostBlockingBackgroundTask(std::unique_ptr<Task> task) {
+ g_default_platform->CallBlockingTaskOnWorkerThread(std::move(task));
+}
+
+bool Shell::HandleUnhandledPromiseRejections(Isolate* isolate) {
+ if (options.ignore_unhandled_promises) return true;
+ PerIsolateData* data = PerIsolateData::Get(isolate);
+ int count = data->HandleUnhandledPromiseRejections();
+ Shell::unhandled_promise_rejections_.store(
+ Shell::unhandled_promise_rejections_.load() + count);
+ return count == 0;
+}
+
class Serializer : public ValueSerializer::Delegate {
public:
explicit Serializer(Isolate* isolate)
@@ -3505,8 +3822,14 @@ int Shell::Main(int argc, char* argv[]) {
std::ofstream trace_file;
if (options.trace_enabled && !i::FLAG_verify_predictable) {
tracing = std::make_unique<platform::tracing::TracingController>();
- trace_file.open(options.trace_path ? options.trace_path : "v8_trace.json");
- DCHECK(trace_file.good());
+ const char* trace_path =
+ options.trace_path ? options.trace_path : "v8_trace.json";
+ trace_file.open(trace_path);
+ if (!trace_file.good()) {
+ printf("Cannot open trace file '%s' for writing: %s.\n", trace_path,
+ strerror(errno));
+ return 1;
+ }
#ifdef V8_USE_PERFETTO
// Set up the in-process backend that the tracing controller will connect
@@ -3611,112 +3934,140 @@ int Shell::Main(int argc, char* argv[]) {
Initialize(isolate, &console);
PerIsolateData data(isolate);
- if (options.trace_enabled) {
- platform::tracing::TraceConfig* trace_config;
- if (options.trace_config) {
- int size = 0;
- char* trace_config_json_str = ReadChars(options.trace_config, &size);
- trace_config =
- tracing::CreateTraceConfigFromJSON(isolate, trace_config_json_str);
- delete[] trace_config_json_str;
- } else {
- trace_config =
- platform::tracing::TraceConfig::CreateDefaultTraceConfig();
+ // Fuzzilli REPRL = read-eval-print-loop
+ do {
+#ifdef V8_FUZZILLI
+ if (fuzzilli_reprl) {
+ unsigned action = 0;
+ ssize_t nread = read(REPRL_CRFD, &action, 4);
+ if (nread != 4 || action != 'cexe') {
+ fprintf(stderr, "Unknown action: %u\n", action);
+ _exit(-1);
+ }
+ }
+#endif // V8_FUZZILLI
+
+ result = 0;
+
+ if (options.trace_enabled) {
+ platform::tracing::TraceConfig* trace_config;
+ if (options.trace_config) {
+ int size = 0;
+ char* trace_config_json_str = ReadChars(options.trace_config, &size);
+ trace_config = tracing::CreateTraceConfigFromJSON(
+ isolate, trace_config_json_str);
+ delete[] trace_config_json_str;
+ } else {
+ trace_config =
+ platform::tracing::TraceConfig::CreateDefaultTraceConfig();
+ }
+ tracing_controller->StartTracing(trace_config);
}
- tracing_controller->StartTracing(trace_config);
- }
-
- CpuProfiler* cpu_profiler;
- if (options.cpu_profiler) {
- cpu_profiler = CpuProfiler::New(isolate);
- CpuProfilingOptions profile_options;
- cpu_profiler->StartProfiling(String::Empty(isolate), profile_options);
- }
- if (options.stress_opt) {
- options.stress_runs = D8Testing::GetStressRuns();
- for (int i = 0; i < options.stress_runs && result == 0; i++) {
- printf("============ Stress %d/%d ============\n", i + 1,
- options.stress_runs);
- D8Testing::PrepareStressRun(i);
- bool last_run = i == options.stress_runs - 1;
- result = RunMain(isolate, last_run);
+ CpuProfiler* cpu_profiler;
+ if (options.cpu_profiler) {
+ cpu_profiler = CpuProfiler::New(isolate);
+ CpuProfilingOptions profile_options;
+ cpu_profiler->StartProfiling(String::Empty(isolate), profile_options);
}
- printf("======== Full Deoptimization =======\n");
- D8Testing::DeoptimizeAll(isolate);
- } else if (i::FLAG_stress_runs > 0) {
- options.stress_runs = i::FLAG_stress_runs;
- for (int i = 0; i < options.stress_runs && result == 0; i++) {
- printf("============ Run %d/%d ============\n", i + 1,
- options.stress_runs);
- bool last_run = i == options.stress_runs - 1;
+
+ if (options.stress_opt) {
+ options.stress_runs = D8Testing::GetStressRuns();
+ for (int i = 0; i < options.stress_runs && result == 0; i++) {
+ printf("============ Stress %d/%d ============\n", i + 1,
+ options.stress_runs);
+ D8Testing::PrepareStressRun(i);
+ bool last_run = i == options.stress_runs - 1;
+ result = RunMain(isolate, last_run);
+ }
+ printf("======== Full Deoptimization =======\n");
+ D8Testing::DeoptimizeAll(isolate);
+ } else if (i::FLAG_stress_runs > 0) {
+ options.stress_runs = i::FLAG_stress_runs;
+ for (int i = 0; i < options.stress_runs && result == 0; i++) {
+ printf("============ Run %d/%d ============\n", i + 1,
+ options.stress_runs);
+ bool last_run = i == options.stress_runs - 1;
+ result = RunMain(isolate, last_run);
+ }
+ } else if (options.code_cache_options !=
+ ShellOptions::CodeCacheOptions::kNoProduceCache) {
+ printf("============ Run: Produce code cache ============\n");
+ // First run to produce the cache
+ Isolate::CreateParams create_params;
+ create_params.array_buffer_allocator = Shell::array_buffer_allocator;
+ i::FLAG_hash_seed ^= 1337; // Use a different hash seed.
+ Isolate* isolate2 = Isolate::New(create_params);
+ i::FLAG_hash_seed ^= 1337; // Restore old hash seed.
+ {
+ D8Console console(isolate2);
+ Initialize(isolate2, &console);
+ PerIsolateData data(isolate2);
+ Isolate::Scope isolate_scope(isolate2);
+
+ result = RunMain(isolate2, false);
+ }
+ isolate2->Dispose();
+
+ // Change the options to consume cache
+ DCHECK(options.compile_options == v8::ScriptCompiler::kEagerCompile ||
+ options.compile_options ==
+ v8::ScriptCompiler::kNoCompileOptions);
+ options.compile_options = v8::ScriptCompiler::kConsumeCodeCache;
+ options.code_cache_options =
+ ShellOptions::CodeCacheOptions::kNoProduceCache;
+
+ printf("============ Run: Consume code cache ============\n");
+ // Second run to consume the cache in current isolate
+ result = RunMain(isolate, true);
+ options.compile_options = v8::ScriptCompiler::kNoCompileOptions;
+ } else {
+ bool last_run = true;
result = RunMain(isolate, last_run);
}
- } else if (options.code_cache_options !=
- ShellOptions::CodeCacheOptions::kNoProduceCache) {
- printf("============ Run: Produce code cache ============\n");
- // First run to produce the cache
- Isolate::CreateParams create_params;
- create_params.array_buffer_allocator = Shell::array_buffer_allocator;
- i::FLAG_hash_seed ^= 1337; // Use a different hash seed.
- Isolate* isolate2 = Isolate::New(create_params);
- i::FLAG_hash_seed ^= 1337; // Restore old hash seed.
- {
- D8Console console(isolate2);
- Initialize(isolate2, &console);
- PerIsolateData data(isolate2);
- Isolate::Scope isolate_scope(isolate2);
- result = RunMain(isolate2, false);
+ // Run interactive shell if explicitly requested or if no script has been
+ // executed, but never on --test
+ if (use_interactive_shell()) {
+ RunShell(isolate);
}
- isolate2->Dispose();
-
- // Change the options to consume cache
- DCHECK(options.compile_options == v8::ScriptCompiler::kEagerCompile ||
- options.compile_options == v8::ScriptCompiler::kNoCompileOptions);
- options.compile_options = v8::ScriptCompiler::kConsumeCodeCache;
- options.code_cache_options =
- ShellOptions::CodeCacheOptions::kNoProduceCache;
-
- printf("============ Run: Consume code cache ============\n");
- // Second run to consume the cache in current isolate
- result = RunMain(isolate, true);
- options.compile_options = v8::ScriptCompiler::kNoCompileOptions;
- } else {
- bool last_run = true;
- result = RunMain(isolate, last_run);
- }
- // Run interactive shell if explicitly requested or if no script has been
- // executed, but never on --test
- if (use_interactive_shell()) {
- RunShell(isolate);
- }
-
- if (i::FLAG_trace_ignition_dispatches &&
- i::FLAG_trace_ignition_dispatches_output_file != nullptr) {
- WriteIgnitionDispatchCountersFile(isolate);
- }
+ if (i::FLAG_trace_ignition_dispatches &&
+ i::FLAG_trace_ignition_dispatches_output_file != nullptr) {
+ WriteIgnitionDispatchCountersFile(isolate);
+ }
- if (options.cpu_profiler) {
- CpuProfile* profile = cpu_profiler->StopProfiling(String::Empty(isolate));
- if (options.cpu_profiler_print) {
- const internal::ProfileNode* root =
- reinterpret_cast<const internal::ProfileNode*>(
- profile->GetTopDownRoot());
- root->Print(0);
+ if (options.cpu_profiler) {
+ CpuProfile* profile =
+ cpu_profiler->StopProfiling(String::Empty(isolate));
+ if (options.cpu_profiler_print) {
+ const internal::ProfileNode* root =
+ reinterpret_cast<const internal::ProfileNode*>(
+ profile->GetTopDownRoot());
+ root->Print(0);
+ }
+ profile->Delete();
+ cpu_profiler->Dispose();
}
- profile->Delete();
- cpu_profiler->Dispose();
- }
- // Shut down contexts and collect garbage.
- cached_code_map_.clear();
- evaluation_context_.Reset();
- stringify_function_.Reset();
- CollectGarbage(isolate);
+ // Shut down contexts and collect garbage.
+ cached_code_map_.clear();
+ evaluation_context_.Reset();
+ stringify_function_.Reset();
+ CollectGarbage(isolate);
+
+#ifdef V8_FUZZILLI
+ // Send result to parent (fuzzilli) and reset edge guards.
+ if (fuzzilli_reprl) {
+ int status = result << 8;
+ CHECK_EQ(write(REPRL_CWFD, &status, 4), 4);
+ __sanitizer_cov_reset_edgeguards();
+ }
+#endif // V8_FUZZILLI
+ } while (fuzzilli_reprl);
}
OnExit(isolate);
+
V8::Dispose();
V8::ShutdownPlatform();
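// ---------------------------------------------------------------------------
// Illustrative sketch (not part of the patch above): the unhandled-promise
// bookkeeping that the d8.cc changes rely on, condensed to the public embedder
// API. The helper names (RejectionTracker, g_tracker, OnPromiseReject) are
// invented for this sketch; d8 itself keeps the list in PerIsolateData and
// counts rejections in Shell::unhandled_promise_rejections_.
#include <cstdio>
#include <memory>
#include <vector>

#include "include/libplatform/libplatform.h"
#include "include/v8.h"

namespace {

class RejectionTracker {
 public:
  void OnReject(v8::PromiseRejectMessage message) {
    v8::Local<v8::Promise> promise = message.GetPromise();
    v8::Isolate* isolate = promise->GetIsolate();
    switch (message.GetEvent()) {
      case v8::kPromiseRejectWithNoHandler:
        // Remember the promise; a handler may still be attached later.
        pending_.emplace_back(isolate, promise);
        break;
      case v8::kPromiseHandlerAddedAfterReject:
        // A handler showed up after all; drop the matching entry.
        for (auto it = pending_.begin(); it != pending_.end(); ++it) {
          if (it->Get(isolate) == promise) {
            pending_.erase(it);
            break;
          }
        }
        break;
      default:
        break;  // Resolve/reject-after-resolved events are not tracked here.
    }
  }
  size_t unhandled_count() const { return pending_.size(); }

 private:
  std::vector<v8::Global<v8::Promise>> pending_;
};

RejectionTracker g_tracker;

void OnPromiseReject(v8::PromiseRejectMessage message) {
  g_tracker.OnReject(message);
}

}  // namespace

int main(int, char* argv[]) {
  v8::V8::InitializeICUDefaultLocation(argv[0]);
  std::unique_ptr<v8::Platform> platform = v8::platform::NewDefaultPlatform();
  v8::V8::InitializePlatform(platform.get());
  v8::V8::Initialize();
  v8::Isolate::CreateParams params;
  v8::ArrayBuffer::Allocator* allocator =
      v8::ArrayBuffer::Allocator::NewDefaultAllocator();
  params.array_buffer_allocator = allocator;
  v8::Isolate* isolate = v8::Isolate::New(params);
  isolate->SetPromiseRejectCallback(OnPromiseReject);
  {
    v8::Isolate::Scope isolate_scope(isolate);
    v8::HandleScope handle_scope(isolate);
    v8::Local<v8::Context> context = v8::Context::New(isolate);
    v8::Context::Scope context_scope(context);
    v8::Local<v8::String> src =
        v8::String::NewFromUtf8(isolate, "Promise.reject(new Error('boom'));",
                                v8::NewStringType::kNormal)
            .ToLocalChecked();
    v8::Script::Compile(context, src)
        .ToLocalChecked()
        ->Run(context)
        .ToLocalChecked();
    isolate->PerformMicrotaskCheckpoint();  // flush pending promise reactions
  }
  std::printf("%zu pending unhandled Promise rejection(s) detected.\n",
              g_tracker.unhandled_count());
  int exit_code = g_tracker.unhandled_count() == 0 ? 0 : 1;
  isolate->Dispose();
  v8::V8::Dispose();
  v8::V8::ShutdownPlatform();
  delete allocator;
  return exit_code;
}
// ---------------------------------------------------------------------------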
diff --git a/chromium/v8/src/d8/d8.h b/chromium/v8/src/d8/d8.h
index bd49b81fd02..203e9edb0cb 100644
--- a/chromium/v8/src/d8/d8.h
+++ b/chromium/v8/src/d8/d8.h
@@ -234,6 +234,12 @@ class PerIsolateData {
AsyncHooks* GetAsyncHooks() { return async_hooks_wrapper_; }
+ void RemoveUnhandledPromise(Local<Promise> promise);
+ void AddUnhandledPromise(Local<Promise> promise, Local<Message> message,
+ Local<Value> exception);
+ int HandleUnhandledPromiseRejections();
+ size_t GetUnhandledPromiseCount();
+
private:
friend class Shell;
friend class RealmScope;
@@ -245,6 +251,8 @@ class PerIsolateData {
Global<Value> realm_shared_;
std::queue<Global<Function>> set_timeout_callbacks_;
std::queue<Global<Context>> set_timeout_contexts_;
+ std::vector<std::tuple<Global<Promise>, Global<Message>, Global<Value>>>
+ unhandled_promises_;
AsyncHooks* async_hooks_wrapper_;
int RealmIndexOrThrow(const v8::FunctionCallbackInfo<v8::Value>& args,
@@ -272,6 +280,7 @@ class ShellOptions {
bool interactive_shell = false;
bool test_shell = false;
bool expected_to_throw = false;
+ bool ignore_unhandled_promises = false;
bool mock_arraybuffer_allocator = false;
size_t mock_arraybuffer_allocator_limit = 0;
bool multi_mapped_mock_allocator = false;
@@ -280,6 +289,7 @@ class ShellOptions {
v8::ScriptCompiler::CompileOptions compile_options =
v8::ScriptCompiler::kNoCompileOptions;
CodeCacheOptions code_cache_options = CodeCacheOptions::kNoProduceCache;
+ bool streaming_compile = false;
SourceGroup* isolate_sources = nullptr;
const char* icu_data_file = nullptr;
const char* icu_locale = nullptr;
@@ -331,6 +341,11 @@ class Shell : public i::AllStatic {
static bool EmptyMessageQueues(Isolate* isolate);
static bool CompleteMessageLoop(Isolate* isolate);
+ static bool HandleUnhandledPromiseRejections(Isolate* isolate);
+
+ static void PostForegroundTask(Isolate* isolate, std::unique_ptr<Task> task);
+ static void PostBlockingBackgroundTask(std::unique_ptr<Task> task);
+
static std::unique_ptr<SerializationData> SerializeValue(
Isolate* isolate, Local<Value> value, Local<Value> transfer);
static MaybeLocal<Value> DeserializeValue(
@@ -434,6 +449,10 @@ class Shell : public i::AllStatic {
Local<Module> module,
Local<Object> meta);
+#ifdef V8_FUZZILLI
+ static void Fuzzilli(const v8::FunctionCallbackInfo<v8::Value>& args);
+#endif // V8_FUZZILLI
+
// Data is of type DynamicImportData*. We use void* here to be able
// to conform with MicrotaskCallback interface and enqueue this
// function in the microtask queue.
@@ -446,6 +465,8 @@ class Shell : public i::AllStatic {
static ArrayBuffer::Allocator* array_buffer_allocator;
static void SetWaitUntilDone(Isolate* isolate, bool value);
+ static void NotifyStartStreamingTask(Isolate* isolate);
+ static void NotifyFinishStreamingTask(Isolate* isolate);
static char* ReadCharsFromTcpPort(const char* name, int* size_out);
@@ -462,6 +483,8 @@ class Shell : public i::AllStatic {
static void Initialize(Isolate* isolate, D8Console* console,
bool isOnMainThread = true);
+ static void PromiseRejectCallback(v8::PromiseRejectMessage reject_message);
+
private:
static Global<Context> evaluation_context_;
static base::OnceType quit_once_;
@@ -506,10 +529,12 @@ class Shell : public i::AllStatic {
// the isolate_status_ needs to be concurrency-safe.
static base::LazyMutex isolate_status_lock_;
static std::map<Isolate*, bool> isolate_status_;
+ static std::map<Isolate*, int> isolate_running_streaming_tasks_;
static base::LazyMutex cached_code_mutex_;
static std::map<std::string, std::unique_ptr<ScriptCompiler::CachedData>>
cached_code_map_;
+ static std::atomic<int> unhandled_promise_rejections_;
};
} // namespace v8
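// ---------------------------------------------------------------------------
// Illustrative sketch (not part of the patch above): the shape of the
// streaming-compile bookkeeping declared here. d8 bumps a per-isolate counter
// in NotifyStartStreamingTask / NotifyFinishStreamingTask and keeps pumping
// the message loop while that counter is non-zero. The names below
// (StreamingTaskTracker, CompletePendingWork) are invented for this sketch;
// the real implementation lives in Shell and uses V8-internal helpers.
#include <map>
#include <mutex>

#include "include/libplatform/libplatform.h"
#include "include/v8.h"

class StreamingTaskTracker {
 public:
  void NotifyStart(v8::Isolate* isolate) {
    std::lock_guard<std::mutex> lock(mutex_);
    ++running_[isolate];
  }
  void NotifyFinish(v8::Isolate* isolate) {
    std::lock_guard<std::mutex> lock(mutex_);
    --running_[isolate];  // start is always notified before finish
  }
  bool HasRunningTasks(v8::Isolate* isolate) {
    std::lock_guard<std::mutex> lock(mutex_);
    return running_[isolate] > 0;
  }

 private:
  std::mutex mutex_;  // streaming tasks finish on background threads
  std::map<v8::Isolate*, int> running_;
};

// Pump foreground tasks; block for more work as long as a streaming
// background task is still in flight and may post a finalization task.
inline void CompletePendingWork(v8::Platform* platform, v8::Isolate* isolate,
                                StreamingTaskTracker* tracker) {
  while (v8::platform::PumpMessageLoop(
      platform, isolate,
      tracker->HasRunningTasks(isolate)
          ? v8::platform::MessageLoopBehavior::kWaitForWork
          : v8::platform::MessageLoopBehavior::kDoNotWait)) {
    // Each executed task may enqueue microtasks (e.g. module evaluation).
    isolate->PerformMicrotaskCheckpoint();
  }
}
// ---------------------------------------------------------------------------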
diff --git a/chromium/v8/src/debug/debug-coverage.cc b/chromium/v8/src/debug/debug-coverage.cc
index 9b359fde36e..f4355d9a66c 100644
--- a/chromium/v8/src/debug/debug-coverage.cc
+++ b/chromium/v8/src/debug/debug-coverage.cc
@@ -793,7 +793,9 @@ void Coverage::SelectMode(Isolate* isolate, debug::CoverageMode mode) {
}
for (Handle<JSFunction> func : funcs_needing_feedback_vector) {
- JSFunction::EnsureFeedbackVector(func);
+ IsCompiledScope is_compiled_scope(func->shared().is_compiled_scope());
+ CHECK(is_compiled_scope.is_compiled());
+ JSFunction::EnsureFeedbackVector(func, &is_compiled_scope);
}
// Root all feedback vectors to avoid early collection.
diff --git a/chromium/v8/src/debug/debug-evaluate.cc b/chromium/v8/src/debug/debug-evaluate.cc
index 473bac1bf99..21b709325fc 100644
--- a/chromium/v8/src/debug/debug-evaluate.cc
+++ b/chromium/v8/src/debug/debug-evaluate.cc
@@ -189,10 +189,10 @@ DebugEvaluate::ContextBuilder::ContextBuilder(Isolate* isolate,
// - Between the function scope and the native context, we only resolve
// variable names that are guaranteed to not be shadowed by stack-allocated
// variables. Contexts between the function context and the original
- // context have a blacklist attached to implement that.
+ // context have a blocklist attached to implement that.
// Context::Lookup has special handling for debug-evaluate contexts:
// - Look up in the materialized stack variables.
- // - Check the blacklist to find out whether to abort further lookup.
+ // - Check the blocklist to find out whether to abort further lookup.
// - Look up in the original context.
for (; !scope_iterator_.Done(); scope_iterator_.Next()) {
ScopeIterator::ScopeType scope_type = scope_iterator_.Type();
@@ -208,7 +208,7 @@ DebugEvaluate::ContextBuilder::ContextBuilder(Isolate* isolate,
context_chain_element.wrapped_context = scope_iterator_.CurrentContext();
}
if (!scope_iterator_.InInnerScope()) {
- context_chain_element.blacklist = scope_iterator_.GetLocals();
+ context_chain_element.blocklist = scope_iterator_.GetLocals();
}
context_chain_.push_back(context_chain_element);
}
@@ -224,7 +224,7 @@ DebugEvaluate::ContextBuilder::ContextBuilder(Isolate* isolate,
scope_info->SetIsDebugEvaluateScope();
evaluation_context_ = factory->NewDebugEvaluateContext(
evaluation_context_, scope_info, element.materialized_object,
- element.wrapped_context, element.blacklist);
+ element.wrapped_context, element.blocklist);
}
}
@@ -254,7 +254,7 @@ namespace {
bool IntrinsicHasNoSideEffect(Runtime::FunctionId id) {
// Use macro to include only the non-inlined version of an intrinsic.
-#define INTRINSIC_WHITELIST(V) \
+#define INTRINSIC_ALLOWLIST(V) \
/* Conversions */ \
V(NumberToStringSlow) \
V(ToBigInt) \
@@ -357,8 +357,8 @@ bool IntrinsicHasNoSideEffect(Runtime::FunctionId id) {
V(OptimizeOsr) \
V(UnblockConcurrentRecompilation)
-// Intrinsics with inline versions have to be whitelisted here a second time.
-#define INLINE_INTRINSIC_WHITELIST(V) \
+// Intrinsics with inline versions have to be allowlisted here a second time.
+#define INLINE_INTRINSIC_ALLOWLIST(V) \
V(Call) \
V(IsJSReceiver) \
V(AsyncFunctionEnter) \
@@ -368,8 +368,8 @@ bool IntrinsicHasNoSideEffect(Runtime::FunctionId id) {
#define CASE(Name) case Runtime::k##Name:
#define INLINE_CASE(Name) case Runtime::kInline##Name:
switch (id) {
- INTRINSIC_WHITELIST(CASE)
- INLINE_INTRINSIC_WHITELIST(INLINE_CASE)
+ INTRINSIC_ALLOWLIST(CASE)
+ INLINE_INTRINSIC_ALLOWLIST(INLINE_CASE)
return true;
default:
if (FLAG_trace_side_effect_free_debug_evaluate) {
@@ -381,8 +381,8 @@ bool IntrinsicHasNoSideEffect(Runtime::FunctionId id) {
#undef CASE
#undef INLINE_CASE
-#undef INTRINSIC_WHITELIST
-#undef INLINE_INTRINSIC_WHITELIST
+#undef INTRINSIC_ALLOWLIST
+#undef INLINE_INTRINSIC_ALLOWLIST
}
bool BytecodeHasNoSideEffect(interpreter::Bytecode bytecode) {
@@ -393,7 +393,7 @@ bool BytecodeHasNoSideEffect(interpreter::Bytecode bytecode) {
if (Bytecodes::IsJumpIfToBoolean(bytecode)) return true;
if (Bytecodes::IsPrefixScalingBytecode(bytecode)) return true;
switch (bytecode) {
- // Whitelist for bytecodes.
+ // Allowlist for bytecodes.
// Loads.
case Bytecode::kLdaLookupSlot:
case Bytecode::kLdaGlobal:
@@ -496,7 +496,7 @@ bool BytecodeHasNoSideEffect(interpreter::Bytecode bytecode) {
DebugInfo::SideEffectState BuiltinGetSideEffectState(Builtins::Name id) {
switch (id) {
- // Whitelist for builtins.
+ // Allowlist for builtins.
// Object builtins.
case Builtins::kObjectConstructor:
case Builtins::kObjectCreate:
@@ -672,7 +672,6 @@ DebugInfo::SideEffectState BuiltinGetSideEffectState(Builtins::Name id) {
case Builtins::kMathMax:
case Builtins::kMathMin:
case Builtins::kMathPow:
- case Builtins::kMathRandom:
case Builtins::kMathRound:
case Builtins::kMathSign:
case Builtins::kMathSin:
@@ -862,7 +861,7 @@ DebugInfo::SideEffectState DebugEvaluate::FunctionGetSideEffectState(
DCHECK(info->is_compiled());
DCHECK(!info->needs_script_context());
if (info->HasBytecodeArray()) {
- // Check bytecodes against whitelist.
+ // Check bytecodes against allowlist.
Handle<BytecodeArray> bytecode_array(info->GetBytecodeArray(), isolate);
if (FLAG_trace_side_effect_free_debug_evaluate) {
bytecode_array->Print();
@@ -892,7 +891,7 @@ DebugInfo::SideEffectState DebugEvaluate::FunctionGetSideEffectState(
interpreter::Bytecodes::ToString(bytecode));
}
- // Did not match whitelist.
+ // Did not match allowlist.
return DebugInfo::kHasSideEffects;
}
return requires_runtime_checks ? DebugInfo::kRequiresRuntimeChecks
@@ -904,7 +903,7 @@ DebugInfo::SideEffectState DebugEvaluate::FunctionGetSideEffectState(
: DebugInfo::kHasSideEffects;
}
} else {
- // Check built-ins against whitelist.
+ // Check built-ins against allowlist.
int builtin_index =
info->HasBuiltinId() ? info->builtin_id() : Builtins::kNoBuiltinId;
if (!Builtins::IsBuiltinId(builtin_index))
@@ -1054,7 +1053,7 @@ void DebugEvaluate::VerifyTransitiveBuiltins(Isolate* isolate) {
sanity_check = true;
continue;
}
- PrintF("Whitelisted builtin %s calls non-whitelisted builtin %s\n",
+ PrintF("Allowlisted builtin %s calls non-allowlisted builtin %s\n",
Builtins::name(caller), Builtins::name(callee));
failed = true;
}
diff --git a/chromium/v8/src/debug/debug-evaluate.h b/chromium/v8/src/debug/debug-evaluate.h
index 516a0dac7c2..608466e4860 100644
--- a/chromium/v8/src/debug/debug-evaluate.h
+++ b/chromium/v8/src/debug/debug-evaluate.h
@@ -84,7 +84,7 @@ class DebugEvaluate : public AllStatic {
struct ContextChainElement {
Handle<Context> wrapped_context;
Handle<JSObject> materialized_object;
- Handle<StringSet> blacklist;
+ Handle<StringSet> blocklist;
};
Handle<Context> evaluation_context_;
diff --git a/chromium/v8/src/debug/debug-frames.cc b/chromium/v8/src/debug/debug-frames.cc
index 3f79f5ee3f8..d0ee6bda42a 100644
--- a/chromium/v8/src/debug/debug-frames.cc
+++ b/chromium/v8/src/debug/debug-frames.cc
@@ -6,7 +6,6 @@
#include "src/builtins/accessors.h"
#include "src/execution/frames-inl.h"
-#include "src/wasm/wasm-interpreter.h"
#include "src/wasm/wasm-objects-inl.h"
namespace v8 {
diff --git a/chromium/v8/src/debug/debug-interface.h b/chromium/v8/src/debug/debug-interface.h
index 3a46cf9b391..e52bd1ba2a2 100644
--- a/chromium/v8/src/debug/debug-interface.h
+++ b/chromium/v8/src/debug/debug-interface.h
@@ -528,6 +528,7 @@ class PostponeInterruptsScope {
class WeakMap : public v8::Object {
public:
+ WeakMap() = delete;
V8_EXPORT_PRIVATE V8_WARN_UNUSED_RESULT v8::MaybeLocal<v8::Value> Get(
v8::Local<v8::Context> context, v8::Local<v8::Value> key);
V8_EXPORT_PRIVATE V8_WARN_UNUSED_RESULT v8::MaybeLocal<WeakMap> Set(
@@ -536,9 +537,6 @@ class WeakMap : public v8::Object {
V8_EXPORT_PRIVATE static Local<WeakMap> New(v8::Isolate* isolate);
V8_INLINE static WeakMap* Cast(Value* obj);
-
- private:
- WeakMap();
};
/**
@@ -549,6 +547,7 @@ class WeakMap : public v8::Object {
*/
class V8_EXPORT_PRIVATE AccessorPair : public v8::Value {
public:
+ AccessorPair() = delete;
v8::Local<v8::Value> getter();
v8::Local<v8::Value> setter();
@@ -556,7 +555,6 @@ class V8_EXPORT_PRIVATE AccessorPair : public v8::Value {
V8_INLINE static AccessorPair* Cast(v8::Value* obj);
private:
- AccessorPair();
static void CheckCast(v8::Value* obj);
};
@@ -596,17 +594,17 @@ class PropertyIterator {
// Wrapper around v8::internal::WasmValue.
class V8_EXPORT_PRIVATE WasmValue : public v8::Value {
public:
+ WasmValue() = delete;
static bool IsWasmValue(v8::Local<v8::Value> obj);
V8_INLINE static WasmValue* Cast(v8::Value* obj);
int value_type();
// Get the underlying values as a byte array, this is only valid if value_type
// is i32, i64, f32, f64, or s128.
v8::Local<v8::Array> bytes();
- // Get the underlying anyref, only valid if value_type is anyref.
+ // Get the underlying externref, only valid if value_type is externref.
v8::Local<v8::Value> ref();
private:
- WasmValue();
static void CheckCast(v8::Value* obj);
};
diff --git a/chromium/v8/src/debug/debug-scopes.cc b/chromium/v8/src/debug/debug-scopes.cc
index 6b838a69af0..3afbcfd309e 100644
--- a/chromium/v8/src/debug/debug-scopes.cc
+++ b/chromium/v8/src/debug/debug-scopes.cc
@@ -269,8 +269,9 @@ void ScopeIterator::TryParseAndRetrieveScopes(ReparseStrategy strategy) {
const bool parse_result =
flags.is_toplevel()
? parsing::ParseProgram(info_.get(), script, maybe_outer_scope,
- isolate_)
- : parsing::ParseFunction(info_.get(), shared_info, isolate_);
+ isolate_, parsing::ReportStatisticsMode::kNo)
+ : parsing::ParseFunction(info_.get(), shared_info, isolate_,
+ parsing::ReportStatisticsMode::kNo);
if (parse_result) {
DeclarationScope* literal_scope = info_->literal()->scope();
@@ -300,14 +301,13 @@ void ScopeIterator::TryParseAndRetrieveScopes(ReparseStrategy strategy) {
UnwrapEvaluationContext();
} else {
// A failed reparse indicates that the preparser has diverged from the
- // parser or that the preparse data given to the initial parse has been
- // faulty. We fail in debug mode but in release mode we only provide the
- // information we get from the context chain but nothing about
- // completely stack allocated scopes or stack allocated locals.
- // Or it could be due to stack overflow.
+ // parser, that the preparse data given to the initial parse was faulty, or
+ // a stack overflow.
+ // TODO(leszeks): This error is pretty unexpected, so we could report the
+ // error in debug mode. Better to not fail in release though, in case it's
+ // just a stack overflow.
+
// Silently fail by presenting an empty context chain.
- CHECK(isolate_->has_pending_exception());
- isolate_->clear_pending_exception();
context_ = Handle<Context>();
}
}
@@ -373,7 +373,8 @@ bool ScopeIterator::DeclaresLocals(Mode mode) const {
if (type == ScopeTypeGlobal) return mode == Mode::ALL;
bool declares_local = false;
- auto visitor = [&](Handle<String> name, Handle<Object> value) {
+ auto visitor = [&](Handle<String> name, Handle<Object> value,
+ ScopeType scope_type) {
declares_local = true;
return true;
};
@@ -421,7 +422,7 @@ void ScopeIterator::AdvanceContext() {
// While advancing one context, we need to advance at least one
// scope, but until we hit the next scope that actually requires
// a context. All the locals collected along the way build the
- // blacklist for debug-evaluate for this context.
+ // blocklist for debug-evaluate for this context.
locals_ = StringSet::New(isolate_);
do {
if (!current_scope_ || !current_scope_->outer_scope()) break;
@@ -462,7 +463,7 @@ void ScopeIterator::Next() {
if (leaving_closure) {
DCHECK(current_scope_ != closure_scope_);
// Edge case when we just go past {closure_scope_}. This case
- // already needs to start collecting locals for the blacklist.
+ // already needs to start collecting locals for the blocklist.
locals_ = StringSet::New(isolate_);
CollectLocalsFromCurrentScope();
}
@@ -546,7 +547,18 @@ Handle<JSObject> ScopeIterator::ScopeObject(Mode mode) {
}
Handle<JSObject> scope = isolate_->factory()->NewJSObjectWithNullProto();
- auto visitor = [=](Handle<String> name, Handle<Object> value) {
+ auto visitor = [=](Handle<String> name, Handle<Object> value,
+ ScopeType scope_type) {
+ if (value->IsTheHole(isolate_)) {
+ // Reflect variables under TDZ as undefined in scope object.
+ if (scope_type == ScopeTypeScript &&
+ JSReceiver::HasOwnProperty(scope, name).FromMaybe(true)) {
+ // We also use the hole to represent overridden let-declarations via
+ // REPL mode in a script context. Catch this case.
+ return false;
+ }
+ value = isolate_->factory()->undefined_value();
+ }
JSObject::AddProperty(isolate_, scope, name, value, NONE);
return false;
};
@@ -562,10 +574,10 @@ void ScopeIterator::VisitScope(const Visitor& visitor, Mode mode) const {
case ScopeTypeCatch:
case ScopeTypeBlock:
case ScopeTypeEval:
- return VisitLocalScope(visitor, mode);
+ return VisitLocalScope(visitor, mode, Type());
case ScopeTypeModule:
if (InInnerScope()) {
- return VisitLocalScope(visitor, mode);
+ return VisitLocalScope(visitor, mode, Type());
}
DCHECK_EQ(Mode::ALL, mode);
return VisitModuleScope(visitor);
@@ -714,7 +726,8 @@ void ScopeIterator::VisitScriptScope(const Visitor& visitor) const {
Handle<Context> context = ScriptContextTable::GetContext(
isolate_, script_contexts, context_index);
Handle<ScopeInfo> scope_info(context->scope_info(), isolate_);
- if (VisitContextLocals(visitor, scope_info, context)) return;
+ if (VisitContextLocals(visitor, scope_info, context, ScopeTypeScript))
+ return;
}
}
@@ -722,7 +735,8 @@ void ScopeIterator::VisitModuleScope(const Visitor& visitor) const {
DCHECK(context_->IsModuleContext());
Handle<ScopeInfo> scope_info(context_->scope_info(), isolate_);
- if (VisitContextLocals(visitor, scope_info, context_)) return;
+ if (VisitContextLocals(visitor, scope_info, context_, ScopeTypeModule))
+ return;
int count_index = scope_info->ModuleVariableCountIndex();
int module_variable_count = Smi::cast(scope_info->get(count_index)).value();
@@ -741,29 +755,27 @@ void ScopeIterator::VisitModuleScope(const Visitor& visitor) const {
Handle<Object> value =
SourceTextModule::LoadVariable(isolate_, module, index);
- // Reflect variables under TDZ as undeclared in scope object.
- if (value->IsTheHole(isolate_)) continue;
- if (visitor(name, value)) return;
+ if (visitor(name, value, ScopeTypeModule)) return;
}
}
bool ScopeIterator::VisitContextLocals(const Visitor& visitor,
Handle<ScopeInfo> scope_info,
- Handle<Context> context) const {
+ Handle<Context> context,
+ ScopeType scope_type) const {
// Fill all context locals to the context extension.
for (int i = 0; i < scope_info->ContextLocalCount(); ++i) {
Handle<String> name(scope_info->ContextLocalName(i), isolate_);
if (ScopeInfo::VariableIsSynthetic(*name)) continue;
int context_index = scope_info->ContextHeaderLength() + i;
Handle<Object> value(context->get(context_index), isolate_);
- // Reflect variables under TDZ as undefined in scope object.
- if (value->IsTheHole(isolate_)) continue;
- if (visitor(name, value)) return true;
+ if (visitor(name, value, scope_type)) return true;
}
return false;
}
-bool ScopeIterator::VisitLocals(const Visitor& visitor, Mode mode) const {
+bool ScopeIterator::VisitLocals(const Visitor& visitor, Mode mode,
+ ScopeType scope_type) const {
if (mode == Mode::STACK && current_scope_->is_declaration_scope() &&
current_scope_->AsDeclarationScope()->has_this_declaration()) {
// TODO(bmeurer): We should refactor the general variable lookup
@@ -776,10 +788,11 @@ bool ScopeIterator::VisitLocals(const Visitor& visitor, Mode mode) const {
: frame_inspector_ == nullptr
? handle(generator_->receiver(), isolate_)
: frame_inspector_->GetReceiver();
- if (receiver->IsOptimizedOut(isolate_) || receiver->IsTheHole(isolate_)) {
+ if (receiver->IsOptimizedOut(isolate_)) {
receiver = isolate_->factory()->undefined_value();
}
- if (visitor(isolate_->factory()->this_string(), receiver)) return true;
+ if (visitor(isolate_->factory()->this_string(), receiver, scope_type))
+ return true;
}
if (current_scope_->is_function_scope()) {
@@ -790,7 +803,7 @@ bool ScopeIterator::VisitLocals(const Visitor& visitor, Mode mode) const {
? function_
: frame_inspector_->GetFunction();
Handle<String> name = function_var->name();
- if (visitor(name, function)) return true;
+ if (visitor(name, function, scope_type)) return true;
}
}
@@ -839,9 +852,6 @@ bool ScopeIterator::VisitLocals(const Visitor& visitor, Mode mode) const {
index += parameter_count;
DCHECK_LT(index, parameters_and_registers.length());
value = handle(parameters_and_registers.get(index), isolate_);
- if (value->IsTheHole(isolate_)) {
- value = isolate_->factory()->undefined_value();
- }
} else {
value = frame_inspector_->GetExpression(index);
if (value->IsOptimizedOut(isolate_)) {
@@ -851,9 +861,6 @@ bool ScopeIterator::VisitLocals(const Visitor& visitor, Mode mode) const {
continue;
}
value = isolate_->factory()->undefined_value();
- } else if (value->IsTheHole(isolate_)) {
- // Reflect variables under TDZ as undeclared in scope object.
- continue;
}
}
break;
@@ -862,8 +869,6 @@ bool ScopeIterator::VisitLocals(const Visitor& visitor, Mode mode) const {
if (mode == Mode::STACK) continue;
DCHECK(var->IsContextSlot());
value = handle(context_->get(index), isolate_);
- // Reflect variables under TDZ as undeclared in scope object.
- if (value->IsTheHole(isolate_)) continue;
break;
case VariableLocation::MODULE: {
@@ -871,13 +876,11 @@ bool ScopeIterator::VisitLocals(const Visitor& visitor, Mode mode) const {
// if (var->IsExport()) continue;
Handle<SourceTextModule> module(context_->module(), isolate_);
value = SourceTextModule::LoadVariable(isolate_, module, var->index());
- // Reflect variables under TDZ as undeclared in scope object.
- if (value->IsTheHole(isolate_)) continue;
break;
}
}
- if (visitor(var->name(), value)) return true;
+ if (visitor(var->name(), value, scope_type)) return true;
}
return false;
}
@@ -894,9 +897,10 @@ Handle<JSObject> ScopeIterator::WithContextExtension() {
// Create a plain JSObject which materializes the block scope for the specified
// block context.
-void ScopeIterator::VisitLocalScope(const Visitor& visitor, Mode mode) const {
+void ScopeIterator::VisitLocalScope(const Visitor& visitor, Mode mode,
+ ScopeType scope_type) const {
if (InInnerScope()) {
- if (VisitLocals(visitor, mode)) return;
+ if (VisitLocals(visitor, mode, scope_type)) return;
if (mode == Mode::STACK && Type() == ScopeTypeLocal) {
// Hide |this| in arrow functions that may be embedded in other functions
// but don't force |this| to be context-allocated. Otherwise we'd find the
@@ -904,7 +908,7 @@ void ScopeIterator::VisitLocalScope(const Visitor& visitor, Mode mode) const {
if (!closure_scope_->has_this_declaration() &&
!closure_scope_->HasThisReference()) {
if (visitor(isolate_->factory()->this_string(),
- isolate_->factory()->undefined_value()))
+ isolate_->factory()->undefined_value(), scope_type))
return;
}
// Add |arguments| to the function scope even if it wasn't used.
@@ -919,13 +923,15 @@ void ScopeIterator::VisitLocalScope(const Visitor& visitor, Mode mode) const {
JavaScriptFrame* frame = GetFrame();
Handle<JSObject> arguments = Accessors::FunctionGetArguments(
frame, frame_inspector_->inlined_frame_index());
- if (visitor(isolate_->factory()->arguments_string(), arguments)) return;
+ if (visitor(isolate_->factory()->arguments_string(), arguments,
+ scope_type))
+ return;
}
}
} else {
DCHECK_EQ(Mode::ALL, mode);
Handle<ScopeInfo> scope_info(context_->scope_info(), isolate_);
- if (VisitContextLocals(visitor, scope_info, context_)) return;
+ if (VisitContextLocals(visitor, scope_info, context_, scope_type)) return;
}
if (mode == Mode::ALL && HasContext()) {
@@ -945,7 +951,7 @@ void ScopeIterator::VisitLocalScope(const Visitor& visitor, Mode mode) const {
DCHECK(keys->get(i).IsString());
Handle<String> key(String::cast(keys->get(i)), isolate_);
Handle<Object> value = JSReceiver::GetDataProperty(extension, key);
- if (visitor(key, value)) return;
+ if (visitor(key, value, scope_type)) return;
}
}
}
diff --git a/chromium/v8/src/debug/debug-scopes.h b/chromium/v8/src/debug/debug-scopes.h
index a0357c73838..590e9e9bfe6 100644
--- a/chromium/v8/src/debug/debug-scopes.h
+++ b/chromium/v8/src/debug/debug-scopes.h
@@ -141,8 +141,8 @@ class ScopeIterator {
void UnwrapEvaluationContext();
- using Visitor =
- std::function<bool(Handle<String> name, Handle<Object> value)>;
+ using Visitor = std::function<bool(Handle<String> name, Handle<Object> value,
+ ScopeType scope_type)>;
Handle<JSObject> WithContextExtension();
@@ -159,12 +159,14 @@ class ScopeIterator {
// Helper functions.
void VisitScope(const Visitor& visitor, Mode mode) const;
- void VisitLocalScope(const Visitor& visitor, Mode mode) const;
+ void VisitLocalScope(const Visitor& visitor, Mode mode,
+ ScopeType scope_type) const;
void VisitScriptScope(const Visitor& visitor) const;
void VisitModuleScope(const Visitor& visitor) const;
- bool VisitLocals(const Visitor& visitor, Mode mode) const;
+ bool VisitLocals(const Visitor& visitor, Mode mode,
+ ScopeType scope_type) const;
bool VisitContextLocals(const Visitor& visitor, Handle<ScopeInfo> scope_info,
- Handle<Context> context) const;
+ Handle<Context> context, ScopeType scope_type) const;
DISALLOW_IMPLICIT_CONSTRUCTORS(ScopeIterator);
};
diff --git a/chromium/v8/src/debug/debug.cc b/chromium/v8/src/debug/debug.cc
index 627ccc7c567..0c71bf8308f 100644
--- a/chromium/v8/src/debug/debug.cc
+++ b/chromium/v8/src/debug/debug.cc
@@ -820,7 +820,10 @@ void Debug::ClearAllBreakPoints() {
HeapObject raw_wasm_script;
if (wasm_scripts_with_breakpoints_->Get(idx).GetHeapObject(
&raw_wasm_script)) {
- WasmScript::ClearAllBreakpoints(Script::cast(raw_wasm_script));
+ Script wasm_script = Script::cast(raw_wasm_script);
+ WasmScript::ClearAllBreakpoints(wasm_script);
+ wasm_script.wasm_native_module()->GetDebugInfo()->RemoveIsolate(
+ isolate_);
}
}
wasm_scripts_with_breakpoints_ = Handle<WeakArrayList>{};
@@ -2327,7 +2330,7 @@ bool Debug::PerformSideEffectCheckForCallback(
// TODO(7515): always pass a valid callback info object.
if (!callback_info.is_null()) {
if (callback_info->IsAccessorInfo()) {
- // List of whitelisted internal accessors can be found in accessors.h.
+ // List of allowlisted internal accessors can be found in accessors.h.
AccessorInfo info = AccessorInfo::cast(*callback_info);
DCHECK_NE(kNotAccessor, accessor_kind);
switch (accessor_kind == kSetter ? info.setter_side_effect_type()
diff --git a/chromium/v8/src/debug/liveedit.cc b/chromium/v8/src/debug/liveedit.cc
index cd40eae6561..e1891af0ce4 100644
--- a/chromium/v8/src/debug/liveedit.cc
+++ b/chromium/v8/src/debug/liveedit.cc
@@ -757,7 +757,14 @@ bool ParseScript(Isolate* isolate, Handle<Script> script, ParseInfo* parse_info,
success = Compiler::CompileForLiveEdit(parse_info, script, isolate)
.ToHandle(&shared);
} else {
- success = parsing::ParseProgram(parse_info, script, isolate);
+ success = parsing::ParseProgram(parse_info, script, isolate,
+ parsing::ReportStatisticsMode::kYes);
+ if (!success) {
+ // Throw the parser error.
+ parse_info->pending_error_handler()->PrepareErrors(
+ isolate, parse_info->ast_value_factory());
+ parse_info->pending_error_handler()->ReportErrors(isolate, script);
+ }
}
if (!success) {
isolate->OptionalRescheduleException(false);
@@ -1003,7 +1010,8 @@ bool CanRestartFrame(
void TranslateSourcePositionTable(Isolate* isolate, Handle<BytecodeArray> code,
const std::vector<SourceChangeRange>& diffs) {
- SourcePositionTableBuilder builder;
+ Zone zone(isolate->allocator(), ZONE_NAME);
+ SourcePositionTableBuilder builder(&zone);
Handle<ByteArray> source_position_table(code->SourcePositionTable(), isolate);
for (SourcePositionTableIterator iterator(*source_position_table);
@@ -1145,7 +1153,9 @@ void LiveEdit::PatchScript(Isolate* isolate, Handle<Script> script,
js_function->set_raw_feedback_cell(
*isolate->factory()->many_closures_cell());
if (!js_function->is_compiled()) continue;
- JSFunction::EnsureFeedbackVector(js_function);
+ IsCompiledScope is_compiled_scope(
+ js_function->shared().is_compiled_scope());
+ JSFunction::EnsureFeedbackVector(js_function, &is_compiled_scope);
}
if (!sfi->HasBytecodeArray()) continue;
@@ -1186,7 +1196,9 @@ void LiveEdit::PatchScript(Isolate* isolate, Handle<Script> script,
js_function->set_raw_feedback_cell(
*isolate->factory()->many_closures_cell());
if (!js_function->is_compiled()) continue;
- JSFunction::EnsureFeedbackVector(js_function);
+ IsCompiledScope is_compiled_scope(
+ js_function->shared().is_compiled_scope());
+ JSFunction::EnsureFeedbackVector(js_function, &is_compiled_scope);
}
}
SharedFunctionInfo::ScriptIterator it(isolate, *new_script);
diff --git a/chromium/v8/src/debug/wasm/gdb-server/wasm-module-debug.cc b/chromium/v8/src/debug/wasm/gdb-server/wasm-module-debug.cc
index 2bd9b1e5d49..5074acbb63e 100644
--- a/chromium/v8/src/debug/wasm/gdb-server/wasm-module-debug.cc
+++ b/chromium/v8/src/debug/wasm/gdb-server/wasm-module-debug.cc
@@ -371,9 +371,8 @@ bool WasmModuleDebug::GetWasmValue(const wasm::WasmValue& wasm_value,
return StoreValue(wasm_value.to_s128(), buffer, buffer_size, size);
case wasm::kWasmStmt.kind():
- case wasm::kWasmAnyRef.kind():
+ case wasm::kWasmExternRef.kind():
case wasm::kWasmFuncRef.kind():
- case wasm::kWasmNullRef.kind():
case wasm::kWasmExnRef.kind():
case wasm::kWasmBottom.kind():
default:
diff --git a/chromium/v8/src/deoptimizer/OWNERS b/chromium/v8/src/deoptimizer/OWNERS
index b8d344ab4c3..e4ff70c640f 100644
--- a/chromium/v8/src/deoptimizer/OWNERS
+++ b/chromium/v8/src/deoptimizer/OWNERS
@@ -1,6 +1,7 @@
-bmeurer@chromium.org
-jarin@chromium.org
+jgruber@chromium.org
+neis@chromium.org
+nicohartmann@chromium.org
sigurds@chromium.org
tebbi@chromium.org
-# COMPONENT: Blink>JavaScript>Runtime
+# COMPONENT: Blink>JavaScript>Compiler
diff --git a/chromium/v8/src/deoptimizer/arm64/deoptimizer-arm64.cc b/chromium/v8/src/deoptimizer/arm64/deoptimizer-arm64.cc
index 9477fe185be..c66d6a81559 100644
--- a/chromium/v8/src/deoptimizer/arm64/deoptimizer-arm64.cc
+++ b/chromium/v8/src/deoptimizer/arm64/deoptimizer-arm64.cc
@@ -295,7 +295,7 @@ void Deoptimizer::GenerateDeoptimizationEntries(MacroAssembler* masm,
FrameDescription::continuation_offset()));
__ Ldr(lr, MemOperand(last_output_frame, FrameDescription::pc_offset()));
#ifdef V8_ENABLE_CONTROL_FLOW_INTEGRITY
- __ Autiasp();
+ __ Autibsp();
#endif
__ Br(continuation);
}
diff --git a/chromium/v8/src/deoptimizer/deoptimize-reason.h b/chromium/v8/src/deoptimizer/deoptimize-reason.h
index 4a23da8532e..05cb2680a3b 100644
--- a/chromium/v8/src/deoptimizer/deoptimize-reason.h
+++ b/chromium/v8/src/deoptimizer/deoptimize-reason.h
@@ -38,10 +38,12 @@ namespace internal {
V(MinusZero, "minus zero") \
V(NaN, "NaN") \
V(NoCache, "no cache") \
+ V(NotABigInt, "not a BigInt") \
V(NotAHeapNumber, "not a heap number") \
V(NotAJavaScriptObject, "not a JavaScript object") \
V(NotAJavaScriptObjectOrNullOrUndefined, \
"not a JavaScript object, Null or Undefined") \
+ V(NotANumberOrBoolean, "not a Number or Boolean") \
V(NotANumberOrOddball, "not a Number or Oddball") \
V(NotAnArrayIndex, "not an array index") \
V(NotASmi, "not a Smi") \
diff --git a/chromium/v8/src/deoptimizer/deoptimizer.cc b/chromium/v8/src/deoptimizer/deoptimizer.cc
index 44c92f55704..066be821162 100644
--- a/chromium/v8/src/deoptimizer/deoptimizer.cc
+++ b/chromium/v8/src/deoptimizer/deoptimizer.cc
@@ -26,6 +26,7 @@
#include "src/objects/heap-number-inl.h"
#include "src/objects/smi.h"
#include "src/tracing/trace-event.h"
+#include "torque-generated/exported-class-definitions-tq.h"
// Has to be the last include (doesn't have include guards)
#include "src/objects/object-macros.h"
@@ -911,9 +912,19 @@ void Deoptimizer::DoComputeInterpretedFrame(TranslatedFrame* translated_frame,
frame_writer.PushRawObject(roots.the_hole_value(), "padding\n");
}
+#ifdef V8_REVERSE_JSARGS
+ std::vector<TranslatedFrame::iterator> parameters;
+ for (int i = 0; i < parameters_count; ++i, ++value_iterator) {
+ parameters.push_back(value_iterator);
+ }
+ for (auto& parameter : base::Reversed(parameters)) {
+ frame_writer.PushTranslatedValue(parameter, "stack parameter");
+ }
+#else
for (int i = 0; i < parameters_count; ++i, ++value_iterator) {
frame_writer.PushTranslatedValue(value_iterator, "stack parameter");
}
+#endif
DCHECK_EQ(output_frame->GetLastArgumentSlotOffset(),
frame_writer.top_offset());
@@ -3461,6 +3472,7 @@ void TranslatedState::InitializeCapturedObjectAt(
case STRING_TABLE_TYPE:
case PROPERTY_ARRAY_TYPE:
case SCRIPT_CONTEXT_TABLE_TYPE:
+ case SLOPPY_ARGUMENTS_ELEMENTS_TYPE:
InitializeObjectWithTaggedFieldsAt(frame, &value_index, slot, map,
no_allocation);
break;
@@ -3625,6 +3637,19 @@ void TranslatedState::EnsureCapturedObjectAllocatedAt(
&value_index, worklist);
}
+ case SLOPPY_ARGUMENTS_ELEMENTS_TYPE: {
+ // Verify that the arguments size is correct.
+ int args_length = frame->values_[value_index].GetSmiValue();
+ int args_size = SloppyArgumentsElements::SizeFor(args_length);
+ CHECK_EQ(args_size, slot->GetChildrenCount() * kTaggedSize);
+
+ slot->set_storage(AllocateStorageFor(slot));
+
+ // Make sure all the remaining children (after the map) are allocated.
+ return EnsureChildrenAllocated(slot->GetChildrenCount() - 1, frame,
+ &value_index, worklist);
+ }
+
case PROPERTY_ARRAY_TYPE: {
// Check we have the right size.
int length_or_hash = frame->values_[value_index].GetSmiValue();
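// ---------------------------------------------------------------------------
// Illustrative sketch (not part of the patch above): what the
// V8_REVERSE_JSARGS hunk earlier in this file is doing. The translated-value
// iterator only moves forward, so the deoptimizer first buffers one iterator
// per stack parameter and then writes them out in reverse order. Plain std::
// types stand in for TranslatedFrame::iterator and the frame writer.
#include <cstdio>
#include <vector>

int main() {
  std::vector<int> parameters = {11, 22, 33};  // values in forward order
  std::vector<const int*> buffered;            // stands in for the iterators
  for (const int& p : parameters) buffered.push_back(&p);
  // Reversed emission, equivalent to iterating base::Reversed(buffered).
  for (auto it = buffered.rbegin(); it != buffered.rend(); ++it) {
    std::printf("push stack parameter %d\n", **it);
  }
  return 0;
}
// ---------------------------------------------------------------------------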
diff --git a/chromium/v8/src/diagnostics/arm/disasm-arm.cc b/chromium/v8/src/diagnostics/arm/disasm-arm.cc
index e17936d8d2d..e7f006f0e90 100644
--- a/chromium/v8/src/diagnostics/arm/disasm-arm.cc
+++ b/chromium/v8/src/diagnostics/arm/disasm-arm.cc
@@ -34,6 +34,7 @@
#include "src/base/platform/platform.h"
#include "src/codegen/arm/assembler-arm.h"
#include "src/codegen/arm/constants-arm.h"
+#include "src/codegen/arm/register-arm.h"
#include "src/diagnostics/disasm.h"
#include "src/utils/vector.h"
@@ -70,7 +71,9 @@ class Decoder {
void PrintRegister(int reg);
void PrintSRegister(int reg);
void PrintDRegister(int reg);
- int FormatVFPRegister(Instruction* instr, const char* format);
+ void PrintQRegister(int reg);
+ int FormatVFPRegister(Instruction* instr, const char* format,
+ VFPRegPrecision precision);
void PrintMovwMovt(Instruction* instr);
int FormatVFPinstruction(Instruction* instr, const char* format);
void PrintCondition(Instruction* instr);
@@ -160,6 +163,11 @@ void Decoder::PrintSRegister(int reg) { Print(VFPRegisters::Name(reg, false)); }
// Print the VFP D register name according to the active name converter.
void Decoder::PrintDRegister(int reg) { Print(VFPRegisters::Name(reg, true)); }
+// Print the VFP Q register name according to the active name converter.
+void Decoder::PrintQRegister(int reg) {
+ Print(RegisterName(QwNeonRegister::from_code(reg)));
+}
+
// These shift names are defined in a way to match the native disassembler
// formatting. See for example the command "objdump -d <binary file>".
static const char* const shift_names[kNumberOfShifts] = {"lsl", "lsr", "asr",
@@ -312,12 +320,8 @@ int Decoder::FormatRegister(Instruction* instr, const char* format) {
// Handle all VFP register based formatting in this function to reduce the
// complexity of FormatOption.
-int Decoder::FormatVFPRegister(Instruction* instr, const char* format) {
- DCHECK((format[0] == 'S') || (format[0] == 'D'));
-
- VFPRegPrecision precision =
- format[0] == 'D' ? kDoublePrecision : kSinglePrecision;
-
+int Decoder::FormatVFPRegister(Instruction* instr, const char* format,
+ VFPRegPrecision precision) {
int retval = 2;
int reg = -1;
if (format[1] == 'n') {
@@ -334,9 +338,10 @@ int Decoder::FormatVFPRegister(Instruction* instr, const char* format) {
}
if (format[2] == '+') {
+ DCHECK_NE(kSimd128Precision, precision); // Simd128 unimplemented.
int immed8 = instr->Immed8Value();
- if (format[0] == 'S') reg += immed8 - 1;
- if (format[0] == 'D') reg += (immed8 / 2 - 1);
+ if (precision == kSinglePrecision) reg += immed8 - 1;
+ if (precision == kDoublePrecision) reg += (immed8 / 2 - 1);
}
if (format[2] == '+') retval = 3;
} else {
@@ -345,8 +350,11 @@ int Decoder::FormatVFPRegister(Instruction* instr, const char* format) {
if (precision == kSinglePrecision) {
PrintSRegister(reg);
- } else {
+ } else if (precision == kDoublePrecision) {
PrintDRegister(reg);
+ } else {
+ DCHECK_EQ(kSimd128Precision, precision);
+ PrintQRegister(reg);
}
return retval;
@@ -644,9 +652,11 @@ int Decoder::FormatOption(Instruction* instr, const char* format) {
return 1;
}
case 'S':
- case 'D': {
- return FormatVFPRegister(instr, format);
- }
+ return FormatVFPRegister(instr, format, kSinglePrecision);
+ case 'D':
+ return FormatVFPRegister(instr, format, kDoublePrecision);
+ case 'Q':
+ return FormatVFPRegister(instr, format, kSimd128Precision);
case 'w': { // 'w: W field of load and store instructions
if (instr->HasW()) {
Print("!");
@@ -2264,6 +2274,35 @@ void Decoder::DecodeSpecialCondition(Instruction* instr) {
out_buffer_pos_ +=
SNPrintF(out_buffer_ + out_buffer_pos_, "%s.%c%i d%d, q%d", name,
type, size, Vd, Vm);
+ } else if (instr->Bits(17, 16) == 0x2 && instr->Bit(10) == 1) {
+ // NEON vrintm, vrintp, vrintz
+ bool dp_op = instr->Bit(6) == 0;
+ int rounding_mode = instr->Bits(9, 7);
+ switch (rounding_mode) {
+ case 3:
+ if (dp_op) {
+ Format(instr, "vrintz.f32 'Dd, 'Dm");
+ } else {
+ Format(instr, "vrintz.f32 'Qd, 'Qm");
+ }
+ break;
+ case 5:
+ if (dp_op) {
+ Format(instr, "vrintm.f32 'Dd, 'Dm");
+ } else {
+ Format(instr, "vrintm.f32 'Qd, 'Qm");
+ }
+ break;
+ case 7:
+ if (dp_op) {
+ Format(instr, "vrintp.f32 'Dd, 'Dm");
+ } else {
+ Format(instr, "vrintp.f32 'Qd, 'Qm");
+ }
+ break;
+ default:
+ UNIMPLEMENTED();
+ }
} else {
int Vd, Vm;
if (instr->Bit(6) == 0) {
diff --git a/chromium/v8/src/diagnostics/arm64/disasm-arm64.cc b/chromium/v8/src/diagnostics/arm64/disasm-arm64.cc
index 3f8f40a244d..7ee499c183f 100644
--- a/chromium/v8/src/diagnostics/arm64/disasm-arm64.cc
+++ b/chromium/v8/src/diagnostics/arm64/disasm-arm64.cc
@@ -1377,6 +1377,10 @@ void DisassemblingDecoder::VisitFPIntegerConvert(Instruction* instr) {
mnemonic = "ucvtf";
form = form_fr;
break;
+ case FJCVTZS:
+ mnemonic = "fjcvtzs";
+ form = form_rf;
+ break;
}
Format(instr, mnemonic, form);
}
@@ -1419,10 +1423,10 @@ void DisassemblingDecoder::VisitFPFixedPointConvert(Instruction* instr) {
// clang-format off
#define PAUTH_SYSTEM_MNEMONICS(V) \
- V(PACIA1716, "pacia1716") \
- V(AUTIA1716, "autia1716") \
- V(PACIASP, "paciasp") \
- V(AUTIASP, "autiasp")
+ V(PACIB1716, "pacib1716") \
+ V(AUTIB1716, "autib1716") \
+ V(PACIBSP, "pacibsp") \
+ V(AUTIBSP, "autibsp")
// clang-format on
void DisassemblingDecoder::VisitSystem(Instruction* instr) {
diff --git a/chromium/v8/src/diagnostics/basic-block-profiler.cc b/chromium/v8/src/diagnostics/basic-block-profiler.cc
index 262a5364b5e..f61292c3cdd 100644
--- a/chromium/v8/src/diagnostics/basic-block-profiler.cc
+++ b/chromium/v8/src/diagnostics/basic-block-profiler.cc
@@ -9,84 +9,138 @@
#include <sstream>
#include "src/base/lazy-instance.h"
+#include "src/heap/heap-inl.h"
+#include "torque-generated/exported-class-definitions-tq-inl.h"
namespace v8 {
namespace internal {
DEFINE_LAZY_LEAKY_OBJECT_GETTER(BasicBlockProfiler, BasicBlockProfiler::Get)
-BasicBlockProfiler::Data::Data(size_t n_blocks)
- : n_blocks_(n_blocks),
- block_rpo_numbers_(n_blocks_),
- counts_(n_blocks_, 0) {}
+BasicBlockProfilerData::BasicBlockProfilerData(size_t n_blocks)
+ : block_rpo_numbers_(n_blocks), counts_(n_blocks, 0) {}
-static void InsertIntoString(std::ostringstream* os, std::string* string) {
- string->insert(0, os->str());
+void BasicBlockProfilerData::SetCode(const std::ostringstream& os) {
+ code_ = os.str();
}
-static void InsertIntoString(const char* data, std::string* string) {
- string->insert(0, data);
+void BasicBlockProfilerData::SetFunctionName(std::unique_ptr<char[]> name) {
+ function_name_ = name.get();
}
-void BasicBlockProfiler::Data::SetCode(std::ostringstream* os) {
- InsertIntoString(os, &code_);
+void BasicBlockProfilerData::SetSchedule(const std::ostringstream& os) {
+ schedule_ = os.str();
}
-void BasicBlockProfiler::Data::SetFunctionName(std::unique_ptr<char[]> name) {
- InsertIntoString(name.get(), &function_name_);
+void BasicBlockProfilerData::SetBlockRpoNumber(size_t offset,
+ int32_t block_rpo) {
+ DCHECK(offset < n_blocks());
+ block_rpo_numbers_[offset] = block_rpo;
}
-void BasicBlockProfiler::Data::SetSchedule(std::ostringstream* os) {
- InsertIntoString(os, &schedule_);
+void BasicBlockProfilerData::ResetCounts() {
+ for (size_t i = 0; i < n_blocks(); ++i) {
+ counts_[i] = 0;
+ }
}
-void BasicBlockProfiler::Data::SetBlockRpoNumber(size_t offset,
- int32_t block_rpo) {
- DCHECK(offset < n_blocks_);
- block_rpo_numbers_[offset] = block_rpo;
+BasicBlockProfilerData* BasicBlockProfiler::NewData(size_t n_blocks) {
+ base::MutexGuard lock(&data_list_mutex_);
+ auto data = std::make_unique<BasicBlockProfilerData>(n_blocks);
+ BasicBlockProfilerData* data_ptr = data.get();
+ data_list_.push_back(std::move(data));
+ return data_ptr;
}
-intptr_t BasicBlockProfiler::Data::GetCounterAddress(size_t offset) {
- DCHECK(offset < n_blocks_);
- return reinterpret_cast<intptr_t>(&(counts_[offset]));
+namespace {
+Handle<String> CopyStringToJSHeap(const std::string& source, Isolate* isolate) {
+ return isolate->factory()->NewStringFromAsciiChecked(source.c_str(),
+ AllocationType::kOld);
}
-void BasicBlockProfiler::Data::ResetCounts() {
- for (size_t i = 0; i < n_blocks_; ++i) {
- counts_[i] = 0;
+// Size of entries in both block_rpo_numbers and counts.
+constexpr int kBasicBlockSlotSize = kInt32Size;
+} // namespace
+
+BasicBlockProfilerData::BasicBlockProfilerData(
+ Handle<OnHeapBasicBlockProfilerData> js_heap_data, Isolate* isolate) {
+ function_name_ = js_heap_data->name().ToCString().get();
+ schedule_ = js_heap_data->schedule().ToCString().get();
+ code_ = js_heap_data->code().ToCString().get();
+ Handle<ByteArray> counts(js_heap_data->counts(), isolate);
+ for (int i = 0; i < counts->length() / kBasicBlockSlotSize; ++i) {
+ counts_.push_back(counts->get_uint32(i));
}
+ Handle<ByteArray> rpo_numbers(js_heap_data->block_rpo_numbers(), isolate);
+ for (int i = 0; i < rpo_numbers->length() / kBasicBlockSlotSize; ++i) {
+ block_rpo_numbers_.push_back(rpo_numbers->get_int(i));
+ }
+ CHECK_EQ(block_rpo_numbers_.size(), counts_.size());
}
-BasicBlockProfiler::Data* BasicBlockProfiler::NewData(size_t n_blocks) {
- base::MutexGuard lock(&data_list_mutex_);
- Data* data = new Data(n_blocks);
- data_list_.push_back(data);
- return data;
+Handle<OnHeapBasicBlockProfilerData> BasicBlockProfilerData::CopyToJSHeap(
+ Isolate* isolate) {
+ int array_size_in_bytes = static_cast<int>(n_blocks() * kBasicBlockSlotSize);
+ CHECK(array_size_in_bytes >= 0 &&
+ static_cast<size_t>(array_size_in_bytes) / kBasicBlockSlotSize ==
+ n_blocks()); // Overflow
+ Handle<ByteArray> block_rpo_numbers = isolate->factory()->NewByteArray(
+ array_size_in_bytes, AllocationType::kOld);
+ for (int i = 0; i < static_cast<int>(n_blocks()); ++i) {
+ block_rpo_numbers->set_int(i, block_rpo_numbers_[i]);
+ }
+ Handle<ByteArray> counts = isolate->factory()->NewByteArray(
+ array_size_in_bytes, AllocationType::kOld);
+ for (int i = 0; i < static_cast<int>(n_blocks()); ++i) {
+ counts->set_uint32(i, counts_[i]);
+ }
+ Handle<String> name = CopyStringToJSHeap(function_name_, isolate);
+ Handle<String> schedule = CopyStringToJSHeap(schedule_, isolate);
+ Handle<String> code = CopyStringToJSHeap(code_, isolate);
+
+ return isolate->factory()->NewOnHeapBasicBlockProfilerData(
+ block_rpo_numbers, counts, name, schedule, code, AllocationType::kOld);
}
-BasicBlockProfiler::~BasicBlockProfiler() {
- for (DataList::iterator i = data_list_.begin(); i != data_list_.end(); ++i) {
- delete (*i);
+void BasicBlockProfiler::ResetCounts(Isolate* isolate) {
+ for (const auto& data : data_list_) {
+ data->ResetCounts();
+ }
+ HandleScope scope(isolate);
+ Handle<ArrayList> list(isolate->heap()->basic_block_profiling_data(),
+ isolate);
+ for (int i = 0; i < list->Length(); ++i) {
+ Handle<ByteArray> counts(
+ OnHeapBasicBlockProfilerData::cast(list->Get(i)).counts(), isolate);
+ for (int j = 0; j < counts->length() / kBasicBlockSlotSize; ++j) {
+ counts->set_uint32(j, 0);
+ }
}
}
-void BasicBlockProfiler::ResetCounts() {
- for (DataList::iterator i = data_list_.begin(); i != data_list_.end(); ++i) {
- (*i)->ResetCounts();
- }
+bool BasicBlockProfiler::HasData(Isolate* isolate) {
+ return data_list_.size() > 0 ||
+ isolate->heap()->basic_block_profiling_data().Length() > 0;
}
-std::ostream& operator<<(std::ostream& os, const BasicBlockProfiler& p) {
+void BasicBlockProfiler::Print(std::ostream& os, Isolate* isolate) {
os << "---- Start Profiling Data ----" << std::endl;
- using iterator = BasicBlockProfiler::DataList::const_iterator;
- for (iterator i = p.data_list_.begin(); i != p.data_list_.end(); ++i) {
- os << **i;
+ for (const auto& data : data_list_) {
+ os << *data;
+ }
+ HandleScope scope(isolate);
+ Handle<ArrayList> list(isolate->heap()->basic_block_profiling_data(),
+ isolate);
+ for (int i = 0; i < list->Length(); ++i) {
+ BasicBlockProfilerData data(
+ handle(OnHeapBasicBlockProfilerData::cast(list->Get(i)), isolate),
+ isolate);
+ os << data;
}
os << "---- End Profiling Data ----" << std::endl;
- return os;
}
-std::ostream& operator<<(std::ostream& os, const BasicBlockProfiler::Data& d) {
+std::ostream& operator<<(std::ostream& os, const BasicBlockProfilerData& d) {
int block_count_sum = std::accumulate(d.counts_.begin(), d.counts_.end(), 0);
if (block_count_sum == 0) return os;
const char* name = "unknown function";
@@ -100,8 +154,8 @@ std::ostream& operator<<(std::ostream& os, const BasicBlockProfiler::Data& d) {
}
os << "block counts for " << name << ":" << std::endl;
std::vector<std::pair<int32_t, uint32_t>> pairs;
- pairs.reserve(d.n_blocks_);
- for (size_t i = 0; i < d.n_blocks_; ++i) {
+ pairs.reserve(d.n_blocks());
+ for (size_t i = 0; i < d.n_blocks(); ++i) {
pairs.push_back(std::make_pair(d.block_rpo_numbers_[i], d.counts_[i]));
}
std::sort(pairs.begin(), pairs.end(),
diff --git a/chromium/v8/src/diagnostics/basic-block-profiler.h b/chromium/v8/src/diagnostics/basic-block-profiler.h
index 9639e0b6615..0eb82e8a1bc 100644
--- a/chromium/v8/src/diagnostics/basic-block-profiler.h
+++ b/chromium/v8/src/diagnostics/basic-block-profiler.h
@@ -14,66 +14,71 @@
#include "src/base/macros.h"
#include "src/base/platform/mutex.h"
#include "src/common/globals.h"
+#include "torque-generated/exported-class-definitions-tq.h"
namespace v8 {
namespace internal {
+class BasicBlockProfilerData {
+ public:
+ explicit BasicBlockProfilerData(size_t n_blocks);
+ V8_EXPORT_PRIVATE BasicBlockProfilerData(
+ Handle<OnHeapBasicBlockProfilerData> js_heap_data, Isolate* isolate);
+
+ size_t n_blocks() const {
+ DCHECK_EQ(block_rpo_numbers_.size(), counts_.size());
+ return block_rpo_numbers_.size();
+ }
+ const uint32_t* counts() const { return &counts_[0]; }
+
+ void SetCode(const std::ostringstream& os);
+ void SetFunctionName(std::unique_ptr<char[]> name);
+ void SetSchedule(const std::ostringstream& os);
+ void SetBlockRpoNumber(size_t offset, int32_t block_rpo);
+
+ // Copy the data from this object into an equivalent object stored on the JS
+ // heap, so that it can survive snapshotting and relocation. This must
+ // happen on the main thread during finalization of the compilation.
+ Handle<OnHeapBasicBlockProfilerData> CopyToJSHeap(Isolate* isolate);
+
+ private:
+ friend class BasicBlockProfiler;
+ friend std::ostream& operator<<(std::ostream& os,
+ const BasicBlockProfilerData& s);
+
+ V8_EXPORT_PRIVATE void ResetCounts();
+
+ std::vector<int32_t> block_rpo_numbers_;
+ std::vector<uint32_t> counts_;
+ std::string function_name_;
+ std::string schedule_;
+ std::string code_;
+ DISALLOW_COPY_AND_ASSIGN(BasicBlockProfilerData);
+};
+
class BasicBlockProfiler {
public:
- class Data {
- public:
- size_t n_blocks() const { return n_blocks_; }
- const uint32_t* counts() const { return &counts_[0]; }
-
- void SetCode(std::ostringstream* os);
- void SetFunctionName(std::unique_ptr<char[]> name);
- void SetSchedule(std::ostringstream* os);
- void SetBlockRpoNumber(size_t offset, int32_t block_rpo);
- intptr_t GetCounterAddress(size_t offset);
-
- private:
- friend class BasicBlockProfiler;
- friend std::ostream& operator<<(std::ostream& os,
- const BasicBlockProfiler::Data& s);
-
- explicit Data(size_t n_blocks);
- ~Data() = default;
-
- V8_EXPORT_PRIVATE void ResetCounts();
-
- const size_t n_blocks_;
- std::vector<int32_t> block_rpo_numbers_;
- std::vector<uint32_t> counts_;
- std::string function_name_;
- std::string schedule_;
- std::string code_;
- DISALLOW_COPY_AND_ASSIGN(Data);
- };
-
- using DataList = std::list<Data*>;
+ using DataList = std::list<std::unique_ptr<BasicBlockProfilerData>>;
BasicBlockProfiler() = default;
- ~BasicBlockProfiler();
+ ~BasicBlockProfiler() = default;
V8_EXPORT_PRIVATE static BasicBlockProfiler* Get();
- Data* NewData(size_t n_blocks);
- V8_EXPORT_PRIVATE void ResetCounts();
+ BasicBlockProfilerData* NewData(size_t n_blocks);
+ V8_EXPORT_PRIVATE void ResetCounts(Isolate* isolate);
+ V8_EXPORT_PRIVATE bool HasData(Isolate* isolate);
+ V8_EXPORT_PRIVATE void Print(std::ostream& os, Isolate* isolate);
const DataList* data_list() { return &data_list_; }
private:
- friend V8_EXPORT_PRIVATE std::ostream& operator<<(
- std::ostream& os, const BasicBlockProfiler& s);
-
DataList data_list_;
base::Mutex data_list_mutex_;
DISALLOW_COPY_AND_ASSIGN(BasicBlockProfiler);
};
-V8_EXPORT_PRIVATE std::ostream& operator<<(std::ostream& os,
- const BasicBlockProfiler& s);
-std::ostream& operator<<(std::ostream& os, const BasicBlockProfiler::Data& s);
+std::ostream& operator<<(std::ostream& os, const BasicBlockProfilerData& s);
} // namespace internal
} // namespace v8
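The new BasicBlockProfilerData mirrors its off-heap std::vector<uint32_t> counters into a ByteArray of fixed-width slots when CopyToJSHeap runs, which is why ResetCounts above walks counts->length() / kBasicBlockSlotSize entries. A minimal standalone sketch of that slot layout, assuming a 4-byte slot size standing in for V8's kBasicBlockSlotSize:

#include <cstddef>
#include <cstdint>
#include <cstring>
#include <vector>

// Hypothetical stand-in for V8's kBasicBlockSlotSize (one uint32 per block).
constexpr int kSlotSize = 4;

// Mirror off-heap counters into a flat byte buffer, one uint32 per slot,
// the same layout CopyToJSHeap writes into the on-heap ByteArray.
std::vector<uint8_t> CopyCountsToFlatBuffer(const std::vector<uint32_t>& counts) {
  std::vector<uint8_t> buffer(counts.size() * kSlotSize);
  for (size_t i = 0; i < counts.size(); ++i) {
    std::memcpy(&buffer[i * kSlotSize], &counts[i], kSlotSize);
  }
  return buffer;
}

// Zero every slot in place, mirroring what ResetCounts does to the ByteArray.
void ResetFlatBuffer(std::vector<uint8_t>* buffer) {
  for (size_t i = 0; i < buffer->size() / kSlotSize; ++i) {
    uint32_t zero = 0;
    std::memcpy(&(*buffer)[i * kSlotSize], &zero, kSlotSize);
  }
}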
diff --git a/chromium/v8/src/diagnostics/code-tracer.h b/chromium/v8/src/diagnostics/code-tracer.h
index a9f276bf448..194d3cbe667 100644
--- a/chromium/v8/src/diagnostics/code-tracer.h
+++ b/chromium/v8/src/diagnostics/code-tracer.h
@@ -5,9 +5,11 @@
#ifndef V8_DIAGNOSTICS_CODE_TRACER_H_
#define V8_DIAGNOSTICS_CODE_TRACER_H_
+#include "src/base/optional.h"
#include "src/common/globals.h"
#include "src/flags/flags.h"
#include "src/utils/allocation.h"
+#include "src/utils/ostreams.h"
#include "src/utils/utils.h"
#include "src/utils/vector.h"
@@ -45,6 +47,28 @@ class CodeTracer final : public Malloced {
CodeTracer* tracer_;
};
+ class StreamScope : public Scope {
+ public:
+ explicit StreamScope(CodeTracer* tracer) : Scope(tracer) {
+ FILE* file = this->file();
+ if (file == stdout) {
+ stdout_stream_.emplace();
+ } else {
+ file_stream_.emplace(file);
+ }
+ }
+
+ std::ostream& stream() {
+ if (stdout_stream_.has_value()) return stdout_stream_.value();
+ return file_stream_.value();
+ }
+
+ private:
+ // Exactly one of these two will be initialized.
+ base::Optional<StdoutStream> stdout_stream_;
+ base::Optional<OFStream> file_stream_;
+ };
+
void OpenFile() {
if (!ShouldRedirect()) {
return;
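StreamScope above selects exactly one of two lazily constructed streams, depending on whether the tracer writes to stdout or to a redirected file. A standalone sketch of the same pattern, using std::optional and the standard streams in place of V8's StdoutStream/OFStream wrappers:

#include <fstream>
#include <iostream>
#include <optional>

class StreamScope {
 public:
  // Pass nullptr to trace to stdout, or a path to trace to a file.
  explicit StreamScope(const char* path_or_null) {
    if (path_or_null == nullptr) {
      use_stdout_ = true;
    } else {
      file_stream_.emplace(path_or_null);
    }
  }

  std::ostream& stream() {
    if (use_stdout_) return std::cout;
    return *file_stream_;
  }

 private:
  // Exactly one of these two configurations is active.
  bool use_stdout_ = false;
  std::optional<std::ofstream> file_stream_;
};

// Usage: StreamScope scope(nullptr); scope.stream() << "code trace\n";

Deferring construction through the optional avoids opening a file stream at all when tracing goes to stdout, matching the "exactly one of these two will be initialized" comment in the header.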
diff --git a/chromium/v8/src/diagnostics/ia32/disasm-ia32.cc b/chromium/v8/src/diagnostics/ia32/disasm-ia32.cc
index 5e0c5c65e23..6db09c3c97a 100644
--- a/chromium/v8/src/diagnostics/ia32/disasm-ia32.cc
+++ b/chromium/v8/src/diagnostics/ia32/disasm-ia32.cc
@@ -791,6 +791,18 @@ int DisassemblerIA32::AVXInstruction(byte* data) {
int mod, regop, rm, vvvv = vex_vreg();
get_modrm(*current, &mod, &regop, &rm);
switch (opcode) {
+ case 0x08:
+ AppendToBuffer("vroundps %s,", NameOfXMMRegister(regop));
+ current += PrintRightXMMOperand(current);
+ AppendToBuffer(",%d", Imm8_U(current));
+ current++;
+ break;
+ case 0x09:
+ AppendToBuffer("vroundpd %s,", NameOfXMMRegister(regop));
+ current += PrintRightXMMOperand(current);
+ AppendToBuffer(",%d", Imm8_U(current));
+ current++;
+ break;
case 0x0E:
AppendToBuffer("vpblendw %s,%s,", NameOfXMMRegister(regop),
NameOfXMMRegister(vvvv));
@@ -1847,6 +1859,12 @@ int DisassemblerIA32::InstructionDecode(v8::internal::Vector<char> out_buffer,
AppendToBuffer("movmskps %s,%s", NameOfCPURegister(regop),
NameOfXMMRegister(rm));
data++;
+ } else if (f0byte == 0xC0) {
+ data += 2;
+ data += PrintOperands("xadd_b", OPER_REG_OP_ORDER, data);
+ } else if (f0byte == 0xC1) {
+ data += 2;
+ data += PrintOperands("xadd", OPER_REG_OP_ORDER, data);
} else if (f0byte == 0xC2) {
data += 2;
int mod, regop, rm;
@@ -2120,7 +2138,23 @@ int DisassemblerIA32::InstructionDecode(v8::internal::Vector<char> out_buffer,
}
} else if (*data == 0x3A) {
data++;
- if (*data == 0x0A) {
+ if (*data == 0x08) {
+ data++;
+ int mod, regop, rm;
+ get_modrm(*data, &mod, &regop, &rm);
+ int8_t imm8 = static_cast<int8_t>(data[1]);
+ AppendToBuffer("roundps %s,%s,%d", NameOfXMMRegister(regop),
+ NameOfXMMRegister(rm), static_cast<int>(imm8));
+ data += 2;
+ } else if (*data == 0x09) {
+ data++;
+ int mod, regop, rm;
+ get_modrm(*data, &mod, &regop, &rm);
+ int8_t imm8 = static_cast<int8_t>(data[1]);
+ AppendToBuffer("roundpd %s,%s,%d", NameOfXMMRegister(regop),
+ NameOfXMMRegister(rm), static_cast<int>(imm8));
+ data += 2;
+ } else if (*data == 0x0A) {
data++;
int mod, regop, rm;
get_modrm(*data, &mod, &regop, &rm);
@@ -2318,6 +2352,9 @@ int DisassemblerIA32::InstructionDecode(v8::internal::Vector<char> out_buffer,
AppendToBuffer("movd ");
data += PrintRightOperand(data);
AppendToBuffer(",%s", NameOfXMMRegister(regop));
+ } else if (*data == 0xC1) {
+ data += 2;
+ data += PrintOperands("xadd_w", OPER_REG_OP_ORDER, data);
} else if (*data == 0xC2) {
data++;
int mod, regop, rm;
diff --git a/chromium/v8/src/diagnostics/objects-debug.cc b/chromium/v8/src/diagnostics/objects-debug.cc
index 32caba2da84..f94dd8a3c6a 100644
--- a/chromium/v8/src/diagnostics/objects-debug.cc
+++ b/chromium/v8/src/diagnostics/objects-debug.cc
@@ -29,7 +29,6 @@
#include "src/objects/free-space-inl.h"
#include "src/objects/function-kind.h"
#include "src/objects/hash-table-inl.h"
-#include "src/objects/js-aggregate-error-inl.h"
#include "src/objects/js-array-inl.h"
#include "src/objects/layout-descriptor.h"
#include "src/objects/objects-inl.h"
@@ -326,6 +325,11 @@ void VerifyJSObjectElements(Isolate* isolate, JSObject object) {
return;
}
+ if (object.HasSloppyArgumentsElements()) {
+ CHECK(object.elements().IsSloppyArgumentsElements());
+ return;
+ }
+
FixedArray elements = FixedArray::cast(object.elements());
if (object.HasSmiElements()) {
// We might have a partially initialized backing store, in which case we
@@ -626,39 +630,15 @@ void TransitionArray::TransitionArrayVerify(Isolate* isolate) {
CHECK_LE(LengthFor(number_of_transitions()), length());
}
-void JSArgumentsObject::JSArgumentsObjectVerify(Isolate* isolate) {
- TorqueGeneratedClassVerifiers::JSArgumentsObjectVerify(*this, isolate);
- if (IsSloppyArgumentsElementsKind(GetElementsKind())) {
- SloppyArgumentsElements::cast(elements())
- .SloppyArgumentsElementsVerify(isolate, *this);
- }
- if (isolate->IsInAnyContext(map(), Context::SLOPPY_ARGUMENTS_MAP_INDEX) ||
- isolate->IsInAnyContext(map(),
- Context::SLOW_ALIASED_ARGUMENTS_MAP_INDEX) ||
- isolate->IsInAnyContext(map(),
- Context::FAST_ALIASED_ARGUMENTS_MAP_INDEX)) {
- VerifyObjectField(isolate, JSSloppyArgumentsObject::kLengthOffset);
- VerifyObjectField(isolate, JSSloppyArgumentsObject::kCalleeOffset);
- } else if (isolate->IsInAnyContext(map(),
- Context::STRICT_ARGUMENTS_MAP_INDEX)) {
- VerifyObjectField(isolate, JSStrictArgumentsObject::kLengthOffset);
- }
-}
-
-void SloppyArgumentsElements::SloppyArgumentsElementsVerify(Isolate* isolate,
- JSObject holder) {
- FixedArrayVerify(isolate);
- // Abort verification if only partially initialized (can't use arguments()
- // getter because it does FixedArray::cast()).
- if (get(kArgumentsIndex).IsUndefined(isolate)) return;
-
+namespace {
+void SloppyArgumentsElementsVerify(Isolate* isolate,
+ SloppyArgumentsElements elements,
+ JSObject holder) {
+ elements.SloppyArgumentsElementsVerify(isolate);
ElementsKind kind = holder.GetElementsKind();
bool is_fast = kind == FAST_SLOPPY_ARGUMENTS_ELEMENTS;
- CHECK(IsFixedArray());
- CHECK_GE(length(), 2);
- CHECK_EQ(map(), ReadOnlyRoots(isolate).sloppy_arguments_elements_map());
- Context context_object = context();
- FixedArray arg_elements = FixedArray::cast(arguments());
+ Context context_object = elements.context();
+ FixedArray arg_elements = elements.arguments();
if (arg_elements.length() == 0) {
CHECK(arg_elements == ReadOnlyRoots(isolate).empty_fixed_array());
return;
@@ -674,7 +654,7 @@ void SloppyArgumentsElements::SloppyArgumentsElementsVerify(Isolate* isolate,
for (int i = 0; i < nofMappedParameters; i++) {
// Verify that each context-mapped argument is either the hole or a valid
// Smi within context length range.
- Object mapped = get_mapped_entry(i);
+ Object mapped = elements.mapped_entries(i);
if (mapped.IsTheHole(isolate)) {
// Slow sloppy arguments can be holey.
if (!is_fast) continue;
@@ -698,6 +678,26 @@ void SloppyArgumentsElements::SloppyArgumentsElementsVerify(Isolate* isolate,
CHECK_LE(maxMappedIndex, context_object.length());
CHECK_LE(maxMappedIndex, arg_elements.length());
}
+} // namespace
+
+void JSArgumentsObject::JSArgumentsObjectVerify(Isolate* isolate) {
+ TorqueGeneratedClassVerifiers::JSArgumentsObjectVerify(*this, isolate);
+ if (IsSloppyArgumentsElementsKind(GetElementsKind())) {
+ SloppyArgumentsElementsVerify(
+ isolate, SloppyArgumentsElements::cast(elements()), *this);
+ }
+ if (isolate->IsInAnyContext(map(), Context::SLOPPY_ARGUMENTS_MAP_INDEX) ||
+ isolate->IsInAnyContext(map(),
+ Context::SLOW_ALIASED_ARGUMENTS_MAP_INDEX) ||
+ isolate->IsInAnyContext(map(),
+ Context::FAST_ALIASED_ARGUMENTS_MAP_INDEX)) {
+ VerifyObjectField(isolate, JSSloppyArgumentsObject::kLengthOffset);
+ VerifyObjectField(isolate, JSSloppyArgumentsObject::kCalleeOffset);
+ } else if (isolate->IsInAnyContext(map(),
+ Context::STRICT_ARGUMENTS_MAP_INDEX)) {
+ VerifyObjectField(isolate, JSStrictArgumentsObject::kLengthOffset);
+ }
+}
void JSAsyncFunctionObject::JSAsyncFunctionObjectVerify(Isolate* isolate) {
TorqueGeneratedClassVerifiers::JSAsyncFunctionObjectVerify(*this, isolate);
@@ -926,6 +926,8 @@ void Oddball::OddballVerify(Isolate* isolate) {
} else if (map() == roots.self_reference_marker_map()) {
// Multiple instances of this oddball may exist at once.
CHECK_EQ(kind(), Oddball::kSelfReferenceMarker);
+ } else if (map() == roots.basic_block_counters_marker_map()) {
+ CHECK(*this == roots.basic_block_counters_marker());
} else {
UNREACHABLE();
}
@@ -1424,8 +1426,6 @@ void ObjectBoilerplateDescription::ObjectBoilerplateDescriptionVerify(
USE_TORQUE_VERIFIER(AsmWasmData)
-USE_TORQUE_VERIFIER(WasmDebugInfo)
-
void WasmInstanceObject::WasmInstanceObjectVerify(Isolate* isolate) {
JSObjectVerify(isolate);
CHECK(IsWasmInstanceObject());
@@ -1532,8 +1532,6 @@ void NormalizedMapCache::NormalizedMapCacheVerify(Isolate* isolate) {
}
}
-USE_TORQUE_VERIFIER(StackFrameInfo)
-
void PreparseData::PreparseDataVerify(Isolate* isolate) {
TorqueGeneratedClassVerifiers::PreparseDataVerify(*this, isolate);
CHECK_LE(0, data_length());
@@ -1659,8 +1657,7 @@ void JSObject::SpillInformation::Print() {
PrintF("\n");
}
-bool DescriptorArray::IsSortedNoDuplicates(int valid_entries) {
- if (valid_entries == -1) valid_entries = number_of_descriptors();
+bool DescriptorArray::IsSortedNoDuplicates() {
Name current_key;
uint32_t current = 0;
for (int i = 0; i < number_of_descriptors(); i++) {
@@ -1680,8 +1677,7 @@ bool DescriptorArray::IsSortedNoDuplicates(int valid_entries) {
return true;
}
-bool TransitionArray::IsSortedNoDuplicates(int valid_entries) {
- DCHECK_EQ(valid_entries, -1);
+bool TransitionArray::IsSortedNoDuplicates() {
Name prev_key;
PropertyKind prev_kind = kData;
PropertyAttributes prev_attributes = NONE;
diff --git a/chromium/v8/src/diagnostics/objects-printer.cc b/chromium/v8/src/diagnostics/objects-printer.cc
index 00ef81f56a6..9e554978a24 100644
--- a/chromium/v8/src/diagnostics/objects-printer.cc
+++ b/chromium/v8/src/diagnostics/objects-printer.cc
@@ -24,7 +24,6 @@
#include "src/objects/free-space-inl.h"
#include "src/objects/hash-table-inl.h"
#include "src/objects/heap-number-inl.h"
-#include "src/objects/js-aggregate-error-inl.h"
#include "src/objects/js-array-buffer-inl.h"
#include "src/objects/js-array-inl.h"
#include "src/objects/objects-inl.h"
@@ -98,19 +97,28 @@ void Object::Print(std::ostream& os) const { // NOLINT
}
}
-void HeapObject::PrintHeader(std::ostream& os, const char* id) { // NOLINT
- os << reinterpret_cast<void*>(ptr()) << ": [";
+namespace {
+
+void PrintHeapObjectHeaderWithoutMap(HeapObject object, std::ostream& os,
+ const char* id) { // NOLINT
+ os << reinterpret_cast<void*>(object.ptr()) << ": [";
if (id != nullptr) {
os << id;
} else {
- os << map().instance_type();
+ os << object.map().instance_type();
}
os << "]";
- if (ReadOnlyHeap::Contains(*this)) {
+ if (ReadOnlyHeap::Contains(object)) {
os << " in ReadOnlySpace";
- } else if (GetHeapFromWritableObject(*this)->InOldSpace(*this)) {
+ } else if (GetHeapFromWritableObject(object)->InOldSpace(object)) {
os << " in OldSpace";
}
+}
+
+} // namespace
+
+void HeapObject::PrintHeader(std::ostream& os, const char* id) { // NOLINT
+ PrintHeapObjectHeaderWithoutMap(*this, os, id);
if (!IsMap()) os << "\n - map: " << Brief(map());
}
@@ -436,11 +444,9 @@ void PrintSloppyArgumentElements(std::ostream& os, ElementsKind kind,
os << "\n 0: context: " << Brief(elements.context())
<< "\n 1: arguments_store: " << Brief(arguments_store)
<< "\n parameter to context slot map:";
- for (uint32_t i = 0; i < elements.parameter_map_length(); i++) {
- uint32_t raw_index = i + SloppyArgumentsElements::kParameterMapStart;
- Object mapped_entry = elements.get_mapped_entry(i);
- os << "\n " << raw_index << ": param(" << i
- << "): " << Brief(mapped_entry);
+ for (int i = 0; i < elements.length(); i++) {
+ Object mapped_entry = elements.mapped_entries(i);
+ os << "\n " << i << ": param(" << i << "): " << Brief(mapped_entry);
if (mapped_entry.IsTheHole()) {
os << " in the arguments_store[" << i << "]";
} else {
@@ -643,12 +649,6 @@ void JSGeneratorObject::JSGeneratorObjectPrint(std::ostream& os) { // NOLINT
JSObjectPrintBody(os, *this);
}
-void JSAggregateError::JSAggregateErrorPrint(std::ostream& os) {
- JSObjectPrintHeader(os, *this, "JSAggregateError");
- os << "\n - errors: " << Brief(errors());
- JSObjectPrintBody(os, *this);
-}
-
void JSArray::JSArrayPrint(std::ostream& os) { // NOLINT
JSObjectPrintHeader(os, *this, "JSArray");
os << "\n - length: " << Brief(this->length());
@@ -1005,7 +1005,13 @@ void FeedbackNexus::Print(std::ostream& os) { // NOLINT
}
void Oddball::OddballPrint(std::ostream& os) { // NOLINT
- to_string().Print(os);
+ PrintHeapObjectHeaderWithoutMap(*this, os, "Oddball");
+ os << ": ";
+ String s = to_string();
+ os << s.PrefixForDebugPrint();
+ s.PrintUC16(os);
+ os << s.SuffixForDebugPrint();
+ os << std::endl;
}
void JSAsyncFunctionObject::JSAsyncFunctionObjectPrint(
@@ -1055,34 +1061,11 @@ void JSMessageObject::JSMessageObjectPrint(std::ostream& os) { // NOLINT
}
void String::StringPrint(std::ostream& os) { // NOLINT
- if (!IsOneByteRepresentation()) {
- os << "u";
- }
- if (StringShape(*this).IsInternalized()) {
- os << "#";
- } else if (StringShape(*this).IsCons()) {
- os << "c\"";
- } else if (StringShape(*this).IsThin()) {
- os << ">\"";
- } else {
- os << "\"";
- }
-
- const char truncated_epilogue[] = "...<truncated>";
- int len = length();
- if (!FLAG_use_verbose_printer) {
- if (len > 100) {
- len = 100 - sizeof(truncated_epilogue);
- }
- }
- for (int i = 0; i < len; i++) {
- os << AsUC16(Get(i));
- }
- if (len != length()) {
- os << truncated_epilogue;
- }
-
- if (!StringShape(*this).IsInternalized()) os << "\"";
+ PrintHeapObjectHeaderWithoutMap(*this, os, "String");
+ os << ": ";
+ os << PrefixForDebugPrint();
+ PrintUC16(os, 0, length());
+ os << SuffixForDebugPrint();
}
void Name::NamePrint(std::ostream& os) { // NOLINT
@@ -1484,9 +1467,7 @@ void Code::CodePrint(std::ostream& os) { // NOLINT
PrintHeader(os, "Code");
os << "\n";
#ifdef ENABLE_DISASSEMBLER
- if (FLAG_use_verbose_printer) {
- Disassemble(nullptr, os, GetIsolate());
- }
+ Disassemble(nullptr, os, GetIsolate());
#endif
}
@@ -1692,14 +1673,12 @@ void WasmStruct::WasmStructPrint(std::ostream& os) { // NOLINT
case wasm::ValueType::kF64:
os << base::ReadUnalignedValue<double>(field_address);
break;
+ case wasm::ValueType::kI8:
+ case wasm::ValueType::kI16:
case wasm::ValueType::kS128:
- case wasm::ValueType::kAnyRef:
- case wasm::ValueType::kFuncRef:
- case wasm::ValueType::kNullRef:
- case wasm::ValueType::kExnRef:
case wasm::ValueType::kRef:
case wasm::ValueType::kOptRef:
- case wasm::ValueType::kEqRef:
+ case wasm::ValueType::kRtt:
case wasm::ValueType::kBottom:
case wasm::ValueType::kStmt:
UNIMPLEMENTED(); // TODO(7748): Implement.
@@ -1733,14 +1712,12 @@ void WasmArray::WasmArrayPrint(std::ostream& os) { // NOLINT
PrintTypedArrayElements(os, reinterpret_cast<double*>(data_ptr), len,
true);
break;
+ case wasm::ValueType::kI8:
+ case wasm::ValueType::kI16:
case wasm::ValueType::kS128:
- case wasm::ValueType::kAnyRef:
- case wasm::ValueType::kFuncRef:
- case wasm::ValueType::kNullRef:
- case wasm::ValueType::kExnRef:
case wasm::ValueType::kRef:
case wasm::ValueType::kOptRef:
- case wasm::ValueType::kEqRef:
+ case wasm::ValueType::kRtt:
case wasm::ValueType::kBottom:
case wasm::ValueType::kStmt:
UNIMPLEMENTED(); // TODO(7748): Implement.
@@ -1749,12 +1726,6 @@ void WasmArray::WasmArrayPrint(std::ostream& os) { // NOLINT
os << "\n";
}
-void WasmDebugInfo::WasmDebugInfoPrint(std::ostream& os) { // NOLINT
- PrintHeader(os, "WasmDebugInfo");
- os << "\n - wasm_instance: " << Brief(wasm_instance());
- os << "\n";
-}
-
void WasmExceptionTag::WasmExceptionTagPrint(std::ostream& os) { // NOLINT
PrintHeader(os, "WasmExceptionTag");
os << "\n - index: " << index();
@@ -1779,9 +1750,6 @@ void WasmInstanceObject::WasmInstanceObjectPrint(std::ostream& os) { // NOLINT
os << "\n - imported_mutable_globals_buffers: "
<< Brief(imported_mutable_globals_buffers());
}
- if (has_debug_info()) {
- os << "\n - debug_info: " << Brief(debug_info());
- }
for (int i = 0; i < tables().length(); i++) {
os << "\n - table " << i << ": " << Brief(tables().get(i));
}
@@ -1850,7 +1818,8 @@ void WasmGlobalObject::WasmGlobalObjectPrint(std::ostream& os) { // NOLINT
os << "\n - untagged_buffer: " << Brief(untagged_buffer());
os << "\n - tagged_buffer: " << Brief(tagged_buffer());
os << "\n - offset: " << offset();
- os << "\n - flags: " << flags();
+ os << "\n - raw_type: " << raw_type();
+ os << "\n - is_mutable: " << is_mutable();
os << "\n - type: " << type().kind();
os << "\n - is_mutable: " << is_mutable();
os << "\n";
@@ -1929,8 +1898,8 @@ void FunctionTemplateInfo::FunctionTemplateInfoPrint(
std::ostream& os) { // NOLINT
PrintHeader(os, "FunctionTemplateInfo");
os << "\n - class name: " << Brief(class_name());
- os << "\n - tag: " << Brief(tag());
- os << "\n - serial_number: " << Brief(serial_number());
+ os << "\n - tag: " << tag();
+ os << "\n - serial_number: " << serial_number();
os << "\n - property_list: " << Brief(property_list());
os << "\n - call_code: " << Brief(call_code());
os << "\n - property_accessors: " << Brief(property_accessors());
@@ -1943,21 +1912,6 @@ void FunctionTemplateInfo::FunctionTemplateInfoPrint(
os << "\n";
}
-void FunctionTemplateRareData::FunctionTemplateRareDataPrint(
- std::ostream& os) { // NOLINT
- PrintHeader(os, "FunctionTemplateRareData");
- os << "\n - prototype_template: " << Brief(prototype_template());
- os << "\n - prototype_provider_template: "
- << Brief(prototype_provider_template());
- os << "\n - parent_template: " << Brief(parent_template());
- os << "\n - named_property_handler: " << Brief(named_property_handler());
- os << "\n - indexed_property_handler: " << Brief(indexed_property_handler());
- os << "\n - instance_template: " << Brief(instance_template());
- os << "\n - instance_call_handler: " << Brief(instance_call_handler());
- os << "\n - access_check_info: " << Brief(access_check_info());
- os << "\n";
-}
-
void WasmCapiFunctionData::WasmCapiFunctionDataPrint(
std::ostream& os) { // NOLINT
PrintHeader(os, "WasmCapiFunctionData");
@@ -1984,8 +1938,8 @@ void WasmIndirectFunctionTable::WasmIndirectFunctionTablePrint(
void ObjectTemplateInfo::ObjectTemplateInfoPrint(std::ostream& os) { // NOLINT
PrintHeader(os, "ObjectTemplateInfo");
- os << "\n - tag: " << Brief(tag());
- os << "\n - serial_number: " << Brief(serial_number());
+ os << "\n - tag: " << tag();
+ os << "\n - serial_number: " << serial_number();
os << "\n - property_list: " << Brief(property_list());
os << "\n - property_accessors: " << Brief(property_accessors());
os << "\n - constructor: " << Brief(constructor());
@@ -2205,8 +2159,8 @@ void ScopeInfo::ScopeInfoPrint(std::ostream& os) { // NOLINT
if (HasOuterScopeInfo()) {
os << "\n - outer scope info: " << Brief(OuterScopeInfo());
}
- if (HasLocalsBlackList()) {
- os << "\n - locals blacklist: " << Brief(LocalsBlackList());
+ if (HasLocalsBlockList()) {
+ os << "\n - locals blocklist: " << Brief(LocalsBlockList());
}
if (HasFunctionName()) {
os << "\n - function name: " << Brief(FunctionName());
diff --git a/chromium/v8/src/diagnostics/ppc/disasm-ppc.cc b/chromium/v8/src/diagnostics/ppc/disasm-ppc.cc
index b682bb8c5ac..8394a05f89b 100644
--- a/chromium/v8/src/diagnostics/ppc/disasm-ppc.cc
+++ b/chromium/v8/src/diagnostics/ppc/disasm-ppc.cc
@@ -358,6 +358,17 @@ void Decoder::UnknownFormat(Instruction* instr, const char* name) {
}
void Decoder::DecodeExt0(Instruction* instr) {
+ // Some encodings are determined by bits 5-0; handle those first
+ switch (EXT0 | (instr->BitField(5, 0))) {
+ case VPERM: {
+ Format(instr, "vperm 'Dt, 'Da, 'Db, 'Dc");
+ return;
+ }
+ case VMLADDUHM: {
+ Format(instr, "vmladduhm 'Dt, 'Da, 'Db, 'Dc");
+ return;
+ }
+ }
switch (EXT0 | (instr->BitField(10, 0))) {
case VSPLTB: {
Format(instr, "vspltb 'Dt, 'Db, 'UIM");
@@ -379,6 +390,74 @@ void Decoder::DecodeExt0(Instruction* instr) {
Format(instr, "vor 'Dt, 'Da, 'Db");
break;
}
+ case VXOR: {
+ Format(instr, "vxor 'Dt, 'Da, 'Db");
+ break;
+ }
+ case VNOR: {
+ Format(instr, "vnor 'Dt, 'Da, 'Db");
+ break;
+ }
+ case VSLO: {
+ Format(instr, "vslo 'Dt, 'Da, 'Db");
+ break;
+ }
+ case VADDUDM: {
+ Format(instr, "vaddudm 'Dt, 'Da, 'Db");
+ break;
+ }
+ case VADDUWM: {
+ Format(instr, "vadduwm 'Dt, 'Da, 'Db");
+ break;
+ }
+ case VADDUHM: {
+ Format(instr, "vadduhm 'Dt, 'Da, 'Db");
+ break;
+ }
+ case VADDUBM: {
+ Format(instr, "vaddubm 'Dt, 'Da, 'Db");
+ break;
+ }
+ case VADDFP: {
+ Format(instr, "vaddfp 'Dt, 'Da, 'Db");
+ break;
+ }
+ case VSUBFP: {
+ Format(instr, "vsubfp 'Dt, 'Da, 'Db");
+ break;
+ }
+ case VSUBUDM: {
+ Format(instr, "vsubudm 'Dt, 'Da, 'Db");
+ break;
+ }
+ case VSUBUWM: {
+ Format(instr, "vsubuwm 'Dt, 'Da, 'Db");
+ break;
+ }
+ case VSUBUHM: {
+ Format(instr, "vsubuhm 'Dt, 'Da, 'Db");
+ break;
+ }
+ case VSUBUBM: {
+ Format(instr, "vsububm 'Dt, 'Da, 'Db");
+ break;
+ }
+ case VMULUWM: {
+ Format(instr, "vmuluwm 'Dt, 'Da, 'Db");
+ break;
+ }
+ case VPKUHUM: {
+ Format(instr, "vpkuhum 'Dt, 'Da, 'Db");
+ break;
+ }
+ case VMULEUB: {
+ Format(instr, "vmuleub 'Dt, 'Da, 'Db");
+ break;
+ }
+ case VMULOUB: {
+ Format(instr, "vmuloub 'Dt, 'Da, 'Db");
+ break;
+ }
}
}
@@ -912,7 +991,7 @@ void Decoder::DecodeExt2(Instruction* instr) {
return;
}
case LVX: {
- Format(instr, "lvx 'Dt, 'ra, 'rb");
+ Format(instr, "lvx 'Dt, 'ra, 'rb");
return;
}
#if V8_TARGET_ARCH_PPC64
diff --git a/chromium/v8/src/diagnostics/x64/disasm-x64.cc b/chromium/v8/src/diagnostics/x64/disasm-x64.cc
index 4d0760b17c0..1ca13c1a3ed 100644
--- a/chromium/v8/src/diagnostics/x64/disasm-x64.cc
+++ b/chromium/v8/src/diagnostics/x64/disasm-x64.cc
@@ -947,6 +947,16 @@ int DisassemblerX64::AVXInstruction(byte* data) {
int mod, regop, rm, vvvv = vex_vreg();
get_modrm(*current, &mod, &regop, &rm);
switch (opcode) {
+ case 0x08:
+ AppendToBuffer("vroundps %s,", NameOfXMMRegister(regop));
+ current += PrintRightXMMOperand(current);
+ AppendToBuffer(",0x%x", *current++);
+ break;
+ case 0x09:
+ AppendToBuffer("vroundpd %s,", NameOfXMMRegister(regop));
+ current += PrintRightXMMOperand(current);
+ AppendToBuffer(",0x%x", *current++);
+ break;
case 0x0A:
AppendToBuffer("vroundss %s,%s,", NameOfXMMRegister(regop),
NameOfXMMRegister(vvvv));
@@ -1840,6 +1850,18 @@ int DisassemblerX64::TwoByteOpcodeInstruction(byte* data) {
current += PrintRightOperand(current);
AppendToBuffer(",%s,%d", NameOfXMMRegister(regop), (*current) & 3);
current += 1;
+ } else if (third_byte == 0x08) {
+ get_modrm(*current, &mod, &regop, &rm);
+ AppendToBuffer("roundps %s,", NameOfXMMRegister(regop));
+ current += PrintRightXMMOperand(current);
+ AppendToBuffer(",0x%x", (*current) & 3);
+ current += 1;
+ } else if (third_byte == 0x09) {
+ get_modrm(*current, &mod, &regop, &rm);
+ AppendToBuffer("roundpd %s,", NameOfXMMRegister(regop));
+ current += PrintRightXMMOperand(current);
+ AppendToBuffer(",0x%x", (*current) & 3);
+ current += 1;
} else if (third_byte == 0x0A) {
get_modrm(*current, &mod, &regop, &rm);
AppendToBuffer("roundss %s,", NameOfXMMRegister(regop));
@@ -2111,6 +2133,8 @@ int DisassemblerX64::TwoByteOpcodeInstruction(byte* data) {
mnemonic = "psllq";
} else if (opcode == 0xF4) {
mnemonic = "pmuludq";
+ } else if (opcode == 0xF5) {
+ mnemonic = "pmaddwd";
} else if (opcode == 0xF8) {
mnemonic = "psubb";
} else if (opcode == 0xF9) {
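The ia32 and x64 disassembler changes above extend the 66 0F 3A escape handling with the SSE4.1 packed rounding forms; the mnemonic is chosen purely by the third opcode byte, followed by a ModRM-encoded operand and an 8-bit immediate. A small standalone sketch of that byte-to-mnemonic mapping, limited to the opcodes visible in these hunks:

#include <cstdint>

// Mnemonics for the 66 0F 3A xx opcodes handled in the diffs above; bytes not
// listed fall through to nullptr and are decoded elsewhere in the disassembler.
const char* RoundMnemonicFor0F3A(uint8_t third_byte) {
  switch (third_byte) {
    case 0x08: return "roundps";  // newly added
    case 0x09: return "roundpd";  // newly added
    case 0x0A: return "roundss";  // already handled
    default:   return nullptr;
  }
}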
diff --git a/chromium/v8/src/execution/arm/simulator-arm.cc b/chromium/v8/src/execution/arm/simulator-arm.cc
index 019542b12d4..ddfc5650b56 100644
--- a/chromium/v8/src/execution/arm/simulator-arm.cc
+++ b/chromium/v8/src/execution/arm/simulator-arm.cc
@@ -1567,7 +1567,7 @@ using SimulatorRuntimeDirectGetterCall = void (*)(int32_t arg0, int32_t arg1);
using SimulatorRuntimeProfilingGetterCall = void (*)(int32_t arg0, int32_t arg1,
void* arg2);
-// Separate for fine-grained UBSan blacklisting. Casting any given C++
+// Separate for fine-grained UBSan blocklisting. Casting any given C++
// function to {SimulatorRuntimeCall} is undefined behavior; but since
// the target function can indeed be any function that's exposed via
// the "fast C call" mechanism, we can't reconstruct its signature here.
@@ -5375,7 +5375,8 @@ void Simulator::DecodeSpecialCondition(Instruction* instr) {
} else {
UNIMPLEMENTED();
}
- } else if (instr->Bits(19, 18) == 0x2 && instr->Bits(11, 8) == 0x5) {
+ } else if (instr->Bits(19, 18) == 0x2 && instr->Bits(17, 16) == 0x3 &&
+ instr->Bits(11, 8) == 0x5) {
// vrecpe/vrsqrte.f32 Qd, Qm.
int Vd = instr->VFPDRegValue(kSimd128Precision);
int Vm = instr->VFPMRegValue(kSimd128Precision);
@@ -5442,6 +5443,39 @@ void Simulator::DecodeSpecialCondition(Instruction* instr) {
UNIMPLEMENTED();
break;
}
+ } else if (instr->Bits(17, 16) == 0x2 && instr->Bit(10) == 1) {
+ // vrint<q>.<dt> <Dd>, <Dm>
+ // vrint<q>.<dt> <Qd>, <Qm>
+ // See F6.1.205
+ int regs = instr->Bit(6) + 1;
+ int rounding_mode = instr->Bits(9, 7);
+ float (*fproundint)(float) = nullptr;
+ switch (rounding_mode) {
+ case 3:
+ fproundint = &truncf;
+ break;
+ case 5:
+ fproundint = &floorf;
+ break;
+ case 7:
+ fproundint = &ceilf;
+ break;
+ default:
+ UNIMPLEMENTED();
+ }
+ int vm = instr->VFPMRegValue(kDoublePrecision);
+ int vd = instr->VFPDRegValue(kDoublePrecision);
+
+ float floats[2];
+ for (int r = 0; r < regs; r++) {
+ // We cannot simply use GetVFPSingleValue since our Q registers
+ // might not map to any S registers at all.
+ get_neon_register<float, kDoubleSize>(vm + r, floats);
+ for (int e = 0; e < 2; e++) {
+ floats[e] = canonicalizeNaN(fproundint(floats[e]));
+ }
+ set_neon_register<float, kDoubleSize>(vd + r, floats);
+ }
} else {
UNIMPLEMENTED();
}
@@ -5658,12 +5692,12 @@ void Simulator::DecodeSpecialCondition(Instruction* instr) {
int32_t address = get_register(Rn);
int regs = instr->Bit(5) + 1;
int size = instr->Bits(7, 6);
- uint32_t q_data[4];
+ uint32_t q_data[2];
switch (size) {
case Neon8: {
uint8_t data = ReadBU(address);
uint8_t* dst = reinterpret_cast<uint8_t*>(q_data);
- for (int i = 0; i < 16; i++) {
+ for (int i = 0; i < 8; i++) {
dst[i] = data;
}
break;
@@ -5671,21 +5705,21 @@ void Simulator::DecodeSpecialCondition(Instruction* instr) {
case Neon16: {
uint16_t data = ReadHU(address);
uint16_t* dst = reinterpret_cast<uint16_t*>(q_data);
- for (int i = 0; i < 8; i++) {
+ for (int i = 0; i < 4; i++) {
dst[i] = data;
}
break;
}
case Neon32: {
uint32_t data = ReadW(address);
- for (int i = 0; i < 4; i++) {
+ for (int i = 0; i < 2; i++) {
q_data[i] = data;
}
break;
}
}
for (int r = 0; r < regs; r++) {
- set_neon_register(Vd + r, q_data);
+ set_neon_register<uint32_t, kDoubleSize>(Vd + r, q_data);
}
if (Rm != 15) {
if (Rm == 13) {
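The new vrint path in the ARM simulator maps the rounding-mode bit field to truncf, floorf, or ceilf and applies the chosen function to each lane of a 64-bit register half (NaN canonicalization is omitted here). A standalone sketch of that dispatch, treating the 3/5/7 encodings as read from the diff above:

#include <cmath>
#include <cstdio>

// Pick the rounding helper for a given mode encoding
// (3 -> round toward zero, 5 -> floor, 7 -> ceiling, per the diff above).
float (*SelectRounding(int rounding_mode))(float) {
  switch (rounding_mode) {
    case 3: return &truncf;
    case 5: return &floorf;
    case 7: return &ceilf;
    default: return nullptr;  // other encodings not modeled in this sketch
  }
}

// Apply the rounding lane by lane to one 64-bit (2 x float) register half.
void RoundLanes(float lanes[2], int rounding_mode) {
  float (*fproundint)(float) = SelectRounding(rounding_mode);
  if (fproundint == nullptr) return;
  for (int e = 0; e < 2; ++e) lanes[e] = fproundint(lanes[e]);
}

int main() {
  float lanes[2] = {1.5f, -1.5f};
  RoundLanes(lanes, 3);  // truncate toward zero -> {1.0, -1.0}
  std::printf("%g %g\n", lanes[0], lanes[1]);
}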
diff --git a/chromium/v8/src/execution/arm64/pointer-auth-arm64.cc b/chromium/v8/src/execution/arm64/pointer-auth-arm64.cc
index 36e792b752e..eaa88445ec2 100644
--- a/chromium/v8/src/execution/arm64/pointer-auth-arm64.cc
+++ b/chromium/v8/src/execution/arm64/pointer-auth-arm64.cc
@@ -10,8 +10,8 @@ namespace v8 {
namespace internal {
// Randomly generated example key for simulating only.
-const Simulator::PACKey Simulator::kPACKeyIA = {0xc31718727de20f71,
- 0xab9fd4e14b2fec51, 0};
+const Simulator::PACKey Simulator::kPACKeyIB = {0xeebb163b474e04c8,
+ 0x5267ac6fc280fb7c, 1};
namespace {
diff --git a/chromium/v8/src/execution/arm64/pointer-authentication-arm64.h b/chromium/v8/src/execution/arm64/pointer-authentication-arm64.h
index c54a59f29c7..e4bc476b3d0 100644
--- a/chromium/v8/src/execution/arm64/pointer-authentication-arm64.h
+++ b/chromium/v8/src/execution/arm64/pointer-authentication-arm64.h
@@ -10,11 +10,6 @@
#include "src/common/globals.h"
#include "src/execution/arm64/simulator-arm64.h"
-// TODO(v8:10026): Replace hints with instruction aliases, when supported.
-#define AUTIA1716 "hint #12"
-#define PACIA1716 "hint #8"
-#define XPACLRI "hint #7"
-
namespace v8 {
namespace internal {
@@ -31,13 +26,13 @@ V8_INLINE Address PointerAuthentication::AuthenticatePC(
uint64_t sp = reinterpret_cast<uint64_t>(pc_address) + offset_from_sp;
uint64_t pc = reinterpret_cast<uint64_t>(*pc_address);
#ifdef USE_SIMULATOR
- pc = Simulator::AuthPAC(pc, sp, Simulator::kPACKeyIA,
+ pc = Simulator::AuthPAC(pc, sp, Simulator::kPACKeyIB,
Simulator::kInstructionPointer);
#else
asm volatile(
" mov x17, %[pc]\n"
" mov x16, %[stack_ptr]\n"
- " " AUTIA1716 "\n"
+ " autib1716\n"
" ldr xzr, [x17]\n"
" mov %[pc], x17\n"
: [pc] "+r"(pc)
@@ -55,7 +50,7 @@ V8_INLINE Address PointerAuthentication::StripPAC(Address pc) {
asm volatile(
" mov x16, lr\n"
" mov lr, %[pc]\n"
- " " XPACLRI "\n"
+ " xpaclri\n"
" mov %[pc], lr\n"
" mov lr, x16\n"
: [pc] "+r"(pc)
@@ -68,13 +63,13 @@ V8_INLINE Address PointerAuthentication::StripPAC(Address pc) {
// Sign {pc} using {sp}.
V8_INLINE Address PointerAuthentication::SignPCWithSP(Address pc, Address sp) {
#ifdef USE_SIMULATOR
- return Simulator::AddPAC(pc, sp, Simulator::kPACKeyIA,
+ return Simulator::AddPAC(pc, sp, Simulator::kPACKeyIB,
Simulator::kInstructionPointer);
#else
asm volatile(
" mov x17, %[pc]\n"
" mov x16, %[sp]\n"
- " " PACIA1716 "\n"
+ " pacib1716\n"
" mov %[pc], x17\n"
: [pc] "+r"(pc)
: [sp] "r"(sp)
@@ -92,13 +87,13 @@ V8_INLINE void PointerAuthentication::ReplacePC(Address* pc_address,
uint64_t sp = reinterpret_cast<uint64_t>(pc_address) + offset_from_sp;
uint64_t old_pc = reinterpret_cast<uint64_t>(*pc_address);
#ifdef USE_SIMULATOR
- uint64_t auth_old_pc = Simulator::AuthPAC(old_pc, sp, Simulator::kPACKeyIA,
+ uint64_t auth_old_pc = Simulator::AuthPAC(old_pc, sp, Simulator::kPACKeyIB,
Simulator::kInstructionPointer);
uint64_t raw_old_pc =
Simulator::StripPAC(old_pc, Simulator::kInstructionPointer);
// Verify that the old address is authenticated.
CHECK_EQ(auth_old_pc, raw_old_pc);
- new_pc = Simulator::AddPAC(new_pc, sp, Simulator::kPACKeyIA,
+ new_pc = Simulator::AddPAC(new_pc, sp, Simulator::kPACKeyIB,
Simulator::kInstructionPointer);
#else
// Only store newly signed address after we have verified that the old
@@ -106,10 +101,10 @@ V8_INLINE void PointerAuthentication::ReplacePC(Address* pc_address,
asm volatile(
" mov x17, %[new_pc]\n"
" mov x16, %[sp]\n"
- " " PACIA1716 "\n"
+ " pacib1716\n"
" mov %[new_pc], x17\n"
" mov x17, %[old_pc]\n"
- " " AUTIA1716 "\n"
+ " autib1716\n"
" ldr xzr, [x17]\n"
: [new_pc] "+&r"(new_pc)
: [sp] "r"(sp), [old_pc] "r"(old_pc)
@@ -127,13 +122,13 @@ V8_INLINE void PointerAuthentication::ReplaceContext(Address* pc_address,
uint64_t new_pc;
#ifdef USE_SIMULATOR
uint64_t auth_pc =
- Simulator::AuthPAC(old_signed_pc, old_context, Simulator::kPACKeyIA,
+ Simulator::AuthPAC(old_signed_pc, old_context, Simulator::kPACKeyIB,
Simulator::kInstructionPointer);
uint64_t raw_pc =
Simulator::StripPAC(auth_pc, Simulator::kInstructionPointer);
// Verify that the old address is authenticated.
CHECK_EQ(raw_pc, auth_pc);
- new_pc = Simulator::AddPAC(raw_pc, new_context, Simulator::kPACKeyIA,
+ new_pc = Simulator::AddPAC(raw_pc, new_context, Simulator::kPACKeyIB,
Simulator::kInstructionPointer);
#else
// Only store newly signed address after we have verified that the old
@@ -141,13 +136,13 @@ V8_INLINE void PointerAuthentication::ReplaceContext(Address* pc_address,
asm volatile(
" mov x17, %[old_pc]\n"
" mov x16, %[old_ctx]\n"
- " " AUTIA1716 "\n"
+ " autib1716\n"
" mov x16, %[new_ctx]\n"
- " " PACIA1716 "\n"
+ " pacib1716\n"
" mov %[new_pc], x17\n"
" mov x17, %[old_pc]\n"
" mov x16, %[old_ctx]\n"
- " " AUTIA1716 "\n"
+ " autib1716\n"
" ldr xzr, [x17]\n"
: [new_pc] "=&r"(new_pc)
: [old_pc] "r"(old_signed_pc), [old_ctx] "r"(old_context),
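The pointer-authentication helpers now sign and authenticate return addresses with the B key (pacib1716/autib1716), using the stack pointer as the modifier. The toy model below only illustrates the sign/strip/authenticate round trip: ToyMAC, kPointerBits, and the abort-on-mismatch behaviour are invented for the sketch and bear no relation to the real QARMA-based PAC or to V8's simulator keys:

#include <cstdint>
#include <cstdlib>

constexpr int kPointerBits = 48;  // assumed virtual-address width
constexpr uint64_t kPointerMask = (1ull << kPointerBits) - 1;

// Toy MAC over (pointer, modifier); real hardware uses a per-process B key.
uint64_t ToyMAC(uint64_t ptr, uint64_t modifier) {
  return ((ptr * 0x9E3779B97F4A7C15ull) ^ modifier) >> kPointerBits;
}

// Embed the MAC in the unused top bits, as pacib1716 does with sp in x16.
uint64_t SignPC(uint64_t pc, uint64_t sp) {
  uint64_t raw = pc & kPointerMask;
  return raw | (ToyMAC(raw, sp) << kPointerBits);
}

// Drop the authentication bits, as xpaclri does.
uint64_t StripPAC(uint64_t signed_pc) { return signed_pc & kPointerMask; }

// Recompute and compare; autib1716 would poison the pointer on mismatch.
uint64_t AuthenticatePC(uint64_t signed_pc, uint64_t sp) {
  uint64_t raw = StripPAC(signed_pc);
  if (SignPC(raw, sp) != signed_pc) std::abort();
  return raw;
}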
diff --git a/chromium/v8/src/execution/arm64/simulator-arm64.cc b/chromium/v8/src/execution/arm64/simulator-arm64.cc
index adc856a6066..4d9205f0537 100644
--- a/chromium/v8/src/execution/arm64/simulator-arm64.cc
+++ b/chromium/v8/src/execution/arm64/simulator-arm64.cc
@@ -445,7 +445,7 @@ using SimulatorRuntimeDirectGetterCall = void (*)(int64_t arg0, int64_t arg1);
using SimulatorRuntimeProfilingGetterCall = void (*)(int64_t arg0, int64_t arg1,
void* arg2);
-// Separate for fine-grained UBSan blacklisting. Casting any given C++
+// Separate for fine-grained UBSan blocklisting. Casting any given C++
// function to {SimulatorRuntimeCall} is undefined behavior; but since
// the target function can indeed be any function that's exposed via
// the "fast C call" mechanism, we can't reconstruct its signature here.
@@ -2756,6 +2756,9 @@ void Simulator::VisitFPIntegerConvert(Instruction* instr) {
case FCVTZU_xd:
set_xreg(dst, FPToUInt64(dreg(src), FPZero));
break;
+ case FJCVTZS:
+ set_wreg(dst, FPToFixedJS(dreg(src)));
+ break;
case FMOV_ws:
set_wreg(dst, sreg_bits(src));
break;
@@ -3125,8 +3128,8 @@ bool Simulator::FPProcessNaNs(Instruction* instr) {
// clang-format off
#define PAUTH_SYSTEM_MODES(V) \
- V(A1716, 17, xreg(16), kPACKeyIA) \
- V(ASP, 30, xreg(31, Reg31IsStackPointer), kPACKeyIA)
+ V(B1716, 17, xreg(16), kPACKeyIB) \
+ V(BSP, 30, xreg(31, Reg31IsStackPointer), kPACKeyIB)
// clang-format on
void Simulator::VisitSystem(Instruction* instr) {
@@ -3134,7 +3137,7 @@ void Simulator::VisitSystem(Instruction* instr) {
// range of immediates instead of indicating a different instruction. This
// makes the decoding tricky.
if (instr->Mask(SystemPAuthFMask) == SystemPAuthFixed) {
- // The BType check for PACIASP happens in CheckBType().
+ // The BType check for PACIBSP happens in CheckBType().
switch (instr->Mask(SystemPAuthMask)) {
#define DEFINE_PAUTH_FUNCS(SUFFIX, DST, MOD, KEY) \
case PACI##SUFFIX: \
diff --git a/chromium/v8/src/execution/arm64/simulator-arm64.h b/chromium/v8/src/execution/arm64/simulator-arm64.h
index cd4137c8e51..ee6d6341825 100644
--- a/chromium/v8/src/execution/arm64/simulator-arm64.h
+++ b/chromium/v8/src/execution/arm64/simulator-arm64.h
@@ -828,8 +828,8 @@ class Simulator : public DecoderVisitor, public SimulatorBase {
void CheckBTypeForPAuth() {
DCHECK(pc_->IsPAuth());
Instr instr = pc_->Mask(SystemPAuthMask);
- // Only PACI[AB]SP allowed here, but we don't currently support PACIBSP.
- CHECK_EQ(instr, PACIASP);
+ // Only PACI[AB]SP allowed here, and we only support PACIBSP.
+ CHECK(instr == PACIBSP);
// Check BType allows PACI[AB]SP instructions.
switch (btype()) {
case BranchFromGuardedNotToIP:
@@ -837,7 +837,7 @@ class Simulator : public DecoderVisitor, public SimulatorBase {
// here to be set. This makes PACI[AB]SP behave like "BTI c",
// disallowing its execution when BTYPE is BranchFromGuardedNotToIP
// (0b11).
- FATAL("Executing PACIASP with wrong BType.");
+ FATAL("Executing PACIBSP with wrong BType.");
case BranchFromUnguardedOrToIP:
case BranchAndLink:
break;
@@ -1397,7 +1397,7 @@ class Simulator : public DecoderVisitor, public SimulatorBase {
int number;
};
- static const PACKey kPACKeyIA;
+ static const PACKey kPACKeyIB;
// Current implementation is that all pointers are tagged.
static bool HasTBI(uint64_t ptr, PointerType type) {
@@ -2179,6 +2179,7 @@ class Simulator : public DecoderVisitor, public SimulatorBase {
int64_t FPToInt64(double value, FPRounding rmode);
uint32_t FPToUInt32(double value, FPRounding rmode);
uint64_t FPToUInt64(double value, FPRounding rmode);
+ int32_t FPToFixedJS(double value);
template <typename T>
T FPAdd(T op1, T op2);
diff --git a/chromium/v8/src/execution/arm64/simulator-logic-arm64.cc b/chromium/v8/src/execution/arm64/simulator-logic-arm64.cc
index d855c8b7084..db39408a49e 100644
--- a/chromium/v8/src/execution/arm64/simulator-logic-arm64.cc
+++ b/chromium/v8/src/execution/arm64/simulator-logic-arm64.cc
@@ -3342,6 +3342,65 @@ LogicVRegister Simulator::frsqrts(VectorFormat vform, LogicVRegister dst,
return dst;
}
+int32_t Simulator::FPToFixedJS(double value) {
+ // The Z-flag is set when the conversion from double precision floating-point
+ // to 32-bit integer is exact. If the source value is +/-Infinity, -0.0, NaN,
+ // outside the bounds of a 32-bit integer, or isn't an exact integer then the
+ // Z-flag is unset.
+ int Z = 1;
+ int32_t result;
+ if ((value == 0.0) || (value == kFP64PositiveInfinity) ||
+ (value == kFP64NegativeInfinity)) {
+ // +/- zero and infinity all return zero, however -0 and +/- Infinity also
+ // unset the Z-flag.
+ result = 0;
+ if ((value != 0.0) || std::signbit(value)) {
+ Z = 0;
+ }
+ } else if (std::isnan(value)) {
+ // NaN values unset the Z-flag and set the result to 0.
+ result = 0;
+ Z = 0;
+ } else {
+ // All other values are converted to an integer representation, rounded
+ // toward zero.
+ double int_result = std::floor(value);
+ double error = value - int_result;
+ if ((error != 0.0) && (int_result < 0.0)) {
+ int_result++;
+ }
+ // Constrain the value into the range [INT32_MIN, INT32_MAX]. We can almost
+ // write a one-liner with std::round, but the behaviour on ties is incorrect
+ // for our purposes.
+ double mod_const = static_cast<double>(UINT64_C(1) << 32);
+ double mod_error =
+ (int_result / mod_const) - std::floor(int_result / mod_const);
+ double constrained;
+ if (mod_error == 0.5) {
+ constrained = INT32_MIN;
+ } else {
+ constrained = int_result - mod_const * round(int_result / mod_const);
+ }
+ DCHECK(std::floor(constrained) == constrained);
+ DCHECK(constrained >= INT32_MIN);
+ DCHECK(constrained <= INT32_MAX);
+ // Take the bottom 32 bits of the result as a 32-bit integer.
+ result = static_cast<int32_t>(constrained);
+ if ((int_result < INT32_MIN) || (int_result > INT32_MAX) ||
+ (error != 0.0)) {
+ // If the integer result is out of range or the conversion isn't exact,
+ // take exception and unset the Z-flag.
+ FPProcessException();
+ Z = 0;
+ }
+ }
+ nzcv().SetN(0);
+ nzcv().SetZ(Z);
+ nzcv().SetC(0);
+ nzcv().SetV(0);
+ return result;
+}
+
LogicVRegister Simulator::frsqrts(VectorFormat vform, LogicVRegister dst,
const LogicVRegister& src1,
const LogicVRegister& src2) {
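FPToFixedJS above emulates FJCVTZS: truncate toward zero, wrap the result modulo 2^32 into the int32 range, and record whether the conversion was exact. A standalone sketch of the same numeric mapping, leaving out the NZCV flag plumbing:

#include <cmath>
#include <cstdint>

// Convert a double the way FJCVTZS does: truncate toward zero, then take the
// value modulo 2^32 mapped into [INT32_MIN, INT32_MAX]. *exact plays the role
// of the Z flag: true only for finite values that are already in-range 32-bit
// integers, and false for NaN, infinities, and -0.0.
int32_t ToInt32LikeFJCVTZS(double value, bool* exact) {
  if (std::isnan(value) || std::isinf(value)) {
    *exact = false;
    return 0;
  }
  double truncated = std::trunc(value);                  // round toward zero
  double wrapped = std::fmod(truncated, 4294967296.0);   // exact remainder
  if (wrapped < 0.0) wrapped += 4294967296.0;            // now in [0, 2^32)
  if (wrapped >= 2147483648.0) wrapped -= 4294967296.0;  // map into int32 range
  *exact = (value == truncated) && (truncated >= -2147483648.0) &&
           (truncated <= 2147483647.0) &&
           !(value == 0.0 && std::signbit(value));
  return static_cast<int32_t>(wrapped);
}

For example, 2147483648.0 (2^31) wraps to INT32_MIN with *exact false, and -1.5 truncates to -1 with *exact false, matching the comments in the simulator code above.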
diff --git a/chromium/v8/src/execution/frames-inl.h b/chromium/v8/src/execution/frames-inl.h
index ecd45abeb19..70db742a718 100644
--- a/chromium/v8/src/execution/frames-inl.h
+++ b/chromium/v8/src/execution/frames-inl.h
@@ -77,6 +77,10 @@ inline Address StackFrame::callee_pc() const {
inline Address StackFrame::pc() const { return ReadPC(pc_address()); }
+inline Address StackFrame::unauthenticated_pc() const {
+ return PointerAuthentication::StripPAC(*pc_address());
+}
+
inline Address StackFrame::ReadPC(Address* pc_address) {
return PointerAuthentication::AuthenticatePC(pc_address, kSystemPointerSize);
}
diff --git a/chromium/v8/src/execution/frames.cc b/chromium/v8/src/execution/frames.cc
index b6fc4cb7540..e714a514c8a 100644
--- a/chromium/v8/src/execution/frames.cc
+++ b/chromium/v8/src/execution/frames.cc
@@ -315,6 +315,7 @@ SafeStackFrameIterator::SafeStackFrameIterator(Isolate* isolate, Address pc,
// return address into the interpreter entry trampoline, then we are likely
// in a bytecode handler with elided frame. In that case, set the PC
// properly and make sure we do not drop the frame.
+ bool is_no_frame_bytecode_handler = false;
if (IsNoFrameBytecodeHandlerPc(isolate, pc, fp)) {
Address* tos_location = nullptr;
if (top_link_register_) {
@@ -326,6 +327,7 @@ SafeStackFrameIterator::SafeStackFrameIterator(Isolate* isolate, Address pc,
if (IsInterpreterFramePc(isolate, *tos_location, &state)) {
state.pc_address = tos_location;
+ is_no_frame_bytecode_handler = true;
advance_frame = false;
}
}
@@ -338,12 +340,12 @@ SafeStackFrameIterator::SafeStackFrameIterator(Isolate* isolate, Address pc,
StandardFrameConstants::kContextOffset);
Address frame_marker = fp + StandardFrameConstants::kFunctionOffset;
if (IsValidStackAddress(frame_marker)) {
- type = StackFrame::ComputeType(this, &state);
- top_frame_type_ = type;
- // We only keep the top frame if we believe it to be interpreted frame.
- if (type != StackFrame::INTERPRETED) {
- advance_frame = true;
+ if (is_no_frame_bytecode_handler) {
+ type = StackFrame::INTERPRETED;
+ } else {
+ type = StackFrame::ComputeType(this, &state);
}
+ top_frame_type_ = type;
MSAN_MEMORY_IS_INITIALIZED(
fp + CommonFrameConstants::kContextOrFrameTypeOffset,
kSystemPointerSize);
diff --git a/chromium/v8/src/execution/frames.h b/chromium/v8/src/execution/frames.h
index cd0156a8877..8186ab8641b 100644
--- a/chromium/v8/src/execution/frames.h
+++ b/chromium/v8/src/execution/frames.h
@@ -29,7 +29,6 @@ class RootVisitor;
class StackFrameIteratorBase;
class StringStream;
class ThreadLocalTop;
-class WasmDebugInfo;
class WasmInstanceObject;
class WasmModuleObject;
@@ -221,6 +220,11 @@ class StackFrame {
inline Address pc() const;
+ // Skip authentication of the PC when using CFI. Used in the profiler, where
+ // in certain corner cases we do not use an address on the stack (which
+ // would be signed) as the PC of the frame.
+ inline Address unauthenticated_pc() const;
+
Address constant_pool() const { return *constant_pool_address(); }
void set_constant_pool(Address constant_pool) {
*constant_pool_address() = constant_pool;
diff --git a/chromium/v8/src/execution/futex-emulation.cc b/chromium/v8/src/execution/futex-emulation.cc
index 3f815e24ca1..6804f473091 100644
--- a/chromium/v8/src/execution/futex-emulation.cc
+++ b/chromium/v8/src/execution/futex-emulation.cc
@@ -89,11 +89,11 @@ Object WaitJsTranslateReturn(Isolate* isolate, Object res) {
int val = Smi::ToInt(res);
switch (val) {
case WaitReturnValue::kOk:
- return ReadOnlyRoots(isolate).ok();
+ return ReadOnlyRoots(isolate).ok_string();
case WaitReturnValue::kNotEqual:
- return ReadOnlyRoots(isolate).not_equal();
+ return ReadOnlyRoots(isolate).not_equal_string();
case WaitReturnValue::kTimedOut:
- return ReadOnlyRoots(isolate).timed_out();
+ return ReadOnlyRoots(isolate).timed_out_string();
default:
UNREACHABLE();
}
@@ -193,8 +193,9 @@ Object FutexEmulation::Wait(Isolate* isolate,
do { // Not really a loop, just makes it easier to break out early.
base::MutexGuard lock_guard(mutex_.Pointer());
- void* backing_store = array_buffer->backing_store();
-
+ std::shared_ptr<BackingStore> backing_store =
+ array_buffer->GetBackingStore();
+ DCHECK(backing_store);
FutexWaitListNode* node = isolate->futex_wait_list_node();
node->backing_store_ = backing_store;
node->wait_addr_ = addr;
@@ -204,7 +205,8 @@ Object FutexEmulation::Wait(Isolate* isolate,
// still holding the lock).
ResetWaitingOnScopeExit reset_waiting(node);
- T* p = reinterpret_cast<T*>(static_cast<int8_t*>(backing_store) + addr);
+ T* p = reinterpret_cast<T*>(
+ static_cast<int8_t*>(backing_store->buffer_start()) + addr);
if (*p != value) {
result = handle(Smi::FromInt(WaitReturnValue::kNotEqual), isolate);
callback_result = AtomicsWaitEvent::kNotEqual;
@@ -308,13 +310,16 @@ Object FutexEmulation::Wake(Handle<JSArrayBuffer> array_buffer, size_t addr,
DCHECK_LT(addr, array_buffer->byte_length());
int waiters_woken = 0;
- void* backing_store = array_buffer->backing_store();
+ std::shared_ptr<BackingStore> backing_store = array_buffer->GetBackingStore();
base::MutexGuard lock_guard(mutex_.Pointer());
FutexWaitListNode* node = wait_list_.Pointer()->head_;
while (node && num_waiters_to_wake > 0) {
- if (backing_store == node->backing_store_ && addr == node->wait_addr_ &&
- node->waiting_) {
+ std::shared_ptr<BackingStore> node_backing_store =
+ node->backing_store_.lock();
+ DCHECK(node_backing_store);
+ if (backing_store.get() == node_backing_store.get() &&
+ addr == node->wait_addr_ && node->waiting_) {
node->waiting_ = false;
node->cond_.NotifyOne();
if (num_waiters_to_wake != kWakeAll) {
@@ -332,15 +337,18 @@ Object FutexEmulation::Wake(Handle<JSArrayBuffer> array_buffer, size_t addr,
Object FutexEmulation::NumWaitersForTesting(Handle<JSArrayBuffer> array_buffer,
size_t addr) {
DCHECK_LT(addr, array_buffer->byte_length());
- void* backing_store = array_buffer->backing_store();
+ std::shared_ptr<BackingStore> backing_store = array_buffer->GetBackingStore();
base::MutexGuard lock_guard(mutex_.Pointer());
int waiters = 0;
FutexWaitListNode* node = wait_list_.Pointer()->head_;
while (node) {
- if (backing_store == node->backing_store_ && addr == node->wait_addr_ &&
- node->waiting_) {
+ std::shared_ptr<BackingStore> node_backing_store =
+ node->backing_store_.lock();
+ DCHECK(node_backing_store);
+ if (backing_store.get() == node_backing_store.get() &&
+ addr == node->wait_addr_ && node->waiting_) {
waiters++;
}
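The futex wait-list node now holds a std::weak_ptr<BackingStore> instead of a raw void*, so Wake compares live shared_ptr instances rather than a pointer that may dangle once the buffer is gone. A minimal standalone sketch of that pattern; DummyBackingStore and WaitNode are illustrative stand-ins, and unlike the DCHECK in the diff the sketch simply skips nodes whose store has been freed:

#include <cassert>
#include <cstddef>
#include <memory>
#include <vector>

struct DummyBackingStore {
  std::vector<char> data;
};

struct WaitNode {
  std::weak_ptr<DummyBackingStore> backing_store;  // does not keep it alive
  size_t wait_addr = 0;
  bool waiting = false;
};

// Wake every node waiting on (store, addr); skip nodes whose store is gone.
int Wake(const std::shared_ptr<DummyBackingStore>& store, size_t addr,
         std::vector<WaitNode>& nodes) {
  int woken = 0;
  for (WaitNode& node : nodes) {
    std::shared_ptr<DummyBackingStore> node_store = node.backing_store.lock();
    if (!node_store) continue;  // buffer already freed; nothing to wake
    if (node_store.get() == store.get() && node.wait_addr == addr &&
        node.waiting) {
      node.waiting = false;  // the real code also notifies a condition variable
      ++woken;
    }
  }
  return woken;
}

int main() {
  auto store = std::make_shared<DummyBackingStore>();
  std::vector<WaitNode> nodes(1);
  nodes[0].backing_store = store;
  nodes[0].wait_addr = 8;
  nodes[0].waiting = true;
  assert(Wake(store, 8, nodes) == 1);
}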
diff --git a/chromium/v8/src/execution/futex-emulation.h b/chromium/v8/src/execution/futex-emulation.h
index 2d005bcfd19..03ad310fd21 100644
--- a/chromium/v8/src/execution/futex-emulation.h
+++ b/chromium/v8/src/execution/futex-emulation.h
@@ -30,6 +30,7 @@ class TimeDelta;
namespace internal {
+class BackingStore;
template <typename T>
class Handle;
class Isolate;
@@ -52,7 +53,6 @@ class FutexWaitListNode {
FutexWaitListNode()
: prev_(nullptr),
next_(nullptr),
- backing_store_(nullptr),
wait_addr_(0),
waiting_(false),
interrupted_(false) {}
@@ -68,7 +68,7 @@ class FutexWaitListNode {
// prev_ and next_ are protected by FutexEmulation::mutex_.
FutexWaitListNode* prev_;
FutexWaitListNode* next_;
- void* backing_store_;
+ std::weak_ptr<BackingStore> backing_store_;
size_t wait_addr_;
// waiting_ and interrupted_ are protected by FutexEmulation::mutex_
// if this node is currently contained in FutexEmulation::wait_list_
@@ -126,20 +126,25 @@ class FutexEmulation : public AllStatic {
// Same as WaitJs above except it returns 0 (ok), 1 (not equal) and 2 (timed
// out) as expected by Wasm.
- static Object WaitWasm32(Isolate* isolate, Handle<JSArrayBuffer> array_buffer,
- size_t addr, int32_t value, int64_t rel_timeout_ns);
+ V8_EXPORT_PRIVATE static Object WaitWasm32(Isolate* isolate,
+ Handle<JSArrayBuffer> array_buffer,
+ size_t addr, int32_t value,
+ int64_t rel_timeout_ns);
// Same as Wait32 above except it checks for an int64_t value in the
// array_buffer.
- static Object WaitWasm64(Isolate* isolate, Handle<JSArrayBuffer> array_buffer,
- size_t addr, int64_t value, int64_t rel_timeout_ns);
+ V8_EXPORT_PRIVATE static Object WaitWasm64(Isolate* isolate,
+ Handle<JSArrayBuffer> array_buffer,
+ size_t addr, int64_t value,
+ int64_t rel_timeout_ns);
// Wake |num_waiters_to_wake| threads that are waiting on the given |addr|.
// |num_waiters_to_wake| can be kWakeAll, in which case all waiters are
// woken. The rest of the waiters will continue to wait. The return value is
// the number of woken waiters.
- static Object Wake(Handle<JSArrayBuffer> array_buffer, size_t addr,
- uint32_t num_waiters_to_wake);
+ V8_EXPORT_PRIVATE static Object Wake(Handle<JSArrayBuffer> array_buffer,
+ size_t addr,
+ uint32_t num_waiters_to_wake);
// Return the number of threads waiting on |addr|. Should only be used for
// testing.
diff --git a/chromium/v8/src/execution/isolate.cc b/chromium/v8/src/execution/isolate.cc
index bea08a16b83..98b98d5bea7 100644
--- a/chromium/v8/src/execution/isolate.cc
+++ b/chromium/v8/src/execution/isolate.cc
@@ -32,6 +32,7 @@
#include "src/debug/debug-frames.h"
#include "src/debug/debug.h"
#include "src/deoptimizer/deoptimizer.h"
+#include "src/diagnostics/basic-block-profiler.h"
#include "src/diagnostics/compilation-statistics.h"
#include "src/execution/frames-inl.h"
#include "src/execution/isolate-inl.h"
@@ -588,21 +589,28 @@ class FrameArrayBuilder {
offset, flags, parameters);
}
- void AppendPromiseAllFrame(Handle<Context> context, int offset) {
+ void AppendPromiseCombinatorFrame(Handle<JSFunction> element_function,
+ Handle<JSFunction> combinator,
+ FrameArray::Flag combinator_flag,
+ Handle<Context> context) {
if (full()) return;
- int flags = FrameArray::kIsAsync | FrameArray::kIsPromiseAll;
+ int flags = FrameArray::kIsAsync | combinator_flag;
Handle<Context> native_context(context->native_context(), isolate_);
- Handle<JSFunction> function(native_context->promise_all(), isolate_);
- if (!IsVisibleInStackTrace(function)) return;
+ if (!IsVisibleInStackTrace(combinator)) return;
Handle<Object> receiver(native_context->promise_function(), isolate_);
- Handle<AbstractCode> code(AbstractCode::cast(function->code()), isolate_);
+ Handle<AbstractCode> code(AbstractCode::cast(combinator->code()), isolate_);
- // TODO(mmarchini) save Promises list from Promise.all()
+ // TODO(mmarchini) save Promises list from the Promise combinator
Handle<FixedArray> parameters = isolate_->factory()->empty_fixed_array();
- elements_ = FrameArray::AppendJSFrame(elements_, receiver, function, code,
+ // We store the offset of the promise into the element function's
+ // hash field for element callbacks.
+ int const offset =
+ Smi::ToInt(Smi::cast(element_function->GetIdentityHash())) - 1;
+
+ elements_ = FrameArray::AppendJSFrame(elements_, receiver, combinator, code,
offset, flags, parameters);
}
@@ -861,11 +869,10 @@ void CaptureAsyncStackTrace(Isolate* isolate, Handle<JSPromise> promise,
Handle<JSFunction> function(JSFunction::cast(reaction->fulfill_handler()),
isolate);
Handle<Context> context(function->context(), isolate);
-
- // We store the offset of the promise into the {function}'s
- // hash field for promise resolve element callbacks.
- int const offset = Smi::ToInt(Smi::cast(function->GetIdentityHash())) - 1;
- builder->AppendPromiseAllFrame(context, offset);
+ Handle<JSFunction> combinator(context->native_context().promise_all(),
+ isolate);
+ builder->AppendPromiseCombinatorFrame(function, combinator,
+ FrameArray::kIsPromiseAll, context);
    // Now peek into the Promise.all() resolve element context to
// find the promise capability that's being resolved when all
@@ -876,6 +883,24 @@ void CaptureAsyncStackTrace(Isolate* isolate, Handle<JSPromise> promise,
PromiseCapability::cast(context->get(index)), isolate);
if (!capability->promise().IsJSPromise()) return;
promise = handle(JSPromise::cast(capability->promise()), isolate);
+ } else if (IsBuiltinFunction(isolate, reaction->reject_handler(),
+ Builtins::kPromiseAnyRejectElementClosure)) {
+ Handle<JSFunction> function(JSFunction::cast(reaction->reject_handler()),
+ isolate);
+ Handle<Context> context(function->context(), isolate);
+ Handle<JSFunction> combinator(context->native_context().promise_any(),
+ isolate);
+ builder->AppendPromiseCombinatorFrame(function, combinator,
+ FrameArray::kIsPromiseAny, context);
+
+ // Now peek into the Promise.any() reject element context to
+ // find the promise capability that's being resolved when any of
+ // the concurrent promises resolve.
+ int const index = PromiseBuiltins::kPromiseAnyRejectElementCapabilitySlot;
+ Handle<PromiseCapability> capability(
+ PromiseCapability::cast(context->get(index)), isolate);
+ if (!capability->promise().IsJSPromise()) return;
+ promise = handle(JSPromise::cast(capability->promise()), isolate);
} else if (IsBuiltinFunction(isolate, reaction->fulfill_handler(),
Builtins::kPromiseCapabilityDefaultResolve)) {
Handle<JSFunction> function(JSFunction::cast(reaction->fulfill_handler()),
@@ -2491,6 +2516,10 @@ void Isolate::SetCaptureStackTraceForUncaughtExceptions(
stack_trace_for_uncaught_exceptions_options_ = options;
}
+bool Isolate::get_capture_stack_trace_for_uncaught_exceptions() const {
+ return capture_stack_trace_for_uncaught_exceptions_;
+}
+
void Isolate::SetAbortOnUncaughtExceptionCallback(
v8::Isolate::AbortOnUncaughtExceptionCallback callback) {
abort_on_uncaught_exception_callback_ = callback;
@@ -2632,77 +2661,110 @@ void Isolate::ThreadDataTable::RemoveAllThreads() {
table_.clear();
}
-class VerboseAccountingAllocator : public AccountingAllocator {
+class TracingAccountingAllocator : public AccountingAllocator {
public:
- VerboseAccountingAllocator(Heap* heap, size_t allocation_sample_bytes)
- : heap_(heap), allocation_sample_bytes_(allocation_sample_bytes) {}
+ explicit TracingAccountingAllocator(Isolate* isolate) : isolate_(isolate) {}
- v8::internal::Segment* AllocateSegment(size_t size) override {
- v8::internal::Segment* memory = AccountingAllocator::AllocateSegment(size);
- if (!memory) return nullptr;
- size_t malloced_current = GetCurrentMemoryUsage();
+ protected:
+ void TraceAllocateSegmentImpl(v8::internal::Segment* segment) override {
+ base::MutexGuard lock(&mutex_);
+ UpdateMemoryTrafficAndReportMemoryUsage(segment->total_size());
+ }
- if (last_memory_usage_ + allocation_sample_bytes_ < malloced_current) {
- PrintMemoryJSON(malloced_current);
- last_memory_usage_ = malloced_current;
- }
- return memory;
+ void TraceZoneCreationImpl(const Zone* zone) override {
+ base::MutexGuard lock(&mutex_);
+ active_zones_.insert(zone);
+ nesting_depth_++;
}
- void ReturnSegment(v8::internal::Segment* memory) override {
- AccountingAllocator::ReturnSegment(memory);
- size_t malloced_current = GetCurrentMemoryUsage();
+ void TraceZoneDestructionImpl(const Zone* zone) override {
+ base::MutexGuard lock(&mutex_);
+ UpdateMemoryTrafficAndReportMemoryUsage(zone->segment_bytes_allocated());
+ active_zones_.erase(zone);
+ nesting_depth_--;
+ }
- if (malloced_current + allocation_sample_bytes_ < last_memory_usage_) {
- PrintMemoryJSON(malloced_current);
- last_memory_usage_ = malloced_current;
+ private:
+ void UpdateMemoryTrafficAndReportMemoryUsage(size_t memory_traffic_delta) {
+ memory_traffic_since_last_report_ += memory_traffic_delta;
+ if (memory_traffic_since_last_report_ < FLAG_zone_stats_tolerance) return;
+ memory_traffic_since_last_report_ = 0;
+
+ Dump(buffer_, true);
+
+ {
+ std::string trace_str = buffer_.str();
+
+ if (FLAG_trace_zone_stats) {
+ PrintF(
+ "{"
+ "\"type\": \"v8-zone-trace\", "
+ "\"stats\": %s"
+ "}\n",
+ trace_str.c_str());
+ }
+ if (V8_UNLIKELY(
+ TracingFlags::zone_stats.load(std::memory_order_relaxed) &
+ v8::tracing::TracingCategoryObserver::ENABLED_BY_TRACING)) {
+ TRACE_EVENT_INSTANT1(TRACE_DISABLED_BY_DEFAULT("v8.zone_stats"),
+ "V8.Zone_Stats", TRACE_EVENT_SCOPE_THREAD, "stats",
+ TRACE_STR_COPY(trace_str.c_str()));
+ }
}
- }
- void ZoneCreation(const Zone* zone) override {
- PrintZoneModificationSample(zone, "zonecreation");
- nesting_deepth_++;
+ // Clear the buffer.
+ buffer_.str(std::string());
}
- void ZoneDestruction(const Zone* zone) override {
- nesting_deepth_--;
- PrintZoneModificationSample(zone, "zonedestruction");
+ void Dump(std::ostringstream& out, bool dump_details) {
+ // Note: Neither isolate nor zones are locked, so be careful with accesses
+ // as the allocator is potentially used on a concurrent thread.
+ double time = isolate_->time_millis_since_init();
+ out << "{"
+ << "\"isolate\": \"" << reinterpret_cast<void*>(isolate_) << "\", "
+ << "\"time\": " << time << ", ";
+ size_t total_segment_bytes_allocated = 0;
+ size_t total_zone_allocation_size = 0;
+
+ if (dump_details) {
+ // Print detailed zone stats if memory usage changes direction.
+ out << "\"zones\": [";
+ bool first = true;
+ for (const Zone* zone : active_zones_) {
+ size_t zone_segment_bytes_allocated = zone->segment_bytes_allocated();
+ size_t zone_allocation_size = zone->allocation_size_for_tracing();
+ if (first) {
+ first = false;
+ } else {
+ out << ", ";
+ }
+ out << "{"
+ << "\"name\": \"" << zone->name() << "\", "
+ << "\"allocated\": " << zone_segment_bytes_allocated << ", "
+ << "\"used\": " << zone_allocation_size << "}";
+ total_segment_bytes_allocated += zone_segment_bytes_allocated;
+ total_zone_allocation_size += zone_allocation_size;
+ }
+ out << "], ";
+ } else {
+ // Just calculate total allocated/used memory values.
+ for (const Zone* zone : active_zones_) {
+ total_segment_bytes_allocated += zone->segment_bytes_allocated();
+ total_zone_allocation_size += zone->allocation_size_for_tracing();
+ }
+ }
+ out << "\"allocated\": " << total_segment_bytes_allocated << ", "
+ << "\"used\": " << total_zone_allocation_size << "}";
}
- private:
- void PrintZoneModificationSample(const Zone* zone, const char* type) {
- PrintF(
- "{"
- "\"type\": \"%s\", "
- "\"isolate\": \"%p\", "
- "\"time\": %f, "
- "\"ptr\": \"%p\", "
- "\"name\": \"%s\", "
- "\"size\": %zu,"
- "\"nesting\": %zu}\n",
- type, reinterpret_cast<void*>(heap_->isolate()),
- heap_->isolate()->time_millis_since_init(),
- reinterpret_cast<const void*>(zone), zone->name(),
- zone->allocation_size(), nesting_deepth_.load());
- }
-
- void PrintMemoryJSON(size_t malloced) {
- // Note: Neither isolate, nor heap is locked, so be careful with accesses
- // as the allocator is potentially used on a concurrent thread.
- double time = heap_->isolate()->time_millis_since_init();
- PrintF(
- "{"
- "\"type\": \"zone\", "
- "\"isolate\": \"%p\", "
- "\"time\": %f, "
- "\"allocated\": %zu}\n",
- reinterpret_cast<void*>(heap_->isolate()), time, malloced);
- }
-
- Heap* heap_;
- std::atomic<size_t> last_memory_usage_{0};
- std::atomic<size_t> nesting_deepth_{0};
- size_t allocation_sample_bytes_;
+ Isolate* const isolate_;
+ std::atomic<size_t> nesting_depth_{0};
+
+ base::Mutex mutex_;
+ std::unordered_set<const Zone*> active_zones_;
+ std::ostringstream buffer_;
+ // This value is increased on both allocations and deallocations.
+ size_t memory_traffic_since_last_report_ = 0;
};
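For orientation (not part of the patch): with --trace_zone_stats enabled, the allocator above prints one JSON line per reporting tick, and a tick only fires once the accumulated allocation/deallocation traffic exceeds --zone_stats_tolerance. A roughly representative line, with an invented isolate address, zone name, and sizes, would be:

{"type": "v8-zone-trace", "stats": {"isolate": "0x55b2f4a0", "time": 1234.5, "zones": [{"name": "compiler-zone", "allocated": 65536, "used": 40312}], "allocated": 65536, "used": 40312}}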
#ifdef DEBUG
@@ -2781,9 +2843,7 @@ Isolate::Isolate(std::unique_ptr<i::IsolateAllocator> isolate_allocator)
: isolate_data_(this),
isolate_allocator_(std::move(isolate_allocator)),
id_(isolate_counter.fetch_add(1, std::memory_order_relaxed)),
- allocator_(FLAG_trace_zone_stats
- ? new VerboseAccountingAllocator(&heap_, 256 * KB)
- : new AccountingAllocator()),
+ allocator_(new TracingAccountingAllocator(this)),
builtins_(this),
rail_mode_(PERFORMANCE_ANIMATION),
code_event_dispatcher_(new CodeEventDispatcher()),
@@ -3235,15 +3295,15 @@ void Isolate::AddCrashKeysForIsolateAndHeapPointers() {
AddressToString(isolate_address));
const uintptr_t ro_space_firstpage_address =
- reinterpret_cast<uintptr_t>(heap()->read_only_space()->first_page());
+ heap()->read_only_space()->FirstPageAddress();
add_crash_key_callback_(v8::CrashKeyId::kReadonlySpaceFirstPageAddress,
AddressToString(ro_space_firstpage_address));
const uintptr_t map_space_firstpage_address =
- reinterpret_cast<uintptr_t>(heap()->map_space()->first_page());
+ heap()->map_space()->FirstPageAddress();
add_crash_key_callback_(v8::CrashKeyId::kMapSpaceFirstPageAddress,
AddressToString(map_space_firstpage_address));
const uintptr_t code_space_firstpage_address =
- reinterpret_cast<uintptr_t>(heap()->code_space()->first_page());
+ heap()->code_space()->FirstPageAddress();
add_crash_key_callback_(v8::CrashKeyId::kCodeSpaceFirstPageAddress,
AddressToString(code_space_firstpage_address));
}
@@ -3616,6 +3676,11 @@ void Isolate::DumpAndResetStats() {
counters()->runtime_call_stats()->Print();
counters()->runtime_call_stats()->Reset();
}
+ if (BasicBlockProfiler::Get()->HasData(this)) {
+ StdoutStream out;
+ BasicBlockProfiler::Get()->Print(out, this);
+ BasicBlockProfiler::Get()->ResetCounts(this);
+ }
}
void Isolate::AbortConcurrentOptimization(BlockingBehavior behavior) {
@@ -4081,54 +4146,57 @@ void Isolate::RunPromiseHook(PromiseHookType type, Handle<JSPromise> promise,
void Isolate::RunPromiseHookForAsyncEventDelegate(PromiseHookType type,
Handle<JSPromise> promise) {
if (!async_event_delegate_) return;
- if (type == PromiseHookType::kResolve) return;
-
- if (type == PromiseHookType::kBefore) {
- if (!promise->async_task_id()) return;
- async_event_delegate_->AsyncEventOccurred(debug::kDebugWillHandle,
- promise->async_task_id(), false);
- } else if (type == PromiseHookType::kAfter) {
- if (!promise->async_task_id()) return;
- async_event_delegate_->AsyncEventOccurred(debug::kDebugDidHandle,
- promise->async_task_id(), false);
- } else {
- DCHECK(type == PromiseHookType::kInit);
- debug::DebugAsyncActionType type = debug::kDebugPromiseThen;
- bool last_frame_was_promise_builtin = false;
- JavaScriptFrameIterator it(this);
- while (!it.done()) {
- std::vector<Handle<SharedFunctionInfo>> infos;
- it.frame()->GetFunctions(&infos);
- for (size_t i = 1; i <= infos.size(); ++i) {
- Handle<SharedFunctionInfo> info = infos[infos.size() - i];
- if (info->IsUserJavaScript()) {
- // We should not report PromiseThen and PromiseCatch which is called
- // indirectly, e.g. Promise.all calls Promise.then internally.
- if (last_frame_was_promise_builtin) {
- if (!promise->async_task_id()) {
- promise->set_async_task_id(++async_task_count_);
+ switch (type) {
+ case PromiseHookType::kResolve:
+ return;
+ case PromiseHookType::kBefore:
+ if (!promise->async_task_id()) return;
+ async_event_delegate_->AsyncEventOccurred(
+ debug::kDebugWillHandle, promise->async_task_id(), false);
+ break;
+ case PromiseHookType::kAfter:
+ if (!promise->async_task_id()) return;
+ async_event_delegate_->AsyncEventOccurred(
+ debug::kDebugDidHandle, promise->async_task_id(), false);
+ break;
+ case PromiseHookType::kInit:
+ debug::DebugAsyncActionType type = debug::kDebugPromiseThen;
+ bool last_frame_was_promise_builtin = false;
+ JavaScriptFrameIterator it(this);
+ while (!it.done()) {
+ std::vector<Handle<SharedFunctionInfo>> infos;
+ it.frame()->GetFunctions(&infos);
+ for (size_t i = 1; i <= infos.size(); ++i) {
+ Handle<SharedFunctionInfo> info = infos[infos.size() - i];
+ if (info->IsUserJavaScript()) {
+            // We should not report PromiseThen and PromiseCatch which are called
+ // indirectly, e.g. Promise.all calls Promise.then internally.
+ if (last_frame_was_promise_builtin) {
+ if (!promise->async_task_id()) {
+ promise->set_async_task_id(++async_task_count_);
+ }
+ async_event_delegate_->AsyncEventOccurred(
+ type, promise->async_task_id(), debug()->IsBlackboxed(info));
}
- async_event_delegate_->AsyncEventOccurred(
- type, promise->async_task_id(), debug()->IsBlackboxed(info));
+ return;
}
- return;
- }
- last_frame_was_promise_builtin = false;
- if (info->HasBuiltinId()) {
- if (info->builtin_id() == Builtins::kPromisePrototypeThen) {
- type = debug::kDebugPromiseThen;
- last_frame_was_promise_builtin = true;
- } else if (info->builtin_id() == Builtins::kPromisePrototypeCatch) {
- type = debug::kDebugPromiseCatch;
- last_frame_was_promise_builtin = true;
- } else if (info->builtin_id() == Builtins::kPromisePrototypeFinally) {
- type = debug::kDebugPromiseFinally;
- last_frame_was_promise_builtin = true;
+ last_frame_was_promise_builtin = false;
+ if (info->HasBuiltinId()) {
+ if (info->builtin_id() == Builtins::kPromisePrototypeThen) {
+ type = debug::kDebugPromiseThen;
+ last_frame_was_promise_builtin = true;
+ } else if (info->builtin_id() == Builtins::kPromisePrototypeCatch) {
+ type = debug::kDebugPromiseCatch;
+ last_frame_was_promise_builtin = true;
+ } else if (info->builtin_id() ==
+ Builtins::kPromisePrototypeFinally) {
+ type = debug::kDebugPromiseFinally;
+ last_frame_was_promise_builtin = true;
+ }
}
}
+ it.Advance();
}
- it.Advance();
- }
}
}
@@ -4180,6 +4248,13 @@ void Isolate::CountUsage(v8::Isolate::UseCounterFeature feature) {
int Isolate::GetNextScriptId() { return heap()->NextScriptId(); }
+int Isolate::GetNextStackFrameInfoId() {
+ int id = last_stack_frame_info_id();
+ int next_id = id == Smi::kMaxValue ? 0 : (id + 1);
+ set_last_stack_frame_info_id(next_id);
+ return next_id;
+}
+
// static
std::string Isolate::GetTurboCfgFileName(Isolate* isolate) {
if (FLAG_trace_turbo_cfg_file == nullptr) {
diff --git a/chromium/v8/src/execution/isolate.h b/chromium/v8/src/execution/isolate.h
index de00d862a3b..bc13f53edaf 100644
--- a/chromium/v8/src/execution/isolate.h
+++ b/chromium/v8/src/execution/isolate.h
@@ -464,6 +464,8 @@ class V8_EXPORT_PRIVATE Isolate final : private HiddenFactory {
public:
using HandleScopeType = HandleScope;
+ void* operator new(size_t) = delete;
+ void operator delete(void*) = delete;
// A thread has a PerIsolateThreadData instance for each isolate that it has
// entered. That instance is allocated when the isolate is initially entered
@@ -731,6 +733,7 @@ class V8_EXPORT_PRIVATE Isolate final : private HiddenFactory {
void SetCaptureStackTraceForUncaughtExceptions(
bool capture, int frame_limit, StackTrace::StackTraceOptions options);
+ bool get_capture_stack_trace_for_uncaught_exceptions() const;
void SetAbortOnUncaughtExceptionCallback(
v8::Isolate::AbortOnUncaughtExceptionCallback callback);
@@ -1291,6 +1294,8 @@ class V8_EXPORT_PRIVATE Isolate final : private HiddenFactory {
int GetNextScriptId();
+ int GetNextStackFrameInfoId();
+
#if V8_SFI_HAS_UNIQUE_ID
int GetNextUniqueSharedFunctionInfoId() {
int current_id = next_unique_sfi_id_.load(std::memory_order_relaxed);
@@ -1860,8 +1865,6 @@ class V8_EXPORT_PRIVATE Isolate final : private HiddenFactory {
// Delete new/delete operators to ensure that Isolate::New() and
// Isolate::Delete() are used for Isolate creation and deletion.
void* operator new(size_t, void* ptr) { return ptr; }
- void* operator new(size_t) = delete;
- void operator delete(void*) = delete;
friend class heap::HeapTester;
friend class TestSerializer;
diff --git a/chromium/v8/src/execution/local-isolate-wrapper-inl.h b/chromium/v8/src/execution/local-isolate-wrapper-inl.h
new file mode 100644
index 00000000000..2f573130491
--- /dev/null
+++ b/chromium/v8/src/execution/local-isolate-wrapper-inl.h
@@ -0,0 +1,148 @@
+// Copyright 2020 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_EXECUTION_LOCAL_ISOLATE_WRAPPER_INL_H_
+#define V8_EXECUTION_LOCAL_ISOLATE_WRAPPER_INL_H_
+
+#include "src/execution/isolate.h"
+#include "src/execution/local-isolate-wrapper.h"
+#include "src/execution/off-thread-isolate.h"
+#include "src/heap/heap-inl.h"
+#include "src/heap/off-thread-heap.h"
+#include "src/logging/log.h"
+#include "src/logging/off-thread-logger.h"
+
+namespace v8 {
+namespace internal {
+
+class HeapMethodCaller {
+ public:
+ explicit HeapMethodCaller(LocalHeapWrapper* heap) : heap_(heap) {}
+
+ ReadOnlySpace* read_only_space() {
+ return heap_->is_off_thread() ? heap_->off_thread()->read_only_space()
+ : heap_->main_thread()->read_only_space();
+ }
+
+ void OnAllocationEvent(HeapObject obj, int size) {
+ return heap_->is_off_thread()
+ ? heap_->off_thread()->OnAllocationEvent(obj, size)
+ : heap_->main_thread()->OnAllocationEvent(obj, size);
+ }
+
+ bool Contains(HeapObject obj) {
+ return heap_->is_off_thread() ? heap_->off_thread()->Contains(obj)
+ : heap_->main_thread()->Contains(obj);
+ }
+
+ private:
+ LocalHeapWrapper* heap_;
+};
+
+class LoggerMethodCaller {
+ public:
+ explicit LoggerMethodCaller(LocalLoggerWrapper* logger) : logger_(logger) {}
+
+ bool is_logging() const {
+ return logger_->is_off_thread() ? logger_->off_thread()->is_logging()
+ : logger_->main_thread()->is_logging();
+ }
+
+ void ScriptEvent(Logger::ScriptEventType type, int script_id) {
+ return logger_->is_off_thread()
+ ? logger_->off_thread()->ScriptEvent(type, script_id)
+ : logger_->main_thread()->ScriptEvent(type, script_id);
+ }
+ void ScriptDetails(Script script) {
+ return logger_->is_off_thread()
+ ? logger_->off_thread()->ScriptDetails(script)
+ : logger_->main_thread()->ScriptDetails(script);
+ }
+
+ private:
+ LocalLoggerWrapper* logger_;
+};
+
+class IsolateMethodCaller {
+ public:
+ explicit IsolateMethodCaller(LocalIsolateWrapper* isolate)
+ : isolate_(isolate) {}
+
+ LocalLoggerWrapper logger() {
+ return isolate_->is_off_thread()
+ ? LocalLoggerWrapper(isolate_->off_thread()->logger())
+ : LocalLoggerWrapper(isolate_->main_thread()->logger());
+ }
+
+ LocalHeapWrapper heap() {
+ return isolate_->is_off_thread()
+ ? LocalHeapWrapper(isolate_->off_thread()->heap())
+ : LocalHeapWrapper(isolate_->main_thread()->heap());
+ }
+
+ ReadOnlyHeap* read_only_heap() {
+ return isolate_->is_off_thread()
+ ? isolate_->off_thread()->read_only_heap()
+ : isolate_->main_thread()->read_only_heap();
+ }
+
+ Object root(RootIndex index) {
+ return isolate_->is_off_thread() ? isolate_->off_thread()->root(index)
+ : isolate_->main_thread()->root(index);
+ }
+
+ int GetNextScriptId() {
+ return isolate_->is_off_thread()
+ ? isolate_->off_thread()->GetNextScriptId()
+ : isolate_->main_thread()->GetNextScriptId();
+ }
+
+ private:
+ LocalIsolateWrapper* isolate_;
+};
+
+// Helper wrapper for HandleScope behaviour with a LocalIsolateWrapper.
+class LocalHandleScopeWrapper {
+ public:
+ explicit LocalHandleScopeWrapper(LocalIsolateWrapper local_isolate)
+ : is_off_thread_(local_isolate.is_off_thread()) {
+ if (is_off_thread_) {
+ new (off_thread()) OffThreadHandleScope(local_isolate.off_thread());
+ } else {
+ new (main_thread()) HandleScope(local_isolate.main_thread());
+ }
+ }
+ ~LocalHandleScopeWrapper() {
+ if (is_off_thread_) {
+ off_thread()->~OffThreadHandleScope();
+ } else {
+ main_thread()->~HandleScope();
+ }
+ }
+
+ template <typename T>
+ Handle<T> CloseAndEscape(Handle<T> handle) {
+ if (is_off_thread_) {
+ return off_thread()->CloseAndEscape(handle);
+ } else {
+ return main_thread()->CloseAndEscape(handle);
+ }
+ }
+
+ private:
+ HandleScope* main_thread() {
+ return reinterpret_cast<HandleScope*>(&scope_storage_);
+ }
+ OffThreadHandleScope* off_thread() {
+ return reinterpret_cast<OffThreadHandleScope*>(&scope_storage_);
+ }
+
+ std::aligned_union_t<0, HandleScope, OffThreadHandleScope> scope_storage_;
+ bool is_off_thread_;
+};
+
+} // namespace internal
+} // namespace v8
+
+#endif // V8_EXECUTION_LOCAL_ISOLATE_WRAPPER_INL_H_
diff --git a/chromium/v8/src/execution/local-isolate-wrapper.h b/chromium/v8/src/execution/local-isolate-wrapper.h
new file mode 100644
index 00000000000..8dbf0c23919
--- /dev/null
+++ b/chromium/v8/src/execution/local-isolate-wrapper.h
@@ -0,0 +1,85 @@
+// Copyright 2020 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_EXECUTION_LOCAL_ISOLATE_WRAPPER_H_
+#define V8_EXECUTION_LOCAL_ISOLATE_WRAPPER_H_
+
+#include "src/utils/pointer-with-payload.h"
+
+namespace v8 {
+namespace internal {
+
+// LocalWrapperBase is the base-class for wrapper classes around a main-thread
+// and off-thread type, e.g. Isolate and OffThreadIsolate, and a bit stating
+// which of the two the wrapper wraps.
+//
+// The shared methods are defined on MethodCaller, which will dispatch to the
+// right type depending on the state of the wrapper. The reason for a separate
+// MethodCaller is to
+//
+// a) Move the method definitions into an -inl.h so that this header can have
+// minimal dependencies, and
+// b) Allow the type methods to be called with operator-> (e.g.
+// isolate_wrapper->heap()), while forcing the wrapper methods to be called
+// with a dot (e.g. isolate_wrapper.is_main_thread()).
+template <typename MainThreadType, typename OffThreadType,
+ typename MethodCaller>
+class LocalWrapperBase {
+ public:
+ // Helper for returning a MethodCaller* by value from operator->.
+ class MethodCallerRef {
+ public:
+ MethodCaller* operator->() { return &caller_; }
+
+ private:
+ friend class LocalWrapperBase;
+ explicit MethodCallerRef(LocalWrapperBase* wrapper) : caller_(wrapper) {}
+
+ MethodCaller caller_;
+ };
+
+ explicit LocalWrapperBase(std::nullptr_t) : pointer_and_tag_(nullptr) {}
+ explicit LocalWrapperBase(MainThreadType* pointer)
+ : pointer_and_tag_(pointer, false) {}
+ explicit LocalWrapperBase(OffThreadType* pointer)
+ : pointer_and_tag_(pointer, true) {}
+
+ MainThreadType* main_thread() {
+ DCHECK(is_main_thread());
+ return static_cast<MainThreadType*>(
+ pointer_and_tag_.GetPointerWithKnownPayload(false));
+ }
+ OffThreadType* off_thread() {
+ DCHECK(is_off_thread());
+ return static_cast<OffThreadType*>(
+ pointer_and_tag_.GetPointerWithKnownPayload(true));
+ }
+
+ bool is_main_thread() const {
+ return !is_null() && !pointer_and_tag_.GetPayload();
+ }
+ bool is_off_thread() const {
+ return !is_null() && pointer_and_tag_.GetPayload();
+ }
+ bool is_null() const { return pointer_and_tag_.GetPointer() == nullptr; }
+
+ // Access the methods via wrapper->Method.
+ MethodCallerRef operator->() { return MethodCallerRef(this); }
+
+ private:
+ PointerWithPayload<void, bool, 1> pointer_and_tag_;
+};
+
+using LocalHeapWrapper =
+ LocalWrapperBase<class Heap, class OffThreadHeap, class HeapMethodCaller>;
+using LocalLoggerWrapper = LocalWrapperBase<class Logger, class OffThreadLogger,
+ class LoggerMethodCaller>;
+using LocalIsolateWrapper =
+ LocalWrapperBase<class Isolate, class OffThreadIsolate,
+ class IsolateMethodCaller>;
+
+} // namespace internal
+} // namespace v8
+
+#endif // V8_EXECUTION_LOCAL_ISOLATE_WRAPPER_H_
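As a usage sketch (not part of the patch, and assuming an Isolate* and an OffThreadIsolate* are supplied by surrounding code), the wrapper is passed by value, its own state is queried with dot syntax, and the shared methods declared on IsolateMethodCaller in the -inl.h above are reached through operator->:

#include "src/execution/local-isolate-wrapper-inl.h"

namespace v8 {
namespace internal {

// Sketch only: both isolate pointers are assumed to come from elsewhere.
void UseLocalIsolateWrapper(Isolate* main_isolate,
                            OffThreadIsolate* background_isolate) {
  LocalIsolateWrapper local(main_isolate);   // payload bit cleared
  DCHECK(local.is_main_thread());            // wrapper state, dot syntax
  int script_id = local->GetNextScriptId();  // dispatched via IsolateMethodCaller
  USE(script_id);

  LocalIsolateWrapper off_thread(background_isolate);  // payload bit set
  DCHECK(off_thread.is_off_thread());
  ReadOnlyHeap* ro_heap = off_thread->read_only_heap();
  USE(ro_heap);
}

}  // namespace internal
}  // namespace v8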
diff --git a/chromium/v8/src/execution/messages.cc b/chromium/v8/src/execution/messages.cc
index 33a2fa99ba6..86e3d48882d 100644
--- a/chromium/v8/src/execution/messages.cc
+++ b/chromium/v8/src/execution/messages.cc
@@ -311,18 +311,18 @@ MaybeHandle<String> FormatEvalOrigin(Isolate* isolate, Handle<Script> script) {
} // namespace
-Handle<Object> StackFrameBase::GetEvalOrigin() {
+Handle<PrimitiveHeapObject> StackFrameBase::GetEvalOrigin() {
if (!HasScript() || !IsEval()) return isolate_->factory()->undefined_value();
return FormatEvalOrigin(isolate_, GetScript()).ToHandleChecked();
}
-Handle<Object> StackFrameBase::GetWasmModuleName() {
+Handle<PrimitiveHeapObject> StackFrameBase::GetWasmModuleName() {
return isolate_->factory()->undefined_value();
}
int StackFrameBase::GetWasmFunctionIndex() { return StackFrameBase::kNone; }
-Handle<Object> StackFrameBase::GetWasmInstance() {
+Handle<HeapObject> StackFrameBase::GetWasmInstance() {
return isolate_->factory()->undefined_value();
}
@@ -351,6 +351,7 @@ void JSStackFrame::FromFrameArray(Isolate* isolate, Handle<FrameArray> array,
is_strict_ = (flags & FrameArray::kIsStrict) != 0;
is_async_ = (flags & FrameArray::kIsAsync) != 0;
is_promise_all_ = (flags & FrameArray::kIsPromiseAll) != 0;
+ is_promise_any_ = (flags & FrameArray::kIsPromiseAny) != 0;
}
JSStackFrame::JSStackFrame(Isolate* isolate, Handle<Object> receiver,
@@ -375,7 +376,7 @@ Handle<Object> JSStackFrame::GetFileName() {
return handle(GetScript()->name(), isolate_);
}
-Handle<Object> JSStackFrame::GetFunctionName() {
+Handle<PrimitiveHeapObject> JSStackFrame::GetFunctionName() {
Handle<String> result = JSFunction::GetDebugName(function_);
if (result->length() != 0) return result;
@@ -418,7 +419,7 @@ Handle<Object> JSStackFrame::GetScriptNameOrSourceUrl() {
return ScriptNameOrSourceUrl(GetScript(), isolate_);
}
-Handle<Object> JSStackFrame::GetMethodName() {
+Handle<PrimitiveHeapObject> JSStackFrame::GetMethodName() {
if (receiver_->IsNullOrUndefined(isolate_)) {
return isolate_->factory()->null_value();
}
@@ -452,7 +453,7 @@ Handle<Object> JSStackFrame::GetMethodName() {
}
HandleScope outer_scope(isolate_);
- Handle<Object> result;
+ Handle<PrimitiveHeapObject> result;
for (PrototypeIterator iter(isolate_, receiver, kStartAtReceiver);
!iter.IsAtEnd(); iter.Advance()) {
Handle<Object> current = PrototypeIterator::GetCurrent(iter);
@@ -478,7 +479,7 @@ Handle<Object> JSStackFrame::GetMethodName() {
return isolate_->factory()->null_value();
}
-Handle<Object> JSStackFrame::GetTypeName() {
+Handle<PrimitiveHeapObject> JSStackFrame::GetTypeName() {
// TODO(jgruber): Check for strict/constructor here as in
// CallSitePrototypeGetThis.
@@ -514,7 +515,7 @@ int JSStackFrame::GetColumnNumber() {
}
int JSStackFrame::GetPromiseIndex() const {
- return is_promise_all_ ? offset_ : kNone;
+ return (is_promise_all_ || is_promise_any_) ? offset_ : kNone;
}
bool JSStackFrame::IsNative() {
@@ -564,8 +565,8 @@ Handle<Object> WasmStackFrame::GetFunction() const {
return handle(Smi::FromInt(wasm_func_index_), isolate_);
}
-Handle<Object> WasmStackFrame::GetFunctionName() {
- Handle<Object> name;
+Handle<PrimitiveHeapObject> WasmStackFrame::GetFunctionName() {
+ Handle<PrimitiveHeapObject> name;
Handle<WasmModuleObject> module_object(wasm_instance_->module_object(),
isolate_);
if (!WasmModuleObject::GetFunctionNameOrNull(isolate_, module_object,
@@ -582,8 +583,8 @@ Handle<Object> WasmStackFrame::GetScriptNameOrSourceUrl() {
return ScriptNameOrSourceUrl(script, isolate_);
}
-Handle<Object> WasmStackFrame::GetWasmModuleName() {
- Handle<Object> module_name;
+Handle<PrimitiveHeapObject> WasmStackFrame::GetWasmModuleName() {
+ Handle<PrimitiveHeapObject> module_name;
Handle<WasmModuleObject> module_object(wasm_instance_->module_object(),
isolate_);
if (!WasmModuleObject::GetModuleNameOrNull(isolate_, module_object)
@@ -593,7 +594,7 @@ Handle<Object> WasmStackFrame::GetWasmModuleName() {
return module_name;
}
-Handle<Object> WasmStackFrame::GetWasmInstance() { return wasm_instance_; }
+Handle<HeapObject> WasmStackFrame::GetWasmInstance() { return wasm_instance_; }
int WasmStackFrame::GetPosition() const {
return IsInterpreted() ? offset_ : code_->GetSourcePositionBefore(offset_);
@@ -607,7 +608,9 @@ int WasmStackFrame::GetModuleOffset() const {
return function_offset + GetPosition();
}
-Handle<Object> WasmStackFrame::Null() const {
+Handle<Object> WasmStackFrame::GetFileName() { return Null(); }
+
+Handle<PrimitiveHeapObject> WasmStackFrame::Null() const {
return isolate_->factory()->null_value();
}
@@ -1258,14 +1261,13 @@ Handle<String> RenderCallSite(Isolate* isolate, Handle<Object> object,
isolate, *location->shared());
UnoptimizedCompileState compile_state(isolate);
ParseInfo info(isolate, flags, &compile_state);
- if (parsing::ParseAny(&info, location->shared(), isolate)) {
+ if (parsing::ParseAny(&info, location->shared(), isolate,
+ parsing::ReportStatisticsMode::kNo)) {
info.ast_value_factory()->Internalize(isolate);
CallPrinter printer(isolate, location->shared()->IsUserJavaScript());
Handle<String> str = printer.Print(info.literal(), location->start_pos());
*hint = printer.GetErrorHint();
if (str->length() > 0) return str;
- } else {
- isolate->clear_pending_exception();
}
}
return BuildDefaultCallSite(isolate, object);
@@ -1319,7 +1321,8 @@ Object ErrorUtils::ThrowSpreadArgError(Isolate* isolate, MessageTemplate id,
isolate, *location.shared());
UnoptimizedCompileState compile_state(isolate);
ParseInfo info(isolate, flags, &compile_state);
- if (parsing::ParseAny(&info, location.shared(), isolate)) {
+ if (parsing::ParseAny(&info, location.shared(), isolate,
+ parsing::ReportStatisticsMode::kNo)) {
info.ast_value_factory()->Internalize(isolate);
CallPrinter printer(isolate, location.shared()->IsUserJavaScript(),
CallPrinter::SpreadErrorInArgsHint::kErrorInArgs);
@@ -1334,7 +1337,6 @@ Object ErrorUtils::ThrowSpreadArgError(Isolate* isolate, MessageTemplate id,
MessageLocation(location.script(), pos, pos + 1, location.shared());
}
} else {
- isolate->clear_pending_exception();
callsite = BuildDefaultCallSite(isolate, object);
}
}
@@ -1396,7 +1398,8 @@ Object ErrorUtils::ThrowLoadFromNullOrUndefined(Isolate* isolate,
isolate, *location.shared());
UnoptimizedCompileState compile_state(isolate);
ParseInfo info(isolate, flags, &compile_state);
- if (parsing::ParseAny(&info, location.shared(), isolate)) {
+ if (parsing::ParseAny(&info, location.shared(), isolate,
+ parsing::ReportStatisticsMode::kNo)) {
info.ast_value_factory()->Internalize(isolate);
CallPrinter printer(isolate, location.shared()->IsUserJavaScript());
Handle<String> str = printer.Print(info.literal(), location.start_pos());
@@ -1431,8 +1434,6 @@ Object ErrorUtils::ThrowLoadFromNullOrUndefined(Isolate* isolate,
}
if (str->length() > 0) callsite = str;
- } else {
- isolate->clear_pending_exception();
}
}
diff --git a/chromium/v8/src/execution/messages.h b/chromium/v8/src/execution/messages.h
index 963796c7fe6..1fe79031766 100644
--- a/chromium/v8/src/execution/messages.h
+++ b/chromium/v8/src/execution/messages.h
@@ -27,6 +27,7 @@ class AbstractCode;
class FrameArray;
class JSMessageObject;
class LookupIterator;
+class PrimitiveHeapObject;
class SharedFunctionInfo;
class SourceInfo;
class WasmInstanceObject;
@@ -67,13 +68,13 @@ class StackFrameBase {
virtual Handle<Object> GetFunction() const = 0;
virtual Handle<Object> GetFileName() = 0;
- virtual Handle<Object> GetFunctionName() = 0;
+ virtual Handle<PrimitiveHeapObject> GetFunctionName() = 0;
virtual Handle<Object> GetScriptNameOrSourceUrl() = 0;
- virtual Handle<Object> GetMethodName() = 0;
- virtual Handle<Object> GetTypeName() = 0;
- virtual Handle<Object> GetEvalOrigin();
- virtual Handle<Object> GetWasmModuleName();
- virtual Handle<Object> GetWasmInstance();
+ virtual Handle<PrimitiveHeapObject> GetMethodName() = 0;
+ virtual Handle<PrimitiveHeapObject> GetTypeName() = 0;
+ virtual Handle<PrimitiveHeapObject> GetEvalOrigin();
+ virtual Handle<PrimitiveHeapObject> GetWasmModuleName();
+ virtual Handle<HeapObject> GetWasmInstance();
// Returns the script ID if one is attached, -1 otherwise.
int GetScriptId() const;
@@ -86,7 +87,8 @@ class StackFrameBase {
// Return 0-based Wasm function index. Returns -1 for non-Wasm frames.
virtual int GetWasmFunctionIndex();
- // Returns index for Promise.all() async frames, or -1 for other frames.
+ // Returns the index of the rejected promise in the Promise combinator input,
+ // or -1 if this frame is not a Promise combinator frame.
virtual int GetPromiseIndex() const = 0;
virtual bool IsNative() = 0;
@@ -94,6 +96,7 @@ class StackFrameBase {
virtual bool IsEval();
virtual bool IsAsync() const = 0;
virtual bool IsPromiseAll() const = 0;
+ virtual bool IsPromiseAny() const = 0;
virtual bool IsConstructor() = 0;
virtual bool IsStrict() const = 0;
@@ -121,10 +124,10 @@ class JSStackFrame : public StackFrameBase {
Handle<Object> GetFunction() const override;
Handle<Object> GetFileName() override;
- Handle<Object> GetFunctionName() override;
+ Handle<PrimitiveHeapObject> GetFunctionName() override;
Handle<Object> GetScriptNameOrSourceUrl() override;
- Handle<Object> GetMethodName() override;
- Handle<Object> GetTypeName() override;
+ Handle<PrimitiveHeapObject> GetMethodName() override;
+ Handle<PrimitiveHeapObject> GetTypeName() override;
int GetPosition() const override;
int GetLineNumber() override;
@@ -136,6 +139,7 @@ class JSStackFrame : public StackFrameBase {
bool IsToplevel() override;
bool IsAsync() const override { return is_async_; }
bool IsPromiseAll() const override { return is_promise_all_; }
+ bool IsPromiseAny() const override { return is_promise_any_; }
bool IsConstructor() override { return is_constructor_; }
bool IsStrict() const override { return is_strict_; }
@@ -155,6 +159,7 @@ class JSStackFrame : public StackFrameBase {
bool is_async_ : 1;
bool is_constructor_ : 1;
bool is_promise_all_ : 1;
+ bool is_promise_any_ : 1;
bool is_strict_ : 1;
friend class FrameArrayIterator;
@@ -167,13 +172,13 @@ class WasmStackFrame : public StackFrameBase {
Handle<Object> GetReceiver() const override;
Handle<Object> GetFunction() const override;
- Handle<Object> GetFileName() override { return Null(); }
- Handle<Object> GetFunctionName() override;
+ Handle<Object> GetFileName() override;
+ Handle<PrimitiveHeapObject> GetFunctionName() override;
Handle<Object> GetScriptNameOrSourceUrl() override;
- Handle<Object> GetMethodName() override { return Null(); }
- Handle<Object> GetTypeName() override { return Null(); }
- Handle<Object> GetWasmModuleName() override;
- Handle<Object> GetWasmInstance() override;
+ Handle<PrimitiveHeapObject> GetMethodName() override { return Null(); }
+ Handle<PrimitiveHeapObject> GetTypeName() override { return Null(); }
+ Handle<PrimitiveHeapObject> GetWasmModuleName() override;
+ Handle<HeapObject> GetWasmInstance() override;
int GetPosition() const override;
int GetLineNumber() override { return 0; }
@@ -186,12 +191,13 @@ class WasmStackFrame : public StackFrameBase {
bool IsToplevel() override { return false; }
bool IsAsync() const override { return false; }
bool IsPromiseAll() const override { return false; }
+ bool IsPromiseAny() const override { return false; }
bool IsConstructor() override { return false; }
bool IsStrict() const override { return false; }
bool IsInterpreted() const { return code_ == nullptr; }
protected:
- Handle<Object> Null() const;
+ Handle<PrimitiveHeapObject> Null() const;
bool HasScript() const override;
Handle<Script> GetScript() const override;
@@ -308,7 +314,7 @@ class ErrorUtils : public AllStatic {
class MessageFormatter {
public:
- static const char* TemplateString(MessageTemplate index);
+ V8_EXPORT_PRIVATE static const char* TemplateString(MessageTemplate index);
V8_EXPORT_PRIVATE static MaybeHandle<String> Format(Isolate* isolate,
MessageTemplate index,
diff --git a/chromium/v8/src/execution/microtask-queue.h b/chromium/v8/src/execution/microtask-queue.h
index 4ce1498279c..82840c2bed5 100644
--- a/chromium/v8/src/execution/microtask-queue.h
+++ b/chromium/v8/src/execution/microtask-queue.h
@@ -26,7 +26,7 @@ class V8_EXPORT_PRIVATE MicrotaskQueue final : public v8::MicrotaskQueue {
static void SetUpDefaultMicrotaskQueue(Isolate* isolate);
static std::unique_ptr<MicrotaskQueue> New(Isolate* isolate);
- ~MicrotaskQueue();
+ ~MicrotaskQueue() override;
// Uses raw Address values because it's called via ExternalReference.
// {raw_microtask} is a tagged Microtask pointer.
diff --git a/chromium/v8/src/execution/off-thread-isolate-inl.h b/chromium/v8/src/execution/off-thread-isolate-inl.h
index 13dfebd47f5..9e82ad9b73b 100644
--- a/chromium/v8/src/execution/off-thread-isolate-inl.h
+++ b/chromium/v8/src/execution/off-thread-isolate-inl.h
@@ -15,6 +15,14 @@ namespace internal {
Address OffThreadIsolate::isolate_root() const {
return isolate_->isolate_root();
}
+ReadOnlyHeap* OffThreadIsolate::read_only_heap() {
+ return isolate_->read_only_heap();
+}
+
+Object OffThreadIsolate::root(RootIndex index) {
+ DCHECK(RootsTable::IsImmortalImmovable(index));
+ return isolate_->root(index);
+}
} // namespace internal
} // namespace v8
diff --git a/chromium/v8/src/execution/off-thread-isolate.cc b/chromium/v8/src/execution/off-thread-isolate.cc
index 3a4c39052f2..08675493723 100644
--- a/chromium/v8/src/execution/off-thread-isolate.cc
+++ b/chromium/v8/src/execution/off-thread-isolate.cc
@@ -7,88 +7,12 @@
#include "src/execution/isolate.h"
#include "src/execution/thread-id.h"
#include "src/handles/handles-inl.h"
+#include "src/handles/off-thread-transfer-handle-storage-inl.h"
#include "src/logging/off-thread-logger.h"
namespace v8 {
namespace internal {
-class OffThreadTransferHandleStorage {
- public:
- enum State { kOffThreadHandle, kRawObject, kHandle };
-
- explicit OffThreadTransferHandleStorage(
- Address* off_thread_handle_location,
- std::unique_ptr<OffThreadTransferHandleStorage> next)
- : handle_location_(off_thread_handle_location),
- next_(std::move(next)),
- state_(kOffThreadHandle) {
- CheckValid();
- }
-
- void ConvertFromOffThreadHandleOnFinish() {
- CheckValid();
- DCHECK_EQ(state_, kOffThreadHandle);
- raw_obj_ptr_ = *handle_location_;
- state_ = kRawObject;
- CheckValid();
- }
-
- void ConvertToHandleOnPublish(Isolate* isolate) {
- CheckValid();
- DCHECK_EQ(state_, kRawObject);
- handle_location_ = handle(Object(raw_obj_ptr_), isolate).location();
- state_ = kHandle;
- CheckValid();
- }
-
- Address* handle_location() const {
- DCHECK_EQ(state_, kHandle);
- DCHECK(
- Object(*handle_location_).IsSmi() ||
- !Heap::InOffThreadSpace(HeapObject::cast(Object(*handle_location_))));
- return handle_location_;
- }
-
- OffThreadTransferHandleStorage* next() { return next_.get(); }
-
- State state() const { return state_; }
-
- private:
- void CheckValid() {
-#ifdef DEBUG
- Object obj;
-
- switch (state_) {
- case kHandle:
- case kOffThreadHandle:
- DCHECK_NOT_NULL(handle_location_);
- obj = Object(*handle_location_);
- break;
- case kRawObject:
- obj = Object(raw_obj_ptr_);
- break;
- }
-
- // Smis are always fine.
- if (obj.IsSmi()) return;
-
- // The object that is not yet in a main-thread handle should be in
- // off-thread space. Main-thread handles can still point to off-thread space
- // during Publish, so that invariant is taken care of on main-thread handle
- // access.
- DCHECK_IMPLIES(state_ != kHandle,
- Heap::InOffThreadSpace(HeapObject::cast(obj)));
-#endif
- }
-
- union {
- Address* handle_location_;
- Address raw_obj_ptr_;
- };
- std::unique_ptr<OffThreadTransferHandleStorage> next_;
- State state_;
-};
-
Address* OffThreadTransferHandleBase::ToHandleLocation() const {
return storage_ == nullptr ? nullptr : storage_->handle_location();
}
@@ -98,32 +22,16 @@ OffThreadIsolate::OffThreadIsolate(Isolate* isolate, Zone* zone)
heap_(isolate->heap()),
isolate_(isolate),
logger_(new OffThreadLogger()),
- handle_zone_(zone),
- off_thread_transfer_handles_head_(nullptr) {}
+ handle_zone_(zone) {}
OffThreadIsolate::~OffThreadIsolate() = default;
void OffThreadIsolate::FinishOffThread() {
heap()->FinishOffThread();
-
- OffThreadTransferHandleStorage* storage =
- off_thread_transfer_handles_head_.get();
- while (storage != nullptr) {
- storage->ConvertFromOffThreadHandleOnFinish();
- storage = storage->next();
- }
-
handle_zone_ = nullptr;
}
void OffThreadIsolate::Publish(Isolate* isolate) {
- OffThreadTransferHandleStorage* storage =
- off_thread_transfer_handles_head_.get();
- while (storage != nullptr) {
- storage->ConvertToHandleOnPublish(isolate);
- storage = storage->next();
- }
-
heap()->Publish(isolate->heap());
}
@@ -145,16 +53,5 @@ void OffThreadIsolate::PinToCurrentThread() {
thread_id_ = ThreadId::Current();
}
-OffThreadTransferHandleStorage* OffThreadIsolate::AddTransferHandleStorage(
- HandleBase handle) {
- DCHECK_IMPLIES(off_thread_transfer_handles_head_ != nullptr,
- off_thread_transfer_handles_head_->state() ==
- OffThreadTransferHandleStorage::kOffThreadHandle);
- off_thread_transfer_handles_head_ =
- std::make_unique<OffThreadTransferHandleStorage>(
- handle.location(), std::move(off_thread_transfer_handles_head_));
- return off_thread_transfer_handles_head_.get();
-}
-
} // namespace internal
} // namespace v8
diff --git a/chromium/v8/src/execution/off-thread-isolate.h b/chromium/v8/src/execution/off-thread-isolate.h
index 80fea9bc4c9..e5217ef3bf7 100644
--- a/chromium/v8/src/execution/off-thread-isolate.h
+++ b/chromium/v8/src/execution/off-thread-isolate.h
@@ -86,6 +86,8 @@ class V8_EXPORT_PRIVATE OffThreadIsolate final
OffThreadHeap* heap() { return &heap_; }
inline Address isolate_root() const;
+ inline ReadOnlyHeap* read_only_heap();
+ inline Object root(RootIndex index);
v8::internal::OffThreadFactory* factory() {
// Upcast to the privately inherited base-class using c-style casts to avoid
@@ -129,7 +131,7 @@ class V8_EXPORT_PRIVATE OffThreadIsolate final
if (handle.is_null()) {
return OffThreadTransferHandle<T>();
}
- return OffThreadTransferHandle<T>(AddTransferHandleStorage(handle));
+ return OffThreadTransferHandle<T>(heap()->AddTransferHandleStorage(handle));
}
template <typename T>
@@ -139,7 +141,8 @@ class V8_EXPORT_PRIVATE OffThreadIsolate final
if (!maybe_handle.ToHandle(&handle)) {
return OffThreadTransferMaybeHandle<T>();
}
- return OffThreadTransferMaybeHandle<T>(AddTransferHandleStorage(handle));
+ return OffThreadTransferMaybeHandle<T>(
+ heap()->AddTransferHandleStorage(handle));
}
int GetNextScriptId();
@@ -157,8 +160,6 @@ class V8_EXPORT_PRIVATE OffThreadIsolate final
private:
friend class v8::internal::OffThreadFactory;
- OffThreadTransferHandleStorage* AddTransferHandleStorage(HandleBase handle);
-
OffThreadHeap heap_;
// TODO(leszeks): Extract out the fields of the Isolate we want and store
@@ -168,8 +169,6 @@ class V8_EXPORT_PRIVATE OffThreadIsolate final
std::unique_ptr<OffThreadLogger> logger_;
ThreadId thread_id_;
Zone* handle_zone_;
- std::unique_ptr<OffThreadTransferHandleStorage>
- off_thread_transfer_handles_head_;
};
} // namespace internal
diff --git a/chromium/v8/src/execution/s390/simulator-s390.cc b/chromium/v8/src/execution/s390/simulator-s390.cc
index f41288f6a96..85688f861ce 100644
--- a/chromium/v8/src/execution/s390/simulator-s390.cc
+++ b/chromium/v8/src/execution/s390/simulator-s390.cc
@@ -785,9 +785,10 @@ void Simulator::EvalTableInit() {
V(vlc, VLC, 0xE7DE) /* type = VRR_A VECTOR LOAD COMPLEMENT */ \
V(vsel, VSEL, 0xE78D) /* type = VRR_E VECTOR SELECT */ \
V(vperm, VPERM, 0xE78C) /* type = VRR_E VECTOR PERMUTE */ \
- V(vtm, VTM, 0xE7D8) /* type = VRR_A VECTOR TEST UNDER MASK */ \
- V(vesl, VESL, 0xE730) /* type = VRS_A VECTOR ELEMENT SHIFT LEFT */ \
- V(veslv, VESLV, 0xE770) /* type = VRR_C VECTOR ELEMENT SHIFT LEFT */ \
+ V(vbperm, VBPERM, 0xE785) /* type = VRR_C VECTOR BIT PERMUTE */ \
+ V(vtm, VTM, 0xE7D8) /* type = VRR_A VECTOR TEST UNDER MASK */ \
+ V(vesl, VESL, 0xE730) /* type = VRS_A VECTOR ELEMENT SHIFT LEFT */ \
+ V(veslv, VESLV, 0xE770) /* type = VRR_C VECTOR ELEMENT SHIFT LEFT */ \
V(vesrl, VESRL, \
0xE738) /* type = VRS_A VECTOR ELEMENT SHIFT RIGHT LOGICAL */ \
V(vesrlv, VESRLV, \
@@ -3702,6 +3703,34 @@ EVALUATE(VPERM) {
return length;
}
+EVALUATE(VBPERM) {
+ DCHECK_OPCODE(VBPERM);
+ DECODE_VRR_C_INSTRUCTION(r1, r2, r3, m6, m5, m4);
+ USE(m4);
+ USE(m5);
+ USE(m6);
+ uint16_t result_bits = 0;
+ for (int i = 0; i < kSimd128Size; i++) {
+ result_bits <<= 1;
+ uint8_t selected_bit_index = get_simd_register_by_lane<uint8_t>(r3, i);
+ unsigned __int128 src_bits =
+ *(reinterpret_cast<__int128*>(get_simd_register(r2).int8));
+ if (selected_bit_index < (kSimd128Size * kBitsPerByte)) {
+ unsigned __int128 bit_value =
+ (src_bits << selected_bit_index) >> (kSimd128Size * kBitsPerByte - 1);
+ result_bits |= bit_value;
+ }
+ }
+ set_simd_register_by_lane<uint64_t>(r1, 0, 0);
+ set_simd_register_by_lane<uint64_t>(r1, 1, 0);
+ // Write back in bytes to avoid endianness problems.
+ set_simd_register_by_lane<uint8_t>(r1, 6,
+ static_cast<uint8_t>(result_bits >> 8));
+ set_simd_register_by_lane<uint8_t>(
+ r1, 7, static_cast<uint8_t>((result_bits << 8) >> 8));
+ return length;
+}
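A small worked example of the bit numbering used above (illustrative values, not part of the patch):

// A selected index of 0 picks the most significant bit of the 128-bit source
// in r2, and the bit chosen by byte lane 0 of r3 ends up in bit 15 of
// result_bits. E.g. if every byte of r3 is 127 and r2 holds the value 1,
// every iteration selects the least significant source bit, so
// result_bits == 0xFFFF and byte lanes 6 and 7 of r1 are both set to 0xFF.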
+
EVALUATE(VSEL) {
DCHECK_OPCODE(VSEL);
DECODE_VRR_E_INSTRUCTION(r1, r2, r3, r4, m6, m5);
@@ -3977,33 +4006,35 @@ EVALUATE(VFNMS) {
#undef VECTOR_FP_MULTIPLY_QFMS_OPERATION
template <class T, class Operation>
-void VectorFPMaxMin(void* dst, void* src1, void* src2, Operation op) {
+void VectorFPMaxMin(void* dst, void* src1, void* src2, int mode, Operation op) {
T* dst_ptr = reinterpret_cast<T*>(dst);
T* src1_ptr = reinterpret_cast<T*>(src1);
T* src2_ptr = reinterpret_cast<T*>(src2);
for (size_t i = 0; i < kSimd128Size / sizeof(T); i++) {
T src1_val = *(src1_ptr + i);
T src2_val = *(src2_ptr + i);
- T value = op(src1_val, src2_val);
- // using Java's Max Min functions
- if (isnan(src1_val) || isnan(src2_val)) {
- value = NAN;
- }
+ T value = op(src1_val, src2_val, mode);
memcpy(dst_ptr + i, &value, sizeof(T));
}
}
-#define VECTOR_FP_MAX_MIN_FOR_TYPE(type, op) \
+#define VECTOR_FP_MAX_MIN_FOR_TYPE(type, op, std_op) \
VectorFPMaxMin<type>(&get_simd_register(r1), &get_simd_register(r2), \
- &get_simd_register(r3), [](type a, type b) { \
- if (signbit(b) op signbit(a)) \
+ &get_simd_register(r3), m6, \
+ [](type a, type b, int mode) { \
+ if (mode == 3) { \
+ return std::std_op(a, b); \
+ } \
+ if (isnan(a) || isnan(b)) \
+ return static_cast<type>(NAN); \
+ else if (signbit(b) op signbit(a)) \
return a; \
else if (signbit(b) != signbit(a)) \
return b; \
return (a op b) ? a : b; \
});
-#define VECTOR_FP_MAX_MIN(op) \
+#define VECTOR_FP_MAX_MIN(op, std_op) \
switch (m4) { \
case 2: \
if (m5 == 8) { \
@@ -4012,8 +4043,7 @@ void VectorFPMaxMin(void* dst, void* src1, void* src2, Operation op) {
set_simd_register_by_lane<float>(r1, 0, (src1 op src2) ? src1 : src2); \
} else { \
DCHECK_EQ(m5, 0); \
- DCHECK_EQ(m6, 1); \
- VECTOR_FP_MAX_MIN_FOR_TYPE(float, op) \
+ VECTOR_FP_MAX_MIN_FOR_TYPE(float, op, std_op) \
} \
break; \
case 3: \
@@ -4024,8 +4054,7 @@ void VectorFPMaxMin(void* dst, void* src1, void* src2, Operation op) {
(src1 op src2) ? src1 : src2); \
} else { \
DCHECK_EQ(m5, 0); \
- DCHECK_EQ(m6, 1); \
- VECTOR_FP_MAX_MIN_FOR_TYPE(double, op) \
+ VECTOR_FP_MAX_MIN_FOR_TYPE(double, op, std_op) \
} \
break; \
default: \
@@ -4037,8 +4066,7 @@ EVALUATE(VFMIN) {
DCHECK(CpuFeatures::IsSupported(VECTOR_ENHANCE_FACILITY_1));
DCHECK_OPCODE(VFMIN);
DECODE_VRR_C_INSTRUCTION(r1, r2, r3, m6, m5, m4);
- USE(m6);
- VECTOR_FP_MAX_MIN(<) // NOLINT
+ VECTOR_FP_MAX_MIN(<, min) // NOLINT
return length;
}
@@ -4047,7 +4075,7 @@ EVALUATE(VFMAX) {
DCHECK_OPCODE(VFMAX);
DECODE_VRR_C_INSTRUCTION(r1, r2, r3, m6, m5, m4);
USE(m6);
- VECTOR_FP_MAX_MIN(>) // NOLINT
+ VECTOR_FP_MAX_MIN(>, max) // NOLINT
return length;
}
@@ -4224,24 +4252,39 @@ EVALUATE(VFSQ) {
return length;
}
+#define ROUNDING_SWITCH(type) \
+ switch (m5) { \
+ case 4: \
+ set_simd_register_by_lane<type>(r1, i, nearbyint(value)); \
+ break; \
+ case 5: \
+ set_simd_register_by_lane<type>(r1, i, trunc(value)); \
+ break; \
+ case 6: \
+ set_simd_register_by_lane<type>(r1, i, ceil(value)); \
+ break; \
+ case 7: \
+ set_simd_register_by_lane<type>(r1, i, floor(value)); \
+ break; \
+ default: \
+ UNREACHABLE(); \
+ }
EVALUATE(VFI) {
DCHECK_OPCODE(VFI);
DECODE_VRR_A_INSTRUCTION(r1, r2, m5, m4, m3);
USE(m4);
- USE(m5);
- DCHECK_EQ(m5, 5);
switch (m3) {
case 2:
DCHECK(CpuFeatures::IsSupported(VECTOR_ENHANCE_FACILITY_1));
for (int i = 0; i < 4; i++) {
float value = get_simd_register_by_lane<float>(r2, i);
- set_simd_register_by_lane<float>(r1, i, trunc(value));
+ ROUNDING_SWITCH(float)
}
break;
case 3:
for (int i = 0; i < 2; i++) {
double value = get_simd_register_by_lane<double>(r2, i);
- set_simd_register_by_lane<double>(r1, i, trunc(value));
+ ROUNDING_SWITCH(double)
}
break;
default:
@@ -4249,6 +4292,7 @@ EVALUATE(VFI) {
}
return length;
}
+#undef ROUNDING_SWITCH
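For reference (illustrative check, not part of the patch), the m5 values handled by ROUNDING_SWITCH map onto the standard C rounding helpers; for an input lane of 2.5, under the default FE_TONEAREST mode used by nearbyint:

// m5 == 4 (nearbyint): 2.0  (ties round to even)
// m5 == 5 (trunc):     2.0
// m5 == 6 (ceil):      3.0
// m5 == 7 (floor):     2.0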
EVALUATE(DUMY) {
DCHECK_OPCODE(DUMY);
diff --git a/chromium/v8/src/execution/simulator.h b/chromium/v8/src/execution/simulator.h
index a4e07b235b4..74763474c61 100644
--- a/chromium/v8/src/execution/simulator.h
+++ b/chromium/v8/src/execution/simulator.h
@@ -128,7 +128,7 @@ class GeneratedCode {
#if defined(V8_TARGET_OS_WIN) && !defined(V8_OS_WIN)
FATAL("Generated code execution not possible during cross-compilation.");
#endif // defined(V8_TARGET_OS_WIN) && !defined(V8_OS_WIN)
-#if V8_OS_AIX
+#if ABI_USES_FUNCTION_DESCRIPTORS
// AIX ABI requires function descriptors (FD). Artificially create a pseudo
// FD to ensure correct dispatch to generated code. The 'volatile'
// declaration is required to avoid the compiler from not observing the
@@ -140,7 +140,7 @@ class GeneratedCode {
return fn(args...);
#else
return fn_ptr_(args...);
-#endif // V8_OS_AIX
+#endif // ABI_USES_FUNCTION_DESCRIPTORS
}
#endif // USE_SIMULATOR
diff --git a/chromium/v8/src/execution/stack-guard.cc b/chromium/v8/src/execution/stack-guard.cc
index d37327f1c3d..90689556673 100644
--- a/chromium/v8/src/execution/stack-guard.cc
+++ b/chromium/v8/src/execution/stack-guard.cc
@@ -272,8 +272,7 @@ Object StackGuard::HandleInterrupts() {
}
if (TestAndClear(&interrupt_flags, GROW_SHARED_MEMORY)) {
- TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.wasm"),
- "V8.WasmGrowSharedMemory");
+ TRACE_EVENT0("v8.wasm", "V8.WasmGrowSharedMemory");
BackingStore::UpdateSharedWasmMemoryObjects(isolate_);
}
@@ -297,12 +296,12 @@ Object StackGuard::HandleInterrupts() {
}
if (TestAndClear(&interrupt_flags, LOG_WASM_CODE)) {
- TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.wasm"), "LogCode");
+ TRACE_EVENT0("v8.wasm", "V8.LogCode");
isolate_->wasm_engine()->LogOutstandingCodesForIsolate(isolate_);
}
if (TestAndClear(&interrupt_flags, WASM_CODE_GC)) {
- TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.wasm"), "WasmCodeGC");
+ TRACE_EVENT0("v8.wasm", "V8.WasmCodeGC");
isolate_->wasm_engine()->ReportLiveCodeFromStackForGC(isolate_);
}
diff --git a/chromium/v8/src/extensions/cputracemark-extension.cc b/chromium/v8/src/extensions/cputracemark-extension.cc
index 9dfa9761fd1..5fde3608de8 100644
--- a/chromium/v8/src/extensions/cputracemark-extension.cc
+++ b/chromium/v8/src/extensions/cputracemark-extension.cc
@@ -34,12 +34,12 @@ void CpuTraceMarkExtension::Mark(
#if defined(__i386__) && defined(__pic__)
__asm__ __volatile__("push %%ebx; cpuid; pop %%ebx"
: "=a"(magic_dummy)
- : "a"(0x4711 | ((unsigned)(param) << 16))
+ : "a"(0x4711 | (param << 16))
: "ecx", "edx");
#else
__asm__ __volatile__("cpuid"
: "=a"(magic_dummy)
- : "a"(0x4711 | ((unsigned)(param) << 16))
+ : "a"(0x4711 | (param << 16))
: "ecx", "edx", "ebx");
#endif // defined(__i386__) && defined(__pic__)
diff --git a/chromium/v8/src/flags/flag-definitions.h b/chromium/v8/src/flags/flag-definitions.h
index 30d5f09180f..1ac03d14300 100644
--- a/chromium/v8/src/flags/flag-definitions.h
+++ b/chromium/v8/src/flags/flag-definitions.h
@@ -235,9 +235,6 @@ DEFINE_IMPLICATION(harmony_weak_refs_with_cleanup_some, harmony_weak_refs)
// Features that are complete (but still behind --harmony/es-staging flag).
#define HARMONY_STAGED_BASE(V) \
- V(harmony_string_replaceall, "harmony String.prototype.replaceAll") \
- V(harmony_logical_assignment, "harmony logical assignment") \
- V(harmony_promise_any, "harmony Promise.any") \
V(harmony_top_level_await, "harmony top level await")
#ifdef V8_INTL_SUPPORT
@@ -258,8 +255,11 @@ DEFINE_IMPLICATION(harmony_weak_refs_with_cleanup_some, harmony_weak_refs)
V(harmony_import_meta, "harmony import.meta property") \
V(harmony_dynamic_import, "harmony dynamic import") \
V(harmony_promise_all_settled, "harmony Promise.allSettled") \
+ V(harmony_promise_any, "harmony Promise.any") \
V(harmony_private_methods, "harmony private methods in class literals") \
- V(harmony_weak_refs, "harmony weak references")
+ V(harmony_weak_refs, "harmony weak references") \
+ V(harmony_string_replaceall, "harmony String.prototype.replaceAll") \
+ V(harmony_logical_assignment, "harmony logical assignment")
#ifdef V8_INTL_SUPPORT
#define HARMONY_SHIPPING(V) \
@@ -442,6 +442,11 @@ DEFINE_IMPLICATION(jitless, regexp_interpret_all)
DEFINE_NEG_IMPLICATION(jitless, validate_asm)
// --jitless also implies --no-expose-wasm, see InitializeOncePerProcessImpl.
+#ifndef V8_TARGET_ARCH_ARM
+// Unsupported on arm. See https://crbug.com/v8/8713.
+DEFINE_NEG_IMPLICATION(jitless, interpreted_frames_native_stack)
+#endif
+
// Flags for inline caching and feedback vectors.
DEFINE_BOOL(use_ic, true, "use inline caching")
DEFINE_INT(budget_for_feedback_vector_allocation, 1 * KB,
@@ -622,7 +627,11 @@ DEFINE_BOOL(trace_environment_liveness, false,
DEFINE_BOOL(turbo_load_elimination, true, "enable load elimination in TurboFan")
DEFINE_BOOL(trace_turbo_load_elimination, false,
"trace TurboFan load elimination")
-DEFINE_BOOL(turbo_profiling, false, "enable profiling in TurboFan")
+DEFINE_BOOL(turbo_profiling, false, "enable basic block profiling in TurboFan")
+DEFINE_BOOL(turbo_profiling_verbose, false,
+ "enable basic block profiling in TurboFan, and include each "
+ "function's schedule and disassembly in the output")
+DEFINE_IMPLICATION(turbo_profiling_verbose, turbo_profiling)
DEFINE_BOOL(turbo_verify_allocation, DEBUG_BOOL,
"verify register allocation in TurboFan")
DEFINE_BOOL(turbo_move_optimization, true, "optimize gap moves in TurboFan")
@@ -654,11 +663,16 @@ DEFINE_INT(reuse_opt_code_count, 0,
// Native context independent (NCI) code.
DEFINE_BOOL(turbo_nci, false,
"enable experimental native context independent code.")
+// TODO(v8:8888): Temporary until NCI caching is implemented or
+// feedback collection is made unconditional.
+DEFINE_IMPLICATION(turbo_nci, turbo_collect_feedback_in_generic_lowering)
DEFINE_BOOL(turbo_nci_as_highest_tier, false,
"replace default TF with NCI code as the highest tier for testing "
"purposes.")
DEFINE_BOOL(print_nci_code, false, "print native context independent code.")
DEFINE_BOOL(trace_turbo_nci, false, "trace native context independent code.")
+DEFINE_BOOL(turbo_collect_feedback_in_generic_lowering, false,
+ "enable experimental feedback collection in generic lowering.")
// Favor memory over execution speed.
DEFINE_BOOL(optimize_for_size, false,
@@ -714,16 +728,13 @@ DEFINE_DEBUG_BOOL(trace_wasm_streaming, false,
DEFINE_INT(trace_wasm_ast_start, 0,
"start function for wasm AST trace (inclusive)")
DEFINE_INT(trace_wasm_ast_end, 0, "end function for wasm AST trace (exclusive)")
-// Enable Liftoff by default on ia32 and x64. More architectures will follow
-// once they are implemented and sufficiently tested.
-#if V8_TARGET_ARCH_IA32 || V8_TARGET_ARCH_X64
DEFINE_BOOL(liftoff, true,
"enable Liftoff, the baseline compiler for WebAssembly")
-#else
-DEFINE_BOOL(liftoff, false,
- "enable Liftoff, the baseline compiler for WebAssembly")
-DEFINE_IMPLICATION(future, liftoff)
-#endif
+// We can't tier up (from Liftoff to TurboFan) in single-threaded mode, hence
+// disable Liftoff in that configuration for now. The alternative is disabling
+// TurboFan, which would reduce peak performance considerably.
+// Note that for debugging, Liftoff will still be used.
+DEFINE_NEG_IMPLICATION(single_threaded, liftoff)
DEFINE_DEBUG_BOOL(trace_liftoff, false,
"trace Liftoff, the baseline compiler for WebAssembly")
DEFINE_BOOL(trace_wasm_memory, false,
@@ -783,7 +794,6 @@ DEFINE_BOOL(wasm_fuzzer_gen_test, false,
"generate a test case when running a wasm fuzzer")
DEFINE_IMPLICATION(wasm_fuzzer_gen_test, single_threaded)
DEFINE_BOOL(print_wasm_code, false, "Print WebAssembly code")
-DEFINE_IMPLICATION(print_wasm_code, single_threaded)
DEFINE_BOOL(print_wasm_stub_code, false, "Print WebAssembly stub code")
DEFINE_BOOL(asm_wasm_lazy_compilation, false,
"enable lazy compilation for asm-wasm modules")
@@ -797,7 +807,7 @@ DEFINE_BOOL(wasm_lazy_validation, false,
// Flags for wasm prototyping that are not strictly features i.e., part of
// an existing proposal that may be conditionally enabled.
-DEFINE_BOOL(wasm_atomics_on_non_shared_memory, false,
+DEFINE_BOOL(wasm_atomics_on_non_shared_memory, true,
"allow atomic operations on non-shared WebAssembly memory")
DEFINE_BOOL(wasm_grow_shared_memory, true,
"allow growing shared WebAssembly memory objects")
@@ -820,6 +830,8 @@ DEFINE_INT(stress_sampling_allocation_profiler, 0,
"Enables sampling allocation profiler with X as a sample interval")
// Garbage collections flags.
+DEFINE_BOOL(lazy_new_space_shrinking, false,
+ "Enables the lazy new space shrinking strategy")
DEFINE_SIZE_T(min_semi_space_size, 0,
"min size of a semi-space (in MBytes), the new space consists of "
"two semi-spaces")
@@ -874,14 +886,6 @@ DEFINE_BOOL(
trace_allocations_origins, false,
"Show statistics about the origins of allocations. "
"Combine with --no-inline-new to track allocations from generated code")
-DEFINE_INT(gc_freelist_strategy, 5,
- "Freelist strategy to use: "
- "0:FreeListLegacy. "
- "1:FreeListFastAlloc. "
- "2:FreeListMany. "
- "3:FreeListManyCached. "
- "4:FreeListManyCachedFastPath. "
- "5:FreeListManyCachedOrigin. ")
DEFINE_INT(trace_allocation_stack_interval, -1,
"print stack trace after <n> free-list allocations")
@@ -891,6 +895,8 @@ DEFINE_INT(trace_duplicate_threshold_kb, 0,
DEFINE_BOOL(trace_fragmentation, false, "report fragmentation for old space")
DEFINE_BOOL(trace_fragmentation_verbose, false,
"report fragmentation for old space (detailed)")
+DEFINE_BOOL(minor_mc_trace_fragmentation, false,
+ "trace fragmentation after marking")
DEFINE_BOOL(trace_evacuation, false, "report evacuation statistics")
DEFINE_BOOL(trace_mutator_utilization, false,
"print mutator utilization, allocation speed, gc speed")
@@ -909,7 +915,7 @@ DEFINE_BOOL(parallel_scavenge, true, "parallel scavenge")
DEFINE_BOOL(scavenge_task, true, "schedule scavenge tasks")
DEFINE_INT(scavenge_task_trigger, 80,
"scavenge task trigger in percent of the current heap limit")
-DEFINE_BOOL(scavenge_separate_stack_scanning, true,
+DEFINE_BOOL(scavenge_separate_stack_scanning, false,
"use a separate phase for stack scanning in scavenge")
DEFINE_BOOL(trace_parallel_scavenge, false, "trace parallel scavenge")
DEFINE_BOOL(write_protect_code_memory, true, "write protect code memory")
@@ -933,7 +939,12 @@ DEFINE_BOOL(concurrent_array_buffer_sweeping, true,
DEFINE_BOOL(concurrent_allocation, false, "concurrently allocate in old space")
DEFINE_BOOL(local_heaps, false, "allow heap access from background tasks")
DEFINE_NEG_NEG_IMPLICATION(array_buffer_extension, local_heaps)
-DEFINE_BOOL(parallel_marking, true, "use parallel marking in atomic pause")
+DEFINE_BOOL(stress_concurrent_allocation, false,
+ "start background threads that allocate memory")
+DEFINE_IMPLICATION(stress_concurrent_allocation, concurrent_allocation)
+DEFINE_IMPLICATION(stress_concurrent_allocation, local_heaps)
+DEFINE_BOOL(parallel_marking, V8_CONCURRENT_MARKING_BOOL,
+ "use parallel marking in atomic pause")
DEFINE_INT(ephemeron_fixpoint_iterations, 10,
"number of fixpoint iterations it takes to switch to linear "
"ephemeron algorithm")
@@ -955,6 +966,13 @@ DEFINE_BOOL(track_gc_object_stats, false,
DEFINE_BOOL(trace_gc_object_stats, false,
"trace object counts and memory usage")
DEFINE_BOOL(trace_zone_stats, false, "trace zone memory usage")
+DEFINE_GENERIC_IMPLICATION(
+ trace_zone_stats,
+ TracingFlags::zone_stats.store(
+ v8::tracing::TracingCategoryObserver::ENABLED_BY_NATIVE))
+DEFINE_SIZE_T(
+ zone_stats_tolerance, 1 * MB,
+ "report a tick only when allocated zone memory changes by this amount")
DEFINE_BOOL(track_retaining_path, false,
"enable support for tracking retaining path")
DEFINE_DEBUG_BOOL(trace_backing_store, false, "trace backing store events")
@@ -1052,7 +1070,7 @@ DEFINE_BOOL(young_generation_large_objects, true,
"allocates large objects by default in the young generation large "
"object space")
-// assembler-ia32.cc / assembler-arm.cc / assembler-x64.cc
+// assembler-ia32.cc / assembler-arm.cc / assembler-arm64.cc / assembler-x64.cc
DEFINE_BOOL(debug_code, DEBUG_BOOL,
"generate extra code (assertions) for debugging")
DEFINE_BOOL(code_comments, false,
@@ -1081,6 +1099,9 @@ DEFINE_BOOL(force_long_branches, false,
DEFINE_STRING(mcpu, "auto", "enable optimization for specific cpu")
DEFINE_BOOL(partial_constant_pool, true,
"enable use of partial constant pools (X64 only)")
+DEFINE_STRING(sim_arm64_optional_features, "none",
+ "enable optional features on the simulator for testing: none or "
+ "all")
// Controlling source positions for Torque/CSA code.
DEFINE_BOOL(enable_source_at_csa_bind, false,
@@ -1143,7 +1164,8 @@ DEFINE_BOOL(inline_new, true, "use fast inline allocation")
DEFINE_NEG_NEG_IMPLICATION(inline_new, turbo_allocation_folding)
// codegen-ia32.cc / codegen-arm.cc
-DEFINE_BOOL(trace, false, "trace function calls")
+DEFINE_BOOL(trace, false, "trace javascript function calls")
+DEFINE_BOOL(trace_wasm, false, "trace wasm function calls")
// codegen.cc
DEFINE_BOOL(lazy, true, "use lazy compilation")
@@ -1244,7 +1266,6 @@ DEFINE_BOOL(native_code_counters, DEBUG_BOOL,
DEFINE_BOOL(thin_strings, true, "Enable ThinString support")
DEFINE_BOOL(trace_prototype_users, false,
"Trace updates to prototype user tracking")
-DEFINE_BOOL(use_verbose_printer, true, "allows verbose printing")
DEFINE_BOOL(trace_for_in_enumerate, false, "Trace for-in enumerate slow-paths")
DEFINE_BOOL(trace_maps, false, "trace map creation")
DEFINE_BOOL(trace_maps_details, true, "also log map details")
@@ -1252,15 +1273,11 @@ DEFINE_IMPLICATION(trace_maps, log_code)
// parser.cc
DEFINE_BOOL(allow_natives_syntax, false, "allow natives syntax")
-DEFINE_BOOL(allow_natives_for_fuzzing, false,
- "allow only natives explicitly whitelisted for fuzzers")
DEFINE_BOOL(allow_natives_for_differential_fuzzing, false,
- "allow only natives explicitly whitelisted for differential "
+ "allow only natives explicitly allowlisted for differential "
"fuzzers")
DEFINE_IMPLICATION(allow_natives_for_differential_fuzzing, allow_natives_syntax)
-DEFINE_IMPLICATION(allow_natives_for_fuzzing, allow_natives_syntax)
-DEFINE_IMPLICATION(allow_natives_for_differential_fuzzing,
- allow_natives_for_fuzzing)
+DEFINE_IMPLICATION(allow_natives_for_differential_fuzzing, fuzzing)
DEFINE_BOOL(parse_only, false, "only parse the sources")
// simulator-arm.cc, simulator-arm64.cc and simulator-mips.cc
@@ -1431,7 +1448,6 @@ DEFINE_BOOL(dump_counters, false, "Dump counters on exit")
DEFINE_BOOL(dump_counters_nvp, false,
"Dump counters as name-value pairs on exit")
DEFINE_BOOL(use_external_strings, false, "Use external strings for source code")
-
DEFINE_STRING(map_counters, "", "Map counters to a file")
DEFINE_BOOL(mock_arraybuffer_allocator, false,
"Use a mock ArrayBuffer allocator for testing.")
diff --git a/chromium/v8/src/handles/global-handles.cc b/chromium/v8/src/handles/global-handles.cc
index e6dbd6ad450..4404b0b6379 100644
--- a/chromium/v8/src/handles/global-handles.cc
+++ b/chromium/v8/src/handles/global-handles.cc
@@ -575,7 +575,7 @@ class GlobalHandles::Node final : public NodeBase<GlobalHandles::Node> {
void PostGarbageCollectionProcessing(Isolate* isolate) {
// This method invokes a finalizer. Updating the method name would require
- // adjusting CFI blacklist as weak_callback_ is invoked on the wrong type.
+ // adjusting CFI blocklist as weak_callback_ is invoked on the wrong type.
CHECK(IsPendingFinalizer());
set_state(NEAR_DEATH);
// Check that we are not passing a finalized external string to
diff --git a/chromium/v8/src/handles/handles-inl.h b/chromium/v8/src/handles/handles-inl.h
index c8b4b4556bc..b16044871c4 100644
--- a/chromium/v8/src/handles/handles-inl.h
+++ b/chromium/v8/src/handles/handles-inl.h
@@ -6,6 +6,7 @@
#define V8_HANDLES_HANDLES_INL_H_
#include "src/execution/isolate.h"
+#include "src/execution/local-isolate-wrapper.h"
#include "src/execution/off-thread-isolate.h"
#include "src/handles/handles.h"
#include "src/handles/local-handles-inl.h"
@@ -66,19 +67,11 @@ V8_INLINE Handle<T> handle(T object, LocalHeap* local_heap) {
return Handle<T>(object, local_heap);
}
-// Convenience overloads for when we already have a Handle, but want
-// either a Handle or an Handle.
template <typename T>
-V8_INLINE Handle<T> handle(Handle<T> handle, Isolate* isolate) {
- return handle;
-}
-template <typename T>
-V8_INLINE Handle<T> handle(Handle<T> handle, OffThreadIsolate* isolate) {
- return Handle<T>(*handle);
-}
-template <typename T>
-V8_INLINE Handle<T> handle(Handle<T> handle, LocalHeap* local_heap) {
- return Handle<T>(*handle, local_heap);
+V8_INLINE Handle<T> handle(T object, LocalIsolateWrapper local_isolate) {
+ return local_isolate.is_off_thread()
+ ? handle(object, local_isolate.off_thread())
+ : handle(object, local_isolate.main_thread());
}
template <typename T>
diff --git a/chromium/v8/src/handles/handles.h b/chromium/v8/src/handles/handles.h
index aa9e522c0e6..a6a83dcca2d 100644
--- a/chromium/v8/src/handles/handles.h
+++ b/chromium/v8/src/handles/handles.h
@@ -199,6 +199,15 @@ class HandleScope {
explicit inline HandleScope(Isolate* isolate);
inline HandleScope(HandleScope&& other) V8_NOEXCEPT;
+ // Allow placement new.
+ void* operator new(size_t size, void* storage) {
+ return ::operator new(size, storage);
+ }
+
+ // Prevent heap allocation or illegal handle scopes.
+ void* operator new(size_t size) = delete;
+ void operator delete(void* size_t) = delete;
+
inline ~HandleScope();
inline HandleScope& operator=(HandleScope&& other) V8_NOEXCEPT;
@@ -234,10 +243,6 @@ class HandleScope {
static const int kCheckHandleThreshold = 30 * 1024;
private:
- // Prevent heap allocation or illegal handle scopes.
- void* operator new(size_t size);
- void operator delete(void* size_t);
-
Isolate* isolate_;
Address* prev_next_;
Address* prev_limit_;
diff --git a/chromium/v8/src/handles/off-thread-transfer-handle-storage-inl.h b/chromium/v8/src/handles/off-thread-transfer-handle-storage-inl.h
new file mode 100644
index 00000000000..8a0682d9c76
--- /dev/null
+++ b/chromium/v8/src/handles/off-thread-transfer-handle-storage-inl.h
@@ -0,0 +1,77 @@
+// Copyright 2020 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_HANDLES_OFF_THREAD_TRANSFER_HANDLE_STORAGE_INL_H_
+#define V8_HANDLES_OFF_THREAD_TRANSFER_HANDLE_STORAGE_INL_H_
+
+#include "src/handles/handles-inl.h"
+#include "src/handles/off-thread-transfer-handle-storage.h"
+
+namespace v8 {
+namespace internal {
+
+OffThreadTransferHandleStorage::OffThreadTransferHandleStorage(
+ Address* off_thread_handle_location,
+ std::unique_ptr<OffThreadTransferHandleStorage> next)
+ : handle_location_(off_thread_handle_location),
+ next_(std::move(next)),
+ state_(kOffThreadHandle) {
+ CheckValid();
+}
+
+void OffThreadTransferHandleStorage::ConvertFromOffThreadHandleOnFinish() {
+ CheckValid();
+ DCHECK_EQ(state_, kOffThreadHandle);
+ raw_obj_ptr_ = *handle_location_;
+ state_ = kRawObject;
+ CheckValid();
+}
+
+void OffThreadTransferHandleStorage::ConvertToHandleOnPublish(
+ Isolate* isolate, DisallowHeapAllocation* no_gc) {
+ CheckValid();
+ DCHECK_EQ(state_, kRawObject);
+ handle_location_ = handle(Object(raw_obj_ptr_), isolate).location();
+ state_ = kHandle;
+ CheckValid();
+}
+
+Address* OffThreadTransferHandleStorage::handle_location() const {
+ CheckValid();
+ DCHECK_EQ(state_, kHandle);
+ return handle_location_;
+}
+
+void OffThreadTransferHandleStorage::CheckValid() const {
+#ifdef DEBUG
+ Object obj;
+
+ switch (state_) {
+ case kHandle:
+ case kOffThreadHandle:
+ DCHECK_NOT_NULL(handle_location_);
+ obj = Object(*handle_location_);
+ break;
+ case kRawObject:
+ obj = Object(raw_obj_ptr_);
+ break;
+ }
+
+ // Smis are always fine.
+ if (obj.IsSmi()) return;
+
+ // The main-thread handle should not be in off-thread space, and vice versa.
+ // Raw object pointers can point to the main-thread heap during Publish, so
+ // we don't check that.
+ DCHECK_IMPLIES(state_ == kOffThreadHandle,
+ Heap::InOffThreadSpace(HeapObject::cast(obj)));
+ DCHECK_IMPLIES(state_ == kHandle,
+ !Heap::InOffThreadSpace(HeapObject::cast(obj)));
+#endif
+}
+
+} // namespace internal
+} // namespace v8
+
+#endif // V8_HANDLES_OFF_THREAD_TRANSFER_HANDLE_STORAGE_INL_H_
diff --git a/chromium/v8/src/handles/off-thread-transfer-handle-storage.h b/chromium/v8/src/handles/off-thread-transfer-handle-storage.h
new file mode 100644
index 00000000000..608ad69693d
--- /dev/null
+++ b/chromium/v8/src/handles/off-thread-transfer-handle-storage.h
@@ -0,0 +1,47 @@
+// Copyright 2020 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_HANDLES_OFF_THREAD_TRANSFER_HANDLE_STORAGE_H_
+#define V8_HANDLES_OFF_THREAD_TRANSFER_HANDLE_STORAGE_H_
+
+#include "src/common/assert-scope.h"
+#include "src/handles/handles.h"
+
+namespace v8 {
+namespace internal {
+
+class OffThreadTransferHandleStorage {
+ public:
+ enum State { kOffThreadHandle, kRawObject, kHandle };
+
+ inline explicit OffThreadTransferHandleStorage(
+ Address* off_thread_handle_location,
+ std::unique_ptr<OffThreadTransferHandleStorage> next);
+
+ inline void ConvertFromOffThreadHandleOnFinish();
+
+ inline void ConvertToHandleOnPublish(Isolate* isolate,
+ DisallowHeapAllocation* no_gc);
+
+ inline Address* handle_location() const;
+
+ OffThreadTransferHandleStorage* next() { return next_.get(); }
+
+ State state() const { return state_; }
+
+ private:
+ inline void CheckValid() const;
+
+ union {
+ Address* handle_location_;
+ Address raw_obj_ptr_;
+ };
+ std::unique_ptr<OffThreadTransferHandleStorage> next_;
+ State state_;
+};
+
+} // namespace internal
+} // namespace v8
+
+#endif // V8_HANDLES_OFF_THREAD_TRANSFER_HANDLE_STORAGE_H_
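The state machine above (off-thread handle -> raw object -> main-thread handle) can be hard to see through the DCHECK plumbing. Below is a minimal stand-alone sketch of that transition order, using plain uintptr_t slots instead of V8's Address/Handle machinery; TransferStorageModel and its members are hypothetical names for illustration only, not part of this patch.

// Simplified model of the three-state transfer storage described above.
#include <cassert>
#include <cstdint>

enum class State { kOffThreadHandle, kRawObject, kHandle };

class TransferStorageModel {
 public:
  explicit TransferStorageModel(const uintptr_t* off_thread_location)
      : location_(off_thread_location), state_(State::kOffThreadHandle) {}

  // Off-thread work finished: snapshot the raw value so the off-thread
  // handle scope can go away.
  void ConvertFromOffThreadHandleOnFinish() {
    assert(state_ == State::kOffThreadHandle);
    raw_value_ = *location_;
    state_ = State::kRawObject;
  }

  // Publishing to the main thread: store the raw value into a main-thread slot.
  void ConvertToHandleOnPublish(uintptr_t* main_thread_slot) {
    assert(state_ == State::kRawObject);
    *main_thread_slot = raw_value_;
    location_ = main_thread_slot;
    state_ = State::kHandle;
  }

  const uintptr_t* location() const {
    assert(state_ == State::kHandle);
    return location_;
  }

 private:
  const uintptr_t* location_;
  uintptr_t raw_value_ = 0;
  State state_;
};

int main() {
  uintptr_t off_thread_slot = 0x1234;
  uintptr_t main_thread_slot = 0;
  TransferStorageModel storage(&off_thread_slot);
  storage.ConvertFromOffThreadHandleOnFinish();
  storage.ConvertToHandleOnPublish(&main_thread_slot);
  assert(*storage.location() == 0x1234);
}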
diff --git a/chromium/v8/src/heap/allocation-stats.h b/chromium/v8/src/heap/allocation-stats.h
new file mode 100644
index 00000000000..b05158f91b4
--- /dev/null
+++ b/chromium/v8/src/heap/allocation-stats.h
@@ -0,0 +1,117 @@
+// Copyright 2020 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_HEAP_ALLOCATION_STATS_H_
+#define V8_HEAP_ALLOCATION_STATS_H_
+
+#include <atomic>
+#include <unordered_map>
+
+#include "src/base/macros.h"
+#include "src/heap/basic-memory-chunk.h"
+
+namespace v8 {
+namespace internal {
+
+// An abstraction of the accounting statistics of a page-structured space.
+//
+// The stats are only set by functions that ensure they stay balanced. These
+// functions increase or decrease one of the non-capacity stats in conjunction
+// with capacity, or else they always balance increases and decreases to the
+// non-capacity stats.
+class AllocationStats {
+ public:
+ AllocationStats() { Clear(); }
+
+ AllocationStats& operator=(const AllocationStats& stats) V8_NOEXCEPT {
+ capacity_ = stats.capacity_.load();
+ max_capacity_ = stats.max_capacity_;
+ size_.store(stats.size_);
+#ifdef DEBUG
+ allocated_on_page_ = stats.allocated_on_page_;
+#endif
+ return *this;
+ }
+
+ // Zero out all the allocation statistics (i.e., no capacity).
+ void Clear() {
+ capacity_ = 0;
+ max_capacity_ = 0;
+ ClearSize();
+ }
+
+ void ClearSize() {
+ size_ = 0;
+#ifdef DEBUG
+ allocated_on_page_.clear();
+#endif
+ }
+
+ // Accessors for the allocation statistics.
+ size_t Capacity() { return capacity_; }
+ size_t MaxCapacity() { return max_capacity_; }
+ size_t Size() { return size_; }
+#ifdef DEBUG
+ size_t AllocatedOnPage(BasicMemoryChunk* page) {
+ return allocated_on_page_[page];
+ }
+#endif
+
+ void IncreaseAllocatedBytes(size_t bytes, BasicMemoryChunk* page) {
+#ifdef DEBUG
+ size_t size = size_;
+ DCHECK_GE(size + bytes, size);
+#endif
+ size_.fetch_add(bytes);
+#ifdef DEBUG
+ allocated_on_page_[page] += bytes;
+#endif
+ }
+
+ void DecreaseAllocatedBytes(size_t bytes, BasicMemoryChunk* page) {
+ DCHECK_GE(size_, bytes);
+ size_.fetch_sub(bytes);
+#ifdef DEBUG
+ DCHECK_GE(allocated_on_page_[page], bytes);
+ allocated_on_page_[page] -= bytes;
+#endif
+ }
+
+ void DecreaseCapacity(size_t bytes) {
+ DCHECK_GE(capacity_, bytes);
+ DCHECK_GE(capacity_ - bytes, size_);
+ capacity_ -= bytes;
+ }
+
+ void IncreaseCapacity(size_t bytes) {
+ DCHECK_GE(capacity_ + bytes, capacity_);
+ capacity_ += bytes;
+ if (capacity_ > max_capacity_) {
+ max_capacity_ = capacity_;
+ }
+ }
+
+ private:
+ // |capacity_|: The number of object-area bytes (i.e., not including page
+ // bookkeeping structures) currently in the space.
+ // During evacuation capacity of the main spaces is accessed from multiple
+ // threads to check the old generation hard limit.
+ std::atomic<size_t> capacity_;
+
+ // |max_capacity_|: The maximum capacity ever observed.
+ size_t max_capacity_;
+
+ // |size_|: The number of allocated bytes.
+ std::atomic<size_t> size_;
+
+#ifdef DEBUG
+ std::unordered_map<BasicMemoryChunk*, size_t, BasicMemoryChunk::Hasher>
+ allocated_on_page_;
+#endif
+};
+
+} // namespace internal
+} // namespace v8
+
+#endif // V8_HEAP_ALLOCATION_STATS_H_
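As the class comment above says, the stats only change through balanced operations: size never exceeds capacity, and max capacity tracks the high-water mark of capacity. The stand-alone sketch below illustrates that contract with plain std::atomic counters; StatsModel is a hypothetical name and the example values are arbitrary.

// Minimal model of the balancing contract of AllocationStats.
#include <atomic>
#include <cassert>
#include <cstddef>

class StatsModel {
 public:
  void IncreaseCapacity(size_t bytes) {
    capacity_ += bytes;
    if (capacity_ > max_capacity_) max_capacity_ = capacity_;
  }
  void DecreaseCapacity(size_t bytes) {
    // Capacity may only shrink while still covering the allocated size.
    assert(capacity_ >= bytes && capacity_ - bytes >= size_);
    capacity_ -= bytes;
  }
  void IncreaseAllocatedBytes(size_t bytes) { size_.fetch_add(bytes); }
  void DecreaseAllocatedBytes(size_t bytes) {
    assert(size_ >= bytes);
    size_.fetch_sub(bytes);
  }
  size_t Capacity() const { return capacity_; }
  size_t MaxCapacity() const { return max_capacity_; }
  size_t Size() const { return size_; }

 private:
  std::atomic<size_t> capacity_{0};
  size_t max_capacity_ = 0;
  std::atomic<size_t> size_{0};
};

int main() {
  StatsModel stats;
  stats.IncreaseCapacity(4096);      // a page is added to the space
  stats.IncreaseAllocatedBytes(256);
  stats.DecreaseAllocatedBytes(128);
  stats.DecreaseCapacity(1024);      // part of the page is released
  assert(stats.Size() == 128 && stats.Capacity() == 3072);
  assert(stats.MaxCapacity() == 4096);
}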
diff --git a/chromium/v8/src/heap/base-space.cc b/chromium/v8/src/heap/base-space.cc
new file mode 100644
index 00000000000..aabbeaebf54
--- /dev/null
+++ b/chromium/v8/src/heap/base-space.cc
@@ -0,0 +1,33 @@
+// Copyright 2020 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/heap/base-space.h"
+
+namespace v8 {
+namespace internal {
+
+const char* BaseSpace::GetSpaceName(AllocationSpace space) {
+ switch (space) {
+ case NEW_SPACE:
+ return "new_space";
+ case OLD_SPACE:
+ return "old_space";
+ case MAP_SPACE:
+ return "map_space";
+ case CODE_SPACE:
+ return "code_space";
+ case LO_SPACE:
+ return "large_object_space";
+ case NEW_LO_SPACE:
+ return "new_large_object_space";
+ case CODE_LO_SPACE:
+ return "code_large_object_space";
+ case RO_SPACE:
+ return "read_only_space";
+ }
+ UNREACHABLE();
+}
+
+} // namespace internal
+} // namespace v8
diff --git a/chromium/v8/src/heap/base-space.h b/chromium/v8/src/heap/base-space.h
new file mode 100644
index 00000000000..4b121e470cd
--- /dev/null
+++ b/chromium/v8/src/heap/base-space.h
@@ -0,0 +1,81 @@
+// Copyright 2020 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_HEAP_BASE_SPACE_H_
+#define V8_HEAP_BASE_SPACE_H_
+
+#include <atomic>
+
+#include "src/base/macros.h"
+#include "src/common/globals.h"
+#include "src/logging/log.h"
+#include "src/utils/allocation.h"
+
+namespace v8 {
+namespace internal {
+
+class Heap;
+
+// ----------------------------------------------------------------------------
+// BaseSpace is the abstract superclass for all allocation spaces.
+class V8_EXPORT_PRIVATE BaseSpace : public Malloced {
+ public:
+ Heap* heap() const {
+ DCHECK_NOT_NULL(heap_);
+ return heap_;
+ }
+
+ AllocationSpace identity() { return id_; }
+
+ // Returns name of the space.
+ static const char* GetSpaceName(AllocationSpace space);
+
+ const char* name() { return GetSpaceName(id_); }
+
+ void AccountCommitted(size_t bytes) {
+ DCHECK_GE(committed_ + bytes, committed_);
+ committed_ += bytes;
+ if (committed_ > max_committed_) {
+ max_committed_ = committed_;
+ }
+ }
+
+ void AccountUncommitted(size_t bytes) {
+ DCHECK_GE(committed_, committed_ - bytes);
+ committed_ -= bytes;
+ }
+
+ // Return the total amount of committed memory for this space, i.e., allocatable
+ // memory and page headers.
+ virtual size_t CommittedMemory() { return committed_; }
+
+ virtual size_t MaximumCommittedMemory() { return max_committed_; }
+
+ // Approximate amount of physical memory committed for this space.
+ virtual size_t CommittedPhysicalMemory() = 0;
+
+ // Returns allocated size.
+ virtual size_t Size() = 0;
+
+ protected:
+ BaseSpace(Heap* heap, AllocationSpace id)
+ : heap_(heap), id_(id), committed_(0), max_committed_(0) {}
+
+ virtual ~BaseSpace() = default;
+
+ protected:
+ Heap* heap_;
+ AllocationSpace id_;
+
+ // Keeps track of committed memory in a space.
+ std::atomic<size_t> committed_;
+ size_t max_committed_;
+
+ DISALLOW_COPY_AND_ASSIGN(BaseSpace);
+};
+
+} // namespace internal
+} // namespace v8
+
+#endif // V8_HEAP_BASE_SPACE_H_
diff --git a/chromium/v8/src/heap/cppgc/asm/arm/push_registers_asm.cc b/chromium/v8/src/heap/base/asm/arm/push_registers_asm.cc
index 5246c3f6c3e..5246c3f6c3e 100644
--- a/chromium/v8/src/heap/cppgc/asm/arm/push_registers_asm.cc
+++ b/chromium/v8/src/heap/base/asm/arm/push_registers_asm.cc
diff --git a/chromium/v8/src/heap/cppgc/asm/arm64/push_registers_asm.cc b/chromium/v8/src/heap/base/asm/arm64/push_registers_asm.cc
index 30d4de1f308..30d4de1f308 100644
--- a/chromium/v8/src/heap/cppgc/asm/arm64/push_registers_asm.cc
+++ b/chromium/v8/src/heap/base/asm/arm64/push_registers_asm.cc
diff --git a/chromium/v8/src/heap/cppgc/asm/arm64/push_registers_masm.S b/chromium/v8/src/heap/base/asm/arm64/push_registers_masm.S
index 9773654ffcf..9773654ffcf 100644
--- a/chromium/v8/src/heap/cppgc/asm/arm64/push_registers_masm.S
+++ b/chromium/v8/src/heap/base/asm/arm64/push_registers_masm.S
diff --git a/chromium/v8/src/heap/cppgc/asm/ia32/push_registers_asm.cc b/chromium/v8/src/heap/base/asm/ia32/push_registers_asm.cc
index ed9c14a50e9..ed9c14a50e9 100644
--- a/chromium/v8/src/heap/cppgc/asm/ia32/push_registers_asm.cc
+++ b/chromium/v8/src/heap/base/asm/ia32/push_registers_asm.cc
diff --git a/chromium/v8/src/heap/cppgc/asm/ia32/push_registers_masm.S b/chromium/v8/src/heap/base/asm/ia32/push_registers_masm.S
index a35fd6e527d..a35fd6e527d 100644
--- a/chromium/v8/src/heap/cppgc/asm/ia32/push_registers_masm.S
+++ b/chromium/v8/src/heap/base/asm/ia32/push_registers_masm.S
diff --git a/chromium/v8/src/heap/cppgc/asm/mips/push_registers_asm.cc b/chromium/v8/src/heap/base/asm/mips/push_registers_asm.cc
index 4a46caa6c52..4a46caa6c52 100644
--- a/chromium/v8/src/heap/cppgc/asm/mips/push_registers_asm.cc
+++ b/chromium/v8/src/heap/base/asm/mips/push_registers_asm.cc
diff --git a/chromium/v8/src/heap/cppgc/asm/mips64/push_registers_asm.cc b/chromium/v8/src/heap/base/asm/mips64/push_registers_asm.cc
index 6befa3bcc0c..6befa3bcc0c 100644
--- a/chromium/v8/src/heap/cppgc/asm/mips64/push_registers_asm.cc
+++ b/chromium/v8/src/heap/base/asm/mips64/push_registers_asm.cc
diff --git a/chromium/v8/src/heap/cppgc/asm/ppc/push_registers_asm.cc b/chromium/v8/src/heap/base/asm/ppc/push_registers_asm.cc
index 6936819ba2b..6936819ba2b 100644
--- a/chromium/v8/src/heap/cppgc/asm/ppc/push_registers_asm.cc
+++ b/chromium/v8/src/heap/base/asm/ppc/push_registers_asm.cc
diff --git a/chromium/v8/src/heap/cppgc/asm/s390/push_registers_asm.cc b/chromium/v8/src/heap/base/asm/s390/push_registers_asm.cc
index 6b9b2c08536..6b9b2c08536 100644
--- a/chromium/v8/src/heap/cppgc/asm/s390/push_registers_asm.cc
+++ b/chromium/v8/src/heap/base/asm/s390/push_registers_asm.cc
diff --git a/chromium/v8/src/heap/cppgc/asm/x64/push_registers_asm.cc b/chromium/v8/src/heap/base/asm/x64/push_registers_asm.cc
index 68f7918c93c..68f7918c93c 100644
--- a/chromium/v8/src/heap/cppgc/asm/x64/push_registers_asm.cc
+++ b/chromium/v8/src/heap/base/asm/x64/push_registers_asm.cc
diff --git a/chromium/v8/src/heap/cppgc/asm/x64/push_registers_masm.S b/chromium/v8/src/heap/base/asm/x64/push_registers_masm.S
index 627843830fa..627843830fa 100644
--- a/chromium/v8/src/heap/cppgc/asm/x64/push_registers_masm.S
+++ b/chromium/v8/src/heap/base/asm/x64/push_registers_masm.S
diff --git a/chromium/v8/src/heap/cppgc/stack.cc b/chromium/v8/src/heap/base/stack.cc
index b99693708c6..cd284444747 100644
--- a/chromium/v8/src/heap/cppgc/stack.cc
+++ b/chromium/v8/src/heap/base/stack.cc
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "src/heap/cppgc/stack.h"
+#include "src/heap/base/stack.h"
#include <limits>
@@ -10,8 +10,8 @@
#include "src/heap/cppgc/globals.h"
#include "src/heap/cppgc/sanitizers.h"
-namespace cppgc {
-namespace internal {
+namespace heap {
+namespace base {
using IterateStackCallback = void (*)(const Stack*, StackVisitor*, intptr_t*);
extern "C" void PushAllRegistersAndIterateStack(const Stack*, StackVisitor*,
@@ -125,5 +125,5 @@ void Stack::IteratePointers(StackVisitor* visitor) const {
IterateSafeStackIfNecessary(visitor);
}
-} // namespace internal
-} // namespace cppgc
+} // namespace base
+} // namespace heap
diff --git a/chromium/v8/src/heap/cppgc/stack.h b/chromium/v8/src/heap/base/stack.h
index 3f561aed08e..a46e6e660ed 100644
--- a/chromium/v8/src/heap/cppgc/stack.h
+++ b/chromium/v8/src/heap/base/stack.h
@@ -2,13 +2,13 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#ifndef V8_HEAP_CPPGC_STACK_H_
-#define V8_HEAP_CPPGC_STACK_H_
+#ifndef V8_HEAP_BASE_STACK_H_
+#define V8_HEAP_BASE_STACK_H_
#include "src/base/macros.h"
-namespace cppgc {
-namespace internal {
+namespace heap {
+namespace base {
class StackVisitor {
public:
@@ -37,7 +37,7 @@ class V8_EXPORT_PRIVATE Stack final {
const void* stack_start_;
};
-} // namespace internal
-} // namespace cppgc
+} // namespace base
+} // namespace heap
-#endif // V8_HEAP_CPPGC_STACK_H_
+#endif // V8_HEAP_BASE_STACK_H_
diff --git a/chromium/v8/src/heap/basic-memory-chunk.cc b/chromium/v8/src/heap/basic-memory-chunk.cc
index fa94f60f4ec..50eb8392915 100644
--- a/chromium/v8/src/heap/basic-memory-chunk.cc
+++ b/chromium/v8/src/heap/basic-memory-chunk.cc
@@ -7,8 +7,8 @@
#include <cstdlib>
#include "src/heap/heap-write-barrier-inl.h"
+#include "src/heap/incremental-marking.h"
#include "src/objects/heap-object.h"
-#include "src/objects/slots-inl.h"
namespace v8 {
namespace internal {
@@ -39,5 +39,41 @@ void BasicMemoryChunk::ReleaseMarkingBitmap() {
marking_bitmap_ = nullptr;
}
+// static
+BasicMemoryChunk* BasicMemoryChunk::Initialize(Heap* heap, Address base,
+ size_t size, Address area_start,
+ Address area_end,
+ BaseSpace* owner,
+ VirtualMemory reservation) {
+ BasicMemoryChunk* chunk = FromAddress(base);
+ DCHECK_EQ(base, chunk->address());
+ new (chunk) BasicMemoryChunk(size, area_start, area_end);
+
+ chunk->heap_ = heap;
+ chunk->set_owner(owner);
+ chunk->reservation_ = std::move(reservation);
+ chunk->high_water_mark_ = static_cast<intptr_t>(area_start - base);
+ chunk->allocated_bytes_ = chunk->area_size();
+ chunk->wasted_memory_ = 0;
+
+ return chunk;
+}
+
+bool BasicMemoryChunk::InOldSpace() const {
+ return owner()->identity() == OLD_SPACE;
+}
+
+bool BasicMemoryChunk::InLargeObjectSpace() const {
+ return owner()->identity() == LO_SPACE;
+}
+
+#ifdef THREAD_SANITIZER
+void BasicMemoryChunk::SynchronizedHeapLoad() {
+ CHECK(reinterpret_cast<Heap*>(base::Acquire_Load(
+ reinterpret_cast<base::AtomicWord*>(&heap_))) != nullptr ||
+ InReadOnlySpace());
+}
+#endif
+
} // namespace internal
} // namespace v8
diff --git a/chromium/v8/src/heap/basic-memory-chunk.h b/chromium/v8/src/heap/basic-memory-chunk.h
index 205d02ce247..8d8fff39fbe 100644
--- a/chromium/v8/src/heap/basic-memory-chunk.h
+++ b/chromium/v8/src/heap/basic-memory-chunk.h
@@ -6,25 +6,29 @@
#define V8_HEAP_BASIC_MEMORY_CHUNK_H_
#include <type_traits>
+#include <unordered_map>
#include "src/base/atomic-utils.h"
#include "src/common/globals.h"
+#include "src/flags/flags.h"
#include "src/heap/marking.h"
-#include "src/heap/slot-set.h"
+#include "src/objects/heap-object.h"
+#include "src/utils/allocation.h"
namespace v8 {
namespace internal {
-class MemoryChunk;
-
-enum RememberedSetType {
- OLD_TO_NEW,
- OLD_TO_OLD,
- NUMBER_OF_REMEMBERED_SET_TYPES
-};
+class BaseSpace;
class BasicMemoryChunk {
public:
+ // Use with std data structures.
+ struct Hasher {
+ size_t operator()(BasicMemoryChunk* const chunk) const {
+ return reinterpret_cast<size_t>(chunk) >> kPageSizeBits;
+ }
+ };
+
enum Flag {
NO_FLAGS = 0u,
IS_EXECUTABLE = 1u << 0,
@@ -109,11 +113,30 @@ class BasicMemoryChunk {
Address address() const { return reinterpret_cast<Address>(this); }
+ // Returns the offset of a given address to this page.
+ inline size_t Offset(Address a) { return static_cast<size_t>(a - address()); }
+
+ // Returns the address for a given offset in this page.
+ Address OffsetToAddress(size_t offset) {
+ Address address_in_page = address() + offset;
+ DCHECK_GE(address_in_page, area_start());
+ DCHECK_LT(address_in_page, area_end());
+ return address_in_page;
+ }
+
+ // Some callers rely on the fact that this can operate on both
+ // tagged and aligned object addresses.
+ inline uint32_t AddressToMarkbitIndex(Address addr) const {
+ return static_cast<uint32_t>(addr - this->address()) >> kTaggedSizeLog2;
+ }
+
+ inline Address MarkbitIndexToAddress(uint32_t index) const {
+ return this->address() + (index << kTaggedSizeLog2);
+ }
+
size_t size() const { return size_; }
void set_size(size_t size) { size_ = size; }
- size_t buckets() const { return SlotSet::BucketsForSize(size()); }
-
Address area_start() const { return area_start_; }
Address area_end() const { return area_end_; }
@@ -123,6 +146,16 @@ class BasicMemoryChunk {
return static_cast<size_t>(area_end() - area_start());
}
+ Heap* heap() const {
+ DCHECK_NOT_NULL(heap_);
+ return heap_;
+ }
+
+ // Gets the chunk's owner or null if the space has been detached.
+ BaseSpace* owner() const { return owner_; }
+
+ void set_owner(BaseSpace* space) { owner_ = space; }
+
template <AccessMode access_mode = AccessMode::NON_ATOMIC>
void SetFlag(Flag flag) {
if (access_mode == AccessMode::NON_ATOMIC) {
@@ -155,9 +188,69 @@ class BasicMemoryChunk {
}
}
+ using Flags = uintptr_t;
+
+ static const Flags kPointersToHereAreInterestingMask =
+ POINTERS_TO_HERE_ARE_INTERESTING;
+
+ static const Flags kPointersFromHereAreInterestingMask =
+ POINTERS_FROM_HERE_ARE_INTERESTING;
+
+ static const Flags kEvacuationCandidateMask = EVACUATION_CANDIDATE;
+
+ static const Flags kIsInYoungGenerationMask = FROM_PAGE | TO_PAGE;
+
+ static const Flags kIsLargePageMask = LARGE_PAGE;
+
+ static const Flags kSkipEvacuationSlotsRecordingMask =
+ kEvacuationCandidateMask | kIsInYoungGenerationMask;
+
bool InReadOnlySpace() const { return IsFlagSet(READ_ONLY_HEAP); }
- // TODO(v8:7464): Add methods for down casting to MemoryChunk.
+ bool NeverEvacuate() { return IsFlagSet(NEVER_EVACUATE); }
+
+ void MarkNeverEvacuate() { SetFlag(NEVER_EVACUATE); }
+
+ bool CanAllocate() {
+ return !IsEvacuationCandidate() && !IsFlagSet(NEVER_ALLOCATE_ON_PAGE);
+ }
+
+ template <AccessMode access_mode = AccessMode::NON_ATOMIC>
+ bool IsEvacuationCandidate() {
+ DCHECK(!(IsFlagSet<access_mode>(NEVER_EVACUATE) &&
+ IsFlagSet<access_mode>(EVACUATION_CANDIDATE)));
+ return IsFlagSet<access_mode>(EVACUATION_CANDIDATE);
+ }
+
+ template <AccessMode access_mode = AccessMode::NON_ATOMIC>
+ bool ShouldSkipEvacuationSlotRecording() {
+ uintptr_t flags = GetFlags<access_mode>();
+ return ((flags & kSkipEvacuationSlotsRecordingMask) != 0) &&
+ ((flags & COMPACTION_WAS_ABORTED) == 0);
+ }
+
+ Executability executable() {
+ return IsFlagSet(IS_EXECUTABLE) ? EXECUTABLE : NOT_EXECUTABLE;
+ }
+
+ bool IsFromPage() const { return IsFlagSet(FROM_PAGE); }
+ bool IsToPage() const { return IsFlagSet(TO_PAGE); }
+ bool IsLargePage() const { return IsFlagSet(LARGE_PAGE); }
+ bool InYoungGeneration() const {
+ return (GetFlags() & kIsInYoungGenerationMask) != 0;
+ }
+ bool InNewSpace() const { return InYoungGeneration() && !IsLargePage(); }
+ bool InNewLargeObjectSpace() const {
+ return InYoungGeneration() && IsLargePage();
+ }
+ bool InOldSpace() const;
+ V8_EXPORT_PRIVATE bool InLargeObjectSpace() const;
+
+ bool IsWritable() const {
+ // If this is a read-only space chunk but heap_ is non-null, it has not yet
+ // been sealed and can be written to.
+ return !InReadOnlySpace() || heap_ != nullptr;
+ }
bool Contains(Address addr) const {
return addr >= area_start() && addr < area_end();
@@ -171,23 +264,92 @@ class BasicMemoryChunk {
void ReleaseMarkingBitmap();
+ static BasicMemoryChunk* Initialize(Heap* heap, Address base, size_t size,
+ Address area_start, Address area_end,
+ BaseSpace* owner,
+ VirtualMemory reservation);
+
+ size_t wasted_memory() { return wasted_memory_; }
+ void add_wasted_memory(size_t waste) { wasted_memory_ += waste; }
+ size_t allocated_bytes() { return allocated_bytes_; }
+
static const intptr_t kSizeOffset = 0;
static const intptr_t kFlagsOffset = kSizeOffset + kSizetSize;
static const intptr_t kMarkBitmapOffset = kFlagsOffset + kUIntptrSize;
static const intptr_t kHeapOffset = kMarkBitmapOffset + kSystemPointerSize;
static const intptr_t kAreaStartOffset = kHeapOffset + kSystemPointerSize;
static const intptr_t kAreaEndOffset = kAreaStartOffset + kSystemPointerSize;
- static const intptr_t kOldToNewSlotSetOffset =
- kAreaEndOffset + kSystemPointerSize;
static const size_t kHeaderSize =
- kSizeOffset + kSizetSize // size_t size
- + kUIntptrSize // uintptr_t flags_
- + kSystemPointerSize // Bitmap* marking_bitmap_
- + kSystemPointerSize // Heap* heap_
- + kSystemPointerSize // Address area_start_
- + kSystemPointerSize // Address area_end_
- + kSystemPointerSize * NUMBER_OF_REMEMBERED_SET_TYPES; // SlotSet* array
+ kSizeOffset + kSizetSize // size_t size
+ + kUIntptrSize // uintptr_t flags_
+ + kSystemPointerSize // Bitmap* marking_bitmap_
+ + kSystemPointerSize // Heap* heap_
+ + kSystemPointerSize // Address area_start_
+ + kSystemPointerSize // Address area_end_
+ + kSizetSize // size_t allocated_bytes_
+ + kSizetSize // size_t wasted_memory_
+ + kSystemPointerSize // std::atomic<intptr_t> high_water_mark_
+ + kSystemPointerSize // Address owner_
+ + 3 * kSystemPointerSize; // VirtualMemory reservation_
+
+ // Only works if the pointer is in the first kPageSize of the MemoryChunk.
+ static BasicMemoryChunk* FromAddress(Address a) {
+ DCHECK(!V8_ENABLE_THIRD_PARTY_HEAP_BOOL);
+ return reinterpret_cast<BasicMemoryChunk*>(BaseAddress(a));
+ }
+
+ // Only works if the object is in the first kPageSize of the MemoryChunk.
+ static BasicMemoryChunk* FromHeapObject(HeapObject o) {
+ DCHECK(!V8_ENABLE_THIRD_PARTY_HEAP_BOOL);
+ return reinterpret_cast<BasicMemoryChunk*>(BaseAddress(o.ptr()));
+ }
+
+ template <AccessMode mode>
+ ConcurrentBitmap<mode>* marking_bitmap() const {
+ return reinterpret_cast<ConcurrentBitmap<mode>*>(marking_bitmap_);
+ }
+
+ Address HighWaterMark() { return address() + high_water_mark_; }
+
+ static inline void UpdateHighWaterMark(Address mark) {
+ if (mark == kNullAddress) return;
+ // Need to subtract one from the mark because when a chunk is full the
+ // top points to the next address after the chunk, which effectively belongs
+ // to another chunk. See the comment to Page::FromAllocationAreaAddress.
+ BasicMemoryChunk* chunk = BasicMemoryChunk::FromAddress(mark - 1);
+ intptr_t new_mark = static_cast<intptr_t>(mark - chunk->address());
+ intptr_t old_mark = chunk->high_water_mark_.load(std::memory_order_relaxed);
+ while ((new_mark > old_mark) &&
+ !chunk->high_water_mark_.compare_exchange_weak(
+ old_mark, new_mark, std::memory_order_acq_rel)) {
+ }
+ }
+
+ VirtualMemory* reserved_memory() { return &reservation_; }
+
+ void ResetAllocationStatistics() {
+ allocated_bytes_ = area_size();
+ wasted_memory_ = 0;
+ }
+
+ void IncreaseAllocatedBytes(size_t bytes) {
+ DCHECK_LE(bytes, area_size());
+ allocated_bytes_ += bytes;
+ }
+
+ void DecreaseAllocatedBytes(size_t bytes) {
+ DCHECK_LE(bytes, area_size());
+ DCHECK_GE(allocated_bytes(), bytes);
+ allocated_bytes_ -= bytes;
+ }
+
+#ifdef THREAD_SANITIZER
+ // Perform a dummy acquire load to tell TSAN that there is no data race in
+ // mark-bit initialization. See MemoryChunk::Initialize for the corresponding
+ // release store.
+ void SynchronizedHeapLoad();
+#endif
protected:
// Overall size of the chunk, including the header and guards.
@@ -207,12 +369,31 @@ class BasicMemoryChunk {
Address area_start_;
Address area_end_;
- // A single slot set for small pages (of size kPageSize) or an array of slot
- // set for large pages. In the latter case the number of entries in the array
- // is ceil(size() / kPageSize).
- SlotSet* slot_set_[NUMBER_OF_REMEMBERED_SET_TYPES];
+ // Bytes allocated on the page, which include all objects on the page and the
+ // linear allocation area.
+ size_t allocated_bytes_;
+ // Freed memory that was not added to the free list.
+ size_t wasted_memory_;
+
+ // Assuming the initial allocation on a page is sequential, count highest
+ // number of bytes ever allocated on the page.
+ std::atomic<intptr_t> high_water_mark_;
+
+ // The space owning this memory chunk.
+ std::atomic<BaseSpace*> owner_;
+
+ // If the chunk needs to remember its memory reservation, it is stored here.
+ VirtualMemory reservation_;
friend class BasicMemoryChunkValidator;
+ friend class ConcurrentMarkingState;
+ friend class MajorMarkingState;
+ friend class MajorAtomicMarkingState;
+ friend class MajorNonAtomicMarkingState;
+ friend class MemoryAllocator;
+ friend class MinorMarkingState;
+ friend class MinorNonAtomicMarkingState;
+ friend class PagedSpace;
};
STATIC_ASSERT(std::is_standard_layout<BasicMemoryChunk>::value);
@@ -227,8 +408,6 @@ class BasicMemoryChunkValidator {
offsetof(BasicMemoryChunk, marking_bitmap_));
STATIC_ASSERT(BasicMemoryChunk::kHeapOffset ==
offsetof(BasicMemoryChunk, heap_));
- STATIC_ASSERT(BasicMemoryChunk::kOldToNewSlotSetOffset ==
- offsetof(BasicMemoryChunk, slot_set_));
};
} // namespace internal
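UpdateHighWaterMark in the hunk above is a monotonic maximum: a relaxed load followed by an acq_rel compare-exchange loop that only ever moves the mark forward. The stand-alone sketch below isolates just that loop so the memory-ordering choice is visible; it mirrors the logic shown here but is not part of the patch.

// Monotonic high-water-mark update, reduced to a stand-alone CAS loop.
#include <atomic>
#include <cassert>
#include <cstdint>

void UpdateHighWaterMark(std::atomic<intptr_t>& high_water_mark,
                         intptr_t new_mark) {
  intptr_t old_mark = high_water_mark.load(std::memory_order_relaxed);
  // Only move the mark forward; if a concurrent racer already raised it
  // further, the loop exits without writing.
  while (new_mark > old_mark &&
         !high_water_mark.compare_exchange_weak(old_mark, new_mark,
                                                std::memory_order_acq_rel)) {
  }
}

int main() {
  std::atomic<intptr_t> mark{64};
  UpdateHighWaterMark(mark, 128);  // raises the mark
  UpdateHighWaterMark(mark, 96);   // ignored: lower than the current mark
  assert(mark.load() == 128);
}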
diff --git a/chromium/v8/src/heap/code-object-registry.cc b/chromium/v8/src/heap/code-object-registry.cc
new file mode 100644
index 00000000000..ebaa29fbaeb
--- /dev/null
+++ b/chromium/v8/src/heap/code-object-registry.cc
@@ -0,0 +1,75 @@
+// Copyright 2020 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/heap/code-object-registry.h"
+
+#include <algorithm>
+
+#include "src/base/logging.h"
+
+namespace v8 {
+namespace internal {
+
+void CodeObjectRegistry::RegisterNewlyAllocatedCodeObject(Address code) {
+ auto result = code_object_registry_newly_allocated_.insert(code);
+ USE(result);
+ DCHECK(result.second);
+}
+
+void CodeObjectRegistry::RegisterAlreadyExistingCodeObject(Address code) {
+ code_object_registry_already_existing_.push_back(code);
+}
+
+void CodeObjectRegistry::Clear() {
+ code_object_registry_already_existing_.clear();
+ code_object_registry_newly_allocated_.clear();
+}
+
+void CodeObjectRegistry::Finalize() {
+ code_object_registry_already_existing_.shrink_to_fit();
+}
+
+bool CodeObjectRegistry::Contains(Address object) const {
+ return (code_object_registry_newly_allocated_.find(object) !=
+ code_object_registry_newly_allocated_.end()) ||
+ (std::binary_search(code_object_registry_already_existing_.begin(),
+ code_object_registry_already_existing_.end(),
+ object));
+}
+
+Address CodeObjectRegistry::GetCodeObjectStartFromInnerAddress(
+ Address address) const {
+ // Let's first find the object which comes right before address in the vector
+ // of already existing code objects.
+ Address already_existing_set_ = 0;
+ Address newly_allocated_set_ = 0;
+ if (!code_object_registry_already_existing_.empty()) {
+ auto it =
+ std::upper_bound(code_object_registry_already_existing_.begin(),
+ code_object_registry_already_existing_.end(), address);
+ if (it != code_object_registry_already_existing_.begin()) {
+ already_existing_set_ = *(--it);
+ }
+ }
+
+ // Next, let's find the object which comes right before address in the set
+ // of newly allocated code objects.
+ if (!code_object_registry_newly_allocated_.empty()) {
+ auto it = code_object_registry_newly_allocated_.upper_bound(address);
+ if (it != code_object_registry_newly_allocated_.begin()) {
+ newly_allocated_set_ = *(--it);
+ }
+ }
+
+ // The code object which contains the address has to be in one of the two
+ // data structures.
+ DCHECK(already_existing_set_ != 0 || newly_allocated_set_ != 0);
+
+ // The address which is closest to the given address is the code object.
+ return already_existing_set_ > newly_allocated_set_ ? already_existing_set_
+ : newly_allocated_set_;
+}
+
+} // namespace internal
+} // namespace v8
diff --git a/chromium/v8/src/heap/code-object-registry.h b/chromium/v8/src/heap/code-object-registry.h
new file mode 100644
index 00000000000..beab1766256
--- /dev/null
+++ b/chromium/v8/src/heap/code-object-registry.h
@@ -0,0 +1,38 @@
+// Copyright 2020 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_HEAP_CODE_OBJECT_REGISTRY_H_
+#define V8_HEAP_CODE_OBJECT_REGISTRY_H_
+
+#include <set>
+#include <vector>
+
+#include "src/base/macros.h"
+#include "src/common/globals.h"
+
+namespace v8 {
+namespace internal {
+
+// The CodeObjectRegistry holds all start addresses of code objects of a given
+// MemoryChunk. Each MemoryChunk owns a separate CodeObjectRegistry. The
+// CodeObjectRegistry allows fast lookup from an inner pointer of a code object
+// to the actual code object.
+class V8_EXPORT_PRIVATE CodeObjectRegistry {
+ public:
+ void RegisterNewlyAllocatedCodeObject(Address code);
+ void RegisterAlreadyExistingCodeObject(Address code);
+ void Clear();
+ void Finalize();
+ bool Contains(Address code) const;
+ Address GetCodeObjectStartFromInnerAddress(Address address) const;
+
+ private:
+ std::vector<Address> code_object_registry_already_existing_;
+ std::set<Address> code_object_registry_newly_allocated_;
+};
+
+} // namespace internal
+} // namespace v8
+
+#endif // V8_HEAP_CODE_OBJECT_REGISTRY_H_
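GetCodeObjectStartFromInnerAddress above consults both containers and returns the larger of the two candidate start addresses at or below the query. The stand-alone sketch below shows that lookup over plain uintptr_t values; CodeObjectStart is a hypothetical helper, not part of the patch.

// Two-container lookup: sorted vector of pre-existing objects plus an
// ordered set of newly allocated ones; take the closest start <= query.
#include <algorithm>
#include <cassert>
#include <cstdint>
#include <set>
#include <vector>

uintptr_t CodeObjectStart(const std::vector<uintptr_t>& already_existing,
                          const std::set<uintptr_t>& newly_allocated,
                          uintptr_t inner_address) {
  uintptr_t existing_candidate = 0;
  auto it = std::upper_bound(already_existing.begin(), already_existing.end(),
                             inner_address);
  if (it != already_existing.begin()) existing_candidate = *(--it);

  uintptr_t new_candidate = 0;
  auto sit = newly_allocated.upper_bound(inner_address);
  if (sit != newly_allocated.begin()) new_candidate = *(--sit);

  // The containing object must be registered in one of the two containers;
  // whichever start is closer to (but not past) the query wins.
  assert(existing_candidate != 0 || new_candidate != 0);
  return std::max(existing_candidate, new_candidate);
}

int main() {
  std::vector<uintptr_t> existing = {0x1000, 0x3000};
  std::set<uintptr_t> fresh = {0x2000};
  assert(CodeObjectStart(existing, fresh, 0x2040) == 0x2000);
  assert(CodeObjectStart(existing, fresh, 0x3100) == 0x3000);
}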
diff --git a/chromium/v8/src/heap/code-stats.cc b/chromium/v8/src/heap/code-stats.cc
index 27b1315c6b5..6e685c47b38 100644
--- a/chromium/v8/src/heap/code-stats.cc
+++ b/chromium/v8/src/heap/code-stats.cc
@@ -7,7 +7,7 @@
#include "src/codegen/code-comments.h"
#include "src/codegen/reloc-info.h"
#include "src/heap/large-spaces.h"
-#include "src/heap/spaces-inl.h" // For PagedSpaceObjectIterator.
+#include "src/heap/paged-spaces-inl.h" // For PagedSpaceObjectIterator.
#include "src/objects/objects-inl.h"
namespace v8 {
diff --git a/chromium/v8/src/heap/combined-heap.cc b/chromium/v8/src/heap/combined-heap.cc
index 0416bb62a42..3079e600f22 100644
--- a/chromium/v8/src/heap/combined-heap.cc
+++ b/chromium/v8/src/heap/combined-heap.cc
@@ -10,7 +10,8 @@ namespace internal {
CombinedHeapObjectIterator::CombinedHeapObjectIterator(
Heap* heap, HeapObjectIterator::HeapObjectsFiltering filtering)
- : heap_iterator_(heap, filtering),
+ : safepoint_scope_(heap),
+ heap_iterator_(heap, filtering),
ro_heap_iterator_(heap->isolate()->read_only_heap()) {}
HeapObject CombinedHeapObjectIterator::Next() {
diff --git a/chromium/v8/src/heap/combined-heap.h b/chromium/v8/src/heap/combined-heap.h
index d7e58dfb87c..55664114d39 100644
--- a/chromium/v8/src/heap/combined-heap.h
+++ b/chromium/v8/src/heap/combined-heap.h
@@ -7,6 +7,7 @@
#include "src/heap/heap.h"
#include "src/heap/read-only-heap.h"
+#include "src/heap/safepoint.h"
#include "src/heap/third-party/heap-api.h"
#include "src/objects/objects.h"
@@ -25,6 +26,7 @@ class V8_EXPORT_PRIVATE CombinedHeapObjectIterator final {
HeapObject Next();
private:
+ SafepointScope safepoint_scope_;
HeapObjectIterator heap_iterator_;
ReadOnlyHeapObjectIterator ro_heap_iterator_;
};
diff --git a/chromium/v8/src/heap/concurrent-allocator-inl.h b/chromium/v8/src/heap/concurrent-allocator-inl.h
index 65f1be313f8..15a4ef5f89c 100644
--- a/chromium/v8/src/heap/concurrent-allocator-inl.h
+++ b/chromium/v8/src/heap/concurrent-allocator-inl.h
@@ -8,8 +8,8 @@
#include "include/v8-internal.h"
#include "src/common/globals.h"
#include "src/heap/concurrent-allocator.h"
-
#include "src/heap/heap.h"
+#include "src/heap/incremental-marking.h"
#include "src/heap/spaces-inl.h"
#include "src/heap/spaces.h"
#include "src/objects/heap-object.h"
@@ -23,15 +23,7 @@ AllocationResult ConcurrentAllocator::Allocate(int object_size,
// TODO(dinfuehr): Add support for allocation observers
CHECK(FLAG_concurrent_allocation);
if (object_size > kMaxLabObjectSize) {
- auto result = space_->SlowGetLinearAllocationAreaBackground(
- local_heap_, object_size, object_size, alignment, origin);
-
- if (result) {
- HeapObject object = HeapObject::FromAddress(result->first);
- return AllocationResult(object);
- } else {
- return AllocationResult::Retry(OLD_SPACE);
- }
+ return AllocateOutsideLab(object_size, alignment, origin);
}
return AllocateInLab(object_size, alignment, origin);
@@ -69,6 +61,12 @@ bool ConcurrentAllocator::EnsureLab(AllocationOrigin origin) {
if (!result) return false;
+ if (local_heap_->heap()->incremental_marking()->black_allocation()) {
+ Address top = result->first;
+ Address limit = top + result->second;
+ Page::FromAllocationAreaAddress(top)->CreateBlackAreaBackground(top, limit);
+ }
+
HeapObject object = HeapObject::FromAddress(result->first);
LocalAllocationBuffer saved_lab = std::move(lab_);
lab_ = LocalAllocationBuffer::FromResult(
diff --git a/chromium/v8/src/heap/concurrent-allocator.cc b/chromium/v8/src/heap/concurrent-allocator.cc
index 7fd29110215..9625bdb13aa 100644
--- a/chromium/v8/src/heap/concurrent-allocator.cc
+++ b/chromium/v8/src/heap/concurrent-allocator.cc
@@ -4,12 +4,52 @@
#include "src/heap/concurrent-allocator.h"
+#include "src/execution/isolate.h"
+#include "src/handles/persistent-handles.h"
#include "src/heap/concurrent-allocator-inl.h"
#include "src/heap/local-heap.h"
+#include "src/heap/marking.h"
namespace v8 {
namespace internal {
+void StressConcurrentAllocatorTask::RunInternal() {
+ Heap* heap = isolate_->heap();
+ LocalHeap local_heap(heap);
+ ConcurrentAllocator* allocator = local_heap.old_space_allocator();
+
+ const int kNumIterations = 2000;
+ const int kObjectSize = 10 * kTaggedSize;
+ const int kLargeObjectSize = 8 * KB;
+
+ for (int i = 0; i < kNumIterations; i++) {
+ Address address = allocator->AllocateOrFail(
+ kObjectSize, AllocationAlignment::kWordAligned,
+ AllocationOrigin::kRuntime);
+ heap->CreateFillerObjectAtBackground(
+ address, kObjectSize, ClearFreedMemoryMode::kDontClearFreedMemory);
+ address = allocator->AllocateOrFail(kLargeObjectSize,
+ AllocationAlignment::kWordAligned,
+ AllocationOrigin::kRuntime);
+ heap->CreateFillerObjectAtBackground(
+ address, kLargeObjectSize, ClearFreedMemoryMode::kDontClearFreedMemory);
+ if (i % 10 == 0) {
+ local_heap.Safepoint();
+ }
+ }
+
+ Schedule(isolate_);
+}
+
+// static
+void StressConcurrentAllocatorTask::Schedule(Isolate* isolate) {
+ CHECK(FLAG_local_heaps && FLAG_concurrent_allocation);
+ auto task = std::make_unique<StressConcurrentAllocatorTask>(isolate);
+ const double kDelayInSeconds = 0.1;
+ V8::GetCurrentPlatform()->CallDelayedOnWorkerThread(std::move(task),
+ kDelayInSeconds);
+}
+
Address ConcurrentAllocator::PerformCollectionAndAllocateAgain(
int object_size, AllocationAlignment alignment, AllocationOrigin origin) {
Heap* heap = local_heap_->heap();
@@ -39,5 +79,43 @@ void ConcurrentAllocator::MakeLinearAllocationAreaIterable() {
lab_.MakeIterable();
}
+void ConcurrentAllocator::MarkLinearAllocationAreaBlack() {
+ Address top = lab_.top();
+ Address limit = lab_.limit();
+
+ if (top != kNullAddress && top != limit) {
+ Page::FromAllocationAreaAddress(top)->CreateBlackAreaBackground(top, limit);
+ }
+}
+
+void ConcurrentAllocator::UnmarkLinearAllocationArea() {
+ Address top = lab_.top();
+ Address limit = lab_.limit();
+
+ if (top != kNullAddress && top != limit) {
+ Page::FromAllocationAreaAddress(top)->DestroyBlackAreaBackground(top,
+ limit);
+ }
+}
+
+AllocationResult ConcurrentAllocator::AllocateOutsideLab(
+ int object_size, AllocationAlignment alignment, AllocationOrigin origin) {
+ auto result = space_->SlowGetLinearAllocationAreaBackground(
+ local_heap_, object_size, object_size, alignment, origin);
+
+ if (result) {
+ HeapObject object = HeapObject::FromAddress(result->first);
+
+ if (local_heap_->heap()->incremental_marking()->black_allocation()) {
+ local_heap_->heap()->incremental_marking()->MarkBlackBackground(
+ object, object_size);
+ }
+
+ return AllocationResult(object);
+ } else {
+ return AllocationResult::Retry(OLD_SPACE);
+ }
+}
+
} // namespace internal
} // namespace v8
diff --git a/chromium/v8/src/heap/concurrent-allocator.h b/chromium/v8/src/heap/concurrent-allocator.h
index f165d009620..795e37d339c 100644
--- a/chromium/v8/src/heap/concurrent-allocator.h
+++ b/chromium/v8/src/heap/concurrent-allocator.h
@@ -8,12 +8,27 @@
#include "src/common/globals.h"
#include "src/heap/heap.h"
#include "src/heap/spaces.h"
+#include "src/tasks/cancelable-task.h"
namespace v8 {
namespace internal {
class LocalHeap;
+class StressConcurrentAllocatorTask : public CancelableTask {
+ public:
+ explicit StressConcurrentAllocatorTask(Isolate* isolate)
+ : CancelableTask(isolate), isolate_(isolate) {}
+
+ void RunInternal() override;
+
+ // Schedules the task on a background thread.
+ static void Schedule(Isolate* isolate);
+
+ private:
+ Isolate* isolate_;
+};
+
// Concurrent allocator for allocation from background threads/tasks.
// Allocations are served from a TLAB if possible.
class ConcurrentAllocator {
@@ -36,6 +51,8 @@ class ConcurrentAllocator {
void FreeLinearAllocationArea();
void MakeLinearAllocationAreaIterable();
+ void MarkLinearAllocationAreaBlack();
+ void UnmarkLinearAllocationArea();
private:
inline bool EnsureLab(AllocationOrigin origin);
@@ -43,6 +60,9 @@ class ConcurrentAllocator {
AllocationAlignment alignment,
AllocationOrigin origin);
+ V8_EXPORT_PRIVATE AllocationResult AllocateOutsideLab(
+ int object_size, AllocationAlignment alignment, AllocationOrigin origin);
+
V8_EXPORT_PRIVATE Address PerformCollectionAndAllocateAgain(
int object_size, AllocationAlignment alignment, AllocationOrigin origin);
diff --git a/chromium/v8/src/heap/concurrent-marking.cc b/chromium/v8/src/heap/concurrent-marking.cc
index 7b9385b441f..aef84c0637d 100644
--- a/chromium/v8/src/heap/concurrent-marking.cc
+++ b/chromium/v8/src/heap/concurrent-marking.cc
@@ -41,10 +41,10 @@ class ConcurrentMarkingState final
explicit ConcurrentMarkingState(MemoryChunkDataMap* memory_chunk_data)
: memory_chunk_data_(memory_chunk_data) {}
- ConcurrentBitmap<AccessMode::ATOMIC>* bitmap(const MemoryChunk* chunk) {
+ ConcurrentBitmap<AccessMode::ATOMIC>* bitmap(const BasicMemoryChunk* chunk) {
DCHECK_EQ(reinterpret_cast<intptr_t>(&chunk->marking_bitmap_) -
reinterpret_cast<intptr_t>(chunk),
- MemoryChunk::kMarkBitmapOffset);
+ BasicMemoryChunk::kMarkBitmapOffset);
return chunk->marking_bitmap<AccessMode::ATOMIC>();
}
@@ -298,7 +298,7 @@ class ConcurrentMarkingVisitor final
#ifdef THREAD_SANITIZER
// This is needed because TSAN does not process the memory fence
// emitted after page initialization.
- MemoryChunk::FromHeapObject(heap_object)->SynchronizedHeapLoad();
+ BasicMemoryChunk::FromHeapObject(heap_object)->SynchronizedHeapLoad();
#endif
}
diff --git a/chromium/v8/src/heap/cppgc-js/cpp-heap.cc b/chromium/v8/src/heap/cppgc-js/cpp-heap.cc
new file mode 100644
index 00000000000..b9723ddb656
--- /dev/null
+++ b/chromium/v8/src/heap/cppgc-js/cpp-heap.cc
@@ -0,0 +1,141 @@
+// Copyright 2020 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/heap/cppgc-js/cpp-heap.h"
+
+#include "include/cppgc/platform.h"
+#include "include/v8-platform.h"
+#include "include/v8.h"
+#include "src/base/macros.h"
+#include "src/base/platform/time.h"
+#include "src/flags/flags.h"
+#include "src/heap/cppgc/gc-info-table.h"
+#include "src/heap/cppgc/heap-base.h"
+#include "src/heap/cppgc/heap-object-header-inl.h"
+#include "src/heap/cppgc/marker.h"
+#include "src/heap/cppgc/marking-visitor.h"
+#include "src/heap/cppgc/object-allocator.h"
+#include "src/heap/cppgc/prefinalizer-handler.h"
+#include "src/heap/cppgc/stats-collector.h"
+#include "src/heap/cppgc/sweeper.h"
+#include "src/heap/marking-worklist.h"
+#include "src/heap/sweeper.h"
+#include "src/init/v8.h"
+
+namespace v8 {
+namespace internal {
+
+namespace {
+
+class CppgcPlatformAdapter final : public cppgc::Platform {
+ public:
+ explicit CppgcPlatformAdapter(v8::Isolate* isolate)
+ : platform_(V8::GetCurrentPlatform()), isolate_(isolate) {}
+
+ CppgcPlatformAdapter(const CppgcPlatformAdapter&) = delete;
+ CppgcPlatformAdapter& operator=(const CppgcPlatformAdapter&) = delete;
+
+ PageAllocator* GetPageAllocator() final {
+ return platform_->GetPageAllocator();
+ }
+
+ double MonotonicallyIncreasingTime() final {
+ return platform_->MonotonicallyIncreasingTime();
+ }
+
+ std::shared_ptr<TaskRunner> GetForegroundTaskRunner() final {
+ return platform_->GetForegroundTaskRunner(isolate_);
+ }
+
+ std::unique_ptr<JobHandle> PostJob(TaskPriority priority,
+ std::unique_ptr<JobTask> job_task) final {
+ return platform_->PostJob(priority, std::move(job_task));
+ }
+
+ private:
+ v8::Platform* platform_;
+ v8::Isolate* isolate_;
+};
+
+class UnifiedHeapMarker : public cppgc::internal::Marker {
+ public:
+ explicit UnifiedHeapMarker(cppgc::internal::HeapBase& heap);
+
+ void AddObject(void*);
+
+ // TODO(chromium:1056170): Implement unified heap specific
+ // CreateMutatorThreadMarkingVisitor and AdvanceMarkingWithDeadline.
+};
+
+UnifiedHeapMarker::UnifiedHeapMarker(cppgc::internal::HeapBase& heap)
+ : cppgc::internal::Marker(heap) {}
+
+void UnifiedHeapMarker::AddObject(void* object) {
+ auto& header = cppgc::internal::HeapObjectHeader::FromPayload(object);
+ marking_visitor_->MarkObject(header);
+}
+
+} // namespace
+
+CppHeap::CppHeap(v8::Isolate* isolate, size_t custom_spaces)
+ : cppgc::internal::HeapBase(std::make_shared<CppgcPlatformAdapter>(isolate),
+ custom_spaces) {
+ CHECK(!FLAG_incremental_marking_wrappers);
+}
+
+void CppHeap::RegisterV8References(
+ const std::vector<std::pair<void*, void*> >& embedder_fields) {
+ DCHECK(marker_);
+ for (auto& tuple : embedder_fields) {
+ // First field points to type.
+ // Second field points to object.
+ static_cast<UnifiedHeapMarker*>(marker_.get())->AddObject(tuple.second);
+ }
+ marking_done_ = false;
+}
+
+void CppHeap::TracePrologue(TraceFlags flags) {
+ marker_ = std::make_unique<UnifiedHeapMarker>(AsBase());
+ const UnifiedHeapMarker::MarkingConfig marking_config{
+ UnifiedHeapMarker::MarkingConfig::CollectionType::kMajor,
+ cppgc::Heap::StackState::kNoHeapPointers,
+ UnifiedHeapMarker::MarkingConfig::MarkingType::kAtomic};
+ marker_->StartMarking(marking_config);
+ marking_done_ = false;
+}
+
+bool CppHeap::AdvanceTracing(double deadline_in_ms) {
+ marking_done_ = marker_->AdvanceMarkingWithDeadline(
+ v8::base::TimeDelta::FromMillisecondsD(deadline_in_ms));
+ return marking_done_;
+}
+
+bool CppHeap::IsTracingDone() { return marking_done_; }
+
+void CppHeap::EnterFinalPause(EmbedderStackState stack_state) {
+ const UnifiedHeapMarker::MarkingConfig marking_config{
+ UnifiedHeapMarker::MarkingConfig::CollectionType::kMajor,
+ cppgc::Heap::StackState::kNoHeapPointers,
+ UnifiedHeapMarker::MarkingConfig::MarkingType::kAtomic};
+ marker_->EnterAtomicPause(marking_config);
+}
+
+void CppHeap::TraceEpilogue(TraceSummary* trace_summary) {
+ CHECK(marking_done_);
+ marker_->LeaveAtomicPause();
+ {
+ // Pre-finalizers are forbidden from allocating objects.
+ cppgc::internal::ObjectAllocator::NoAllocationScope no_allocation_scope_(
+ object_allocator_);
+ marker()->ProcessWeakness();
+ prefinalizer_handler()->InvokePreFinalizers();
+ }
+ {
+ NoGCScope no_gc(*this);
+ sweeper().Start(cppgc::internal::Sweeper::Config::kAtomic);
+ }
+}
+
+} // namespace internal
+} // namespace v8
diff --git a/chromium/v8/src/heap/cppgc-js/cpp-heap.h b/chromium/v8/src/heap/cppgc-js/cpp-heap.h
new file mode 100644
index 00000000000..469bee5e882
--- /dev/null
+++ b/chromium/v8/src/heap/cppgc-js/cpp-heap.h
@@ -0,0 +1,42 @@
+// Copyright 2020 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_HEAP_CPPGC_JS_CPP_HEAP_H_
+#define V8_HEAP_CPPGC_JS_CPP_HEAP_H_
+
+#include "include/v8.h"
+#include "src/base/macros.h"
+#include "src/heap/cppgc/heap-base.h"
+
+namespace v8 {
+
+class Isolate;
+
+namespace internal {
+
+// A C++ heap implementation used with V8 to implement unified heap.
+class V8_EXPORT_PRIVATE CppHeap final : public cppgc::internal::HeapBase,
+ public v8::EmbedderHeapTracer {
+ public:
+ CppHeap(v8::Isolate* isolate, size_t custom_spaces);
+
+ HeapBase& AsBase() { return *this; }
+ const HeapBase& AsBase() const { return *this; }
+
+ void RegisterV8References(
+ const std::vector<std::pair<void*, void*> >& embedder_fields) final;
+ void TracePrologue(TraceFlags flags) final;
+ bool AdvanceTracing(double deadline_in_ms) final;
+ bool IsTracingDone() final;
+ void TraceEpilogue(TraceSummary* trace_summary) final;
+ void EnterFinalPause(EmbedderStackState stack_state) final;
+
+ private:
+ bool marking_done_ = false;
+};
+
+} // namespace internal
+} // namespace v8
+
+#endif // V8_HEAP_CPPGC_JS_CPP_HEAP_H_
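CppHeap plugs into V8 as an EmbedderHeapTracer: TracePrologue, repeated AdvanceTracing steps until IsTracingDone, EnterFinalPause, then TraceEpilogue (weakness processing, pre-finalizers, sweeping). The sketch below models only that calling order; TracerModel is a hypothetical stand-in for illustration, not the real interface.

// Driver-loop model of the tracer lifecycle used by CppHeap.
#include <cassert>

class TracerModel {
 public:
  void TracePrologue() { marking_done_ = false; steps_ = 0; }
  bool AdvanceTracing(double /*deadline_in_ms*/) {
    // Pretend marking completes after three incremental steps.
    marking_done_ = ++steps_ >= 3;
    return marking_done_;
  }
  bool IsTracingDone() const { return marking_done_; }
  void EnterFinalPause() {}
  void TraceEpilogue() { assert(marking_done_); }

 private:
  bool marking_done_ = false;
  int steps_ = 0;
};

int main() {
  TracerModel tracer;
  tracer.TracePrologue();
  while (!tracer.IsTracingDone()) tracer.AdvanceTracing(/*deadline_in_ms=*/1.0);
  tracer.EnterFinalPause();
  tracer.TraceEpilogue();  // sweeping would start here, as in CppHeap
}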
diff --git a/chromium/v8/src/heap/cppgc/allocation.cc b/chromium/v8/src/heap/cppgc/allocation.cc
index 32f917da5ac..04bcea82d03 100644
--- a/chromium/v8/src/heap/cppgc/allocation.cc
+++ b/chromium/v8/src/heap/cppgc/allocation.cc
@@ -6,7 +6,7 @@
#include "src/base/logging.h"
#include "src/base/macros.h"
-#include "src/heap/cppgc/heap-inl.h"
+#include "src/heap/cppgc/object-allocator-inl.h"
namespace cppgc {
namespace internal {
@@ -15,19 +15,17 @@ STATIC_ASSERT(api_constants::kLargeObjectSizeThreshold ==
kLargeObjectSizeThreshold);
// static
-void* MakeGarbageCollectedTraitInternal::Allocate(cppgc::Heap* heap,
- size_t size,
- GCInfoIndex index) {
- DCHECK_NOT_NULL(heap);
- return Heap::From(heap)->Allocate(size, index);
+void* MakeGarbageCollectedTraitInternal::Allocate(
+ cppgc::AllocationHandle& handle, size_t size, GCInfoIndex index) {
+ return static_cast<ObjectAllocator&>(handle).AllocateObject(size, index);
}
// static
void* MakeGarbageCollectedTraitInternal::Allocate(
- cppgc::Heap* heap, size_t size, GCInfoIndex index,
+ cppgc::AllocationHandle& handle, size_t size, GCInfoIndex index,
CustomSpaceIndex space_index) {
- DCHECK_NOT_NULL(heap);
- return Heap::From(heap)->Allocate(size, index, space_index);
+ return static_cast<ObjectAllocator&>(handle).AllocateObject(size, index,
+ space_index);
}
} // namespace internal
diff --git a/chromium/v8/src/heap/cppgc/caged-heap-local-data.cc b/chromium/v8/src/heap/cppgc/caged-heap-local-data.cc
new file mode 100644
index 00000000000..55ededdc087
--- /dev/null
+++ b/chromium/v8/src/heap/cppgc/caged-heap-local-data.cc
@@ -0,0 +1,36 @@
+// Copyright 2020 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "include/cppgc/internal/caged-heap-local-data.h"
+
+#include <algorithm>
+#include <type_traits>
+
+#include "include/cppgc/platform.h"
+#include "src/base/macros.h"
+
+namespace cppgc {
+namespace internal {
+
+#if defined(CPPGC_YOUNG_GENERATION)
+
+static_assert(
+ std::is_trivially_default_constructible<AgeTable>::value,
+ "To support lazy committing, AgeTable must be trivially constructible");
+
+void AgeTable::Reset(PageAllocator* allocator) {
+ // TODO(chromium:1029379): Consider MADV_DONTNEED instead of MADV_FREE on
+ // POSIX platforms.
+ std::fill(table_.begin(), table_.end(), Age::kOld);
+ const uintptr_t begin = RoundUp(reinterpret_cast<uintptr_t>(table_.begin()),
+ allocator->CommitPageSize());
+ const uintptr_t end = RoundDown(reinterpret_cast<uintptr_t>(table_.end()),
+ allocator->CommitPageSize());
+ allocator->DiscardSystemPages(reinterpret_cast<void*>(begin), end - begin);
+}
+
+#endif
+
+} // namespace internal
+} // namespace cppgc
diff --git a/chromium/v8/src/heap/cppgc/caged-heap.cc b/chromium/v8/src/heap/cppgc/caged-heap.cc
new file mode 100644
index 00000000000..16cb30aa281
--- /dev/null
+++ b/chromium/v8/src/heap/cppgc/caged-heap.cc
@@ -0,0 +1,85 @@
+// Copyright 2020 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#if !defined(CPPGC_CAGED_HEAP)
+#error "Must be compiled with caged heap enabled"
+#endif
+
+#include "src/heap/cppgc/caged-heap.h"
+
+#include "include/cppgc/internal/caged-heap-local-data.h"
+#include "src/base/bounded-page-allocator.h"
+#include "src/base/logging.h"
+#include "src/heap/cppgc/globals.h"
+
+namespace cppgc {
+namespace internal {
+
+STATIC_ASSERT(api_constants::kCagedHeapReservationSize ==
+ kCagedHeapReservationSize);
+STATIC_ASSERT(api_constants::kCagedHeapReservationAlignment ==
+ kCagedHeapReservationAlignment);
+
+namespace {
+
+VirtualMemory ReserveCagedHeap(PageAllocator* platform_allocator) {
+ DCHECK_NOT_NULL(platform_allocator);
+ DCHECK_EQ(0u,
+ kCagedHeapReservationSize % platform_allocator->AllocatePageSize());
+
+ static constexpr size_t kAllocationTries = 4;
+ for (size_t i = 0; i < kAllocationTries; ++i) {
+ void* hint = reinterpret_cast<void*>(RoundDown(
+ reinterpret_cast<uintptr_t>(platform_allocator->GetRandomMmapAddr()),
+ kCagedHeapReservationAlignment));
+
+ VirtualMemory memory(platform_allocator, kCagedHeapReservationSize,
+ kCagedHeapReservationAlignment, hint);
+ if (memory.IsReserved()) return memory;
+ }
+
+ FATAL("Fatal process out of memory: Failed to reserve memory for caged heap");
+ UNREACHABLE();
+}
+
+std::unique_ptr<CagedHeap::AllocatorType> CreateBoundedAllocator(
+ v8::PageAllocator* platform_allocator, void* caged_heap_start) {
+ DCHECK(caged_heap_start);
+
+ auto start =
+ reinterpret_cast<CagedHeap::AllocatorType::Address>(caged_heap_start);
+
+ return std::make_unique<CagedHeap::AllocatorType>(
+ platform_allocator, start, kCagedHeapReservationSize, kPageSize);
+}
+
+} // namespace
+
+CagedHeap::CagedHeap(HeapBase* heap_base, PageAllocator* platform_allocator)
+ : reserved_area_(ReserveCagedHeap(platform_allocator)) {
+ DCHECK_NOT_NULL(heap_base);
+
+ void* caged_heap_start = reserved_area_.address();
+ CHECK(platform_allocator->SetPermissions(
+ reserved_area_.address(),
+ RoundUp(sizeof(CagedHeapLocalData), platform_allocator->CommitPageSize()),
+ PageAllocator::kReadWrite));
+
+ auto* local_data =
+ new (reserved_area_.address()) CagedHeapLocalData(heap_base);
+#if defined(CPPGC_YOUNG_GENERATION)
+ local_data->age_table.Reset(platform_allocator);
+#endif
+ USE(local_data);
+
+ caged_heap_start = reinterpret_cast<void*>(
+ RoundUp(reinterpret_cast<uintptr_t>(caged_heap_start) +
+ sizeof(CagedHeapLocalData),
+ kPageSize));
+ bounded_allocator_ =
+ CreateBoundedAllocator(platform_allocator, caged_heap_start);
+}
+
+} // namespace internal
+} // namespace cppgc
diff --git a/chromium/v8/src/heap/cppgc/caged-heap.h b/chromium/v8/src/heap/cppgc/caged-heap.h
new file mode 100644
index 00000000000..7ac34624a0a
--- /dev/null
+++ b/chromium/v8/src/heap/cppgc/caged-heap.h
@@ -0,0 +1,53 @@
+// Copyright 2020 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_HEAP_CPPGC_CAGED_HEAP_H_
+#define V8_HEAP_CPPGC_CAGED_HEAP_H_
+
+#include <memory>
+
+#include "include/cppgc/platform.h"
+#include "src/base/bounded-page-allocator.h"
+#include "src/heap/cppgc/globals.h"
+#include "src/heap/cppgc/virtual-memory.h"
+
+namespace cppgc {
+namespace internal {
+
+struct CagedHeapLocalData;
+class HeapBase;
+
+class CagedHeap final {
+ public:
+ using AllocatorType = v8::base::BoundedPageAllocator;
+
+ CagedHeap(HeapBase* heap, PageAllocator* platform_allocator);
+
+ CagedHeap(const CagedHeap&) = delete;
+ CagedHeap& operator=(const CagedHeap&) = delete;
+
+ AllocatorType& allocator() { return *bounded_allocator_; }
+ const AllocatorType& allocator() const { return *bounded_allocator_; }
+
+ CagedHeapLocalData& local_data() {
+ return *static_cast<CagedHeapLocalData*>(reserved_area_.address());
+ }
+ const CagedHeapLocalData& local_data() const {
+ return *static_cast<CagedHeapLocalData*>(reserved_area_.address());
+ }
+
+ static uintptr_t OffsetFromAddress(void* address) {
+ return reinterpret_cast<uintptr_t>(address) &
+ (kCagedHeapReservationAlignment - 1);
+ }
+
+ private:
+ VirtualMemory reserved_area_;
+ std::unique_ptr<AllocatorType> bounded_allocator_;
+};
+
+} // namespace internal
+} // namespace cppgc
+
+#endif // V8_HEAP_CPPGC_CAGED_HEAP_H_
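Because the reservation alignment equals the reservation size (a 4 GiB power of two, per globals.h), OffsetFromAddress() can recover the offset of any address inside the cage with a single mask. A minimal sketch of that property, assuming a 64-bit target and a hypothetical cage base:

#include <cassert>
#include <cstdint>

int main() {
  const uintptr_t kCageSize = uintptr_t{4} * 1024 * 1024 * 1024;  // 4 GiB
  const uintptr_t kCageAlignment = kCageSize;  // alignment == size
  const uintptr_t cage_base = 0x200000000;     // hypothetical, suitably aligned
  assert(cage_base % kCageAlignment == 0);

  const uintptr_t offset_in_cage = 0x1234;
  const uintptr_t address = cage_base + offset_in_cage;

  // Masking with (alignment - 1) strips the base and leaves the offset.
  assert((address & (kCageAlignment - 1)) == offset_in_cage);
  return 0;
}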
diff --git a/chromium/v8/src/heap/cppgc/free-list.cc b/chromium/v8/src/heap/cppgc/free-list.cc
index e5e6b70793d..8f649059323 100644
--- a/chromium/v8/src/heap/cppgc/free-list.cc
+++ b/chromium/v8/src/heap/cppgc/free-list.cc
@@ -68,12 +68,17 @@ void FreeList::Add(FreeList::Block block) {
if (block.size < sizeof(Entry)) {
// Create wasted entry. This can happen when an almost emptied linear
// allocation buffer is returned to the freelist.
+    // This could be SET_MEMORY_ACCESSIBLE. Since there's no payload, the next
+    // operation overwrites the memory completely, and we can thus avoid
+    // zeroing it out.
+ ASAN_UNPOISON_MEMORY_REGION(block.address, sizeof(HeapObjectHeader));
new (block.address) HeapObjectHeader(size, kFreeListGCInfoIndex);
return;
}
- // Make sure the freelist header is writable.
- SET_MEMORY_ACCESIBLE(block.address, sizeof(Entry));
+ // Make sure the freelist header is writable. SET_MEMORY_ACCESSIBLE is not
+ // needed as we write the whole payload of Entry.
+ ASAN_UNPOISON_MEMORY_REGION(block.address, sizeof(Entry));
Entry* entry = new (block.address) Entry(size);
const size_t index = BucketIndexForSize(static_cast<uint32_t>(size));
entry->Link(&free_list_heads_[index]);
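The ASAN_UNPOISON_MEMORY_REGION calls above only matter in AddressSanitizer builds; macros of this kind conventionally compile to no-ops otherwise. A hedged sketch of that conventional pattern (this is not cppgc's actual macro definition):

#include <cstddef>

#if defined(__has_feature)
#if __has_feature(address_sanitizer)
#include <sanitizer/asan_interface.h>
#define UNPOISON_REGION(addr, size) __asan_unpoison_memory_region(addr, size)
#endif
#endif
#ifndef UNPOISON_REGION
// Non-ASAN builds: evaluate the arguments but do nothing.
#define UNPOISON_REGION(addr, size) ((void)(addr), (void)(size))
#endif

int main() {
  char block[64];
  // Make the header-sized prefix writable before constructing into it.
  UNPOISON_REGION(block, sizeof(void*) * 2);
  return 0;
}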
diff --git a/chromium/v8/src/heap/cppgc/garbage-collector.h b/chromium/v8/src/heap/cppgc/garbage-collector.h
new file mode 100644
index 00000000000..6c906fd501a
--- /dev/null
+++ b/chromium/v8/src/heap/cppgc/garbage-collector.h
@@ -0,0 +1,56 @@
+// Copyright 2020 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_HEAP_CPPGC_GARBAGE_COLLECTOR_H_
+#define V8_HEAP_CPPGC_GARBAGE_COLLECTOR_H_
+
+#include "src/heap/cppgc/marker.h"
+#include "src/heap/cppgc/sweeper.h"
+
+namespace cppgc {
+namespace internal {
+
+// GC interface that allows abstraction over the actual GC invocation. This is
+// needed to mock/fake GC for testing.
+class GarbageCollector {
+ public:
+ struct Config {
+ using CollectionType = Marker::MarkingConfig::CollectionType;
+ using StackState = cppgc::Heap::StackState;
+ using MarkingType = Marker::MarkingConfig::MarkingType;
+ using SweepingType = Sweeper::Config;
+
+ static constexpr Config ConservativeAtomicConfig() {
+ return {CollectionType::kMajor, StackState::kMayContainHeapPointers,
+ MarkingType::kAtomic, SweepingType::kAtomic};
+ }
+
+ static constexpr Config PreciseAtomicConfig() {
+ return {CollectionType::kMajor, StackState::kNoHeapPointers,
+ MarkingType::kAtomic, SweepingType::kAtomic};
+ }
+
+ static constexpr Config MinorPreciseAtomicConfig() {
+ return {CollectionType::kMinor, StackState::kNoHeapPointers,
+ MarkingType::kAtomic, SweepingType::kAtomic};
+ }
+
+ CollectionType collection_type = CollectionType::kMajor;
+ StackState stack_state = StackState::kMayContainHeapPointers;
+ MarkingType marking_type = MarkingType::kAtomic;
+ SweepingType sweeping_type = SweepingType::kAtomic;
+ };
+
+ // Executes a garbage collection specified in config.
+ virtual void CollectGarbage(Config config) = 0;
+
+ // The current epoch that the GC maintains. The epoch is increased on every
+ // GC invocation.
+ virtual size_t epoch() const = 0;
+};
+
+} // namespace internal
+} // namespace cppgc
+
+#endif // V8_HEAP_CPPGC_GARBAGE_COLLECTOR_H_
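As the comment notes, the interface exists so tests can swap in a fake collector. A simplified, standalone sketch of that pattern (the types below are illustrative stand-ins, not the cppgc classes):

#include <cassert>
#include <cstddef>

struct FakeConfig {
  enum class StackState { kNoHeapPointers, kMayContainHeapPointers };
  StackState stack_state;
};

class CollectorInterface {
 public:
  virtual ~CollectorInterface() = default;
  virtual void CollectGarbage(FakeConfig) = 0;
  virtual size_t epoch() const = 0;
};

class FakeGarbageCollector final : public CollectorInterface {
 public:
  void CollectGarbage(FakeConfig) override { ++epoch_; }  // only counts calls
  size_t epoch() const override { return epoch_; }

 private:
  size_t epoch_ = 0;
};

int main() {
  FakeGarbageCollector gc;
  gc.CollectGarbage({FakeConfig::StackState::kNoHeapPointers});
  assert(gc.epoch() == 1);  // callers can observe that a "GC" happened
  return 0;
}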
diff --git a/chromium/v8/src/heap/cppgc/gc-info-table.cc b/chromium/v8/src/heap/cppgc/gc-info-table.cc
index dda5f0a7e83..8f2ee965011 100644
--- a/chromium/v8/src/heap/cppgc/gc-info-table.cc
+++ b/chromium/v8/src/heap/cppgc/gc-info-table.cc
@@ -18,6 +18,11 @@ namespace internal {
namespace {
+// GCInfoTable::table_, the table which holds GCInfos, is maintained as a
+// contiguous array reserved upfront. Subparts of the array are (re-)committed
+// as read/write or read-only in OS pages, whose size is a power of 2. To avoid
+// having GCInfos that cross the boundaries between these subparts we force the
+// size of GCInfo to be a power of 2 as well.
constexpr size_t kEntrySize = sizeof(GCInfo);
static_assert(v8::base::bits::IsPowerOfTwo(kEntrySize),
"GCInfoTable entries size must be power of "
diff --git a/chromium/v8/src/heap/cppgc/gc-info-table.h b/chromium/v8/src/heap/cppgc/gc-info-table.h
index 25141f5d1cc..749f30b258c 100644
--- a/chromium/v8/src/heap/cppgc/gc-info-table.h
+++ b/chromium/v8/src/heap/cppgc/gc-info-table.h
@@ -22,7 +22,10 @@ namespace internal {
// inherit from GarbageCollected.
struct GCInfo final {
FinalizationCallback finalize;
+ TraceCallback trace;
bool has_v_table;
+ // Keep sizeof(GCInfo) a power of 2.
+ size_t padding = 0;
};
class V8_EXPORT GCInfoTable final {
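The added trace pointer plus the padding member keep sizeof(GCInfo) at a power of two, so an entry can never straddle the page-sized subparts described in gc-info-table.cc. A sketch of the size arithmetic with an illustrative struct (not the real GCInfo; the callback signatures are simplified):

#include <cstddef>

using FinalizationCallback = void (*)(void*);
using TraceCallback = void (*)(const void*);

struct GCInfoLike {
  FinalizationCallback finalize;  // 8 bytes on LP64
  TraceCallback trace;            // 8 bytes
  bool has_v_table;               // 1 byte plus alignment padding
  size_t padding = 0;             // lifts 24 bytes up to 32, a power of two
};

constexpr bool IsPowerOfTwo(size_t v) { return v != 0 && (v & (v - 1)) == 0; }
static_assert(IsPowerOfTwo(sizeof(GCInfoLike)),
              "entries must not cross committed-subpart boundaries");

int main() { return 0; }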
diff --git a/chromium/v8/src/heap/cppgc/gc-info.cc b/chromium/v8/src/heap/cppgc/gc-info.cc
index 007eab3a338..70970139b17 100644
--- a/chromium/v8/src/heap/cppgc/gc-info.cc
+++ b/chromium/v8/src/heap/cppgc/gc-info.cc
@@ -10,9 +10,10 @@ namespace cppgc {
namespace internal {
RegisteredGCInfoIndex::RegisteredGCInfoIndex(
- FinalizationCallback finalization_callback, bool has_v_table)
+ FinalizationCallback finalization_callback, TraceCallback trace_callback,
+ bool has_v_table)
: index_(GlobalGCInfoTable::GetMutable().RegisterNewGCInfo(
- {finalization_callback, has_v_table})) {}
+ {finalization_callback, trace_callback, has_v_table})) {}
} // namespace internal
} // namespace cppgc
diff --git a/chromium/v8/src/heap/cppgc/gc-invoker.cc b/chromium/v8/src/heap/cppgc/gc-invoker.cc
new file mode 100644
index 00000000000..a1212d80523
--- /dev/null
+++ b/chromium/v8/src/heap/cppgc/gc-invoker.cc
@@ -0,0 +1,105 @@
+// Copyright 2020 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/heap/cppgc/gc-invoker.h"
+
+#include <memory>
+
+#include "include/cppgc/platform.h"
+#include "src/heap/cppgc/heap.h"
+#include "src/heap/cppgc/task-handle.h"
+
+namespace cppgc {
+namespace internal {
+
+class GCInvoker::GCInvokerImpl final : public GarbageCollector {
+ public:
+ GCInvokerImpl(GarbageCollector*, cppgc::Platform*, cppgc::Heap::StackSupport);
+ ~GCInvokerImpl();
+
+ GCInvokerImpl(const GCInvokerImpl&) = delete;
+ GCInvokerImpl& operator=(const GCInvokerImpl&) = delete;
+
+ void CollectGarbage(GarbageCollector::Config) final;
+ size_t epoch() const final { return collector_->epoch(); }
+
+ private:
+ class GCTask final : public cppgc::Task {
+ public:
+ using Handle = SingleThreadedHandle;
+
+ static Handle Post(GarbageCollector* collector, cppgc::TaskRunner* runner) {
+ auto task = std::make_unique<GCInvoker::GCInvokerImpl::GCTask>(collector);
+ auto handle = task->GetHandle();
+ runner->PostNonNestableTask(std::move(task));
+ return handle;
+ }
+
+ explicit GCTask(GarbageCollector* collector)
+ : collector_(collector), saved_epoch_(collector->epoch()) {}
+
+ private:
+ void Run() final {
+ if (handle_.IsCanceled() || (collector_->epoch() != saved_epoch_)) return;
+
+ collector_->CollectGarbage(
+ GarbageCollector::Config::PreciseAtomicConfig());
+ handle_.Cancel();
+ }
+
+ Handle GetHandle() { return handle_; }
+
+ GarbageCollector* collector_;
+ Handle handle_;
+ size_t saved_epoch_;
+ };
+
+ GarbageCollector* collector_;
+ cppgc::Platform* platform_;
+ cppgc::Heap::StackSupport stack_support_;
+ GCTask::Handle gc_task_handle_;
+};
+
+GCInvoker::GCInvokerImpl::GCInvokerImpl(GarbageCollector* collector,
+ cppgc::Platform* platform,
+ cppgc::Heap::StackSupport stack_support)
+ : collector_(collector),
+ platform_(platform),
+ stack_support_(stack_support) {}
+
+GCInvoker::GCInvokerImpl::~GCInvokerImpl() {
+ if (gc_task_handle_) {
+ gc_task_handle_.Cancel();
+ }
+}
+
+void GCInvoker::GCInvokerImpl::CollectGarbage(GarbageCollector::Config config) {
+ if ((config.stack_state ==
+ GarbageCollector::Config::StackState::kNoHeapPointers) ||
+ (stack_support_ ==
+ cppgc::Heap::StackSupport::kSupportsConservativeStackScan)) {
+ collector_->CollectGarbage(config);
+ } else if (platform_->GetForegroundTaskRunner()->NonNestableTasksEnabled()) {
+ if (!gc_task_handle_) {
+ gc_task_handle_ =
+ GCTask::Post(collector_, platform_->GetForegroundTaskRunner().get());
+ }
+ }
+}
+
+GCInvoker::GCInvoker(GarbageCollector* collector, cppgc::Platform* platform,
+ cppgc::Heap::StackSupport stack_support)
+ : impl_(std::make_unique<GCInvoker::GCInvokerImpl>(collector, platform,
+ stack_support)) {}
+
+GCInvoker::~GCInvoker() = default;
+
+void GCInvoker::CollectGarbage(GarbageCollector::Config config) {
+ impl_->CollectGarbage(config);
+}
+
+size_t GCInvoker::epoch() const { return impl_->epoch(); }
+
+} // namespace internal
+} // namespace cppgc
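CollectGarbage() above reduces to a small decision: run synchronously when the caller guarantees a pointer-free stack or the embedder supports conservative stack scanning; otherwise post a precise, non-nestable GC task (or do nothing if such tasks are unavailable). A compact sketch of just that predicate, with illustrative enum names:

#include <cassert>

enum class StackState { kNoHeapPointers, kMayContainHeapPointers };
enum class StackSupport { kSupportsConservativeStackScan, kNoConservativeScan };

// True if the collection can run right now on the current stack.
bool CanCollectSynchronously(StackState state, StackSupport support) {
  return state == StackState::kNoHeapPointers ||
         support == StackSupport::kSupportsConservativeStackScan;
}

int main() {
  assert(CanCollectSynchronously(StackState::kNoHeapPointers,
                                 StackSupport::kNoConservativeScan));
  assert(!CanCollectSynchronously(StackState::kMayContainHeapPointers,
                                  StackSupport::kNoConservativeScan));
  return 0;
}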
diff --git a/chromium/v8/src/heap/cppgc/gc-invoker.h b/chromium/v8/src/heap/cppgc/gc-invoker.h
new file mode 100644
index 00000000000..a9e3369b3e9
--- /dev/null
+++ b/chromium/v8/src/heap/cppgc/gc-invoker.h
@@ -0,0 +1,47 @@
+// Copyright 2020 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_HEAP_CPPGC_GC_INVOKER_H_
+#define V8_HEAP_CPPGC_GC_INVOKER_H_
+
+#include "include/cppgc/heap.h"
+#include "src/base/macros.h"
+#include "src/heap/cppgc/garbage-collector.h"
+
+namespace cppgc {
+
+class Platform;
+
+namespace internal {
+
+// GC invoker that dispatches GC depending on StackSupport and StackState:
+// 1. If StackState specifies that no stack scan is needed, the GC is invoked
+//    synchronously.
+// 2. If StackState specifies conservative GC and StackSupport prohibits stack
+// scanning: Delay GC until it can be invoked without accessing the stack.
+// To do so, a precise GC without stack scan is scheduled using the platform
+// if non-nestable tasks are supported, and otherwise no operation is carried
+//    out. This means that the heuristic allows the heap to arbitrarily exceed
+//    the limit when non-nestable tasks are not supported and only conservative
+//    GCs are requested.
+class V8_EXPORT_PRIVATE GCInvoker final : public GarbageCollector {
+ public:
+ GCInvoker(GarbageCollector*, cppgc::Platform*, cppgc::Heap::StackSupport);
+ ~GCInvoker();
+
+ GCInvoker(const GCInvoker&) = delete;
+ GCInvoker& operator=(const GCInvoker&) = delete;
+
+ void CollectGarbage(GarbageCollector::Config) final;
+ size_t epoch() const final;
+
+ private:
+ class GCInvokerImpl;
+ std::unique_ptr<GCInvokerImpl> impl_;
+};
+
+} // namespace internal
+} // namespace cppgc
+
+#endif // V8_HEAP_CPPGC_GC_INVOKER_H_
diff --git a/chromium/v8/src/heap/cppgc/globals.h b/chromium/v8/src/heap/cppgc/globals.h
index 734abd508ef..d286a7fa428 100644
--- a/chromium/v8/src/heap/cppgc/globals.h
+++ b/chromium/v8/src/heap/cppgc/globals.h
@@ -16,6 +16,10 @@ namespace internal {
using Address = uint8_t*;
using ConstAddress = const uint8_t*;
+constexpr size_t kKB = 1024;
+constexpr size_t kMB = kKB * 1024;
+constexpr size_t kGB = kMB * 1024;
+
// See 6.7.6 (http://eel.is/c++draft/basic.align) for alignment restrictions. We
// do not fully support all alignment restrictions (following
// alignof(std​::​max_­align_­t)) but limit to alignof(double).
@@ -42,6 +46,9 @@ constexpr size_t kLargeObjectSizeThreshold = kPageSize / 2;
constexpr GCInfoIndex kFreeListGCInfoIndex = 0;
constexpr size_t kFreeListEntrySize = 2 * sizeof(uintptr_t);
+constexpr size_t kCagedHeapReservationSize = static_cast<size_t>(4) * kGB;
+constexpr size_t kCagedHeapReservationAlignment = kCagedHeapReservationSize;
+
} // namespace internal
} // namespace cppgc
diff --git a/chromium/v8/src/heap/cppgc/heap-base.cc b/chromium/v8/src/heap/cppgc/heap-base.cc
new file mode 100644
index 00000000000..7963df0af3f
--- /dev/null
+++ b/chromium/v8/src/heap/cppgc/heap-base.cc
@@ -0,0 +1,88 @@
+// Copyright 2020 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/heap/cppgc/heap-base.h"
+
+#include "src/base/bounded-page-allocator.h"
+#include "src/base/platform/platform.h"
+#include "src/heap/base/stack.h"
+#include "src/heap/cppgc/globals.h"
+#include "src/heap/cppgc/heap-object-header-inl.h"
+#include "src/heap/cppgc/heap-page-inl.h"
+#include "src/heap/cppgc/heap-visitor.h"
+#include "src/heap/cppgc/marker.h"
+#include "src/heap/cppgc/page-memory.h"
+#include "src/heap/cppgc/prefinalizer-handler.h"
+#include "src/heap/cppgc/stats-collector.h"
+
+namespace cppgc {
+namespace internal {
+
+namespace {
+
+class ObjectSizeCounter : private HeapVisitor<ObjectSizeCounter> {
+ friend class HeapVisitor<ObjectSizeCounter>;
+
+ public:
+ size_t GetSize(RawHeap* heap) {
+ Traverse(heap);
+ return accumulated_size_;
+ }
+
+ private:
+ static size_t ObjectSize(const HeapObjectHeader* header) {
+ const size_t size =
+ header->IsLargeObject()
+ ? static_cast<const LargePage*>(BasePage::FromPayload(header))
+ ->PayloadSize()
+ : header->GetSize();
+ DCHECK_GE(size, sizeof(HeapObjectHeader));
+ return size - sizeof(HeapObjectHeader);
+ }
+
+ bool VisitHeapObjectHeader(HeapObjectHeader* header) {
+ if (header->IsFree()) return true;
+ accumulated_size_ += ObjectSize(header);
+ return true;
+ }
+
+ size_t accumulated_size_ = 0;
+};
+
+} // namespace
+
+HeapBase::HeapBase(std::shared_ptr<cppgc::Platform> platform,
+ size_t custom_spaces)
+ : raw_heap_(this, custom_spaces),
+ platform_(std::move(platform)),
+#if defined(CPPGC_CAGED_HEAP)
+ caged_heap_(this, platform_->GetPageAllocator()),
+ page_backend_(std::make_unique<PageBackend>(&caged_heap_.allocator())),
+#else
+ page_backend_(
+ std::make_unique<PageBackend>(platform_->GetPageAllocator())),
+#endif
+ stats_collector_(std::make_unique<StatsCollector>()),
+ stack_(std::make_unique<heap::base::Stack>(
+ v8::base::Stack::GetStackStart())),
+ prefinalizer_handler_(std::make_unique<PreFinalizerHandler>()),
+ object_allocator_(&raw_heap_, page_backend_.get(),
+ stats_collector_.get()),
+ sweeper_(&raw_heap_, platform_.get(), stats_collector_.get()) {
+}
+
+HeapBase::~HeapBase() = default;
+
+size_t HeapBase::ObjectPayloadSize() const {
+ return ObjectSizeCounter().GetSize(const_cast<RawHeap*>(&raw_heap()));
+}
+
+HeapBase::NoGCScope::NoGCScope(HeapBase& heap) : heap_(heap) {
+ heap_.no_gc_scope_++;
+}
+
+HeapBase::NoGCScope::~NoGCScope() { heap_.no_gc_scope_--; }
+
+} // namespace internal
+} // namespace cppgc
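ObjectPayloadSize() reports live payload only: each object's size minus its HeapObjectHeader, skipping free-list entries. A standalone sketch of that accumulation over a flat list of fake headers (the header size below is an assumption):

#include <cassert>
#include <cstddef>
#include <vector>

struct FakeHeader {
  size_t allocated_size;  // size including the header
  bool is_free;           // stands in for header->IsFree()
};

constexpr size_t kHeaderSize = 16;  // assumed sizeof(HeapObjectHeader)

size_t ObjectPayloadSize(const std::vector<FakeHeader>& headers) {
  size_t accumulated = 0;
  for (const FakeHeader& h : headers) {
    if (h.is_free) continue;  // free-list entries contribute nothing
    accumulated += h.allocated_size - kHeaderSize;
  }
  return accumulated;
}

int main() {
  assert(ObjectPayloadSize({{64, false}, {32, true}, {128, false}}) ==
         static_cast<size_t>((64 - 16) + (128 - 16)));
  return 0;
}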
diff --git a/chromium/v8/src/heap/cppgc/heap-base.h b/chromium/v8/src/heap/cppgc/heap-base.h
new file mode 100644
index 00000000000..cc61ed32fc8
--- /dev/null
+++ b/chromium/v8/src/heap/cppgc/heap-base.h
@@ -0,0 +1,151 @@
+// Copyright 2020 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_HEAP_CPPGC_HEAP_BASE_H_
+#define V8_HEAP_CPPGC_HEAP_BASE_H_
+
+#include <memory>
+#include <set>
+
+#include "include/cppgc/internal/persistent-node.h"
+#include "include/cppgc/macros.h"
+#include "src/base/macros.h"
+#include "src/heap/cppgc/object-allocator.h"
+#include "src/heap/cppgc/raw-heap.h"
+#include "src/heap/cppgc/sweeper.h"
+
+#if defined(CPPGC_CAGED_HEAP)
+#include "src/heap/cppgc/caged-heap.h"
+#endif
+
+namespace heap {
+namespace base {
+class Stack;
+} // namespace base
+} // namespace heap
+
+namespace cppgc {
+
+class Platform;
+
+namespace internal {
+
+namespace testing {
+class TestWithHeap;
+}
+
+class Marker;
+class PageBackend;
+class PreFinalizerHandler;
+class StatsCollector;
+
+// Base class for heap implementations.
+class V8_EXPORT_PRIVATE HeapBase {
+ public:
+  // NoGCScope allows going over limits and avoids triggering garbage
+  // collections, whether triggered through allocations or explicitly.
+ class V8_EXPORT_PRIVATE NoGCScope final {
+ CPPGC_STACK_ALLOCATED();
+
+ public:
+ explicit NoGCScope(HeapBase& heap);
+ ~NoGCScope();
+
+ NoGCScope(const NoGCScope&) = delete;
+ NoGCScope& operator=(const NoGCScope&) = delete;
+
+ private:
+ HeapBase& heap_;
+ };
+
+ HeapBase(std::shared_ptr<cppgc::Platform> platform, size_t custom_spaces);
+ virtual ~HeapBase();
+
+ HeapBase(const HeapBase&) = delete;
+ HeapBase& operator=(const HeapBase&) = delete;
+
+ RawHeap& raw_heap() { return raw_heap_; }
+ const RawHeap& raw_heap() const { return raw_heap_; }
+
+ cppgc::Platform* platform() { return platform_.get(); }
+ const cppgc::Platform* platform() const { return platform_.get(); }
+
+ PageBackend* page_backend() { return page_backend_.get(); }
+ const PageBackend* page_backend() const { return page_backend_.get(); }
+
+ StatsCollector* stats_collector() { return stats_collector_.get(); }
+ const StatsCollector* stats_collector() const {
+ return stats_collector_.get();
+ }
+
+#if defined(CPPGC_CAGED_HEAP)
+ CagedHeap& caged_heap() { return caged_heap_; }
+ const CagedHeap& caged_heap() const { return caged_heap_; }
+#endif
+
+ heap::base::Stack* stack() { return stack_.get(); }
+
+ PreFinalizerHandler* prefinalizer_handler() {
+ return prefinalizer_handler_.get();
+ }
+
+ Marker* marker() const { return marker_.get(); }
+
+ ObjectAllocator& object_allocator() { return object_allocator_; }
+
+ Sweeper& sweeper() { return sweeper_; }
+
+ PersistentRegion& GetStrongPersistentRegion() {
+ return strong_persistent_region_;
+ }
+ const PersistentRegion& GetStrongPersistentRegion() const {
+ return strong_persistent_region_;
+ }
+ PersistentRegion& GetWeakPersistentRegion() {
+ return weak_persistent_region_;
+ }
+ const PersistentRegion& GetWeakPersistentRegion() const {
+ return weak_persistent_region_;
+ }
+
+#if defined(CPPGC_YOUNG_GENERATION)
+ std::set<void*>& remembered_slots() { return remembered_slots_; }
+#endif
+
+ size_t ObjectPayloadSize() const;
+
+ protected:
+ bool in_no_gc_scope() const { return no_gc_scope_ > 0; }
+
+ RawHeap raw_heap_;
+ std::shared_ptr<cppgc::Platform> platform_;
+#if defined(CPPGC_CAGED_HEAP)
+ CagedHeap caged_heap_;
+#endif
+ std::unique_ptr<PageBackend> page_backend_;
+
+ std::unique_ptr<StatsCollector> stats_collector_;
+ std::unique_ptr<heap::base::Stack> stack_;
+ std::unique_ptr<PreFinalizerHandler> prefinalizer_handler_;
+ std::unique_ptr<Marker> marker_;
+
+ ObjectAllocator object_allocator_;
+ Sweeper sweeper_;
+
+ PersistentRegion strong_persistent_region_;
+ PersistentRegion weak_persistent_region_;
+
+#if defined(CPPGC_YOUNG_GENERATION)
+ std::set<void*> remembered_slots_;
+#endif
+
+ size_t no_gc_scope_ = 0;
+
+ friend class testing::TestWithHeap;
+};
+
+} // namespace internal
+} // namespace cppgc
+
+#endif // V8_HEAP_CPPGC_HEAP_BASE_H_
diff --git a/chromium/v8/src/heap/cppgc/heap-growing.cc b/chromium/v8/src/heap/cppgc/heap-growing.cc
new file mode 100644
index 00000000000..751d32b0e6d
--- /dev/null
+++ b/chromium/v8/src/heap/cppgc/heap-growing.cc
@@ -0,0 +1,99 @@
+// Copyright 2020 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/heap/cppgc/heap-growing.h"
+
+#include <memory>
+
+#include "include/cppgc/platform.h"
+#include "src/base/macros.h"
+#include "src/heap/cppgc/globals.h"
+#include "src/heap/cppgc/heap.h"
+#include "src/heap/cppgc/stats-collector.h"
+#include "src/heap/cppgc/task-handle.h"
+
+namespace cppgc {
+namespace internal {
+
+class HeapGrowing::HeapGrowingImpl final
+ : public StatsCollector::AllocationObserver {
+ public:
+ HeapGrowingImpl(GarbageCollector*, StatsCollector*,
+ cppgc::Heap::ResourceConstraints);
+ ~HeapGrowingImpl();
+
+ HeapGrowingImpl(const HeapGrowingImpl&) = delete;
+ HeapGrowingImpl& operator=(const HeapGrowingImpl&) = delete;
+
+ void AllocatedObjectSizeIncreased(size_t) final;
+ // Only trigger GC on growing.
+ void AllocatedObjectSizeDecreased(size_t) final {}
+ void ResetAllocatedObjectSize(size_t) final;
+
+ size_t limit() const { return limit_; }
+
+ private:
+ void ConfigureLimit(size_t allocated_object_size);
+
+ GarbageCollector* collector_;
+ StatsCollector* stats_collector_;
+  // Allow 1 MB heap by default.
+ size_t initial_heap_size_ = 1 * kMB;
+ size_t limit_ = 0; // See ConfigureLimit().
+
+ SingleThreadedHandle gc_task_handle_;
+};
+
+HeapGrowing::HeapGrowingImpl::HeapGrowingImpl(
+ GarbageCollector* collector, StatsCollector* stats_collector,
+ cppgc::Heap::ResourceConstraints constraints)
+ : collector_(collector),
+ stats_collector_(stats_collector),
+ gc_task_handle_(SingleThreadedHandle::NonEmptyTag{}) {
+ if (constraints.initial_heap_size_bytes > 0) {
+ initial_heap_size_ = constraints.initial_heap_size_bytes;
+ }
+ constexpr size_t kNoAllocatedBytes = 0;
+ ConfigureLimit(kNoAllocatedBytes);
+ stats_collector->RegisterObserver(this);
+}
+
+HeapGrowing::HeapGrowingImpl::~HeapGrowingImpl() {
+ stats_collector_->UnregisterObserver(this);
+}
+
+void HeapGrowing::HeapGrowingImpl::AllocatedObjectSizeIncreased(size_t) {
+ if (stats_collector_->allocated_object_size() > limit_) {
+ collector_->CollectGarbage(
+ GarbageCollector::Config::ConservativeAtomicConfig());
+ }
+}
+
+void HeapGrowing::HeapGrowingImpl::ResetAllocatedObjectSize(
+ size_t allocated_object_size) {
+ ConfigureLimit(allocated_object_size);
+}
+
+void HeapGrowing::HeapGrowingImpl::ConfigureLimit(
+ size_t allocated_object_size) {
+ const size_t size = std::max(allocated_object_size, initial_heap_size_);
+ limit_ = std::max(static_cast<size_t>(size * kGrowingFactor),
+ size + kMinLimitIncrease);
+}
+
+HeapGrowing::HeapGrowing(GarbageCollector* collector,
+ StatsCollector* stats_collector,
+ cppgc::Heap::ResourceConstraints constraints)
+ : impl_(std::make_unique<HeapGrowing::HeapGrowingImpl>(
+ collector, stats_collector, constraints)) {}
+
+HeapGrowing::~HeapGrowing() = default;
+
+size_t HeapGrowing::limit() const { return impl_->limit(); }
+
+// static
+constexpr double HeapGrowing::kGrowingFactor;
+
+} // namespace internal
+} // namespace cppgc
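ConfigureLimit() sets the next trigger to max(size * kGrowingFactor, size + kMinLimitIncrease), where size is at least the configured initial heap size. A worked example with the 1.5 factor from heap-growing.h and an assumed value for kMinLimitIncrease:

#include <algorithm>
#include <cassert>
#include <cstddef>

constexpr size_t kMB = 1024 * 1024;
constexpr double kGrowingFactor = 1.5;
constexpr size_t kMinLimitIncrease = 5 * 4096;  // assumed; depends on kPageSize

size_t ConfigureLimit(size_t allocated, size_t initial_heap_size) {
  const size_t size = std::max(allocated, initial_heap_size);
  return std::max(static_cast<size_t>(size * kGrowingFactor),
                  size + kMinLimitIncrease);
}

int main() {
  // Small heap: the 1 MB floor dominates, so the limit is 1.5 MB.
  assert(ConfigureLimit(100 * 1024, 1 * kMB) == static_cast<size_t>(1.5 * kMB));
  // Large heap: the growing factor dominates the fixed minimum increase.
  assert(ConfigureLimit(16 * kMB, 1 * kMB) == 24 * kMB);
  return 0;
}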
diff --git a/chromium/v8/src/heap/cppgc/heap-growing.h b/chromium/v8/src/heap/cppgc/heap-growing.h
new file mode 100644
index 00000000000..772fc2db55f
--- /dev/null
+++ b/chromium/v8/src/heap/cppgc/heap-growing.h
@@ -0,0 +1,53 @@
+// Copyright 2020 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_HEAP_CPPGC_HEAP_GROWING_H_
+#define V8_HEAP_CPPGC_HEAP_GROWING_H_
+
+#include "include/cppgc/heap.h"
+#include "src/base/macros.h"
+#include "src/heap/cppgc/globals.h"
+#include "src/heap/cppgc/raw-heap.h"
+
+namespace cppgc {
+
+class Platform;
+
+namespace internal {
+
+class GarbageCollector;
+class StatsCollector;
+
+// Growing strategy that invokes garbage collection using GarbageCollector based
+// on allocation statistics provided by StatsCollector and ResourceConstraints.
+//
+// Implements a fixed-ratio growing strategy with an initial heap size that the
+// GC can ignore to avoid excessive GCs for smaller heaps.
+class V8_EXPORT_PRIVATE HeapGrowing final {
+ public:
+ // Constant growing factor for growing the heap limit.
+ static constexpr double kGrowingFactor = 1.5;
+ // For smaller heaps, allow allocating at least LAB in each regular space
+ // before triggering GC again.
+ static constexpr size_t kMinLimitIncrease =
+ kPageSize * RawHeap::kNumberOfRegularSpaces;
+
+ HeapGrowing(GarbageCollector*, StatsCollector*,
+ cppgc::Heap::ResourceConstraints);
+ ~HeapGrowing();
+
+ HeapGrowing(const HeapGrowing&) = delete;
+ HeapGrowing& operator=(const HeapGrowing&) = delete;
+
+ size_t limit() const;
+
+ private:
+ class HeapGrowingImpl;
+ std::unique_ptr<HeapGrowingImpl> impl_;
+};
+
+} // namespace internal
+} // namespace cppgc
+
+#endif // V8_HEAP_CPPGC_HEAP_GROWING_H_
diff --git a/chromium/v8/src/heap/cppgc/heap-inl.h b/chromium/v8/src/heap/cppgc/heap-inl.h
deleted file mode 100644
index 4fe3186230f..00000000000
--- a/chromium/v8/src/heap/cppgc/heap-inl.h
+++ /dev/null
@@ -1,33 +0,0 @@
-// Copyright 2020 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef V8_HEAP_CPPGC_HEAP_INL_H_
-#define V8_HEAP_CPPGC_HEAP_INL_H_
-
-#include "src/heap/cppgc/globals.h"
-#include "src/heap/cppgc/heap.h"
-#include "src/heap/cppgc/object-allocator-inl.h"
-
-namespace cppgc {
-namespace internal {
-
-void* Heap::Allocate(size_t size, GCInfoIndex index) {
- DCHECK(is_allocation_allowed());
- void* result = object_allocator_.AllocateObject(size, index);
- objects_.push_back(&HeapObjectHeader::FromPayload(result));
- return result;
-}
-
-void* Heap::Allocate(size_t size, GCInfoIndex index,
- CustomSpaceIndex space_index) {
- DCHECK(is_allocation_allowed());
- void* result = object_allocator_.AllocateObject(size, index, space_index);
- objects_.push_back(&HeapObjectHeader::FromPayload(result));
- return result;
-}
-
-} // namespace internal
-} // namespace cppgc
-
-#endif // V8_HEAP_CPPGC_HEAP_INL_H_
diff --git a/chromium/v8/src/heap/cppgc/heap-object-header-inl.h b/chromium/v8/src/heap/cppgc/heap-object-header-inl.h
index cba7b24a4cb..0348013e08b 100644
--- a/chromium/v8/src/heap/cppgc/heap-object-header-inl.h
+++ b/chromium/v8/src/heap/cppgc/heap-object-header-inl.h
@@ -113,6 +113,11 @@ bool HeapObjectHeader::TryMarkAtomic() {
}
template <HeapObjectHeader::AccessMode mode>
+bool HeapObjectHeader::IsYoung() const {
+ return !IsMarked<mode>();
+}
+
+template <HeapObjectHeader::AccessMode mode>
bool HeapObjectHeader::IsFree() const {
return GetGCInfoIndex() == kFreeListGCInfoIndex;
}
diff --git a/chromium/v8/src/heap/cppgc/heap-object-header.h b/chromium/v8/src/heap/cppgc/heap-object-header.h
index b517617dd1e..9a2b5283888 100644
--- a/chromium/v8/src/heap/cppgc/heap-object-header.h
+++ b/chromium/v8/src/heap/cppgc/heap-object-header.h
@@ -80,6 +80,9 @@ class HeapObjectHeader {
inline bool TryMarkAtomic();
template <AccessMode = AccessMode::kNonAtomic>
+ bool IsYoung() const;
+
+ template <AccessMode = AccessMode::kNonAtomic>
bool IsFree() const;
inline bool IsFinalizable() const;
diff --git a/chromium/v8/src/heap/cppgc/heap-page-inl.h b/chromium/v8/src/heap/cppgc/heap-page-inl.h
new file mode 100644
index 00000000000..a416a62e492
--- /dev/null
+++ b/chromium/v8/src/heap/cppgc/heap-page-inl.h
@@ -0,0 +1,30 @@
+// Copyright 2020 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_HEAP_CPPGC_HEAP_PAGE_INL_H_
+#define V8_HEAP_CPPGC_HEAP_PAGE_INL_H_
+
+#include "src/heap/cppgc/heap-page.h"
+
+namespace cppgc {
+namespace internal {
+
+// static
+BasePage* BasePage::FromPayload(void* payload) {
+ return reinterpret_cast<BasePage*>(
+ (reinterpret_cast<uintptr_t>(payload) & kPageBaseMask) + kGuardPageSize);
+}
+
+// static
+const BasePage* BasePage::FromPayload(const void* payload) {
+ return reinterpret_cast<const BasePage*>(
+ (reinterpret_cast<uintptr_t>(const_cast<void*>(payload)) &
+ kPageBaseMask) +
+ kGuardPageSize);
+}
+
+} // namespace internal
+} // namespace cppgc
+
+#endif // V8_HEAP_CPPGC_HEAP_PAGE_INL_H_
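FromPayload() relies on the page layout: the BasePage header sits right after the leading guard page of a kPageSize-aligned region, so masking an inner address down to the page boundary and adding the guard page size yields the header. Illustrative address arithmetic (constants assumed, not necessarily cppgc's values; 64-bit address space assumed):

#include <cassert>
#include <cstdint>

constexpr uintptr_t kPageSize = uintptr_t{1} << 17;  // assumed 128 KiB pages
constexpr uintptr_t kGuardPageSize = 4096;           // assumed guard page size
constexpr uintptr_t kPageBaseMask = ~(kPageSize - 1);

uintptr_t PageHeaderFromInnerAddress(uintptr_t payload_address) {
  return (payload_address & kPageBaseMask) + kGuardPageSize;
}

int main() {
  const uintptr_t page_base = 0x7f0000020000;  // multiple of kPageSize
  const uintptr_t inner = page_base + kGuardPageSize + 0x1230;
  assert(PageHeaderFromInnerAddress(inner) == page_base + kGuardPageSize);
  return 0;
}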
diff --git a/chromium/v8/src/heap/cppgc/heap-page.cc b/chromium/v8/src/heap/cppgc/heap-page.cc
index e8afbafbd2a..f95f4a37eb6 100644
--- a/chromium/v8/src/heap/cppgc/heap-page.cc
+++ b/chromium/v8/src/heap/cppgc/heap-page.cc
@@ -14,7 +14,7 @@
#include "src/heap/cppgc/heap.h"
#include "src/heap/cppgc/object-start-bitmap-inl.h"
#include "src/heap/cppgc/object-start-bitmap.h"
-#include "src/heap/cppgc/page-memory.h"
+#include "src/heap/cppgc/page-memory-inl.h"
#include "src/heap/cppgc/raw-heap.h"
namespace cppgc {
@@ -27,63 +27,120 @@ Address AlignAddress(Address address, size_t alignment) {
RoundUp(reinterpret_cast<uintptr_t>(address), alignment));
}
-} // namespace
+const HeapObjectHeader* ObjectHeaderFromInnerAddressImpl(const BasePage* page,
+ const void* address) {
+ if (page->is_large()) {
+ return LargePage::From(page)->ObjectHeader();
+ }
+ const ObjectStartBitmap& bitmap =
+ NormalPage::From(page)->object_start_bitmap();
+ const HeapObjectHeader* header =
+ bitmap.FindHeader(static_cast<ConstAddress>(address));
+ DCHECK_LT(address,
+ reinterpret_cast<ConstAddress>(header) +
+ header->GetSize<HeapObjectHeader::AccessMode::kAtomic>());
+ return header;
+}
-STATIC_ASSERT(kPageSize == api_constants::kPageAlignment);
+} // namespace
// static
-BasePage* BasePage::FromPayload(void* payload) {
- return reinterpret_cast<BasePage*>(
- (reinterpret_cast<uintptr_t>(payload) & kPageBaseMask) + kGuardPageSize);
+BasePage* BasePage::FromInnerAddress(const HeapBase* heap, void* address) {
+ return const_cast<BasePage*>(
+ FromInnerAddress(heap, const_cast<const void*>(address)));
}
// static
-const BasePage* BasePage::FromPayload(const void* payload) {
+const BasePage* BasePage::FromInnerAddress(const HeapBase* heap,
+ const void* address) {
return reinterpret_cast<const BasePage*>(
- (reinterpret_cast<uintptr_t>(const_cast<void*>(payload)) &
- kPageBaseMask) +
- kGuardPageSize);
+ heap->page_backend()->Lookup(static_cast<ConstAddress>(address)));
}
-HeapObjectHeader* BasePage::ObjectHeaderFromInnerAddress(void* address) {
- return const_cast<HeapObjectHeader*>(
+// static
+void BasePage::Destroy(BasePage* page) {
+ if (page->is_large()) {
+ LargePage::Destroy(LargePage::From(page));
+ } else {
+ NormalPage::Destroy(NormalPage::From(page));
+ }
+}
+
+Address BasePage::PayloadStart() {
+ return is_large() ? LargePage::From(this)->PayloadStart()
+ : NormalPage::From(this)->PayloadStart();
+}
+
+ConstAddress BasePage::PayloadStart() const {
+ return const_cast<BasePage*>(this)->PayloadStart();
+}
+
+Address BasePage::PayloadEnd() {
+ return is_large() ? LargePage::From(this)->PayloadEnd()
+ : NormalPage::From(this)->PayloadEnd();
+}
+
+ConstAddress BasePage::PayloadEnd() const {
+ return const_cast<BasePage*>(this)->PayloadEnd();
+}
+
+HeapObjectHeader& BasePage::ObjectHeaderFromInnerAddress(void* address) const {
+ return const_cast<HeapObjectHeader&>(
ObjectHeaderFromInnerAddress(const_cast<const void*>(address)));
}
-const HeapObjectHeader* BasePage::ObjectHeaderFromInnerAddress(
- const void* address) {
+const HeapObjectHeader& BasePage::ObjectHeaderFromInnerAddress(
+ const void* address) const {
+ const HeapObjectHeader* header =
+ ObjectHeaderFromInnerAddressImpl(this, address);
+ DCHECK_NE(kFreeListGCInfoIndex, header->GetGCInfoIndex());
+ return *header;
+}
+
+HeapObjectHeader* BasePage::TryObjectHeaderFromInnerAddress(
+ void* address) const {
+ return const_cast<HeapObjectHeader*>(
+ TryObjectHeaderFromInnerAddress(const_cast<const void*>(address)));
+}
+
+const HeapObjectHeader* BasePage::TryObjectHeaderFromInnerAddress(
+ const void* address) const {
if (is_large()) {
- return LargePage::From(this)->ObjectHeader();
+ if (!LargePage::From(this)->PayloadContains(
+ static_cast<ConstAddress>(address)))
+ return nullptr;
+ } else {
+ const NormalPage* normal_page = NormalPage::From(this);
+ if (!normal_page->PayloadContains(static_cast<ConstAddress>(address)))
+ return nullptr;
+ // Check that the space has no linear allocation buffer.
+ DCHECK(!NormalPageSpace::From(normal_page->space())
+ ->linear_allocation_buffer()
+ .size());
}
- ObjectStartBitmap& bitmap = NormalPage::From(this)->object_start_bitmap();
- HeapObjectHeader* header =
- bitmap.FindHeader(static_cast<ConstAddress>(address));
- DCHECK_LT(address,
- reinterpret_cast<ConstAddress>(header) +
- header->GetSize<HeapObjectHeader::AccessMode::kAtomic>());
- DCHECK_NE(kFreeListGCInfoIndex,
- header->GetGCInfoIndex<HeapObjectHeader::AccessMode::kAtomic>());
+
+  // |address| is on the heap, so ObjectHeaderFromInnerAddressImpl can get the
+  // header.
+ const HeapObjectHeader* header =
+ ObjectHeaderFromInnerAddressImpl(this, address);
+ if (header->IsFree()) return nullptr;
+ DCHECK_NE(kFreeListGCInfoIndex, header->GetGCInfoIndex());
return header;
}
-BasePage::BasePage(Heap* heap, BaseSpace* space, PageType type)
+BasePage::BasePage(HeapBase* heap, BaseSpace* space, PageType type)
: heap_(heap), space_(space), type_(type) {
DCHECK_EQ(0u, (reinterpret_cast<uintptr_t>(this) - kGuardPageSize) &
kPageOffsetMask);
- DCHECK_EQ(reinterpret_cast<void*>(&heap_),
- FromPayload(this) + api_constants::kHeapOffset);
DCHECK_EQ(&heap_->raw_heap(), space_->raw_heap());
}
// static
-NormalPage* NormalPage::Create(NormalPageSpace* space) {
- DCHECK(space);
- Heap* heap = space->raw_heap()->heap();
- DCHECK(heap);
- void* memory = heap->page_backend()->AllocateNormalPageMemory(space->index());
- auto* normal_page = new (memory) NormalPage(heap, space);
- space->AddPage(normal_page);
- space->AddToFreeList(normal_page->PayloadStart(), normal_page->PayloadSize());
+NormalPage* NormalPage::Create(PageBackend* page_backend,
+ NormalPageSpace* space) {
+ DCHECK_NOT_NULL(page_backend);
+ DCHECK_NOT_NULL(space);
+ void* memory = page_backend->AllocateNormalPageMemory(space->index());
+ auto* normal_page = new (memory) NormalPage(space->raw_heap()->heap(), space);
return normal_page;
}
@@ -98,7 +155,7 @@ void NormalPage::Destroy(NormalPage* page) {
reinterpret_cast<Address>(page));
}
-NormalPage::NormalPage(Heap* heap, BaseSpace* space)
+NormalPage::NormalPage(HeapBase* heap, BaseSpace* space)
: BasePage(heap, space, PageType::kNormal),
object_start_bitmap_(PayloadStart()) {
DCHECK_LT(kLargeObjectSizeThreshold,
@@ -142,23 +199,25 @@ size_t NormalPage::PayloadSize() {
return kPageSize - 2 * kGuardPageSize - header_size;
}
-LargePage::LargePage(Heap* heap, BaseSpace* space, size_t size)
+LargePage::LargePage(HeapBase* heap, BaseSpace* space, size_t size)
: BasePage(heap, space, PageType::kLarge), payload_size_(size) {}
LargePage::~LargePage() = default;
// static
-LargePage* LargePage::Create(LargePageSpace* space, size_t size) {
- DCHECK(space);
+LargePage* LargePage::Create(PageBackend* page_backend, LargePageSpace* space,
+ size_t size) {
+ DCHECK_NOT_NULL(page_backend);
+ DCHECK_NOT_NULL(space);
DCHECK_LE(kLargeObjectSizeThreshold, size);
+
const size_t page_header_size =
RoundUp(sizeof(LargePage), kAllocationGranularity);
const size_t allocation_size = page_header_size + size;
- Heap* heap = space->raw_heap()->heap();
- void* memory = heap->page_backend()->AllocateLargePageMemory(allocation_size);
+ auto* heap = space->raw_heap()->heap();
+ void* memory = page_backend->AllocateLargePageMemory(allocation_size);
LargePage* page = new (memory) LargePage(heap, space, size);
- space->AddPage(page);
return page;
}
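TryObjectHeaderFromInnerAddress() tolerates addresses that do not resolve to a live object: it first checks that the address lies inside the payload and then rejects free-list entries. A simplified sketch of that guard logic with fake types:

#include <cassert>
#include <cstdint>

struct FakePage {
  uintptr_t payload_start;
  uintptr_t payload_end;
  bool header_is_free;  // stands in for header->IsFree()
};

// Returns whether a valid header would be found for |address|.
bool TryResolveHeader(const FakePage& page, uintptr_t address) {
  if (address < page.payload_start || address >= page.payload_end) return false;
  return !page.header_is_free;  // free-list entries yield no valid header
}

int main() {
  FakePage page{0x1000, 0x2000, false};
  assert(TryResolveHeader(page, 0x1800));
  assert(!TryResolveHeader(page, 0x2000));  // payload end is exclusive
  return 0;
}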
diff --git a/chromium/v8/src/heap/cppgc/heap-page.h b/chromium/v8/src/heap/cppgc/heap-page.h
index c676bc4bde0..7559d5f1ece 100644
--- a/chromium/v8/src/heap/cppgc/heap-page.h
+++ b/chromium/v8/src/heap/cppgc/heap-page.h
@@ -17,19 +17,24 @@ namespace internal {
class BaseSpace;
class NormalPageSpace;
class LargePageSpace;
-class Heap;
+class HeapBase;
class PageBackend;
class V8_EXPORT_PRIVATE BasePage {
public:
- static BasePage* FromPayload(void*);
- static const BasePage* FromPayload(const void*);
+ static inline BasePage* FromPayload(void*);
+ static inline const BasePage* FromPayload(const void*);
+
+ static BasePage* FromInnerAddress(const HeapBase*, void*);
+ static const BasePage* FromInnerAddress(const HeapBase*, const void*);
+
+ static void Destroy(BasePage*);
BasePage(const BasePage&) = delete;
BasePage& operator=(const BasePage&) = delete;
- Heap* heap() { return heap_; }
- const Heap* heap() const { return heap_; }
+ HeapBase* heap() { return heap_; }
+ const HeapBase* heap() const { return heap_; }
BaseSpace* space() { return space_; }
const BaseSpace* space() const { return space_; }
@@ -37,16 +42,29 @@ class V8_EXPORT_PRIVATE BasePage {
bool is_large() const { return type_ == PageType::kLarge; }
+ Address PayloadStart();
+ ConstAddress PayloadStart() const;
+ Address PayloadEnd();
+ ConstAddress PayloadEnd() const;
+
// |address| must refer to real object.
- HeapObjectHeader* ObjectHeaderFromInnerAddress(void* address);
- const HeapObjectHeader* ObjectHeaderFromInnerAddress(const void* address);
+ HeapObjectHeader& ObjectHeaderFromInnerAddress(void* address) const;
+ const HeapObjectHeader& ObjectHeaderFromInnerAddress(
+ const void* address) const;
+
+  // |address| is guaranteed to point into the page, but not necessarily into
+  // the payload. Returns nullptr when pointing into free list entries and the
+  // valid header otherwise.
+ HeapObjectHeader* TryObjectHeaderFromInnerAddress(void* address) const;
+ const HeapObjectHeader* TryObjectHeaderFromInnerAddress(
+ const void* address) const;
protected:
enum class PageType { kNormal, kLarge };
- BasePage(Heap*, BaseSpace*, PageType);
+ BasePage(HeapBase*, BaseSpace*, PageType);
private:
- Heap* heap_;
+ HeapBase* heap_;
BaseSpace* space_;
PageType type_;
};
@@ -98,8 +116,8 @@ class V8_EXPORT_PRIVATE NormalPage final : public BasePage {
using iterator = IteratorImpl<HeapObjectHeader>;
using const_iterator = IteratorImpl<const HeapObjectHeader>;
- // Allocates a new page.
- static NormalPage* Create(NormalPageSpace*);
+ // Allocates a new page in the detached state.
+ static NormalPage* Create(PageBackend*, NormalPageSpace*);
// Destroys and frees the page. The page must be detached from the
// corresponding space (i.e. be swept when called).
static void Destroy(NormalPage*);
@@ -130,13 +148,17 @@ class V8_EXPORT_PRIVATE NormalPage final : public BasePage {
static size_t PayloadSize();
+ bool PayloadContains(ConstAddress address) const {
+ return (PayloadStart() <= address) && (address < PayloadEnd());
+ }
+
ObjectStartBitmap& object_start_bitmap() { return object_start_bitmap_; }
const ObjectStartBitmap& object_start_bitmap() const {
return object_start_bitmap_;
}
private:
- NormalPage(Heap* heap, BaseSpace* space);
+ NormalPage(HeapBase* heap, BaseSpace* space);
~NormalPage();
ObjectStartBitmap object_start_bitmap_;
@@ -144,8 +166,8 @@ class V8_EXPORT_PRIVATE NormalPage final : public BasePage {
class V8_EXPORT_PRIVATE LargePage final : public BasePage {
public:
- // Allocates a new page.
- static LargePage* Create(LargePageSpace*, size_t);
+ // Allocates a new page in the detached state.
+ static LargePage* Create(PageBackend*, LargePageSpace*, size_t);
// Destroys and frees the page. The page must be detached from the
// corresponding space (i.e. be swept when called).
static void Destroy(LargePage*);
@@ -168,8 +190,12 @@ class V8_EXPORT_PRIVATE LargePage final : public BasePage {
size_t PayloadSize() const { return payload_size_; }
+ bool PayloadContains(ConstAddress address) const {
+ return (PayloadStart() <= address) && (address < PayloadEnd());
+ }
+
private:
- LargePage(Heap* heap, BaseSpace* space, size_t);
+ LargePage(HeapBase* heap, BaseSpace* space, size_t);
~LargePage();
size_t payload_size_;
diff --git a/chromium/v8/src/heap/cppgc/heap-space.cc b/chromium/v8/src/heap/cppgc/heap-space.cc
index 70ddb935314..3a213dc18ad 100644
--- a/chromium/v8/src/heap/cppgc/heap-space.cc
+++ b/chromium/v8/src/heap/cppgc/heap-space.cc
@@ -7,7 +7,8 @@
#include <algorithm>
#include "src/base/logging.h"
-#include "src/heap/cppgc/heap-page.h"
+#include "src/base/platform/mutex.h"
+#include "src/heap/cppgc/heap-page-inl.h"
#include "src/heap/cppgc/object-start-bitmap-inl.h"
namespace cppgc {
@@ -17,11 +18,13 @@ BaseSpace::BaseSpace(RawHeap* heap, size_t index, PageType type)
: heap_(heap), index_(index), type_(type) {}
void BaseSpace::AddPage(BasePage* page) {
+ v8::base::LockGuard<v8::base::Mutex> lock(&pages_mutex_);
DCHECK_EQ(pages_.cend(), std::find(pages_.cbegin(), pages_.cend(), page));
pages_.push_back(page);
}
void BaseSpace::RemovePage(BasePage* page) {
+ v8::base::LockGuard<v8::base::Mutex> lock(&pages_mutex_);
auto it = std::find(pages_.cbegin(), pages_.cend(), page);
DCHECK_NE(pages_.cend(), it);
pages_.erase(it);
@@ -36,21 +39,6 @@ BaseSpace::Pages BaseSpace::RemoveAllPages() {
NormalPageSpace::NormalPageSpace(RawHeap* heap, size_t index)
: BaseSpace(heap, index, PageType::kNormal) {}
-void NormalPageSpace::AddToFreeList(void* address, size_t size) {
- free_list_.Add({address, size});
- NormalPage::From(BasePage::FromPayload(address))
- ->object_start_bitmap()
- .SetBit(static_cast<Address>(address));
-}
-
-void NormalPageSpace::ResetLinearAllocationBuffer() {
- if (current_lab_.size()) {
- DCHECK_NOT_NULL(current_lab_.start());
- AddToFreeList(current_lab_.start(), current_lab_.size());
- current_lab_.Set(nullptr, 0);
- }
-}
-
LargePageSpace::LargePageSpace(RawHeap* heap, size_t index)
: BaseSpace(heap, index, PageType::kLarge) {}
diff --git a/chromium/v8/src/heap/cppgc/heap-space.h b/chromium/v8/src/heap/cppgc/heap-space.h
index d84207c2cd4..a7e50d4f48d 100644
--- a/chromium/v8/src/heap/cppgc/heap-space.h
+++ b/chromium/v8/src/heap/cppgc/heap-space.h
@@ -9,6 +9,7 @@
#include "src/base/logging.h"
#include "src/base/macros.h"
+#include "src/base/platform/mutex.h"
#include "src/heap/cppgc/free-list.h"
namespace cppgc {
@@ -53,6 +54,7 @@ class V8_EXPORT_PRIVATE BaseSpace {
private:
RawHeap* heap_;
Pages pages_;
+ v8::base::Mutex pages_mutex_;
const size_t index_;
const PageType type_;
};
@@ -92,9 +94,6 @@ class V8_EXPORT_PRIVATE NormalPageSpace final : public BaseSpace {
NormalPageSpace(RawHeap* heap, size_t index);
- void AddToFreeList(void*, size_t);
- void ResetLinearAllocationBuffer();
-
LinearAllocationBuffer& linear_allocation_buffer() { return current_lab_; }
const LinearAllocationBuffer& linear_allocation_buffer() const {
return current_lab_;
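AddPage()/RemovePage() are now guarded by pages_mutex_, presumably because pages can be registered and unregistered from more than one thread. A minimal sketch of the same guard pattern using std::mutex in place of v8::base::Mutex:

#include <algorithm>
#include <mutex>
#include <vector>

class PageList {
 public:
  void Add(void* page) {
    std::lock_guard<std::mutex> lock(mutex_);
    pages_.push_back(page);
  }
  void Remove(void* page) {
    std::lock_guard<std::mutex> lock(mutex_);
    auto it = std::find(pages_.begin(), pages_.end(), page);
    if (it != pages_.end()) pages_.erase(it);
  }

 private:
  std::mutex mutex_;
  std::vector<void*> pages_;
};

int main() {
  PageList list;
  int page = 0;
  list.Add(&page);
  list.Remove(&page);
  return 0;
}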
diff --git a/chromium/v8/src/heap/cppgc/heap.cc b/chromium/v8/src/heap/cppgc/heap.cc
index ee400cee28c..431ad8df668 100644
--- a/chromium/v8/src/heap/cppgc/heap.cc
+++ b/chromium/v8/src/heap/cppgc/heap.cc
@@ -4,15 +4,13 @@
#include "src/heap/cppgc/heap.h"
-#include <memory>
-
-#include "src/base/platform/platform.h"
+#include "src/heap/base/stack.h"
+#include "src/heap/cppgc/garbage-collector.h"
+#include "src/heap/cppgc/gc-invoker.h"
#include "src/heap/cppgc/heap-object-header-inl.h"
-#include "src/heap/cppgc/heap-object-header.h"
-#include "src/heap/cppgc/heap-page.h"
#include "src/heap/cppgc/heap-visitor.h"
-#include "src/heap/cppgc/stack.h"
-#include "src/heap/cppgc/sweeper.h"
+#include "src/heap/cppgc/marker.h"
+#include "src/heap/cppgc/prefinalizer-handler.h"
namespace cppgc {
@@ -31,49 +29,49 @@ void VerifyCustomSpaces(
} // namespace
-std::unique_ptr<Heap> Heap::Create(cppgc::Heap::HeapOptions options) {
+std::unique_ptr<Heap> Heap::Create(std::shared_ptr<cppgc::Platform> platform,
+ cppgc::Heap::HeapOptions options) {
+ DCHECK(platform.get());
VerifyCustomSpaces(options.custom_spaces);
- return std::make_unique<internal::Heap>(options.custom_spaces.size());
+ return std::make_unique<internal::Heap>(std::move(platform),
+ std::move(options));
}
void Heap::ForceGarbageCollectionSlow(const char* source, const char* reason,
Heap::StackState stack_state) {
- internal::Heap::From(this)->CollectGarbage({stack_state});
+ internal::Heap::From(this)->CollectGarbage(
+ {internal::GarbageCollector::Config::CollectionType::kMajor,
+ stack_state});
+}
+
+AllocationHandle& Heap::GetAllocationHandle() {
+ return internal::Heap::From(this)->object_allocator();
}
namespace internal {
namespace {
-class ObjectSizeCounter : private HeapVisitor<ObjectSizeCounter> {
- friend class HeapVisitor<ObjectSizeCounter>;
+class Unmarker final : private HeapVisitor<Unmarker> {
+ friend class HeapVisitor<Unmarker>;
public:
- size_t GetSize(RawHeap* heap) {
- Traverse(heap);
- return accumulated_size_;
- }
+ explicit Unmarker(RawHeap* heap) { Traverse(heap); }
private:
- static size_t ObjectSize(const HeapObjectHeader* header) {
- const size_t size =
- header->IsLargeObject()
- ? static_cast<const LargePage*>(BasePage::FromPayload(header))
- ->PayloadSize()
- : header->GetSize();
- DCHECK_GE(size, sizeof(HeapObjectHeader));
- return size - sizeof(HeapObjectHeader);
- }
-
bool VisitHeapObjectHeader(HeapObjectHeader* header) {
- if (header->IsFree()) return true;
- accumulated_size_ += ObjectSize(header);
+ if (header->IsMarked()) header->Unmark();
return true;
}
-
- size_t accumulated_size_ = 0;
};
+void CheckConfig(Heap::Config config) {
+ CHECK_WITH_MSG(
+ (config.collection_type != Heap::Config::CollectionType::kMinor) ||
+ (config.stack_state == Heap::Config::StackState::kNoHeapPointers),
+ "Minor GCs with stack is currently not supported");
+}
+
} // namespace
// static
@@ -81,56 +79,50 @@ cppgc::LivenessBroker LivenessBrokerFactory::Create() {
return cppgc::LivenessBroker();
}
-Heap::Heap(size_t custom_spaces)
- : raw_heap_(this, custom_spaces),
- page_backend_(std::make_unique<PageBackend>(&system_allocator_)),
- object_allocator_(&raw_heap_),
- sweeper_(&raw_heap_),
- stack_(std::make_unique<Stack>(v8::base::Stack::GetStackStart())),
- prefinalizer_handler_(std::make_unique<PreFinalizerHandler>()) {}
+Heap::Heap(std::shared_ptr<cppgc::Platform> platform,
+ cppgc::Heap::HeapOptions options)
+ : HeapBase(platform, options.custom_spaces.size()),
+ gc_invoker_(this, platform_.get(), options.stack_support),
+ growing_(&gc_invoker_, stats_collector_.get(),
+ options.resource_constraints) {}
Heap::~Heap() {
- NoGCScope no_gc(this);
+ NoGCScope no_gc(*this);
// Finish already running GC if any, but don't finalize live objects.
sweeper_.Finish();
}
-void Heap::CollectGarbage(GCConfig config) {
+void Heap::CollectGarbage(Config config) {
+ CheckConfig(config);
+
if (in_no_gc_scope()) return;
epoch_++;
- // TODO(chromium:1056170): Replace with proper mark-sweep algorithm.
+#if defined(CPPGC_YOUNG_GENERATION)
+ if (config.collection_type == Config::CollectionType::kMajor)
+ Unmarker unmarker(&raw_heap());
+#endif
+
// "Marking".
- marker_ = std::make_unique<Marker>(this);
- marker_->StartMarking(Marker::MarkingConfig(config.stack_state));
- marker_->FinishMarking();
+ marker_ = std::make_unique<Marker>(AsBase());
+ const Marker::MarkingConfig marking_config{
+ config.collection_type, config.stack_state, config.marking_type};
+ marker_->StartMarking(marking_config);
+ marker_->FinishMarking(marking_config);
// "Sweeping and finalization".
{
// Pre finalizers are forbidden from allocating objects
- NoAllocationScope no_allocation_scope_(this);
+ ObjectAllocator::NoAllocationScope no_allocation_scope_(object_allocator_);
marker_->ProcessWeakness();
prefinalizer_handler_->InvokePreFinalizers();
}
marker_.reset();
{
- NoGCScope no_gc(this);
- sweeper_.Start(Sweeper::Config::kAtomic);
+ NoGCScope no_gc(*this);
+ sweeper_.Start(config.sweeping_type);
}
}
-size_t Heap::ObjectPayloadSize() const {
- return ObjectSizeCounter().GetSize(const_cast<RawHeap*>(&raw_heap()));
-}
-
-Heap::NoGCScope::NoGCScope(Heap* heap) : heap_(heap) { heap_->no_gc_scope_++; }
-
-Heap::NoGCScope::~NoGCScope() { heap_->no_gc_scope_--; }
-
-Heap::NoAllocationScope::NoAllocationScope(Heap* heap) : heap_(heap) {
- heap_->no_allocation_scope_++;
-}
-Heap::NoAllocationScope::~NoAllocationScope() { heap_->no_allocation_scope_--; }
-
} // namespace internal
} // namespace cppgc
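Read top to bottom, the rewritten CollectGarbage() runs the cycle in a fixed order. The stubs below only restate that order (illustrative names, not the cppgc API):

#include <cstdio>

// Stand-ins for the unmarker, marker, prefinalizer handler and sweeper.
void UnmarkAll() { std::puts("1. unmark (major GC, young-generation builds)"); }
void Mark() { std::puts("2. mark roots, plus the stack if conservative"); }
void ProcessWeakness() { std::puts("3. weak callbacks (allocation forbidden)"); }
void InvokePreFinalizers() { std::puts("4. prefinalizers (allocation forbidden)"); }
void Sweep() { std::puts("5. sweep and finalize"); }

int main() {
  UnmarkAll();
  Mark();
  ProcessWeakness();
  InvokePreFinalizers();
  Sweep();
  return 0;
}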
diff --git a/chromium/v8/src/heap/cppgc/heap.h b/chromium/v8/src/heap/cppgc/heap.h
index fa19b74be53..f96f81e3217 100644
--- a/chromium/v8/src/heap/cppgc/heap.h
+++ b/chromium/v8/src/heap/cppgc/heap.h
@@ -5,143 +5,47 @@
#ifndef V8_HEAP_CPPGC_HEAP_H_
#define V8_HEAP_CPPGC_HEAP_H_
-#include <memory>
-#include <vector>
-
#include "include/cppgc/heap.h"
-#include "include/cppgc/internal/gc-info.h"
-#include "include/cppgc/internal/persistent-node.h"
#include "include/cppgc/liveness-broker.h"
-#include "src/base/page-allocator.h"
-#include "src/heap/cppgc/heap-object-header.h"
-#include "src/heap/cppgc/marker.h"
-#include "src/heap/cppgc/object-allocator.h"
-#include "src/heap/cppgc/page-memory.h"
-#include "src/heap/cppgc/prefinalizer-handler.h"
-#include "src/heap/cppgc/raw-heap.h"
-#include "src/heap/cppgc/sweeper.h"
+#include "include/cppgc/macros.h"
+#include "src/heap/cppgc/garbage-collector.h"
+#include "src/heap/cppgc/gc-invoker.h"
+#include "src/heap/cppgc/heap-base.h"
+#include "src/heap/cppgc/heap-growing.h"
namespace cppgc {
namespace internal {
-class Stack;
-
class V8_EXPORT_PRIVATE LivenessBrokerFactory {
public:
static LivenessBroker Create();
};
-class V8_EXPORT_PRIVATE Heap final : public cppgc::Heap {
+class V8_EXPORT_PRIVATE Heap final : public HeapBase,
+ public cppgc::Heap,
+ public GarbageCollector {
public:
- // NoGCScope allows going over limits and avoids triggering garbage
- // collection triggered through allocations or even explicitly.
- class V8_EXPORT_PRIVATE NoGCScope final {
- CPPGC_STACK_ALLOCATED();
-
- public:
- explicit NoGCScope(Heap* heap);
- ~NoGCScope();
-
- NoGCScope(const NoGCScope&) = delete;
- NoGCScope& operator=(const NoGCScope&) = delete;
-
- private:
- Heap* const heap_;
- };
-
- // NoAllocationScope is used in debug mode to catch unwanted allocations. E.g.
- // allocations during GC.
- class V8_EXPORT_PRIVATE NoAllocationScope final {
- CPPGC_STACK_ALLOCATED();
-
- public:
- explicit NoAllocationScope(Heap* heap);
- ~NoAllocationScope();
-
- NoAllocationScope(const NoAllocationScope&) = delete;
- NoAllocationScope& operator=(const NoAllocationScope&) = delete;
-
- private:
- Heap* const heap_;
- };
-
- struct GCConfig {
- using StackState = Heap::StackState;
-
- static GCConfig Default() { return {StackState::kMayContainHeapPointers}; }
-
- StackState stack_state = StackState::kMayContainHeapPointers;
- };
-
static Heap* From(cppgc::Heap* heap) { return static_cast<Heap*>(heap); }
-
- explicit Heap(size_t custom_spaces);
- ~Heap() final;
-
- inline void* Allocate(size_t size, GCInfoIndex index);
- inline void* Allocate(size_t size, GCInfoIndex index,
- CustomSpaceIndex space_index);
-
- void CollectGarbage(GCConfig config = GCConfig::Default());
-
- PreFinalizerHandler* prefinalizer_handler() {
- return prefinalizer_handler_.get();
- }
-
- PersistentRegion& GetStrongPersistentRegion() {
- return strong_persistent_region_;
+ static const Heap* From(const cppgc::Heap* heap) {
+ return static_cast<const Heap*>(heap);
}
- const PersistentRegion& GetStrongPersistentRegion() const {
- return strong_persistent_region_;
- }
- PersistentRegion& GetWeakPersistentRegion() {
- return weak_persistent_region_;
- }
- const PersistentRegion& GetWeakPersistentRegion() const {
- return weak_persistent_region_;
- }
-
- RawHeap& raw_heap() { return raw_heap_; }
- const RawHeap& raw_heap() const { return raw_heap_; }
- Stack* stack() { return stack_.get(); }
-
- PageBackend* page_backend() { return page_backend_.get(); }
- const PageBackend* page_backend() const { return page_backend_.get(); }
-
- Sweeper& sweeper() { return sweeper_; }
+ Heap(std::shared_ptr<cppgc::Platform> platform,
+ cppgc::Heap::HeapOptions options);
+ ~Heap() final;
- size_t epoch() const { return epoch_; }
+ HeapBase& AsBase() { return *this; }
+ const HeapBase& AsBase() const { return *this; }
- size_t ObjectPayloadSize() const;
+ void CollectGarbage(Config config) final;
- // Temporary getter until proper visitation of on-stack objects is
- // implemented.
- std::vector<HeapObjectHeader*>& objects() { return objects_; }
+ size_t epoch() const final { return epoch_; }
private:
- bool in_no_gc_scope() const { return no_gc_scope_ > 0; }
- bool is_allocation_allowed() const { return no_allocation_scope_ == 0; }
-
- RawHeap raw_heap_;
-
- v8::base::PageAllocator system_allocator_;
- std::unique_ptr<PageBackend> page_backend_;
- ObjectAllocator object_allocator_;
- Sweeper sweeper_;
-
- std::unique_ptr<Stack> stack_;
- std::unique_ptr<PreFinalizerHandler> prefinalizer_handler_;
- std::unique_ptr<Marker> marker_;
- std::vector<HeapObjectHeader*> objects_;
-
- PersistentRegion strong_persistent_region_;
- PersistentRegion weak_persistent_region_;
+ GCInvoker gc_invoker_;
+ HeapGrowing growing_;
size_t epoch_ = 0;
-
- size_t no_gc_scope_ = 0;
- size_t no_allocation_scope_ = 0;
};
} // namespace internal
diff --git a/chromium/v8/src/heap/cppgc/marker.cc b/chromium/v8/src/heap/cppgc/marker.cc
index 5a30c89f0dd..1ba6d766a4f 100644
--- a/chromium/v8/src/heap/cppgc/marker.cc
+++ b/chromium/v8/src/heap/cppgc/marker.cc
@@ -4,14 +4,75 @@
#include "src/heap/cppgc/marker.h"
+#include "include/cppgc/internal/process-heap.h"
#include "src/heap/cppgc/heap-object-header-inl.h"
+#include "src/heap/cppgc/heap-page-inl.h"
+#include "src/heap/cppgc/heap-visitor.h"
#include "src/heap/cppgc/heap.h"
#include "src/heap/cppgc/marking-visitor.h"
+#include "src/heap/cppgc/stats-collector.h"
+
+#if defined(CPPGC_CAGED_HEAP)
+#include "include/cppgc/internal/caged-heap-local-data.h"
+#endif
namespace cppgc {
namespace internal {
namespace {
+
+void EnterIncrementalMarkingIfNeeded(Marker::MarkingConfig config,
+ HeapBase& heap) {
+ if (config.marking_type == Marker::MarkingConfig::MarkingType::kIncremental ||
+ config.marking_type ==
+ Marker::MarkingConfig::MarkingType::kIncrementalAndConcurrent) {
+ ProcessHeap::EnterIncrementalOrConcurrentMarking();
+ }
+#if defined(CPPGC_CAGED_HEAP)
+ heap.caged_heap().local_data().is_marking_in_progress = true;
+#endif
+}
+
+void ExitIncrementalMarkingIfNeeded(Marker::MarkingConfig config,
+ HeapBase& heap) {
+ if (config.marking_type == Marker::MarkingConfig::MarkingType::kIncremental ||
+ config.marking_type ==
+ Marker::MarkingConfig::MarkingType::kIncrementalAndConcurrent) {
+ ProcessHeap::ExitIncrementalOrConcurrentMarking();
+ }
+#if defined(CPPGC_CAGED_HEAP)
+ heap.caged_heap().local_data().is_marking_in_progress = false;
+#endif
+}
+
+// Visit remembered set that was recorded in the generational barrier.
+void VisitRememberedSlots(HeapBase& heap, MarkingVisitor* visitor) {
+#if defined(CPPGC_YOUNG_GENERATION)
+ for (void* slot : heap.remembered_slots()) {
+ auto& slot_header = BasePage::FromInnerAddress(&heap, slot)
+ ->ObjectHeaderFromInnerAddress(slot);
+ if (slot_header.IsYoung()) continue;
+    // The design of the young generation requires collections to be executed
+    // at the top level (with the guarantee that no objects are currently in
+    // construction). This can be ensured by running young GCs from safe points
+    // or by reintroducing nested allocation scopes that avoid finalization.
+ DCHECK(!MarkingVisitor::IsInConstruction(slot_header));
+
+ void* value = *reinterpret_cast<void**>(slot);
+ visitor->DynamicallyMarkAddress(static_cast<Address>(value));
+ }
+#endif
+}
+
+// Assumes that all spaces have their LABs reset.
+void ResetRememberedSet(HeapBase& heap) {
+#if defined(CPPGC_YOUNG_GENERATION)
+ auto& local_data = heap.caged_heap().local_data();
+ local_data.age_table.Reset(&heap.caged_heap().allocator());
+ heap.remembered_slots().clear();
+#endif
+}
+
template <typename Worklist, typename Callback>
bool DrainWorklistWithDeadline(v8::base::TimeTicks deadline, Worklist* worklist,
Callback callback, int task_id) {
@@ -31,11 +92,12 @@ bool DrainWorklistWithDeadline(v8::base::TimeTicks deadline, Worklist* worklist,
}
return true;
}
+
} // namespace
constexpr int Marker::kMutatorThreadId;
-Marker::Marker(Heap* heap)
+Marker::Marker(HeapBase& heap)
: heap_(heap), marking_visitor_(CreateMutatorThreadMarkingVisitor()) {}
Marker::~Marker() {
@@ -44,17 +106,15 @@ Marker::~Marker() {
// and should thus already be marked.
if (!not_fully_constructed_worklist_.IsEmpty()) {
#if DEBUG
- DCHECK_NE(MarkingConfig::StackState::kNoHeapPointers, config_.stack_state_);
+ DCHECK_NE(MarkingConfig::StackState::kNoHeapPointers, config_.stack_state);
NotFullyConstructedItem item;
NotFullyConstructedWorklist::View view(&not_fully_constructed_worklist_,
kMutatorThreadId);
while (view.Pop(&item)) {
- // TODO(chromium:1056170): uncomment following check after implementing
- // FromInnerAddress.
- //
- // HeapObjectHeader* const header = HeapObjectHeader::FromInnerAddress(
- // reinterpret_cast<Address>(const_cast<void*>(item)));
- // DCHECK(header->IsMarked())
+ const HeapObjectHeader& header =
+ BasePage::FromPayload(item)->ObjectHeaderFromInnerAddress(
+ static_cast<ConstAddress>(item));
+ DCHECK(header.IsMarked());
}
#else
not_fully_constructed_worklist_.Clear();
@@ -63,19 +123,40 @@ Marker::~Marker() {
}
void Marker::StartMarking(MarkingConfig config) {
+ heap().stats_collector()->NotifyMarkingStarted();
+
config_ = config;
VisitRoots();
+ EnterIncrementalMarkingIfNeeded(config, heap());
}
-void Marker::FinishMarking() {
- if (config_.stack_state_ == MarkingConfig::StackState::kNoHeapPointers) {
+void Marker::EnterAtomicPause(MarkingConfig config) {
+ ExitIncrementalMarkingIfNeeded(config_, heap());
+ config_ = config;
+
+ // VisitRoots also resets the LABs.
+ VisitRoots();
+ if (config_.stack_state == MarkingConfig::StackState::kNoHeapPointers) {
FlushNotFullyConstructedObjects();
+ } else {
+ MarkNotFullyConstructedObjects();
}
+}
+
+void Marker::LeaveAtomicPause() {
+ ResetRememberedSet(heap());
+ heap().stats_collector()->NotifyMarkingCompleted(
+ marking_visitor_->marked_bytes());
+}
+
+void Marker::FinishMarking(MarkingConfig config) {
+ EnterAtomicPause(config);
AdvanceMarkingWithDeadline(v8::base::TimeDelta::Max());
+ LeaveAtomicPause();
}
void Marker::ProcessWeakness() {
- heap_->GetWeakPersistentRegion().Trace(marking_visitor_.get());
+ heap().GetWeakPersistentRegion().Trace(marking_visitor_.get());
// Call weak callbacks on objects that may now be pointing to dead objects.
WeakCallbackItem item;
@@ -89,9 +170,17 @@ void Marker::ProcessWeakness() {
}
void Marker::VisitRoots() {
- heap_->GetStrongPersistentRegion().Trace(marking_visitor_.get());
- if (config_.stack_state_ != MarkingConfig::StackState::kNoHeapPointers)
- heap_->stack()->IteratePointers(marking_visitor_.get());
+ // Reset LABs before scanning roots. LABs are cleared to allow
+ // ObjectStartBitmap handling without considering LABs.
+ heap().object_allocator().ResetLinearAllocationBuffers();
+
+ heap().GetStrongPersistentRegion().Trace(marking_visitor_.get());
+ if (config_.stack_state != MarkingConfig::StackState::kNoHeapPointers) {
+ heap().stack()->IteratePointers(marking_visitor_.get());
+ }
+ if (config_.collection_type == MarkingConfig::CollectionType::kMinor) {
+ VisitRememberedSlots(heap(), marking_visitor_.get());
+ }
}
std::unique_ptr<MutatorThreadMarkingVisitor>
@@ -127,6 +216,19 @@ bool Marker::AdvanceMarkingWithDeadline(v8::base::TimeDelta duration) {
},
kMutatorThreadId))
return false;
+
+ if (!DrainWorklistWithDeadline(
+ deadline, &write_barrier_worklist_,
+ [visitor](HeapObjectHeader* header) {
+ DCHECK(header);
+ DCHECK(!MutatorThreadMarkingVisitor::IsInConstruction(*header));
+ const GCInfo& gcinfo =
+ GlobalGCInfoTable::GCInfoFromIndex(header->GetGCInfoIndex());
+ gcinfo.trace(visitor, header->Payload());
+ visitor->AccountMarkedBytes(*header);
+ },
+ kMutatorThreadId))
+ return false;
} while (!marking_worklist_.IsLocalViewEmpty(kMutatorThreadId));
return true;
@@ -141,10 +243,20 @@ void Marker::FlushNotFullyConstructedObjects() {
DCHECK(not_fully_constructed_worklist_.IsLocalViewEmpty(kMutatorThreadId));
}
+void Marker::MarkNotFullyConstructedObjects() {
+ NotFullyConstructedItem item;
+ NotFullyConstructedWorklist::View view(&not_fully_constructed_worklist_,
+ kMutatorThreadId);
+ while (view.Pop(&item)) {
+ marking_visitor_->TraceConservativelyIfNeeded(item);
+ }
+}
+
void Marker::ClearAllWorklistsForTesting() {
marking_worklist_.Clear();
not_fully_constructed_worklist_.Clear();
previously_not_fully_constructed_worklist_.Clear();
+ write_barrier_worklist_.Clear();
weak_callback_worklist_.Clear();
}
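
For readers unfamiliar with the deadline-bounded draining used by DrainWorklistWithDeadline above, here is a standalone sketch of the same pattern; SimpleWorklist and the check interval are invented stand-ins, not the cppgc types:

#include <chrono>
#include <cstddef>
#include <deque>

// Minimal stand-in for a marking worklist; the real cppgc Worklist is
// segmented and supports multiple tasks.
template <typename T>
class SimpleWorklist {
 public:
  bool Pop(T* out) {
    if (items_.empty()) return false;
    *out = items_.front();
    items_.pop_front();
    return true;
  }
  void Push(T item) { items_.push_back(std::move(item)); }

 private:
  std::deque<T> items_;
};

// Processes items until the worklist is empty or the deadline has passed.
// The clock is only consulted every kDeadlineCheckInterval items to keep
// timing overhead low, mirroring the helper in marker.cc.
template <typename T, typename Callback>
bool DrainWithDeadline(std::chrono::steady_clock::time_point deadline,
                       SimpleWorklist<T>* worklist, Callback callback) {
  constexpr std::size_t kDeadlineCheckInterval = 64;  // Illustrative value.
  std::size_t processed = 0;
  T item;
  while (worklist->Pop(&item)) {
    callback(item);
    if (++processed % kDeadlineCheckInterval == 0 &&
        std::chrono::steady_clock::now() >= deadline) {
      return false;  // Budget exhausted; the caller schedules another step.
    }
  }
  return true;  // Fully drained.
}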
diff --git a/chromium/v8/src/heap/cppgc/marker.h b/chromium/v8/src/heap/cppgc/marker.h
index c18c23df2ca..3edba06c4b6 100644
--- a/chromium/v8/src/heap/cppgc/marker.h
+++ b/chromium/v8/src/heap/cppgc/marker.h
@@ -16,9 +16,19 @@
namespace cppgc {
namespace internal {
-class Heap;
+class HeapBase;
+class HeapObjectHeader;
class MutatorThreadMarkingVisitor;
+// Marking algorithm. Example of a valid call sequence for the marking phase:
+// 1. StartMarking()
+// 2. AdvanceMarkingWithDeadline() [Optional, depending on environment.]
+// 3. EnterAtomicPause()
+// 4. AdvanceMarkingWithDeadline()
+// 5. LeaveAtomicPause()
+//
+// Alternatively, FinishMarking combines steps 3.-5.
class V8_EXPORT_PRIVATE Marker {
static constexpr int kNumConcurrentMarkers = 0;
static constexpr int kNumMarkers = 1 + kNumConcurrentMarkers;
@@ -41,34 +51,29 @@ class V8_EXPORT_PRIVATE Marker {
Worklist<NotFullyConstructedItem, 16 /* local entries */, kNumMarkers>;
using WeakCallbackWorklist =
Worklist<WeakCallbackItem, 64 /* local entries */, kNumMarkers>;
+ using WriteBarrierWorklist =
+ Worklist<HeapObjectHeader*, 64 /*local entries */, kNumMarkers>;
struct MarkingConfig {
+ enum class CollectionType : uint8_t {
+ kMinor,
+ kMajor,
+ };
using StackState = cppgc::Heap::StackState;
- enum class IncrementalMarking : uint8_t { kDisabled };
- enum class ConcurrentMarking : uint8_t { kDisabled };
-
- static MarkingConfig Default() {
- return {StackState::kMayContainHeapPointers,
- IncrementalMarking::kDisabled, ConcurrentMarking::kDisabled};
- }
-
- explicit MarkingConfig(StackState stack_state)
- : MarkingConfig(stack_state, IncrementalMarking::kDisabled,
- ConcurrentMarking::kDisabled) {}
-
- MarkingConfig(StackState stack_state,
- IncrementalMarking incremental_marking_state,
- ConcurrentMarking concurrent_marking_state)
- : stack_state_(stack_state),
- incremental_marking_state_(incremental_marking_state),
- concurrent_marking_state_(concurrent_marking_state) {}
-
- StackState stack_state_;
- IncrementalMarking incremental_marking_state_;
- ConcurrentMarking concurrent_marking_state_;
+ enum MarkingType : uint8_t {
+ kAtomic,
+ kIncremental,
+ kIncrementalAndConcurrent
+ };
+
+ static constexpr MarkingConfig Default() { return {}; }
+
+ CollectionType collection_type = CollectionType::kMajor;
+ StackState stack_state = StackState::kMayContainHeapPointers;
+ MarkingType marking_type = MarkingType::kAtomic;
};
- explicit Marker(Heap* heap);
+ explicit Marker(HeapBase& heap);
virtual ~Marker();
Marker(const Marker&) = delete;
@@ -77,34 +82,56 @@ class V8_EXPORT_PRIVATE Marker {
// Initialize marking according to the given config. This method will
// trigger incremental/concurrent marking if needed.
void StartMarking(MarkingConfig config);
- // Finalize marking. This method stops incremental/concurrent marking
- // if exsists and performs atomic pause marking.
- void FinishMarking();
+
+ // Signals entering the atomic marking pause. The method
+ // - stops incremental/concurrent marking;
+ // - flushes back any in-construction worklists if needed;
+ // - updates the MarkingConfig if the stack state has changed.
+ void EnterAtomicPause(MarkingConfig config);
+
+ // Makes marking progress.
+ virtual bool AdvanceMarkingWithDeadline(v8::base::TimeDelta);
+
+ // Signals leaving the atomic marking pause. This method expects no more
+ // objects to be marked and merely updates marking states if needed.
+ void LeaveAtomicPause();
+
+ // Combines:
+ // - EnterAtomicPause()
+ // - AdvanceMarkingWithDeadline()
+ // - LeaveAtomicPause()
+ void FinishMarking(MarkingConfig config);
void ProcessWeakness();
- Heap* heap() { return heap_; }
+ HeapBase& heap() { return heap_; }
MarkingWorklist* marking_worklist() { return &marking_worklist_; }
NotFullyConstructedWorklist* not_fully_constructed_worklist() {
return &not_fully_constructed_worklist_;
}
+ WriteBarrierWorklist* write_barrier_worklist() {
+ return &write_barrier_worklist_;
+ }
WeakCallbackWorklist* weak_callback_worklist() {
return &weak_callback_worklist_;
}
void ClearAllWorklistsForTesting();
+ MutatorThreadMarkingVisitor* GetMarkingVisitorForTesting() {
+ return marking_visitor_.get();
+ }
+
protected:
virtual std::unique_ptr<MutatorThreadMarkingVisitor>
CreateMutatorThreadMarkingVisitor();
- private:
void VisitRoots();
- bool AdvanceMarkingWithDeadline(v8::base::TimeDelta);
void FlushNotFullyConstructedObjects();
+ void MarkNotFullyConstructedObjects();
- Heap* const heap_;
+ HeapBase& heap_;
MarkingConfig config_ = MarkingConfig::Default();
std::unique_ptr<MutatorThreadMarkingVisitor> marking_visitor_;
@@ -112,6 +139,7 @@ class V8_EXPORT_PRIVATE Marker {
MarkingWorklist marking_worklist_;
NotFullyConstructedWorklist not_fully_constructed_worklist_;
NotFullyConstructedWorklist previously_not_fully_constructed_worklist_;
+ WriteBarrierWorklist write_barrier_worklist_;
WeakCallbackWorklist weak_callback_worklist_;
};
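
To make the call-sequence comment above concrete, the following hedged sketch drives the Marker API through an incremental cycle; the driver function, its step budget, and the loop structure are illustrative and not how heap.cc actually schedules marking:

#include "src/heap/cppgc/marker.h"

namespace cppgc {
namespace internal {

// Hypothetical driver for one incremental marking cycle.
void RunIncrementalMarkingCycle(HeapBase& heap) {
  Marker marker(heap);

  Marker::MarkingConfig config;
  config.collection_type = Marker::MarkingConfig::CollectionType::kMajor;
  config.stack_state = Marker::MarkingConfig::StackState::kMayContainHeapPointers;
  config.marking_type = Marker::MarkingConfig::MarkingType::kIncremental;

  // 1. Visit roots and enter incremental marking.
  marker.StartMarking(config);

  // 2. Make bounded progress; a real embedder would yield to its task
  //    scheduler between steps instead of spinning.
  while (!marker.AdvanceMarkingWithDeadline(
      v8::base::TimeDelta::FromMilliseconds(2))) {
  }

  // 3.-5. Atomic pause: FinishMarking() combines EnterAtomicPause(),
  //       AdvanceMarkingWithDeadline(), and LeaveAtomicPause().
  Marker::MarkingConfig atomic_config = config;
  atomic_config.marking_type = Marker::MarkingConfig::MarkingType::kAtomic;
  marker.FinishMarking(atomic_config);

  // Weak references are processed once marking has completed.
  marker.ProcessWeakness();
}

}  // namespace internal
}  // namespace cppgc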
diff --git a/chromium/v8/src/heap/cppgc/marking-visitor.cc b/chromium/v8/src/heap/cppgc/marking-visitor.cc
index 9647f9b3ca3..37d88e65ee3 100644
--- a/chromium/v8/src/heap/cppgc/marking-visitor.cc
+++ b/chromium/v8/src/heap/cppgc/marking-visitor.cc
@@ -5,8 +5,8 @@
#include "src/heap/cppgc/marking-visitor.h"
#include "include/cppgc/garbage-collected.h"
-#include "include/cppgc/internal/accessors.h"
#include "src/heap/cppgc/heap-object-header-inl.h"
+#include "src/heap/cppgc/heap-page-inl.h"
#include "src/heap/cppgc/heap.h"
namespace cppgc {
@@ -17,13 +17,14 @@ bool MarkingVisitor::IsInConstruction(const HeapObjectHeader& header) {
return header.IsInConstruction<HeapObjectHeader::AccessMode::kNonAtomic>();
}
-MarkingVisitor::MarkingVisitor(Marker* marking_handler, int task_id)
- : marker_(marking_handler),
- marking_worklist_(marking_handler->marking_worklist(), task_id),
- not_fully_constructed_worklist_(
- marking_handler->not_fully_constructed_worklist(), task_id),
- weak_callback_worklist_(marking_handler->weak_callback_worklist(),
- task_id) {}
+MarkingVisitor::MarkingVisitor(
+ HeapBase& heap, Marker::MarkingWorklist* marking_worklist,
+ Marker::NotFullyConstructedWorklist* not_fully_constructed_worklist,
+ Marker::WeakCallbackWorklist* weak_callback_worklist, int task_id)
+ : ConservativeTracingVisitor(heap, *heap.page_backend()),
+ marking_worklist_(marking_worklist, task_id),
+ not_fully_constructed_worklist_(not_fully_constructed_worklist, task_id),
+ weak_callback_worklist_(weak_callback_worklist, task_id) {}
void MarkingVisitor::AccountMarkedBytes(const HeapObjectHeader& header) {
marked_bytes_ +=
@@ -74,11 +75,22 @@ void MarkingVisitor::VisitWeakRoot(const void* object, TraceDescriptor desc,
// construction, then it should be reachable from the stack.
return;
}
- // Since weak roots arev only traced at the end of marking, we can execute
+ // Since weak roots are only traced at the end of marking, we can execute
// the callback instead of registering it.
weak_callback(LivenessBrokerFactory::Create(), weak_root);
}
+void MarkingVisitor::VisitPointer(const void* address) {
+ TraceConservativelyIfNeeded(address);
+}
+
+void MarkingVisitor::VisitConservatively(HeapObjectHeader& header,
+ TraceConservativelyCallback callback) {
+ MarkHeaderNoTracing(&header);
+ callback(this, header);
+ AccountMarkedBytes(header);
+}
+
void MarkingVisitor::MarkHeader(HeapObjectHeader* header,
TraceDescriptor desc) {
DCHECK(header);
@@ -94,7 +106,7 @@ void MarkingVisitor::MarkHeader(HeapObjectHeader* header,
bool MarkingVisitor::MarkHeaderNoTracing(HeapObjectHeader* header) {
DCHECK(header);
// A GC should only mark the objects that belong in its heap.
- DCHECK_EQ(marker_->heap(), BasePage::FromPayload(header)->heap());
+ DCHECK_EQ(&heap_, BasePage::FromPayload(header)->heap());
// Never mark free space objects. This would e.g. hint to marking a promptly
// freed backing store.
DCHECK(!header->IsFree());
@@ -114,30 +126,29 @@ void MarkingVisitor::FlushWorklists() {
}
void MarkingVisitor::DynamicallyMarkAddress(ConstAddress address) {
- for (auto* header : marker_->heap()->objects()) {
- if (address >= header->Payload() &&
- address < (header->Payload() + header->GetSize())) {
- header->TryMarkAtomic();
- }
+ HeapObjectHeader& header =
+ BasePage::FromPayload(address)->ObjectHeaderFromInnerAddress(
+ const_cast<Address>(address));
+ DCHECK(!IsInConstruction(header));
+ if (MarkHeaderNoTracing(&header)) {
+ marking_worklist_.Push(
+ {reinterpret_cast<void*>(header.Payload()),
+ GlobalGCInfoTable::GCInfoFromIndex(header.GetGCInfoIndex()).trace});
}
- // TODO(chromium:1056170): Implement dynamically getting HeapObjectHeader
- // for handling previously_not_fully_constructed objects. Requires object
- // start bitmap.
}
-void MarkingVisitor::VisitPointer(const void* address) {
- for (auto* header : marker_->heap()->objects()) {
- if (address >= header->Payload() &&
- address < (header->Payload() + header->GetSize())) {
- header->TryMarkAtomic();
- }
- }
- // TODO(chromium:1056170): Implement proper conservative scanning for
- // on-stack objects. Requires page bloom filter.
+void MarkingVisitor::MarkObject(HeapObjectHeader& header) {
+ MarkHeader(
+ &header,
+ {header.Payload(),
+ GlobalGCInfoTable::GCInfoFromIndex(header.GetGCInfoIndex()).trace});
}
MutatorThreadMarkingVisitor::MutatorThreadMarkingVisitor(Marker* marker)
- : MarkingVisitor(marker, Marker::kMutatorThreadId) {}
+ : MarkingVisitor(marker->heap(), marker->marking_worklist(),
+ marker->not_fully_constructed_worklist(),
+ marker->weak_callback_worklist(),
+ Marker::kMutatorThreadId) {}
} // namespace internal
} // namespace cppgc
diff --git a/chromium/v8/src/heap/cppgc/marking-visitor.h b/chromium/v8/src/heap/cppgc/marking-visitor.h
index 33616b37844..50427162a14 100644
--- a/chromium/v8/src/heap/cppgc/marking-visitor.h
+++ b/chromium/v8/src/heap/cppgc/marking-visitor.h
@@ -8,20 +8,25 @@
#include "include/cppgc/source-location.h"
#include "include/cppgc/trace-trait.h"
#include "include/v8config.h"
+#include "src/base/macros.h"
+#include "src/heap/base/stack.h"
#include "src/heap/cppgc/globals.h"
-#include "src/heap/cppgc/heap-object-header.h"
-#include "src/heap/cppgc/heap-page.h"
#include "src/heap/cppgc/heap.h"
#include "src/heap/cppgc/marker.h"
-#include "src/heap/cppgc/stack.h"
#include "src/heap/cppgc/visitor.h"
namespace cppgc {
namespace internal {
-class MarkingVisitor : public VisitorBase, public StackVisitor {
+class BasePage;
+class HeapObjectHeader;
+
+class MarkingVisitor : public ConservativeTracingVisitor,
+ public heap::base::StackVisitor {
public:
- MarkingVisitor(Marker*, int);
+ MarkingVisitor(HeapBase&, Marker::MarkingWorklist*,
+ Marker::NotFullyConstructedWorklist*,
+ Marker::WeakCallbackWorklist*, int);
virtual ~MarkingVisitor() = default;
MarkingVisitor(const MarkingVisitor&) = delete;
@@ -30,6 +35,7 @@ class MarkingVisitor : public VisitorBase, public StackVisitor {
void FlushWorklists();
void DynamicallyMarkAddress(ConstAddress);
+ void MarkObject(HeapObjectHeader&);
void AccountMarkedBytes(const HeapObjectHeader&);
size_t marked_bytes() const { return marked_bytes_; }
@@ -43,7 +49,10 @@ class MarkingVisitor : public VisitorBase, public StackVisitor {
void VisitRoot(const void*, TraceDescriptor) override;
void VisitWeakRoot(const void*, TraceDescriptor, WeakCallback,
const void*) override;
+ void VisitConservatively(HeapObjectHeader&,
+ TraceConservativelyCallback) override;
+ // StackMarker interface.
void VisitPointer(const void*) override;
private:
@@ -51,12 +60,11 @@ class MarkingVisitor : public VisitorBase, public StackVisitor {
bool MarkHeaderNoTracing(HeapObjectHeader*);
void RegisterWeakCallback(WeakCallback, const void*) override;
- Marker* const marker_;
Marker::MarkingWorklist::View marking_worklist_;
Marker::NotFullyConstructedWorklist::View not_fully_constructed_worklist_;
Marker::WeakCallbackWorklist::View weak_callback_worklist_;
- size_t marked_bytes_;
+ size_t marked_bytes_ = 0;
};
class V8_EXPORT_PRIVATE MutatorThreadMarkingVisitor : public MarkingVisitor {
diff --git a/chromium/v8/src/heap/cppgc/object-allocator-inl.h b/chromium/v8/src/heap/cppgc/object-allocator-inl.h
index 7d8d126d633..b75c296f51a 100644
--- a/chromium/v8/src/heap/cppgc/object-allocator-inl.h
+++ b/chromium/v8/src/heap/cppgc/object-allocator-inl.h
@@ -10,7 +10,7 @@
#include "src/base/logging.h"
#include "src/heap/cppgc/heap-object-header-inl.h"
#include "src/heap/cppgc/heap-object-header.h"
-#include "src/heap/cppgc/heap-page.h"
+#include "src/heap/cppgc/heap-page-inl.h"
#include "src/heap/cppgc/object-allocator.h"
#include "src/heap/cppgc/object-start-bitmap-inl.h"
#include "src/heap/cppgc/object-start-bitmap.h"
@@ -20,6 +20,7 @@ namespace cppgc {
namespace internal {
void* ObjectAllocator::AllocateObject(size_t size, GCInfoIndex gcinfo) {
+ DCHECK(is_allocation_allowed());
const size_t allocation_size =
RoundUp<kAllocationGranularity>(size + sizeof(HeapObjectHeader));
const RawHeap::RegularSpaceType type =
@@ -30,6 +31,7 @@ void* ObjectAllocator::AllocateObject(size_t size, GCInfoIndex gcinfo) {
void* ObjectAllocator::AllocateObject(size_t size, GCInfoIndex gcinfo,
CustomSpaceIndex space_index) {
+ DCHECK(is_allocation_allowed());
const size_t allocation_size =
RoundUp<kAllocationGranularity>(size + sizeof(HeapObjectHeader));
return AllocateObjectOnSpace(
diff --git a/chromium/v8/src/heap/cppgc/object-allocator.cc b/chromium/v8/src/heap/cppgc/object-allocator.cc
index df83d8ee9d3..b8203a1d8a2 100644
--- a/chromium/v8/src/heap/cppgc/object-allocator.cc
+++ b/chromium/v8/src/heap/cppgc/object-allocator.cc
@@ -4,36 +4,119 @@
#include "src/heap/cppgc/object-allocator.h"
+#include "src/base/logging.h"
+#include "src/base/macros.h"
+#include "src/heap/cppgc/free-list.h"
#include "src/heap/cppgc/globals.h"
#include "src/heap/cppgc/heap-object-header-inl.h"
#include "src/heap/cppgc/heap-object-header.h"
#include "src/heap/cppgc/heap-page.h"
#include "src/heap/cppgc/heap-space.h"
+#include "src/heap/cppgc/heap-visitor.h"
#include "src/heap/cppgc/heap.h"
#include "src/heap/cppgc/object-allocator-inl.h"
#include "src/heap/cppgc/object-start-bitmap.h"
#include "src/heap/cppgc/page-memory.h"
+#include "src/heap/cppgc/stats-collector.h"
#include "src/heap/cppgc/sweeper.h"
namespace cppgc {
namespace internal {
namespace {
-void* AllocateLargeObject(RawHeap* raw_heap, LargePageSpace* space, size_t size,
+void MarkRangeAsYoung(BasePage* page, Address begin, Address end) {
+#if defined(CPPGC_YOUNG_GENERATION)
+ DCHECK_LT(begin, end);
+
+ static constexpr auto kEntrySize = AgeTable::kEntrySizeInBytes;
+
+ const uintptr_t offset_begin = CagedHeap::OffsetFromAddress(begin);
+ const uintptr_t offset_end = CagedHeap::OffsetFromAddress(end);
+
+ const uintptr_t young_offset_begin = (begin == page->PayloadStart())
+ ? RoundDown(offset_begin, kEntrySize)
+ : RoundUp(offset_begin, kEntrySize);
+ const uintptr_t young_offset_end = (end == page->PayloadEnd())
+ ? RoundUp(offset_end, kEntrySize)
+ : RoundDown(offset_end, kEntrySize);
+
+ auto& age_table = page->heap()->caged_heap().local_data().age_table;
+ for (auto offset = young_offset_begin; offset < young_offset_end;
+ offset += AgeTable::kEntrySizeInBytes) {
+ age_table[offset] = AgeTable::Age::kYoung;
+ }
+
+ // Set the first and the last regions of the newly allocated linear buffer
+ // to kUnknown.
+ if (begin != page->PayloadStart() && !IsAligned(offset_begin, kEntrySize))
+ age_table[offset_begin] = AgeTable::Age::kUnknown;
+ if (end != page->PayloadEnd() && !IsAligned(offset_end, kEntrySize))
+ age_table[offset_end] = AgeTable::Age::kUnknown;
+#endif
+}
+
+void AddToFreeList(NormalPageSpace* space, Address start, size_t size) {
+ auto& free_list = space->free_list();
+ free_list.Add({start, size});
+ NormalPage::From(BasePage::FromPayload(start))
+ ->object_start_bitmap()
+ .SetBit(start);
+}
+
+void ReplaceLinearAllocationBuffer(NormalPageSpace* space,
+ StatsCollector* stats_collector,
+ Address new_buffer, size_t new_size) {
+ DCHECK_NOT_NULL(space);
+ DCHECK_NOT_NULL(stats_collector);
+
+ auto& lab = space->linear_allocation_buffer();
+ if (lab.size()) {
+ AddToFreeList(space, lab.start(), lab.size());
+ stats_collector->NotifyExplicitFree(lab.size());
+ }
+
+ lab.Set(new_buffer, new_size);
+ if (new_size) {
+ DCHECK_NOT_NULL(new_buffer);
+ stats_collector->NotifyAllocation(new_size);
+ auto* page = NormalPage::From(BasePage::FromPayload(new_buffer));
+ page->object_start_bitmap().ClearBit(new_buffer);
+ MarkRangeAsYoung(page, new_buffer, new_buffer + new_size);
+ }
+}
+
+void* AllocateLargeObject(PageBackend* page_backend, LargePageSpace* space,
+ StatsCollector* stats_collector, size_t size,
GCInfoIndex gcinfo) {
- LargePage* page = LargePage::Create(space, size);
+ LargePage* page = LargePage::Create(page_backend, space, size);
+ space->AddPage(page);
+
auto* header = new (page->ObjectHeader())
HeapObjectHeader(HeapObjectHeader::kLargeObjectSizeInHeader, gcinfo);
+ stats_collector->NotifyAllocation(size);
+ MarkRangeAsYoung(page, page->PayloadStart(), page->PayloadEnd());
+
return header->Payload();
}
} // namespace
-ObjectAllocator::ObjectAllocator(RawHeap* heap) : raw_heap_(heap) {}
+ObjectAllocator::ObjectAllocator(RawHeap* heap, PageBackend* page_backend,
+ StatsCollector* stats_collector)
+ : raw_heap_(heap),
+ page_backend_(page_backend),
+ stats_collector_(stats_collector) {}
void* ObjectAllocator::OutOfLineAllocate(NormalPageSpace* space, size_t size,
GCInfoIndex gcinfo) {
+ void* memory = OutOfLineAllocateImpl(space, size, gcinfo);
+ stats_collector_->NotifySafePointForConservativeCollection();
+ return memory;
+}
+
+void* ObjectAllocator::OutOfLineAllocateImpl(NormalPageSpace* space,
+ size_t size, GCInfoIndex gcinfo) {
DCHECK_EQ(0, size & kAllocationMask);
DCHECK_LE(kFreeListEntrySize, size);
@@ -41,7 +124,8 @@ void* ObjectAllocator::OutOfLineAllocate(NormalPageSpace* space, size_t size,
if (size >= kLargeObjectSizeThreshold) {
auto* large_space = LargePageSpace::From(
raw_heap_->Space(RawHeap::RegularSpaceType::kLarge));
- return AllocateLargeObject(raw_heap_, large_space, size, gcinfo);
+ return AllocateLargeObject(page_backend_, large_space, stats_collector_,
+ size, gcinfo);
}
// 2. Try to allocate from the freelist.
@@ -57,11 +141,17 @@ void* ObjectAllocator::OutOfLineAllocate(NormalPageSpace* space, size_t size,
raw_heap_->heap()->sweeper().Finish();
// 5. Add a new page to this heap.
- NormalPage::Create(space);
+ auto* new_page = NormalPage::Create(page_backend_, space);
+ space->AddPage(new_page);
+
+ // 6. Set linear allocation buffer to new page.
+ ReplaceLinearAllocationBuffer(space, stats_collector_,
+ new_page->PayloadStart(),
+ new_page->PayloadSize());
- // 6. Try to allocate from the freelist. This allocation must succeed.
- void* result = AllocateFromFreeList(space, size, gcinfo);
- CPPGC_CHECK(result);
+ // 7. Allocate from it. The allocation must succeed.
+ void* result = AllocateObjectOnSpace(space, size, gcinfo);
+ CHECK(result);
return result;
}
@@ -71,17 +161,40 @@ void* ObjectAllocator::AllocateFromFreeList(NormalPageSpace* space, size_t size,
const FreeList::Block entry = space->free_list().Allocate(size);
if (!entry.address) return nullptr;
- auto& current_lab = space->linear_allocation_buffer();
- if (current_lab.size()) {
- space->AddToFreeList(current_lab.start(), current_lab.size());
- }
+ ReplaceLinearAllocationBuffer(
+ space, stats_collector_, static_cast<Address>(entry.address), entry.size);
- current_lab.Set(static_cast<Address>(entry.address), entry.size);
- NormalPage::From(BasePage::FromPayload(current_lab.start()))
- ->object_start_bitmap()
- .ClearBit(current_lab.start());
return AllocateObjectOnSpace(space, size, gcinfo);
}
+void ObjectAllocator::ResetLinearAllocationBuffers() {
+ class Resetter : public HeapVisitor<Resetter> {
+ public:
+ explicit Resetter(StatsCollector* stats) : stats_collector_(stats) {}
+
+ bool VisitLargePageSpace(LargePageSpace*) { return true; }
+
+ bool VisitNormalPageSpace(NormalPageSpace* space) {
+ ReplaceLinearAllocationBuffer(space, stats_collector_, nullptr, 0);
+ return true;
+ }
+
+ private:
+ StatsCollector* stats_collector_;
+ } visitor(stats_collector_);
+
+ visitor.Traverse(raw_heap_);
+}
+
+ObjectAllocator::NoAllocationScope::NoAllocationScope(
+ ObjectAllocator& allocator)
+ : allocator_(allocator) {
+ allocator.no_allocation_scope_++;
+}
+
+ObjectAllocator::NoAllocationScope::~NoAllocationScope() {
+ allocator_.no_allocation_scope_--;
+}
+
} // namespace internal
} // namespace cppgc
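
ReplaceLinearAllocationBuffer above retires the current LAB into the free list before installing a new one. As a rough, self-contained illustration of what a linear allocation buffer is (types and behavior simplified; this is not the cppgc implementation):

#include <cstddef>
#include <cstdint>

// Simplified linear allocation buffer: a contiguous span that serves
// allocations by bumping a pointer until the span is exhausted.
class ToyLinearAllocationBuffer {
 public:
  void Set(std::uint8_t* start, std::size_t size) {
    start_ = start;
    size_ = size;
  }

  // Returns nullptr when the request does not fit; callers then take a
  // slower out-of-line path (free list, sweeping, or a new page).
  void* Allocate(std::size_t bytes) {
    if (bytes > size_) return nullptr;
    void* result = start_;
    start_ += bytes;
    size_ -= bytes;
    return result;
  }

  std::uint8_t* start() const { return start_; }
  std::size_t size() const { return size_; }

 private:
  std::uint8_t* start_ = nullptr;
  std::size_t size_ = 0;
};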
diff --git a/chromium/v8/src/heap/cppgc/object-allocator.h b/chromium/v8/src/heap/cppgc/object-allocator.h
index 510a935f565..1536ed63730 100644
--- a/chromium/v8/src/heap/cppgc/object-allocator.h
+++ b/chromium/v8/src/heap/cppgc/object-allocator.h
@@ -5,33 +5,70 @@
#ifndef V8_HEAP_CPPGC_OBJECT_ALLOCATOR_H_
#define V8_HEAP_CPPGC_OBJECT_ALLOCATOR_H_
+#include "include/cppgc/allocation.h"
#include "include/cppgc/internal/gc-info.h"
+#include "include/cppgc/macros.h"
#include "src/heap/cppgc/heap-space.h"
#include "src/heap/cppgc/raw-heap.h"
namespace cppgc {
+
+class V8_EXPORT AllocationHandle {
+ private:
+ AllocationHandle() = default;
+ friend class internal::ObjectAllocator;
+};
+
namespace internal {
-class V8_EXPORT_PRIVATE ObjectAllocator final {
+class StatsCollector;
+class PageBackend;
+
+class V8_EXPORT_PRIVATE ObjectAllocator final : public cppgc::AllocationHandle {
public:
- explicit ObjectAllocator(RawHeap* heap);
+ // NoAllocationScope is used in debug mode to catch unwanted allocations,
+ // e.g. allocations during GC.
+ class V8_EXPORT_PRIVATE NoAllocationScope final {
+ CPPGC_STACK_ALLOCATED();
+
+ public:
+ explicit NoAllocationScope(ObjectAllocator&);
+ ~NoAllocationScope();
+
+ NoAllocationScope(const NoAllocationScope&) = delete;
+ NoAllocationScope& operator=(const NoAllocationScope&) = delete;
+
+ private:
+ ObjectAllocator& allocator_;
+ };
+
+ ObjectAllocator(RawHeap* heap, PageBackend* page_backend,
+ StatsCollector* stats_collector);
inline void* AllocateObject(size_t size, GCInfoIndex gcinfo);
inline void* AllocateObject(size_t size, GCInfoIndex gcinfo,
CustomSpaceIndex space_index);
+ void ResetLinearAllocationBuffers();
+
private:
// Returns the initially tried SpaceType to allocate an object of |size| bytes
// on. Returns the largest regular object size bucket for large objects.
inline static RawHeap::RegularSpaceType GetInitialSpaceIndexForSize(
size_t size);
+ bool is_allocation_allowed() const { return no_allocation_scope_ == 0; }
+
inline void* AllocateObjectOnSpace(NormalPageSpace* space, size_t size,
GCInfoIndex gcinfo);
void* OutOfLineAllocate(NormalPageSpace*, size_t, GCInfoIndex);
+ void* OutOfLineAllocateImpl(NormalPageSpace*, size_t, GCInfoIndex);
void* AllocateFromFreeList(NormalPageSpace*, size_t, GCInfoIndex);
RawHeap* raw_heap_;
+ PageBackend* page_backend_;
+ StatsCollector* stats_collector_;
+ size_t no_allocation_scope_ = 0;
};
} // namespace internal
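
NoAllocationScope is a counting RAII guard: the DCHECK in AllocateObject only passes while no scope is active. A minimal sketch of the same pattern, assuming a hypothetical AllocationGate class rather than ObjectAllocator itself:

#include <cstddef>

// Counting guard in the spirit of ObjectAllocator::NoAllocationScope:
// scopes may nest, and allocation is only allowed when none is active.
class AllocationGate {
 public:
  class NoAllocationScope {
   public:
    explicit NoAllocationScope(AllocationGate& gate) : gate_(gate) {
      ++gate_.no_allocation_scope_;
    }
    ~NoAllocationScope() { --gate_.no_allocation_scope_; }
    NoAllocationScope(const NoAllocationScope&) = delete;
    NoAllocationScope& operator=(const NoAllocationScope&) = delete;

   private:
    AllocationGate& gate_;
  };

  bool is_allocation_allowed() const { return no_allocation_scope_ == 0; }

 private:
  std::size_t no_allocation_scope_ = 0;
};

// On the allocation fast path, the check mirrors the DCHECK added above:
//   assert(gate.is_allocation_allowed());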
diff --git a/chromium/v8/src/heap/cppgc/object-start-bitmap-inl.h b/chromium/v8/src/heap/cppgc/object-start-bitmap-inl.h
index 93243979aac..6d963cc9486 100644
--- a/chromium/v8/src/heap/cppgc/object-start-bitmap-inl.h
+++ b/chromium/v8/src/heap/cppgc/object-start-bitmap-inl.h
@@ -19,6 +19,7 @@ ObjectStartBitmap::ObjectStartBitmap(Address offset) : offset_(offset) {
HeapObjectHeader* ObjectStartBitmap::FindHeader(
ConstAddress address_maybe_pointing_to_the_middle_of_object) const {
+ DCHECK_LE(offset_, address_maybe_pointing_to_the_middle_of_object);
size_t object_offset =
address_maybe_pointing_to_the_middle_of_object - offset_;
size_t object_start_number = object_offset / kAllocationGranularity;
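
The new DCHECK documents FindHeader's precondition: the queried address must not lie before the page payload. The following toy bitmap shows the underlying idea of mapping an inner pointer back to an object start; granularity, payload size, and the linear backward scan are simplifications, and the real implementation is more efficient:

#include <bitset>
#include <cassert>
#include <cstddef>
#include <cstdint>

// Toy object-start bitmap: one bit per kGranularity bytes of a page payload;
// a set bit marks an object start. Assumes the first object begins at the
// payload start, so a backward scan always terminates on a set bit.
class ToyObjectStartBitmap {
 public:
  static constexpr std::size_t kGranularity = 16;
  static constexpr std::size_t kPayloadSize = 1u << 17;  // Illustrative.

  explicit ToyObjectStartBitmap(std::uintptr_t payload_start)
      : payload_start_(payload_start) {}

  void SetBit(std::uintptr_t address) { bits_.set(Index(address)); }
  void ClearBit(std::uintptr_t address) { bits_.reset(Index(address)); }

  // Returns the start address of the object containing |inner|, i.e. the
  // closest set bit at or before |inner|.
  std::uintptr_t FindHeader(std::uintptr_t inner) const {
    assert(inner >= payload_start_);  // Same precondition as the DCHECK.
    std::size_t index = Index(inner);
    while (index > 0 && !bits_.test(index)) --index;
    return payload_start_ + index * kGranularity;
  }

 private:
  std::size_t Index(std::uintptr_t address) const {
    return (address - payload_start_) / kGranularity;
  }

  std::uintptr_t payload_start_;
  std::bitset<kPayloadSize / kGranularity> bits_;
};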
diff --git a/chromium/v8/src/heap/cppgc/page-memory-inl.h b/chromium/v8/src/heap/cppgc/page-memory-inl.h
index 23ce061b435..8b2022eeb26 100644
--- a/chromium/v8/src/heap/cppgc/page-memory-inl.h
+++ b/chromium/v8/src/heap/cppgc/page-memory-inl.h
@@ -16,19 +16,19 @@ inline bool SupportsCommittingGuardPages(PageAllocator* allocator) {
return kGuardPageSize % allocator->CommitPageSize() == 0;
}
-Address NormalPageMemoryRegion::Lookup(Address address) const {
+Address NormalPageMemoryRegion::Lookup(ConstAddress address) const {
size_t index = GetIndex(address);
if (!page_memories_in_use_[index]) return nullptr;
const MemoryRegion writeable_region = GetPageMemory(index).writeable_region();
return writeable_region.Contains(address) ? writeable_region.base() : nullptr;
}
-Address LargePageMemoryRegion::Lookup(Address address) const {
+Address LargePageMemoryRegion::Lookup(ConstAddress address) const {
const MemoryRegion writeable_region = GetPageMemory().writeable_region();
return writeable_region.Contains(address) ? writeable_region.base() : nullptr;
}
-Address PageMemoryRegion::Lookup(Address address) const {
+Address PageMemoryRegion::Lookup(ConstAddress address) const {
DCHECK(reserved_region().Contains(address));
return is_large()
? static_cast<const LargePageMemoryRegion*>(this)->Lookup(address)
@@ -36,7 +36,7 @@ Address PageMemoryRegion::Lookup(Address address) const {
address);
}
-PageMemoryRegion* PageMemoryRegionTree::Lookup(Address address) const {
+PageMemoryRegion* PageMemoryRegionTree::Lookup(ConstAddress address) const {
auto it = set_.upper_bound(address);
// This check also covers set_.size() > 0, since for empty vectors it is
// guaranteed that begin() == end().
@@ -46,7 +46,7 @@ PageMemoryRegion* PageMemoryRegionTree::Lookup(Address address) const {
return nullptr;
}
-Address PageBackend::Lookup(Address address) const {
+Address PageBackend::Lookup(ConstAddress address) const {
PageMemoryRegion* pmr = page_memory_region_tree_.Lookup(address);
return pmr ? pmr->Lookup(address) : nullptr;
}
diff --git a/chromium/v8/src/heap/cppgc/page-memory.h b/chromium/v8/src/heap/cppgc/page-memory.h
index f3bc685fa31..b7f1917be7f 100644
--- a/chromium/v8/src/heap/cppgc/page-memory.h
+++ b/chromium/v8/src/heap/cppgc/page-memory.h
@@ -30,7 +30,7 @@ class V8_EXPORT_PRIVATE MemoryRegion final {
size_t size() const { return size_; }
Address end() const { return base_ + size_; }
- bool Contains(Address addr) const {
+ bool Contains(ConstAddress addr) const {
return (reinterpret_cast<uintptr_t>(addr) -
reinterpret_cast<uintptr_t>(base_)) < size_;
}
@@ -70,7 +70,7 @@ class V8_EXPORT_PRIVATE PageMemoryRegion {
// Lookup writeable base for an |address| that's contained in
// PageMemoryRegion. Filters out addresses that are contained in non-writeable
// regions (e.g. guard pages).
- inline Address Lookup(Address address) const;
+ inline Address Lookup(ConstAddress address) const;
// Disallow copy/move.
PageMemoryRegion(const PageMemoryRegion&) = delete;
@@ -111,7 +111,7 @@ class V8_EXPORT_PRIVATE NormalPageMemoryRegion final : public PageMemoryRegion {
// protection.
void Free(Address);
- inline Address Lookup(Address) const;
+ inline Address Lookup(ConstAddress) const;
void UnprotectForTesting() final;
@@ -122,7 +122,7 @@ class V8_EXPORT_PRIVATE NormalPageMemoryRegion final : public PageMemoryRegion {
page_memories_in_use_[index] = value;
}
- size_t GetIndex(Address address) const {
+ size_t GetIndex(ConstAddress address) const {
return static_cast<size_t>(address - reserved_region().base()) >>
kPageSizeLog2;
}
@@ -143,7 +143,7 @@ class V8_EXPORT_PRIVATE LargePageMemoryRegion final : public PageMemoryRegion {
reserved_region().size() - 2 * kGuardPageSize));
}
- inline Address Lookup(Address) const;
+ inline Address Lookup(ConstAddress) const;
void UnprotectForTesting() final;
};
@@ -161,10 +161,10 @@ class V8_EXPORT_PRIVATE PageMemoryRegionTree final {
void Add(PageMemoryRegion*);
void Remove(PageMemoryRegion*);
- inline PageMemoryRegion* Lookup(Address) const;
+ inline PageMemoryRegion* Lookup(ConstAddress) const;
private:
- std::map<Address, PageMemoryRegion*> set_;
+ std::map<ConstAddress, PageMemoryRegion*> set_;
};
// A pool of PageMemory objects represented by the writeable base addresses.
@@ -216,7 +216,7 @@ class V8_EXPORT_PRIVATE PageBackend final {
// Returns the writeable base if |address| is contained in a valid page
// memory.
- inline Address Lookup(Address) const;
+ inline Address Lookup(ConstAddress) const;
// Disallow copy/move.
PageBackend(const PageBackend&) = delete;
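
PageMemoryRegionTree::Lookup (touched by the ConstAddress change above) keys regions by base address and finds the candidate via upper_bound. The same pattern in isolation, with an invented RegionInfo in place of PageMemoryRegion:

#include <cstddef>
#include <cstdint>
#include <map>

struct RegionInfo {
  std::uintptr_t base;
  std::size_t size;
};

// Regions sorted by base address.
using RegionTree = std::map<std::uintptr_t, RegionInfo>;

// Returns the region containing |address|, or nullptr if none does.
const RegionInfo* Lookup(const RegionTree& tree, std::uintptr_t address) {
  auto it = tree.upper_bound(address);  // First region with base > address.
  // Also covers the empty-tree case, since then begin() == end().
  if (it == tree.begin()) return nullptr;
  --it;  // Candidate region: largest base <= address.
  const RegionInfo& region = it->second;
  return (address < region.base + region.size) ? &region : nullptr;
}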
diff --git a/chromium/v8/src/heap/cppgc/persistent-node.cc b/chromium/v8/src/heap/cppgc/persistent-node.cc
index 299cefc5210..9c5113f86a2 100644
--- a/chromium/v8/src/heap/cppgc/persistent-node.cc
+++ b/chromium/v8/src/heap/cppgc/persistent-node.cc
@@ -7,9 +7,21 @@
#include <algorithm>
#include <numeric>
+#include "include/cppgc/persistent.h"
+
namespace cppgc {
namespace internal {
+PersistentRegion::~PersistentRegion() {
+ for (auto& slots : nodes_) {
+ for (auto& node : *slots) {
+ if (node.IsUsed()) {
+ static_cast<PersistentBase*>(node.owner())->ClearFromGC();
+ }
+ }
+ }
+}
+
size_t PersistentRegion::NodesInUse() const {
return std::accumulate(
nodes_.cbegin(), nodes_.cend(), 0u, [](size_t acc, const auto& slots) {
diff --git a/chromium/v8/src/heap/cppgc/platform.cc b/chromium/v8/src/heap/cppgc/platform.cc
index 3b20060392d..e96d69b2257 100644
--- a/chromium/v8/src/heap/cppgc/platform.cc
+++ b/chromium/v8/src/heap/cppgc/platform.cc
@@ -8,18 +8,12 @@
#include "src/heap/cppgc/gc-info-table.h"
namespace cppgc {
-namespace internal {
-
-static PageAllocator* g_page_allocator;
-
-} // namespace internal
-void InitializePlatform(PageAllocator* page_allocator) {
- internal::g_page_allocator = page_allocator;
+void InitializeProcess(PageAllocator* page_allocator) {
internal::GlobalGCInfoTable::Create(page_allocator);
}
-void ShutdownPlatform() { internal::g_page_allocator = nullptr; }
+void ShutdownProcess() {}
namespace internal {
diff --git a/chromium/v8/src/heap/cppgc/pointer-policies.cc b/chromium/v8/src/heap/cppgc/pointer-policies.cc
index e9dfcecdf3e..5048d1bd59f 100644
--- a/chromium/v8/src/heap/cppgc/pointer-policies.cc
+++ b/chromium/v8/src/heap/cppgc/pointer-policies.cc
@@ -3,10 +3,10 @@
// found in the LICENSE file.
#include "include/cppgc/internal/pointer-policies.h"
-#include "include/cppgc/internal/persistent-node.h"
+#include "include/cppgc/internal/persistent-node.h"
#include "src/base/macros.h"
-#include "src/heap/cppgc/heap-page.h"
+#include "src/heap/cppgc/heap-page-inl.h"
#include "src/heap/cppgc/heap.h"
namespace cppgc {
diff --git a/chromium/v8/src/heap/cppgc/prefinalizer-handler.cc b/chromium/v8/src/heap/cppgc/prefinalizer-handler.cc
index 40107c15262..c28cedfbab9 100644
--- a/chromium/v8/src/heap/cppgc/prefinalizer-handler.cc
+++ b/chromium/v8/src/heap/cppgc/prefinalizer-handler.cc
@@ -8,6 +8,7 @@
#include <memory>
#include "src/base/platform/platform.h"
+#include "src/heap/cppgc/heap-page-inl.h"
#include "src/heap/cppgc/heap.h"
namespace cppgc {
@@ -15,14 +16,16 @@ namespace internal {
// static
void PreFinalizerRegistrationDispatcher::RegisterPrefinalizer(
- cppgc::Heap* heap, PreFinalizer prefinalzier) {
- internal::Heap::From(heap)->prefinalizer_handler()->RegisterPrefinalizer(
- prefinalzier);
+ PreFinalizer pre_finalizer) {
+ BasePage::FromPayload(pre_finalizer.object)
+ ->heap()
+ ->prefinalizer_handler()
+ ->RegisterPrefinalizer(pre_finalizer);
}
bool PreFinalizerRegistrationDispatcher::PreFinalizer::operator==(
const PreFinalizer& other) {
- return (object_ == other.object_) && (callback_ == other.callback_);
+ return (object == other.object) && (callback == other.callback);
}
PreFinalizerHandler::PreFinalizerHandler()
@@ -32,12 +35,12 @@ PreFinalizerHandler::PreFinalizerHandler()
{
}
-void PreFinalizerHandler::RegisterPrefinalizer(PreFinalizer prefinalizer) {
+void PreFinalizerHandler::RegisterPrefinalizer(PreFinalizer pre_finalizer) {
DCHECK(CurrentThreadIsCreationThread());
DCHECK_EQ(ordered_pre_finalizers_.end(),
std::find(ordered_pre_finalizers_.begin(),
- ordered_pre_finalizers_.end(), prefinalizer));
- ordered_pre_finalizers_.push_back(prefinalizer);
+ ordered_pre_finalizers_.end(), pre_finalizer));
+ ordered_pre_finalizers_.push_back(pre_finalizer);
}
void PreFinalizerHandler::InvokePreFinalizers() {
@@ -48,7 +51,7 @@ void PreFinalizerHandler::InvokePreFinalizers() {
std::remove_if(ordered_pre_finalizers_.rbegin(),
ordered_pre_finalizers_.rend(),
[liveness_broker](const PreFinalizer& pf) {
- return (pf.callback_)(liveness_broker, pf.object_);
+ return (pf.callback)(liveness_broker, pf.object);
})
.base());
ordered_pre_finalizers_.shrink_to_fit();
diff --git a/chromium/v8/src/heap/cppgc/prefinalizer-handler.h b/chromium/v8/src/heap/cppgc/prefinalizer-handler.h
index a6255534710..15d24e862cf 100644
--- a/chromium/v8/src/heap/cppgc/prefinalizer-handler.h
+++ b/chromium/v8/src/heap/cppgc/prefinalizer-handler.h
@@ -19,7 +19,7 @@ class PreFinalizerHandler final {
PreFinalizerHandler();
- void RegisterPrefinalizer(PreFinalizer prefinalzier);
+ void RegisterPrefinalizer(PreFinalizer pre_finalizer);
void InvokePreFinalizers();
diff --git a/chromium/v8/src/heap/cppgc/process-heap.cc b/chromium/v8/src/heap/cppgc/process-heap.cc
new file mode 100644
index 00000000000..14089883967
--- /dev/null
+++ b/chromium/v8/src/heap/cppgc/process-heap.cc
@@ -0,0 +1,13 @@
+// Copyright 2020 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "include/cppgc/internal/process-heap.h"
+
+namespace cppgc {
+namespace internal {
+
+AtomicEntryFlag ProcessHeap::concurrent_marking_flag_;
+
+} // namespace internal
+} // namespace cppgc
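
This new file only provides storage for ProcessHeap::concurrent_marking_flag_, which the marking code above toggles on entering and exiting incremental/concurrent marking. A hedged approximation of such an enter/exit flag (not the actual AtomicEntryFlag implementation):

#include <atomic>
#include <cstdint>

// Counts Enter()/Exit() pairs; readers such as write barriers can query it
// cheaply without exact synchronization.
class EntryFlag final {
 public:
  void Enter() { entries_.fetch_add(1, std::memory_order_relaxed); }
  void Exit() { entries_.fetch_sub(1, std::memory_order_relaxed); }

  // Conservative: may briefly return true after the last Exit(); callers
  // must tolerate false positives and re-check under a stronger protocol.
  bool MightBeEntered() const {
    return entries_.load(std::memory_order_relaxed) != 0;
  }

 private:
  std::atomic<std::int32_t> entries_{0};
};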
diff --git a/chromium/v8/src/heap/cppgc/raw-heap.cc b/chromium/v8/src/heap/cppgc/raw-heap.cc
index cf7311b46f2..19200ae8a20 100644
--- a/chromium/v8/src/heap/cppgc/raw-heap.cc
+++ b/chromium/v8/src/heap/cppgc/raw-heap.cc
@@ -12,7 +12,7 @@ namespace internal {
// static
constexpr size_t RawHeap::kNumberOfRegularSpaces;
-RawHeap::RawHeap(Heap* heap, size_t custom_spaces) : main_heap_(heap) {
+RawHeap::RawHeap(HeapBase* heap, size_t custom_spaces) : main_heap_(heap) {
size_t i = 0;
for (; i < static_cast<size_t>(RegularSpaceType::kLarge); ++i) {
spaces_.push_back(std::make_unique<NormalPageSpace>(this, i));
diff --git a/chromium/v8/src/heap/cppgc/raw-heap.h b/chromium/v8/src/heap/cppgc/raw-heap.h
index 0591fa87ab7..e63fc32c439 100644
--- a/chromium/v8/src/heap/cppgc/raw-heap.h
+++ b/chromium/v8/src/heap/cppgc/raw-heap.h
@@ -16,7 +16,7 @@
namespace cppgc {
namespace internal {
-class Heap;
+class HeapBase;
class BaseSpace;
// RawHeap is responsible for space management.
@@ -47,7 +47,7 @@ class V8_EXPORT_PRIVATE RawHeap final {
using iterator = Spaces::iterator;
using const_iterator = Spaces::const_iterator;
- explicit RawHeap(Heap* heap, size_t custom_spaces);
+ explicit RawHeap(HeapBase* heap, size_t custom_spaces);
~RawHeap();
// Space iteration support.
@@ -77,8 +77,8 @@ class V8_EXPORT_PRIVATE RawHeap final {
return const_cast<RawHeap&>(*this).CustomSpace(space_index);
}
- Heap* heap() { return main_heap_; }
- const Heap* heap() const { return main_heap_; }
+ HeapBase* heap() { return main_heap_; }
+ const HeapBase* heap() const { return main_heap_; }
private:
size_t SpaceIndexForCustomSpace(CustomSpaceIndex space_index) const {
@@ -96,7 +96,7 @@ class V8_EXPORT_PRIVATE RawHeap final {
return const_cast<RawHeap&>(*this).Space(space_index);
}
- Heap* main_heap_;
+ HeapBase* main_heap_;
Spaces spaces_;
};
diff --git a/chromium/v8/src/heap/cppgc/stats-collector.cc b/chromium/v8/src/heap/cppgc/stats-collector.cc
new file mode 100644
index 00000000000..a92aba021d7
--- /dev/null
+++ b/chromium/v8/src/heap/cppgc/stats-collector.cc
@@ -0,0 +1,114 @@
+// Copyright 2020 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/heap/cppgc/stats-collector.h"
+
+#include <algorithm>
+#include <cmath>
+
+#include "src/base/logging.h"
+
+namespace cppgc {
+namespace internal {
+
+// static
+constexpr size_t StatsCollector::kAllocationThresholdBytes;
+
+void StatsCollector::RegisterObserver(AllocationObserver* observer) {
+ DCHECK_EQ(allocation_observers_.end(),
+ std::find(allocation_observers_.begin(),
+ allocation_observers_.end(), observer));
+ allocation_observers_.push_back(observer);
+}
+
+void StatsCollector::UnregisterObserver(AllocationObserver* observer) {
+ auto it = std::find(allocation_observers_.begin(),
+ allocation_observers_.end(), observer);
+ DCHECK_NE(allocation_observers_.end(), it);
+ allocation_observers_.erase(it);
+}
+
+void StatsCollector::NotifyAllocation(size_t bytes) {
+ // The current GC may not have been started. This is ok as recording considers
+ // the whole time range between garbage collections.
+ allocated_bytes_since_safepoint_ += bytes;
+}
+
+void StatsCollector::NotifyExplicitFree(size_t bytes) {
+ // See IncreaseAllocatedObjectSize for lifetime of the counter.
+ explicitly_freed_bytes_since_safepoint_ += bytes;
+}
+
+void StatsCollector::NotifySafePointForConservativeCollection() {
+ if (std::abs(allocated_bytes_since_safepoint_ -
+ explicitly_freed_bytes_since_safepoint_) >=
+ static_cast<int64_t>(kAllocationThresholdBytes)) {
+ AllocatedObjectSizeSafepointImpl();
+ }
+}
+
+void StatsCollector::AllocatedObjectSizeSafepointImpl() {
+ allocated_bytes_since_end_of_marking_ +=
+ static_cast<int64_t>(allocated_bytes_since_safepoint_) -
+ static_cast<int64_t>(explicitly_freed_bytes_since_safepoint_);
+
+ // These observer methods may start or finalize GC. In case they trigger a
+ // final GC pause, the delta counters are reset there and the following
+ // observer calls are called with '0' updates.
+ ForAllAllocationObservers([this](AllocationObserver* observer) {
+ // Recompute delta here so that a GC finalization is able to clear the
+ // delta for other observer calls.
+ int64_t delta = allocated_bytes_since_safepoint_ -
+ explicitly_freed_bytes_since_safepoint_;
+ if (delta < 0) {
+ observer->AllocatedObjectSizeDecreased(static_cast<size_t>(-delta));
+ } else {
+ observer->AllocatedObjectSizeIncreased(static_cast<size_t>(delta));
+ }
+ });
+ allocated_bytes_since_safepoint_ = 0;
+ explicitly_freed_bytes_since_safepoint_ = 0;
+}
+
+void StatsCollector::NotifyMarkingStarted() {
+ DCHECK_EQ(GarbageCollectionState::kNotRunning, gc_state_);
+ gc_state_ = GarbageCollectionState::kMarking;
+}
+
+void StatsCollector::NotifyMarkingCompleted(size_t marked_bytes) {
+ DCHECK_EQ(GarbageCollectionState::kMarking, gc_state_);
+ gc_state_ = GarbageCollectionState::kSweeping;
+ current_.marked_bytes = marked_bytes;
+ allocated_bytes_since_end_of_marking_ = 0;
+ allocated_bytes_since_safepoint_ = 0;
+ explicitly_freed_bytes_since_safepoint_ = 0;
+
+ ForAllAllocationObservers([marked_bytes](AllocationObserver* observer) {
+ observer->ResetAllocatedObjectSize(marked_bytes);
+ });
+}
+
+const StatsCollector::Event& StatsCollector::NotifySweepingCompleted() {
+ DCHECK_EQ(GarbageCollectionState::kSweeping, gc_state_);
+ gc_state_ = GarbageCollectionState::kNotRunning;
+ previous_ = std::move(current_);
+ current_ = Event();
+ return previous_;
+}
+
+size_t StatsCollector::allocated_object_size() const {
+ // During sweeping we refer to the current Event as that already holds the
+ // correct marking information. In all other phases, the previous event holds
+ // the most up-to-date marking information.
+ const Event& event =
+ gc_state_ == GarbageCollectionState::kSweeping ? current_ : previous_;
+ DCHECK_GE(static_cast<int64_t>(event.marked_bytes) +
+ allocated_bytes_since_end_of_marking_,
+ 0);
+ return static_cast<size_t>(static_cast<int64_t>(event.marked_bytes) +
+ allocated_bytes_since_end_of_marking_);
+}
+
+} // namespace internal
+} // namespace cppgc
diff --git a/chromium/v8/src/heap/cppgc/stats-collector.h b/chromium/v8/src/heap/cppgc/stats-collector.h
new file mode 100644
index 00000000000..cc122a17dd5
--- /dev/null
+++ b/chromium/v8/src/heap/cppgc/stats-collector.h
@@ -0,0 +1,130 @@
+// Copyright 2020 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_HEAP_CPPGC_STATS_COLLECTOR_H_
+#define V8_HEAP_CPPGC_STATS_COLLECTOR_H_
+
+#include <stddef.h>
+#include <stdint.h>
+
+#include <vector>
+
+#include "src/base/macros.h"
+
+namespace cppgc {
+namespace internal {
+
+// Sink for various time and memory statistics.
+class V8_EXPORT_PRIVATE StatsCollector final {
+ public:
+ // POD to hold interesting data accumulated during a garbage collection cycle.
+ //
+ // The event is always fully populated when looking at previous events but
+ // may only be partially populated when looking at the current event.
+ struct Event final {
+ // Marked bytes collected during marking.
+ size_t marked_bytes = 0;
+ };
+
+ // Observer for allocated object size. May be used to implement heap growing
+ // heuristics.
+ class AllocationObserver {
+ public:
+ // Called after observing at least
+ // StatsCollector::kAllocationThresholdBytes changed bytes through
+ // allocation or explicit free. Reports both negative and positive
+ // increments, allowing the observer to decide whether absolute values or
+ // only the deltas are of interest.
+ //
+ // May trigger GC.
+ virtual void AllocatedObjectSizeIncreased(size_t) = 0;
+ virtual void AllocatedObjectSizeDecreased(size_t) = 0;
+
+ // Called when the exact allocated object size is known. In practice,
+ // this is after marking, when marked bytes == allocated bytes.
+ //
+ // Must not trigger GC synchronously.
+ virtual void ResetAllocatedObjectSize(size_t) = 0;
+ };
+
+ // Observers are implemented using virtual calls. Avoid notifications below
+ // reasonably interesting sizes.
+ static constexpr size_t kAllocationThresholdBytes = 1024;
+
+ StatsCollector() = default;
+ StatsCollector(const StatsCollector&) = delete;
+ StatsCollector& operator=(const StatsCollector&) = delete;
+
+ void RegisterObserver(AllocationObserver*);
+ void UnregisterObserver(AllocationObserver*);
+
+ void NotifyAllocation(size_t);
+ void NotifyExplicitFree(size_t);
+ // Safepoints should only be invoked when garbage collections are possible.
+ // This is necessary as increments and decrements are reported as close to
+ // their actual allocation/reclamation as possible.
+ void NotifySafePointForConservativeCollection();
+
+ // Indicates a new garbage collection cycle.
+ void NotifyMarkingStarted();
+ // Indicates that marking of the current garbage collection cycle is
+ // completed.
+ void NotifyMarkingCompleted(size_t marked_bytes);
+ // Indicates the end of a garbage collection cycle. This means that sweeping
+ // is finished at this point.
+ const Event& NotifySweepingCompleted();
+
+ // Size of live objects in bytes on the heap. Based on the most recent marked
+ // bytes and the bytes allocated since last marking.
+ size_t allocated_object_size() const;
+
+ private:
+ enum class GarbageCollectionState : uint8_t {
+ kNotRunning,
+ kMarking,
+ kSweeping
+ };
+
+ // Invokes |callback| for all registered observers.
+ template <typename Callback>
+ void ForAllAllocationObservers(Callback callback);
+
+ void AllocatedObjectSizeSafepointImpl();
+
+ // Allocated bytes since the end of marking. These bytes are reset after
+ // marking as they are then accounted for in marked_bytes. May be negative
+ // if an object that was marked as live in the previous cycle is explicitly
+ // freed.
+ int64_t allocated_bytes_since_end_of_marking_ = 0;
+ // Counters for allocation and free. The individual values are never
+ // negative, but their delta may be, for the same reason that
+ // allocated_bytes_since_end_of_marking_ may be negative. Signed integer
+ // arithmetic is kept for simplicity.
+ int64_t allocated_bytes_since_safepoint_ = 0;
+ int64_t explicitly_freed_bytes_since_safepoint_ = 0;
+
+ // Vector to allow fast iteration over observers. Registration and
+ // unregistration only happen at startup/teardown.
+ std::vector<AllocationObserver*> allocation_observers_;
+
+ GarbageCollectionState gc_state_ = GarbageCollectionState::kNotRunning;
+
+ // The event being filled by the current GC cycle between
+ // NotifyMarkingStarted and NotifySweepingCompleted.
+ Event current_;
+ // The previous GC event, which is populated at NotifySweepingCompleted.
+ Event previous_;
+};
+
+template <typename Callback>
+void StatsCollector::ForAllAllocationObservers(Callback callback) {
+ for (AllocationObserver* observer : allocation_observers_) {
+ callback(observer);
+ }
+}
+
+} // namespace internal
+} // namespace cppgc
+
+#endif // V8_HEAP_CPPGC_STATS_COLLECTOR_H_
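
Consumers such as a heap-growing heuristic receive these notifications through the AllocationObserver interface above. A minimal sketch of an observer, assuming only the interface declared in this header; the bookkeeping policy is invented for illustration:

#include <cstddef>

#include "src/heap/cppgc/stats-collector.h"

namespace cppgc {
namespace internal {

// Tracks an estimate of live-plus-newly-allocated bytes; a growing heuristic
// could compare this against a limit to decide when to start the next GC.
class SimpleGrowingObserver final : public StatsCollector::AllocationObserver {
 public:
  void AllocatedObjectSizeIncreased(size_t delta) override {
    estimated_size_ += delta;
  }
  void AllocatedObjectSizeDecreased(size_t delta) override {
    estimated_size_ -= delta;
  }
  void ResetAllocatedObjectSize(size_t marked_bytes) override {
    estimated_size_ = marked_bytes;  // Exact value is known after marking.
  }

  size_t estimated_size() const { return estimated_size_; }

 private:
  size_t estimated_size_ = 0;
};

// Registration typically happens once at heap setup:
//   stats_collector->RegisterObserver(&observer);
// and is undone with UnregisterObserver(&observer) at teardown.

}  // namespace internal
}  // namespace cppgc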
diff --git a/chromium/v8/src/heap/cppgc/sweeper.cc b/chromium/v8/src/heap/cppgc/sweeper.cc
index 77d2d3c33e7..98a3117a2d4 100644
--- a/chromium/v8/src/heap/cppgc/sweeper.cc
+++ b/chromium/v8/src/heap/cppgc/sweeper.cc
@@ -4,8 +4,13 @@
#include "src/heap/cppgc/sweeper.h"
+#include <atomic>
+#include <memory>
#include <vector>
+#include "include/cppgc/platform.h"
+#include "src/base/optional.h"
+#include "src/base/platform/mutex.h"
#include "src/heap/cppgc/free-list.h"
#include "src/heap/cppgc/globals.h"
#include "src/heap/cppgc/heap-object-header-inl.h"
@@ -17,12 +22,16 @@
#include "src/heap/cppgc/object-start-bitmap.h"
#include "src/heap/cppgc/raw-heap.h"
#include "src/heap/cppgc/sanitizers.h"
+#include "src/heap/cppgc/stats-collector.h"
+#include "src/heap/cppgc/task-handle.h"
namespace cppgc {
namespace internal {
namespace {
+using v8::base::Optional;
+
class ObjectStartBitmapVerifier
: private HeapVisitor<ObjectStartBitmapVerifier> {
friend class HeapVisitor<ObjectStartBitmapVerifier>;
@@ -54,15 +63,126 @@ class ObjectStartBitmapVerifier
HeapObjectHeader* prev_ = nullptr;
};
+template <typename T>
+class ThreadSafeStack {
+ public:
+ ThreadSafeStack() = default;
+
+ void Push(T t) {
+ v8::base::LockGuard<v8::base::Mutex> lock(&mutex_);
+ vector_.push_back(std::move(t));
+ }
+
+ Optional<T> Pop() {
+ v8::base::LockGuard<v8::base::Mutex> lock(&mutex_);
+ if (vector_.empty()) return v8::base::nullopt;
+ T top = std::move(vector_.back());
+ vector_.pop_back();
+ // std::move is technically redundant here but works around a bug in gcc-7.
+ return std::move(top);
+ }
+
+ template <typename It>
+ void Insert(It begin, It end) {
+ v8::base::LockGuard<v8::base::Mutex> lock(&mutex_);
+ vector_.insert(vector_.end(), begin, end);
+ }
+
+ bool IsEmpty() const {
+ v8::base::LockGuard<v8::base::Mutex> lock(&mutex_);
+ return vector_.empty();
+ }
+
+ private:
+ std::vector<T> vector_;
+ mutable v8::base::Mutex mutex_;
+};
+
struct SpaceState {
- BaseSpace::Pages unswept_pages;
+ struct SweptPageState {
+ BasePage* page = nullptr;
+ std::vector<HeapObjectHeader*> unfinalized_objects;
+ FreeList cached_free_list;
+ std::vector<FreeList::Block> unfinalized_free_list;
+ bool is_empty = false;
+ };
+
+ ThreadSafeStack<BasePage*> unswept_pages;
+ ThreadSafeStack<SweptPageState> swept_unfinalized_pages;
};
+
using SpaceStates = std::vector<SpaceState>;
-bool SweepNormalPage(NormalPage* page) {
+void StickyUnmark(HeapObjectHeader* header) {
+ // Young generation in Oilpan uses sticky mark bits.
+#if !defined(CPPGC_YOUNG_GENERATION)
+ header->Unmark<HeapObjectHeader::AccessMode::kAtomic>();
+#endif
+}
+
+// Builder that finalizes objects and adds freelist entries right away.
+class InlinedFinalizationBuilder final {
+ public:
+ using ResultType = bool;
+
+ explicit InlinedFinalizationBuilder(BasePage* page) : page_(page) {}
+
+ void AddFinalizer(HeapObjectHeader* header, size_t size) {
+ header->Finalize();
+ SET_MEMORY_INACCESIBLE(header, size);
+ }
+
+ void AddFreeListEntry(Address start, size_t size) {
+ auto* space = NormalPageSpace::From(page_->space());
+ space->free_list().Add({start, size});
+ }
+
+ ResultType GetResult(bool is_empty) { return is_empty; }
+
+ private:
+ BasePage* page_;
+};
+
+// Builder that produces results for deferred processing.
+class DeferredFinalizationBuilder final {
+ public:
+ using ResultType = SpaceState::SweptPageState;
+
+ explicit DeferredFinalizationBuilder(BasePage* page) { result_.page = page; }
+
+ void AddFinalizer(HeapObjectHeader* header, size_t size) {
+ if (header->IsFinalizable()) {
+ result_.unfinalized_objects.push_back({header});
+ found_finalizer_ = true;
+ } else {
+ SET_MEMORY_INACCESIBLE(header, size);
+ }
+ }
+
+ void AddFreeListEntry(Address start, size_t size) {
+ if (found_finalizer_) {
+ result_.unfinalized_free_list.push_back({start, size});
+ } else {
+ result_.cached_free_list.Add({start, size});
+ }
+ found_finalizer_ = false;
+ }
+
+ ResultType&& GetResult(bool is_empty) {
+ result_.is_empty = is_empty;
+ return std::move(result_);
+ }
+
+ private:
+ ResultType result_;
+ bool found_finalizer_ = false;
+};
+
+template <typename FinalizationBuilder>
+typename FinalizationBuilder::ResultType SweepNormalPage(NormalPage* page) {
constexpr auto kAtomicAccess = HeapObjectHeader::AccessMode::kAtomic;
+ FinalizationBuilder builder(page);
- auto* space = NormalPageSpace::From(page->space());
ObjectStartBitmap& bitmap = page->object_start_bitmap();
bitmap.Clear();
@@ -79,18 +199,18 @@ bool SweepNormalPage(NormalPage* page) {
}
// Check if object is not marked (not reachable).
if (!header->IsMarked<kAtomicAccess>()) {
- header->Finalize();
- SET_MEMORY_INACCESIBLE(header, size);
+ builder.AddFinalizer(header, size);
begin += size;
continue;
}
// The object is alive.
const Address header_address = reinterpret_cast<Address>(header);
if (start_of_gap != header_address) {
- space->AddToFreeList(start_of_gap,
- static_cast<size_t>(header_address - start_of_gap));
+ builder.AddFreeListEntry(
+ start_of_gap, static_cast<size_t>(header_address - start_of_gap));
+ bitmap.SetBit(start_of_gap);
}
- header->Unmark<kAtomicAccess>();
+ StickyUnmark(header);
bitmap.SetBit(begin);
begin += size;
start_of_gap = begin;
@@ -98,56 +218,150 @@ bool SweepNormalPage(NormalPage* page) {
if (start_of_gap != page->PayloadStart() &&
start_of_gap != page->PayloadEnd()) {
- space->AddToFreeList(
+ builder.AddFreeListEntry(
start_of_gap, static_cast<size_t>(page->PayloadEnd() - start_of_gap));
+ bitmap.SetBit(start_of_gap);
}
const bool is_empty = (start_of_gap == page->PayloadStart());
- return is_empty;
+ return builder.GetResult(is_empty);
}
-// This visitor:
-// - resets linear allocation buffers and clears free lists for all spaces;
-// - moves all Heap pages to local Sweeper's state (SpaceStates).
-class PrepareForSweepVisitor final
- : public HeapVisitor<PrepareForSweepVisitor> {
+// SweepFinalizer is responsible for heap/space/page finalization. Finalization
+// is defined as a step following concurrent sweeping which:
+// - calls finalizers;
+// - returns (unmaps) empty pages;
+// - merges free lists into the space's free list.
+class SweepFinalizer final {
public:
- explicit PrepareForSweepVisitor(SpaceStates* states) : states_(states) {}
+ explicit SweepFinalizer(cppgc::Platform* platform) : platform_(platform) {}
- bool VisitNormalPageSpace(NormalPageSpace* space) {
- space->ResetLinearAllocationBuffer();
- space->free_list().Clear();
- (*states_)[space->index()].unswept_pages = space->RemoveAllPages();
- return true;
+ void FinalizeHeap(SpaceStates* space_states) {
+ for (SpaceState& space_state : *space_states) {
+ FinalizeSpace(&space_state);
+ }
}
- bool VisitLargePageSpace(LargePageSpace* space) {
- (*states_)[space->index()].unswept_pages = space->RemoveAllPages();
+ void FinalizeSpace(SpaceState* space_state) {
+ while (auto page_state = space_state->swept_unfinalized_pages.Pop()) {
+ FinalizePage(&*page_state);
+ }
+ }
+
+ bool FinalizeSpaceWithDeadline(SpaceState* space_state,
+ double deadline_in_seconds) {
+ DCHECK(platform_);
+ static constexpr size_t kDeadlineCheckInterval = 8;
+ size_t page_count = 1;
+
+ while (auto page_state = space_state->swept_unfinalized_pages.Pop()) {
+ FinalizePage(&*page_state);
+
+ if (page_count % kDeadlineCheckInterval == 0 &&
+ deadline_in_seconds <= platform_->MonotonicallyIncreasingTime()) {
+ return false;
+ }
+
+ page_count++;
+ }
return true;
}
+ void FinalizePage(SpaceState::SweptPageState* page_state) {
+ DCHECK(page_state);
+ DCHECK(page_state->page);
+ BasePage* page = page_state->page;
+
+ // Call finalizers.
+ for (HeapObjectHeader* object : page_state->unfinalized_objects) {
+ object->Finalize();
+ }
+
+ // Unmap page if empty.
+ if (page_state->is_empty) {
+ BasePage::Destroy(page);
+ return;
+ }
+
+ DCHECK(!page->is_large());
+
+ // Merge freelists without finalizers.
+ FreeList& space_freelist =
+ NormalPageSpace::From(page->space())->free_list();
+ space_freelist.Append(std::move(page_state->cached_free_list));
+
+ // Merge freelist with finalizers.
+ for (auto entry : page_state->unfinalized_free_list) {
+ space_freelist.Add(std::move(entry));
+ }
+
+ // Add the page to the space.
+ page->space()->AddPage(page);
+ }
+
private:
- SpaceStates* states_;
+ cppgc::Platform* platform_;
};
-class MutatorThreadSweepVisitor final
- : private HeapVisitor<MutatorThreadSweepVisitor> {
- friend class HeapVisitor<MutatorThreadSweepVisitor>;
+class MutatorThreadSweeper final : private HeapVisitor<MutatorThreadSweeper> {
+ friend class HeapVisitor<MutatorThreadSweeper>;
public:
- explicit MutatorThreadSweepVisitor(SpaceStates* space_states) {
- for (SpaceState& state : *space_states) {
- for (BasePage* page : state.unswept_pages) {
- Traverse(page);
+ explicit MutatorThreadSweeper(SpaceStates* states, cppgc::Platform* platform)
+ : states_(states), platform_(platform) {}
+
+ void Sweep() {
+ for (SpaceState& state : *states_) {
+ while (auto page = state.unswept_pages.Pop()) {
+ Traverse(*page);
+ }
+ }
+ }
+
+ bool SweepWithDeadline(double deadline_in_seconds) {
+ DCHECK(platform_);
+ static constexpr double kSlackInSeconds = 0.001;
+ for (SpaceState& state : *states_) {
+      // FinalizeSpaceWithDeadline() and SweepSpaceWithDeadline() only check
+      // the deadline once every kDeadlineCheckInterval (8) pages, so give a
+      // small slack for safety.
+ const double remaining_budget = deadline_in_seconds - kSlackInSeconds -
+ platform_->MonotonicallyIncreasingTime();
+ if (remaining_budget <= 0.) return false;
+
+ // First, prioritize finalization of pages that were swept concurrently.
+ SweepFinalizer finalizer(platform_);
+ if (!finalizer.FinalizeSpaceWithDeadline(&state, deadline_in_seconds)) {
+ return false;
+ }
+
+ // Help out the concurrent sweeper.
+ if (!SweepSpaceWithDeadline(&state, deadline_in_seconds)) {
+ return false;
}
- state.unswept_pages.clear();
}
+ return true;
}
private:
+ bool SweepSpaceWithDeadline(SpaceState* state, double deadline_in_seconds) {
+ static constexpr size_t kDeadlineCheckInterval = 8;
+ size_t page_count = 1;
+ while (auto page = state->unswept_pages.Pop()) {
+ Traverse(*page);
+ if (page_count % kDeadlineCheckInterval == 0 &&
+ deadline_in_seconds <= platform_->MonotonicallyIncreasingTime()) {
+ return false;
+ }
+ page_count++;
+ }
+
+ return true;
+ }
+
bool VisitNormalPage(NormalPage* page) {
- const bool is_empty = SweepNormalPage(page);
+ const bool is_empty = SweepNormalPage<InlinedFinalizationBuilder>(page);
if (is_empty) {
NormalPage::Destroy(page);
} else {
@@ -157,23 +371,119 @@ class MutatorThreadSweepVisitor final
}
bool VisitLargePage(LargePage* page) {
- if (page->ObjectHeader()->IsMarked()) {
+ HeapObjectHeader* header = page->ObjectHeader();
+ if (header->IsMarked()) {
+ StickyUnmark(header);
page->space()->AddPage(page);
} else {
- page->ObjectHeader()->Finalize();
+ header->Finalize();
LargePage::Destroy(page);
}
return true;
}
+
+ SpaceStates* states_;
+ cppgc::Platform* platform_;
+};
+
+class ConcurrentSweepTask final : public v8::JobTask,
+ private HeapVisitor<ConcurrentSweepTask> {
+ friend class HeapVisitor<ConcurrentSweepTask>;
+
+ public:
+ explicit ConcurrentSweepTask(SpaceStates* states) : states_(states) {}
+
+ void Run(v8::JobDelegate* delegate) final {
+ for (SpaceState& state : *states_) {
+ while (auto page = state.unswept_pages.Pop()) {
+ Traverse(*page);
+ if (delegate->ShouldYield()) return;
+ }
+ }
+ is_completed_.store(true, std::memory_order_relaxed);
+ }
+
+ size_t GetMaxConcurrency() const final {
+ return is_completed_.load(std::memory_order_relaxed) ? 0 : 1;
+ }
+
+ private:
+ bool VisitNormalPage(NormalPage* page) {
+ SpaceState::SweptPageState sweep_result =
+ SweepNormalPage<DeferredFinalizationBuilder>(page);
+ const size_t space_index = page->space()->index();
+ DCHECK_GT(states_->size(), space_index);
+ SpaceState& space_state = (*states_)[space_index];
+ space_state.swept_unfinalized_pages.Push(std::move(sweep_result));
+ return true;
+ }
+
+ bool VisitLargePage(LargePage* page) {
+ HeapObjectHeader* header = page->ObjectHeader();
+ if (header->IsMarked()) {
+ StickyUnmark(header);
+ page->space()->AddPage(page);
+ return true;
+ }
+ if (!header->IsFinalizable()) {
+ LargePage::Destroy(page);
+ return true;
+ }
+ const size_t space_index = page->space()->index();
+ DCHECK_GT(states_->size(), space_index);
+ SpaceState& state = (*states_)[space_index];
+ state.swept_unfinalized_pages.Push(
+ {page, {page->ObjectHeader()}, {}, {}, true});
+ return true;
+ }
+
+ SpaceStates* states_;
+ std::atomic_bool is_completed_{false};
+};
+
+// This visitor:
+// - clears free lists for all spaces (linear allocation buffers are expected
+//   to be empty already, see Sweeper::Start());
+// - moves all heap pages to the Sweeper's local state (SpaceStates).
+class PrepareForSweepVisitor final
+ : public HeapVisitor<PrepareForSweepVisitor> {
+ public:
+ explicit PrepareForSweepVisitor(SpaceStates* states) : states_(states) {}
+
+ bool VisitNormalPageSpace(NormalPageSpace* space) {
+ DCHECK(!space->linear_allocation_buffer().size());
+ space->free_list().Clear();
+ ExtractPages(space);
+ return true;
+ }
+
+ bool VisitLargePageSpace(LargePageSpace* space) {
+ ExtractPages(space);
+ return true;
+ }
+
+ private:
+ void ExtractPages(BaseSpace* space) {
+ BaseSpace::Pages space_pages = space->RemoveAllPages();
+ (*states_)[space->index()].unswept_pages.Insert(space_pages.begin(),
+ space_pages.end());
+ }
+
+ SpaceStates* states_;
};
} // namespace
class Sweeper::SweeperImpl final {
public:
- explicit SweeperImpl(RawHeap* heap) : heap_(heap) {
- space_states_.resize(heap_->size());
- }
+ SweeperImpl(RawHeap* heap, cppgc::Platform* platform,
+ StatsCollector* stats_collector)
+ : heap_(heap),
+ stats_collector_(stats_collector),
+ space_states_(heap->size()),
+ platform_(platform),
+ foreground_task_runner_(platform_->GetForegroundTaskRunner()) {}
+
+ ~SweeperImpl() { CancelSweepers(); }
void Start(Config config) {
is_in_progress_ = true;
@@ -181,29 +491,114 @@ class Sweeper::SweeperImpl final {
ObjectStartBitmapVerifier().Verify(heap_);
#endif
PrepareForSweepVisitor(&space_states_).Traverse(heap_);
+
if (config == Config::kAtomic) {
Finish();
} else {
DCHECK_EQ(Config::kIncrementalAndConcurrent, config);
- // TODO(chromium:1056170): Schedule concurrent sweeping.
+ ScheduleIncrementalSweeping();
+ ScheduleConcurrentSweeping();
}
}
void Finish() {
if (!is_in_progress_) return;
- MutatorThreadSweepVisitor s(&space_states_);
+ // First, call finalizers on the mutator thread.
+ SweepFinalizer finalizer(platform_);
+ finalizer.FinalizeHeap(&space_states_);
+
+ // Then, help out the concurrent thread.
+ MutatorThreadSweeper sweeper(&space_states_, platform_);
+ sweeper.Sweep();
+
+ // Synchronize with the concurrent sweeper and call remaining finalizers.
+ SynchronizeAndFinalizeConcurrentSweeping();
is_in_progress_ = false;
+
+ stats_collector_->NotifySweepingCompleted();
}
private:
- SpaceStates space_states_;
+ class IncrementalSweepTask : public v8::IdleTask {
+ public:
+ using Handle = SingleThreadedHandle;
+
+ explicit IncrementalSweepTask(SweeperImpl* sweeper)
+ : sweeper_(sweeper), handle_(Handle::NonEmptyTag{}) {}
+
+ static Handle Post(SweeperImpl* sweeper, v8::TaskRunner* runner) {
+ auto task = std::make_unique<IncrementalSweepTask>(sweeper);
+ auto handle = task->GetHandle();
+ runner->PostIdleTask(std::move(task));
+ return handle;
+ }
+
+ private:
+ void Run(double deadline_in_seconds) override {
+ if (handle_.IsCanceled() || !sweeper_->is_in_progress_) return;
+
+ MutatorThreadSweeper sweeper(&sweeper_->space_states_,
+ sweeper_->platform_);
+ const bool sweep_complete =
+ sweeper.SweepWithDeadline(deadline_in_seconds);
+
+ if (sweep_complete) {
+ sweeper_->SynchronizeAndFinalizeConcurrentSweeping();
+ } else {
+ sweeper_->ScheduleIncrementalSweeping();
+ }
+ }
+
+ Handle GetHandle() const { return handle_; }
+
+ SweeperImpl* sweeper_;
+ // TODO(chromium:1056170): Change to CancelableTask.
+ Handle handle_;
+ };
+
+ void ScheduleIncrementalSweeping() {
+ if (!platform_ || !foreground_task_runner_) return;
+
+ incremental_sweeper_handle_ =
+ IncrementalSweepTask::Post(this, foreground_task_runner_.get());
+ }
+
+ void ScheduleConcurrentSweeping() {
+ if (!platform_) return;
+
+ concurrent_sweeper_handle_ = platform_->PostJob(
+ v8::TaskPriority::kUserVisible,
+ std::make_unique<ConcurrentSweepTask>(&space_states_));
+ }
+
+ void CancelSweepers() {
+ if (incremental_sweeper_handle_) incremental_sweeper_handle_.Cancel();
+ if (concurrent_sweeper_handle_) concurrent_sweeper_handle_->Cancel();
+ }
+
+ void SynchronizeAndFinalizeConcurrentSweeping() {
+ CancelSweepers();
+
+ SweepFinalizer finalizer(platform_);
+ finalizer.FinalizeHeap(&space_states_);
+ }
+
RawHeap* heap_;
+ StatsCollector* stats_collector_;
+ SpaceStates space_states_;
+ cppgc::Platform* platform_;
+ std::shared_ptr<v8::TaskRunner> foreground_task_runner_;
+ IncrementalSweepTask::Handle incremental_sweeper_handle_;
+ std::unique_ptr<v8::JobHandle> concurrent_sweeper_handle_;
bool is_in_progress_ = false;
};
-Sweeper::Sweeper(RawHeap* heap) : impl_(std::make_unique<SweeperImpl>(heap)) {}
+Sweeper::Sweeper(RawHeap* heap, cppgc::Platform* platform,
+ StatsCollector* stats_collector)
+ : impl_(std::make_unique<SweeperImpl>(heap, platform, stats_collector)) {}
+
Sweeper::~Sweeper() = default;
void Sweeper::Start(Config config) { impl_->Start(config); }
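
The incremental path above relies on one recurring pattern: drain a work list, but poll a monotonic clock only every kDeadlineCheckInterval items and return false so the caller can re-post the task when the budget runs out. A minimal standalone sketch of that pattern follows; the work-item type and helper names are illustrative, not part of this change.

#include <chrono>
#include <cstddef>
#include <vector>

// Stand-in for cppgc::Platform::MonotonicallyIncreasingTime(): seconds on a
// monotonic clock.
static double MonotonicSeconds() {
  return std::chrono::duration<double>(
             std::chrono::steady_clock::now().time_since_epoch())
      .count();
}

// Mirrors SweepSpaceWithDeadline()/FinalizeSpaceWithDeadline(): process items
// until done or until the deadline passes, checking the clock only every
// kDeadlineCheckInterval items to keep the polling overhead low.
static bool DrainWithDeadline(std::vector<int>* work,
                              double deadline_in_seconds) {
  static constexpr size_t kDeadlineCheckInterval = 8;
  size_t count = 1;
  while (!work->empty()) {
    work->pop_back();  // stand-in for Traverse(page) / FinalizePage(page)
    if (count % kDeadlineCheckInterval == 0 &&
        deadline_in_seconds <= MonotonicSeconds()) {
      return false;  // budget exhausted; caller re-posts an idle task
    }
    ++count;
  }
  return true;
}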
diff --git a/chromium/v8/src/heap/cppgc/sweeper.h b/chromium/v8/src/heap/cppgc/sweeper.h
index 3e387731686..6ce17ea8fc8 100644
--- a/chromium/v8/src/heap/cppgc/sweeper.h
+++ b/chromium/v8/src/heap/cppgc/sweeper.h
@@ -10,20 +10,25 @@
#include "src/base/macros.h"
namespace cppgc {
+
+class Platform;
+
namespace internal {
+class StatsCollector;
class RawHeap;
class V8_EXPORT_PRIVATE Sweeper final {
public:
enum class Config { kAtomic, kIncrementalAndConcurrent };
- explicit Sweeper(RawHeap*);
+ Sweeper(RawHeap*, cppgc::Platform*, StatsCollector*);
~Sweeper();
Sweeper(const Sweeper&) = delete;
Sweeper& operator=(const Sweeper&) = delete;
+ // Sweeper::Start assumes the heap holds no linear allocation buffers.
void Start(Config);
void Finish();
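
Taken together, the new constructor and Start() give the embedder-facing flow below. This is a hedged sketch only: the raw heap, platform, and stats collector are assumed to be wired up elsewhere (as the cppgc heap does internally), and error handling is omitted.

void StartSweepingSketch(cppgc::internal::RawHeap* raw_heap,
                         cppgc::Platform* platform,
                         cppgc::internal::StatsCollector* stats_collector) {
  cppgc::internal::Sweeper sweeper(raw_heap, platform, stats_collector);
  // kAtomic sweeps everything eagerly; kIncrementalAndConcurrent schedules an
  // idle-time task plus a concurrent job and returns immediately.
  sweeper.Start(cppgc::internal::Sweeper::Config::kIncrementalAndConcurrent);
  // A caller that needs the heap fully swept (e.g. before the next GC cycle)
  // can force completion at any point.
  sweeper.Finish();
}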
diff --git a/chromium/v8/src/heap/cppgc/task-handle.h b/chromium/v8/src/heap/cppgc/task-handle.h
new file mode 100644
index 00000000000..cbd8cc4a61f
--- /dev/null
+++ b/chromium/v8/src/heap/cppgc/task-handle.h
@@ -0,0 +1,47 @@
+// Copyright 2020 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_HEAP_CPPGC_TASK_HANDLE_H_
+#define V8_HEAP_CPPGC_TASK_HANDLE_H_
+
+#include <memory>
+
+#include "src/base/logging.h"
+
+namespace cppgc {
+namespace internal {
+
+// A handle that is used for cancelling individual tasks.
+struct SingleThreadedHandle {
+ struct NonEmptyTag {};
+
+  // Default construction results in an empty handle.
+ SingleThreadedHandle() = default;
+
+ explicit SingleThreadedHandle(NonEmptyTag)
+ : is_cancelled_(std::make_shared<bool>(false)) {}
+
+ void Cancel() {
+ DCHECK(is_cancelled_);
+ *is_cancelled_ = true;
+ }
+
+ bool IsCanceled() const {
+ DCHECK(is_cancelled_);
+ return *is_cancelled_;
+ }
+
+ // A handle is active if it is non-empty and not cancelled.
+ explicit operator bool() const {
+ return is_cancelled_.get() && !*is_cancelled_.get();
+ }
+
+ private:
+ std::shared_ptr<bool> is_cancelled_;
+};
+
+} // namespace internal
+} // namespace cppgc
+
+#endif // V8_HEAP_CPPGC_TASK_HANDLE_H_
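
A short sketch of how this handle is meant to be used (the task class and runner wiring are illustrative; IncrementalSweepTask in sweeper.cc above follows the same shape): the poster keeps a copy of the handle, and cancelling it turns an already-queued task into a no-op.

#include <memory>

#include "src/heap/cppgc/task-handle.h"

class SketchTask {
 public:
  using Handle = cppgc::internal::SingleThreadedHandle;

  SketchTask() : handle_(Handle::NonEmptyTag{}) {}

  // The poster stores this copy; both copies share the same cancellation flag.
  Handle GetHandle() const { return handle_; }

  void Run() {
    if (handle_.IsCanceled()) return;  // cancelled after posting, before running
    // ... actual work ...
  }

 private:
  Handle handle_;
};

void PostAndMaybeCancel() {
  auto task = std::make_unique<SketchTask>();
  SketchTask::Handle handle = task->GetHandle();
  // ... hand |task| to a task runner ...
  handle.Cancel();  // Run() will now bail out immediately.
}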
diff --git a/chromium/v8/src/heap/cppgc/virtual-memory.cc b/chromium/v8/src/heap/cppgc/virtual-memory.cc
new file mode 100644
index 00000000000..070baa71192
--- /dev/null
+++ b/chromium/v8/src/heap/cppgc/virtual-memory.cc
@@ -0,0 +1,56 @@
+// Copyright 2020 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/heap/cppgc/virtual-memory.h"
+
+#include "include/cppgc/platform.h"
+#include "src/base/macros.h"
+
+namespace cppgc {
+namespace internal {
+
+VirtualMemory::VirtualMemory(PageAllocator* page_allocator, size_t size,
+ size_t alignment, void* hint)
+ : page_allocator_(page_allocator) {
+ DCHECK_NOT_NULL(page_allocator);
+ DCHECK(IsAligned(size, page_allocator->CommitPageSize()));
+
+ const size_t page_size = page_allocator_->AllocatePageSize();
+ start_ = page_allocator->AllocatePages(hint, RoundUp(size, page_size),
+ RoundUp(alignment, page_size),
+ PageAllocator::kNoAccess);
+ if (start_) {
+ size_ = RoundUp(size, page_size);
+ }
+}
+
+VirtualMemory::~VirtualMemory() V8_NOEXCEPT {
+ if (IsReserved()) {
+ page_allocator_->FreePages(start_, size_);
+ }
+}
+
+VirtualMemory::VirtualMemory(VirtualMemory&& other) V8_NOEXCEPT
+ : page_allocator_(std::move(other.page_allocator_)),
+ start_(std::move(other.start_)),
+ size_(std::move(other.size_)) {
+ other.Reset();
+}
+
+VirtualMemory& VirtualMemory::operator=(VirtualMemory&& other) V8_NOEXCEPT {
+ DCHECK(!IsReserved());
+ page_allocator_ = std::move(other.page_allocator_);
+ start_ = std::move(other.start_);
+ size_ = std::move(other.size_);
+ other.Reset();
+ return *this;
+}
+
+void VirtualMemory::Reset() {
+ start_ = nullptr;
+ size_ = 0;
+}
+
+} // namespace internal
+} // namespace cppgc
diff --git a/chromium/v8/src/heap/cppgc/virtual-memory.h b/chromium/v8/src/heap/cppgc/virtual-memory.h
new file mode 100644
index 00000000000..1489abb9dea
--- /dev/null
+++ b/chromium/v8/src/heap/cppgc/virtual-memory.h
@@ -0,0 +1,60 @@
+// Copyright 2020 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_HEAP_CPPGC_VIRTUAL_MEMORY_H_
+#define V8_HEAP_CPPGC_VIRTUAL_MEMORY_H_
+
+#include <cstdint>
+
+#include "include/cppgc/platform.h"
+#include "src/base/macros.h"
+
+namespace cppgc {
+namespace internal {
+
+// Represents and controls an area of reserved memory.
+class V8_EXPORT_PRIVATE VirtualMemory {
+ public:
+ // Empty VirtualMemory object, controlling no reserved memory.
+ VirtualMemory() = default;
+
+  // Reserves virtual memory containing an area of the given size, aligned per
+  // |alignment| (both rounded up to the |page_allocator|'s allocate page
+  // size). |size| must be a multiple of the |page_allocator|'s commit page
+  // size.
+ VirtualMemory(PageAllocator*, size_t size, size_t alignment,
+ void* hint = nullptr);
+
+ // Releases the reserved memory, if any, controlled by this VirtualMemory
+ // object.
+ ~VirtualMemory() V8_NOEXCEPT;
+
+ VirtualMemory(VirtualMemory&&) V8_NOEXCEPT;
+ VirtualMemory& operator=(VirtualMemory&&) V8_NOEXCEPT;
+
+ // Returns whether the memory has been reserved.
+ bool IsReserved() const { return start_ != nullptr; }
+
+ void* address() const {
+ DCHECK(IsReserved());
+ return start_;
+ }
+
+ size_t size() const {
+ DCHECK(IsReserved());
+ return size_;
+ }
+
+ private:
+ // Resets to the default state.
+ void Reset();
+
+ PageAllocator* page_allocator_ = nullptr;
+ void* start_ = nullptr;
+ size_t size_ = 0;
+};
+
+} // namespace internal
+} // namespace cppgc
+
+#endif // V8_HEAP_CPPGC_VIRTUAL_MEMORY_H_
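
A hedged usage sketch of the new reservation type: the PageAllocator is assumed to come from the embedder's platform, and the 4 MiB size and alignment are arbitrary values that satisfy the constructor's commit-page-size requirement.

#include <cstddef>

#include "src/heap/cppgc/virtual-memory.h"

void ReserveSketch(cppgc::PageAllocator* page_allocator) {
  constexpr size_t kSize = 4 * 1024 * 1024;       // multiple of commit page size
  constexpr size_t kAlignment = 4 * 1024 * 1024;  // rounded up to allocate page size
  cppgc::internal::VirtualMemory reservation(page_allocator, kSize, kAlignment);
  if (!reservation.IsReserved()) return;  // the reservation may fail
  void* base = reservation.address();     // start of the reservation
  size_t size = reservation.size();       // rounded up to allocate page size
  (void)base;
  (void)size;
  // The pages are released automatically when |reservation| goes out of scope.
}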
diff --git a/chromium/v8/src/heap/cppgc/visitor.cc b/chromium/v8/src/heap/cppgc/visitor.cc
new file mode 100644
index 00000000000..74cab257b6e
--- /dev/null
+++ b/chromium/v8/src/heap/cppgc/visitor.cc
@@ -0,0 +1,76 @@
+// Copyright 2020 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/heap/cppgc/visitor.h"
+
+#include "src/heap/cppgc/gc-info-table.h"
+#include "src/heap/cppgc/heap-object-header-inl.h"
+#include "src/heap/cppgc/heap-page.h"
+#include "src/heap/cppgc/page-memory-inl.h"
+#include "src/heap/cppgc/sanitizers.h"
+
+namespace cppgc {
+
+#ifdef V8_ENABLE_CHECKS
+void Visitor::CheckObjectNotInConstruction(const void* address) {
+ // TODO(chromium:1056170): |address| is an inner pointer of an object. Check
+ // that the object is not in construction.
+}
+#endif // V8_ENABLE_CHECKS
+
+namespace internal {
+
+ConservativeTracingVisitor::ConservativeTracingVisitor(
+ HeapBase& heap, PageBackend& page_backend)
+ : heap_(heap), page_backend_(page_backend) {}
+
+namespace {
+
+void TraceConservatively(ConservativeTracingVisitor* visitor,
+ const HeapObjectHeader& header) {
+ Address* payload = reinterpret_cast<Address*>(header.Payload());
+ const size_t payload_size = header.GetSize();
+ for (size_t i = 0; i < (payload_size / sizeof(Address)); ++i) {
+ Address maybe_ptr = payload[i];
+#if defined(MEMORY_SANITIZER)
+    // |payload| may be uninitialized by design or just contain padding bytes.
+    // Unpoison the local copy for conservative marking while leaving the
+    // original MSAN state of |payload| untouched.
+ MSAN_UNPOISON(&maybe_ptr, sizeof(maybe_ptr));
+#endif
+ if (maybe_ptr) {
+ visitor->TraceConservativelyIfNeeded(maybe_ptr);
+ }
+ }
+}
+
+} // namespace
+
+void ConservativeTracingVisitor::TraceConservativelyIfNeeded(
+ const void* address) {
+ // TODO(chromium:1056170): Add page bloom filter
+
+ const BasePage* page = reinterpret_cast<const BasePage*>(
+ page_backend_.Lookup(static_cast<ConstAddress>(address)));
+
+ if (!page) return;
+
+ DCHECK_EQ(&heap_, page->heap());
+
+ auto* header = page->TryObjectHeaderFromInnerAddress(
+ const_cast<Address>(reinterpret_cast<ConstAddress>(address)));
+
+ if (!header) return;
+
+ if (!header->IsInConstruction<HeapObjectHeader::AccessMode::kNonAtomic>()) {
+ Visit(header->Payload(),
+ {header->Payload(),
+ GlobalGCInfoTable::GCInfoFromIndex(header->GetGCInfoIndex()).trace});
+ } else {
+ VisitConservatively(*header, TraceConservatively);
+ }
+}
+
+} // namespace internal
+} // namespace cppgc
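
The core of TraceConservatively() above is a plain word scan over the object payload. Below is a standalone sketch of that scan, detached from the heap types; the callback is a stand-in for TraceConservativelyIfNeeded().

#include <cstdint>

// Treats every pointer-aligned word in [begin, end) as a potential pointer and
// hands non-null values to |maybe_pointer_callback| for a heap lookup.
void ScanWordsConservatively(const void* begin, const void* end,
                             void (*maybe_pointer_callback)(uintptr_t)) {
  const uintptr_t* current = static_cast<const uintptr_t*>(begin);
  const uintptr_t* limit = static_cast<const uintptr_t*>(end);
  for (; current < limit; ++current) {
    const uintptr_t word = *current;  // may be a pointer, an integer, or padding
    if (word) maybe_pointer_callback(word);
  }
}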
diff --git a/chromium/v8/src/heap/cppgc/visitor.h b/chromium/v8/src/heap/cppgc/visitor.h
index caa840b4dc3..5003e31f8f4 100644
--- a/chromium/v8/src/heap/cppgc/visitor.h
+++ b/chromium/v8/src/heap/cppgc/visitor.h
@@ -5,16 +5,50 @@
#ifndef V8_HEAP_CPPGC_VISITOR_H_
#define V8_HEAP_CPPGC_VISITOR_H_
+#include "include/cppgc/persistent.h"
#include "include/cppgc/visitor.h"
+#include "src/heap/cppgc/heap-object-header.h"
namespace cppgc {
namespace internal {
+class HeapBase;
+class HeapObjectHeader;
+class PageBackend;
+
// Base visitor that is allowed to create a public cppgc::Visitor object and
// use its internals.
class VisitorBase : public cppgc::Visitor {
public:
VisitorBase() = default;
+
+ template <typename T>
+ void TraceRootForTesting(const Persistent<T>& p, const SourceLocation& loc) {
+ TraceRoot(p, loc);
+ }
+
+ template <typename T>
+ void TraceRootForTesting(const WeakPersistent<T>& p,
+ const SourceLocation& loc) {
+ TraceRoot(p, loc);
+ }
+};
+
+// Regular visitor that additionally allows for conservative tracing.
+class ConservativeTracingVisitor : public VisitorBase {
+ public:
+ ConservativeTracingVisitor(HeapBase&, PageBackend&);
+
+ void TraceConservativelyIfNeeded(const void*);
+
+ protected:
+ using TraceConservativelyCallback = void(ConservativeTracingVisitor*,
+ const HeapObjectHeader&);
+ virtual void VisitConservatively(HeapObjectHeader&,
+ TraceConservativelyCallback) {}
+
+ HeapBase& heap_;
+ PageBackend& page_backend_;
};
} // namespace internal
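
The VisitConservatively() hook added here is how a concrete visitor opts into conservative handling of in-construction objects. A sketch of such a subclass, assuming the internal headers above are available; a real visitor (e.g. the marker's) would provide a similar override.

#include "src/heap/cppgc/visitor.h"

class SketchConservativeVisitor final
    : public cppgc::internal::ConservativeTracingVisitor {
 public:
  using ConservativeTracingVisitor::ConservativeTracingVisitor;

 protected:
  void VisitConservatively(cppgc::internal::HeapObjectHeader& header,
                           TraceConservativelyCallback callback) final {
    // In-construction objects have no usable GCInfo yet, so scan their payload
    // word by word via the callback supplied by TraceConservativelyIfNeeded().
    callback(this, header);
  }
};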
diff --git a/chromium/v8/src/heap/cppgc/write-barrier.cc b/chromium/v8/src/heap/cppgc/write-barrier.cc
new file mode 100644
index 00000000000..683a3fc091f
--- /dev/null
+++ b/chromium/v8/src/heap/cppgc/write-barrier.cc
@@ -0,0 +1,84 @@
+// Copyright 2020 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "include/cppgc/internal/write-barrier.h"
+
+#include "include/cppgc/internal/pointer-policies.h"
+#include "src/heap/cppgc/globals.h"
+#include "src/heap/cppgc/heap-object-header-inl.h"
+#include "src/heap/cppgc/heap-object-header.h"
+#include "src/heap/cppgc/heap-page-inl.h"
+#include "src/heap/cppgc/heap.h"
+#include "src/heap/cppgc/marker.h"
+#include "src/heap/cppgc/marking-visitor.h"
+
+#if defined(CPPGC_CAGED_HEAP)
+#include "include/cppgc/internal/caged-heap-local-data.h"
+#endif
+
+namespace cppgc {
+namespace internal {
+
+namespace {
+
+void MarkValue(const BasePage* page, Marker* marker, const void* value) {
+#if defined(CPPGC_CAGED_HEAP)
+ DCHECK(reinterpret_cast<CagedHeapLocalData*>(
+ reinterpret_cast<uintptr_t>(value) &
+ ~(kCagedHeapReservationAlignment - 1))
+ ->is_marking_in_progress);
+#endif
+ auto& header =
+ const_cast<HeapObjectHeader&>(page->ObjectHeaderFromInnerAddress(value));
+ if (!header.TryMarkAtomic()) return;
+
+ DCHECK(marker);
+
+ if (V8_UNLIKELY(MutatorThreadMarkingVisitor::IsInConstruction(header))) {
+ // It is assumed that objects on not_fully_constructed_worklist_ are not
+ // marked.
+ header.Unmark();
+ Marker::NotFullyConstructedWorklist::View not_fully_constructed_worklist(
+ marker->not_fully_constructed_worklist(), Marker::kMutatorThreadId);
+ not_fully_constructed_worklist.Push(header.Payload());
+ return;
+ }
+
+ Marker::WriteBarrierWorklist::View write_barrier_worklist(
+ marker->write_barrier_worklist(), Marker::kMutatorThreadId);
+ write_barrier_worklist.Push(&header);
+}
+
+} // namespace
+
+void WriteBarrier::MarkingBarrierSlowWithSentinelCheck(const void* value) {
+ if (!value || value == kSentinelPointer) return;
+
+ MarkingBarrierSlow(value);
+}
+
+void WriteBarrier::MarkingBarrierSlow(const void* value) {
+ const BasePage* page = BasePage::FromPayload(value);
+ const auto* heap = page->heap();
+
+  // If the marker is not set up, no incremental/concurrent marking is in
+  // progress.
+ if (!heap->marker()) return;
+
+ MarkValue(page, heap->marker(), value);
+}
+
+#if defined(CPPGC_YOUNG_GENERATION)
+void WriteBarrier::GenerationalBarrierSlow(CagedHeapLocalData* local_data,
+ const AgeTable& age_table,
+ const void* slot,
+ uintptr_t value_offset) {
+ if (age_table[value_offset] == AgeTable::Age::kOld) return;
+ // Record slot.
+ local_data->heap_base->remembered_slots().insert(const_cast<void*>(slot));
+}
+#endif
+
+} // namespace internal
+} // namespace cppgc
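
GenerationalBarrierSlow() above boils down to a single age-table lookup. A simplified, standalone sketch of that predicate follows; the flat age array and the std::set are stand-ins for AgeTable and the heap's remembered_slots().

#include <cstdint>
#include <set>

enum class Age : uint8_t { kOld, kYoung };

// Records |slot| only when the object it now points to is young; old-to-old
// stores are filtered out with one table lookup.
void GenerationalBarrierSketch(const Age* age_table,
                               std::set<void*>* remembered_slots, void* slot,
                               uintptr_t value_offset) {
  if (age_table[value_offset] == Age::kOld) return;
  remembered_slots->insert(slot);
}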
diff --git a/chromium/v8/src/heap/factory-base.cc b/chromium/v8/src/heap/factory-base.cc
index 028949e861d..5dd88f9fa55 100644
--- a/chromium/v8/src/heap/factory-base.cc
+++ b/chromium/v8/src/heap/factory-base.cc
@@ -722,7 +722,7 @@ HeapObject FactoryBase<Impl>::AllocateRawArray(int size,
AllocationType allocation) {
HeapObject result = AllocateRaw(size, allocation);
if (size > kMaxRegularHeapObjectSize && FLAG_use_marking_progress_bar) {
- MemoryChunk* chunk = MemoryChunk::FromHeapObject(result);
+ BasicMemoryChunk* chunk = BasicMemoryChunk::FromHeapObject(result);
chunk->SetFlag<AccessMode::ATOMIC>(MemoryChunk::HAS_PROGRESS_BAR);
}
return result;
diff --git a/chromium/v8/src/heap/factory.cc b/chromium/v8/src/heap/factory.cc
index 25825f35f79..a9e11e51041 100644
--- a/chromium/v8/src/heap/factory.cc
+++ b/chromium/v8/src/heap/factory.cc
@@ -15,8 +15,10 @@
#include "src/builtins/constants-table-builder.h"
#include "src/codegen/compiler.h"
#include "src/common/globals.h"
+#include "src/diagnostics/basic-block-profiler.h"
#include "src/execution/isolate-inl.h"
#include "src/execution/protectors-inl.h"
+#include "src/heap/basic-memory-chunk.h"
#include "src/heap/heap-inl.h"
#include "src/heap/incremental-marking.h"
#include "src/heap/mark-compact-inl.h"
@@ -118,6 +120,22 @@ MaybeHandle<Code> Factory::CodeBuilder::BuildInternal(
data_container->set_kind_specific_flags(kind_specific_flags_);
}
+ // Basic block profiling data for builtins is stored in the JS heap rather
+ // than in separately-allocated C++ objects. Allocate that data now if
+ // appropriate.
+ Handle<OnHeapBasicBlockProfilerData> on_heap_profiler_data;
+ if (profiler_data_ && isolate_->IsGeneratingEmbeddedBuiltins()) {
+ on_heap_profiler_data = profiler_data_->CopyToJSHeap(isolate_);
+
+ // Add the on-heap data to a global list, which keeps it alive and allows
+ // iteration.
+ Handle<ArrayList> list(isolate_->heap()->basic_block_profiling_data(),
+ isolate_);
+ Handle<ArrayList> new_list =
+ ArrayList::Add(isolate_, list, on_heap_profiler_data);
+ isolate_->heap()->SetBasicBlockProfilingData(new_list);
+ }
+
Handle<Code> code;
{
int object_size = ComputeCodeObjectSize(code_desc_);
@@ -189,6 +207,14 @@ MaybeHandle<Code> Factory::CodeBuilder::BuildInternal(
*(self_reference.location()) = code->ptr();
}
+ // Likewise, any references to the basic block counters marker need to be
+ // updated to point to the newly-allocated counters array.
+ if (!on_heap_profiler_data.is_null()) {
+ isolate_->builtins_constants_table_builder()
+ ->PatchBasicBlockCountersReference(
+ handle(on_heap_profiler_data->counts(), isolate_));
+ }
+
// Migrate generated code.
// The generated code can contain embedded objects (typically from handles)
// in a pointer-to-tagged-value format (i.e. with indirection like a handle)
@@ -211,6 +237,21 @@ MaybeHandle<Code> Factory::CodeBuilder::BuildInternal(
code->FlushICache();
}
+ if (profiler_data_ && FLAG_turbo_profiling_verbose) {
+#ifdef ENABLE_DISASSEMBLER
+ std::ostringstream os;
+ code->Disassemble(nullptr, os, isolate_);
+ if (!on_heap_profiler_data.is_null()) {
+ Handle<String> disassembly =
+ isolate_->factory()->NewStringFromAsciiChecked(os.str().c_str(),
+ AllocationType::kOld);
+ on_heap_profiler_data->set_code(*disassembly);
+ } else {
+ profiler_data_->SetCode(os);
+ }
+#endif // ENABLE_DISASSEMBLER
+ }
+
return code;
}
@@ -325,6 +366,13 @@ Handle<Oddball> Factory::NewSelfReferenceMarker() {
Oddball::kSelfReferenceMarker);
}
+Handle<Oddball> Factory::NewBasicBlockCountersMarker() {
+ return NewOddball(basic_block_counters_marker_map(),
+ "basic_block_counters_marker",
+ handle(Smi::FromInt(-1), isolate()), "undefined",
+ Oddball::kBasicBlockCountersMarker);
+}
+
Handle<PropertyArray> Factory::NewPropertyArray(int length) {
DCHECK_LE(0, length);
if (length == 0) return empty_property_array();
@@ -347,7 +395,7 @@ MaybeHandle<FixedArray> Factory::TryNewFixedArray(
HeapObject result;
if (!allocation.To(&result)) return MaybeHandle<FixedArray>();
if (size > kMaxRegularHeapObjectSize && FLAG_use_marking_progress_bar) {
- MemoryChunk* chunk = MemoryChunk::FromHeapObject(result);
+ BasicMemoryChunk* chunk = BasicMemoryChunk::FromHeapObject(result);
chunk->SetFlag<AccessMode::ATOMIC>(MemoryChunk::HAS_PROGRESS_BAR);
}
result.set_map_after_allocation(*fixed_array_map(), SKIP_WRITE_BARRIER);
@@ -1136,8 +1184,8 @@ Handle<Context> Factory::NewDebugEvaluateContext(Handle<Context> previous,
Handle<ScopeInfo> scope_info,
Handle<JSReceiver> extension,
Handle<Context> wrapped,
- Handle<StringSet> blacklist) {
- STATIC_ASSERT(Context::BLACK_LIST_INDEX ==
+ Handle<StringSet> blocklist) {
+ STATIC_ASSERT(Context::BLOCK_LIST_INDEX ==
Context::MIN_CONTEXT_EXTENDED_SLOTS + 1);
DCHECK(scope_info->IsDebugEvaluateScope());
Handle<HeapObject> ext = extension.is_null()
@@ -1152,7 +1200,7 @@ Handle<Context> Factory::NewDebugEvaluateContext(Handle<Context> previous,
c->set_previous(*previous);
c->set_extension(*ext);
if (!wrapped.is_null()) c->set(Context::WRAPPED_CONTEXT_INDEX, *wrapped);
- if (!blacklist.is_null()) c->set(Context::BLACK_LIST_INDEX, *blacklist);
+ if (!blocklist.is_null()) c->set(Context::BLOCK_LIST_INDEX, *blocklist);
return c;
}
@@ -2772,8 +2820,12 @@ Handle<JSGlobalProxy> Factory::NewUninitializedJSGlobalProxy(int size) {
map->set_is_access_check_needed(true);
map->set_may_have_interesting_symbols(true);
LOG(isolate(), MapDetails(*map));
- return Handle<JSGlobalProxy>::cast(
+ Handle<JSGlobalProxy> proxy = Handle<JSGlobalProxy>::cast(
NewJSObjectFromMap(map, AllocationType::kYoung));
+  // Create the identity hash early in case any JS collection contains a global
+  // proxy key and needs to be rehashed after deserialization.
+ proxy->GetOrCreateIdentityHash(isolate());
+ return proxy;
}
void Factory::ReinitializeJSGlobalProxy(Handle<JSGlobalProxy> object,
@@ -3074,9 +3126,7 @@ Handle<StackTraceFrame> Factory::NewStackTraceFrame(
frame->set_frame_index(index);
frame->set_frame_info(*undefined_value());
- int id = isolate()->last_stack_frame_info_id() + 1;
- isolate()->set_last_stack_frame_info_id(id);
- frame->set_id(id);
+ frame->set_id(isolate()->GetNextStackFrameInfoId());
return frame;
}
@@ -3100,7 +3150,7 @@ Handle<StackFrameInfo> Factory::NewStackFrameInfo(
// TODO(szuend): Adjust this, once it is decided what name to use in both
// "simple" and "detailed" stack traces. This code is for
// backwards compatibility to fullfill test expectations.
- auto function_name = frame->GetFunctionName();
+ Handle<PrimitiveHeapObject> function_name = frame->GetFunctionName();
bool is_user_java_script = false;
if (!is_wasm) {
Handle<Object> function = frame->GetFunction();
@@ -3111,11 +3161,11 @@ Handle<StackFrameInfo> Factory::NewStackFrameInfo(
}
}
- Handle<Object> method_name = undefined_value();
- Handle<Object> type_name = undefined_value();
- Handle<Object> eval_origin = frame->GetEvalOrigin();
- Handle<Object> wasm_module_name = frame->GetWasmModuleName();
- Handle<Object> wasm_instance = frame->GetWasmInstance();
+ Handle<PrimitiveHeapObject> method_name = undefined_value();
+ Handle<PrimitiveHeapObject> type_name = undefined_value();
+ Handle<PrimitiveHeapObject> eval_origin = frame->GetEvalOrigin();
+ Handle<PrimitiveHeapObject> wasm_module_name = frame->GetWasmModuleName();
+ Handle<HeapObject> wasm_instance = frame->GetWasmInstance();
// MethodName and TypeName are expensive to look up, so they are only
// included when they are strictly needed by the stack trace
@@ -3159,7 +3209,8 @@ Handle<StackFrameInfo> Factory::NewStackFrameInfo(
info->set_is_toplevel(is_toplevel);
info->set_is_async(frame->IsAsync());
info->set_is_promise_all(frame->IsPromiseAll());
- info->set_promise_all_index(frame->GetPromiseIndex());
+ info->set_is_promise_any(frame->IsPromiseAny());
+ info->set_promise_combinator_index(frame->GetPromiseIndex());
return info;
}
diff --git a/chromium/v8/src/heap/factory.h b/chromium/v8/src/heap/factory.h
index 2840c711cdf..bd1453bb441 100644
--- a/chromium/v8/src/heap/factory.h
+++ b/chromium/v8/src/heap/factory.h
@@ -26,6 +26,7 @@ namespace internal {
// Forward declarations.
class AliasedArgumentsEntry;
class ObjectBoilerplateDescription;
+class BasicBlockProfilerData;
class BreakPoint;
class BreakPointInfo;
class CallableTask;
@@ -119,6 +120,10 @@ class V8_EXPORT_PRIVATE Factory : public FactoryBase<Factory> {
// Marks self references within code generation.
Handle<Oddball> NewSelfReferenceMarker();
+ // Marks references to a function's basic-block usage counters array during
+ // code generation.
+ Handle<Oddball> NewBasicBlockCountersMarker();
+
// Allocates a property array initialized with undefined values.
Handle<PropertyArray> NewPropertyArray(int length);
// Tries allocating a fixed array initialized with undefined values.
@@ -342,7 +347,7 @@ class V8_EXPORT_PRIVATE Factory : public FactoryBase<Factory> {
Handle<ScopeInfo> scope_info,
Handle<JSReceiver> extension,
Handle<Context> wrapped,
- Handle<StringSet> whitelist);
+ Handle<StringSet> blocklist);
// Create a block context.
Handle<Context> NewBlockContext(Handle<Context> previous,
@@ -861,6 +866,11 @@ class V8_EXPORT_PRIVATE Factory : public FactoryBase<Factory> {
return *this;
}
+ CodeBuilder& set_profiler_data(BasicBlockProfilerData* profiler_data) {
+ profiler_data_ = profiler_data;
+ return *this;
+ }
+
private:
MaybeHandle<Code> BuildInternal(bool retry_allocation_or_fail);
@@ -875,6 +885,7 @@ class V8_EXPORT_PRIVATE Factory : public FactoryBase<Factory> {
Handle<ByteArray> source_position_table_;
Handle<DeoptimizationData> deoptimization_data_ =
DeoptimizationData::Empty(isolate_);
+ BasicBlockProfilerData* profiler_data_ = nullptr;
bool is_executable_ = true;
bool read_only_data_container_ = false;
bool is_movable_ = true;
diff --git a/chromium/v8/src/heap/finalization-registry-cleanup-task.h b/chromium/v8/src/heap/finalization-registry-cleanup-task.h
index bb25c1abec9..e05c5afa957 100644
--- a/chromium/v8/src/heap/finalization-registry-cleanup-task.h
+++ b/chromium/v8/src/heap/finalization-registry-cleanup-task.h
@@ -18,12 +18,11 @@ class FinalizationRegistryCleanupTask : public CancelableTask {
public:
explicit FinalizationRegistryCleanupTask(Heap* heap);
~FinalizationRegistryCleanupTask() override = default;
-
- private:
FinalizationRegistryCleanupTask(const FinalizationRegistryCleanupTask&) =
delete;
void operator=(const FinalizationRegistryCleanupTask&) = delete;
+ private:
void RunInternal() override;
void SlowAssertNoActiveJavaScript();
diff --git a/chromium/v8/src/heap/free-list-inl.h b/chromium/v8/src/heap/free-list-inl.h
new file mode 100644
index 00000000000..bf60485fa8a
--- /dev/null
+++ b/chromium/v8/src/heap/free-list-inl.h
@@ -0,0 +1,36 @@
+// Copyright 2020 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_HEAP_FREE_LIST_INL_H_
+#define V8_HEAP_FREE_LIST_INL_H_
+
+#include "src/heap/free-list.h"
+#include "src/heap/spaces.h"
+
+namespace v8 {
+namespace internal {
+
+bool FreeListCategory::is_linked(FreeList* owner) const {
+ return prev_ != nullptr || next_ != nullptr ||
+ owner->categories_[type_] == this;
+}
+
+void FreeListCategory::UpdateCountersAfterAllocation(size_t allocation_size) {
+ available_ -= allocation_size;
+}
+
+Page* FreeList::GetPageForCategoryType(FreeListCategoryType type) {
+ FreeListCategory* category_top = top(type);
+ if (category_top != nullptr) {
+ DCHECK(!category_top->top().is_null());
+ return Page::FromHeapObject(category_top->top());
+ } else {
+ return nullptr;
+ }
+}
+
+} // namespace internal
+} // namespace v8
+
+#endif // V8_HEAP_FREE_LIST_INL_H_
diff --git a/chromium/v8/src/heap/free-list.cc b/chromium/v8/src/heap/free-list.cc
new file mode 100644
index 00000000000..e9bf77d1711
--- /dev/null
+++ b/chromium/v8/src/heap/free-list.cc
@@ -0,0 +1,596 @@
+// Copyright 2020 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/heap/free-list.h"
+
+#include "src/base/macros.h"
+#include "src/common/globals.h"
+#include "src/heap/free-list-inl.h"
+#include "src/heap/heap.h"
+#include "src/heap/memory-chunk-inl.h"
+#include "src/objects/free-space-inl.h"
+
+namespace v8 {
+namespace internal {
+
+// -----------------------------------------------------------------------------
+// Free lists for old object spaces implementation
+
+void FreeListCategory::Reset(FreeList* owner) {
+ if (is_linked(owner) && !top().is_null()) {
+ owner->DecreaseAvailableBytes(available_);
+ }
+ set_top(FreeSpace());
+ set_prev(nullptr);
+ set_next(nullptr);
+ available_ = 0;
+}
+
+FreeSpace FreeListCategory::PickNodeFromList(size_t minimum_size,
+ size_t* node_size) {
+ FreeSpace node = top();
+ DCHECK(!node.is_null());
+ DCHECK(Page::FromHeapObject(node)->CanAllocate());
+ if (static_cast<size_t>(node.Size()) < minimum_size) {
+ *node_size = 0;
+ return FreeSpace();
+ }
+ set_top(node.next());
+ *node_size = node.Size();
+ UpdateCountersAfterAllocation(*node_size);
+ return node;
+}
+
+FreeSpace FreeListCategory::SearchForNodeInList(size_t minimum_size,
+ size_t* node_size) {
+ FreeSpace prev_non_evac_node;
+ for (FreeSpace cur_node = top(); !cur_node.is_null();
+ cur_node = cur_node.next()) {
+ DCHECK(Page::FromHeapObject(cur_node)->CanAllocate());
+ size_t size = cur_node.size();
+ if (size >= minimum_size) {
+ DCHECK_GE(available_, size);
+ UpdateCountersAfterAllocation(size);
+ if (cur_node == top()) {
+ set_top(cur_node.next());
+ }
+ if (!prev_non_evac_node.is_null()) {
+ MemoryChunk* chunk = MemoryChunk::FromHeapObject(prev_non_evac_node);
+ if (chunk->owner_identity() == CODE_SPACE) {
+ chunk->heap()->UnprotectAndRegisterMemoryChunk(chunk);
+ }
+ prev_non_evac_node.set_next(cur_node.next());
+ }
+ *node_size = size;
+ return cur_node;
+ }
+
+ prev_non_evac_node = cur_node;
+ }
+ return FreeSpace();
+}
+
+void FreeListCategory::Free(Address start, size_t size_in_bytes, FreeMode mode,
+ FreeList* owner) {
+ FreeSpace free_space = FreeSpace::cast(HeapObject::FromAddress(start));
+ free_space.set_next(top());
+ set_top(free_space);
+ available_ += size_in_bytes;
+ if (mode == kLinkCategory) {
+ if (is_linked(owner)) {
+ owner->IncreaseAvailableBytes(size_in_bytes);
+ } else {
+ owner->AddCategory(this);
+ }
+ }
+}
+
+void FreeListCategory::RepairFreeList(Heap* heap) {
+ Map free_space_map = ReadOnlyRoots(heap).free_space_map();
+ FreeSpace n = top();
+ while (!n.is_null()) {
+ ObjectSlot map_slot = n.map_slot();
+ if (map_slot.contains_value(kNullAddress)) {
+ map_slot.store(free_space_map);
+ } else {
+ DCHECK(map_slot.contains_value(free_space_map.ptr()));
+ }
+ n = n.next();
+ }
+}
+
+void FreeListCategory::Relink(FreeList* owner) {
+ DCHECK(!is_linked(owner));
+ owner->AddCategory(this);
+}
+
+// ------------------------------------------------
+// Generic FreeList methods (alloc/free related)
+
+FreeList* FreeList::CreateFreeList() { return new FreeListManyCachedOrigin(); }
+
+FreeSpace FreeList::TryFindNodeIn(FreeListCategoryType type,
+ size_t minimum_size, size_t* node_size) {
+ FreeListCategory* category = categories_[type];
+ if (category == nullptr) return FreeSpace();
+ FreeSpace node = category->PickNodeFromList(minimum_size, node_size);
+ if (!node.is_null()) {
+ DecreaseAvailableBytes(*node_size);
+ DCHECK(IsVeryLong() || Available() == SumFreeLists());
+ }
+ if (category->is_empty()) {
+ RemoveCategory(category);
+ }
+ return node;
+}
+
+FreeSpace FreeList::SearchForNodeInList(FreeListCategoryType type,
+ size_t minimum_size,
+ size_t* node_size) {
+ FreeListCategoryIterator it(this, type);
+ FreeSpace node;
+ while (it.HasNext()) {
+ FreeListCategory* current = it.Next();
+ node = current->SearchForNodeInList(minimum_size, node_size);
+ if (!node.is_null()) {
+ DecreaseAvailableBytes(*node_size);
+ DCHECK(IsVeryLong() || Available() == SumFreeLists());
+ if (current->is_empty()) {
+ RemoveCategory(current);
+ }
+ return node;
+ }
+ }
+ return node;
+}
+
+size_t FreeList::Free(Address start, size_t size_in_bytes, FreeMode mode) {
+ Page* page = Page::FromAddress(start);
+ page->DecreaseAllocatedBytes(size_in_bytes);
+
+ // Blocks have to be a minimum size to hold free list items.
+ if (size_in_bytes < min_block_size_) {
+ page->add_wasted_memory(size_in_bytes);
+ wasted_bytes_ += size_in_bytes;
+ return size_in_bytes;
+ }
+
+ // Insert other blocks at the head of a free list of the appropriate
+ // magnitude.
+ FreeListCategoryType type = SelectFreeListCategoryType(size_in_bytes);
+ page->free_list_category(type)->Free(start, size_in_bytes, mode, this);
+ DCHECK_EQ(page->AvailableInFreeList(),
+ page->AvailableInFreeListFromAllocatedBytes());
+ return 0;
+}
+
+// ------------------------------------------------
+// FreeListMany implementation
+
+constexpr unsigned int FreeListMany::categories_min[kNumberOfCategories];
+
+FreeListMany::FreeListMany() {
+ // Initializing base (FreeList) fields
+ number_of_categories_ = kNumberOfCategories;
+ last_category_ = number_of_categories_ - 1;
+ min_block_size_ = kMinBlockSize;
+ categories_ = new FreeListCategory*[number_of_categories_]();
+
+ Reset();
+}
+
+FreeListMany::~FreeListMany() { delete[] categories_; }
+
+size_t FreeListMany::GuaranteedAllocatable(size_t maximum_freed) {
+ if (maximum_freed < categories_min[0]) {
+ return 0;
+ }
+ for (int cat = kFirstCategory + 1; cat <= last_category_; cat++) {
+ if (maximum_freed < categories_min[cat]) {
+ return categories_min[cat - 1];
+ }
+ }
+ return maximum_freed;
+}
+
+Page* FreeListMany::GetPageForSize(size_t size_in_bytes) {
+ FreeListCategoryType minimum_category =
+ SelectFreeListCategoryType(size_in_bytes);
+ Page* page = nullptr;
+ for (int cat = minimum_category + 1; !page && cat <= last_category_; cat++) {
+ page = GetPageForCategoryType(cat);
+ }
+ if (!page) {
+ // Might return a page in which |size_in_bytes| will not fit.
+ page = GetPageForCategoryType(minimum_category);
+ }
+ return page;
+}
+
+FreeSpace FreeListMany::Allocate(size_t size_in_bytes, size_t* node_size,
+ AllocationOrigin origin) {
+ DCHECK_GE(kMaxBlockSize, size_in_bytes);
+ FreeSpace node;
+ FreeListCategoryType type = SelectFreeListCategoryType(size_in_bytes);
+ for (int i = type; i < last_category_ && node.is_null(); i++) {
+ node = TryFindNodeIn(static_cast<FreeListCategoryType>(i), size_in_bytes,
+ node_size);
+ }
+
+ if (node.is_null()) {
+ // Searching each element of the last category.
+ node = SearchForNodeInList(last_category_, size_in_bytes, node_size);
+ }
+
+ if (!node.is_null()) {
+ Page::FromHeapObject(node)->IncreaseAllocatedBytes(*node_size);
+ }
+
+ DCHECK(IsVeryLong() || Available() == SumFreeLists());
+ return node;
+}
+
+// ------------------------------------------------
+// FreeListManyCached implementation
+
+FreeListManyCached::FreeListManyCached() { ResetCache(); }
+
+void FreeListManyCached::Reset() {
+ ResetCache();
+ FreeListMany::Reset();
+}
+
+bool FreeListManyCached::AddCategory(FreeListCategory* category) {
+ bool was_added = FreeList::AddCategory(category);
+
+ // Updating cache
+ if (was_added) {
+ UpdateCacheAfterAddition(category->type_);
+ }
+
+#ifdef DEBUG
+ CheckCacheIntegrity();
+#endif
+
+ return was_added;
+}
+
+void FreeListManyCached::RemoveCategory(FreeListCategory* category) {
+ FreeList::RemoveCategory(category);
+
+ // Updating cache
+ int type = category->type_;
+ if (categories_[type] == nullptr) {
+ UpdateCacheAfterRemoval(type);
+ }
+
+#ifdef DEBUG
+ CheckCacheIntegrity();
+#endif
+}
+
+size_t FreeListManyCached::Free(Address start, size_t size_in_bytes,
+ FreeMode mode) {
+ Page* page = Page::FromAddress(start);
+ page->DecreaseAllocatedBytes(size_in_bytes);
+
+ // Blocks have to be a minimum size to hold free list items.
+ if (size_in_bytes < min_block_size_) {
+ page->add_wasted_memory(size_in_bytes);
+ wasted_bytes_ += size_in_bytes;
+ return size_in_bytes;
+ }
+
+ // Insert other blocks at the head of a free list of the appropriate
+ // magnitude.
+ FreeListCategoryType type = SelectFreeListCategoryType(size_in_bytes);
+ page->free_list_category(type)->Free(start, size_in_bytes, mode, this);
+
+ // Updating cache
+ if (mode == kLinkCategory) {
+ UpdateCacheAfterAddition(type);
+
+#ifdef DEBUG
+ CheckCacheIntegrity();
+#endif
+ }
+
+ DCHECK_EQ(page->AvailableInFreeList(),
+ page->AvailableInFreeListFromAllocatedBytes());
+ return 0;
+}
+
+FreeSpace FreeListManyCached::Allocate(size_t size_in_bytes, size_t* node_size,
+ AllocationOrigin origin) {
+ USE(origin);
+ DCHECK_GE(kMaxBlockSize, size_in_bytes);
+
+ FreeSpace node;
+ FreeListCategoryType type = SelectFreeListCategoryType(size_in_bytes);
+ type = next_nonempty_category[type];
+ for (; type < last_category_; type = next_nonempty_category[type + 1]) {
+ node = TryFindNodeIn(type, size_in_bytes, node_size);
+ if (!node.is_null()) break;
+ }
+
+ if (node.is_null()) {
+ // Searching each element of the last category.
+ type = last_category_;
+ node = SearchForNodeInList(type, size_in_bytes, node_size);
+ }
+
+ // Updating cache
+ if (!node.is_null() && categories_[type] == nullptr) {
+ UpdateCacheAfterRemoval(type);
+ }
+
+#ifdef DEBUG
+ CheckCacheIntegrity();
+#endif
+
+ if (!node.is_null()) {
+ Page::FromHeapObject(node)->IncreaseAllocatedBytes(*node_size);
+ }
+
+ DCHECK(IsVeryLong() || Available() == SumFreeLists());
+ return node;
+}
+
+// ------------------------------------------------
+// FreeListManyCachedFastPath implementation
+
+FreeSpace FreeListManyCachedFastPath::Allocate(size_t size_in_bytes,
+ size_t* node_size,
+ AllocationOrigin origin) {
+ USE(origin);
+ DCHECK_GE(kMaxBlockSize, size_in_bytes);
+ FreeSpace node;
+
+ // Fast path part 1: searching the last categories
+ FreeListCategoryType first_category =
+ SelectFastAllocationFreeListCategoryType(size_in_bytes);
+ FreeListCategoryType type = first_category;
+ for (type = next_nonempty_category[type]; type <= last_category_;
+ type = next_nonempty_category[type + 1]) {
+ node = TryFindNodeIn(type, size_in_bytes, node_size);
+ if (!node.is_null()) break;
+ }
+
+ // Fast path part 2: searching the medium categories for tiny objects
+ if (node.is_null()) {
+ if (size_in_bytes <= kTinyObjectMaxSize) {
+ for (type = next_nonempty_category[kFastPathFallBackTiny];
+ type < kFastPathFirstCategory;
+ type = next_nonempty_category[type + 1]) {
+ node = TryFindNodeIn(type, size_in_bytes, node_size);
+ if (!node.is_null()) break;
+ }
+ }
+ }
+
+ // Searching the last category
+ if (node.is_null()) {
+ // Searching each element of the last category.
+ type = last_category_;
+ node = SearchForNodeInList(type, size_in_bytes, node_size);
+ }
+
+ // Finally, search the most precise category
+ if (node.is_null()) {
+ type = SelectFreeListCategoryType(size_in_bytes);
+ for (type = next_nonempty_category[type]; type < first_category;
+ type = next_nonempty_category[type + 1]) {
+ node = TryFindNodeIn(type, size_in_bytes, node_size);
+ if (!node.is_null()) break;
+ }
+ }
+
+ // Updating cache
+ if (!node.is_null() && categories_[type] == nullptr) {
+ UpdateCacheAfterRemoval(type);
+ }
+
+#ifdef DEBUG
+ CheckCacheIntegrity();
+#endif
+
+ if (!node.is_null()) {
+ Page::FromHeapObject(node)->IncreaseAllocatedBytes(*node_size);
+ }
+
+ DCHECK(IsVeryLong() || Available() == SumFreeLists());
+ return node;
+}
+
+// ------------------------------------------------
+// FreeListManyCachedOrigin implementation
+
+FreeSpace FreeListManyCachedOrigin::Allocate(size_t size_in_bytes,
+ size_t* node_size,
+ AllocationOrigin origin) {
+ if (origin == AllocationOrigin::kGC) {
+ return FreeListManyCached::Allocate(size_in_bytes, node_size, origin);
+ } else {
+ return FreeListManyCachedFastPath::Allocate(size_in_bytes, node_size,
+ origin);
+ }
+}
+
+// ------------------------------------------------
+// FreeListMap implementation
+
+FreeListMap::FreeListMap() {
+ // Initializing base (FreeList) fields
+ number_of_categories_ = 1;
+ last_category_ = kOnlyCategory;
+ min_block_size_ = kMinBlockSize;
+ categories_ = new FreeListCategory*[number_of_categories_]();
+
+ Reset();
+}
+
+size_t FreeListMap::GuaranteedAllocatable(size_t maximum_freed) {
+ return maximum_freed;
+}
+
+Page* FreeListMap::GetPageForSize(size_t size_in_bytes) {
+ return GetPageForCategoryType(kOnlyCategory);
+}
+
+FreeListMap::~FreeListMap() { delete[] categories_; }
+
+FreeSpace FreeListMap::Allocate(size_t size_in_bytes, size_t* node_size,
+ AllocationOrigin origin) {
+ DCHECK_GE(kMaxBlockSize, size_in_bytes);
+
+  // The following DCHECK ensures that maps are allocated one by one (i.e.,
+ // without folding). This assumption currently holds. However, if it were to
+ // become untrue in the future, you'll get an error here. To fix it, I would
+ // suggest removing the DCHECK, and replacing TryFindNodeIn by
+ // SearchForNodeInList below.
+ DCHECK_EQ(size_in_bytes, Map::kSize);
+
+ FreeSpace node = TryFindNodeIn(kOnlyCategory, size_in_bytes, node_size);
+
+ if (!node.is_null()) {
+ Page::FromHeapObject(node)->IncreaseAllocatedBytes(*node_size);
+ }
+
+ DCHECK_IMPLIES(node.is_null(), IsEmpty());
+ return node;
+}
+
+// ------------------------------------------------
+// Generic FreeList methods (non alloc/free related)
+
+void FreeList::Reset() {
+ ForAllFreeListCategories(
+ [this](FreeListCategory* category) { category->Reset(this); });
+ for (int i = kFirstCategory; i < number_of_categories_; i++) {
+ categories_[i] = nullptr;
+ }
+ wasted_bytes_ = 0;
+ available_ = 0;
+}
+
+size_t FreeList::EvictFreeListItems(Page* page) {
+ size_t sum = 0;
+ page->ForAllFreeListCategories([this, &sum](FreeListCategory* category) {
+ sum += category->available();
+ RemoveCategory(category);
+ category->Reset(this);
+ });
+ return sum;
+}
+
+void FreeList::RepairLists(Heap* heap) {
+ ForAllFreeListCategories(
+ [heap](FreeListCategory* category) { category->RepairFreeList(heap); });
+}
+
+bool FreeList::AddCategory(FreeListCategory* category) {
+ FreeListCategoryType type = category->type_;
+ DCHECK_LT(type, number_of_categories_);
+ FreeListCategory* top = categories_[type];
+
+ if (category->is_empty()) return false;
+ DCHECK_NE(top, category);
+
+ // Common double-linked list insertion.
+ if (top != nullptr) {
+ top->set_prev(category);
+ }
+ category->set_next(top);
+ categories_[type] = category;
+
+ IncreaseAvailableBytes(category->available());
+ return true;
+}
+
+void FreeList::RemoveCategory(FreeListCategory* category) {
+ FreeListCategoryType type = category->type_;
+ DCHECK_LT(type, number_of_categories_);
+ FreeListCategory* top = categories_[type];
+
+ if (category->is_linked(this)) {
+ DecreaseAvailableBytes(category->available());
+ }
+
+ // Common double-linked list removal.
+ if (top == category) {
+ categories_[type] = category->next();
+ }
+ if (category->prev() != nullptr) {
+ category->prev()->set_next(category->next());
+ }
+ if (category->next() != nullptr) {
+ category->next()->set_prev(category->prev());
+ }
+ category->set_next(nullptr);
+ category->set_prev(nullptr);
+}
+
+void FreeList::PrintCategories(FreeListCategoryType type) {
+ FreeListCategoryIterator it(this, type);
+ PrintF("FreeList[%p, top=%p, %d] ", static_cast<void*>(this),
+ static_cast<void*>(categories_[type]), type);
+ while (it.HasNext()) {
+ FreeListCategory* current = it.Next();
+ PrintF("%p -> ", static_cast<void*>(current));
+ }
+ PrintF("null\n");
+}
+
+size_t FreeListCategory::SumFreeList() {
+ size_t sum = 0;
+ FreeSpace cur = top();
+ while (!cur.is_null()) {
+ // We can't use "cur->map()" here because both cur's map and the
+ // root can be null during bootstrapping.
+ DCHECK(cur.map_slot().contains_value(Page::FromHeapObject(cur)
+ ->heap()
+ ->isolate()
+ ->root(RootIndex::kFreeSpaceMap)
+ .ptr()));
+ sum += cur.relaxed_read_size();
+ cur = cur.next();
+ }
+ return sum;
+}
+int FreeListCategory::FreeListLength() {
+ int length = 0;
+ FreeSpace cur = top();
+ while (!cur.is_null()) {
+ length++;
+ cur = cur.next();
+ }
+ return length;
+}
+
+#ifdef DEBUG
+bool FreeList::IsVeryLong() {
+ int len = 0;
+ for (int i = kFirstCategory; i < number_of_categories_; i++) {
+ FreeListCategoryIterator it(this, static_cast<FreeListCategoryType>(i));
+ while (it.HasNext()) {
+ len += it.Next()->FreeListLength();
+ if (len >= FreeListCategory::kVeryLongFreeList) return true;
+ }
+ }
+ return false;
+}
+
+// This can take a very long time because it is linear in the number of entries
+// on the free list, so it should not be called when IsVeryLong() reports the
+// list as very long.
+size_t FreeList::SumFreeLists() {
+ size_t sum = 0;
+ ForAllFreeListCategories(
+ [&sum](FreeListCategory* category) { sum += category->SumFreeList(); });
+ return sum;
+}
+#endif
+
+} // namespace internal
+} // namespace v8
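
FreeListManyCached::Allocate() above skips empty buckets by consulting next_nonempty_category, a per-category cache of the next category known to hold free blocks. Below is a simplified, standalone sketch of that cache and its update-on-addition rule; the category count and the update code are assumptions for illustration, while the extra sentinel slot mirrors the `[type + 1]` indexing used above.

#include <cstddef>

constexpr int kNumCategories = 24;

// next_nonempty[i] is the smallest category j >= i that currently holds free
// blocks, or kNumCategories if there is none; allocation starts directly at
// next_nonempty[type] instead of probing every category in between.
int next_nonempty[kNumCategories + 1];

void ResetCacheSketch() {
  for (int i = 0; i <= kNumCategories; ++i) next_nonempty[i] = kNumCategories;
}

void OnCategoryBecameNonEmpty(int type) {
  // Every category at or below |type| whose cached target lies beyond |type|
  // can now jump straight to |type|.
  for (int i = type; i >= 0 && next_nonempty[i] > type; --i) {
    next_nonempty[i] = type;
  }
}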
diff --git a/chromium/v8/src/heap/free-list.h b/chromium/v8/src/heap/free-list.h
new file mode 100644
index 00000000000..e2cd193905d
--- /dev/null
+++ b/chromium/v8/src/heap/free-list.h
@@ -0,0 +1,520 @@
+// Copyright 2020 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_HEAP_FREE_LIST_H_
+#define V8_HEAP_FREE_LIST_H_
+
+#include "src/base/macros.h"
+#include "src/common/globals.h"
+#include "src/heap/memory-chunk.h"
+#include "src/objects/free-space.h"
+#include "src/objects/map.h"
+#include "src/utils/utils.h"
+#include "testing/gtest/include/gtest/gtest_prod.h" // nogncheck
+
+namespace v8 {
+namespace internal {
+
+namespace heap {
+class HeapTester;
+class TestCodePageAllocatorScope;
+} // namespace heap
+
+class AllocationObserver;
+class FreeList;
+class Isolate;
+class LargeObjectSpace;
+class LargePage;
+class LinearAllocationArea;
+class LocalArrayBufferTracker;
+class Page;
+class PagedSpace;
+class SemiSpace;
+
+using FreeListCategoryType = int32_t;
+
+static const FreeListCategoryType kFirstCategory = 0;
+static const FreeListCategoryType kInvalidCategory = -1;
+
+enum FreeMode { kLinkCategory, kDoNotLinkCategory };
+
+enum class SpaceAccountingMode { kSpaceAccounted, kSpaceUnaccounted };
+
+// A free list category maintains a linked list of free memory blocks.
+class FreeListCategory {
+ public:
+ void Initialize(FreeListCategoryType type) {
+ type_ = type;
+ available_ = 0;
+ prev_ = nullptr;
+ next_ = nullptr;
+ }
+
+ void Reset(FreeList* owner);
+
+ void RepairFreeList(Heap* heap);
+
+ // Relinks the category into the currently owning free list. Requires that the
+ // category is currently unlinked.
+ void Relink(FreeList* owner);
+
+ void Free(Address address, size_t size_in_bytes, FreeMode mode,
+ FreeList* owner);
+
+  // Performs a single attempt to pick a node of at least |minimum_size| from
+  // the category. Stores the actual size in |node_size|. Returns an empty
+  // FreeSpace if no node is found.
+ FreeSpace PickNodeFromList(size_t minimum_size, size_t* node_size);
+
+  // Picks a node of at least |minimum_size| from the category. Stores the
+  // actual size in |node_size|. Returns an empty FreeSpace if no node is
+  // found.
+
+ inline bool is_linked(FreeList* owner) const;
+ bool is_empty() { return top().is_null(); }
+ uint32_t available() const { return available_; }
+
+ size_t SumFreeList();
+ int FreeListLength();
+
+ private:
+  // For debug builds we accurately compute free list lengths up until
+  // {kVeryLongFreeList} by manually walking the list.
+ static const int kVeryLongFreeList = 500;
+
+  // Updates |available_| after an allocation of size |allocation_size|; the
+  // owning FreeList's available byte count is updated by the caller.
+ inline void UpdateCountersAfterAllocation(size_t allocation_size);
+
+ FreeSpace top() { return top_; }
+ void set_top(FreeSpace top) { top_ = top; }
+ FreeListCategory* prev() { return prev_; }
+ void set_prev(FreeListCategory* prev) { prev_ = prev; }
+ FreeListCategory* next() { return next_; }
+ void set_next(FreeListCategory* next) { next_ = next; }
+
+ // |type_|: The type of this free list category.
+ FreeListCategoryType type_ = kInvalidCategory;
+
+ // |available_|: Total available bytes in all blocks of this free list
+ // category.
+ uint32_t available_ = 0;
+
+ // |top_|: Points to the top FreeSpace in the free list category.
+ FreeSpace top_;
+
+ FreeListCategory* prev_ = nullptr;
+ FreeListCategory* next_ = nullptr;
+
+ friend class FreeList;
+ friend class FreeListManyCached;
+ friend class PagedSpace;
+ friend class MapSpace;
+};
+
+// A free list maintains free blocks of memory. The free list is organized in
+// a way to encourage objects allocated around the same time to be near each
+// other. The normal way to allocate is intended to be by bumping a 'top'
+// pointer until it hits a 'limit' pointer. When the limit is hit we need to
+// find a new space to allocate from. This is done with the free list, which is
+// divided up into rough categories to cut down on waste. Having finer
+// categories would scatter allocation more.
+class FreeList {
+ public:
+  // Creates a FreeList of the default class (currently
+  // FreeListManyCachedOrigin).
+ V8_EXPORT_PRIVATE static FreeList* CreateFreeList();
+
+ virtual ~FreeList() = default;
+
+ // Returns how much memory can be allocated after freeing maximum_freed
+ // memory.
+ virtual size_t GuaranteedAllocatable(size_t maximum_freed) = 0;
+
+ // Adds a node on the free list. The block of size {size_in_bytes} starting
+ // at {start} is placed on the free list. The return value is the number of
+ // bytes that were not added to the free list, because the freed memory block
+ // was too small. Bookkeeping information will be written to the block, i.e.,
+ // its contents will be destroyed. The start address should be word aligned,
+ // and the size should be a non-zero multiple of the word size.
+ virtual size_t Free(Address start, size_t size_in_bytes, FreeMode mode);
+
+  // Allocates a free space node from the free list of at least size_in_bytes
+  // bytes. Returns the actual node size in node_size, which can be bigger than
+  // size_in_bytes. Returns an empty FreeSpace if the allocation request cannot
+  // be handled by the free list.
+ virtual V8_WARN_UNUSED_RESULT FreeSpace Allocate(size_t size_in_bytes,
+ size_t* node_size,
+ AllocationOrigin origin) = 0;
+
+ // Returns a page containing a free-list entry of at least |size_in_bytes|,
+ // or nullptr if no such page exists.
+ V8_EXPORT_PRIVATE virtual Page* GetPageForSize(size_t size_in_bytes) = 0;
+
+ virtual void Reset();
+
+ // Return the number of bytes available on the free list.
+ size_t Available() {
+ DCHECK(available_ == SumFreeLists());
+ return available_;
+ }
+
+ // Updates the number of available bytes on the free list.
+ void IncreaseAvailableBytes(size_t bytes) { available_ += bytes; }
+ void DecreaseAvailableBytes(size_t bytes) { available_ -= bytes; }
+
+ bool IsEmpty() {
+ bool empty = true;
+ ForAllFreeListCategories([&empty](FreeListCategory* category) {
+ if (!category->is_empty()) empty = false;
+ });
+ return empty;
+ }
+
+ // Used after booting the VM.
+ void RepairLists(Heap* heap);
+
+ V8_EXPORT_PRIVATE size_t EvictFreeListItems(Page* page);
+
+ int number_of_categories() { return number_of_categories_; }
+ FreeListCategoryType last_category() { return last_category_; }
+
+ size_t wasted_bytes() { return wasted_bytes_; }
+
+ template <typename Callback>
+ void ForAllFreeListCategories(FreeListCategoryType type, Callback callback) {
+ FreeListCategory* current = categories_[type];
+ while (current != nullptr) {
+ FreeListCategory* next = current->next();
+ callback(current);
+ current = next;
+ }
+ }
+
+ template <typename Callback>
+ void ForAllFreeListCategories(Callback callback) {
+ for (int i = kFirstCategory; i < number_of_categories(); i++) {
+ ForAllFreeListCategories(static_cast<FreeListCategoryType>(i), callback);
+ }
+ }
+
+ virtual bool AddCategory(FreeListCategory* category);
+ virtual V8_EXPORT_PRIVATE void RemoveCategory(FreeListCategory* category);
+ void PrintCategories(FreeListCategoryType type);
+
+ protected:
+ class FreeListCategoryIterator final {
+ public:
+ FreeListCategoryIterator(FreeList* free_list, FreeListCategoryType type)
+ : current_(free_list->categories_[type]) {}
+
+ bool HasNext() const { return current_ != nullptr; }
+
+ FreeListCategory* Next() {
+ DCHECK(HasNext());
+ FreeListCategory* tmp = current_;
+ current_ = current_->next();
+ return tmp;
+ }
+
+ private:
+ FreeListCategory* current_;
+ };
+
+#ifdef DEBUG
+ V8_EXPORT_PRIVATE size_t SumFreeLists();
+ bool IsVeryLong();
+#endif
+
+ // Tries to retrieve a node from the first category of the given |type|.
+ // Returns nullptr if the category is empty or its top entry is smaller
+ // than |minimum_size|.
+ FreeSpace TryFindNodeIn(FreeListCategoryType type, size_t minimum_size,
+ size_t* node_size);
+
+ // Searches a given |type| for a node of at least |minimum_size|.
+ FreeSpace SearchForNodeInList(FreeListCategoryType type, size_t minimum_size,
+ size_t* node_size);
+
+ // Returns the smallest category in which an object of |size_in_bytes| could
+ // fit.
+ virtual FreeListCategoryType SelectFreeListCategoryType(
+ size_t size_in_bytes) = 0;
+
+ FreeListCategory* top(FreeListCategoryType type) const {
+ return categories_[type];
+ }
+
+ inline Page* GetPageForCategoryType(FreeListCategoryType type);
+
+ int number_of_categories_ = 0;
+ FreeListCategoryType last_category_ = 0;
+ size_t min_block_size_ = 0;
+
+ std::atomic<size_t> wasted_bytes_{0};
+ FreeListCategory** categories_ = nullptr;
+
+ // |available_|: The total number of free bytes on this free list.
+ size_t available_ = 0;
+
+ friend class FreeListCategory;
+ friend class Page;
+ friend class MemoryChunk;
+ friend class ReadOnlyPage;
+ friend class MapSpace;
+};
+
+// FreeList used for spaces that don't have freelists
+// (only the LargeObject space for now).
+class NoFreeList final : public FreeList {
+ public:
+ size_t GuaranteedAllocatable(size_t maximum_freed) final {
+ FATAL("NoFreeList can't be used as a standard FreeList. ");
+ }
+ size_t Free(Address start, size_t size_in_bytes, FreeMode mode) final {
+ FATAL("NoFreeList can't be used as a standard FreeList.");
+ }
+ V8_WARN_UNUSED_RESULT FreeSpace Allocate(size_t size_in_bytes,
+ size_t* node_size,
+ AllocationOrigin origin) final {
+ FATAL("NoFreeList can't be used as a standard FreeList.");
+ }
+ Page* GetPageForSize(size_t size_in_bytes) final {
+ FATAL("NoFreeList can't be used as a standard FreeList.");
+ }
+
+ private:
+ FreeListCategoryType SelectFreeListCategoryType(size_t size_in_bytes) final {
+ FATAL("NoFreeList can't be used as a standard FreeList.");
+ }
+};
+
+// Uses 24 free lists: one per 16 bytes between 24 and 256, and then a few more
+// for larger sizes. See the variable |categories_min| for the lower bound of
+// each free list. Allocation is done using a best-fit strategy (considering
+// only the first element of each category though).
+// Performance is expected to be worse than FreeListLegacy, but memory
+// consumption should be lower (since fragmentation should be lower).
+class V8_EXPORT_PRIVATE FreeListMany : public FreeList {
+ public:
+ size_t GuaranteedAllocatable(size_t maximum_freed) override;
+
+ Page* GetPageForSize(size_t size_in_bytes) override;
+
+ FreeListMany();
+ ~FreeListMany() override;
+
+ V8_WARN_UNUSED_RESULT FreeSpace Allocate(size_t size_in_bytes,
+ size_t* node_size,
+ AllocationOrigin origin) override;
+
+ protected:
+ static const size_t kMinBlockSize = 3 * kTaggedSize;
+
+ // This is a conservative upper bound. The actual maximum block size takes
+ // padding and alignment of data and code pages into account.
+ static const size_t kMaxBlockSize = MemoryChunk::kPageSize;
+ // Largest size for which categories are still precise, and for which we can
+ // therefore compute the category in constant time.
+ static const size_t kPreciseCategoryMaxSize = 256;
+
+ // Category boundaries generated with:
+ // perl -E '
+ // @cat = (24, map {$_*16} 2..16);
+ // while ($cat[-1] <= 32768) {
+ // push @cat, $cat[-1]*2
+ // }
+ // say join ", ", @cat;
+ // say "\n", scalar @cat'
+ static const int kNumberOfCategories = 24;
+ static constexpr unsigned int categories_min[kNumberOfCategories] = {
+ 24, 32, 48, 64, 80, 96, 112, 128, 144, 160, 176, 192,
+ 208, 224, 240, 256, 512, 1024, 2048, 4096, 8192, 16384, 32768, 65536};
+
+ // Return the smallest category that could hold |size_in_bytes| bytes.
+ FreeListCategoryType SelectFreeListCategoryType(
+ size_t size_in_bytes) override {
+ if (size_in_bytes <= kPreciseCategoryMaxSize) {
+ if (size_in_bytes < categories_min[1]) return 0;
+ return static_cast<FreeListCategoryType>(size_in_bytes >> 4) - 1;
+ }
+ for (int cat = (kPreciseCategoryMaxSize >> 4) - 1; cat < last_category_;
+ cat++) {
+ if (size_in_bytes < categories_min[cat + 1]) {
+ return cat;
+ }
+ }
+ return last_category_;
+ }
+
+ FRIEND_TEST(SpacesTest, FreeListManySelectFreeListCategoryType);
+ FRIEND_TEST(SpacesTest, FreeListManyGuaranteedAllocatable);
+};
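
(Editorial aside, not part of the patch.) A standalone model of the category selection above, using the same categories_min boundaries: block sizes up to 256 bytes map to their category in constant time via a shift, larger sizes via a linear scan. The helper below mirrors that logic in plain C++:

#include <cassert>
#include <cstddef>

// Same boundaries as |categories_min| above.
constexpr unsigned kCategoriesMin[24] = {
    24,  32,  48,  64,  80,  96,   112,  128,  144,  160,   176,   192,
    208, 224, 240, 256, 512, 1024, 2048, 4096, 8192, 16384, 32768, 65536};
constexpr int kLastCategory = 23;
constexpr size_t kPreciseCategoryMaxSize = 256;

// Maps a block size to its free-list category index.
int SelectCategory(size_t size_in_bytes) {
  if (size_in_bytes <= kPreciseCategoryMaxSize) {
    if (size_in_bytes < kCategoriesMin[1]) return 0;
    return static_cast<int>(size_in_bytes >> 4) - 1;  // 32..256 -> 1..15
  }
  for (int cat = static_cast<int>(kPreciseCategoryMaxSize >> 4) - 1;
       cat < kLastCategory; cat++) {
    if (size_in_bytes < kCategoriesMin[cat + 1]) return cat;
  }
  return kLastCategory;
}

int main() {
  assert(SelectCategory(24) == 0);      // smallest block size
  assert(SelectCategory(48) == 2);      // precise range: (48 >> 4) - 1
  assert(SelectCategory(300) == 15);    // 256..511 bucket, found by scanning
  assert(SelectCategory(70000) == 23);  // anything huge lands in the last one
  return 0;
}
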
+
+// Same as FreeListMany but uses a cache to know which categories are empty.
+// The cache (|next_nonempty_category|) is maintained such that, for each
+// category c, next_nonempty_category[c] contains the first non-empty category
+// greater than or equal to c that may hold an object of size c.
+// Allocation is done using the same strategy as FreeListMany (i.e., best fit).
+class V8_EXPORT_PRIVATE FreeListManyCached : public FreeListMany {
+ public:
+ FreeListManyCached();
+
+ V8_WARN_UNUSED_RESULT FreeSpace Allocate(size_t size_in_bytes,
+ size_t* node_size,
+ AllocationOrigin origin) override;
+
+ size_t Free(Address start, size_t size_in_bytes, FreeMode mode) override;
+
+ void Reset() override;
+
+ bool AddCategory(FreeListCategory* category) override;
+ void RemoveCategory(FreeListCategory* category) override;
+
+ protected:
+ // Updates the cache after adding something in the category |cat|.
+ void UpdateCacheAfterAddition(FreeListCategoryType cat) {
+ for (int i = cat; i >= kFirstCategory && next_nonempty_category[i] > cat;
+ i--) {
+ next_nonempty_category[i] = cat;
+ }
+ }
+
+ // Updates the cache after emptying category |cat|.
+ void UpdateCacheAfterRemoval(FreeListCategoryType cat) {
+ for (int i = cat; i >= kFirstCategory && next_nonempty_category[i] == cat;
+ i--) {
+ next_nonempty_category[i] = next_nonempty_category[cat + 1];
+ }
+ }
+
+#ifdef DEBUG
+ void CheckCacheIntegrity() {
+ for (int i = 0; i <= last_category_; i++) {
+ DCHECK(next_nonempty_category[i] == last_category_ + 1 ||
+ categories_[next_nonempty_category[i]] != nullptr);
+ for (int j = i; j < next_nonempty_category[i]; j++) {
+ DCHECK(categories_[j] == nullptr);
+ }
+ }
+ }
+#endif
+
+ // The cache is overallocated by one so that the last element is always
+ // defined, and when updating the cache, we can always use cache[i+1] as long
+ // as i is < kNumberOfCategories.
+ int next_nonempty_category[kNumberOfCategories + 1];
+
+ private:
+ void ResetCache() {
+ for (int i = 0; i < kNumberOfCategories; i++) {
+ next_nonempty_category[i] = kNumberOfCategories;
+ }
+ // Setting the after-last element as well, as explained in the cache's
+ // declaration.
+ next_nonempty_category[kNumberOfCategories] = kNumberOfCategories;
+ }
+};
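
(Editorial aside, not part of the patch.) The next_nonempty_category cache above can be modelled in isolation: entry c always points at the first non-empty category greater than or equal to c, and additions/removals only walk downwards from the affected category. A small self-contained sketch:

#include <cassert>

constexpr int kNumCategories = 24;
constexpr int kFirstCategory = 0;

// cache[c] = first non-empty category >= c; overallocated by one so that
// cache[cat + 1] is always valid during removal updates.
int cache[kNumCategories + 1];

void ResetCache() {
  for (int i = 0; i <= kNumCategories; i++) cache[i] = kNumCategories;
}

void UpdateCacheAfterAddition(int cat) {
  for (int i = cat; i >= kFirstCategory && cache[i] > cat; i--) cache[i] = cat;
}

void UpdateCacheAfterRemoval(int cat) {
  for (int i = cat; i >= kFirstCategory && cache[i] == cat; i--)
    cache[i] = cache[cat + 1];
}

int main() {
  ResetCache();
  UpdateCacheAfterAddition(5);   // category 5 becomes non-empty
  UpdateCacheAfterAddition(10);  // category 10 becomes non-empty
  assert(cache[0] == 5 && cache[5] == 5 && cache[6] == 10 && cache[10] == 10);
  UpdateCacheAfterRemoval(5);    // category 5 becomes empty again
  assert(cache[0] == 10 && cache[5] == 10);
  assert(cache[11] == kNumCategories);  // nothing non-empty above 10
  return 0;
}
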
+
+// Same as FreeListManyCached but uses a fast path.
+// The fast path overallocates by at least 1.85k bytes. The idea of this 1.85k
+// is: we want the fast path to always overallocate, even for larger
+// categories. Therefore, we have two choices: either overallocate by
+// "size_in_bytes * something" or overallocate by "size_in_bytes +
+// something". We choose the later, as the former will tend to overallocate too
+// much for larger objects. The 1.85k (= 2048 - 128) has been chosen such that
+// for tiny objects (size <= 128 bytes), the first category considered is the
+// 36th (which holds objects of 2k to 3k), while for larger objects, the first
+// category considered will be one that guarantees a 1.85k+ bytes
+// overallocation. Using 2k rather than 1.85k would have resulted in either a
+// more complex logic for SelectFastAllocationFreeListCategoryType, or the 36th
+// category (2k to 3k) not being used; both of which are undesirable.
+// A secondary fast path is used for tiny objects (size <= 128), in order to
+// consider categories from 256 to 2048 bytes for them.
+// Note that this class uses a precise GetPageForSize (inherited from
+// FreeListMany), which makes its fast path less fast in the Scavenger. This is
+// done on purpose, since this class's only purpose is to be used by
+// FreeListManyCachedOrigin, which is precise for the scavenger.
+class V8_EXPORT_PRIVATE FreeListManyCachedFastPath : public FreeListManyCached {
+ public:
+ V8_WARN_UNUSED_RESULT FreeSpace Allocate(size_t size_in_bytes,
+ size_t* node_size,
+ AllocationOrigin origin) override;
+
+ protected:
+ // Objects in the 18th category are at least 2048 bytes
+ static const FreeListCategoryType kFastPathFirstCategory = 18;
+ static const size_t kFastPathStart = 2048;
+ static const size_t kTinyObjectMaxSize = 128;
+ static const size_t kFastPathOffset = kFastPathStart - kTinyObjectMaxSize;
+ // Objects in the 15th category are at least 256 bytes
+ static const FreeListCategoryType kFastPathFallBackTiny = 15;
+
+ STATIC_ASSERT(categories_min[kFastPathFirstCategory] == kFastPathStart);
+ STATIC_ASSERT(categories_min[kFastPathFallBackTiny] ==
+ kTinyObjectMaxSize * 2);
+
+ FreeListCategoryType SelectFastAllocationFreeListCategoryType(
+ size_t size_in_bytes) {
+ DCHECK(size_in_bytes < kMaxBlockSize);
+
+ if (size_in_bytes >= categories_min[last_category_]) return last_category_;
+
+ size_in_bytes += kFastPathOffset;
+ for (int cat = kFastPathFirstCategory; cat < last_category_; cat++) {
+ if (size_in_bytes <= categories_min[cat]) {
+ return cat;
+ }
+ }
+ return last_category_;
+ }
+
+ FRIEND_TEST(
+ SpacesTest,
+ FreeListManyCachedFastPathSelectFastAllocationFreeListCategoryType);
+};
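
(Editorial aside, not part of the patch.) The fast-path selection above can be modelled standalone with the same boundaries: the request is bumped by kFastPathOffset (2048 - 128 = 1920 bytes, the "1.85k" from the class comment) before picking the first fast-path category that can hold the bumped size, so the chosen block always leaves a generous remainder:

#include <cassert>
#include <cstddef>

// Same boundaries as |categories_min| above.
constexpr unsigned kCategoriesMin[24] = {
    24,  32,  48,  64,  80,  96,   112,  128,  144,  160,   176,   192,
    208, 224, 240, 256, 512, 1024, 2048, 4096, 8192, 16384, 32768, 65536};
constexpr int kLastCategory = 23;
constexpr int kFastPathFirstCategory = 18;  // blocks of at least 2048 bytes
constexpr size_t kTinyObjectMaxSize = 128;
constexpr size_t kFastPathOffset = 2048 - kTinyObjectMaxSize;  // 1920

int SelectFastCategory(size_t size_in_bytes) {
  if (size_in_bytes >= kCategoriesMin[kLastCategory]) return kLastCategory;
  size_in_bytes += kFastPathOffset;
  for (int cat = kFastPathFirstCategory; cat < kLastCategory; cat++) {
    if (size_in_bytes <= kCategoriesMin[cat]) return cat;
  }
  return kLastCategory;
}

int main() {
  // Tiny requests (<= 128 bytes) land in the 2048-byte category...
  assert(SelectFastCategory(64) == 18);
  assert(SelectFastCategory(128) == 18);
  // ...while anything bigger is bumped far enough to keep >= 1.85k left over.
  assert(SelectFastCategory(129) == 19);   // blocks of at least 4096 bytes
  assert(SelectFastCategory(2000) == 19);
  assert(SelectFastCategory(5000) == 20);  // blocks of at least 8192 bytes
  return 0;
}
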
+
+// Uses FreeListManyCached if in the GC; FreeListManyCachedFastPath otherwise.
+// The reasoning behind this FreeList is the following: the GC runs in
+// parallel, and therefore more expensive allocations there are less
+// noticeable. On the other hand, the generated code and runtime need to be
+// very fast. Therefore, the strategy for the former (the GC) is one that is
+// not very efficient but reduces fragmentation (FreeListManyCached), while the
+// strategy for the latter is one that is very efficient but introduces some
+// fragmentation (FreeListManyCachedFastPath).
+class V8_EXPORT_PRIVATE FreeListManyCachedOrigin
+ : public FreeListManyCachedFastPath {
+ public:
+ V8_WARN_UNUSED_RESULT FreeSpace Allocate(size_t size_in_bytes,
+ size_t* node_size,
+ AllocationOrigin origin) override;
+};
+
+// FreeList for maps: since maps are all the same size, it uses a single
+// free list.
+class V8_EXPORT_PRIVATE FreeListMap : public FreeList {
+ public:
+ size_t GuaranteedAllocatable(size_t maximum_freed) override;
+
+ Page* GetPageForSize(size_t size_in_bytes) override;
+
+ FreeListMap();
+ ~FreeListMap() override;
+
+ V8_WARN_UNUSED_RESULT FreeSpace Allocate(size_t size_in_bytes,
+ size_t* node_size,
+ AllocationOrigin origin) override;
+
+ private:
+ static const size_t kMinBlockSize = Map::kSize;
+ static const size_t kMaxBlockSize = MemoryChunk::kPageSize;
+ static const FreeListCategoryType kOnlyCategory = 0;
+
+ FreeListCategoryType SelectFreeListCategoryType(
+ size_t size_in_bytes) override {
+ return kOnlyCategory;
+ }
+};
+
+} // namespace internal
+} // namespace v8
+
+#endif // V8_HEAP_FREE_LIST_H_
diff --git a/chromium/v8/src/heap/heap-inl.h b/chromium/v8/src/heap/heap-inl.h
index 39f5ec6c66e..6e42cf74527 100644
--- a/chromium/v8/src/heap/heap-inl.h
+++ b/chromium/v8/src/heap/heap-inl.h
@@ -23,7 +23,11 @@
// leak heap internals to users of this interface!
#include "src/execution/isolate-data.h"
#include "src/execution/isolate.h"
+#include "src/heap/code-object-registry.h"
+#include "src/heap/memory-allocator.h"
#include "src/heap/memory-chunk.h"
+#include "src/heap/new-spaces-inl.h"
+#include "src/heap/paged-spaces-inl.h"
#include "src/heap/read-only-spaces.h"
#include "src/heap/spaces-inl.h"
#include "src/objects/allocation-site-inl.h"
@@ -237,8 +241,7 @@ AllocationResult Heap::AllocateRaw(int size_in_bytes, AllocationType type,
DCHECK(!large_object);
DCHECK(CanAllocateInReadOnlySpace());
DCHECK_EQ(AllocationOrigin::kRuntime, origin);
- allocation =
- read_only_space_->AllocateRaw(size_in_bytes, alignment, origin);
+ allocation = read_only_space_->AllocateRaw(size_in_bytes, alignment);
} else {
UNREACHABLE();
}
@@ -397,7 +400,8 @@ bool Heap::InYoungGeneration(MaybeObject object) {
// static
bool Heap::InYoungGeneration(HeapObject heap_object) {
if (V8_ENABLE_THIRD_PARTY_HEAP_BOOL) return false;
- bool result = MemoryChunk::FromHeapObject(heap_object)->InYoungGeneration();
+ bool result =
+ BasicMemoryChunk::FromHeapObject(heap_object)->InYoungGeneration();
#ifdef DEBUG
// If in the young generation, then check we're either not in the middle of
// GC or the object is in to-space.
@@ -425,7 +429,7 @@ bool Heap::InFromPage(MaybeObject object) {
// static
bool Heap::InFromPage(HeapObject heap_object) {
- return MemoryChunk::FromHeapObject(heap_object)->IsFromPage();
+ return BasicMemoryChunk::FromHeapObject(heap_object)->IsFromPage();
}
// static
@@ -442,7 +446,7 @@ bool Heap::InToPage(MaybeObject object) {
// static
bool Heap::InToPage(HeapObject heap_object) {
- return MemoryChunk::FromHeapObject(heap_object)->IsToPage();
+ return BasicMemoryChunk::FromHeapObject(heap_object)->IsToPage();
}
bool Heap::InOldSpace(Object object) { return old_space_->Contains(object); }
@@ -452,7 +456,7 @@ Heap* Heap::FromWritableHeapObject(HeapObject obj) {
if (V8_ENABLE_THIRD_PARTY_HEAP_BOOL) {
return Heap::GetIsolateFromWritableObject(obj)->heap();
}
- MemoryChunk* chunk = MemoryChunk::FromHeapObject(obj);
+ BasicMemoryChunk* chunk = BasicMemoryChunk::FromHeapObject(obj);
// RO_SPACE can be shared between heaps, so we can't use RO_SPACE objects to
// find a heap. The exception is when the ReadOnlySpace is writeable, during
// bootstrapping, so explicitly allow this case.
@@ -540,7 +544,7 @@ void Heap::UpdateAllocationSite(Map map, HeapObject object,
PretenuringFeedbackMap* pretenuring_feedback) {
DCHECK_NE(pretenuring_feedback, &global_pretenuring_feedback_);
#ifdef DEBUG
- MemoryChunk* chunk = MemoryChunk::FromHeapObject(object);
+ BasicMemoryChunk* chunk = BasicMemoryChunk::FromHeapObject(object);
DCHECK_IMPLIES(chunk->IsToPage(),
chunk->IsFlagSet(MemoryChunk::PAGE_NEW_NEW_PROMOTION));
DCHECK_IMPLIES(!chunk->InYoungGeneration(),
@@ -709,24 +713,24 @@ CodePageMemoryModificationScope::CodePageMemoryModificationScope(Code code)
: chunk_(nullptr), scope_active_(false) {}
#else
CodePageMemoryModificationScope::CodePageMemoryModificationScope(Code code)
- : CodePageMemoryModificationScope(MemoryChunk::FromHeapObject(code)) {}
+ : CodePageMemoryModificationScope(BasicMemoryChunk::FromHeapObject(code)) {}
#endif
CodePageMemoryModificationScope::CodePageMemoryModificationScope(
- MemoryChunk* chunk)
+ BasicMemoryChunk* chunk)
: chunk_(chunk),
scope_active_(chunk_->heap()->write_protect_code_memory() &&
chunk_->IsFlagSet(MemoryChunk::IS_EXECUTABLE)) {
if (scope_active_) {
- DCHECK(chunk_->owner_identity() == CODE_SPACE ||
- (chunk_->owner_identity() == CODE_LO_SPACE));
- chunk_->SetReadAndWritable();
+ DCHECK(chunk_->owner()->identity() == CODE_SPACE ||
+ (chunk_->owner()->identity() == CODE_LO_SPACE));
+ MemoryChunk::cast(chunk_)->SetReadAndWritable();
}
}
CodePageMemoryModificationScope::~CodePageMemoryModificationScope() {
if (scope_active_) {
- chunk_->SetDefaultCodePermissions();
+ MemoryChunk::cast(chunk_)->SetDefaultCodePermissions();
}
}
diff --git a/chromium/v8/src/heap/heap.cc b/chromium/v8/src/heap/heap.cc
index 606ba0fe65f..4d23e084b95 100644
--- a/chromium/v8/src/heap/heap.cc
+++ b/chromium/v8/src/heap/heap.cc
@@ -6,6 +6,7 @@
#include <cinttypes>
#include <iomanip>
+#include <memory>
#include <unordered_map>
#include <unordered_set>
@@ -30,8 +31,10 @@
#include "src/heap/array-buffer-sweeper.h"
#include "src/heap/array-buffer-tracker-inl.h"
#include "src/heap/barrier.h"
+#include "src/heap/code-object-registry.h"
#include "src/heap/code-stats.h"
#include "src/heap/combined-heap.h"
+#include "src/heap/concurrent-allocator.h"
#include "src/heap/concurrent-marking.h"
#include "src/heap/embedder-tracing.h"
#include "src/heap/finalization-registry-cleanup-task.h"
@@ -51,8 +54,9 @@
#include "src/heap/object-stats.h"
#include "src/heap/objects-visiting-inl.h"
#include "src/heap/objects-visiting.h"
+#include "src/heap/paged-spaces-inl.h"
#include "src/heap/read-only-heap.h"
-#include "src/heap/remembered-set-inl.h"
+#include "src/heap/remembered-set.h"
#include "src/heap/safepoint.h"
#include "src/heap/scavenge-job.h"
#include "src/heap/scavenger-inl.h"
@@ -174,6 +178,10 @@ void Heap::SetSerializedGlobalProxySizes(FixedArray sizes) {
set_serialized_global_proxy_sizes(sizes);
}
+void Heap::SetBasicBlockProfilingData(Handle<ArrayList> list) {
+ set_basic_block_profiling_data(*list);
+}
+
bool Heap::GCCallbackTuple::operator==(
const Heap::GCCallbackTuple& other) const {
return other.callback == callback && other.data == data;
@@ -415,7 +423,12 @@ bool Heap::CanExpandOldGeneration(size_t size) {
return memory_allocator()->Size() + size <= MaxReserved();
}
-bool Heap::HasBeenSetUp() {
+bool Heap::CanExpandOldGenerationBackground(size_t size) {
+ if (force_oom_) return false;
+ return memory_allocator()->Size() + size <= MaxReserved();
+}
+
+bool Heap::HasBeenSetUp() const {
// We will always have a new space when the heap is set up.
return new_space_ != nullptr;
}
@@ -470,8 +483,7 @@ void Heap::PrintShortHeapStatistics() {
"Read-only space, used: %6zu KB"
", available: %6zu KB"
", committed: %6zu KB\n",
- read_only_space_->Size() / KB,
- read_only_space_->Available() / KB,
+ read_only_space_->Size() / KB, size_t{0},
read_only_space_->CommittedMemory() / KB);
PrintIsolate(isolate_,
"New space, used: %6zu KB"
@@ -522,8 +534,8 @@ void Heap::PrintShortHeapStatistics() {
"All spaces, used: %6zu KB"
", available: %6zu KB"
", committed: %6zu KB\n",
- (this->SizeOfObjects() + ro_space->SizeOfObjects()) / KB,
- (this->Available() + ro_space->Available()) / KB,
+ (this->SizeOfObjects() + ro_space->Size()) / KB,
+ (this->Available()) / KB,
(this->CommittedMemory() + ro_space->CommittedMemory()) / KB);
PrintIsolate(isolate_,
"Unmapper buffering %zu chunks of committed: %6zu KB\n",
@@ -631,7 +643,8 @@ void Heap::DumpJSONHeapStatistics(std::stringstream& stream) {
std::stringstream stream;
stream << DICT(
MEMBER("name")
- << ESCAPE(GetSpaceName(static_cast<AllocationSpace>(space_index)))
+ << ESCAPE(BaseSpace::GetSpaceName(
+ static_cast<AllocationSpace>(space_index)))
<< ","
MEMBER("size") << space_stats.space_size() << ","
MEMBER("used_size") << space_stats.space_used_size() << ","
@@ -849,7 +862,6 @@ void Heap::GarbageCollectionPrologue() {
} else {
maximum_size_scavenges_ = 0;
}
- CheckNewSpaceExpansionCriteria();
UpdateNewSpaceAllocationCounter();
if (FLAG_track_retaining_path) {
retainer_.clear();
@@ -859,6 +871,10 @@ void Heap::GarbageCollectionPrologue() {
memory_allocator()->unmapper()->PrepareForGC();
}
+void Heap::GarbageCollectionPrologueInSafepoint() {
+ CheckNewSpaceExpansionCriteria();
+}
+
size_t Heap::SizeOfObjects() {
size_t total = 0;
@@ -876,29 +892,6 @@ size_t Heap::UsedGlobalHandlesSize() {
return isolate_->global_handles()->UsedSize();
}
-// static
-const char* Heap::GetSpaceName(AllocationSpace space) {
- switch (space) {
- case NEW_SPACE:
- return "new_space";
- case OLD_SPACE:
- return "old_space";
- case MAP_SPACE:
- return "map_space";
- case CODE_SPACE:
- return "code_space";
- case LO_SPACE:
- return "large_object_space";
- case NEW_LO_SPACE:
- return "new_large_object_space";
- case CODE_LO_SPACE:
- return "code_large_object_space";
- case RO_SPACE:
- return "read_only_space";
- }
- UNREACHABLE();
-}
-
void Heap::MergeAllocationSitePretenuringFeedback(
const PretenuringFeedbackMap& local_pretenuring_feedback) {
AllocationSite site;
@@ -1651,8 +1644,12 @@ bool Heap::CollectGarbage(AllocationSpace space,
}
if (collector == MARK_COMPACTOR) {
- size_t committed_memory_after = CommittedOldGenerationMemory();
+ // Calculate used memory first, then committed memory. The following code
+ // assumes that committed >= used, which might not hold if these values were
+ // computed in the wrong order while background threads allocate in between.
size_t used_memory_after = OldGenerationSizeOfObjects();
+ size_t committed_memory_after = CommittedOldGenerationMemory();
MemoryReducer::Event event;
event.type = MemoryReducer::kMarkCompact;
event.time_ms = MonotonicallyIncreasingTimeInMs();
@@ -1681,7 +1678,7 @@ bool Heap::CollectGarbage(AllocationSpace space,
isolate()->CountUsage(v8::Isolate::kForcedGC);
}
- collection_barrier_.Increment();
+ collection_barrier_.CollectionPerformed();
// Start incremental marking for the next cycle. We do this only for scavenger
// to avoid a loop where mark-compact causes another mark-compact.
@@ -1750,10 +1747,12 @@ void Heap::StartIncrementalMarkingIfAllocationLimitIsReachedBackground() {
}
const size_t old_generation_space_available = OldGenerationSpaceAvailable();
- const size_t global_memory_available = GlobalMemoryAvailable();
+ const base::Optional<size_t> global_memory_available =
+ GlobalMemoryAvailable();
if (old_generation_space_available < new_space_->Capacity() ||
- global_memory_available < new_space_->Capacity()) {
+ (global_memory_available &&
+ *global_memory_available < new_space_->Capacity())) {
incremental_marking()->incremental_marking_job()->ScheduleTask(this);
}
}
@@ -1955,6 +1954,9 @@ bool Heap::ReserveSpace(Reservation* reservations, std::vector<Address>* maps) {
#else
if (space == NEW_SPACE) {
allocation = new_space()->AllocateRawUnaligned(size);
+ } else if (space == RO_SPACE) {
+ allocation = read_only_space()->AllocateRaw(
+ size, AllocationAlignment::kWordAligned);
} else {
// The deserializer will update the skip list.
allocation = paged_space(space)->AllocateRawUnaligned(size);
@@ -2013,21 +2015,29 @@ void Heap::EnsureFromSpaceIsCommitted() {
FatalProcessOutOfMemory("Committing semi space failed.");
}
-void Heap::CollectionBarrier::Increment() {
+void Heap::CollectionBarrier::CollectionPerformed() {
base::MutexGuard guard(&mutex_);
- requested_ = false;
+ gc_requested_ = false;
+ cond_.NotifyAll();
+}
+
+void Heap::CollectionBarrier::ShutdownRequested() {
+ base::MutexGuard guard(&mutex_);
+ shutdown_requested_ = true;
cond_.NotifyAll();
}
void Heap::CollectionBarrier::Wait() {
base::MutexGuard guard(&mutex_);
- if (!requested_) {
+ if (shutdown_requested_) return;
+
+ if (!gc_requested_) {
heap_->MemoryPressureNotification(MemoryPressureLevel::kCritical, false);
- requested_ = true;
+ gc_requested_ = true;
}
- while (requested_) {
+ while (gc_requested_ && !shutdown_requested_) {
cond_.Wait(&mutex_);
}
}
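
(Editorial aside, not part of the patch.) The CollectionBarrier change above splits the old requested_ flag into gc_requested_ and shutdown_requested_ so that waiters can also be woken for tear-down. The same pattern, sketched with std::mutex and std::condition_variable instead of V8's base primitives (class and hook names below are illustrative only):

#include <condition_variable>
#include <mutex>

class CollectionBarrierSketch {
 public:
  // Called by a background thread that needs a GC before it can continue.
  void Wait() {
    std::unique_lock<std::mutex> lock(mutex_);
    if (shutdown_requested_) return;  // never block during tear-down
    if (!gc_requested_) {
      RequestGC();  // stand-in for the memory pressure notification
      gc_requested_ = true;
    }
    cv_.wait(lock, [this] { return !gc_requested_ || shutdown_requested_; });
  }

  // Called on the main thread once a GC has been performed.
  void CollectionPerformed() {
    std::lock_guard<std::mutex> lock(mutex_);
    gc_requested_ = false;
    cv_.notify_all();
  }

  // Called when the isolate starts tearing down; wakes all waiters for good.
  void ShutdownRequested() {
    std::lock_guard<std::mutex> lock(mutex_);
    shutdown_requested_ = true;
    cv_.notify_all();
  }

 private:
  void RequestGC() {}  // hypothetical hook; the real code notifies the heap

  std::mutex mutex_;
  std::condition_variable cv_;
  bool gc_requested_ = false;
  bool shutdown_requested_ = false;
};

Waking on shutdown_requested_ is what prevents the tear-down deadlock described in the Heap::StartTearDown comment later in this diff.
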
@@ -2062,9 +2072,6 @@ size_t Heap::PerformGarbageCollection(
base::Optional<SafepointScope> optional_safepoint_scope;
if (FLAG_local_heaps) {
optional_safepoint_scope.emplace(this);
- // Fill and reset all LABs
- safepoint()->IterateLocalHeaps(
- [](LocalHeap* local_heap) { local_heap->FreeLinearAllocationArea(); });
}
#ifdef VERIFY_HEAP
if (FLAG_verify_heap) {
@@ -2073,6 +2080,8 @@ size_t Heap::PerformGarbageCollection(
#endif
tracer()->StartInSafepoint();
+ GarbageCollectionPrologueInSafepoint();
+
EnsureFromSpaceIsCommitted();
size_t start_young_generation_size =
@@ -3000,10 +3009,12 @@ HeapObject CreateFillerObjectAtImpl(ReadOnlyRoots roots, Address addr, int size,
#ifdef DEBUG
void VerifyNoNeedToClearSlots(Address start, Address end) {
- MemoryChunk* chunk = MemoryChunk::FromAddress(start);
+ BasicMemoryChunk* basic_chunk = BasicMemoryChunk::FromAddress(start);
+ if (basic_chunk->InReadOnlySpace()) return;
+ MemoryChunk* chunk = static_cast<MemoryChunk*>(basic_chunk);
// TODO(ulan): Support verification of large pages.
if (chunk->InYoungGeneration() || chunk->IsLargePage()) return;
- Space* space = chunk->owner();
+ BaseSpace* space = chunk->owner();
if (static_cast<PagedSpace*>(space)->is_off_thread_space()) return;
space->heap()->VerifySlotRangeHasNoRecordedSlots(start, end);
}
@@ -3024,6 +3035,13 @@ HeapObject Heap::CreateFillerObjectAt(ReadOnlyRoots roots, Address addr,
return filler;
}
+void Heap::CreateFillerObjectAtBackground(
+ Address addr, int size, ClearFreedMemoryMode clear_memory_mode) {
+ CreateFillerObjectAtImpl(ReadOnlyRoots(this), addr, size, clear_memory_mode);
+ // Do not verify whether slots are cleared here: the concurrent sweeper is not
+ // allowed to access the main thread's remembered set.
+}
+
HeapObject Heap::CreateFillerObjectAt(Address addr, int size,
ClearRecordedSlots clear_slots_mode) {
if (size == 0) return HeapObject();
@@ -3059,7 +3077,7 @@ bool Heap::InOffThreadSpace(HeapObject heap_object) {
#ifdef V8_ENABLE_THIRD_PARTY_HEAP
return false; // currently unsupported
#else
- Space* owner = MemoryChunk::FromHeapObject(heap_object)->owner();
+ BaseSpace* owner = BasicMemoryChunk::FromHeapObject(heap_object)->owner();
if (owner->identity() == OLD_SPACE) {
// TODO(leszeks): Should we exclude compaction spaces here?
return static_cast<PagedSpace*>(owner)->is_off_thread_space();
@@ -3078,12 +3096,12 @@ bool Heap::IsImmovable(HeapObject object) {
return true;
}
- MemoryChunk* chunk = MemoryChunk::FromHeapObject(object);
+ BasicMemoryChunk* chunk = BasicMemoryChunk::FromHeapObject(object);
return chunk->NeverEvacuate() || IsLargeObject(object);
}
bool Heap::IsLargeObject(HeapObject object) {
- return MemoryChunk::FromHeapObject(object)->IsLargePage();
+ return BasicMemoryChunk::FromHeapObject(object)->IsLargePage();
}
#ifdef ENABLE_SLOW_DCHECKS
@@ -3112,8 +3130,9 @@ class LeftTrimmerVerifierRootVisitor : public RootVisitor {
namespace {
bool MayContainRecordedSlots(HeapObject object) {
// New space object do not have recorded slots.
- if (MemoryChunk::FromHeapObject(object)->InYoungGeneration()) return false;
- // Whitelist objects that definitely do not have pointers.
+ if (BasicMemoryChunk::FromHeapObject(object)->InYoungGeneration())
+ return false;
+ // Allowlist objects that definitely do not have pointers.
if (object.IsByteArray() || object.IsFixedDoubleArray()) return false;
// Conservatively return true for other objects.
return true;
@@ -3225,6 +3244,7 @@ FixedArrayBase Heap::LeftTrimFixedArray(FixedArrayBase object,
if (FLAG_enable_slow_asserts) {
// Make sure the stack or other roots (e.g., Handles) don't contain pointers
// to the original FixedArray (which is now the filler object).
+ SafepointScope scope(this);
LeftTrimmerVerifierRootVisitor root_visitor(object);
ReadOnlyRoots(this).Iterate(&root_visitor);
IterateRoots(&root_visitor, {});
@@ -3335,6 +3355,15 @@ void Heap::CreateFillerForArray(T object, int elements_to_trim,
void Heap::MakeHeapIterable() {
mark_compact_collector()->EnsureSweepingCompleted();
+
+ MakeLocalHeapLabsIterable();
+}
+
+void Heap::MakeLocalHeapLabsIterable() {
+ if (!FLAG_local_heaps) return;
+ safepoint()->IterateLocalHeaps([](LocalHeap* local_heap) {
+ local_heap->MakeLinearAllocationAreaIterable();
+ });
}
namespace {
@@ -4079,7 +4108,7 @@ const char* Heap::GarbageCollectionReasonToString(
UNREACHABLE();
}
-bool Heap::Contains(HeapObject value) {
+bool Heap::Contains(HeapObject value) const {
if (V8_ENABLE_THIRD_PARTY_HEAP_BOOL) {
return true;
}
@@ -4096,7 +4125,7 @@ bool Heap::Contains(HeapObject value) {
new_lo_space_->Contains(value));
}
-bool Heap::InSpace(HeapObject value, AllocationSpace space) {
+bool Heap::InSpace(HeapObject value, AllocationSpace space) const {
if (memory_allocator()->IsOutsideAllocatedSpace(value.address())) {
return false;
}
@@ -4123,7 +4152,7 @@ bool Heap::InSpace(HeapObject value, AllocationSpace space) {
UNREACHABLE();
}
-bool Heap::InSpaceSlow(Address addr, AllocationSpace space) {
+bool Heap::InSpaceSlow(Address addr, AllocationSpace space) const {
if (memory_allocator()->IsOutsideAllocatedSpace(addr)) {
return false;
}
@@ -4167,32 +4196,13 @@ bool Heap::IsValidAllocationSpace(AllocationSpace space) {
}
#ifdef VERIFY_HEAP
-class VerifyReadOnlyPointersVisitor : public VerifyPointersVisitor {
- public:
- explicit VerifyReadOnlyPointersVisitor(Heap* heap)
- : VerifyPointersVisitor(heap) {}
-
- protected:
- void VerifyPointers(HeapObject host, MaybeObjectSlot start,
- MaybeObjectSlot end) override {
- if (!host.is_null()) {
- CHECK(ReadOnlyHeap::Contains(host.map()));
- }
- VerifyPointersVisitor::VerifyPointers(host, start, end);
-
- for (MaybeObjectSlot current = start; current < end; ++current) {
- HeapObject heap_object;
- if ((*current)->GetHeapObject(&heap_object)) {
- CHECK(ReadOnlyHeap::Contains(heap_object));
- }
- }
- }
-};
-
void Heap::Verify() {
CHECK(HasBeenSetUp());
+ SafepointScope safepoint_scope(this);
HandleScope scope(isolate());
+ MakeLocalHeapLabsIterable();
+
// We have to wait here for the sweeper threads to have an iterable heap.
mark_compact_collector()->EnsureSweepingCompleted();
array_buffer_sweeper()->EnsureFinished();
@@ -4225,8 +4235,7 @@ void Heap::Verify() {
void Heap::VerifyReadOnlyHeap() {
CHECK(!read_only_space_->writable());
- VerifyReadOnlyPointersVisitor read_only_visitor(this);
- read_only_space_->Verify(isolate(), &read_only_visitor);
+ read_only_space_->Verify(isolate());
}
class SlotVerifyingVisitor : public ObjectVisitor {
@@ -4311,21 +4320,20 @@ class OldToNewSlotVerifyingVisitor : public SlotVerifyingVisitor {
void VisitEphemeron(HeapObject host, int index, ObjectSlot key,
ObjectSlot target) override {
VisitPointer(host, target);
- if (FLAG_minor_mc) {
- VisitPointer(host, target);
- } else {
- // Keys are handled separately and should never appear in this set.
- CHECK(!InUntypedSet(key));
- Object k = *key;
- if (!ObjectInYoungGeneration(host) && ObjectInYoungGeneration(k)) {
- EphemeronHashTable table = EphemeronHashTable::cast(host);
- auto it = ephemeron_remembered_set_->find(table);
- CHECK(it != ephemeron_remembered_set_->end());
- int slot_index =
- EphemeronHashTable::SlotToIndex(table.address(), key.address());
- InternalIndex entry = EphemeronHashTable::IndexToEntry(slot_index);
- CHECK(it->second.find(entry.as_int()) != it->second.end());
- }
+#ifdef ENABLE_MINOR_MC
+ if (FLAG_minor_mc) return VisitPointer(host, target);
+#endif
+ // Keys are handled separately and should never appear in this set.
+ CHECK(!InUntypedSet(key));
+ Object k = *key;
+ if (!ObjectInYoungGeneration(host) && ObjectInYoungGeneration(k)) {
+ EphemeronHashTable table = EphemeronHashTable::cast(host);
+ auto it = ephemeron_remembered_set_->find(table);
+ CHECK(it != ephemeron_remembered_set_->end());
+ int slot_index =
+ EphemeronHashTable::SlotToIndex(table.address(), key.address());
+ InternalIndex entry = EphemeronHashTable::IndexToEntry(slot_index);
+ CHECK(it->second.find(entry.as_int()) != it->second.end());
}
}
@@ -4390,12 +4398,7 @@ void Heap::VerifyRememberedSetFor(HeapObject object) {
#ifdef DEBUG
void Heap::VerifyCountersAfterSweeping() {
- if (FLAG_local_heaps) {
- // Ensure heap is iterable
- safepoint()->IterateLocalHeaps([](LocalHeap* local_heap) {
- local_heap->MakeLinearAllocationAreaIterable();
- });
- }
+ MakeLocalHeapLabsIterable();
PagedSpaceIterator spaces(this);
for (PagedSpace* space = spaces.Next(); space != nullptr;
@@ -4768,6 +4771,10 @@ void Heap::ConfigureHeap(const v8::ResourceConstraints& constraints) {
RoundDown<Page::kPageSize>(initial_semispace_size_);
}
+ if (FLAG_lazy_new_space_shrinking) {
+ initial_semispace_size_ = max_semi_space_size_;
+ }
+
// Initialize initial_old_space_size_.
{
initial_old_generation_size_ = kMaxInitialOldGenerationSize;
@@ -4993,6 +5000,11 @@ bool Heap::IsRetryOfFailedAllocation(LocalHeap* local_heap) {
return local_heap->allocation_failed_;
}
+void Heap::AlwaysAllocateAfterTearDownStarted() {
+ always_allocate_scope_count_++;
+ collection_barrier_.ShutdownRequested();
+}
+
Heap::HeapGrowingMode Heap::CurrentHeapGrowingMode() {
if (ShouldReduceMemory() || FLAG_stress_compaction) {
return Heap::HeapGrowingMode::kMinimal;
@@ -5009,12 +5021,15 @@ Heap::HeapGrowingMode Heap::CurrentHeapGrowingMode() {
return Heap::HeapGrowingMode::kDefault;
}
-size_t Heap::GlobalMemoryAvailable() {
- return UseGlobalMemoryScheduling()
- ? GlobalSizeOfObjects() < global_allocation_limit_
- ? global_allocation_limit_ - GlobalSizeOfObjects()
- : 0
- : new_space_->Capacity() + 1;
+base::Optional<size_t> Heap::GlobalMemoryAvailable() {
+ if (!UseGlobalMemoryScheduling()) return {};
+
+ size_t global_size = GlobalSizeOfObjects();
+
+ if (global_size < global_allocation_limit_)
+ return global_allocation_limit_ - global_size;
+
+ return 0;
}
double Heap::PercentToOldGenerationLimit() {
@@ -5097,10 +5112,12 @@ Heap::IncrementalMarkingLimit Heap::IncrementalMarkingLimitReached() {
}
size_t old_generation_space_available = OldGenerationSpaceAvailable();
- const size_t global_memory_available = GlobalMemoryAvailable();
+ const base::Optional<size_t> global_memory_available =
+ GlobalMemoryAvailable();
if (old_generation_space_available > new_space_->Capacity() &&
- (global_memory_available > new_space_->Capacity())) {
+ (!global_memory_available ||
+ global_memory_available > new_space_->Capacity())) {
return IncrementalMarkingLimit::kNoLimit;
}
if (ShouldOptimizeForMemoryUsage()) {
@@ -5112,7 +5129,7 @@ Heap::IncrementalMarkingLimit Heap::IncrementalMarkingLimitReached() {
if (old_generation_space_available == 0) {
return IncrementalMarkingLimit::kHardLimit;
}
- if (global_memory_available == 0) {
+ if (global_memory_available && *global_memory_available == 0) {
return IncrementalMarkingLimit::kHardLimit;
}
return IncrementalMarkingLimit::kSoftLimit;
@@ -5155,7 +5172,7 @@ HeapObject Heap::EnsureImmovableCode(HeapObject heap_object, int object_size) {
if (!Heap::IsImmovable(heap_object)) {
if (isolate()->serializer_enabled() ||
code_space_->first_page()->Contains(heap_object.address())) {
- MemoryChunk::FromHeapObject(heap_object)->MarkNeverEvacuate();
+ BasicMemoryChunk::FromHeapObject(heap_object)->MarkNeverEvacuate();
} else {
// Discard the first code allocation, which was on a page where it could
// be moved.
@@ -5298,13 +5315,15 @@ void Heap::SetUpFromReadOnlyHeap(ReadOnlyHeap* ro_heap) {
DCHECK_NOT_NULL(ro_heap);
DCHECK_IMPLIES(read_only_space_ != nullptr,
read_only_space_ == ro_heap->read_only_space());
- space_[RO_SPACE] = read_only_space_ = ro_heap->read_only_space();
+ space_[RO_SPACE] = nullptr;
+ read_only_space_ = ro_heap->read_only_space();
}
void Heap::ReplaceReadOnlySpace(SharedReadOnlySpace* space) {
CHECK(V8_SHARED_RO_HEAP_BOOL);
delete read_only_space_;
- space_[RO_SPACE] = read_only_space_ = space;
+
+ read_only_space_ = space;
}
void Heap::SetUpSpaces() {
@@ -5432,6 +5451,10 @@ void Heap::NotifyDeserializationComplete() {
#endif // DEBUG
}
+ if (FLAG_stress_concurrent_allocation) {
+ StressConcurrentAllocatorTask::Schedule(isolate());
+ }
+
deserialization_complete_ = true;
}
@@ -5443,7 +5466,15 @@ void Heap::NotifyBootstrapComplete() {
}
}
-void Heap::NotifyOldGenerationExpansion() {
+void Heap::NotifyOldGenerationExpansion(AllocationSpace space,
+ MemoryChunk* chunk) {
+ // Pages created during bootstrapping may contain immortal immovable objects.
+ if (!deserialization_complete()) {
+ chunk->MarkNeverEvacuate();
+ }
+ if (space == CODE_SPACE || space == CODE_LO_SPACE) {
+ isolate()->AddCodeMemoryChunk(chunk);
+ }
const size_t kMemoryReducerActivationThreshold = 1 * MB;
if (old_generation_capacity_after_bootstrap_ && ms_count_ == 0 &&
OldGenerationCapacity() >= old_generation_capacity_after_bootstrap_ +
@@ -5494,6 +5525,14 @@ void Heap::RegisterExternallyReferencedObject(Address* location) {
void Heap::StartTearDown() {
SetGCState(TEAR_DOWN);
+
+ // Background threads may allocate and block until a GC is performed. However,
+ // this might never happen when the main thread tries to quit and no longer
+ // processes the event queue. Avoid this deadlock by allowing all allocations
+ // after tear-down has been requested, to make sure all background threads
+ // finish.
+ AlwaysAllocateAfterTearDownStarted();
+
#ifdef VERIFY_HEAP
// {StartTearDown} is called fairly early during Isolate teardown, so it's
// a good time to run heap verification (if requested), before starting to
@@ -5585,7 +5624,7 @@ void Heap::TearDown() {
tracer_.reset();
isolate()->read_only_heap()->OnHeapTearDown();
- space_[RO_SPACE] = read_only_space_ = nullptr;
+ read_only_space_ = nullptr;
for (int i = FIRST_MUTABLE_SPACE; i <= LAST_MUTABLE_SPACE; i++) {
delete space_[i];
space_[i] = nullptr;
@@ -5919,14 +5958,14 @@ class UnreachableObjectsFilter : public HeapObjectsFilter {
bool SkipObject(HeapObject object) override {
if (object.IsFreeSpaceOrFiller()) return true;
- MemoryChunk* chunk = MemoryChunk::FromHeapObject(object);
+ BasicMemoryChunk* chunk = BasicMemoryChunk::FromHeapObject(object);
if (reachable_.count(chunk) == 0) return true;
return reachable_[chunk]->count(object) == 0;
}
private:
bool MarkAsReachable(HeapObject object) {
- MemoryChunk* chunk = MemoryChunk::FromHeapObject(object);
+ BasicMemoryChunk* chunk = BasicMemoryChunk::FromHeapObject(object);
if (reachable_.count(chunk) == 0) {
reachable_[chunk] = new std::unordered_set<HeapObject, Object::Hasher>();
}
@@ -6008,7 +6047,7 @@ class UnreachableObjectsFilter : public HeapObjectsFilter {
Heap* heap_;
DisallowHeapAllocation no_allocation_;
- std::unordered_map<MemoryChunk*,
+ std::unordered_map<BasicMemoryChunk*,
std::unordered_set<HeapObject, Object::Hasher>*>
reachable_;
};
@@ -6016,6 +6055,7 @@ class UnreachableObjectsFilter : public HeapObjectsFilter {
HeapObjectIterator::HeapObjectIterator(
Heap* heap, HeapObjectIterator::HeapObjectsFiltering filtering)
: heap_(heap),
+ safepoint_scope_(std::make_unique<SafepointScope>(heap)),
filtering_(filtering),
filter_(nullptr),
space_iterator_(nullptr),
@@ -6794,7 +6834,7 @@ bool Heap::PageFlagsAreConsistent(HeapObject object) {
if (V8_ENABLE_THIRD_PARTY_HEAP_BOOL) {
return true;
}
- MemoryChunk* chunk = MemoryChunk::FromHeapObject(object);
+ BasicMemoryChunk* chunk = BasicMemoryChunk::FromHeapObject(object);
heap_internals::MemoryChunk* slim_chunk =
heap_internals::MemoryChunk::FromHeapObject(object);
@@ -6803,7 +6843,7 @@ bool Heap::PageFlagsAreConsistent(HeapObject object) {
CHECK_EQ(chunk->IsFlagSet(MemoryChunk::INCREMENTAL_MARKING),
slim_chunk->IsMarking());
- AllocationSpace identity = chunk->owner_identity();
+ AllocationSpace identity = chunk->owner()->identity();
// Generation consistency.
CHECK_EQ(identity == NEW_SPACE || identity == NEW_LO_SPACE,
diff --git a/chromium/v8/src/heap/heap.h b/chromium/v8/src/heap/heap.h
index 888d174c02f..91214f40398 100644
--- a/chromium/v8/src/heap/heap.h
+++ b/chromium/v8/src/heap/heap.h
@@ -63,6 +63,7 @@ using v8::MemoryPressureLevel;
class AllocationObserver;
class ArrayBufferCollector;
class ArrayBufferSweeper;
+class BasicMemoryChunk;
class CodeLargeObjectSpace;
class ConcurrentMarking;
class GCIdleTimeHandler;
@@ -88,6 +89,7 @@ class Page;
class PagedSpace;
class ReadOnlyHeap;
class RootVisitor;
+class SafepointScope;
class ScavengeJob;
class Scavenger;
class ScavengerCollector;
@@ -449,7 +451,7 @@ class Heap {
void NotifyBootstrapComplete();
- void NotifyOldGenerationExpansion();
+ void NotifyOldGenerationExpansion(AllocationSpace space, MemoryChunk* chunk);
inline Address* NewSpaceAllocationTopAddress();
inline Address* NewSpaceAllocationLimitAddress();
@@ -458,8 +460,9 @@ class Heap {
// Move len non-weak tagged elements from src_slot to dst_slot of dst_object.
// The source and destination memory ranges can overlap.
- void MoveRange(HeapObject dst_object, ObjectSlot dst_slot,
- ObjectSlot src_slot, int len, WriteBarrierMode mode);
+ V8_EXPORT_PRIVATE void MoveRange(HeapObject dst_object, ObjectSlot dst_slot,
+ ObjectSlot src_slot, int len,
+ WriteBarrierMode mode);
// Copy len non-weak tagged elements from src_slot to dst_slot of dst_object.
// The source and destination memory ranges must not overlap.
@@ -474,6 +477,9 @@ class Heap {
V8_EXPORT_PRIVATE HeapObject CreateFillerObjectAt(
Address addr, int size, ClearRecordedSlots clear_slots_mode);
+ void CreateFillerObjectAtBackground(Address addr, int size,
+ ClearFreedMemoryMode clear_memory_mode);
+
template <typename T>
void CreateFillerForArray(T object, int elements_to_trim, int bytes_to_trim);
@@ -663,6 +669,8 @@ class Heap {
void SetSerializedObjects(FixedArray objects);
void SetSerializedGlobalProxySizes(FixedArray sizes);
+ void SetBasicBlockProfilingData(Handle<ArrayList> list);
+
// For post mortem debugging.
void RememberUnmappedPage(Address page, bool compacted);
@@ -749,7 +757,7 @@ class Heap {
void TearDown();
// Returns whether SetUp has been called.
- bool HasBeenSetUp();
+ bool HasBeenSetUp() const;
// ===========================================================================
// Getters for spaces. =======================================================
@@ -769,9 +777,6 @@ class Heap {
inline PagedSpace* paged_space(int idx);
inline Space* space(int idx);
- // Returns name of the space.
- V8_EXPORT_PRIVATE static const char* GetSpaceName(AllocationSpace space);
-
// ===========================================================================
// Getters to other components. ==============================================
// ===========================================================================
@@ -779,6 +784,9 @@ class Heap {
GCTracer* tracer() { return tracer_.get(); }
MemoryAllocator* memory_allocator() { return memory_allocator_.get(); }
+ const MemoryAllocator* memory_allocator() const {
+ return memory_allocator_.get();
+ }
inline Isolate* isolate();
@@ -1056,7 +1064,7 @@ class Heap {
return local_embedder_heap_tracer_.get();
}
- void SetEmbedderHeapTracer(EmbedderHeapTracer* tracer);
+ V8_EXPORT_PRIVATE void SetEmbedderHeapTracer(EmbedderHeapTracer* tracer);
EmbedderHeapTracer* GetEmbedderHeapTracer() const;
void RegisterExternallyReferencedObject(Address* location);
@@ -1107,15 +1115,15 @@ class Heap {
// Checks whether an address/object is in the non-read-only heap (including
// auxiliary area and unused area). Use IsValidHeapObject if checking both
// heaps is required.
- V8_EXPORT_PRIVATE bool Contains(HeapObject value);
+ V8_EXPORT_PRIVATE bool Contains(HeapObject value) const;
// Checks whether an address/object in a space.
// Currently used by tests, serialization and heap verification only.
- V8_EXPORT_PRIVATE bool InSpace(HeapObject value, AllocationSpace space);
+ V8_EXPORT_PRIVATE bool InSpace(HeapObject value, AllocationSpace space) const;
// Slow methods that can be used for verification as they can also be used
// with off-heap Addresses.
- V8_EXPORT_PRIVATE bool InSpaceSlow(Address addr, AllocationSpace space);
+ V8_EXPORT_PRIVATE bool InSpaceSlow(Address addr, AllocationSpace space) const;
static inline Heap* FromWritableHeapObject(HeapObject obj);
@@ -1539,12 +1547,15 @@ class Heap {
Heap* heap_;
base::Mutex mutex_;
base::ConditionVariable cond_;
- bool requested_;
+ bool gc_requested_;
+ bool shutdown_requested_;
public:
- explicit CollectionBarrier(Heap* heap) : heap_(heap), requested_(false) {}
+ explicit CollectionBarrier(Heap* heap)
+ : heap_(heap), gc_requested_(false), shutdown_requested_(false) {}
- void Increment();
+ void CollectionPerformed();
+ void ShutdownRequested();
void Wait();
};
@@ -1635,6 +1646,9 @@ class Heap {
// over all objects. May cause a GC.
void MakeHeapIterable();
+ // Ensure that LABs of local heaps are iterable.
+ void MakeLocalHeapLabsIterable();
+
// Performs garbage collection in a safepoint.
// Returns the number of freed global handles.
size_t PerformGarbageCollection(
@@ -1771,6 +1785,7 @@ class Heap {
// Code that should be run before and after each GC. Includes some
// reporting/verification activities when compiled with DEBUG set.
void GarbageCollectionPrologue();
+ void GarbageCollectionPrologueInSafepoint();
void GarbageCollectionEpilogue();
void GarbageCollectionEpilogueInSafepoint();
@@ -1851,11 +1866,14 @@ class Heap {
bool always_allocate() { return always_allocate_scope_count_ != 0; }
V8_EXPORT_PRIVATE bool CanExpandOldGeneration(size_t size);
+ V8_EXPORT_PRIVATE bool CanExpandOldGenerationBackground(size_t size);
bool ShouldExpandOldGenerationOnSlowAllocation(
LocalHeap* local_heap = nullptr);
bool IsRetryOfFailedAllocation(LocalHeap* local_heap);
+ void AlwaysAllocateAfterTearDownStarted();
+
HeapGrowingMode CurrentHeapGrowingMode();
double PercentToOldGenerationLimit();
@@ -1867,7 +1885,7 @@ class Heap {
return FLAG_global_gc_scheduling && local_embedder_heap_tracer();
}
- size_t GlobalMemoryAvailable();
+ base::Optional<size_t> GlobalMemoryAvailable();
void RecomputeLimits(GarbageCollector collector);
@@ -2269,6 +2287,7 @@ class Heap {
friend class IncrementalMarking;
friend class IncrementalMarkingJob;
friend class OffThreadHeap;
+ friend class OffThreadSpace;
friend class OldLargeObjectSpace;
template <typename ConcreteVisitor, typename MarkingState>
friend class MarkingVisitorBase;
@@ -2389,12 +2408,12 @@ class CodePageCollectionMemoryModificationScope {
// was registered to be executable. It can be used by concurrent threads.
class CodePageMemoryModificationScope {
public:
- explicit inline CodePageMemoryModificationScope(MemoryChunk* chunk);
+ explicit inline CodePageMemoryModificationScope(BasicMemoryChunk* chunk);
explicit inline CodePageMemoryModificationScope(Code object);
inline ~CodePageMemoryModificationScope();
private:
- MemoryChunk* chunk_;
+ BasicMemoryChunk* chunk_;
bool scope_active_;
// Disallow any GCs inside this scope, as a relocation of the underlying
@@ -2497,6 +2516,7 @@ class V8_EXPORT_PRIVATE HeapObjectIterator {
DISALLOW_HEAP_ALLOCATION(no_heap_allocation_)
Heap* heap_;
+ std::unique_ptr<SafepointScope> safepoint_scope_;
HeapObjectsFiltering filtering_;
HeapObjectsFilter* filter_;
// Space iterator for iterating all the spaces.
diff --git a/chromium/v8/src/heap/incremental-marking.cc b/chromium/v8/src/heap/incremental-marking.cc
index 8fb1492fe16..cb1eff27b27 100644
--- a/chromium/v8/src/heap/incremental-marking.cc
+++ b/chromium/v8/src/heap/incremental-marking.cc
@@ -93,6 +93,13 @@ void IncrementalMarking::MarkBlackAndVisitObjectDueToLayoutChange(
collector_->VisitObject(obj);
}
+void IncrementalMarking::MarkBlackBackground(HeapObject obj, int object_size) {
+ MarkBit mark_bit = atomic_marking_state()->MarkBitFrom(obj);
+ Marking::MarkBlack<AccessMode::ATOMIC>(mark_bit);
+ MemoryChunk* chunk = MemoryChunk::FromHeapObject(obj);
+ IncrementLiveBytesBackground(chunk, static_cast<intptr_t>(object_size));
+}
+
void IncrementalMarking::NotifyLeftTrimming(HeapObject from, HeapObject to) {
DCHECK(IsMarking());
DCHECK(MemoryChunk::FromHeapObject(from)->SweepingDone());
@@ -367,6 +374,11 @@ void IncrementalMarking::StartBlackAllocation() {
heap()->old_space()->MarkLinearAllocationAreaBlack();
heap()->map_space()->MarkLinearAllocationAreaBlack();
heap()->code_space()->MarkLinearAllocationAreaBlack();
+ if (FLAG_local_heaps) {
+ heap()->safepoint()->IterateLocalHeaps([](LocalHeap* local_heap) {
+ local_heap->MarkLinearAllocationAreaBlack();
+ });
+ }
if (FLAG_trace_incremental_marking) {
heap()->isolate()->PrintWithTimestamp(
"[IncrementalMarking] Black allocation started\n");
@@ -378,6 +390,11 @@ void IncrementalMarking::PauseBlackAllocation() {
heap()->old_space()->UnmarkLinearAllocationArea();
heap()->map_space()->UnmarkLinearAllocationArea();
heap()->code_space()->UnmarkLinearAllocationArea();
+ if (FLAG_local_heaps) {
+ heap()->safepoint()->IterateLocalHeaps([](LocalHeap* local_heap) {
+ local_heap->UnmarkLinearAllocationArea();
+ });
+ }
if (FLAG_trace_incremental_marking) {
heap()->isolate()->PrintWithTimestamp(
"[IncrementalMarking] Black allocation paused\n");
@@ -728,10 +745,13 @@ StepResult IncrementalMarking::EmbedderStep(double expected_duration_ms,
}
}
}
+ // |deadline - heap_->MonotonicallyIncreasingTimeInMs()| could be negative,
+ // which means |local_tracer| won't do any actual tracing, so there is no
+ // need to check for |deadline <= heap_->MonotonicallyIncreasingTimeInMs()|.
bool remote_tracing_done =
local_tracer->Trace(deadline - heap_->MonotonicallyIncreasingTimeInMs());
double current = heap_->MonotonicallyIncreasingTimeInMs();
- local_tracer->SetEmbedderWorklistEmpty(true);
+ local_tracer->SetEmbedderWorklistEmpty(empty_worklist);
*duration_ms = current - start;
return (empty_worklist && remote_tracing_done)
? StepResult::kNoImmediateWork
@@ -790,6 +810,20 @@ void IncrementalMarking::Stop() {
SetState(STOPPED);
is_compacting_ = false;
FinishBlackAllocation();
+
+ if (FLAG_local_heaps) {
+ // Merge live bytes counters of background threads
+ for (auto pair : background_live_bytes_) {
+ MemoryChunk* memory_chunk = pair.first;
+ intptr_t live_bytes = pair.second;
+
+ if (live_bytes) {
+ marking_state()->IncrementLiveBytes(memory_chunk, live_bytes);
+ }
+ }
+
+ background_live_bytes_.clear();
+ }
}
@@ -958,24 +992,32 @@ StepResult IncrementalMarking::AdvanceWithDeadline(
void IncrementalMarking::FinalizeSweeping() {
DCHECK(state_ == SWEEPING);
-#ifdef DEBUG
- // Enforce safepoint here such that background threads cannot allocate between
- // completing sweeping and VerifyCountersAfterSweeping().
- SafepointScope scope(heap());
-#endif
- if (collector_->sweeping_in_progress() &&
- (!FLAG_concurrent_sweeping ||
- !collector_->sweeper()->AreSweeperTasksRunning())) {
- collector_->EnsureSweepingCompleted();
+ if (ContinueConcurrentSweeping()) {
+ if (FLAG_stress_incremental_marking) {
+ // To start concurrent marking a bit earlier, support the concurrent sweepers
+ // from the main thread by sweeping some pages.
+ SupportConcurrentSweeping();
+ }
+ return;
}
- if (!collector_->sweeping_in_progress()) {
+
+ SafepointScope scope(heap());
+ collector_->EnsureSweepingCompleted();
+ DCHECK(!collector_->sweeping_in_progress());
#ifdef DEBUG
- heap_->VerifyCountersAfterSweeping();
-#else
- SafepointScope scope(heap());
+ heap_->VerifyCountersAfterSweeping();
#endif
- StartMarking();
- }
+ StartMarking();
+}
+
+bool IncrementalMarking::ContinueConcurrentSweeping() {
+ if (!collector_->sweeping_in_progress()) return false;
+ return FLAG_concurrent_sweeping &&
+ collector_->sweeper()->AreSweeperTasksRunning();
+}
+
+void IncrementalMarking::SupportConcurrentSweeping() {
+ collector_->sweeper()->SupportConcurrentSweeping();
}
size_t IncrementalMarking::StepSizeToKeepUpWithAllocations() {
diff --git a/chromium/v8/src/heap/incremental-marking.h b/chromium/v8/src/heap/incremental-marking.h
index 7d06c086499..c507c022a70 100644
--- a/chromium/v8/src/heap/incremental-marking.h
+++ b/chromium/v8/src/heap/incremental-marking.h
@@ -5,6 +5,7 @@
#ifndef V8_HEAP_INCREMENTAL_MARKING_H_
#define V8_HEAP_INCREMENTAL_MARKING_H_
+#include "src/base/platform/mutex.h"
#include "src/heap/heap.h"
#include "src/heap/incremental-marking-job.h"
#include "src/heap/mark-compact.h"
@@ -168,6 +169,8 @@ class V8_EXPORT_PRIVATE IncrementalMarking final {
StepOrigin step_origin);
void FinalizeSweeping();
+ bool ContinueConcurrentSweeping();
+ void SupportConcurrentSweeping();
StepResult Step(double max_step_size_in_ms, CompletionAction action,
StepOrigin step_origin);
@@ -205,6 +208,8 @@ class V8_EXPORT_PRIVATE IncrementalMarking final {
// the concurrent marker.
void MarkBlackAndVisitObjectDueToLayoutChange(HeapObject obj);
+ void MarkBlackBackground(HeapObject obj, int object_size);
+
bool IsCompacting() { return IsMarking() && is_compacting_; }
void ProcessBlackAllocatedObject(HeapObject obj);
@@ -235,6 +240,11 @@ class V8_EXPORT_PRIVATE IncrementalMarking final {
bool IsBelowActivationThresholds() const;
+ void IncrementLiveBytesBackground(MemoryChunk* chunk, intptr_t by) {
+ base::MutexGuard guard(&background_live_bytes_mutex_);
+ background_live_bytes_[chunk] += by;
+ }
+
private:
class Observer : public AllocationObserver {
public:
@@ -337,6 +347,9 @@ class V8_EXPORT_PRIVATE IncrementalMarking final {
AtomicMarkingState atomic_marking_state_;
NonAtomicMarkingState non_atomic_marking_state_;
+ base::Mutex background_live_bytes_mutex_;
+ std::unordered_map<MemoryChunk*, intptr_t> background_live_bytes_;
+
DISALLOW_IMPLICIT_CONSTRUCTORS(IncrementalMarking);
};
} // namespace internal
diff --git a/chromium/v8/src/heap/large-spaces.cc b/chromium/v8/src/heap/large-spaces.cc
index 40363919497..0becaec35a5 100644
--- a/chromium/v8/src/heap/large-spaces.cc
+++ b/chromium/v8/src/heap/large-spaces.cc
@@ -9,8 +9,9 @@
#include "src/heap/incremental-marking.h"
#include "src/heap/list.h"
#include "src/heap/marking.h"
+#include "src/heap/memory-allocator.h"
#include "src/heap/memory-chunk-inl.h"
-#include "src/heap/remembered-set-inl.h"
+#include "src/heap/remembered-set.h"
#include "src/heap/slot-set.h"
#include "src/heap/spaces-inl.h"
#include "src/logging/log.h"
@@ -134,7 +135,7 @@ AllocationResult OldLargeObjectSpace::AllocateRaw(int object_size,
heap()->incremental_marking()->black_allocation(),
heap()->incremental_marking()->marking_state()->IsBlack(object));
page->InitializationMemoryFence();
- heap()->NotifyOldGenerationExpansion();
+ heap()->NotifyOldGenerationExpansion(identity(), page);
AllocationStep(object_size, object.address(), object_size);
return object;
}
@@ -163,7 +164,7 @@ size_t LargeObjectSpace::CommittedPhysicalMemory() {
}
LargePage* CodeLargeObjectSpace::FindPage(Address a) {
- const Address key = MemoryChunk::FromAddress(a)->address();
+ const Address key = BasicMemoryChunk::FromAddress(a)->address();
auto it = chunk_map_.find(key);
if (it != chunk_map_.end()) {
LargePage* page = it->second;
@@ -223,7 +224,8 @@ void LargeObjectSpace::AddPage(LargePage* page, size_t object_size) {
page_count_++;
memory_chunk_list_.PushBack(page);
page->set_owner(this);
- page->SetOldGenerationPageFlags(heap()->incremental_marking()->IsMarking());
+ page->SetOldGenerationPageFlags(!is_off_thread() &&
+ heap()->incremental_marking()->IsMarking());
}
void LargeObjectSpace::RemovePage(LargePage* page, size_t object_size) {
@@ -273,7 +275,7 @@ void LargeObjectSpace::FreeUnmarkedObjects() {
}
bool LargeObjectSpace::Contains(HeapObject object) {
- MemoryChunk* chunk = MemoryChunk::FromHeapObject(object);
+ BasicMemoryChunk* chunk = BasicMemoryChunk::FromHeapObject(object);
bool owned = (chunk->owner() == this);
@@ -514,7 +516,6 @@ AllocationResult CodeLargeObjectSpace::AllocateRaw(int object_size) {
void CodeLargeObjectSpace::AddPage(LargePage* page, size_t object_size) {
OldLargeObjectSpace::AddPage(page, object_size);
InsertChunkMapEntries(page);
- heap()->isolate()->AddCodeMemoryChunk(page);
}
void CodeLargeObjectSpace::RemovePage(LargePage* page, size_t object_size) {
diff --git a/chromium/v8/src/heap/list.h b/chromium/v8/src/heap/list.h
index 5ab9a03610f..a8a75045074 100644
--- a/chromium/v8/src/heap/list.h
+++ b/chromium/v8/src/heap/list.h
@@ -68,8 +68,8 @@ class List {
element->list_node().set_next(nullptr);
}
- bool Contains(T* element) {
- T* it = front_;
+ bool Contains(T* element) const {
+ const T* it = front_;
while (it) {
if (it == element) return true;
it = it->list_node().next();
@@ -77,11 +77,14 @@ class List {
return false;
}
- bool Empty() { return !front_ && !back_; }
+ bool Empty() const { return !front_ && !back_; }
T* front() { return front_; }
T* back() { return back_; }
+ const T* front() const { return front_; }
+ const T* back() const { return back_; }
+
private:
void AddFirstElement(T* element) {
DCHECK(!back_);
@@ -129,6 +132,9 @@ class ListNode {
T* next() { return next_; }
T* prev() { return prev_; }
+ const T* next() const { return next_; }
+ const T* prev() const { return prev_; }
+
void Initialize() {
next_ = nullptr;
prev_ = nullptr;
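The list.h hunk only adds const overloads (Contains, Empty, front, back, next, prev) so that read-only callers can traverse a const heap::List. A minimal sketch of that const/non-const accessor pairing with a stand-in Node type (not the V8 ListNode):

struct Node {
  Node* next_ = nullptr;
  Node* next() { return next_; }
  const Node* next() const { return next_; }  // added overload mirrors the hunk
};

// Compiles against a const front pointer because traversal now only needs
// the const accessors.
inline bool Contains(const Node* front, const Node* element) {
  for (const Node* it = front; it != nullptr; it = it->next()) {
    if (it == element) return true;
  }
  return false;
}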
diff --git a/chromium/v8/src/heap/local-allocator.h b/chromium/v8/src/heap/local-allocator.h
index ba8cd2e610b..9e4d5f688cb 100644
--- a/chromium/v8/src/heap/local-allocator.h
+++ b/chromium/v8/src/heap/local-allocator.h
@@ -7,6 +7,8 @@
#include "src/common/globals.h"
#include "src/heap/heap.h"
+#include "src/heap/new-spaces.h"
+#include "src/heap/paged-spaces.h"
#include "src/heap/spaces.h"
namespace v8 {
diff --git a/chromium/v8/src/heap/local-heap.cc b/chromium/v8/src/heap/local-heap.cc
index 3aea67411dd..55076bee25d 100644
--- a/chromium/v8/src/heap/local-heap.cc
+++ b/chromium/v8/src/heap/local-heap.cc
@@ -107,5 +107,13 @@ void LocalHeap::MakeLinearAllocationAreaIterable() {
old_space_allocator_.MakeLinearAllocationAreaIterable();
}
+void LocalHeap::MarkLinearAllocationAreaBlack() {
+ old_space_allocator_.MarkLinearAllocationAreaBlack();
+}
+
+void LocalHeap::UnmarkLinearAllocationArea() {
+ old_space_allocator_.UnmarkLinearAllocationArea();
+}
+
} // namespace internal
} // namespace v8
diff --git a/chromium/v8/src/heap/local-heap.h b/chromium/v8/src/heap/local-heap.h
index 31c66bc2be5..8406c39042d 100644
--- a/chromium/v8/src/heap/local-heap.h
+++ b/chromium/v8/src/heap/local-heap.h
@@ -48,6 +48,17 @@ class LocalHeap {
ConcurrentAllocator* old_space_allocator() { return &old_space_allocator_; }
+ // Mark/Unmark linear allocation areas black. Used for black allocation.
+ void MarkLinearAllocationAreaBlack();
+ void UnmarkLinearAllocationArea();
+
+ // Give up linear allocation areas. Used for mark-compact GC.
+ void FreeLinearAllocationArea();
+
+ // Create filler object in linear allocation areas. Verifying requires
+ // iterable heap.
+ void MakeLinearAllocationAreaIterable();
+
private:
enum class ThreadState {
// Threads in this state need to be stopped in a safepoint.
@@ -68,9 +79,6 @@ class LocalHeap {
void EnterSafepoint();
- void FreeLinearAllocationArea();
- void MakeLinearAllocationAreaIterable();
-
Heap* heap_;
base::Mutex state_mutex_;
@@ -107,6 +115,19 @@ class ParkedScope {
LocalHeap* local_heap_;
};
+class ParkedMutexGuard {
+ base::Mutex* guard_;
+
+ public:
+ explicit ParkedMutexGuard(LocalHeap* local_heap, base::Mutex* guard)
+ : guard_(guard) {
+ ParkedScope scope(local_heap);
+ guard_->Lock();
+ }
+
+ ~ParkedMutexGuard() { guard_->Unlock(); }
+};
+
} // namespace internal
} // namespace v8
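Note that in ParkedMutexGuard the ParkedScope only lives for the duration of the constructor body, so the thread counts as parked exactly while it blocks in Lock() and runs unparked once it holds the mutex; a background thread waiting on a lock therefore cannot hold up a safepoint. A hypothetical usage sketch, where the function and mutex names are illustrative and not taken from this patch:

void BackgroundAllocationStep(LocalHeap* local_heap, base::Mutex* space_mutex) {
  // Blocks on space_mutex in the parked state; a safepoint requested by the
  // main thread does not have to wait for this thread while it is parked.
  ParkedMutexGuard guard(local_heap, space_mutex);
  // Critical section: the thread is unparked again and holds the mutex.
}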
diff --git a/chromium/v8/src/heap/mark-compact-inl.h b/chromium/v8/src/heap/mark-compact-inl.h
index 7c06286f97a..e554601b4a4 100644
--- a/chromium/v8/src/heap/mark-compact-inl.h
+++ b/chromium/v8/src/heap/mark-compact-inl.h
@@ -65,7 +65,7 @@ void MarkCompactCollector::RecordSlot(HeapObject object, ObjectSlot slot,
void MarkCompactCollector::RecordSlot(HeapObject object, HeapObjectSlot slot,
HeapObject target) {
- MemoryChunk* target_page = MemoryChunk::FromHeapObject(target);
+ BasicMemoryChunk* target_page = BasicMemoryChunk::FromHeapObject(target);
MemoryChunk* source_page = MemoryChunk::FromHeapObject(object);
if (target_page->IsEvacuationCandidate<AccessMode::ATOMIC>() &&
!source_page->ShouldSkipEvacuationSlotRecording<AccessMode::ATOMIC>()) {
@@ -76,7 +76,7 @@ void MarkCompactCollector::RecordSlot(HeapObject object, HeapObjectSlot slot,
void MarkCompactCollector::RecordSlot(MemoryChunk* source_page,
HeapObjectSlot slot, HeapObject target) {
- MemoryChunk* target_page = MemoryChunk::FromHeapObject(target);
+ BasicMemoryChunk* target_page = BasicMemoryChunk::FromHeapObject(target);
if (target_page->IsEvacuationCandidate<AccessMode::ATOMIC>()) {
RememberedSet<OLD_TO_OLD>::Insert<AccessMode::ATOMIC>(source_page,
slot.address());
@@ -215,7 +215,7 @@ void LiveObjectRange<mode>::iterator::AdvanceToNextValidObject() {
// Note that we know that we are at a one word filler when
// object_start + object_size - kTaggedSize == object_start.
if (addr != end) {
- DCHECK_EQ(chunk_, MemoryChunk::FromAddress(end));
+ DCHECK_EQ(chunk_, BasicMemoryChunk::FromAddress(end));
uint32_t end_mark_bit_index = chunk_->AddressToMarkbitIndex(end);
unsigned int end_cell_index =
end_mark_bit_index >> Bitmap::kBitsPerCellLog2;
diff --git a/chromium/v8/src/heap/mark-compact.cc b/chromium/v8/src/heap/mark-compact.cc
index 7b609ab22a4..4e594c7f5d1 100644
--- a/chromium/v8/src/heap/mark-compact.cc
+++ b/chromium/v8/src/heap/mark-compact.cc
@@ -16,6 +16,7 @@
#include "src/heap/array-buffer-collector.h"
#include "src/heap/array-buffer-sweeper.h"
#include "src/heap/array-buffer-tracker-inl.h"
+#include "src/heap/code-object-registry.h"
#include "src/heap/gc-tracer.h"
#include "src/heap/incremental-marking-inl.h"
#include "src/heap/invalidated-slots-inl.h"
@@ -31,6 +32,7 @@
#include "src/heap/objects-visiting-inl.h"
#include "src/heap/read-only-heap.h"
#include "src/heap/read-only-spaces.h"
+#include "src/heap/safepoint.h"
#include "src/heap/spaces-inl.h"
#include "src/heap/sweeper.h"
#include "src/heap/worklist.h"
@@ -868,6 +870,13 @@ void MarkCompactCollector::Prepare() {
space = spaces.Next()) {
space->PrepareForMarkCompact();
}
+
+ if (FLAG_local_heaps) {
+ // Fill and reset all background thread LABs
+ heap_->safepoint()->IterateLocalHeaps(
+ [](LocalHeap* local_heap) { local_heap->FreeLinearAllocationArea(); });
+ }
+
heap()->account_external_memory_concurrently_freed();
}
@@ -1223,7 +1232,7 @@ class RecordMigratedSlotVisitor : public ObjectVisitor {
inline virtual void RecordMigratedSlot(HeapObject host, MaybeObject value,
Address slot) {
if (value->IsStrongOrWeak()) {
- MemoryChunk* p = MemoryChunk::FromAddress(value.ptr());
+ BasicMemoryChunk* p = BasicMemoryChunk::FromAddress(value.ptr());
if (p->InYoungGeneration()) {
DCHECK_IMPLIES(
p->IsToPage(),
@@ -2713,8 +2722,6 @@ static inline SlotCallbackResult UpdateStrongSlot(TSlot slot) {
// It does not expect to encounter pointers to dead objects.
class PointersUpdatingVisitor : public ObjectVisitor, public RootVisitor {
public:
- PointersUpdatingVisitor() {}
-
void VisitPointer(HeapObject host, ObjectSlot p) override {
UpdateStrongSlotInternal(p);
}
@@ -4410,7 +4417,7 @@ class YoungGenerationRecordMigratedSlotVisitor final
inline void RecordMigratedSlot(HeapObject host, MaybeObject value,
Address slot) final {
if (value->IsStrongOrWeak()) {
- MemoryChunk* p = MemoryChunk::FromAddress(value.ptr());
+ BasicMemoryChunk* p = BasicMemoryChunk::FromAddress(value.ptr());
if (p->InYoungGeneration()) {
DCHECK_IMPLIES(
p->IsToPage(),
@@ -4712,6 +4719,7 @@ void MinorMarkCompactCollector::EvacuatePrologue() {
PageRange(new_space->first_allocatable_address(), new_space->top())) {
new_space_evacuation_pages_.push_back(p);
}
+
new_space->Flip();
new_space->ResetLinearAllocationArea();
@@ -4984,6 +4992,10 @@ void MinorMarkCompactCollector::MarkLiveObjects() {
&root_visitor, &IsUnmarkedObjectForYoungGeneration);
DrainMarkingWorklist();
}
+
+ if (FLAG_minor_mc_trace_fragmentation) {
+ TraceFragmentation();
+ }
}
void MinorMarkCompactCollector::DrainMarkingWorklist() {
@@ -4999,6 +5011,57 @@ void MinorMarkCompactCollector::DrainMarkingWorklist() {
DCHECK(marking_worklist.IsLocalEmpty());
}
+void MinorMarkCompactCollector::TraceFragmentation() {
+ NewSpace* new_space = heap()->new_space();
+ const std::array<size_t, 4> free_size_class_limits = {0, 1024, 2048, 4096};
+ size_t free_bytes_of_class[free_size_class_limits.size()] = {0};
+ size_t live_bytes = 0;
+ size_t allocatable_bytes = 0;
+ for (Page* p :
+ PageRange(new_space->first_allocatable_address(), new_space->top())) {
+ Address free_start = p->area_start();
+ for (auto object_and_size : LiveObjectRange<kGreyObjects>(
+ p, non_atomic_marking_state()->bitmap(p))) {
+ HeapObject const object = object_and_size.first;
+ Address free_end = object.address();
+ if (free_end != free_start) {
+ size_t free_bytes = free_end - free_start;
+ int free_bytes_index = 0;
+ for (auto free_size_class_limit : free_size_class_limits) {
+ if (free_bytes >= free_size_class_limit) {
+ free_bytes_of_class[free_bytes_index] += free_bytes;
+ }
+ free_bytes_index++;
+ }
+ }
+ Map map = object.synchronized_map();
+ int size = object.SizeFromMap(map);
+ live_bytes += size;
+ free_start = free_end + size;
+ }
+ size_t area_end =
+ p->Contains(new_space->top()) ? new_space->top() : p->area_end();
+ if (free_start != area_end) {
+ size_t free_bytes = area_end - free_start;
+ int free_bytes_index = 0;
+ for (auto free_size_class_limit : free_size_class_limits) {
+ if (free_bytes >= free_size_class_limit) {
+ free_bytes_of_class[free_bytes_index] += free_bytes;
+ }
+ free_bytes_index++;
+ }
+ }
+ allocatable_bytes += area_end - p->area_start();
+ CHECK_EQ(allocatable_bytes, live_bytes + free_bytes_of_class[0]);
+ }
+ PrintIsolate(
+ isolate(),
+ "Minor Mark-Compact Fragmentation: allocatable_bytes=%zu live_bytes=%zu "
+ "free_bytes=%zu free_bytes_1K=%zu free_bytes_2K=%zu free_bytes_4K=%zu\n",
+ allocatable_bytes, live_bytes, free_bytes_of_class[0],
+ free_bytes_of_class[1], free_bytes_of_class[2], free_bytes_of_class[3]);
+}
+
void MinorMarkCompactCollector::Evacuate() {
TRACE_GC(heap()->tracer(), GCTracer::Scope::MINOR_MC_EVACUATE);
base::MutexGuard guard(heap()->relocation_mutex());
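In TraceFragmentation the size classes are cumulative: a free span is added to every bucket whose limit it reaches, so free_bytes_of_class[0] always holds the total free bytes, which is what the CHECK against allocatable_bytes relies on. A standalone sketch of that bucketing with made-up span sizes:

#include <array>
#include <cstddef>
#include <cstdio>

int main() {
  const std::array<std::size_t, 4> limits = {0, 1024, 2048, 4096};
  std::size_t buckets[4] = {0, 0, 0, 0};
  const std::size_t free_spans[] = {512, 3000, 8192};
  for (std::size_t bytes : free_spans) {
    for (std::size_t i = 0; i < limits.size(); i++) {
      // Count the span in every class whose limit it reaches.
      if (bytes >= limits[i]) buckets[i] += bytes;
    }
  }
  // Prints: 11704 11192 11192 8192 -- bucket 0 holds the total free bytes.
  std::printf("%zu %zu %zu %zu\n", buckets[0], buckets[1], buckets[2],
              buckets[3]);
  return 0;
}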
diff --git a/chromium/v8/src/heap/mark-compact.h b/chromium/v8/src/heap/mark-compact.h
index 30723ede385..35a5a85e91f 100644
--- a/chromium/v8/src/heap/mark-compact.h
+++ b/chromium/v8/src/heap/mark-compact.h
@@ -247,8 +247,10 @@ class MarkCompactCollectorBase {
class MinorMarkingState final
: public MarkingStateBase<MinorMarkingState, AccessMode::ATOMIC> {
public:
- ConcurrentBitmap<AccessMode::ATOMIC>* bitmap(const MemoryChunk* chunk) const {
- return chunk->young_generation_bitmap<AccessMode::ATOMIC>();
+ ConcurrentBitmap<AccessMode::ATOMIC>* bitmap(
+ const BasicMemoryChunk* chunk) const {
+ return MemoryChunk::cast(chunk)
+ ->young_generation_bitmap<AccessMode::ATOMIC>();
}
void IncrementLiveBytes(MemoryChunk* chunk, intptr_t by) {
@@ -269,8 +271,9 @@ class MinorNonAtomicMarkingState final
AccessMode::NON_ATOMIC> {
public:
ConcurrentBitmap<AccessMode::NON_ATOMIC>* bitmap(
- const MemoryChunk* chunk) const {
- return chunk->young_generation_bitmap<AccessMode::NON_ATOMIC>();
+ const BasicMemoryChunk* chunk) const {
+ return MemoryChunk::cast(chunk)
+ ->young_generation_bitmap<AccessMode::NON_ATOMIC>();
}
void IncrementLiveBytes(MemoryChunk* chunk, intptr_t by) {
@@ -293,25 +296,26 @@ class MinorNonAtomicMarkingState final
class MajorMarkingState final
: public MarkingStateBase<MajorMarkingState, AccessMode::ATOMIC> {
public:
- ConcurrentBitmap<AccessMode::ATOMIC>* bitmap(const MemoryChunk* chunk) const {
+ ConcurrentBitmap<AccessMode::ATOMIC>* bitmap(
+ const BasicMemoryChunk* chunk) const {
DCHECK_EQ(reinterpret_cast<intptr_t>(&chunk->marking_bitmap_) -
reinterpret_cast<intptr_t>(chunk),
- MemoryChunk::kMarkBitmapOffset);
+ BasicMemoryChunk::kMarkBitmapOffset);
return chunk->marking_bitmap<AccessMode::ATOMIC>();
}
// Concurrent marking uses local live bytes so we may do these accesses
// non-atomically.
void IncrementLiveBytes(MemoryChunk* chunk, intptr_t by) {
- chunk->live_byte_count_ += by;
+ chunk->live_byte_count_.fetch_add(by, std::memory_order_relaxed);
}
intptr_t live_bytes(MemoryChunk* chunk) const {
- return chunk->live_byte_count_;
+ return chunk->live_byte_count_.load(std::memory_order_relaxed);
}
void SetLiveBytes(MemoryChunk* chunk, intptr_t value) {
- chunk->live_byte_count_ = value;
+ chunk->live_byte_count_.store(value, std::memory_order_relaxed);
}
};
@@ -320,16 +324,16 @@ class MajorMarkingState final
class MajorAtomicMarkingState final
: public MarkingStateBase<MajorAtomicMarkingState, AccessMode::ATOMIC> {
public:
- ConcurrentBitmap<AccessMode::ATOMIC>* bitmap(const MemoryChunk* chunk) const {
+ ConcurrentBitmap<AccessMode::ATOMIC>* bitmap(
+ const BasicMemoryChunk* chunk) const {
DCHECK_EQ(reinterpret_cast<intptr_t>(&chunk->marking_bitmap_) -
reinterpret_cast<intptr_t>(chunk),
- MemoryChunk::kMarkBitmapOffset);
+ BasicMemoryChunk::kMarkBitmapOffset);
return chunk->marking_bitmap<AccessMode::ATOMIC>();
}
void IncrementLiveBytes(MemoryChunk* chunk, intptr_t by) {
- std::atomic_fetch_add(
- reinterpret_cast<std::atomic<intptr_t>*>(&chunk->live_byte_count_), by);
+ chunk->live_byte_count_.fetch_add(by);
}
};
@@ -338,23 +342,23 @@ class MajorNonAtomicMarkingState final
AccessMode::NON_ATOMIC> {
public:
ConcurrentBitmap<AccessMode::NON_ATOMIC>* bitmap(
- const MemoryChunk* chunk) const {
+ const BasicMemoryChunk* chunk) const {
DCHECK_EQ(reinterpret_cast<intptr_t>(&chunk->marking_bitmap_) -
reinterpret_cast<intptr_t>(chunk),
- MemoryChunk::kMarkBitmapOffset);
+ BasicMemoryChunk::kMarkBitmapOffset);
return chunk->marking_bitmap<AccessMode::NON_ATOMIC>();
}
void IncrementLiveBytes(MemoryChunk* chunk, intptr_t by) {
- chunk->live_byte_count_ += by;
+ chunk->live_byte_count_.fetch_add(by, std::memory_order_relaxed);
}
intptr_t live_bytes(MemoryChunk* chunk) const {
- return chunk->live_byte_count_;
+ return chunk->live_byte_count_.load(std::memory_order_relaxed);
}
void SetLiveBytes(MemoryChunk* chunk, intptr_t value) {
- chunk->live_byte_count_ = value;
+ chunk->live_byte_count_.store(value, std::memory_order_relaxed);
}
};
@@ -515,9 +519,6 @@ class MarkCompactCollector final : public MarkCompactCollectorBase {
HeapObjectSlot slot, HeapObject target);
void RecordLiveSlotsOnPage(Page* page);
- void UpdateSlots(SlotsBuffer* buffer);
- void UpdateSlotsRecordedIn(SlotsBuffer* buffer);
-
bool is_compacting() const { return compacting_; }
// Ensures that sweeping is finished.
@@ -567,7 +568,6 @@ class MarkCompactCollector final : public MarkCompactCollectorBase {
void VerifyMarking();
#ifdef VERIFY_HEAP
- void VerifyValidStoreAndSlotsBufferEntries();
void VerifyMarkbitsAreClean();
void VerifyMarkbitsAreDirty(ReadOnlySpace* space);
void VerifyMarkbitsAreClean(PagedSpace* space);
@@ -856,6 +856,7 @@ class MinorMarkCompactCollector final : public MarkCompactCollectorBase {
void MarkRootSetInParallel(RootMarkingVisitor* root_visitor);
V8_INLINE void MarkRootObject(HeapObject obj);
void DrainMarkingWorklist() override;
+ void TraceFragmentation();
void ClearNonLiveReferences() override;
void EvacuatePrologue() override;
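The marking states now update live_byte_count_ through atomic fetch_add/load/store, mostly with std::memory_order_relaxed: a statistics counter only needs its increments from concurrent markers not to be lost, and nothing is ordered against it, so relaxed atomics are sufficient. A tiny self-contained sketch of the same pattern:

#include <atomic>
#include <cstdint>

struct LiveBytesCounter {
  std::atomic<intptr_t> value{0};

  // Safe to call from several marker threads at once; no increment is lost.
  void Increment(intptr_t by) {
    value.fetch_add(by, std::memory_order_relaxed);
  }

  intptr_t Get() const { return value.load(std::memory_order_relaxed); }
};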
diff --git a/chromium/v8/src/heap/marking-visitor.h b/chromium/v8/src/heap/marking-visitor.h
index a4c2a9f522c..3010445eefa 100644
--- a/chromium/v8/src/heap/marking-visitor.h
+++ b/chromium/v8/src/heap/marking-visitor.h
@@ -73,11 +73,11 @@ template <typename ConcreteState, AccessMode access_mode>
class MarkingStateBase {
public:
V8_INLINE MarkBit MarkBitFrom(HeapObject obj) {
- return MarkBitFrom(MemoryChunk::FromHeapObject(obj), obj.ptr());
+ return MarkBitFrom(BasicMemoryChunk::FromHeapObject(obj), obj.ptr());
}
// {addr} may be tagged or aligned.
- V8_INLINE MarkBit MarkBitFrom(MemoryChunk* p, Address addr) {
+ V8_INLINE MarkBit MarkBitFrom(BasicMemoryChunk* p, Address addr) {
return static_cast<ConcreteState*>(this)->bitmap(p)->MarkBitFromIndex(
p->AddressToMarkbitIndex(addr));
}
@@ -115,10 +115,11 @@ class MarkingStateBase {
}
V8_INLINE bool GreyToBlack(HeapObject obj) {
- MemoryChunk* p = MemoryChunk::FromHeapObject(obj);
- MarkBit markbit = MarkBitFrom(p, obj.address());
+ BasicMemoryChunk* chunk = BasicMemoryChunk::FromHeapObject(obj);
+ MarkBit markbit = MarkBitFrom(chunk, obj.address());
if (!Marking::GreyToBlack<access_mode>(markbit)) return false;
- static_cast<ConcreteState*>(this)->IncrementLiveBytes(p, obj.Size());
+ static_cast<ConcreteState*>(this)->IncrementLiveBytes(
+ MemoryChunk::cast(chunk), obj.Size());
return true;
}
diff --git a/chromium/v8/src/heap/memory-allocator.cc b/chromium/v8/src/heap/memory-allocator.cc
new file mode 100644
index 00000000000..f1047e2248f
--- /dev/null
+++ b/chromium/v8/src/heap/memory-allocator.cc
@@ -0,0 +1,778 @@
+// Copyright 2020 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/heap/memory-allocator.h"
+
+#include <cinttypes>
+
+#include "src/base/address-region.h"
+#include "src/common/globals.h"
+#include "src/execution/isolate.h"
+#include "src/flags/flags.h"
+#include "src/heap/gc-tracer.h"
+#include "src/heap/heap-inl.h"
+#include "src/heap/memory-chunk.h"
+#include "src/heap/read-only-spaces.h"
+#include "src/logging/log.h"
+
+namespace v8 {
+namespace internal {
+
+static base::LazyInstance<CodeRangeAddressHint>::type code_range_address_hint =
+ LAZY_INSTANCE_INITIALIZER;
+
+Address CodeRangeAddressHint::GetAddressHint(size_t code_range_size) {
+ base::MutexGuard guard(&mutex_);
+ auto it = recently_freed_.find(code_range_size);
+ if (it == recently_freed_.end() || it->second.empty()) {
+ return reinterpret_cast<Address>(GetRandomMmapAddr());
+ }
+ Address result = it->second.back();
+ it->second.pop_back();
+ return result;
+}
+
+void CodeRangeAddressHint::NotifyFreedCodeRange(Address code_range_start,
+ size_t code_range_size) {
+ base::MutexGuard guard(&mutex_);
+ recently_freed_[code_range_size].push_back(code_range_start);
+}
+
+// -----------------------------------------------------------------------------
+// MemoryAllocator
+//
+
+MemoryAllocator::MemoryAllocator(Isolate* isolate, size_t capacity,
+ size_t code_range_size)
+ : isolate_(isolate),
+ data_page_allocator_(isolate->page_allocator()),
+ code_page_allocator_(nullptr),
+ capacity_(RoundUp(capacity, Page::kPageSize)),
+ size_(0),
+ size_executable_(0),
+ lowest_ever_allocated_(static_cast<Address>(-1ll)),
+ highest_ever_allocated_(kNullAddress),
+ unmapper_(isolate->heap(), this) {
+ InitializeCodePageAllocator(data_page_allocator_, code_range_size);
+}
+
+void MemoryAllocator::InitializeCodePageAllocator(
+ v8::PageAllocator* page_allocator, size_t requested) {
+ DCHECK_NULL(code_page_allocator_instance_.get());
+
+ code_page_allocator_ = page_allocator;
+
+ if (requested == 0) {
+ if (!isolate_->RequiresCodeRange()) return;
+ // When a target requires the code range feature, we put all code objects
+ // in a kMaximalCodeRangeSize range of virtual address space, so that
+ // they can call each other with near calls.
+ requested = kMaximalCodeRangeSize;
+ } else if (requested <= kMinimumCodeRangeSize) {
+ requested = kMinimumCodeRangeSize;
+ }
+
+ const size_t reserved_area =
+ kReservedCodeRangePages * MemoryAllocator::GetCommitPageSize();
+ if (requested < (kMaximalCodeRangeSize - reserved_area)) {
+ requested += RoundUp(reserved_area, MemoryChunk::kPageSize);
+ // Fulfilling both the reserved pages requirement and the huge code area
+ // alignment is not supported (requires re-implementation).
+ DCHECK_LE(kMinExpectedOSPageSize, page_allocator->AllocatePageSize());
+ }
+ DCHECK(!isolate_->RequiresCodeRange() || requested <= kMaximalCodeRangeSize);
+
+ Address hint =
+ RoundDown(code_range_address_hint.Pointer()->GetAddressHint(requested),
+ page_allocator->AllocatePageSize());
+ VirtualMemory reservation(
+ page_allocator, requested, reinterpret_cast<void*>(hint),
+ Max(kMinExpectedOSPageSize, page_allocator->AllocatePageSize()));
+ if (!reservation.IsReserved()) {
+ V8::FatalProcessOutOfMemory(isolate_,
+ "CodeRange setup: allocate virtual memory");
+ }
+ code_range_ = reservation.region();
+ isolate_->AddCodeRange(code_range_.begin(), code_range_.size());
+
+ // We are sure that we have mapped a block of requested addresses.
+ DCHECK_GE(reservation.size(), requested);
+ Address base = reservation.address();
+
+ // On some platforms, specifically Win64, we need to reserve some pages at
+ // the beginning of an executable space. See
+ // https://cs.chromium.org/chromium/src/components/crash/content/
+ // app/crashpad_win.cc?rcl=fd680447881449fba2edcf0589320e7253719212&l=204
+ // for details.
+ if (reserved_area > 0) {
+ if (!reservation.SetPermissions(base, reserved_area,
+ PageAllocator::kReadWrite))
+ V8::FatalProcessOutOfMemory(isolate_, "CodeRange setup: set permissions");
+
+ base += reserved_area;
+ }
+ Address aligned_base = RoundUp(base, MemoryChunk::kAlignment);
+ size_t size =
+ RoundDown(reservation.size() - (aligned_base - base) - reserved_area,
+ MemoryChunk::kPageSize);
+ DCHECK(IsAligned(aligned_base, kMinExpectedOSPageSize));
+
+ LOG(isolate_,
+ NewEvent("CodeRange", reinterpret_cast<void*>(reservation.address()),
+ requested));
+
+ code_reservation_ = std::move(reservation);
+ code_page_allocator_instance_ = std::make_unique<base::BoundedPageAllocator>(
+ page_allocator, aligned_base, size,
+ static_cast<size_t>(MemoryChunk::kAlignment));
+ code_page_allocator_ = code_page_allocator_instance_.get();
+}
+
+void MemoryAllocator::TearDown() {
+ unmapper()->TearDown();
+
+ // Check that spaces were torn down before MemoryAllocator.
+ DCHECK_EQ(size_, 0u);
+ // TODO(gc) this will be true again when we fix FreeMemory.
+ // DCHECK_EQ(0, size_executable_);
+ capacity_ = 0;
+
+ if (last_chunk_.IsReserved()) {
+ last_chunk_.Free();
+ }
+
+ if (code_page_allocator_instance_.get()) {
+ DCHECK(!code_range_.is_empty());
+ code_range_address_hint.Pointer()->NotifyFreedCodeRange(code_range_.begin(),
+ code_range_.size());
+ code_range_ = base::AddressRegion();
+ code_page_allocator_instance_.reset();
+ }
+ code_page_allocator_ = nullptr;
+ data_page_allocator_ = nullptr;
+}
+
+class MemoryAllocator::Unmapper::UnmapFreeMemoryTask : public CancelableTask {
+ public:
+ explicit UnmapFreeMemoryTask(Isolate* isolate, Unmapper* unmapper)
+ : CancelableTask(isolate),
+ unmapper_(unmapper),
+ tracer_(isolate->heap()->tracer()) {}
+
+ private:
+ void RunInternal() override {
+ TRACE_BACKGROUND_GC(tracer_,
+ GCTracer::BackgroundScope::BACKGROUND_UNMAPPER);
+ unmapper_->PerformFreeMemoryOnQueuedChunks<FreeMode::kUncommitPooled>();
+ unmapper_->active_unmapping_tasks_--;
+ unmapper_->pending_unmapping_tasks_semaphore_.Signal();
+ if (FLAG_trace_unmapper) {
+ PrintIsolate(unmapper_->heap_->isolate(),
+ "UnmapFreeMemoryTask Done: id=%" PRIu64 "\n", id());
+ }
+ }
+
+ Unmapper* const unmapper_;
+ GCTracer* const tracer_;
+ DISALLOW_COPY_AND_ASSIGN(UnmapFreeMemoryTask);
+};
+
+void MemoryAllocator::Unmapper::FreeQueuedChunks() {
+ if (!heap_->IsTearingDown() && FLAG_concurrent_sweeping) {
+ if (!MakeRoomForNewTasks()) {
+ // kMaxUnmapperTasks are already running. Avoid creating any more.
+ if (FLAG_trace_unmapper) {
+ PrintIsolate(heap_->isolate(),
+ "Unmapper::FreeQueuedChunks: reached task limit (%d)\n",
+ kMaxUnmapperTasks);
+ }
+ return;
+ }
+ auto task = std::make_unique<UnmapFreeMemoryTask>(heap_->isolate(), this);
+ if (FLAG_trace_unmapper) {
+ PrintIsolate(heap_->isolate(),
+ "Unmapper::FreeQueuedChunks: new task id=%" PRIu64 "\n",
+ task->id());
+ }
+ DCHECK_LT(pending_unmapping_tasks_, kMaxUnmapperTasks);
+ DCHECK_LE(active_unmapping_tasks_, pending_unmapping_tasks_);
+ DCHECK_GE(active_unmapping_tasks_, 0);
+ active_unmapping_tasks_++;
+ task_ids_[pending_unmapping_tasks_++] = task->id();
+ V8::GetCurrentPlatform()->CallOnWorkerThread(std::move(task));
+ } else {
+ PerformFreeMemoryOnQueuedChunks<FreeMode::kUncommitPooled>();
+ }
+}
+
+void MemoryAllocator::Unmapper::CancelAndWaitForPendingTasks() {
+ for (int i = 0; i < pending_unmapping_tasks_; i++) {
+ if (heap_->isolate()->cancelable_task_manager()->TryAbort(task_ids_[i]) !=
+ TryAbortResult::kTaskAborted) {
+ pending_unmapping_tasks_semaphore_.Wait();
+ }
+ }
+ pending_unmapping_tasks_ = 0;
+ active_unmapping_tasks_ = 0;
+
+ if (FLAG_trace_unmapper) {
+ PrintIsolate(
+ heap_->isolate(),
+ "Unmapper::CancelAndWaitForPendingTasks: no tasks remaining\n");
+ }
+}
+
+void MemoryAllocator::Unmapper::PrepareForGC() {
+ // Free non-regular chunks because they cannot be re-used.
+ PerformFreeMemoryOnQueuedNonRegularChunks();
+}
+
+void MemoryAllocator::Unmapper::EnsureUnmappingCompleted() {
+ CancelAndWaitForPendingTasks();
+ PerformFreeMemoryOnQueuedChunks<FreeMode::kReleasePooled>();
+}
+
+bool MemoryAllocator::Unmapper::MakeRoomForNewTasks() {
+ DCHECK_LE(pending_unmapping_tasks_, kMaxUnmapperTasks);
+
+ if (active_unmapping_tasks_ == 0 && pending_unmapping_tasks_ > 0) {
+ // All previous unmapping tasks have been run to completion.
+ // Finalize those tasks to make room for new ones.
+ CancelAndWaitForPendingTasks();
+ }
+ return pending_unmapping_tasks_ != kMaxUnmapperTasks;
+}
+
+void MemoryAllocator::Unmapper::PerformFreeMemoryOnQueuedNonRegularChunks() {
+ MemoryChunk* chunk = nullptr;
+ while ((chunk = GetMemoryChunkSafe<kNonRegular>()) != nullptr) {
+ allocator_->PerformFreeMemory(chunk);
+ }
+}
+
+template <MemoryAllocator::Unmapper::FreeMode mode>
+void MemoryAllocator::Unmapper::PerformFreeMemoryOnQueuedChunks() {
+ MemoryChunk* chunk = nullptr;
+ if (FLAG_trace_unmapper) {
+ PrintIsolate(
+ heap_->isolate(),
+ "Unmapper::PerformFreeMemoryOnQueuedChunks: %d queued chunks\n",
+ NumberOfChunks());
+ }
+ // Regular chunks.
+ while ((chunk = GetMemoryChunkSafe<kRegular>()) != nullptr) {
+ bool pooled = chunk->IsFlagSet(MemoryChunk::POOLED);
+ allocator_->PerformFreeMemory(chunk);
+ if (pooled) AddMemoryChunkSafe<kPooled>(chunk);
+ }
+ if (mode == MemoryAllocator::Unmapper::FreeMode::kReleasePooled) {
+ // The previous loop uncommitted any pages marked as pooled and added them
+ // to the pooled list. In case of kReleasePooled we need to free them
+ // though.
+ while ((chunk = GetMemoryChunkSafe<kPooled>()) != nullptr) {
+ allocator_->Free<MemoryAllocator::kAlreadyPooled>(chunk);
+ }
+ }
+ PerformFreeMemoryOnQueuedNonRegularChunks();
+}
+
+void MemoryAllocator::Unmapper::TearDown() {
+ CHECK_EQ(0, pending_unmapping_tasks_);
+ PerformFreeMemoryOnQueuedChunks<FreeMode::kReleasePooled>();
+ for (int i = 0; i < kNumberOfChunkQueues; i++) {
+ DCHECK(chunks_[i].empty());
+ }
+}
+
+size_t MemoryAllocator::Unmapper::NumberOfCommittedChunks() {
+ base::MutexGuard guard(&mutex_);
+ return chunks_[kRegular].size() + chunks_[kNonRegular].size();
+}
+
+int MemoryAllocator::Unmapper::NumberOfChunks() {
+ base::MutexGuard guard(&mutex_);
+ size_t result = 0;
+ for (int i = 0; i < kNumberOfChunkQueues; i++) {
+ result += chunks_[i].size();
+ }
+ return static_cast<int>(result);
+}
+
+size_t MemoryAllocator::Unmapper::CommittedBufferedMemory() {
+ base::MutexGuard guard(&mutex_);
+
+ size_t sum = 0;
+ // kPooled chunks are already uncommitted. We only have to account for
+ // kRegular and kNonRegular chunks.
+ for (auto& chunk : chunks_[kRegular]) {
+ sum += chunk->size();
+ }
+ for (auto& chunk : chunks_[kNonRegular]) {
+ sum += chunk->size();
+ }
+ return sum;
+}
+
+bool MemoryAllocator::CommitMemory(VirtualMemory* reservation) {
+ Address base = reservation->address();
+ size_t size = reservation->size();
+ if (!reservation->SetPermissions(base, size, PageAllocator::kReadWrite)) {
+ return false;
+ }
+ UpdateAllocatedSpaceLimits(base, base + size);
+ return true;
+}
+
+bool MemoryAllocator::UncommitMemory(VirtualMemory* reservation) {
+ size_t size = reservation->size();
+ if (!reservation->SetPermissions(reservation->address(), size,
+ PageAllocator::kNoAccess)) {
+ return false;
+ }
+ return true;
+}
+
+void MemoryAllocator::FreeMemory(v8::PageAllocator* page_allocator,
+ Address base, size_t size) {
+ CHECK(FreePages(page_allocator, reinterpret_cast<void*>(base), size));
+}
+
+Address MemoryAllocator::AllocateAlignedMemory(
+ size_t reserve_size, size_t commit_size, size_t alignment,
+ Executability executable, void* hint, VirtualMemory* controller) {
+ v8::PageAllocator* page_allocator = this->page_allocator(executable);
+ DCHECK(commit_size <= reserve_size);
+ VirtualMemory reservation(page_allocator, reserve_size, hint, alignment);
+ if (!reservation.IsReserved()) return kNullAddress;
+ Address base = reservation.address();
+ size_ += reservation.size();
+
+ if (executable == EXECUTABLE) {
+ if (!CommitExecutableMemory(&reservation, base, commit_size,
+ reserve_size)) {
+ base = kNullAddress;
+ }
+ } else {
+ if (reservation.SetPermissions(base, commit_size,
+ PageAllocator::kReadWrite)) {
+ UpdateAllocatedSpaceLimits(base, base + commit_size);
+ } else {
+ base = kNullAddress;
+ }
+ }
+
+ if (base == kNullAddress) {
+ // Failed to commit the body. Free the mapping and any partially committed
+ // regions inside it.
+ reservation.Free();
+ size_ -= reserve_size;
+ return kNullAddress;
+ }
+
+ *controller = std::move(reservation);
+ return base;
+}
+
+V8_EXPORT_PRIVATE BasicMemoryChunk* MemoryAllocator::AllocateBasicChunk(
+ size_t reserve_area_size, size_t commit_area_size, Executability executable,
+ BaseSpace* owner) {
+ DCHECK_LE(commit_area_size, reserve_area_size);
+
+ size_t chunk_size;
+ Heap* heap = isolate_->heap();
+ Address base = kNullAddress;
+ VirtualMemory reservation;
+ Address area_start = kNullAddress;
+ Address area_end = kNullAddress;
+ void* address_hint =
+ AlignedAddress(heap->GetRandomMmapAddr(), MemoryChunk::kAlignment);
+
+ //
+ // MemoryChunk layout:
+ //
+ // Executable
+ // +----------------------------+<- base aligned with MemoryChunk::kAlignment
+ // | Header |
+ // +----------------------------+<- base + CodePageGuardStartOffset
+ // | Guard |
+ // +----------------------------+<- area_start_
+ // | Area |
+ // +----------------------------+<- area_end_ (area_start + commit_area_size)
+ // | Committed but not used |
+ // +----------------------------+<- aligned at OS page boundary
+ // | Reserved but not committed |
+ // +----------------------------+<- aligned at OS page boundary
+ // | Guard |
+ // +----------------------------+<- base + chunk_size
+ //
+ // Non-executable
+ // +----------------------------+<- base aligned with MemoryChunk::kAlignment
+ // | Header |
+ // +----------------------------+<- area_start_ (base + area_start_)
+ // | Area |
+ // +----------------------------+<- area_end_ (area_start + commit_area_size)
+ // | Committed but not used |
+ // +----------------------------+<- aligned at OS page boundary
+ // | Reserved but not committed |
+ // +----------------------------+<- base + chunk_size
+ //
+
+ if (executable == EXECUTABLE) {
+ chunk_size = ::RoundUp(MemoryChunkLayout::ObjectStartOffsetInCodePage() +
+ reserve_area_size +
+ MemoryChunkLayout::CodePageGuardSize(),
+ GetCommitPageSize());
+
+ // Size of header (not executable) plus area (executable).
+ size_t commit_size = ::RoundUp(
+ MemoryChunkLayout::CodePageGuardStartOffset() + commit_area_size,
+ GetCommitPageSize());
+ base =
+ AllocateAlignedMemory(chunk_size, commit_size, MemoryChunk::kAlignment,
+ executable, address_hint, &reservation);
+ if (base == kNullAddress) return nullptr;
+ // Update executable memory size.
+ size_executable_ += reservation.size();
+
+ if (Heap::ShouldZapGarbage()) {
+ ZapBlock(base, MemoryChunkLayout::CodePageGuardStartOffset(), kZapValue);
+ ZapBlock(base + MemoryChunkLayout::ObjectStartOffsetInCodePage(),
+ commit_area_size, kZapValue);
+ }
+
+ area_start = base + MemoryChunkLayout::ObjectStartOffsetInCodePage();
+ area_end = area_start + commit_area_size;
+ } else {
+ chunk_size = ::RoundUp(
+ MemoryChunkLayout::ObjectStartOffsetInDataPage() + reserve_area_size,
+ GetCommitPageSize());
+ size_t commit_size = ::RoundUp(
+ MemoryChunkLayout::ObjectStartOffsetInDataPage() + commit_area_size,
+ GetCommitPageSize());
+ base =
+ AllocateAlignedMemory(chunk_size, commit_size, MemoryChunk::kAlignment,
+ executable, address_hint, &reservation);
+
+ if (base == kNullAddress) return nullptr;
+
+ if (Heap::ShouldZapGarbage()) {
+ ZapBlock(
+ base,
+ MemoryChunkLayout::ObjectStartOffsetInDataPage() + commit_area_size,
+ kZapValue);
+ }
+
+ area_start = base + MemoryChunkLayout::ObjectStartOffsetInDataPage();
+ area_end = area_start + commit_area_size;
+ }
+
+ // Use chunk_size for statistics because we treat reserved but not-yet
+ // committed memory regions of chunks as allocated.
+ LOG(isolate_,
+ NewEvent("MemoryChunk", reinterpret_cast<void*>(base), chunk_size));
+
+ // We cannot use the last chunk in the address space because we would
+ // overflow when comparing top and limit if this chunk is used for a
+ // linear allocation area.
+ if ((base + chunk_size) == 0u) {
+ CHECK(!last_chunk_.IsReserved());
+ last_chunk_ = std::move(reservation);
+ UncommitMemory(&last_chunk_);
+ size_ -= chunk_size;
+ if (executable == EXECUTABLE) {
+ size_executable_ -= chunk_size;
+ }
+ CHECK(last_chunk_.IsReserved());
+ return AllocateBasicChunk(reserve_area_size, commit_area_size, executable,
+ owner);
+ }
+
+ BasicMemoryChunk* chunk =
+ BasicMemoryChunk::Initialize(heap, base, chunk_size, area_start, area_end,
+ owner, std::move(reservation));
+
+ return chunk;
+}
+
+MemoryChunk* MemoryAllocator::AllocateChunk(size_t reserve_area_size,
+ size_t commit_area_size,
+ Executability executable,
+ BaseSpace* owner) {
+ BasicMemoryChunk* basic_chunk = AllocateBasicChunk(
+ reserve_area_size, commit_area_size, executable, owner);
+
+ if (basic_chunk == nullptr) return nullptr;
+
+ MemoryChunk* chunk =
+ MemoryChunk::Initialize(basic_chunk, isolate_->heap(), executable);
+
+ if (chunk->executable()) RegisterExecutableMemoryChunk(chunk);
+ return chunk;
+}
+
+void MemoryAllocator::PartialFreeMemory(BasicMemoryChunk* chunk,
+ Address start_free,
+ size_t bytes_to_free,
+ Address new_area_end) {
+ VirtualMemory* reservation = chunk->reserved_memory();
+ DCHECK(reservation->IsReserved());
+ chunk->set_size(chunk->size() - bytes_to_free);
+ chunk->set_area_end(new_area_end);
+ if (chunk->IsFlagSet(MemoryChunk::IS_EXECUTABLE)) {
+ // Add guard page at the end.
+ size_t page_size = GetCommitPageSize();
+ DCHECK_EQ(0, chunk->area_end() % static_cast<Address>(page_size));
+ DCHECK_EQ(chunk->address() + chunk->size(),
+ chunk->area_end() + MemoryChunkLayout::CodePageGuardSize());
+ reservation->SetPermissions(chunk->area_end(), page_size,
+ PageAllocator::kNoAccess);
+ }
+ // On e.g. Windows, a reservation may be larger than a page and releasing
+ // partially starting at |start_free| will also release the potentially
+ // unused part behind the current page.
+ const size_t released_bytes = reservation->Release(start_free);
+ DCHECK_GE(size_, released_bytes);
+ size_ -= released_bytes;
+}
+
+void MemoryAllocator::UnregisterMemory(BasicMemoryChunk* chunk,
+ Executability executable) {
+ DCHECK(!chunk->IsFlagSet(MemoryChunk::UNREGISTERED));
+ VirtualMemory* reservation = chunk->reserved_memory();
+ const size_t size =
+ reservation->IsReserved() ? reservation->size() : chunk->size();
+ DCHECK_GE(size_, static_cast<size_t>(size));
+ size_ -= size;
+ if (executable == EXECUTABLE) {
+ DCHECK_GE(size_executable_, size);
+ size_executable_ -= size;
+ UnregisterExecutableMemoryChunk(static_cast<MemoryChunk*>(chunk));
+ }
+ chunk->SetFlag(MemoryChunk::UNREGISTERED);
+}
+
+void MemoryAllocator::UnregisterMemory(MemoryChunk* chunk) {
+ UnregisterMemory(chunk, chunk->executable());
+}
+
+void MemoryAllocator::FreeReadOnlyPage(ReadOnlyPage* chunk) {
+ DCHECK(!chunk->IsFlagSet(MemoryChunk::PRE_FREED));
+ LOG(isolate_, DeleteEvent("MemoryChunk", chunk));
+ UnregisterMemory(chunk);
+ chunk->SetFlag(MemoryChunk::PRE_FREED);
+ chunk->ReleaseMarkingBitmap();
+
+ VirtualMemory* reservation = chunk->reserved_memory();
+ if (reservation->IsReserved()) {
+ reservation->Free();
+ } else {
+ // Only read-only pages can have a non-initialized reservation object.
+ FreeMemory(page_allocator(NOT_EXECUTABLE), chunk->address(), chunk->size());
+ }
+}
+
+void MemoryAllocator::PreFreeMemory(MemoryChunk* chunk) {
+ DCHECK(!chunk->IsFlagSet(MemoryChunk::PRE_FREED));
+ LOG(isolate_, DeleteEvent("MemoryChunk", chunk));
+ UnregisterMemory(chunk);
+ isolate_->heap()->RememberUnmappedPage(reinterpret_cast<Address>(chunk),
+ chunk->IsEvacuationCandidate());
+ chunk->SetFlag(MemoryChunk::PRE_FREED);
+}
+
+void MemoryAllocator::PerformFreeMemory(MemoryChunk* chunk) {
+ DCHECK(chunk->IsFlagSet(MemoryChunk::UNREGISTERED));
+ DCHECK(chunk->IsFlagSet(MemoryChunk::PRE_FREED));
+ DCHECK(!chunk->InReadOnlySpace());
+ chunk->ReleaseAllAllocatedMemory();
+
+ VirtualMemory* reservation = chunk->reserved_memory();
+ if (chunk->IsFlagSet(MemoryChunk::POOLED)) {
+ UncommitMemory(reservation);
+ } else {
+ DCHECK(reservation->IsReserved());
+ reservation->Free();
+ }
+}
+
+template <MemoryAllocator::FreeMode mode>
+void MemoryAllocator::Free(MemoryChunk* chunk) {
+ switch (mode) {
+ case kFull:
+ PreFreeMemory(chunk);
+ PerformFreeMemory(chunk);
+ break;
+ case kAlreadyPooled:
+ // Pooled pages cannot be touched anymore as their memory is uncommitted.
+ // Pooled pages are not-executable.
+ FreeMemory(data_page_allocator(), chunk->address(),
+ static_cast<size_t>(MemoryChunk::kPageSize));
+ break;
+ case kPooledAndQueue:
+ DCHECK_EQ(chunk->size(), static_cast<size_t>(MemoryChunk::kPageSize));
+ DCHECK_EQ(chunk->executable(), NOT_EXECUTABLE);
+ chunk->SetFlag(MemoryChunk::POOLED);
+ V8_FALLTHROUGH;
+ case kPreFreeAndQueue:
+ PreFreeMemory(chunk);
+ // The chunks added to this queue will be freed by a concurrent thread.
+ unmapper()->AddMemoryChunkSafe(chunk);
+ break;
+ }
+}
+
+template EXPORT_TEMPLATE_DEFINE(V8_EXPORT_PRIVATE) void MemoryAllocator::Free<
+ MemoryAllocator::kFull>(MemoryChunk* chunk);
+
+template EXPORT_TEMPLATE_DEFINE(V8_EXPORT_PRIVATE) void MemoryAllocator::Free<
+ MemoryAllocator::kAlreadyPooled>(MemoryChunk* chunk);
+
+template EXPORT_TEMPLATE_DEFINE(V8_EXPORT_PRIVATE) void MemoryAllocator::Free<
+ MemoryAllocator::kPreFreeAndQueue>(MemoryChunk* chunk);
+
+template EXPORT_TEMPLATE_DEFINE(V8_EXPORT_PRIVATE) void MemoryAllocator::Free<
+ MemoryAllocator::kPooledAndQueue>(MemoryChunk* chunk);
+
+template <MemoryAllocator::AllocationMode alloc_mode, typename SpaceType>
+Page* MemoryAllocator::AllocatePage(size_t size, SpaceType* owner,
+ Executability executable) {
+ MemoryChunk* chunk = nullptr;
+ if (alloc_mode == kPooled) {
+ DCHECK_EQ(size, static_cast<size_t>(
+ MemoryChunkLayout::AllocatableMemoryInMemoryChunk(
+ owner->identity())));
+ DCHECK_EQ(executable, NOT_EXECUTABLE);
+ chunk = AllocatePagePooled(owner);
+ }
+ if (chunk == nullptr) {
+ chunk = AllocateChunk(size, size, executable, owner);
+ }
+ if (chunk == nullptr) return nullptr;
+ return owner->InitializePage(chunk);
+}
+
+template EXPORT_TEMPLATE_DEFINE(V8_EXPORT_PRIVATE)
+ Page* MemoryAllocator::AllocatePage<MemoryAllocator::kRegular, PagedSpace>(
+ size_t size, PagedSpace* owner, Executability executable);
+template EXPORT_TEMPLATE_DEFINE(V8_EXPORT_PRIVATE)
+ Page* MemoryAllocator::AllocatePage<MemoryAllocator::kRegular, SemiSpace>(
+ size_t size, SemiSpace* owner, Executability executable);
+template EXPORT_TEMPLATE_DEFINE(V8_EXPORT_PRIVATE)
+ Page* MemoryAllocator::AllocatePage<MemoryAllocator::kPooled, SemiSpace>(
+ size_t size, SemiSpace* owner, Executability executable);
+
+ReadOnlyPage* MemoryAllocator::AllocateReadOnlyPage(size_t size,
+ ReadOnlySpace* owner) {
+ BasicMemoryChunk* chunk = nullptr;
+ if (chunk == nullptr) {
+ chunk = AllocateBasicChunk(size, size, NOT_EXECUTABLE, owner);
+ }
+ if (chunk == nullptr) return nullptr;
+ return owner->InitializePage(chunk);
+}
+
+LargePage* MemoryAllocator::AllocateLargePage(size_t size,
+ LargeObjectSpace* owner,
+ Executability executable) {
+ MemoryChunk* chunk = AllocateChunk(size, size, executable, owner);
+ if (chunk == nullptr) return nullptr;
+ return LargePage::Initialize(isolate_->heap(), chunk, executable);
+}
+
+template <typename SpaceType>
+MemoryChunk* MemoryAllocator::AllocatePagePooled(SpaceType* owner) {
+ MemoryChunk* chunk = unmapper()->TryGetPooledMemoryChunkSafe();
+ if (chunk == nullptr) return nullptr;
+ const int size = MemoryChunk::kPageSize;
+ const Address start = reinterpret_cast<Address>(chunk);
+ const Address area_start =
+ start +
+ MemoryChunkLayout::ObjectStartOffsetInMemoryChunk(owner->identity());
+ const Address area_end = start + size;
+ // Pooled pages are always regular data pages.
+ DCHECK_NE(CODE_SPACE, owner->identity());
+ VirtualMemory reservation(data_page_allocator(), start, size);
+ if (!CommitMemory(&reservation)) return nullptr;
+ if (Heap::ShouldZapGarbage()) {
+ ZapBlock(start, size, kZapValue);
+ }
+ BasicMemoryChunk* basic_chunk =
+ BasicMemoryChunk::Initialize(isolate_->heap(), start, size, area_start,
+ area_end, owner, std::move(reservation));
+ MemoryChunk::Initialize(basic_chunk, isolate_->heap(), NOT_EXECUTABLE);
+ size_ += size;
+ return chunk;
+}
+
+void MemoryAllocator::ZapBlock(Address start, size_t size,
+ uintptr_t zap_value) {
+ DCHECK(IsAligned(start, kTaggedSize));
+ DCHECK(IsAligned(size, kTaggedSize));
+ MemsetTagged(ObjectSlot(start), Object(static_cast<Address>(zap_value)),
+ size >> kTaggedSizeLog2);
+}
+
+intptr_t MemoryAllocator::GetCommitPageSize() {
+ if (FLAG_v8_os_page_size != 0) {
+ DCHECK(base::bits::IsPowerOfTwo(FLAG_v8_os_page_size));
+ return FLAG_v8_os_page_size * KB;
+ } else {
+ return CommitPageSize();
+ }
+}
+
+base::AddressRegion MemoryAllocator::ComputeDiscardMemoryArea(Address addr,
+ size_t size) {
+ size_t page_size = MemoryAllocator::GetCommitPageSize();
+ if (size < page_size + FreeSpace::kSize) {
+ return base::AddressRegion(0, 0);
+ }
+ Address discardable_start = RoundUp(addr + FreeSpace::kSize, page_size);
+ Address discardable_end = RoundDown(addr + size, page_size);
+ if (discardable_start >= discardable_end) return base::AddressRegion(0, 0);
+ return base::AddressRegion(discardable_start,
+ discardable_end - discardable_start);
+}
+
+bool MemoryAllocator::CommitExecutableMemory(VirtualMemory* vm, Address start,
+ size_t commit_size,
+ size_t reserved_size) {
+ const size_t page_size = GetCommitPageSize();
+ // All addresses and sizes must be aligned to the commit page size.
+ DCHECK(IsAligned(start, page_size));
+ DCHECK_EQ(0, commit_size % page_size);
+ DCHECK_EQ(0, reserved_size % page_size);
+ const size_t guard_size = MemoryChunkLayout::CodePageGuardSize();
+ const size_t pre_guard_offset = MemoryChunkLayout::CodePageGuardStartOffset();
+ const size_t code_area_offset =
+ MemoryChunkLayout::ObjectStartOffsetInCodePage();
+ // reserved_size includes two guard regions, commit_size does not.
+ DCHECK_LE(commit_size, reserved_size - 2 * guard_size);
+ const Address pre_guard_page = start + pre_guard_offset;
+ const Address code_area = start + code_area_offset;
+ const Address post_guard_page = start + reserved_size - guard_size;
+ // Commit the non-executable header, from start to pre-code guard page.
+ if (vm->SetPermissions(start, pre_guard_offset, PageAllocator::kReadWrite)) {
+ // Create the pre-code guard page, following the header.
+ if (vm->SetPermissions(pre_guard_page, page_size,
+ PageAllocator::kNoAccess)) {
+ // Commit the executable code body.
+ if (vm->SetPermissions(code_area, commit_size - pre_guard_offset,
+ PageAllocator::kReadWrite)) {
+ // Create the post-code guard page.
+ if (vm->SetPermissions(post_guard_page, page_size,
+ PageAllocator::kNoAccess)) {
+ UpdateAllocatedSpaceLimits(start, code_area + commit_size);
+ return true;
+ }
+ vm->SetPermissions(code_area, commit_size, PageAllocator::kNoAccess);
+ }
+ }
+ vm->SetPermissions(start, pre_guard_offset, PageAllocator::kNoAccess);
+ }
+ return false;
+}
+
+} // namespace internal
+} // namespace v8
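ComputeDiscardMemoryArea keeps the FreeSpace header readable and only ever discards whole commit pages, which is why it rounds the start up and the end down. A standalone sketch of that rounding, assuming a 4 KiB commit page and a hypothetical 16-byte header in place of FreeSpace::kSize; the real constants come from V8 and may differ.

#include <cinttypes>
#include <cstdint>
#include <cstdio>

int main() {
  const std::uint64_t kPageSize = 4096;       // assumed commit page size
  const std::uint64_t kFreeSpaceHeader = 16;  // stand-in for FreeSpace::kSize
  const std::uint64_t addr = 0x10008;
  const std::uint64_t size = 3 * kPageSize;
  // Round the start up so the header stays readable, and the end down so
  // only whole OS pages are discarded.
  const std::uint64_t start =
      (addr + kFreeSpaceHeader + kPageSize - 1) / kPageSize * kPageSize;
  const std::uint64_t end = (addr + size) / kPageSize * kPageSize;
  // Prints: discardable: [0x11000, 0x13000)
  std::printf("discardable: [0x%" PRIx64 ", 0x%" PRIx64 ")\n", start, end);
  return 0;
}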
diff --git a/chromium/v8/src/heap/memory-allocator.h b/chromium/v8/src/heap/memory-allocator.h
new file mode 100644
index 00000000000..558e11aa02e
--- /dev/null
+++ b/chromium/v8/src/heap/memory-allocator.h
@@ -0,0 +1,451 @@
+// Copyright 2020 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_HEAP_MEMORY_ALLOCATOR_H_
+#define V8_HEAP_MEMORY_ALLOCATOR_H_
+
+#include <atomic>
+#include <memory>
+#include <unordered_map>
+#include <unordered_set>
+#include <vector>
+
+#include "src/base/bounded-page-allocator.h"
+#include "src/base/export-template.h"
+#include "src/base/macros.h"
+#include "src/base/platform/mutex.h"
+#include "src/base/platform/semaphore.h"
+#include "src/heap/heap.h"
+#include "src/heap/memory-chunk.h"
+#include "src/heap/spaces.h"
+#include "src/tasks/cancelable-task.h"
+#include "src/utils/allocation.h"
+
+namespace v8 {
+namespace internal {
+
+class Heap;
+class Isolate;
+class ReadOnlyPage;
+
+// The process-wide singleton that keeps track of code range regions with the
+// intention to reuse free code range regions as a workaround for CFG memory
+// leaks (see crbug.com/870054).
+class CodeRangeAddressHint {
+ public:
+ // Returns the most recently freed code range start address for the given
+ // size. If there is no such entry, then a random address is returned.
+ V8_EXPORT_PRIVATE Address GetAddressHint(size_t code_range_size);
+
+ V8_EXPORT_PRIVATE void NotifyFreedCodeRange(Address code_range_start,
+ size_t code_range_size);
+
+ private:
+ base::Mutex mutex_;
+ // A map from code range size to an array of recently freed code range
+ // addresses. There should be O(1) different code range sizes.
+ // The length of each array is limited by the peak number of code ranges,
+ // which should also be O(1).
+ std::unordered_map<size_t, std::vector<Address>> recently_freed_;
+};
+
+// ----------------------------------------------------------------------------
+// A space acquires chunks of memory from the operating system. The memory
+// allocator allocates and deallocates pages for the paged heap spaces and large
+// pages for large object space.
+class MemoryAllocator {
+ public:
+ // Unmapper takes care of concurrently unmapping and uncommitting memory
+ // chunks.
+ class Unmapper {
+ public:
+ class UnmapFreeMemoryTask;
+
+ Unmapper(Heap* heap, MemoryAllocator* allocator)
+ : heap_(heap),
+ allocator_(allocator),
+ pending_unmapping_tasks_semaphore_(0),
+ pending_unmapping_tasks_(0),
+ active_unmapping_tasks_(0) {
+ chunks_[kRegular].reserve(kReservedQueueingSlots);
+ chunks_[kPooled].reserve(kReservedQueueingSlots);
+ }
+
+ void AddMemoryChunkSafe(MemoryChunk* chunk) {
+ if (!chunk->IsLargePage() && chunk->executable() != EXECUTABLE) {
+ AddMemoryChunkSafe<kRegular>(chunk);
+ } else {
+ AddMemoryChunkSafe<kNonRegular>(chunk);
+ }
+ }
+
+ MemoryChunk* TryGetPooledMemoryChunkSafe() {
+ // Procedure:
+ // (1) Try to get a chunk that was declared as pooled and already has
+ // been uncommitted.
+ // (2) Try to steal any memory chunk of kPageSize that would've been
+ // unmapped.
+ MemoryChunk* chunk = GetMemoryChunkSafe<kPooled>();
+ if (chunk == nullptr) {
+ chunk = GetMemoryChunkSafe<kRegular>();
+ if (chunk != nullptr) {
+ // For stolen chunks we need to manually free any allocated memory.
+ chunk->ReleaseAllAllocatedMemory();
+ }
+ }
+ return chunk;
+ }
+
+ V8_EXPORT_PRIVATE void FreeQueuedChunks();
+ void CancelAndWaitForPendingTasks();
+ void PrepareForGC();
+ V8_EXPORT_PRIVATE void EnsureUnmappingCompleted();
+ V8_EXPORT_PRIVATE void TearDown();
+ size_t NumberOfCommittedChunks();
+ V8_EXPORT_PRIVATE int NumberOfChunks();
+ size_t CommittedBufferedMemory();
+
+ private:
+ static const int kReservedQueueingSlots = 64;
+ static const int kMaxUnmapperTasks = 4;
+
+ enum ChunkQueueType {
+ kRegular, // Pages of kPageSize that do not live in a CodeRange and
+ // can thus be used for stealing.
+ kNonRegular, // Large chunks and executable chunks.
+ kPooled, // Pooled chunks, already uncommitted and ready for reuse.
+ kNumberOfChunkQueues,
+ };
+
+ enum class FreeMode {
+ kUncommitPooled,
+ kReleasePooled,
+ };
+
+ template <ChunkQueueType type>
+ void AddMemoryChunkSafe(MemoryChunk* chunk) {
+ base::MutexGuard guard(&mutex_);
+ chunks_[type].push_back(chunk);
+ }
+
+ template <ChunkQueueType type>
+ MemoryChunk* GetMemoryChunkSafe() {
+ base::MutexGuard guard(&mutex_);
+ if (chunks_[type].empty()) return nullptr;
+ MemoryChunk* chunk = chunks_[type].back();
+ chunks_[type].pop_back();
+ return chunk;
+ }
+
+ bool MakeRoomForNewTasks();
+
+ template <FreeMode mode>
+ void PerformFreeMemoryOnQueuedChunks();
+
+ void PerformFreeMemoryOnQueuedNonRegularChunks();
+
+ Heap* const heap_;
+ MemoryAllocator* const allocator_;
+ base::Mutex mutex_;
+ std::vector<MemoryChunk*> chunks_[kNumberOfChunkQueues];
+ CancelableTaskManager::Id task_ids_[kMaxUnmapperTasks];
+ base::Semaphore pending_unmapping_tasks_semaphore_;
+ intptr_t pending_unmapping_tasks_;
+ std::atomic<intptr_t> active_unmapping_tasks_;
+
+ friend class MemoryAllocator;
+ };
+
+ enum AllocationMode {
+ kRegular,
+ kPooled,
+ };
+
+ enum FreeMode {
+ kFull,
+ kAlreadyPooled,
+ kPreFreeAndQueue,
+ kPooledAndQueue,
+ };
+
+ V8_EXPORT_PRIVATE static intptr_t GetCommitPageSize();
+
+ // Computes the memory area of discardable memory within a given memory area
+ // [addr, addr+size) and returns the result as a base::AddressRegion. If the
+ // memory is not discardable, an empty base::AddressRegion is returned.
+ V8_EXPORT_PRIVATE static base::AddressRegion ComputeDiscardMemoryArea(
+ Address addr, size_t size);
+
+ V8_EXPORT_PRIVATE MemoryAllocator(Isolate* isolate, size_t max_capacity,
+ size_t code_range_size);
+
+ V8_EXPORT_PRIVATE void TearDown();
+
+ // Allocates a Page from the allocator. AllocationMode is used to indicate
+ // whether pooled allocation, which only works for MemoryChunk::kPageSize,
+ // should be tried first.
+ template <MemoryAllocator::AllocationMode alloc_mode = kRegular,
+ typename SpaceType>
+ EXPORT_TEMPLATE_DECLARE(V8_EXPORT_PRIVATE)
+ Page* AllocatePage(size_t size, SpaceType* owner, Executability executable);
+
+ LargePage* AllocateLargePage(size_t size, LargeObjectSpace* owner,
+ Executability executable);
+
+ ReadOnlyPage* AllocateReadOnlyPage(size_t size, ReadOnlySpace* owner);
+
+ template <MemoryAllocator::FreeMode mode = kFull>
+ EXPORT_TEMPLATE_DECLARE(V8_EXPORT_PRIVATE)
+ void Free(MemoryChunk* chunk);
+ void FreeReadOnlyPage(ReadOnlyPage* chunk);
+
+ // Returns allocated spaces in bytes.
+ size_t Size() const { return size_; }
+
+ // Returns allocated executable spaces in bytes.
+ size_t SizeExecutable() const { return size_executable_; }
+
+ // Returns the maximum available bytes of heaps.
+ size_t Available() const {
+ const size_t size = Size();
+ return capacity_ < size ? 0 : capacity_ - size;
+ }
+
+ // Returns an indication of whether a pointer is in a space that has
+ // been allocated by this MemoryAllocator.
+ V8_INLINE bool IsOutsideAllocatedSpace(Address address) const {
+ return address < lowest_ever_allocated_ ||
+ address >= highest_ever_allocated_;
+ }
+
+ // Returns a BasicMemoryChunk in which the memory region from commit_area_size
+ // to reserve_area_size of the chunk area is reserved but not committed; it
+ // can be committed later by calling MemoryChunk::CommitArea.
+ V8_EXPORT_PRIVATE BasicMemoryChunk* AllocateBasicChunk(
+ size_t reserve_area_size, size_t commit_area_size,
+ Executability executable, BaseSpace* space);
+
+ // Returns a MemoryChunk in which the memory region from commit_area_size to
+ // reserve_area_size of the chunk area is reserved but not committed; it
+ // can be committed later by calling MemoryChunk::CommitArea.
+ V8_EXPORT_PRIVATE MemoryChunk* AllocateChunk(size_t reserve_area_size,
+ size_t commit_area_size,
+ Executability executable,
+ BaseSpace* space);
+
+ Address AllocateAlignedMemory(size_t reserve_size, size_t commit_size,
+ size_t alignment, Executability executable,
+ void* hint, VirtualMemory* controller);
+
+ void FreeMemory(v8::PageAllocator* page_allocator, Address addr, size_t size);
+
+ // Partially release |bytes_to_free| bytes starting at |start_free|. Note that
+ // internally memory is freed from |start_free| to the end of the reservation.
+ // Additional memory beyond the page is not accounted for, though, so
+ // |bytes_to_free| is computed by the caller.
+ void PartialFreeMemory(BasicMemoryChunk* chunk, Address start_free,
+ size_t bytes_to_free, Address new_area_end);
+
+ // Checks if an allocated MemoryChunk was intended to be used for executable
+ // memory.
+ bool IsMemoryChunkExecutable(MemoryChunk* chunk) {
+ return executable_memory_.find(chunk) != executable_memory_.end();
+ }
+
+ // Commit memory region owned by given reservation object. Returns true if
+ // it succeeded and false otherwise.
+ bool CommitMemory(VirtualMemory* reservation);
+
+ // Uncommit memory region owned by given reservation object. Returns true if
+ // it succeeded and false otherwise.
+ bool UncommitMemory(VirtualMemory* reservation);
+
+ // Zaps a contiguous block of memory [start..(start+size)[ with
+ // a given zap value.
+ void ZapBlock(Address start, size_t size, uintptr_t zap_value);
+
+ V8_WARN_UNUSED_RESULT bool CommitExecutableMemory(VirtualMemory* vm,
+ Address start,
+ size_t commit_size,
+ size_t reserved_size);
+
+ // Page allocator instance for allocating non-executable pages.
+ // Guaranteed to be a valid pointer.
+ v8::PageAllocator* data_page_allocator() { return data_page_allocator_; }
+
+ // Page allocator instance for allocating executable pages.
+ // Guaranteed to be a valid pointer.
+ v8::PageAllocator* code_page_allocator() { return code_page_allocator_; }
+
+ // Returns page allocator suitable for allocating pages with requested
+ // executability.
+ v8::PageAllocator* page_allocator(Executability executable) {
+ return executable == EXECUTABLE ? code_page_allocator_
+ : data_page_allocator_;
+ }
+
+ // A region of memory that may contain executable code including reserved
+ // OS page with read-write access in the beginning.
+ const base::AddressRegion& code_range() const {
+ // |code_range_| >= |optional RW pages| + |code_page_allocator_instance_|
+ DCHECK_IMPLIES(!code_range_.is_empty(), code_page_allocator_instance_);
+ DCHECK_IMPLIES(!code_range_.is_empty(),
+ code_range_.contains(code_page_allocator_instance_->begin(),
+ code_page_allocator_instance_->size()));
+ return code_range_;
+ }
+
+ Unmapper* unmapper() { return &unmapper_; }
+
+ // Performs all necessary bookkeeping to free the memory, but does not free
+ // it.
+ void UnregisterMemory(MemoryChunk* chunk);
+ void UnregisterMemory(BasicMemoryChunk* chunk,
+ Executability executable = NOT_EXECUTABLE);
+
+ private:
+ void InitializeCodePageAllocator(v8::PageAllocator* page_allocator,
+ size_t requested);
+
+ // PreFreeMemory logically frees the object, i.e., it unregisters the
+ // memory, logs a delete event and adds the chunk to remembered unmapped
+ // pages.
+ void PreFreeMemory(MemoryChunk* chunk);
+
+ // PerformFreeMemory can be called concurrently when PreFree was executed
+ // before.
+ void PerformFreeMemory(MemoryChunk* chunk);
+
+ // See AllocatePage for public interface. Note that currently we only
+ // support pools for NOT_EXECUTABLE pages of size MemoryChunk::kPageSize.
+ template <typename SpaceType>
+ MemoryChunk* AllocatePagePooled(SpaceType* owner);
+
+ // Initializes pages in a chunk. Returns the first page address.
+ // This function and GetChunkId() are provided for the mark-compact
+ // collector to rebuild page headers in the from space, which is
+ // used as a marking stack and has its page headers destroyed.
+ Page* InitializePagesInChunk(int chunk_id, int pages_in_chunk,
+ PagedSpace* owner);
+
+ void UpdateAllocatedSpaceLimits(Address low, Address high) {
+ // The use of atomic primitives does not guarantee correctness (wrt.
+ // desired semantics) by default. The loop here ensures that we update the
+ // values only if they did not change in between.
+ Address ptr = lowest_ever_allocated_.load(std::memory_order_relaxed);
+ while ((low < ptr) && !lowest_ever_allocated_.compare_exchange_weak(
+ ptr, low, std::memory_order_acq_rel)) {
+ }
+ ptr = highest_ever_allocated_.load(std::memory_order_relaxed);
+ while ((high > ptr) && !highest_ever_allocated_.compare_exchange_weak(
+ ptr, high, std::memory_order_acq_rel)) {
+ }
+ }
+
+ void RegisterExecutableMemoryChunk(MemoryChunk* chunk) {
+ base::MutexGuard guard(&executable_memory_mutex_);
+ DCHECK(chunk->IsFlagSet(MemoryChunk::IS_EXECUTABLE));
+ DCHECK_EQ(executable_memory_.find(chunk), executable_memory_.end());
+ executable_memory_.insert(chunk);
+ }
+
+ void UnregisterExecutableMemoryChunk(MemoryChunk* chunk) {
+ base::MutexGuard guard(&executable_memory_mutex_);
+ DCHECK_NE(executable_memory_.find(chunk), executable_memory_.end());
+ executable_memory_.erase(chunk);
+ chunk->heap()->UnregisterUnprotectedMemoryChunk(chunk);
+ }
+
+ Isolate* isolate_;
+
+ // This object controls virtual space reserved for code on the V8 heap. It
+ // is only valid on 64-bit architectures where kRequiresCodeRange is true.
+ VirtualMemory code_reservation_;
+
+ // Page allocator used for allocating data pages. Depending on the
+ // configuration it may be a page allocator instance provided by
+ // v8::Platform or a BoundedPageAllocator (when pointer compression is
+ // enabled).
+ v8::PageAllocator* data_page_allocator_;
+
+ // Page allocator used for allocating code pages. Depending on the
+ // configuration it may be a page allocator instance provided by
+ // v8::Platform or a BoundedPageAllocator (when pointer compression is
+ // enabled or on those 64-bit architectures where pc-relative 32-bit
+ // displacement can be used for call and jump instructions).
+ v8::PageAllocator* code_page_allocator_;
+
+ // A part of the |code_reservation_| that may contain executable code,
+ // including a reserved page with read-write access at the beginning.
+ // See details below.
+ base::AddressRegion code_range_;
+
+ // This unique pointer owns the instance of the bounded code allocator
+ // that controls allocation of executable pages. It does not control the
+ // optional page at the beginning of the |code_range_|.
+ // Summarizing, the following conditions hold:
+ // 1) |code_reservation_| >= |code_range_|
+ // 2) |code_range_| >= |optional RW pages| + |code_page_allocator_instance_|
+ // 3) |code_reservation_| is AllocatePageSize()-aligned
+ // 4) |code_page_allocator_instance_| is MemoryChunk::kAlignment-aligned
+ // 5) |code_range_| is CommitPageSize()-aligned
+ std::unique_ptr<base::BoundedPageAllocator> code_page_allocator_instance_;
+
+ // Maximum space size in bytes.
+ size_t capacity_;
+
+ // Allocated space size in bytes.
+ std::atomic<size_t> size_;
+ // Allocated executable space size in bytes.
+ std::atomic<size_t> size_executable_;
+
+ // We keep the lowest and highest addresses allocated as a quick way
+ // of determining that pointers are outside the heap. The estimate is
+ // conservative, i.e. not all addresses in 'allocated' space are allocated
+ // to our heap. The range is [lowest, highest), inclusive on the low end
+ // and exclusive on the high end.
+ std::atomic<Address> lowest_ever_allocated_;
+ std::atomic<Address> highest_ever_allocated_;
+
+ VirtualMemory last_chunk_;
+ Unmapper unmapper_;
+
+ // Data structure to remember allocated executable memory chunks.
+ std::unordered_set<MemoryChunk*> executable_memory_;
+ base::Mutex executable_memory_mutex_;
+
+ friend class heap::TestCodePageAllocatorScope;
+ friend class heap::TestMemoryAllocatorScope;
+
+ DISALLOW_IMPLICIT_CONSTRUCTORS(MemoryAllocator);
+};
+
+extern template EXPORT_TEMPLATE_DECLARE(V8_EXPORT_PRIVATE)
+ Page* MemoryAllocator::AllocatePage<MemoryAllocator::kRegular, PagedSpace>(
+ size_t size, PagedSpace* owner, Executability executable);
+extern template EXPORT_TEMPLATE_DECLARE(V8_EXPORT_PRIVATE)
+ Page* MemoryAllocator::AllocatePage<MemoryAllocator::kRegular, SemiSpace>(
+ size_t size, SemiSpace* owner, Executability executable);
+extern template EXPORT_TEMPLATE_DECLARE(V8_EXPORT_PRIVATE)
+ Page* MemoryAllocator::AllocatePage<MemoryAllocator::kPooled, SemiSpace>(
+ size_t size, SemiSpace* owner, Executability executable);
+
+extern template EXPORT_TEMPLATE_DECLARE(
+ V8_EXPORT_PRIVATE) void MemoryAllocator::
+ Free<MemoryAllocator::kFull>(MemoryChunk* chunk);
+extern template EXPORT_TEMPLATE_DECLARE(
+ V8_EXPORT_PRIVATE) void MemoryAllocator::
+ Free<MemoryAllocator::kAlreadyPooled>(MemoryChunk* chunk);
+extern template EXPORT_TEMPLATE_DECLARE(
+ V8_EXPORT_PRIVATE) void MemoryAllocator::
+ Free<MemoryAllocator::kPreFreeAndQueue>(MemoryChunk* chunk);
+extern template EXPORT_TEMPLATE_DECLARE(
+ V8_EXPORT_PRIVATE) void MemoryAllocator::
+ Free<MemoryAllocator::kPooledAndQueue>(MemoryChunk* chunk);
+
+} // namespace internal
+} // namespace v8
+
+#endif // V8_HEAP_MEMORY_ALLOCATOR_H_
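
The UpdateAllocatedSpaceLimits() loop together with lowest_/highest_ever_allocated_ above implements a conservative "may this address belong to the heap" test. A minimal standalone sketch of the same idea, using plain std::atomic and illustrative names only (not the V8 API):

#include <atomic>
#include <cstdint>
#include <limits>

class AddressBounds {
 public:
  // Conservative membership test: false means the address was certainly
  // never handed out; true may also cover unallocated gaps in the range.
  bool MayContain(uintptr_t addr) const {
    return addr >= lowest_.load(std::memory_order_relaxed) &&
           addr < highest_.load(std::memory_order_relaxed);
  }

  // Widens [lowest_, highest_) to include [low, high). The CAS loops only
  // ever move the bounds outward, retrying if another thread raced us.
  void Update(uintptr_t low, uintptr_t high) {
    uintptr_t cur = lowest_.load(std::memory_order_relaxed);
    while (low < cur && !lowest_.compare_exchange_weak(
                            cur, low, std::memory_order_acq_rel)) {
    }
    cur = highest_.load(std::memory_order_relaxed);
    while (high > cur && !highest_.compare_exchange_weak(
                             cur, high, std::memory_order_acq_rel)) {
    }
  }

 private:
  std::atomic<uintptr_t> lowest_{std::numeric_limits<uintptr_t>::max()};
  std::atomic<uintptr_t> highest_{0};
};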
diff --git a/chromium/v8/src/heap/memory-chunk.cc b/chromium/v8/src/heap/memory-chunk.cc
index 865e6f1a72b..4e10719fc3c 100644
--- a/chromium/v8/src/heap/memory-chunk.cc
+++ b/chromium/v8/src/heap/memory-chunk.cc
@@ -4,8 +4,13 @@
#include "src/heap/memory-chunk.h"
+#include "src/base/platform/platform.h"
+#include "src/heap/array-buffer-tracker.h"
+#include "src/heap/code-object-registry.h"
+#include "src/heap/memory-allocator.h"
#include "src/heap/memory-chunk-inl.h"
#include "src/heap/spaces.h"
+#include "src/objects/heap-object.h"
namespace v8 {
namespace internal {
@@ -77,14 +82,6 @@ size_t MemoryChunkLayout::AllocatableMemoryInMemoryChunk(
return AllocatableMemoryInDataPage();
}
-#ifdef THREAD_SANITIZER
-void MemoryChunk::SynchronizedHeapLoad() {
- CHECK(reinterpret_cast<Heap*>(base::Acquire_Load(
- reinterpret_cast<base::AtomicWord*>(&heap_))) != nullptr ||
- InReadOnlySpace());
-}
-#endif
-
void MemoryChunk::InitializationMemoryFence() {
base::SeqCst_MemoryFence();
#ifdef THREAD_SANITIZER
@@ -153,5 +150,299 @@ void MemoryChunk::SetReadAndWritable() {
}
}
+namespace {
+
+PageAllocator::Permission DefaultWritableCodePermissions() {
+ return FLAG_jitless ? PageAllocator::kReadWrite
+ : PageAllocator::kReadWriteExecute;
+}
+
+} // namespace
+
+MemoryChunk* MemoryChunk::Initialize(BasicMemoryChunk* basic_chunk, Heap* heap,
+ Executability executable) {
+ MemoryChunk* chunk = static_cast<MemoryChunk*>(basic_chunk);
+
+ base::AsAtomicPointer::Release_Store(&chunk->slot_set_[OLD_TO_NEW], nullptr);
+ base::AsAtomicPointer::Release_Store(&chunk->slot_set_[OLD_TO_OLD], nullptr);
+ base::AsAtomicPointer::Release_Store(&chunk->sweeping_slot_set_, nullptr);
+ base::AsAtomicPointer::Release_Store(&chunk->typed_slot_set_[OLD_TO_NEW],
+ nullptr);
+ base::AsAtomicPointer::Release_Store(&chunk->typed_slot_set_[OLD_TO_OLD],
+ nullptr);
+ chunk->invalidated_slots_[OLD_TO_NEW] = nullptr;
+ chunk->invalidated_slots_[OLD_TO_OLD] = nullptr;
+ chunk->progress_bar_ = 0;
+ chunk->set_concurrent_sweeping_state(ConcurrentSweepingState::kDone);
+ chunk->page_protection_change_mutex_ = new base::Mutex();
+ chunk->write_unprotect_counter_ = 0;
+ chunk->mutex_ = new base::Mutex();
+ chunk->young_generation_bitmap_ = nullptr;
+ chunk->local_tracker_ = nullptr;
+
+ chunk->external_backing_store_bytes_[ExternalBackingStoreType::kArrayBuffer] =
+ 0;
+ chunk->external_backing_store_bytes_
+ [ExternalBackingStoreType::kExternalString] = 0;
+
+ chunk->categories_ = nullptr;
+
+ heap->incremental_marking()->non_atomic_marking_state()->SetLiveBytes(chunk,
+ 0);
+ if (executable == EXECUTABLE) {
+ chunk->SetFlag(IS_EXECUTABLE);
+ if (heap->write_protect_code_memory()) {
+ chunk->write_unprotect_counter_ =
+ heap->code_space_memory_modification_scope_depth();
+ } else {
+ size_t page_size = MemoryAllocator::GetCommitPageSize();
+ DCHECK(IsAligned(chunk->area_start(), page_size));
+ size_t area_size =
+ RoundUp(chunk->area_end() - chunk->area_start(), page_size);
+ CHECK(chunk->reservation_.SetPermissions(
+ chunk->area_start(), area_size, DefaultWritableCodePermissions()));
+ }
+ }
+
+ if (chunk->owner()->identity() == CODE_SPACE) {
+ chunk->code_object_registry_ = new CodeObjectRegistry();
+ } else {
+ chunk->code_object_registry_ = nullptr;
+ }
+
+ chunk->possibly_empty_buckets_.Initialize();
+
+ return chunk;
+}
+
+size_t MemoryChunk::CommittedPhysicalMemory() {
+ if (!base::OS::HasLazyCommits() || owner_identity() == LO_SPACE)
+ return size();
+ return high_water_mark_;
+}
+
+void MemoryChunk::SetOldGenerationPageFlags(bool is_marking) {
+ if (is_marking) {
+ SetFlag(MemoryChunk::POINTERS_TO_HERE_ARE_INTERESTING);
+ SetFlag(MemoryChunk::POINTERS_FROM_HERE_ARE_INTERESTING);
+ SetFlag(MemoryChunk::INCREMENTAL_MARKING);
+ } else {
+ ClearFlag(MemoryChunk::POINTERS_TO_HERE_ARE_INTERESTING);
+ SetFlag(MemoryChunk::POINTERS_FROM_HERE_ARE_INTERESTING);
+ ClearFlag(MemoryChunk::INCREMENTAL_MARKING);
+ }
+}
+
+void MemoryChunk::SetYoungGenerationPageFlags(bool is_marking) {
+ SetFlag(MemoryChunk::POINTERS_TO_HERE_ARE_INTERESTING);
+ if (is_marking) {
+ SetFlag(MemoryChunk::POINTERS_FROM_HERE_ARE_INTERESTING);
+ SetFlag(MemoryChunk::INCREMENTAL_MARKING);
+ } else {
+ ClearFlag(MemoryChunk::POINTERS_FROM_HERE_ARE_INTERESTING);
+ ClearFlag(MemoryChunk::INCREMENTAL_MARKING);
+ }
+}
+// -----------------------------------------------------------------------------
+// MemoryChunk implementation
+
+void MemoryChunk::ReleaseAllocatedMemoryNeededForWritableChunk() {
+ if (mutex_ != nullptr) {
+ delete mutex_;
+ mutex_ = nullptr;
+ }
+ if (page_protection_change_mutex_ != nullptr) {
+ delete page_protection_change_mutex_;
+ page_protection_change_mutex_ = nullptr;
+ }
+ if (code_object_registry_ != nullptr) {
+ delete code_object_registry_;
+ code_object_registry_ = nullptr;
+ }
+
+ possibly_empty_buckets_.Release();
+ ReleaseSlotSet<OLD_TO_NEW>();
+ ReleaseSweepingSlotSet();
+ ReleaseSlotSet<OLD_TO_OLD>();
+ ReleaseTypedSlotSet<OLD_TO_NEW>();
+ ReleaseTypedSlotSet<OLD_TO_OLD>();
+ ReleaseInvalidatedSlots<OLD_TO_NEW>();
+ ReleaseInvalidatedSlots<OLD_TO_OLD>();
+
+ if (local_tracker_ != nullptr) ReleaseLocalTracker();
+ if (young_generation_bitmap_ != nullptr) ReleaseYoungGenerationBitmap();
+
+ if (!IsLargePage()) {
+ Page* page = static_cast<Page*>(this);
+ page->ReleaseFreeListCategories();
+ }
+}
+
+void MemoryChunk::ReleaseAllAllocatedMemory() {
+ ReleaseAllocatedMemoryNeededForWritableChunk();
+ if (marking_bitmap_ != nullptr) ReleaseMarkingBitmap();
+}
+
+template V8_EXPORT_PRIVATE SlotSet* MemoryChunk::AllocateSlotSet<OLD_TO_NEW>();
+template V8_EXPORT_PRIVATE SlotSet* MemoryChunk::AllocateSlotSet<OLD_TO_OLD>();
+
+template <RememberedSetType type>
+SlotSet* MemoryChunk::AllocateSlotSet() {
+ return AllocateSlotSet(&slot_set_[type]);
+}
+
+SlotSet* MemoryChunk::AllocateSweepingSlotSet() {
+ return AllocateSlotSet(&sweeping_slot_set_);
+}
+
+SlotSet* MemoryChunk::AllocateSlotSet(SlotSet** slot_set) {
+ SlotSet* new_slot_set = SlotSet::Allocate(buckets());
+ SlotSet* old_slot_set = base::AsAtomicPointer::AcquireRelease_CompareAndSwap(
+ slot_set, nullptr, new_slot_set);
+ if (old_slot_set != nullptr) {
+ SlotSet::Delete(new_slot_set, buckets());
+ new_slot_set = old_slot_set;
+ }
+ DCHECK(new_slot_set);
+ return new_slot_set;
+}
+
+template void MemoryChunk::ReleaseSlotSet<OLD_TO_NEW>();
+template void MemoryChunk::ReleaseSlotSet<OLD_TO_OLD>();
+
+template <RememberedSetType type>
+void MemoryChunk::ReleaseSlotSet() {
+ ReleaseSlotSet(&slot_set_[type]);
+}
+
+void MemoryChunk::ReleaseSweepingSlotSet() {
+ ReleaseSlotSet(&sweeping_slot_set_);
+}
+
+void MemoryChunk::ReleaseSlotSet(SlotSet** slot_set) {
+ if (*slot_set) {
+ SlotSet::Delete(*slot_set, buckets());
+ *slot_set = nullptr;
+ }
+}
+
+template TypedSlotSet* MemoryChunk::AllocateTypedSlotSet<OLD_TO_NEW>();
+template TypedSlotSet* MemoryChunk::AllocateTypedSlotSet<OLD_TO_OLD>();
+
+template <RememberedSetType type>
+TypedSlotSet* MemoryChunk::AllocateTypedSlotSet() {
+ TypedSlotSet* typed_slot_set = new TypedSlotSet(address());
+ TypedSlotSet* old_value = base::AsAtomicPointer::Release_CompareAndSwap(
+ &typed_slot_set_[type], nullptr, typed_slot_set);
+ if (old_value != nullptr) {
+ delete typed_slot_set;
+ typed_slot_set = old_value;
+ }
+ DCHECK(typed_slot_set);
+ return typed_slot_set;
+}
+
+template void MemoryChunk::ReleaseTypedSlotSet<OLD_TO_NEW>();
+template void MemoryChunk::ReleaseTypedSlotSet<OLD_TO_OLD>();
+
+template <RememberedSetType type>
+void MemoryChunk::ReleaseTypedSlotSet() {
+ TypedSlotSet* typed_slot_set = typed_slot_set_[type];
+ if (typed_slot_set) {
+ typed_slot_set_[type] = nullptr;
+ delete typed_slot_set;
+ }
+}
+
+template InvalidatedSlots* MemoryChunk::AllocateInvalidatedSlots<OLD_TO_NEW>();
+template InvalidatedSlots* MemoryChunk::AllocateInvalidatedSlots<OLD_TO_OLD>();
+
+template <RememberedSetType type>
+InvalidatedSlots* MemoryChunk::AllocateInvalidatedSlots() {
+ DCHECK_NULL(invalidated_slots_[type]);
+ invalidated_slots_[type] = new InvalidatedSlots();
+ return invalidated_slots_[type];
+}
+
+template void MemoryChunk::ReleaseInvalidatedSlots<OLD_TO_NEW>();
+template void MemoryChunk::ReleaseInvalidatedSlots<OLD_TO_OLD>();
+
+template <RememberedSetType type>
+void MemoryChunk::ReleaseInvalidatedSlots() {
+ if (invalidated_slots_[type]) {
+ delete invalidated_slots_[type];
+ invalidated_slots_[type] = nullptr;
+ }
+}
+
+template V8_EXPORT_PRIVATE void
+MemoryChunk::RegisterObjectWithInvalidatedSlots<OLD_TO_NEW>(HeapObject object);
+template V8_EXPORT_PRIVATE void
+MemoryChunk::RegisterObjectWithInvalidatedSlots<OLD_TO_OLD>(HeapObject object);
+
+template <RememberedSetType type>
+void MemoryChunk::RegisterObjectWithInvalidatedSlots(HeapObject object) {
+ bool skip_slot_recording;
+
+ if (type == OLD_TO_NEW) {
+ skip_slot_recording = InYoungGeneration();
+ } else {
+ skip_slot_recording = ShouldSkipEvacuationSlotRecording();
+ }
+
+ if (skip_slot_recording) {
+ return;
+ }
+
+ if (invalidated_slots<type>() == nullptr) {
+ AllocateInvalidatedSlots<type>();
+ }
+
+ invalidated_slots<type>()->insert(object);
+}
+
+void MemoryChunk::InvalidateRecordedSlots(HeapObject object) {
+ if (V8_DISABLE_WRITE_BARRIERS_BOOL) return;
+ if (heap()->incremental_marking()->IsCompacting()) {
+ // We cannot check slot_set_[OLD_TO_OLD] here, since concurrent
+ // markers might be inserting slots at the same time.
+ RegisterObjectWithInvalidatedSlots<OLD_TO_OLD>(object);
+ }
+
+ if (!FLAG_always_promote_young_mc || slot_set_[OLD_TO_NEW] != nullptr)
+ RegisterObjectWithInvalidatedSlots<OLD_TO_NEW>(object);
+}
+
+template bool MemoryChunk::RegisteredObjectWithInvalidatedSlots<OLD_TO_NEW>(
+ HeapObject object);
+template bool MemoryChunk::RegisteredObjectWithInvalidatedSlots<OLD_TO_OLD>(
+ HeapObject object);
+
+template <RememberedSetType type>
+bool MemoryChunk::RegisteredObjectWithInvalidatedSlots(HeapObject object) {
+ if (invalidated_slots<type>() == nullptr) {
+ return false;
+ }
+ return invalidated_slots<type>()->find(object) !=
+ invalidated_slots<type>()->end();
+}
+
+void MemoryChunk::ReleaseLocalTracker() {
+ DCHECK_NOT_NULL(local_tracker_);
+ delete local_tracker_;
+ local_tracker_ = nullptr;
+}
+
+void MemoryChunk::AllocateYoungGenerationBitmap() {
+ DCHECK_NULL(young_generation_bitmap_);
+ young_generation_bitmap_ = static_cast<Bitmap*>(calloc(1, Bitmap::kSize));
+}
+
+void MemoryChunk::ReleaseYoungGenerationBitmap() {
+ DCHECK_NOT_NULL(young_generation_bitmap_);
+ free(young_generation_bitmap_);
+ young_generation_bitmap_ = nullptr;
+}
+
} // namespace internal
} // namespace v8
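
AllocateSlotSet() and AllocateTypedSlotSet() above follow the same lock-free lazy-initialization pattern: allocate a fresh object, try to publish it with a compare-and-swap, and if another thread won the race, delete the local copy and adopt the published one. A self-contained sketch of that pattern with std::atomic (illustrative helper, not part of V8):

#include <atomic>
#include <utility>

template <typename T, typename... Args>
T* LazyPublish(std::atomic<T*>* slot, Args&&... args) {
  T* fresh = new T(std::forward<Args>(args)...);
  T* expected = nullptr;
  if (!slot->compare_exchange_strong(expected, fresh,
                                     std::memory_order_acq_rel,
                                     std::memory_order_acquire)) {
    delete fresh;     // Lost the race: another thread already published.
    return expected;  // `expected` was updated to the published pointer.
  }
  return fresh;       // Won the race: our object is now visible to others.
}

Callers share whichever pointer ends up stored, so exactly one allocation survives per slot regardless of how many threads raced.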
diff --git a/chromium/v8/src/heap/memory-chunk.h b/chromium/v8/src/heap/memory-chunk.h
index 4381a229ab2..3fffbcb7d7b 100644
--- a/chromium/v8/src/heap/memory-chunk.h
+++ b/chromium/v8/src/heap/memory-chunk.h
@@ -5,14 +5,17 @@
#ifndef V8_HEAP_MEMORY_CHUNK_H_
#define V8_HEAP_MEMORY_CHUNK_H_
-#include <set>
-#include <vector>
+#include <atomic>
#include "src/base/macros.h"
+#include "src/base/platform/mutex.h"
+#include "src/common/globals.h"
#include "src/heap/basic-memory-chunk.h"
#include "src/heap/heap.h"
#include "src/heap/invalidated-slots.h"
#include "src/heap/list.h"
+#include "src/heap/marking.h"
+#include "src/heap/slot-set.h"
namespace v8 {
namespace internal {
@@ -34,36 +37,18 @@ class V8_EXPORT_PRIVATE MemoryChunkLayout {
static size_t AllocatableMemoryInMemoryChunk(AllocationSpace space);
};
+enum RememberedSetType {
+ OLD_TO_NEW,
+ OLD_TO_OLD,
+ NUMBER_OF_REMEMBERED_SET_TYPES
+};
+
// MemoryChunk represents a memory region owned by a specific space.
// It is divided into the header and the body. Chunk start is always
// 1MB aligned. Start of the body is aligned so it can accommodate
// any heap object.
class MemoryChunk : public BasicMemoryChunk {
public:
- // Use with std data structures.
- struct Hasher {
- size_t operator()(MemoryChunk* const chunk) const {
- return reinterpret_cast<size_t>(chunk) >> kPageSizeBits;
- }
- };
-
- using Flags = uintptr_t;
-
- static const Flags kPointersToHereAreInterestingMask =
- POINTERS_TO_HERE_ARE_INTERESTING;
-
- static const Flags kPointersFromHereAreInterestingMask =
- POINTERS_FROM_HERE_ARE_INTERESTING;
-
- static const Flags kEvacuationCandidateMask = EVACUATION_CANDIDATE;
-
- static const Flags kIsInYoungGenerationMask = FROM_PAGE | TO_PAGE;
-
- static const Flags kIsLargePageMask = LARGE_PAGE;
-
- static const Flags kSkipEvacuationSlotsRecordingMask =
- kEvacuationCandidateMask | kIsInYoungGenerationMask;
-
// |kDone|: The page state when sweeping is complete or sweeping must not be
// performed on that page. Sweeper threads that are done with their work
// will set this value and not touch the page anymore.
@@ -76,17 +61,15 @@ class MemoryChunk : public BasicMemoryChunk {
};
static const size_t kHeaderSize =
- BasicMemoryChunk::kHeaderSize // Parent size.
- + 3 * kSystemPointerSize // VirtualMemory reservation_
- + kSystemPointerSize // Address owner_
- + kSizetSize // size_t progress_bar_
- + kIntptrSize // intptr_t live_byte_count_
- + kSystemPointerSize // SlotSet* sweeping_slot_set_
+ BasicMemoryChunk::kHeaderSize // Parent size.
+ + kSystemPointerSize * NUMBER_OF_REMEMBERED_SET_TYPES // SlotSet* array
+ + kSizetSize // size_t progress_bar_
+ + kIntptrSize // intptr_t live_byte_count_
+ + kSystemPointerSize // SlotSet* sweeping_slot_set_
+ kSystemPointerSize *
NUMBER_OF_REMEMBERED_SET_TYPES // TypedSlotSet* array
+ kSystemPointerSize *
NUMBER_OF_REMEMBERED_SET_TYPES // InvalidatedSlots* array
- + kSystemPointerSize // std::atomic<intptr_t> high_water_mark_
+ kSystemPointerSize // base::Mutex* mutex_
+ kSystemPointerSize // std::atomic<ConcurrentSweepingState>
// concurrent_sweeping_
@@ -94,8 +77,6 @@ class MemoryChunk : public BasicMemoryChunk {
+ kSystemPointerSize // uintptr_t write_unprotect_counter_
+ kSizetSize * ExternalBackingStoreType::kNumTypes
// std::atomic<size_t> external_backing_store_bytes_
- + kSizetSize // size_t allocated_bytes_
- + kSizetSize // size_t wasted_memory_
+ kSystemPointerSize * 2 // heap::ListNode
+ kSystemPointerSize // FreeListCategory** categories_
+ kSystemPointerSize // LocalArrayBufferTracker* local_tracker_
@@ -104,6 +85,8 @@ class MemoryChunk : public BasicMemoryChunk {
+ kSystemPointerSize // CodeObjectRegistry* code_object_registry_
+ kSystemPointerSize; // PossiblyEmptyBuckets possibly_empty_buckets_
+ static const intptr_t kOldToNewSlotSetOffset = BasicMemoryChunk::kHeaderSize;
+
// Page size in bytes. This must be a multiple of the OS page size.
static const int kPageSize = 1 << kPageSizeBits;
@@ -112,32 +95,30 @@ class MemoryChunk : public BasicMemoryChunk {
// Only works if the pointer is in the first kPageSize of the MemoryChunk.
static MemoryChunk* FromAddress(Address a) {
- DCHECK(!V8_ENABLE_THIRD_PARTY_HEAP_BOOL);
- return reinterpret_cast<MemoryChunk*>(BaseAddress(a));
+ return cast(BasicMemoryChunk::FromAddress(a));
}
+
// Only works if the object is in the first kPageSize of the MemoryChunk.
static MemoryChunk* FromHeapObject(HeapObject o) {
DCHECK(!V8_ENABLE_THIRD_PARTY_HEAP_BOOL);
- return reinterpret_cast<MemoryChunk*>(BaseAddress(o.ptr()));
+ return cast(BasicMemoryChunk::FromHeapObject(o));
}
- void SetOldGenerationPageFlags(bool is_marking);
- void SetYoungGenerationPageFlags(bool is_marking);
+ static MemoryChunk* cast(BasicMemoryChunk* chunk) {
+ SLOW_DCHECK(!chunk->InReadOnlySpace());
+ return static_cast<MemoryChunk*>(chunk);
+ }
- static inline void UpdateHighWaterMark(Address mark) {
- if (mark == kNullAddress) return;
- // Need to subtract one from the mark because when a chunk is full the
- // top points to the next address after the chunk, which effectively belongs
- // to another chunk. See the comment to Page::FromAllocationAreaAddress.
- MemoryChunk* chunk = MemoryChunk::FromAddress(mark - 1);
- intptr_t new_mark = static_cast<intptr_t>(mark - chunk->address());
- intptr_t old_mark = chunk->high_water_mark_.load(std::memory_order_relaxed);
- while ((new_mark > old_mark) &&
- !chunk->high_water_mark_.compare_exchange_weak(
- old_mark, new_mark, std::memory_order_acq_rel)) {
- }
+ static const MemoryChunk* cast(const BasicMemoryChunk* chunk) {
+ SLOW_DCHECK(!chunk->InReadOnlySpace());
+ return static_cast<const MemoryChunk*>(chunk);
}
+ size_t buckets() const { return SlotSet::BucketsForSize(size()); }
+
+ void SetOldGenerationPageFlags(bool is_marking);
+ void SetYoungGenerationPageFlags(bool is_marking);
+
static inline void MoveExternalBackingStoreBytes(
ExternalBackingStoreType type, MemoryChunk* from, MemoryChunk* to,
size_t amount);
@@ -158,18 +139,6 @@ class MemoryChunk : public BasicMemoryChunk {
return concurrent_sweeping_ == ConcurrentSweepingState::kDone;
}
- inline Heap* heap() const {
- DCHECK_NOT_NULL(heap_);
- return heap_;
- }
-
-#ifdef THREAD_SANITIZER
- // Perform a dummy acquire load to tell TSAN that there is no data race in
- // mark-bit initialization. See MemoryChunk::Initialize for the corresponding
- // release store.
- void SynchronizedHeapLoad();
-#endif
-
template <RememberedSetType type>
bool ContainsSlots() {
return slot_set<type>() != nullptr || typed_slot_set<type>() != nullptr ||
@@ -237,8 +206,6 @@ class MemoryChunk : public BasicMemoryChunk {
// Approximate amount of physical memory committed for this chunk.
V8_EXPORT_PRIVATE size_t CommittedPhysicalMemory();
- Address HighWaterMark() { return address() + high_water_mark_; }
-
size_t ProgressBar() {
DCHECK(IsFlagSet<AccessMode::ATOMIC>(HAS_PROGRESS_BAR));
return progress_bar_.load(std::memory_order_acquire);
@@ -266,64 +233,8 @@ class MemoryChunk : public BasicMemoryChunk {
return external_backing_store_bytes_[type];
}
- // Some callers rely on the fact that this can operate on both
- // tagged and aligned object addresses.
- inline uint32_t AddressToMarkbitIndex(Address addr) const {
- return static_cast<uint32_t>(addr - this->address()) >> kTaggedSizeLog2;
- }
-
- inline Address MarkbitIndexToAddress(uint32_t index) const {
- return this->address() + (index << kTaggedSizeLog2);
- }
-
- bool NeverEvacuate() { return IsFlagSet(NEVER_EVACUATE); }
-
- void MarkNeverEvacuate() { SetFlag(NEVER_EVACUATE); }
-
- bool CanAllocate() {
- return !IsEvacuationCandidate() && !IsFlagSet(NEVER_ALLOCATE_ON_PAGE);
- }
-
- template <AccessMode access_mode = AccessMode::NON_ATOMIC>
- bool IsEvacuationCandidate() {
- DCHECK(!(IsFlagSet<access_mode>(NEVER_EVACUATE) &&
- IsFlagSet<access_mode>(EVACUATION_CANDIDATE)));
- return IsFlagSet<access_mode>(EVACUATION_CANDIDATE);
- }
-
- template <AccessMode access_mode = AccessMode::NON_ATOMIC>
- bool ShouldSkipEvacuationSlotRecording() {
- uintptr_t flags = GetFlags<access_mode>();
- return ((flags & kSkipEvacuationSlotsRecordingMask) != 0) &&
- ((flags & COMPACTION_WAS_ABORTED) == 0);
- }
-
- Executability executable() {
- return IsFlagSet(IS_EXECUTABLE) ? EXECUTABLE : NOT_EXECUTABLE;
- }
-
- bool IsFromPage() const { return IsFlagSet(FROM_PAGE); }
- bool IsToPage() const { return IsFlagSet(TO_PAGE); }
- bool IsLargePage() const { return IsFlagSet(LARGE_PAGE); }
- bool InYoungGeneration() const {
- return (GetFlags() & kIsInYoungGenerationMask) != 0;
- }
- bool InNewSpace() const { return InYoungGeneration() && !IsLargePage(); }
- bool InNewLargeObjectSpace() const {
- return InYoungGeneration() && IsLargePage();
- }
- bool InOldSpace() const;
- V8_EXPORT_PRIVATE bool InLargeObjectSpace() const;
-
- // Gets the chunk's owner or null if the space has been detached.
- Space* owner() const { return owner_; }
-
- void set_owner(Space* space) { owner_ = space; }
-
- bool IsWritable() const {
- // If this is a read-only space chunk but heap_ is non-null, it has not yet
- // been sealed and can be written to.
- return !InReadOnlySpace() || heap_ != nullptr;
+ Space* owner() const {
+ return reinterpret_cast<Space*>(BasicMemoryChunk::owner());
}
// Gets the chunk's allocation space, potentially dealing with a null owner_
@@ -347,6 +258,7 @@ class MemoryChunk : public BasicMemoryChunk {
}
heap::ListNode<MemoryChunk>& list_node() { return list_node_; }
+ const heap::ListNode<MemoryChunk>& list_node() const { return list_node_; }
CodeObjectRegistry* GetCodeObjectRegistry() { return code_object_registry_; }
@@ -359,10 +271,8 @@ class MemoryChunk : public BasicMemoryChunk {
void ReleaseAllocatedMemoryNeededForWritableChunk();
protected:
- static MemoryChunk* Initialize(Heap* heap, Address base, size_t size,
- Address area_start, Address area_end,
- Executability executable, Space* owner,
- VirtualMemory reservation);
+ static MemoryChunk* Initialize(BasicMemoryChunk* basic_chunk, Heap* heap,
+ Executability executable);
// Release all memory allocated by the chunk. Should be called when memory
// chunk is about to be freed.
@@ -373,30 +283,22 @@ class MemoryChunk : public BasicMemoryChunk {
void DecrementWriteUnprotectCounterAndMaybeSetPermissions(
PageAllocator::Permission permission);
- VirtualMemory* reserved_memory() { return &reservation_; }
-
- template <AccessMode mode>
- ConcurrentBitmap<mode>* marking_bitmap() const {
- return reinterpret_cast<ConcurrentBitmap<mode>*>(marking_bitmap_);
- }
-
template <AccessMode mode>
ConcurrentBitmap<mode>* young_generation_bitmap() const {
return reinterpret_cast<ConcurrentBitmap<mode>*>(young_generation_bitmap_);
}
- // If the chunk needs to remember its memory reservation, it is stored here.
- VirtualMemory reservation_;
-
- // The space owning this memory chunk.
- std::atomic<Space*> owner_;
+ // A single slot set for small pages (of size kPageSize) or an array of slot
+ // sets for large pages. In the latter case the number of entries in the
+ // array is ceil(size() / kPageSize).
+ SlotSet* slot_set_[NUMBER_OF_REMEMBERED_SET_TYPES];
// Used by the incremental marker to keep track of the scanning progress in
// large objects that have a progress bar and are scanned in increments.
std::atomic<size_t> progress_bar_;
// Count of bytes marked black on page.
- intptr_t live_byte_count_;
+ std::atomic<intptr_t> live_byte_count_;
// A single slot set for small pages (of size kPageSize) or an array of slot
// set for large pages. In the latter case the number of entries in the array
@@ -405,10 +307,6 @@ class MemoryChunk : public BasicMemoryChunk {
TypedSlotSet* typed_slot_set_[NUMBER_OF_REMEMBERED_SET_TYPES];
InvalidatedSlots* invalidated_slots_[NUMBER_OF_REMEMBERED_SET_TYPES];
- // Assuming the initial allocation on a page is sequential,
- // count highest number of bytes ever allocated on the page.
- std::atomic<intptr_t> high_water_mark_;
-
base::Mutex* mutex_;
std::atomic<ConcurrentSweepingState> concurrent_sweeping_;
@@ -429,16 +327,9 @@ class MemoryChunk : public BasicMemoryChunk {
// counter.
uintptr_t write_unprotect_counter_;
- // Byte allocated on the page, which includes all objects on the page
- // and the linear allocation area.
- size_t allocated_bytes_;
-
// Tracks off-heap memory used by this memory chunk.
std::atomic<size_t> external_backing_store_bytes_[kNumTypes];
- // Freed memory that was not added to the free list.
- size_t wasted_memory_;
-
heap::ListNode<MemoryChunk> list_node_;
FreeListCategory** categories_;
@@ -453,8 +344,6 @@ class MemoryChunk : public BasicMemoryChunk {
PossiblyEmptyBuckets possibly_empty_buckets_;
private:
- void InitializeReservedMemory() { reservation_.Reset(); }
-
friend class ConcurrentMarkingState;
friend class MajorMarkingState;
friend class MajorAtomicMarkingState;
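
kHeaderSize above is a hand-maintained sum of field sizes. A common way to keep such a constant honest is a static_assert against the real layout; a toy illustration with made-up fields (not the actual MemoryChunk header, where parent-class fields and alignment make the bookkeeping more involved):

#include <cstddef>
#include <cstdint>

struct ToyChunkHeader {
  void* slot_sets[2];        // e.g. one slot set per remembered-set type
  size_t progress_bar;
  intptr_t live_byte_count;
};

constexpr size_t kToyHeaderSize = 2 * sizeof(void*)   // slot_sets
                                  + sizeof(size_t)    // progress_bar
                                  + sizeof(intptr_t); // live_byte_count

static_assert(kToyHeaderSize == sizeof(ToyChunkHeader),
              "update kToyHeaderSize whenever the layout changes");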
diff --git a/chromium/v8/src/heap/memory-measurement.cc b/chromium/v8/src/heap/memory-measurement.cc
index 2a59ac5a4d1..e3661da45ab 100644
--- a/chromium/v8/src/heap/memory-measurement.cc
+++ b/chromium/v8/src/heap/memory-measurement.cc
@@ -95,7 +95,7 @@ class V8_EXPORT_PRIVATE MeasureMemoryDelegate
public:
MeasureMemoryDelegate(Isolate* isolate, Handle<NativeContext> context,
Handle<JSPromise> promise, v8::MeasureMemoryMode mode);
- ~MeasureMemoryDelegate();
+ ~MeasureMemoryDelegate() override;
// v8::MeasureMemoryDelegate overrides:
bool ShouldMeasure(v8::Local<v8::Context> context) override;
@@ -165,7 +165,12 @@ void MeasureMemoryDelegate::MeasurementComplete(
JSPromise::Resolve(promise_, result).ToHandleChecked();
}
-MemoryMeasurement::MemoryMeasurement(Isolate* isolate) : isolate_(isolate) {}
+MemoryMeasurement::MemoryMeasurement(Isolate* isolate)
+ : isolate_(isolate), random_number_generator_() {
+ if (FLAG_random_seed) {
+ random_number_generator_.SetSeed(FLAG_random_seed);
+ }
+}
bool MemoryMeasurement::EnqueueRequest(
std::unique_ptr<v8::MeasureMemoryDelegate> delegate,
@@ -286,10 +291,15 @@ void MemoryMeasurement::ScheduleGCTask(v8::MeasureMemoryExecution execution) {
if (execution == v8::MeasureMemoryExecution::kEager) {
taskrunner->PostTask(std::move(task));
} else {
- taskrunner->PostDelayedTask(std::move(task), kGCTaskDelayInSeconds);
+ taskrunner->PostDelayedTask(std::move(task), NextGCTaskDelayInSeconds());
}
}
+int MemoryMeasurement::NextGCTaskDelayInSeconds() {
+ return kGCTaskDelayInSeconds +
+ random_number_generator_.NextInt(kGCTaskDelayInSeconds);
+}
+
void MemoryMeasurement::ReportResults() {
while (!done_.empty()) {
Request request = std::move(done_.front());
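
NextGCTaskDelayInSeconds() above adds a random offset of up to kGCTaskDelayInSeconds on top of the base delay, so delayed GC tasks are spread out over roughly [kGCTaskDelayInSeconds, 2*kGCTaskDelayInSeconds) instead of all firing after the same fixed delay. A small sketch of the same jitter with the standard library (the base value is illustrative, not the real constant):

#include <random>

// Base delay plus a uniform offset in [0, base_delay_s).
int NextDelaySeconds(std::mt19937& rng, int base_delay_s) {
  std::uniform_int_distribution<int> jitter(0, base_delay_s - 1);
  return base_delay_s + jitter(rng);
}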
diff --git a/chromium/v8/src/heap/memory-measurement.h b/chromium/v8/src/heap/memory-measurement.h
index d72dd1eba97..e71bdc1cfe8 100644
--- a/chromium/v8/src/heap/memory-measurement.h
+++ b/chromium/v8/src/heap/memory-measurement.h
@@ -9,6 +9,7 @@
#include <unordered_map>
#include "src/base/platform/elapsed-timer.h"
+#include "src/base/utils/random-number-generator.h"
#include "src/common/globals.h"
#include "src/objects/contexts.h"
#include "src/objects/map.h"
@@ -49,6 +50,7 @@ class MemoryMeasurement {
bool IsGCTaskPending(v8::MeasureMemoryExecution execution);
void SetGCTaskPending(v8::MeasureMemoryExecution execution);
void SetGCTaskDone(v8::MeasureMemoryExecution execution);
+ int NextGCTaskDelayInSeconds();
std::list<Request> received_;
std::list<Request> processing_;
@@ -57,6 +59,7 @@ class MemoryMeasurement {
bool reporting_task_pending_ = false;
bool delayed_gc_task_pending_ = false;
bool eager_gc_task_pending_ = false;
+ base::RandomNumberGenerator random_number_generator_;
};
// Infers the native context for some of the heap objects.
diff --git a/chromium/v8/src/heap/new-spaces-inl.h b/chromium/v8/src/heap/new-spaces-inl.h
new file mode 100644
index 00000000000..8020c0dfddb
--- /dev/null
+++ b/chromium/v8/src/heap/new-spaces-inl.h
@@ -0,0 +1,179 @@
+// Copyright 2020 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_HEAP_NEW_SPACES_INL_H_
+#define V8_HEAP_NEW_SPACES_INL_H_
+
+#include "src/heap/new-spaces.h"
+#include "src/heap/spaces-inl.h"
+#include "src/objects/tagged-impl.h"
+#include "src/sanitizer/msan.h"
+
+namespace v8 {
+namespace internal {
+
+// -----------------------------------------------------------------------------
+// SemiSpace
+
+bool SemiSpace::Contains(HeapObject o) const {
+ BasicMemoryChunk* memory_chunk = BasicMemoryChunk::FromHeapObject(o);
+ if (memory_chunk->IsLargePage()) return false;
+ return id_ == kToSpace ? memory_chunk->IsToPage()
+ : memory_chunk->IsFromPage();
+}
+
+bool SemiSpace::Contains(Object o) const {
+ return o.IsHeapObject() && Contains(HeapObject::cast(o));
+}
+
+bool SemiSpace::ContainsSlow(Address a) const {
+ for (const Page* p : *this) {
+ if (p == BasicMemoryChunk::FromAddress(a)) return true;
+ }
+ return false;
+}
+
+// --------------------------------------------------------------------------
+// NewSpace
+
+bool NewSpace::Contains(Object o) const {
+ return o.IsHeapObject() && Contains(HeapObject::cast(o));
+}
+
+bool NewSpace::Contains(HeapObject o) const {
+ return BasicMemoryChunk::FromHeapObject(o)->InNewSpace();
+}
+
+bool NewSpace::ContainsSlow(Address a) const {
+ return from_space_.ContainsSlow(a) || to_space_.ContainsSlow(a);
+}
+
+bool NewSpace::ToSpaceContainsSlow(Address a) const {
+ return to_space_.ContainsSlow(a);
+}
+
+bool NewSpace::ToSpaceContains(Object o) const { return to_space_.Contains(o); }
+bool NewSpace::FromSpaceContains(Object o) const {
+ return from_space_.Contains(o);
+}
+
+// -----------------------------------------------------------------------------
+// SemiSpaceObjectIterator
+
+HeapObject SemiSpaceObjectIterator::Next() {
+ while (current_ != limit_) {
+ if (Page::IsAlignedToPageSize(current_)) {
+ Page* page = Page::FromAllocationAreaAddress(current_);
+ page = page->next_page();
+ DCHECK(page);
+ current_ = page->area_start();
+ if (current_ == limit_) return HeapObject();
+ }
+ HeapObject object = HeapObject::FromAddress(current_);
+ current_ += object.Size();
+ if (!object.IsFreeSpaceOrFiller()) {
+ return object;
+ }
+ }
+ return HeapObject();
+}
+
+// -----------------------------------------------------------------------------
+// NewSpace
+
+AllocationResult NewSpace::AllocateRawAligned(int size_in_bytes,
+ AllocationAlignment alignment,
+ AllocationOrigin origin) {
+ Address top = allocation_info_.top();
+ int filler_size = Heap::GetFillToAlign(top, alignment);
+ int aligned_size_in_bytes = size_in_bytes + filler_size;
+
+ if (allocation_info_.limit() - top <
+ static_cast<uintptr_t>(aligned_size_in_bytes)) {
+ // See if we can create room.
+ if (!EnsureAllocation(size_in_bytes, alignment)) {
+ return AllocationResult::Retry();
+ }
+
+ top = allocation_info_.top();
+ filler_size = Heap::GetFillToAlign(top, alignment);
+ aligned_size_in_bytes = size_in_bytes + filler_size;
+ }
+
+ HeapObject obj = HeapObject::FromAddress(top);
+ allocation_info_.set_top(top + aligned_size_in_bytes);
+ DCHECK_SEMISPACE_ALLOCATION_INFO(allocation_info_, to_space_);
+
+ if (filler_size > 0) {
+ obj = Heap::PrecedeWithFiller(ReadOnlyRoots(heap()), obj, filler_size);
+ }
+
+ MSAN_ALLOCATED_UNINITIALIZED_MEMORY(obj.address(), size_in_bytes);
+
+ if (FLAG_trace_allocations_origins) {
+ UpdateAllocationOrigins(origin);
+ }
+
+ return obj;
+}
+
+AllocationResult NewSpace::AllocateRawUnaligned(int size_in_bytes,
+ AllocationOrigin origin) {
+ Address top = allocation_info_.top();
+ if (allocation_info_.limit() < top + size_in_bytes) {
+ // See if we can create room.
+ if (!EnsureAllocation(size_in_bytes, kWordAligned)) {
+ return AllocationResult::Retry();
+ }
+
+ top = allocation_info_.top();
+ }
+
+ HeapObject obj = HeapObject::FromAddress(top);
+ allocation_info_.set_top(top + size_in_bytes);
+ DCHECK_SEMISPACE_ALLOCATION_INFO(allocation_info_, to_space_);
+
+ MSAN_ALLOCATED_UNINITIALIZED_MEMORY(obj.address(), size_in_bytes);
+
+ if (FLAG_trace_allocations_origins) {
+ UpdateAllocationOrigins(origin);
+ }
+
+ return obj;
+}
+
+AllocationResult NewSpace::AllocateRaw(int size_in_bytes,
+ AllocationAlignment alignment,
+ AllocationOrigin origin) {
+ if (top() < top_on_previous_step_) {
+ // Generated code decreased the top() pointer to do folded allocations
+ DCHECK_EQ(Page::FromAllocationAreaAddress(top()),
+ Page::FromAllocationAreaAddress(top_on_previous_step_));
+ top_on_previous_step_ = top();
+ }
+#ifdef V8_HOST_ARCH_32_BIT
+ return alignment != kWordAligned
+ ? AllocateRawAligned(size_in_bytes, alignment, origin)
+ : AllocateRawUnaligned(size_in_bytes, origin);
+#else
+#ifdef V8_COMPRESS_POINTERS
+ // TODO(ishell, v8:8875): Consider using aligned allocations once the
+ // allocation alignment inconsistency is fixed. For now we keep using
+ // unaligned access since both x64 and arm64 architectures (where pointer
+ // compression is supported) allow unaligned access to doubles and full words.
+#endif // V8_COMPRESS_POINTERS
+ return AllocateRawUnaligned(size_in_bytes, origin);
+#endif
+}
+
+V8_WARN_UNUSED_RESULT inline AllocationResult NewSpace::AllocateRawSynchronized(
+ int size_in_bytes, AllocationAlignment alignment, AllocationOrigin origin) {
+ base::MutexGuard guard(&mutex_);
+ return AllocateRaw(size_in_bytes, alignment, origin);
+}
+
+} // namespace internal
+} // namespace v8
+
+#endif // V8_HEAP_NEW_SPACES_INL_H_
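
NewSpace::AllocateRawAligned()/AllocateRawUnaligned() above are bump-pointer allocators: compute the filler needed for alignment, check against the limit, then advance top. A simplified standalone sketch of that shape (power-of-two alignment assumed; the real code installs a filler object in the gap and can retry after a GC, both omitted here):

#include <cstddef>
#include <cstdint>

struct LinearArea {
  uintptr_t top;    // next free address
  uintptr_t limit;  // end of the current allocatable region
};

// Returns the aligned object address, or 0 if the area is exhausted.
uintptr_t AllocateAligned(LinearArea* area, size_t size, size_t alignment) {
  // Bytes needed to bring `top` up to the requested alignment.
  uintptr_t filler =
      (alignment - (area->top & (alignment - 1))) & (alignment - 1);
  if (area->limit - area->top < filler + size) return 0;
  uintptr_t object = area->top + filler;  // object starts after the filler
  area->top = object + size;              // bump the allocation pointer
  return object;
}

For example, with top == 0x1004 and alignment == 8 the filler is 4 bytes and the object starts at 0x1008.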
diff --git a/chromium/v8/src/heap/new-spaces.cc b/chromium/v8/src/heap/new-spaces.cc
new file mode 100644
index 00000000000..4b4b04a1111
--- /dev/null
+++ b/chromium/v8/src/heap/new-spaces.cc
@@ -0,0 +1,653 @@
+// Copyright 2020 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/heap/new-spaces.h"
+
+#include "src/heap/array-buffer-sweeper.h"
+#include "src/heap/array-buffer-tracker-inl.h"
+#include "src/heap/heap-inl.h"
+#include "src/heap/incremental-marking.h"
+#include "src/heap/mark-compact.h"
+#include "src/heap/memory-allocator.h"
+#include "src/heap/paged-spaces.h"
+#include "src/heap/spaces-inl.h"
+
+namespace v8 {
+namespace internal {
+
+Page* SemiSpace::InitializePage(MemoryChunk* chunk) {
+ bool in_to_space = (id() != kFromSpace);
+ chunk->SetFlag(in_to_space ? MemoryChunk::TO_PAGE : MemoryChunk::FROM_PAGE);
+ Page* page = static_cast<Page*>(chunk);
+ page->SetYoungGenerationPageFlags(heap()->incremental_marking()->IsMarking());
+ page->AllocateLocalTracker();
+ page->list_node().Initialize();
+#ifdef ENABLE_MINOR_MC
+ if (FLAG_minor_mc) {
+ page->AllocateYoungGenerationBitmap();
+ heap()
+ ->minor_mark_compact_collector()
+ ->non_atomic_marking_state()
+ ->ClearLiveness(page);
+ }
+#endif // ENABLE_MINOR_MC
+ page->InitializationMemoryFence();
+ return page;
+}
+
+bool SemiSpace::EnsureCurrentCapacity() {
+ if (is_committed()) {
+ const int expected_pages =
+ static_cast<int>(current_capacity_ / Page::kPageSize);
+ MemoryChunk* current_page = first_page();
+ int actual_pages = 0;
+
+ // First iterate through the page list, up to |expected_pages| entries, if
+ // that many pages exist.
+ while (current_page != nullptr && actual_pages < expected_pages) {
+ actual_pages++;
+ current_page = current_page->list_node().next();
+ }
+
+ // Free all overallocated pages which are behind current_page.
+ while (current_page) {
+ MemoryChunk* next_current = current_page->list_node().next();
+ memory_chunk_list_.Remove(current_page);
+ // Clear new space flags to avoid this page being treated as a new
+ // space page that is potentially being swept.
+ current_page->SetFlags(0, Page::kIsInYoungGenerationMask);
+ heap()->memory_allocator()->Free<MemoryAllocator::kPooledAndQueue>(
+ current_page);
+ current_page = next_current;
+ }
+
+ // Add more pages if we have less than expected_pages.
+ IncrementalMarking::NonAtomicMarkingState* marking_state =
+ heap()->incremental_marking()->non_atomic_marking_state();
+ while (actual_pages < expected_pages) {
+ actual_pages++;
+ current_page =
+ heap()->memory_allocator()->AllocatePage<MemoryAllocator::kPooled>(
+ MemoryChunkLayout::AllocatableMemoryInDataPage(), this,
+ NOT_EXECUTABLE);
+ if (current_page == nullptr) return false;
+ DCHECK_NOT_NULL(current_page);
+ memory_chunk_list_.PushBack(current_page);
+ marking_state->ClearLiveness(current_page);
+ current_page->SetFlags(first_page()->GetFlags(),
+ static_cast<uintptr_t>(Page::kCopyAllFlags));
+ heap()->CreateFillerObjectAt(current_page->area_start(),
+ static_cast<int>(current_page->area_size()),
+ ClearRecordedSlots::kNo);
+ }
+ }
+ return true;
+}
+
+// -----------------------------------------------------------------------------
+// SemiSpace implementation
+
+void SemiSpace::SetUp(size_t initial_capacity, size_t maximum_capacity) {
+ DCHECK_GE(maximum_capacity, static_cast<size_t>(Page::kPageSize));
+ minimum_capacity_ = RoundDown(initial_capacity, Page::kPageSize);
+ current_capacity_ = minimum_capacity_;
+ maximum_capacity_ = RoundDown(maximum_capacity, Page::kPageSize);
+ committed_ = false;
+}
+
+void SemiSpace::TearDown() {
+ // Properly uncommit memory to keep the allocator counters in sync.
+ if (is_committed()) {
+ Uncommit();
+ }
+ current_capacity_ = maximum_capacity_ = 0;
+}
+
+bool SemiSpace::Commit() {
+ DCHECK(!is_committed());
+ const int num_pages = static_cast<int>(current_capacity_ / Page::kPageSize);
+ for (int pages_added = 0; pages_added < num_pages; pages_added++) {
+ // Pages in the new spaces can be moved to the old space by the full
+ // collector. Therefore, they must be initialized with the same FreeList as
+ // old pages.
+ Page* new_page =
+ heap()->memory_allocator()->AllocatePage<MemoryAllocator::kPooled>(
+ MemoryChunkLayout::AllocatableMemoryInDataPage(), this,
+ NOT_EXECUTABLE);
+ if (new_page == nullptr) {
+ if (pages_added) RewindPages(pages_added);
+ return false;
+ }
+ memory_chunk_list_.PushBack(new_page);
+ }
+ Reset();
+ AccountCommitted(current_capacity_);
+ if (age_mark_ == kNullAddress) {
+ age_mark_ = first_page()->area_start();
+ }
+ committed_ = true;
+ return true;
+}
+
+bool SemiSpace::Uncommit() {
+ DCHECK(is_committed());
+ while (!memory_chunk_list_.Empty()) {
+ MemoryChunk* chunk = memory_chunk_list_.front();
+ memory_chunk_list_.Remove(chunk);
+ heap()->memory_allocator()->Free<MemoryAllocator::kPooledAndQueue>(chunk);
+ }
+ current_page_ = nullptr;
+ AccountUncommitted(current_capacity_);
+ committed_ = false;
+ heap()->memory_allocator()->unmapper()->FreeQueuedChunks();
+ return true;
+}
+
+size_t SemiSpace::CommittedPhysicalMemory() {
+ if (!is_committed()) return 0;
+ size_t size = 0;
+ for (Page* p : *this) {
+ size += p->CommittedPhysicalMemory();
+ }
+ return size;
+}
+
+bool SemiSpace::GrowTo(size_t new_capacity) {
+ if (!is_committed()) {
+ if (!Commit()) return false;
+ }
+ DCHECK_EQ(new_capacity & kPageAlignmentMask, 0u);
+ DCHECK_LE(new_capacity, maximum_capacity_);
+ DCHECK_GT(new_capacity, current_capacity_);
+ const size_t delta = new_capacity - current_capacity_;
+ DCHECK(IsAligned(delta, AllocatePageSize()));
+ const int delta_pages = static_cast<int>(delta / Page::kPageSize);
+ DCHECK(last_page());
+ IncrementalMarking::NonAtomicMarkingState* marking_state =
+ heap()->incremental_marking()->non_atomic_marking_state();
+ for (int pages_added = 0; pages_added < delta_pages; pages_added++) {
+ Page* new_page =
+ heap()->memory_allocator()->AllocatePage<MemoryAllocator::kPooled>(
+ MemoryChunkLayout::AllocatableMemoryInDataPage(), this,
+ NOT_EXECUTABLE);
+ if (new_page == nullptr) {
+ if (pages_added) RewindPages(pages_added);
+ return false;
+ }
+ memory_chunk_list_.PushBack(new_page);
+ marking_state->ClearLiveness(new_page);
+ // Duplicate the flags that were set on the old page.
+ new_page->SetFlags(last_page()->GetFlags(), Page::kCopyOnFlipFlagsMask);
+ }
+ AccountCommitted(delta);
+ current_capacity_ = new_capacity;
+ return true;
+}
+
+void SemiSpace::RewindPages(int num_pages) {
+ DCHECK_GT(num_pages, 0);
+ DCHECK(last_page());
+ while (num_pages > 0) {
+ MemoryChunk* last = last_page();
+ memory_chunk_list_.Remove(last);
+ heap()->memory_allocator()->Free<MemoryAllocator::kPooledAndQueue>(last);
+ num_pages--;
+ }
+}
+
+bool SemiSpace::ShrinkTo(size_t new_capacity) {
+ DCHECK_EQ(new_capacity & kPageAlignmentMask, 0u);
+ DCHECK_GE(new_capacity, minimum_capacity_);
+ DCHECK_LT(new_capacity, current_capacity_);
+ if (is_committed()) {
+ const size_t delta = current_capacity_ - new_capacity;
+ DCHECK(IsAligned(delta, Page::kPageSize));
+ int delta_pages = static_cast<int>(delta / Page::kPageSize);
+ RewindPages(delta_pages);
+ AccountUncommitted(delta);
+ heap()->memory_allocator()->unmapper()->FreeQueuedChunks();
+ }
+ current_capacity_ = new_capacity;
+ return true;
+}
+
+void SemiSpace::FixPagesFlags(intptr_t flags, intptr_t mask) {
+ for (Page* page : *this) {
+ page->set_owner(this);
+ page->SetFlags(flags, mask);
+ if (id_ == kToSpace) {
+ page->ClearFlag(MemoryChunk::FROM_PAGE);
+ page->SetFlag(MemoryChunk::TO_PAGE);
+ page->ClearFlag(MemoryChunk::NEW_SPACE_BELOW_AGE_MARK);
+ heap()->incremental_marking()->non_atomic_marking_state()->SetLiveBytes(
+ page, 0);
+ } else {
+ page->SetFlag(MemoryChunk::FROM_PAGE);
+ page->ClearFlag(MemoryChunk::TO_PAGE);
+ }
+ DCHECK(page->InYoungGeneration());
+ }
+}
+
+void SemiSpace::Reset() {
+ DCHECK(first_page());
+ DCHECK(last_page());
+ current_page_ = first_page();
+ pages_used_ = 0;
+}
+
+void SemiSpace::RemovePage(Page* page) {
+ if (current_page_ == page) {
+ if (page->prev_page()) {
+ current_page_ = page->prev_page();
+ }
+ }
+ memory_chunk_list_.Remove(page);
+ for (size_t i = 0; i < ExternalBackingStoreType::kNumTypes; i++) {
+ ExternalBackingStoreType t = static_cast<ExternalBackingStoreType>(i);
+ DecrementExternalBackingStoreBytes(t, page->ExternalBackingStoreBytes(t));
+ }
+}
+
+void SemiSpace::PrependPage(Page* page) {
+ page->SetFlags(current_page()->GetFlags(),
+ static_cast<uintptr_t>(Page::kCopyAllFlags));
+ page->set_owner(this);
+ memory_chunk_list_.PushFront(page);
+ pages_used_++;
+ for (size_t i = 0; i < ExternalBackingStoreType::kNumTypes; i++) {
+ ExternalBackingStoreType t = static_cast<ExternalBackingStoreType>(i);
+ IncrementExternalBackingStoreBytes(t, page->ExternalBackingStoreBytes(t));
+ }
+}
+
+void SemiSpace::Swap(SemiSpace* from, SemiSpace* to) {
+ // We won't be swapping semispaces without data in them.
+ DCHECK(from->first_page());
+ DCHECK(to->first_page());
+
+ intptr_t saved_to_space_flags = to->current_page()->GetFlags();
+
+ // We swap all properties but id_.
+ std::swap(from->current_capacity_, to->current_capacity_);
+ std::swap(from->maximum_capacity_, to->maximum_capacity_);
+ std::swap(from->minimum_capacity_, to->minimum_capacity_);
+ std::swap(from->age_mark_, to->age_mark_);
+ std::swap(from->committed_, to->committed_);
+ std::swap(from->memory_chunk_list_, to->memory_chunk_list_);
+ std::swap(from->current_page_, to->current_page_);
+ std::swap(from->external_backing_store_bytes_,
+ to->external_backing_store_bytes_);
+
+ to->FixPagesFlags(saved_to_space_flags, Page::kCopyOnFlipFlagsMask);
+ from->FixPagesFlags(0, 0);
+}
+
+void SemiSpace::set_age_mark(Address mark) {
+ DCHECK_EQ(Page::FromAllocationAreaAddress(mark)->owner(), this);
+ age_mark_ = mark;
+ // Mark all pages up to the one containing mark.
+ for (Page* p : PageRange(space_start(), mark)) {
+ p->SetFlag(MemoryChunk::NEW_SPACE_BELOW_AGE_MARK);
+ }
+}
+
+std::unique_ptr<ObjectIterator> SemiSpace::GetObjectIterator(Heap* heap) {
+ // Use NewSpace::GetObjectIterator() (a SemiSpaceObjectIterator) to iterate
+ // the to-space.
+ UNREACHABLE();
+}
+
+#ifdef DEBUG
+void SemiSpace::Print() {}
+#endif
+
+#ifdef VERIFY_HEAP
+void SemiSpace::Verify() {
+ bool is_from_space = (id_ == kFromSpace);
+ size_t external_backing_store_bytes[kNumTypes];
+
+ for (int i = 0; i < kNumTypes; i++) {
+ external_backing_store_bytes[static_cast<ExternalBackingStoreType>(i)] = 0;
+ }
+
+ for (Page* page : *this) {
+ CHECK_EQ(page->owner(), this);
+ CHECK(page->InNewSpace());
+ CHECK(page->IsFlagSet(is_from_space ? MemoryChunk::FROM_PAGE
+ : MemoryChunk::TO_PAGE));
+ CHECK(!page->IsFlagSet(is_from_space ? MemoryChunk::TO_PAGE
+ : MemoryChunk::FROM_PAGE));
+ CHECK(page->IsFlagSet(MemoryChunk::POINTERS_TO_HERE_ARE_INTERESTING));
+ if (!is_from_space) {
+ // The pointers-from-here-are-interesting flag isn't updated dynamically
+ // on from-space pages, so it might be out of sync with the marking state.
+ if (page->heap()->incremental_marking()->IsMarking()) {
+ CHECK(page->IsFlagSet(MemoryChunk::POINTERS_FROM_HERE_ARE_INTERESTING));
+ } else {
+ CHECK(
+ !page->IsFlagSet(MemoryChunk::POINTERS_FROM_HERE_ARE_INTERESTING));
+ }
+ }
+ for (int i = 0; i < kNumTypes; i++) {
+ ExternalBackingStoreType t = static_cast<ExternalBackingStoreType>(i);
+ external_backing_store_bytes[t] += page->ExternalBackingStoreBytes(t);
+ }
+
+ CHECK_IMPLIES(page->list_node().prev(),
+ page->list_node().prev()->list_node().next() == page);
+ }
+ for (int i = 0; i < kNumTypes; i++) {
+ ExternalBackingStoreType t = static_cast<ExternalBackingStoreType>(i);
+ CHECK_EQ(external_backing_store_bytes[t], ExternalBackingStoreBytes(t));
+ }
+}
+#endif
+
+#ifdef DEBUG
+void SemiSpace::AssertValidRange(Address start, Address end) {
+ // Addresses must belong to the same semi-space.
+ Page* page = Page::FromAllocationAreaAddress(start);
+ Page* end_page = Page::FromAllocationAreaAddress(end);
+ SemiSpace* space = reinterpret_cast<SemiSpace*>(page->owner());
+ DCHECK_EQ(space, end_page->owner());
+ // Start address is before end address, either on same page,
+ // or end address is on a later page in the linked list of
+ // semi-space pages.
+ if (page == end_page) {
+ DCHECK_LE(start, end);
+ } else {
+ while (page != end_page) {
+ page = page->next_page();
+ }
+ DCHECK(page);
+ }
+}
+#endif
+
+// -----------------------------------------------------------------------------
+// SemiSpaceObjectIterator implementation.
+
+SemiSpaceObjectIterator::SemiSpaceObjectIterator(NewSpace* space) {
+ Initialize(space->first_allocatable_address(), space->top());
+}
+
+void SemiSpaceObjectIterator::Initialize(Address start, Address end) {
+ SemiSpace::AssertValidRange(start, end);
+ current_ = start;
+ limit_ = end;
+}
+
+size_t NewSpace::CommittedPhysicalMemory() {
+ if (!base::OS::HasLazyCommits()) return CommittedMemory();
+ BasicMemoryChunk::UpdateHighWaterMark(allocation_info_.top());
+ size_t size = to_space_.CommittedPhysicalMemory();
+ if (from_space_.is_committed()) {
+ size += from_space_.CommittedPhysicalMemory();
+ }
+ return size;
+}
+
+// -----------------------------------------------------------------------------
+// NewSpace implementation
+
+NewSpace::NewSpace(Heap* heap, v8::PageAllocator* page_allocator,
+ size_t initial_semispace_capacity,
+ size_t max_semispace_capacity)
+ : SpaceWithLinearArea(heap, NEW_SPACE, new NoFreeList()),
+ to_space_(heap, kToSpace),
+ from_space_(heap, kFromSpace) {
+ DCHECK(initial_semispace_capacity <= max_semispace_capacity);
+
+ to_space_.SetUp(initial_semispace_capacity, max_semispace_capacity);
+ from_space_.SetUp(initial_semispace_capacity, max_semispace_capacity);
+ if (!to_space_.Commit()) {
+ V8::FatalProcessOutOfMemory(heap->isolate(), "New space setup");
+ }
+ DCHECK(!from_space_.is_committed()); // No need to use memory yet.
+ ResetLinearAllocationArea();
+}
+
+void NewSpace::TearDown() {
+ allocation_info_.Reset(kNullAddress, kNullAddress);
+
+ to_space_.TearDown();
+ from_space_.TearDown();
+}
+
+void NewSpace::Flip() { SemiSpace::Swap(&from_space_, &to_space_); }
+
+void NewSpace::Grow() {
+ // Grow the semispace by FLAG_semi_space_growth_factor, capped at the
+ // maximum capacity.
+ DCHECK(TotalCapacity() < MaximumCapacity());
+ size_t new_capacity =
+ Min(MaximumCapacity(),
+ static_cast<size_t>(FLAG_semi_space_growth_factor) * TotalCapacity());
+ if (to_space_.GrowTo(new_capacity)) {
+ // Only grow from space if we managed to grow to-space.
+ if (!from_space_.GrowTo(new_capacity)) {
+ // If we managed to grow to-space but couldn't grow from-space,
+ // attempt to shrink to-space.
+ if (!to_space_.ShrinkTo(from_space_.current_capacity())) {
+ // We are in an inconsistent state because we could not
+ // commit/uncommit memory from new space.
+ FATAL("inconsistent state");
+ }
+ }
+ }
+ DCHECK_SEMISPACE_ALLOCATION_INFO(allocation_info_, to_space_);
+}
+
+void NewSpace::Shrink() {
+ size_t new_capacity = Max(InitialTotalCapacity(), 2 * Size());
+ size_t rounded_new_capacity = ::RoundUp(new_capacity, Page::kPageSize);
+ if (rounded_new_capacity < TotalCapacity() &&
+ to_space_.ShrinkTo(rounded_new_capacity)) {
+ // Only shrink from-space if we managed to shrink to-space.
+ from_space_.Reset();
+ if (!from_space_.ShrinkTo(rounded_new_capacity)) {
+ // If we managed to shrink to-space but couldn't shrink from
+ // space, attempt to grow to-space again.
+ if (!to_space_.GrowTo(from_space_.current_capacity())) {
+ // We are in an inconsistent state because we could not
+ // commit/uncommit memory from new space.
+ FATAL("inconsistent state");
+ }
+ }
+ }
+ DCHECK_SEMISPACE_ALLOCATION_INFO(allocation_info_, to_space_);
+}
+
+bool NewSpace::Rebalance() {
+ // Order here is important to make use of the page pool.
+ return to_space_.EnsureCurrentCapacity() &&
+ from_space_.EnsureCurrentCapacity();
+}
+
+void NewSpace::UpdateLinearAllocationArea() {
+ // Make sure there are no unaccounted allocations.
+ DCHECK(!AllocationObserversActive() || top_on_previous_step_ == top());
+
+ Address new_top = to_space_.page_low();
+ BasicMemoryChunk::UpdateHighWaterMark(allocation_info_.top());
+ allocation_info_.Reset(new_top, to_space_.page_high());
+ // The order of the following two stores is important.
+ // See the corresponding loads in ConcurrentMarking::Run.
+ original_limit_.store(limit(), std::memory_order_relaxed);
+ original_top_.store(top(), std::memory_order_release);
+ StartNextInlineAllocationStep();
+ DCHECK_SEMISPACE_ALLOCATION_INFO(allocation_info_, to_space_);
+}
+
+void NewSpace::ResetLinearAllocationArea() {
+ // Do a step to account for memory allocated so far before resetting.
+ InlineAllocationStep(top(), top(), kNullAddress, 0);
+ to_space_.Reset();
+ UpdateLinearAllocationArea();
+ // Clear all mark-bits in the to-space.
+ IncrementalMarking::NonAtomicMarkingState* marking_state =
+ heap()->incremental_marking()->non_atomic_marking_state();
+ for (Page* p : to_space_) {
+ marking_state->ClearLiveness(p);
+ // Concurrent marking may have local live bytes for this page.
+ heap()->concurrent_marking()->ClearMemoryChunkData(p);
+ }
+}
+
+void NewSpace::UpdateInlineAllocationLimit(size_t min_size) {
+ Address new_limit = ComputeLimit(top(), to_space_.page_high(), min_size);
+ allocation_info_.set_limit(new_limit);
+ DCHECK_SEMISPACE_ALLOCATION_INFO(allocation_info_, to_space_);
+}
+
+bool NewSpace::AddFreshPage() {
+ Address top = allocation_info_.top();
+ DCHECK(!OldSpace::IsAtPageStart(top));
+
+ // Do a step to account for memory allocated on the previous page.
+ InlineAllocationStep(top, top, kNullAddress, 0);
+
+ if (!to_space_.AdvancePage()) {
+ // No more pages left to advance.
+ return false;
+ }
+
+ // Clear remainder of current page.
+ Address limit = Page::FromAllocationAreaAddress(top)->area_end();
+ int remaining_in_page = static_cast<int>(limit - top);
+ heap()->CreateFillerObjectAt(top, remaining_in_page, ClearRecordedSlots::kNo);
+ UpdateLinearAllocationArea();
+
+ return true;
+}
+
+bool NewSpace::AddFreshPageSynchronized() {
+ base::MutexGuard guard(&mutex_);
+ return AddFreshPage();
+}
+
+bool NewSpace::EnsureAllocation(int size_in_bytes,
+ AllocationAlignment alignment) {
+ Address old_top = allocation_info_.top();
+ Address high = to_space_.page_high();
+ int filler_size = Heap::GetFillToAlign(old_top, alignment);
+ int aligned_size_in_bytes = size_in_bytes + filler_size;
+
+ if (old_top + aligned_size_in_bytes > high) {
+ // Not enough room in the page, try to allocate a new one.
+ if (!AddFreshPage()) {
+ return false;
+ }
+
+ old_top = allocation_info_.top();
+ high = to_space_.page_high();
+ filler_size = Heap::GetFillToAlign(old_top, alignment);
+ }
+
+ DCHECK(old_top + aligned_size_in_bytes <= high);
+
+ if (allocation_info_.limit() < high) {
+ // The limit has been lowered either because linear allocation was disabled,
+ // because incremental marking wants a chance to do a step, or because the
+ // idle scavenge job wants a chance to post a task.
+ // Set the new limit accordingly.
+ Address new_top = old_top + aligned_size_in_bytes;
+ Address soon_object = old_top + filler_size;
+ InlineAllocationStep(new_top, new_top, soon_object, size_in_bytes);
+ UpdateInlineAllocationLimit(aligned_size_in_bytes);
+ }
+ return true;
+}
+
+std::unique_ptr<ObjectIterator> NewSpace::GetObjectIterator(Heap* heap) {
+ return std::unique_ptr<ObjectIterator>(new SemiSpaceObjectIterator(this));
+}
+
+#ifdef VERIFY_HEAP
+// We do not use the SemiSpaceObjectIterator because verification doesn't assume
+// that it works (it depends on the invariants we are checking).
+void NewSpace::Verify(Isolate* isolate) {
+ // The allocation pointer should be in the space or at the very end.
+ DCHECK_SEMISPACE_ALLOCATION_INFO(allocation_info_, to_space_);
+
+ // There should be objects packed in from the low address up to the
+ // allocation pointer.
+ Address current = to_space_.first_page()->area_start();
+ CHECK_EQ(current, to_space_.space_start());
+
+ size_t external_space_bytes[kNumTypes];
+ for (int i = 0; i < kNumTypes; i++) {
+ external_space_bytes[static_cast<ExternalBackingStoreType>(i)] = 0;
+ }
+
+ while (current != top()) {
+ if (!Page::IsAlignedToPageSize(current)) {
+ // The allocation pointer should not be in the middle of an object.
+ CHECK(!Page::FromAllocationAreaAddress(current)->ContainsLimit(top()) ||
+ current < top());
+
+ HeapObject object = HeapObject::FromAddress(current);
+
+ // The first word should be a map, and we expect all map pointers to
+ // be in map space or read-only space.
+ Map map = object.map();
+ CHECK(map.IsMap());
+ CHECK(ReadOnlyHeap::Contains(map) || heap()->map_space()->Contains(map));
+
+ // The object should not be code or a map.
+ CHECK(!object.IsMap());
+ CHECK(!object.IsAbstractCode());
+
+ // The object itself should look OK.
+ object.ObjectVerify(isolate);
+
+ // All the interior pointers should be contained in the heap.
+ VerifyPointersVisitor visitor(heap());
+ int size = object.Size();
+ object.IterateBody(map, size, &visitor);
+
+ if (object.IsExternalString()) {
+ ExternalString external_string = ExternalString::cast(object);
+ size_t size = external_string.ExternalPayloadSize();
+ external_space_bytes[ExternalBackingStoreType::kExternalString] += size;
+ } else if (object.IsJSArrayBuffer()) {
+ JSArrayBuffer array_buffer = JSArrayBuffer::cast(object);
+ if (ArrayBufferTracker::IsTracked(array_buffer)) {
+ size_t size = ArrayBufferTracker::Lookup(heap(), array_buffer)
+ ->PerIsolateAccountingLength();
+ external_space_bytes[ExternalBackingStoreType::kArrayBuffer] += size;
+ }
+ }
+
+ current += size;
+ } else {
+ // At end of page, switch to next page.
+ Page* page = Page::FromAllocationAreaAddress(current)->next_page();
+ current = page->area_start();
+ }
+ }
+
+ for (int i = 0; i < kNumTypes; i++) {
+ if (V8_ARRAY_BUFFER_EXTENSION_BOOL &&
+ i == ExternalBackingStoreType::kArrayBuffer)
+ continue;
+ ExternalBackingStoreType t = static_cast<ExternalBackingStoreType>(i);
+ CHECK_EQ(external_space_bytes[t], ExternalBackingStoreBytes(t));
+ }
+
+ if (V8_ARRAY_BUFFER_EXTENSION_BOOL) {
+ size_t bytes = heap()->array_buffer_sweeper()->young().BytesSlow();
+ CHECK_EQ(bytes,
+ ExternalBackingStoreBytes(ExternalBackingStoreType::kArrayBuffer));
+ }
+
+ // Check semi-spaces.
+ CHECK_EQ(from_space_.id(), kFromSpace);
+ CHECK_EQ(to_space_.id(), kToSpace);
+ from_space_.Verify();
+ to_space_.Verify();
+}
+#endif
+
+} // namespace internal
+} // namespace v8
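
Verify above tallies external backing-store bytes per type while walking the space and then compares the totals against the space's own accounting. A toy version of that bookkeeping, with an illustrative enum standing in for V8's ExternalBackingStoreType (not the real type):

#include <array>
#include <cstddef>

enum class BackingStoreKind { kArrayBuffer, kExternalString, kNumKinds };

class ExternalByteCounters {
 public:
  // Called for each external string / array buffer found during the walk.
  void Add(BackingStoreKind kind, size_t bytes) {
    counters_[static_cast<size_t>(kind)] += bytes;
  }
  // Compared against the space's own accounting at the end of the walk.
  size_t Total(BackingStoreKind kind) const {
    return counters_[static_cast<size_t>(kind)];
  }

 private:
  std::array<size_t, static_cast<size_t>(BackingStoreKind::kNumKinds)>
      counters_{};
};
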
diff --git a/chromium/v8/src/heap/new-spaces.h b/chromium/v8/src/heap/new-spaces.h
new file mode 100644
index 00000000000..73613152fa0
--- /dev/null
+++ b/chromium/v8/src/heap/new-spaces.h
@@ -0,0 +1,501 @@
+// Copyright 2020 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_HEAP_NEW_SPACES_H_
+#define V8_HEAP_NEW_SPACES_H_
+
+#include <atomic>
+#include <memory>
+
+#include "src/base/macros.h"
+#include "src/base/platform/mutex.h"
+#include "src/heap/heap.h"
+#include "src/heap/spaces.h"
+#include "src/logging/log.h"
+#include "src/objects/heap-object.h"
+
+namespace v8 {
+namespace internal {
+
+class Heap;
+class MemoryChunk;
+
+enum SemiSpaceId { kFromSpace = 0, kToSpace = 1 };
+
+// -----------------------------------------------------------------------------
+// SemiSpace in young generation
+//
+// A SemiSpace is a contiguous chunk of memory holding page-like memory chunks.
+// The mark-compact collector uses the memory of the first page in the from
+// space as a marking stack when tracing live objects.
+class SemiSpace : public Space {
+ public:
+ using iterator = PageIterator;
+ using const_iterator = ConstPageIterator;
+
+ static void Swap(SemiSpace* from, SemiSpace* to);
+
+ SemiSpace(Heap* heap, SemiSpaceId semispace)
+ : Space(heap, NEW_SPACE, new NoFreeList()),
+ current_capacity_(0),
+ maximum_capacity_(0),
+ minimum_capacity_(0),
+ age_mark_(kNullAddress),
+ committed_(false),
+ id_(semispace),
+ current_page_(nullptr),
+ pages_used_(0) {}
+
+ inline bool Contains(HeapObject o) const;
+ inline bool Contains(Object o) const;
+ inline bool ContainsSlow(Address a) const;
+
+ void SetUp(size_t initial_capacity, size_t maximum_capacity);
+ void TearDown();
+
+ bool Commit();
+ bool Uncommit();
+ bool is_committed() { return committed_; }
+
+ // Grow the semispace to the new capacity. The new capacity requested must
+ // be larger than the current capacity and less than the maximum capacity.
+ bool GrowTo(size_t new_capacity);
+
+ // Shrinks the semispace to the new capacity. The new capacity requested
+ // must be more than the amount of used memory in the semispace and less
+ // than the current capacity.
+ bool ShrinkTo(size_t new_capacity);
+
+ bool EnsureCurrentCapacity();
+
+ Address space_end() { return memory_chunk_list_.back()->area_end(); }
+
+ // Returns the start address of the first page of the space.
+ Address space_start() {
+ DCHECK_NE(memory_chunk_list_.front(), nullptr);
+ return memory_chunk_list_.front()->area_start();
+ }
+
+ Page* current_page() { return current_page_; }
+ int pages_used() { return pages_used_; }
+
+ // Returns the start address of the current page of the space.
+ Address page_low() { return current_page_->area_start(); }
+
+ // Returns one past the end address of the current page of the space.
+ Address page_high() { return current_page_->area_end(); }
+
+ bool AdvancePage() {
+ Page* next_page = current_page_->next_page();
+ // We cannot expand if we reached the maximum number of pages already. Note
+ // that we need to account for the next page already for this check as we
+ // could potentially fill the whole page after advancing.
+ const bool reached_max_pages = (pages_used_ + 1) == max_pages();
+ if (next_page == nullptr || reached_max_pages) {
+ return false;
+ }
+ current_page_ = next_page;
+ pages_used_++;
+ return true;
+ }
+
+ // Resets the space to using the first page.
+ void Reset();
+
+ void RemovePage(Page* page);
+ void PrependPage(Page* page);
+
+ Page* InitializePage(MemoryChunk* chunk);
+
+ // Age mark accessors.
+ Address age_mark() { return age_mark_; }
+ void set_age_mark(Address mark);
+
+ // Returns the current capacity of the semispace.
+ size_t current_capacity() { return current_capacity_; }
+
+ // Returns the maximum capacity of the semispace.
+ size_t maximum_capacity() { return maximum_capacity_; }
+
+ // Returns the initial capacity of the semispace.
+ size_t minimum_capacity() { return minimum_capacity_; }
+
+ SemiSpaceId id() { return id_; }
+
+ // Approximate amount of physical memory committed for this space.
+ size_t CommittedPhysicalMemory() override;
+
+  // If we don't have these here then SemiSpace will be abstract. However,
+ // they should never be called:
+
+ size_t Size() override { UNREACHABLE(); }
+
+ size_t SizeOfObjects() override { return Size(); }
+
+ size_t Available() override { UNREACHABLE(); }
+
+ Page* first_page() { return reinterpret_cast<Page*>(Space::first_page()); }
+ Page* last_page() { return reinterpret_cast<Page*>(Space::last_page()); }
+
+ const Page* first_page() const {
+ return reinterpret_cast<const Page*>(Space::first_page());
+ }
+ const Page* last_page() const {
+ return reinterpret_cast<const Page*>(Space::last_page());
+ }
+
+ iterator begin() { return iterator(first_page()); }
+ iterator end() { return iterator(nullptr); }
+
+ const_iterator begin() const { return const_iterator(first_page()); }
+ const_iterator end() const { return const_iterator(nullptr); }
+
+ std::unique_ptr<ObjectIterator> GetObjectIterator(Heap* heap) override;
+
+#ifdef DEBUG
+ V8_EXPORT_PRIVATE void Print() override;
+  // Validate a range of addresses in a SemiSpace.
+ // The "from" address must be on a page prior to the "to" address,
+ // in the linked page order, or it must be earlier on the same page.
+ static void AssertValidRange(Address from, Address to);
+#else
+ // Do nothing.
+ inline static void AssertValidRange(Address from, Address to) {}
+#endif
+
+#ifdef VERIFY_HEAP
+ virtual void Verify();
+#endif
+
+ private:
+ void RewindPages(int num_pages);
+
+ inline int max_pages() {
+ return static_cast<int>(current_capacity_ / Page::kPageSize);
+ }
+
+ // Copies the flags into the masked positions on all pages in the space.
+ void FixPagesFlags(intptr_t flags, intptr_t flag_mask);
+
+ // The currently committed space capacity.
+ size_t current_capacity_;
+
+ // The maximum capacity that can be used by this space. A space cannot grow
+ // beyond that size.
+ size_t maximum_capacity_;
+
+ // The minimum capacity for the space. A space cannot shrink below this size.
+ size_t minimum_capacity_;
+
+ // Used to govern object promotion during mark-compact collection.
+ Address age_mark_;
+
+ bool committed_;
+ SemiSpaceId id_;
+
+ Page* current_page_;
+
+ int pages_used_;
+
+ friend class NewSpace;
+ friend class SemiSpaceObjectIterator;
+};
+
+// A SemiSpaceObjectIterator is an ObjectIterator that iterates over the active
+// semispace of the heap's new space. It iterates over the objects in the
+// semispace from a given start address (defaulting to the bottom of the
+// semispace) to the top of the semispace. New objects allocated after the
+// iterator is created are not iterated.
+class SemiSpaceObjectIterator : public ObjectIterator {
+ public:
+ // Create an iterator over the allocated objects in the given to-space.
+ explicit SemiSpaceObjectIterator(NewSpace* space);
+
+ inline HeapObject Next() override;
+
+ private:
+ void Initialize(Address start, Address end);
+
+ // The current iteration point.
+ Address current_;
+ // The end of iteration.
+ Address limit_;
+};
+
+// -----------------------------------------------------------------------------
+// The young generation space.
+//
+// The new space consists of a contiguous pair of semispaces. It simply
+// forwards most functions to the appropriate semispace.
+
+class V8_EXPORT_PRIVATE NewSpace
+ : NON_EXPORTED_BASE(public SpaceWithLinearArea) {
+ public:
+ using iterator = PageIterator;
+ using const_iterator = ConstPageIterator;
+
+ NewSpace(Heap* heap, v8::PageAllocator* page_allocator,
+ size_t initial_semispace_capacity, size_t max_semispace_capacity);
+
+ ~NewSpace() override { TearDown(); }
+
+ inline bool ContainsSlow(Address a) const;
+ inline bool Contains(Object o) const;
+ inline bool Contains(HeapObject o) const;
+
+ // Tears down the space. Heap memory was not allocated by the space, so it
+ // is not deallocated here.
+ void TearDown();
+
+ // Flip the pair of spaces.
+ void Flip();
+
+ // Grow the capacity of the semispaces. Assumes that they are not at
+ // their maximum capacity.
+ void Grow();
+
+ // Shrink the capacity of the semispaces.
+ void Shrink();
+
+ // Return the allocated bytes in the active semispace.
+ size_t Size() final {
+ DCHECK_GE(top(), to_space_.page_low());
+ return to_space_.pages_used() *
+ MemoryChunkLayout::AllocatableMemoryInDataPage() +
+ static_cast<size_t>(top() - to_space_.page_low());
+ }
+
+ size_t SizeOfObjects() final { return Size(); }
+
+ // Return the allocatable capacity of a semispace.
+ size_t Capacity() {
+ SLOW_DCHECK(to_space_.current_capacity() == from_space_.current_capacity());
+ return (to_space_.current_capacity() / Page::kPageSize) *
+ MemoryChunkLayout::AllocatableMemoryInDataPage();
+ }
+
+ // Return the current size of a semispace, allocatable and non-allocatable
+ // memory.
+ size_t TotalCapacity() {
+ DCHECK(to_space_.current_capacity() == from_space_.current_capacity());
+ return to_space_.current_capacity();
+ }
+
+ // Committed memory for NewSpace is the committed memory of both semi-spaces
+ // combined.
+ size_t CommittedMemory() final {
+ return from_space_.CommittedMemory() + to_space_.CommittedMemory();
+ }
+
+ size_t MaximumCommittedMemory() final {
+ return from_space_.MaximumCommittedMemory() +
+ to_space_.MaximumCommittedMemory();
+ }
+
+ // Approximate amount of physical memory committed for this space.
+ size_t CommittedPhysicalMemory() final;
+
+ // Return the available bytes without growing.
+ size_t Available() final {
+ DCHECK_GE(Capacity(), Size());
+ return Capacity() - Size();
+ }
+
+ size_t ExternalBackingStoreBytes(ExternalBackingStoreType type) const final {
+ if (V8_ARRAY_BUFFER_EXTENSION_BOOL &&
+ type == ExternalBackingStoreType::kArrayBuffer)
+ return heap()->YoungArrayBufferBytes();
+ DCHECK_EQ(0, from_space_.ExternalBackingStoreBytes(type));
+ return to_space_.ExternalBackingStoreBytes(type);
+ }
+
+ size_t ExternalBackingStoreBytes() {
+ size_t result = 0;
+ for (int i = 0; i < ExternalBackingStoreType::kNumTypes; i++) {
+ result +=
+ ExternalBackingStoreBytes(static_cast<ExternalBackingStoreType>(i));
+ }
+ return result;
+ }
+
+ size_t AllocatedSinceLastGC() {
+ const Address age_mark = to_space_.age_mark();
+ DCHECK_NE(age_mark, kNullAddress);
+ DCHECK_NE(top(), kNullAddress);
+ Page* const age_mark_page = Page::FromAllocationAreaAddress(age_mark);
+ Page* const last_page = Page::FromAllocationAreaAddress(top());
+ Page* current_page = age_mark_page;
+ size_t allocated = 0;
+ if (current_page != last_page) {
+ DCHECK_EQ(current_page, age_mark_page);
+ DCHECK_GE(age_mark_page->area_end(), age_mark);
+ allocated += age_mark_page->area_end() - age_mark;
+ current_page = current_page->next_page();
+ } else {
+ DCHECK_GE(top(), age_mark);
+ return top() - age_mark;
+ }
+ while (current_page != last_page) {
+ DCHECK_NE(current_page, age_mark_page);
+ allocated += MemoryChunkLayout::AllocatableMemoryInDataPage();
+ current_page = current_page->next_page();
+ }
+ DCHECK_GE(top(), current_page->area_start());
+ allocated += top() - current_page->area_start();
+ DCHECK_LE(allocated, Size());
+ return allocated;
+ }
+
+ void MovePageFromSpaceToSpace(Page* page) {
+ DCHECK(page->IsFromPage());
+ from_space_.RemovePage(page);
+ to_space_.PrependPage(page);
+ }
+
+ bool Rebalance();
+
+ // Return the maximum capacity of a semispace.
+ size_t MaximumCapacity() {
+ DCHECK(to_space_.maximum_capacity() == from_space_.maximum_capacity());
+ return to_space_.maximum_capacity();
+ }
+
+ bool IsAtMaximumCapacity() { return TotalCapacity() == MaximumCapacity(); }
+
+ // Returns the initial capacity of a semispace.
+ size_t InitialTotalCapacity() {
+ DCHECK(to_space_.minimum_capacity() == from_space_.minimum_capacity());
+ return to_space_.minimum_capacity();
+ }
+
+ void ResetOriginalTop() {
+ DCHECK_GE(top(), original_top_);
+ DCHECK_LE(top(), original_limit_);
+ original_top_.store(top(), std::memory_order_release);
+ }
+
+ Address original_top_acquire() {
+ return original_top_.load(std::memory_order_acquire);
+ }
+ Address original_limit_relaxed() {
+ return original_limit_.load(std::memory_order_relaxed);
+ }
+
+ // Return the address of the first allocatable address in the active
+ // semispace. This may be the address where the first object resides.
+ Address first_allocatable_address() { return to_space_.space_start(); }
+
+ // Get the age mark of the inactive semispace.
+ Address age_mark() { return from_space_.age_mark(); }
+ // Set the age mark in the active semispace.
+ void set_age_mark(Address mark) { to_space_.set_age_mark(mark); }
+
+ V8_WARN_UNUSED_RESULT V8_INLINE AllocationResult
+ AllocateRawAligned(int size_in_bytes, AllocationAlignment alignment,
+ AllocationOrigin origin = AllocationOrigin::kRuntime);
+
+ V8_WARN_UNUSED_RESULT V8_INLINE AllocationResult AllocateRawUnaligned(
+ int size_in_bytes, AllocationOrigin origin = AllocationOrigin::kRuntime);
+
+ V8_WARN_UNUSED_RESULT V8_INLINE AllocationResult
+ AllocateRaw(int size_in_bytes, AllocationAlignment alignment,
+ AllocationOrigin origin = AllocationOrigin::kRuntime);
+
+ V8_WARN_UNUSED_RESULT inline AllocationResult AllocateRawSynchronized(
+ int size_in_bytes, AllocationAlignment alignment,
+ AllocationOrigin origin = AllocationOrigin::kRuntime);
+
+ // Reset the allocation pointer to the beginning of the active semispace.
+ void ResetLinearAllocationArea();
+
+ // When inline allocation stepping is active, either because of incremental
+ // marking, idle scavenge, or allocation statistics gathering, we 'interrupt'
+ // inline allocation every once in a while. This is done by setting
+  // allocation_info_.limit to be lower than the actual limit and increasing
+ // it in steps to guarantee that the observers are notified periodically.
+ void UpdateInlineAllocationLimit(size_t size_in_bytes) override;
+
+ inline bool ToSpaceContainsSlow(Address a) const;
+ inline bool ToSpaceContains(Object o) const;
+ inline bool FromSpaceContains(Object o) const;
+
+ // Try to switch the active semispace to a new, empty, page.
+ // Returns false if this isn't possible or reasonable (i.e., there
+ // are no pages, or the current page is already empty), or true
+ // if successful.
+ bool AddFreshPage();
+ bool AddFreshPageSynchronized();
+
+#ifdef VERIFY_HEAP
+ // Verify the active semispace.
+ virtual void Verify(Isolate* isolate);
+#endif
+
+#ifdef DEBUG
+ // Print the active semispace.
+ void Print() override { to_space_.Print(); }
+#endif
+
+ // Return whether the operation succeeded.
+ bool CommitFromSpaceIfNeeded() {
+ if (from_space_.is_committed()) return true;
+ return from_space_.Commit();
+ }
+
+ bool UncommitFromSpace() {
+ if (!from_space_.is_committed()) return true;
+ return from_space_.Uncommit();
+ }
+
+ bool IsFromSpaceCommitted() { return from_space_.is_committed(); }
+
+ SemiSpace* active_space() { return &to_space_; }
+
+ Page* first_page() { return to_space_.first_page(); }
+ Page* last_page() { return to_space_.last_page(); }
+
+ iterator begin() { return to_space_.begin(); }
+ iterator end() { return to_space_.end(); }
+
+ const_iterator begin() const { return to_space_.begin(); }
+ const_iterator end() const { return to_space_.end(); }
+
+ std::unique_ptr<ObjectIterator> GetObjectIterator(Heap* heap) override;
+
+ SemiSpace& from_space() { return from_space_; }
+ SemiSpace& to_space() { return to_space_; }
+
+ private:
+ // Update linear allocation area to match the current to-space page.
+ void UpdateLinearAllocationArea();
+
+ base::Mutex mutex_;
+
+ // The top and the limit at the time of setting the linear allocation area.
+ // These values can be accessed by background tasks.
+ std::atomic<Address> original_top_;
+ std::atomic<Address> original_limit_;
+
+ // The semispaces.
+ SemiSpace to_space_;
+ SemiSpace from_space_;
+ VirtualMemory reservation_;
+
+ bool EnsureAllocation(int size_in_bytes, AllocationAlignment alignment);
+ bool SupportsInlineAllocation() override { return true; }
+
+ friend class SemiSpaceObjectIterator;
+};
+
+// For contiguous spaces, top should be in the space (or at the end) and limit
+// should be the end of the space.
+#define DCHECK_SEMISPACE_ALLOCATION_INFO(info, space) \
+ SLOW_DCHECK((space).page_low() <= (info).top() && \
+ (info).top() <= (space).page_high() && \
+ (info).limit() <= (space).page_high())
+
+} // namespace internal
+} // namespace v8
+
+#endif // V8_HEAP_NEW_SPACES_H_
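
The DCHECK_SEMISPACE_ALLOCATION_INFO macro above encodes a simple invariant. Restated as a plain function over simplified address types (an illustrative sketch, not V8 code):

#include <cstdint>

// True when the allocation top lies within the current page and the
// (possibly lowered) limit does not run past the page end.
inline bool AllocationInfoIsValid(uintptr_t page_low, uintptr_t page_high,
                                  uintptr_t top, uintptr_t limit) {
  return page_low <= top && top <= page_high && limit <= page_high;
}
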
diff --git a/chromium/v8/src/heap/object-stats.cc b/chromium/v8/src/heap/object-stats.cc
index bd15b50b96a..05929acb973 100644
--- a/chromium/v8/src/heap/object-stats.cc
+++ b/chromium/v8/src/heap/object-stats.cc
@@ -426,7 +426,7 @@ class ObjectStatsCollectorImpl {
bool CanRecordFixedArray(FixedArrayBase array);
bool IsCowArray(FixedArrayBase array);
- // Blacklist for objects that should not be recorded using
+ // Blocklist for objects that should not be recorded using
// VirtualObjectStats and RecordSimpleVirtualObjectStats. For recording those
// objects dispatch to the low level ObjectStats::RecordObjectStats manually.
bool ShouldRecordObject(HeapObject object, CowMode check_cow_array);
@@ -839,7 +839,6 @@ void ObjectStatsCollectorImpl::RecordObjectStats(HeapObject obj,
bool ObjectStatsCollectorImpl::CanRecordFixedArray(FixedArrayBase array) {
ReadOnlyRoots roots(heap_);
return array != roots.empty_fixed_array() &&
- array != roots.empty_sloppy_arguments_elements() &&
array != roots.empty_slow_element_dictionary() &&
array != roots.empty_property_dictionary();
}
diff --git a/chromium/v8/src/heap/off-thread-heap.cc b/chromium/v8/src/heap/off-thread-heap.cc
index fec93f80685..584fe349717 100644
--- a/chromium/v8/src/heap/off-thread-heap.cc
+++ b/chromium/v8/src/heap/off-thread-heap.cc
@@ -4,10 +4,14 @@
#include "src/heap/off-thread-heap.h"
+#include "src/common/assert-scope.h"
+#include "src/common/globals.h"
+#include "src/handles/off-thread-transfer-handle-storage-inl.h"
+#include "src/heap/paged-spaces-inl.h"
#include "src/heap/spaces-inl.h"
-#include "src/heap/spaces.h"
#include "src/objects/objects-body-descriptors-inl.h"
#include "src/roots/roots.h"
+#include "src/snapshot/references.h"
// Has to be the last include (doesn't have include guards)
#include "src/objects/object-macros.h"
@@ -15,7 +19,16 @@
namespace v8 {
namespace internal {
-OffThreadHeap::OffThreadHeap(Heap* heap) : space_(heap), lo_space_(heap) {}
+OffThreadHeap::~OffThreadHeap() = default;
+
+OffThreadHeap::OffThreadHeap(Heap* heap)
+ : space_(heap),
+ lo_space_(heap),
+ off_thread_transfer_handles_head_(nullptr) {}
+
+bool OffThreadHeap::Contains(HeapObject obj) {
+ return space_.Contains(obj) || lo_space_.Contains(obj);
+}
class OffThreadHeap::StringSlotCollectingVisitor : public ObjectVisitor {
public:
@@ -74,6 +87,13 @@ void OffThreadHeap::FinishOffThread() {
string_slots_ = std::move(string_slot_collector.string_slots);
+ OffThreadTransferHandleStorage* storage =
+ off_thread_transfer_handles_head_.get();
+ while (storage != nullptr) {
+ storage->ConvertFromOffThreadHandleOnFinish();
+ storage = storage->next();
+ }
+
is_finished = true;
}
@@ -82,25 +102,70 @@ void OffThreadHeap::Publish(Heap* heap) {
Isolate* isolate = heap->isolate();
ReadOnlyRoots roots(isolate);
+ // Before we do anything else, ensure that the old-space can expand to the
+ // size needed for the off-thread objects. Use capacity rather than size since
+ // we're adding entire pages.
+ size_t off_thread_size = space_.Capacity() + lo_space_.Size();
+ if (!heap->CanExpandOldGeneration(off_thread_size)) {
+ heap->CollectAllAvailableGarbage(GarbageCollectionReason::kLastResort);
+ if (!heap->CanExpandOldGeneration(off_thread_size)) {
+ heap->FatalProcessOutOfMemory(
+ "Can't expand old-space enough to merge off-thread pages.");
+ }
+ }
+
+ // Merging and transferring handles should be atomic from the point of view
+ // of the GC, since we neither want the GC to walk main-thread handles that
+ // point into off-thread pages, nor do we want the GC to move the raw
+ // pointers we have into off-thread pages before we've had a chance to turn
+ // them into real handles.
+ // TODO(leszeks): This could be a stronger assertion, that we don't GC at
+ // all.
+ DisallowHeapAllocation no_gc;
+
+ // Merge the spaces.
+ {
+ TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.compile"),
+ "V8.OffThreadFinalization.Publish.Merge");
+
+ heap->old_space()->MergeLocalSpace(&space_);
+ heap->lo_space()->MergeOffThreadSpace(&lo_space_);
+
+ DCHECK(heap->CanExpandOldGeneration(0));
+ }
+
+ // Transfer all the transfer handles to be real handles. Make sure to do this
+ // before creating any handle scopes, to allow these handles to live in the
+ // caller's handle scope.
+ OffThreadTransferHandleStorage* storage =
+ off_thread_transfer_handles_head_.get();
+ while (storage != nullptr) {
+ storage->ConvertToHandleOnPublish(isolate, &no_gc);
+ storage = storage->next();
+ }
+
+ // Create a new handle scope after transferring handles, for the slot holder
+ // handles below.
HandleScope handle_scope(isolate);
- // First, handlify all the string slot holder objects, so that we can keep
- // track of them if they move.
+ // Handlify all the string slot holder objects, so that we can keep track of
+ // them if they move.
//
// TODO(leszeks): We might be able to create a HandleScope-compatible
- // structure off-thread and merge it into the current handle scope all in one
- // go (DeferredHandles maybe?).
- std::vector<Handle<HeapObject>> heap_object_handles;
+ // structure off-thread and merge it into the current handle scope all in
+ // one go (DeferredHandles maybe?).
+ std::vector<std::pair<Handle<HeapObject>, Handle<Map>>> heap_object_handles;
std::vector<Handle<Script>> script_handles;
{
TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.compile"),
"V8.OffThreadFinalization.Publish.CollectHandles");
heap_object_handles.reserve(string_slots_.size());
for (RelativeSlot relative_slot : string_slots_) {
- // TODO(leszeks): Group slots in the same parent object to avoid creating
- // multiple duplicate handles.
+ // TODO(leszeks): Group slots in the same parent object to avoid
+ // creating multiple duplicate handles.
HeapObject obj = HeapObject::FromAddress(relative_slot.object_address);
- heap_object_handles.push_back(handle(obj, isolate));
+ heap_object_handles.push_back(
+ {handle(obj, isolate), handle(obj.map(), isolate)});
// De-internalize the string so that we can re-internalize it later.
String string =
@@ -116,46 +181,20 @@ void OffThreadHeap::Publish(Heap* heap) {
}
}
- // Then merge the spaces. At this point, we are allowed to point between (no
- // longer) off-thread pages and main-thread heap pages, and objects in the
- // previously off-thread page can move.
- {
- TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.compile"),
- "V8.OffThreadFinalization.Publish.Merge");
- Heap* heap = isolate->heap();
+ // After this point, all objects are transferred and all handles are valid,
+ // so we can GC again.
+ no_gc.Release();
- // Ensure that the old-space can expand do the size needed for the
- // off-thread objects. Use capacity rather than size since we're adding
- // entire pages.
- size_t off_thread_size = space_.Capacity() + lo_space_.Size();
- if (!heap->CanExpandOldGeneration(off_thread_size)) {
- heap->InvokeNearHeapLimitCallback();
- if (!heap->CanExpandOldGeneration(off_thread_size)) {
- heap->CollectAllAvailableGarbage(GarbageCollectionReason::kLastResort);
- if (!heap->CanExpandOldGeneration(off_thread_size)) {
- heap->FatalProcessOutOfMemory(
- "Can't expand old-space enough to merge off-thread pages.");
- }
- }
- }
+ // Possibly trigger a GC if we're close to exhausting the old generation.
+ // TODO(leszeks): Adjust the heuristics here.
+ heap->StartIncrementalMarkingIfAllocationLimitIsReached(
+ heap->GCFlagsForIncrementalMarking(),
+ kGCCallbackScheduleIdleGarbageCollection);
- heap->old_space()->MergeLocalSpace(&space_);
- heap->lo_space()->MergeOffThreadSpace(&lo_space_);
-
- DCHECK(heap->CanExpandOldGeneration(0));
- heap->NotifyOldGenerationExpansion();
-
- // Possibly trigger a GC if we're close to exhausting the old generation.
- // TODO(leszeks): Adjust the heuristics here.
- heap->StartIncrementalMarkingIfAllocationLimitIsReached(
- heap->GCFlagsForIncrementalMarking(),
- kGCCallbackScheduleIdleGarbageCollection);
-
- if (!heap->ShouldExpandOldGenerationOnSlowAllocation() ||
- !heap->CanExpandOldGeneration(1 * MB)) {
- heap->CollectGarbage(OLD_SPACE,
- GarbageCollectionReason::kAllocationFailure);
- }
+ if (!heap->ShouldExpandOldGenerationOnSlowAllocation() ||
+ !heap->CanExpandOldGeneration(1 * MB)) {
+ heap->CollectGarbage(OLD_SPACE,
+ GarbageCollectionReason::kAllocationFailure);
}
// Iterate the string slots, as an offset from the holders we have handles to.
@@ -163,12 +202,13 @@ void OffThreadHeap::Publish(Heap* heap) {
TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.compile"),
"V8.OffThreadFinalization.Publish.UpdateHandles");
for (size_t i = 0; i < string_slots_.size(); ++i) {
- HeapObject obj = *heap_object_handles[i];
+ HeapObject obj = *heap_object_handles[i].first;
int slot_offset = string_slots_[i].slot_offset;
// There's currently no cases where the holder object could have been
// resized.
- DCHECK_LT(slot_offset, obj.Size());
+ CHECK_EQ(obj.map(), *heap_object_handles[i].second);
+ CHECK_LT(slot_offset, obj.Size());
String string = String::cast(RELAXED_READ_FIELD(obj, slot_offset));
if (string.IsThinString()) {
@@ -188,8 +228,14 @@ void OffThreadHeap::Publish(Heap* heap) {
if (*string_handle != *internalized_string) {
// Re-read the object from the handle in case there was GC during
// internalization and it moved.
- HeapObject obj = *heap_object_handles[i];
+ HeapObject obj = *heap_object_handles[i].first;
String value = *internalized_string;
+
+ // Sanity checks that the object or string slot value hasn't changed.
+ CHECK_EQ(obj.map(), *heap_object_handles[i].second);
+ CHECK_LT(slot_offset, obj.Size());
+ CHECK_EQ(RELAXED_READ_FIELD(obj, slot_offset), *string_handle);
+
RELAXED_WRITE_FIELD(obj, slot_offset, value);
WRITE_BARRIER(obj, slot_offset, value);
}
@@ -223,7 +269,37 @@ HeapObject OffThreadHeap::AllocateRaw(int size, AllocationType allocation,
} else {
result = space_.AllocateRaw(size, alignment);
}
- return result.ToObjectChecked();
+ HeapObject obj = result.ToObjectChecked();
+ OnAllocationEvent(obj, size);
+ return obj;
+}
+
+bool OffThreadHeap::ReserveSpace(Heap::Reservation* reservations) {
+#ifdef DEBUG
+ for (int space = FIRST_SPACE;
+ space < static_cast<int>(SnapshotSpace::kNumberOfHeapSpaces); space++) {
+ if (space == OLD_SPACE || space == LO_SPACE) continue;
+ Heap::Reservation* reservation = &reservations[space];
+ DCHECK_EQ(reservation->size(), 1);
+ DCHECK_EQ(reservation->at(0).size, 0);
+ }
+#endif
+
+ for (auto& chunk : reservations[OLD_SPACE]) {
+ int size = chunk.size;
+ AllocationResult allocation = space_.AllocateRawUnaligned(size);
+ HeapObject free_space = allocation.ToObjectChecked();
+
+ // Mark with a free list node, in case we have a GC before
+ // deserializing.
+ Address free_space_address = free_space.address();
+ CreateFillerObjectAt(free_space_address, size,
+ ClearFreedMemoryMode::kDontClearFreedMemory);
+ chunk.start = free_space_address;
+ chunk.end = free_space_address + size;
+ }
+
+ return true;
}
HeapObject OffThreadHeap::CreateFillerObjectAt(
@@ -234,6 +310,17 @@ HeapObject OffThreadHeap::CreateFillerObjectAt(
return filler;
}
+OffThreadTransferHandleStorage* OffThreadHeap::AddTransferHandleStorage(
+ HandleBase handle) {
+ DCHECK_IMPLIES(off_thread_transfer_handles_head_ != nullptr,
+ off_thread_transfer_handles_head_->state() ==
+ OffThreadTransferHandleStorage::kOffThreadHandle);
+ off_thread_transfer_handles_head_ =
+ std::make_unique<OffThreadTransferHandleStorage>(
+ handle.location(), std::move(off_thread_transfer_handles_head_));
+ return off_thread_transfer_handles_head_.get();
+}
+
} // namespace internal
} // namespace v8
diff --git a/chromium/v8/src/heap/off-thread-heap.h b/chromium/v8/src/heap/off-thread-heap.h
index de902be52fb..3bb1777df11 100644
--- a/chromium/v8/src/heap/off-thread-heap.h
+++ b/chromium/v8/src/heap/off-thread-heap.h
@@ -6,28 +6,51 @@
#define V8_HEAP_OFF_THREAD_HEAP_H_
#include <vector>
+
#include "src/common/globals.h"
#include "src/heap/large-spaces.h"
+#include "src/heap/paged-spaces.h"
#include "src/heap/spaces.h"
+#include "src/objects/heap-object.h"
namespace v8 {
namespace internal {
+class OffThreadTransferHandleStorage;
+
class V8_EXPORT_PRIVATE OffThreadHeap {
public:
explicit OffThreadHeap(Heap* heap);
+ ~OffThreadHeap();
HeapObject AllocateRaw(int size, AllocationType allocation,
AllocationAlignment alignment = kWordAligned);
void AddToScriptList(Handle<Script> shared);
+ void OnAllocationEvent(HeapObject obj, int size) {
+ // TODO(leszeks): Do something here.
+ }
+
+ ReadOnlySpace* read_only_space() const {
+ // Access the main-thread heap via the spaces.
+ return space_.heap()->read_only_space();
+ }
+
+ bool Contains(HeapObject obj);
+
+ bool ReserveSpace(Heap::Reservation* reservations);
+
HeapObject CreateFillerObjectAt(Address addr, int size,
ClearFreedMemoryMode clear_memory_mode);
+ OffThreadTransferHandleStorage* AddTransferHandleStorage(HandleBase handle);
+
void FinishOffThread();
void Publish(Heap* heap);
private:
+ friend class DeserializerAllocator;
+
class StringSlotCollectingVisitor;
struct RelativeSlot {
@@ -43,6 +66,8 @@ class V8_EXPORT_PRIVATE OffThreadHeap {
OffThreadLargeObjectSpace lo_space_;
std::vector<RelativeSlot> string_slots_;
std::vector<Script> script_list_;
+ std::unique_ptr<OffThreadTransferHandleStorage>
+ off_thread_transfer_handles_head_;
bool is_finished = false;
};
diff --git a/chromium/v8/src/heap/paged-spaces-inl.h b/chromium/v8/src/heap/paged-spaces-inl.h
new file mode 100644
index 00000000000..6b2e5a848a5
--- /dev/null
+++ b/chromium/v8/src/heap/paged-spaces-inl.h
@@ -0,0 +1,208 @@
+// Copyright 2020 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_HEAP_PAGED_SPACES_INL_H_
+#define V8_HEAP_PAGED_SPACES_INL_H_
+
+#include "src/heap/incremental-marking.h"
+#include "src/heap/paged-spaces.h"
+#include "src/objects/code-inl.h"
+#include "src/objects/heap-object.h"
+#include "src/objects/objects-inl.h"
+
+namespace v8 {
+namespace internal {
+
+// -----------------------------------------------------------------------------
+// PagedSpaceObjectIterator
+
+HeapObject PagedSpaceObjectIterator::Next() {
+ do {
+ HeapObject next_obj = FromCurrentPage();
+ if (!next_obj.is_null()) return next_obj;
+ } while (AdvanceToNextPage());
+ return HeapObject();
+}
+
+HeapObject PagedSpaceObjectIterator::FromCurrentPage() {
+ while (cur_addr_ != cur_end_) {
+ HeapObject obj = HeapObject::FromAddress(cur_addr_);
+ const int obj_size = obj.Size();
+ cur_addr_ += obj_size;
+ DCHECK_LE(cur_addr_, cur_end_);
+ if (!obj.IsFreeSpaceOrFiller()) {
+ if (obj.IsCode()) {
+ DCHECK_EQ(space_->identity(), CODE_SPACE);
+ DCHECK_CODEOBJECT_SIZE(obj_size, space_);
+ } else {
+ DCHECK_OBJECT_SIZE(obj_size);
+ }
+ return obj;
+ }
+ }
+ return HeapObject();
+}
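
Next and FromCurrentPage above form a two-level loop: exhaust the current page while skipping fillers, then advance to the next page, and return an empty sentinel once both levels are done. A self-contained sketch of the same pattern using invented types (Item, PagedIterator), not V8's classes:

#include <cstddef>
#include <utility>
#include <vector>

struct Item {
  int value = 0;
  bool is_filler = false;
};

class PagedIterator {
 public:
  explicit PagedIterator(std::vector<std::vector<Item>> pages)
      : pages_(std::move(pages)) {}

  // Returns nullptr once every page is exhausted, mirroring the empty
  // HeapObject() sentinel returned by the real iterator.
  const Item* Next() {
    do {
      const Item* item = FromCurrentPage();
      if (item != nullptr) return item;
    } while (AdvanceToNextPage());
    return nullptr;
  }

 private:
  // Scans the current page, skipping filler entries, until it runs out.
  const Item* FromCurrentPage() {
    if (page_ >= pages_.size()) return nullptr;
    while (index_ < pages_[page_].size()) {
      const Item* item = &pages_[page_][index_++];
      if (!item->is_filler) return item;
    }
    return nullptr;
  }

  bool AdvanceToNextPage() {
    if (page_ + 1 >= pages_.size()) return false;
    ++page_;
    index_ = 0;
    return true;
  }

  std::vector<std::vector<Item>> pages_;
  size_t page_ = 0;
  size_t index_ = 0;
};
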
+
+bool PagedSpace::Contains(Address addr) const {
+ if (V8_ENABLE_THIRD_PARTY_HEAP_BOOL) {
+ return true;
+ }
+ return Page::FromAddress(addr)->owner() == this;
+}
+
+bool PagedSpace::Contains(Object o) const {
+ if (!o.IsHeapObject()) return false;
+ return Page::FromAddress(o.ptr())->owner() == this;
+}
+
+void PagedSpace::UnlinkFreeListCategories(Page* page) {
+ DCHECK_EQ(this, page->owner());
+ page->ForAllFreeListCategories([this](FreeListCategory* category) {
+ free_list()->RemoveCategory(category);
+ });
+}
+
+size_t PagedSpace::RelinkFreeListCategories(Page* page) {
+ DCHECK_EQ(this, page->owner());
+ size_t added = 0;
+ page->ForAllFreeListCategories([this, &added](FreeListCategory* category) {
+ added += category->available();
+ category->Relink(free_list());
+ });
+
+ DCHECK_IMPLIES(!page->IsFlagSet(Page::NEVER_ALLOCATE_ON_PAGE),
+ page->AvailableInFreeList() ==
+ page->AvailableInFreeListFromAllocatedBytes());
+ return added;
+}
+
+bool PagedSpace::TryFreeLast(HeapObject object, int object_size) {
+ if (allocation_info_.top() != kNullAddress) {
+ const Address object_address = object.address();
+ if ((allocation_info_.top() - object_size) == object_address) {
+ allocation_info_.set_top(object_address);
+ return true;
+ }
+ }
+ return false;
+}
+
+bool PagedSpace::EnsureLinearAllocationArea(int size_in_bytes,
+ AllocationOrigin origin) {
+ if (allocation_info_.top() + size_in_bytes <= allocation_info_.limit()) {
+ return true;
+ }
+ return SlowRefillLinearAllocationArea(size_in_bytes, origin);
+}
+
+HeapObject PagedSpace::AllocateLinearly(int size_in_bytes) {
+ Address current_top = allocation_info_.top();
+ Address new_top = current_top + size_in_bytes;
+ DCHECK_LE(new_top, allocation_info_.limit());
+ allocation_info_.set_top(new_top);
+ return HeapObject::FromAddress(current_top);
+}
+
+HeapObject PagedSpace::TryAllocateLinearlyAligned(
+ int* size_in_bytes, AllocationAlignment alignment) {
+ Address current_top = allocation_info_.top();
+ int filler_size = Heap::GetFillToAlign(current_top, alignment);
+
+ Address new_top = current_top + filler_size + *size_in_bytes;
+ if (new_top > allocation_info_.limit()) return HeapObject();
+
+ allocation_info_.set_top(new_top);
+ if (filler_size > 0) {
+ *size_in_bytes += filler_size;
+ return Heap::PrecedeWithFiller(ReadOnlyRoots(heap()),
+ HeapObject::FromAddress(current_top),
+ filler_size);
+ }
+
+ return HeapObject::FromAddress(current_top);
+}
+
+AllocationResult PagedSpace::AllocateRawUnaligned(int size_in_bytes,
+ AllocationOrigin origin) {
+ if (!EnsureLinearAllocationArea(size_in_bytes, origin)) {
+ return AllocationResult::Retry(identity());
+ }
+ HeapObject object = AllocateLinearly(size_in_bytes);
+ DCHECK(!object.is_null());
+ MSAN_ALLOCATED_UNINITIALIZED_MEMORY(object.address(), size_in_bytes);
+
+ if (FLAG_trace_allocations_origins) {
+ UpdateAllocationOrigins(origin);
+ }
+
+ return object;
+}
+
+AllocationResult PagedSpace::AllocateRawAligned(int size_in_bytes,
+ AllocationAlignment alignment,
+ AllocationOrigin origin) {
+ DCHECK_EQ(identity(), OLD_SPACE);
+ int allocation_size = size_in_bytes;
+ HeapObject object = TryAllocateLinearlyAligned(&allocation_size, alignment);
+ if (object.is_null()) {
+ // We don't know exactly how much filler we need to align until space is
+ // allocated, so assume the worst case.
+ int filler_size = Heap::GetMaximumFillToAlign(alignment);
+ allocation_size += filler_size;
+ if (!EnsureLinearAllocationArea(allocation_size, origin)) {
+ return AllocationResult::Retry(identity());
+ }
+ allocation_size = size_in_bytes;
+ object = TryAllocateLinearlyAligned(&allocation_size, alignment);
+ DCHECK(!object.is_null());
+ }
+ MSAN_ALLOCATED_UNINITIALIZED_MEMORY(object.address(), size_in_bytes);
+
+ if (FLAG_trace_allocations_origins) {
+ UpdateAllocationOrigins(origin);
+ }
+
+ return object;
+}
+
+AllocationResult PagedSpace::AllocateRaw(int size_in_bytes,
+ AllocationAlignment alignment,
+ AllocationOrigin origin) {
+ if (top_on_previous_step_ && top() < top_on_previous_step_ &&
+ SupportsInlineAllocation()) {
+ // Generated code decreased the top() pointer to do folded allocations.
+ // The top_on_previous_step_ can be one byte beyond the current page.
+ DCHECK_NE(top(), kNullAddress);
+ DCHECK_EQ(Page::FromAllocationAreaAddress(top()),
+ Page::FromAllocationAreaAddress(top_on_previous_step_ - 1));
+ top_on_previous_step_ = top();
+ }
+ size_t bytes_since_last =
+ top_on_previous_step_ ? top() - top_on_previous_step_ : 0;
+
+ DCHECK_IMPLIES(!SupportsInlineAllocation(), bytes_since_last == 0);
+#ifdef V8_HOST_ARCH_32_BIT
+ AllocationResult result =
+ alignment != kWordAligned
+ ? AllocateRawAligned(size_in_bytes, alignment, origin)
+ : AllocateRawUnaligned(size_in_bytes, origin);
+#else
+ AllocationResult result = AllocateRawUnaligned(size_in_bytes, origin);
+#endif
+ HeapObject heap_obj;
+ if (!result.IsRetry() && result.To(&heap_obj) && !is_local_space()) {
+ AllocationStep(static_cast<int>(size_in_bytes + bytes_since_last),
+ heap_obj.address(), size_in_bytes);
+ StartNextInlineAllocationStep();
+ DCHECK_IMPLIES(
+ heap()->incremental_marking()->black_allocation(),
+ heap()->incremental_marking()->marking_state()->IsBlack(heap_obj));
+ }
+ return result;
+}
+
+} // namespace internal
+} // namespace v8
+
+#endif // V8_HEAP_PAGED_SPACES_INL_H_
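
The fast path in EnsureLinearAllocationArea/AllocateLinearly above is a plain bump-pointer allocator: advance top by the request size as long as it stays at or below the limit, otherwise fall back to the slow refill path. A rough standalone sketch with simplified types and no slow path:

#include <cstdint>
#include <optional>

class BumpArea {
 public:
  BumpArea(uintptr_t top, uintptr_t limit) : top_(top), limit_(limit) {}

  // Returns the start address of the new object, or nullopt when the linear
  // area is exhausted and the caller would have to refill it.
  std::optional<uintptr_t> Allocate(int size_in_bytes) {
    if (top_ + size_in_bytes > limit_) return std::nullopt;
    uintptr_t result = top_;
    top_ += size_in_bytes;
    return result;
  }

 private:
  uintptr_t top_;
  uintptr_t limit_;
};
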
diff --git a/chromium/v8/src/heap/paged-spaces.cc b/chromium/v8/src/heap/paged-spaces.cc
new file mode 100644
index 00000000000..dabdf2d5a0e
--- /dev/null
+++ b/chromium/v8/src/heap/paged-spaces.cc
@@ -0,0 +1,1047 @@
+// Copyright 2020 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/heap/paged-spaces.h"
+
+#include "src/base/optional.h"
+#include "src/base/platform/mutex.h"
+#include "src/execution/isolate.h"
+#include "src/execution/vm-state-inl.h"
+#include "src/heap/array-buffer-sweeper.h"
+#include "src/heap/array-buffer-tracker-inl.h"
+#include "src/heap/heap.h"
+#include "src/heap/incremental-marking.h"
+#include "src/heap/memory-allocator.h"
+#include "src/heap/memory-chunk-inl.h"
+#include "src/heap/paged-spaces-inl.h"
+#include "src/heap/read-only-heap.h"
+#include "src/logging/counters.h"
+#include "src/objects/string.h"
+#include "src/utils/utils.h"
+
+namespace v8 {
+namespace internal {
+
+// ----------------------------------------------------------------------------
+// PagedSpaceObjectIterator
+
+PagedSpaceObjectIterator::PagedSpaceObjectIterator(Heap* heap,
+ PagedSpace* space)
+ : cur_addr_(kNullAddress),
+ cur_end_(kNullAddress),
+ space_(space),
+ page_range_(space->first_page(), nullptr),
+ current_page_(page_range_.begin()) {
+ space_->MakeLinearAllocationAreaIterable();
+ heap->mark_compact_collector()->EnsureSweepingCompleted();
+}
+
+PagedSpaceObjectIterator::PagedSpaceObjectIterator(Heap* heap,
+ PagedSpace* space,
+ Page* page)
+ : cur_addr_(kNullAddress),
+ cur_end_(kNullAddress),
+ space_(space),
+ page_range_(page),
+ current_page_(page_range_.begin()) {
+ space_->MakeLinearAllocationAreaIterable();
+ heap->mark_compact_collector()->EnsureSweepingCompleted();
+#ifdef DEBUG
+ AllocationSpace owner = page->owner_identity();
+ DCHECK(owner == OLD_SPACE || owner == MAP_SPACE || owner == CODE_SPACE);
+#endif // DEBUG
+}
+
+PagedSpaceObjectIterator::PagedSpaceObjectIterator(OffThreadSpace* space)
+ : cur_addr_(kNullAddress),
+ cur_end_(kNullAddress),
+ space_(space),
+ page_range_(space->first_page(), nullptr),
+ current_page_(page_range_.begin()) {
+ space_->MakeLinearAllocationAreaIterable();
+}
+
+// We have hit the end of the current page and should advance to the next
+// page of objects.
+bool PagedSpaceObjectIterator::AdvanceToNextPage() {
+ DCHECK_EQ(cur_addr_, cur_end_);
+ if (current_page_ == page_range_.end()) return false;
+ Page* cur_page = *(current_page_++);
+
+ cur_addr_ = cur_page->area_start();
+ cur_end_ = cur_page->area_end();
+ DCHECK(cur_page->SweepingDone());
+ return true;
+}
+
+Page* PagedSpace::InitializePage(MemoryChunk* chunk) {
+ Page* page = static_cast<Page*>(chunk);
+ DCHECK_EQ(
+ MemoryChunkLayout::AllocatableMemoryInMemoryChunk(page->owner_identity()),
+ page->area_size());
+ // Make sure that categories are initialized before freeing the area.
+ page->ResetAllocationStatistics();
+ page->SetOldGenerationPageFlags(!is_off_thread_space() &&
+ heap()->incremental_marking()->IsMarking());
+ page->AllocateFreeListCategories();
+ page->InitializeFreeListCategories();
+ page->list_node().Initialize();
+ page->InitializationMemoryFence();
+ return page;
+}
+
+PagedSpace::PagedSpace(Heap* heap, AllocationSpace space,
+ Executability executable, FreeList* free_list,
+ LocalSpaceKind local_space_kind)
+ : SpaceWithLinearArea(heap, space, free_list),
+ executable_(executable),
+ local_space_kind_(local_space_kind) {
+ area_size_ = MemoryChunkLayout::AllocatableMemoryInMemoryChunk(space);
+ accounting_stats_.Clear();
+}
+
+void PagedSpace::TearDown() {
+ while (!memory_chunk_list_.Empty()) {
+ MemoryChunk* chunk = memory_chunk_list_.front();
+ memory_chunk_list_.Remove(chunk);
+ heap()->memory_allocator()->Free<MemoryAllocator::kFull>(chunk);
+ }
+ accounting_stats_.Clear();
+}
+
+void PagedSpace::RefillFreeList() {
+ // Any PagedSpace might invoke RefillFreeList. We filter all but our old
+ // generation spaces out.
+ if (identity() != OLD_SPACE && identity() != CODE_SPACE &&
+ identity() != MAP_SPACE) {
+ return;
+ }
+ DCHECK_NE(local_space_kind(), LocalSpaceKind::kOffThreadSpace);
+ DCHECK_IMPLIES(is_local_space(), is_compaction_space());
+ MarkCompactCollector* collector = heap()->mark_compact_collector();
+ size_t added = 0;
+
+ {
+ Page* p = nullptr;
+ while ((p = collector->sweeper()->GetSweptPageSafe(this)) != nullptr) {
+ // We regularly sweep NEVER_ALLOCATE_ON_PAGE pages. We drop the freelist
+ // entries here to make them unavailable for allocations.
+ if (p->IsFlagSet(Page::NEVER_ALLOCATE_ON_PAGE)) {
+ p->ForAllFreeListCategories([this](FreeListCategory* category) {
+ category->Reset(free_list());
+ });
+ }
+
+ // Also merge old-to-new remembered sets if not scavenging because of
+      // data races: One thread might iterate the remembered set, while another
+ // thread merges them.
+ if (local_space_kind() != LocalSpaceKind::kCompactionSpaceForScavenge) {
+ p->MergeOldToNewRememberedSets();
+ }
+
+      // Pages can actually change ownership only during compaction. This is
+ // safe because there exists no other competing action on the page links
+ // during compaction.
+ if (is_compaction_space()) {
+ DCHECK_NE(this, p->owner());
+ PagedSpace* owner = reinterpret_cast<PagedSpace*>(p->owner());
+ base::MutexGuard guard(owner->mutex());
+ owner->RefineAllocatedBytesAfterSweeping(p);
+ owner->RemovePage(p);
+ added += AddPage(p);
+ } else {
+ base::MutexGuard guard(mutex());
+ DCHECK_EQ(this, p->owner());
+ RefineAllocatedBytesAfterSweeping(p);
+ added += RelinkFreeListCategories(p);
+ }
+ added += p->wasted_memory();
+ if (is_compaction_space() && (added > kCompactionMemoryWanted)) break;
+ }
+ }
+}
+
+void OffThreadSpace::RefillFreeList() {
+ // We should never try to refill the free list in off-thread space, because
+ // we know it will always be fully linear.
+ UNREACHABLE();
+}
+
+void PagedSpace::MergeLocalSpace(LocalSpace* other) {
+ base::MutexGuard guard(mutex());
+
+ DCHECK(identity() == other->identity());
+
+ // Unmerged fields:
+ // area_size_
+ other->FreeLinearAllocationArea();
+
+ for (int i = static_cast<int>(AllocationOrigin::kFirstAllocationOrigin);
+ i <= static_cast<int>(AllocationOrigin::kLastAllocationOrigin); i++) {
+ allocations_origins_[i] += other->allocations_origins_[i];
+ }
+
+ // The linear allocation area of {other} should be destroyed now.
+ DCHECK_EQ(kNullAddress, other->top());
+ DCHECK_EQ(kNullAddress, other->limit());
+
+ bool merging_from_off_thread = other->is_off_thread_space();
+
+ // Move over pages.
+ for (auto it = other->begin(); it != other->end();) {
+ Page* p = *(it++);
+
+ if (merging_from_off_thread) {
+ DCHECK_NULL(p->sweeping_slot_set());
+
+ // Make sure the page is entirely white.
+ CHECK(heap()
+ ->incremental_marking()
+ ->non_atomic_marking_state()
+ ->bitmap(p)
+ ->IsClean());
+
+ p->SetOldGenerationPageFlags(heap()->incremental_marking()->IsMarking());
+ if (heap()->incremental_marking()->black_allocation()) {
+ p->CreateBlackArea(p->area_start(), p->HighWaterMark());
+ }
+ } else {
+ p->MergeOldToNewRememberedSets();
+ }
+
+ // Ensure that pages are initialized before objects on it are discovered by
+ // concurrent markers.
+ p->InitializationMemoryFence();
+
+ // Relinking requires the category to be unlinked.
+ other->RemovePage(p);
+ AddPage(p);
+ heap()->NotifyOldGenerationExpansion(identity(), p);
+ DCHECK_IMPLIES(
+ !p->IsFlagSet(Page::NEVER_ALLOCATE_ON_PAGE),
+ p->AvailableInFreeList() == p->AvailableInFreeListFromAllocatedBytes());
+
+    // TODO(leszeks): Here we should do an allocation step, but:
+ // 1. Allocation groups are currently not handled properly by the sampling
+ // allocation profiler, and
+ // 2. Observers might try to take the space lock, which isn't reentrant.
+ // We'll have to come up with a better solution for allocation stepping
+ // before shipping, which will likely be using LocalHeap.
+ }
+
+ DCHECK_EQ(0u, other->Size());
+ DCHECK_EQ(0u, other->Capacity());
+}
+
+size_t PagedSpace::CommittedPhysicalMemory() {
+ if (!base::OS::HasLazyCommits()) return CommittedMemory();
+ BasicMemoryChunk::UpdateHighWaterMark(allocation_info_.top());
+ size_t size = 0;
+ for (Page* page : *this) {
+ size += page->CommittedPhysicalMemory();
+ }
+ return size;
+}
+
+bool PagedSpace::ContainsSlow(Address addr) const {
+ Page* p = Page::FromAddress(addr);
+ for (const Page* page : *this) {
+ if (page == p) return true;
+ }
+ return false;
+}
+
+void PagedSpace::RefineAllocatedBytesAfterSweeping(Page* page) {
+ CHECK(page->SweepingDone());
+ auto marking_state =
+ heap()->incremental_marking()->non_atomic_marking_state();
+  // The live bytes on the page were accounted for in the space's
+  // allocated-bytes counter. After sweeping, allocated_bytes() contains the
+  // accurate live byte count on the page.
+ size_t old_counter = marking_state->live_bytes(page);
+ size_t new_counter = page->allocated_bytes();
+ DCHECK_GE(old_counter, new_counter);
+ if (old_counter > new_counter) {
+ DecreaseAllocatedBytes(old_counter - new_counter, page);
+ // Give the heap a chance to adjust counters in response to the
+ // more precise and smaller old generation size.
+ heap()->NotifyRefinedOldGenerationSize(old_counter - new_counter);
+ }
+ marking_state->SetLiveBytes(page, 0);
+}
+
+Page* PagedSpace::RemovePageSafe(int size_in_bytes) {
+ base::MutexGuard guard(mutex());
+ Page* page = free_list()->GetPageForSize(size_in_bytes);
+ if (!page) return nullptr;
+ RemovePage(page);
+ return page;
+}
+
+size_t PagedSpace::AddPage(Page* page) {
+ CHECK(page->SweepingDone());
+ page->set_owner(this);
+ memory_chunk_list_.PushBack(page);
+ AccountCommitted(page->size());
+ IncreaseCapacity(page->area_size());
+ IncreaseAllocatedBytes(page->allocated_bytes(), page);
+ for (size_t i = 0; i < ExternalBackingStoreType::kNumTypes; i++) {
+ ExternalBackingStoreType t = static_cast<ExternalBackingStoreType>(i);
+ IncrementExternalBackingStoreBytes(t, page->ExternalBackingStoreBytes(t));
+ }
+ return RelinkFreeListCategories(page);
+}
+
+void PagedSpace::RemovePage(Page* page) {
+ CHECK(page->SweepingDone());
+ memory_chunk_list_.Remove(page);
+ UnlinkFreeListCategories(page);
+ DecreaseAllocatedBytes(page->allocated_bytes(), page);
+ DecreaseCapacity(page->area_size());
+ AccountUncommitted(page->size());
+ for (size_t i = 0; i < ExternalBackingStoreType::kNumTypes; i++) {
+ ExternalBackingStoreType t = static_cast<ExternalBackingStoreType>(i);
+ DecrementExternalBackingStoreBytes(t, page->ExternalBackingStoreBytes(t));
+ }
+}
+
+size_t PagedSpace::ShrinkPageToHighWaterMark(Page* page) {
+ size_t unused = page->ShrinkToHighWaterMark();
+ accounting_stats_.DecreaseCapacity(static_cast<intptr_t>(unused));
+ AccountUncommitted(unused);
+ return unused;
+}
+
+void PagedSpace::ResetFreeList() {
+ for (Page* page : *this) {
+ free_list_->EvictFreeListItems(page);
+ }
+ DCHECK(free_list_->IsEmpty());
+}
+
+void PagedSpace::ShrinkImmortalImmovablePages() {
+ DCHECK(!heap()->deserialization_complete());
+ BasicMemoryChunk::UpdateHighWaterMark(allocation_info_.top());
+ FreeLinearAllocationArea();
+ ResetFreeList();
+ for (Page* page : *this) {
+ DCHECK(page->IsFlagSet(Page::NEVER_EVACUATE));
+ ShrinkPageToHighWaterMark(page);
+ }
+}
+
+Page* PagedSpace::AllocatePage() {
+ return heap()->memory_allocator()->AllocatePage(AreaSize(), this,
+ executable());
+}
+
+Page* PagedSpace::Expand() {
+ Page* page = AllocatePage();
+ if (page == nullptr) return nullptr;
+ AddPage(page);
+ Free(page->area_start(), page->area_size(),
+ SpaceAccountingMode::kSpaceAccounted);
+ return page;
+}
+
+Page* PagedSpace::ExpandBackground(LocalHeap* local_heap) {
+ Page* page = AllocatePage();
+ if (page == nullptr) return nullptr;
+ ParkedMutexGuard lock(local_heap, &allocation_mutex_);
+ AddPage(page);
+ Free(page->area_start(), page->area_size(),
+ SpaceAccountingMode::kSpaceAccounted);
+ return page;
+}
+
+int PagedSpace::CountTotalPages() {
+ int count = 0;
+ for (Page* page : *this) {
+ count++;
+ USE(page);
+ }
+ return count;
+}
+
+void PagedSpace::SetLinearAllocationArea(Address top, Address limit) {
+ SetTopAndLimit(top, limit);
+ if (top != kNullAddress && top != limit && !is_off_thread_space() &&
+ heap()->incremental_marking()->black_allocation()) {
+ Page::FromAllocationAreaAddress(top)->CreateBlackArea(top, limit);
+ }
+}
+
+void PagedSpace::DecreaseLimit(Address new_limit) {
+ Address old_limit = limit();
+ DCHECK_LE(top(), new_limit);
+ DCHECK_GE(old_limit, new_limit);
+ if (new_limit != old_limit) {
+ SetTopAndLimit(top(), new_limit);
+ Free(new_limit, old_limit - new_limit,
+ SpaceAccountingMode::kSpaceAccounted);
+ if (heap()->incremental_marking()->black_allocation()) {
+ Page::FromAllocationAreaAddress(new_limit)->DestroyBlackArea(new_limit,
+ old_limit);
+ }
+ }
+}
+
+void PagedSpace::MarkLinearAllocationAreaBlack() {
+ DCHECK(heap()->incremental_marking()->black_allocation());
+ Address current_top = top();
+ Address current_limit = limit();
+ if (current_top != kNullAddress && current_top != current_limit) {
+ Page::FromAllocationAreaAddress(current_top)
+ ->CreateBlackArea(current_top, current_limit);
+ }
+}
+
+void PagedSpace::UnmarkLinearAllocationArea() {
+ Address current_top = top();
+ Address current_limit = limit();
+ if (current_top != kNullAddress && current_top != current_limit) {
+ Page::FromAllocationAreaAddress(current_top)
+ ->DestroyBlackArea(current_top, current_limit);
+ }
+}
+
+void PagedSpace::MakeLinearAllocationAreaIterable() {
+ Address current_top = top();
+ Address current_limit = limit();
+ if (current_top != kNullAddress && current_top != current_limit) {
+ base::Optional<CodePageMemoryModificationScope> optional_scope;
+
+ if (identity() == CODE_SPACE) {
+ MemoryChunk* chunk = MemoryChunk::FromAddress(current_top);
+ optional_scope.emplace(chunk);
+ }
+
+ heap_->CreateFillerObjectAt(current_top,
+ static_cast<int>(current_limit - current_top),
+ ClearRecordedSlots::kNo);
+ }
+}
+
+void PagedSpace::FreeLinearAllocationArea() {
+ // Mark the old linear allocation area with a free space map so it can be
+ // skipped when scanning the heap.
+ Address current_top = top();
+ Address current_limit = limit();
+ if (current_top == kNullAddress) {
+ DCHECK_EQ(kNullAddress, current_limit);
+ return;
+ }
+
+ if (!is_off_thread_space() &&
+ heap()->incremental_marking()->black_allocation()) {
+ Page* page = Page::FromAllocationAreaAddress(current_top);
+
+ // Clear the bits in the unused black area.
+ if (current_top != current_limit) {
+ IncrementalMarking::MarkingState* marking_state =
+ heap()->incremental_marking()->marking_state();
+ marking_state->bitmap(page)->ClearRange(
+ page->AddressToMarkbitIndex(current_top),
+ page->AddressToMarkbitIndex(current_limit));
+ marking_state->IncrementLiveBytes(
+ page, -static_cast<int>(current_limit - current_top));
+ }
+ }
+
+ if (!is_local_space()) {
+ InlineAllocationStep(current_top, kNullAddress, kNullAddress, 0);
+ }
+
+ SetTopAndLimit(kNullAddress, kNullAddress);
+ DCHECK_GE(current_limit, current_top);
+
+ // The code page of the linear allocation area needs to be unprotected
+ // because we are going to write a filler into that memory area below.
+ if (identity() == CODE_SPACE) {
+ heap()->UnprotectAndRegisterMemoryChunk(
+ MemoryChunk::FromAddress(current_top));
+ }
+ Free(current_top, current_limit - current_top,
+ SpaceAccountingMode::kSpaceAccounted);
+}
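
FreeLinearAllocationArea above retires the current linear area: whatever lies between top and limit goes back to the free list (as a free-space object), and top/limit are reset to null. A simplified picture using an invented FreeListStub, not V8's free list:

#include <cstddef>
#include <cstdint>

struct FreeListStub {
  size_t available = 0;
  void Free(uintptr_t /*start*/, size_t size) { available += size; }
};

struct LinearAllocationArea {
  uintptr_t top = 0;
  uintptr_t limit = 0;

  // Returns the unused tail to the free list and resets the area, mirroring
  // the kNullAddress handling above.
  void Retire(FreeListStub* free_list) {
    if (top == 0) return;  // nothing to retire
    free_list->Free(top, limit - top);
    top = 0;
    limit = 0;
  }
};
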
+
+void PagedSpace::ReleasePage(Page* page) {
+ DCHECK_EQ(
+ 0, heap()->incremental_marking()->non_atomic_marking_state()->live_bytes(
+ page));
+ DCHECK_EQ(page->owner(), this);
+
+ free_list_->EvictFreeListItems(page);
+
+ if (Page::FromAllocationAreaAddress(allocation_info_.top()) == page) {
+ DCHECK(!top_on_previous_step_);
+ allocation_info_.Reset(kNullAddress, kNullAddress);
+ }
+
+ heap()->isolate()->RemoveCodeMemoryChunk(page);
+
+ AccountUncommitted(page->size());
+ accounting_stats_.DecreaseCapacity(page->area_size());
+ heap()->memory_allocator()->Free<MemoryAllocator::kPreFreeAndQueue>(page);
+}
+
+void PagedSpace::SetReadable() {
+ DCHECK(identity() == CODE_SPACE);
+ for (Page* page : *this) {
+ CHECK(heap()->memory_allocator()->IsMemoryChunkExecutable(page));
+ page->SetReadable();
+ }
+}
+
+void PagedSpace::SetReadAndExecutable() {
+ DCHECK(identity() == CODE_SPACE);
+ for (Page* page : *this) {
+ CHECK(heap()->memory_allocator()->IsMemoryChunkExecutable(page));
+ page->SetReadAndExecutable();
+ }
+}
+
+void PagedSpace::SetReadAndWritable() {
+ DCHECK(identity() == CODE_SPACE);
+ for (Page* page : *this) {
+ CHECK(heap()->memory_allocator()->IsMemoryChunkExecutable(page));
+ page->SetReadAndWritable();
+ }
+}
+
+std::unique_ptr<ObjectIterator> PagedSpace::GetObjectIterator(Heap* heap) {
+ return std::unique_ptr<ObjectIterator>(
+ new PagedSpaceObjectIterator(heap, this));
+}
+
+bool PagedSpace::RefillLinearAllocationAreaFromFreeList(
+ size_t size_in_bytes, AllocationOrigin origin) {
+ DCHECK(IsAligned(size_in_bytes, kTaggedSize));
+ DCHECK_LE(top(), limit());
+#ifdef DEBUG
+ if (top() != limit()) {
+ DCHECK_EQ(Page::FromAddress(top()), Page::FromAddress(limit() - 1));
+ }
+#endif
+  // Don't allocate from the free list if there is linear space available.
+ DCHECK_LT(static_cast<size_t>(limit() - top()), size_in_bytes);
+
+ // Mark the old linear allocation area with a free space map so it can be
+ // skipped when scanning the heap. This also puts it back in the free list
+ // if it is big enough.
+ FreeLinearAllocationArea();
+
+ if (!is_local_space()) {
+ heap()->StartIncrementalMarkingIfAllocationLimitIsReached(
+ heap()->GCFlagsForIncrementalMarking(),
+ kGCCallbackScheduleIdleGarbageCollection);
+ }
+
+ size_t new_node_size = 0;
+ FreeSpace new_node =
+ free_list_->Allocate(size_in_bytes, &new_node_size, origin);
+ if (new_node.is_null()) return false;
+ DCHECK_GE(new_node_size, size_in_bytes);
+
+ // The old-space-step might have finished sweeping and restarted marking.
+ // Verify that it did not turn the page of the new node into an evacuation
+ // candidate.
+ DCHECK(!MarkCompactCollector::IsOnEvacuationCandidate(new_node));
+
+ // Memory in the linear allocation area is counted as allocated. We may free
+ // a little of this again immediately - see below.
+ Page* page = Page::FromHeapObject(new_node);
+ IncreaseAllocatedBytes(new_node_size, page);
+
+ Address start = new_node.address();
+ Address end = new_node.address() + new_node_size;
+ Address limit = ComputeLimit(start, end, size_in_bytes);
+ DCHECK_LE(limit, end);
+ DCHECK_LE(size_in_bytes, limit - start);
+ if (limit != end) {
+ if (identity() == CODE_SPACE) {
+ heap()->UnprotectAndRegisterMemoryChunk(page);
+ }
+ Free(limit, end - limit, SpaceAccountingMode::kSpaceAccounted);
+ }
+ SetLinearAllocationArea(start, limit);
+
+ return true;
+}
+
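+// Background-thread slow path: first retries the free list, then helps the
+// concurrent sweeper, and finally expands the space if the heap allows it.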
+base::Optional<std::pair<Address, size_t>>
+PagedSpace::SlowGetLinearAllocationAreaBackground(LocalHeap* local_heap,
+ size_t min_size_in_bytes,
+ size_t max_size_in_bytes,
+ AllocationAlignment alignment,
+ AllocationOrigin origin) {
+ DCHECK(!is_local_space() && identity() == OLD_SPACE);
+ DCHECK_EQ(origin, AllocationOrigin::kRuntime);
+
+ auto result = TryAllocationFromFreeListBackground(
+ local_heap, min_size_in_bytes, max_size_in_bytes, alignment, origin);
+ if (result) return result;
+
+ MarkCompactCollector* collector = heap()->mark_compact_collector();
+ // Sweeping is still in progress.
+ if (collector->sweeping_in_progress()) {
+ // First try to refill the free-list, concurrent sweeper threads
+ // may have freed some objects in the meantime.
+ {
+ ParkedMutexGuard lock(local_heap, &allocation_mutex_);
+ RefillFreeList();
+ }
+
+ // Retry the free list allocation.
+ auto result = TryAllocationFromFreeListBackground(
+ local_heap, min_size_in_bytes, max_size_in_bytes, alignment, origin);
+ if (result) return result;
+
+ Sweeper::FreeSpaceMayContainInvalidatedSlots
+ invalidated_slots_in_free_space =
+ Sweeper::FreeSpaceMayContainInvalidatedSlots::kNo;
+
+ const int kMaxPagesToSweep = 1;
+ int max_freed = collector->sweeper()->ParallelSweepSpace(
+ identity(), static_cast<int>(min_size_in_bytes), kMaxPagesToSweep,
+ invalidated_slots_in_free_space);
+
+ {
+ ParkedMutexGuard lock(local_heap, &allocation_mutex_);
+ RefillFreeList();
+ }
+
+ if (static_cast<size_t>(max_freed) >= min_size_in_bytes) {
+ auto result = TryAllocationFromFreeListBackground(
+ local_heap, min_size_in_bytes, max_size_in_bytes, alignment, origin);
+ if (result) return result;
+ }
+ }
+
+ if (heap()->ShouldExpandOldGenerationOnSlowAllocation(local_heap) &&
+ heap()->CanExpandOldGenerationBackground(AreaSize()) &&
+ ExpandBackground(local_heap)) {
+ DCHECK((CountTotalPages() > 1) ||
+ (min_size_in_bytes <= free_list_->Available()));
+ auto result = TryAllocationFromFreeListBackground(
+ local_heap, min_size_in_bytes, max_size_in_bytes, alignment, origin);
+ if (result) return result;
+ }
+
+ // TODO(dinfuehr): Complete sweeping here and try allocation again.
+
+ return {};
+}
+
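+// Carves an allocation area of between min_size_in_bytes and
+// max_size_in_bytes out of a free-list node while holding the allocation
+// mutex; the tail beyond the used size is returned to the free list.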
+base::Optional<std::pair<Address, size_t>>
+PagedSpace::TryAllocationFromFreeListBackground(LocalHeap* local_heap,
+ size_t min_size_in_bytes,
+ size_t max_size_in_bytes,
+ AllocationAlignment alignment,
+ AllocationOrigin origin) {
+ ParkedMutexGuard lock(local_heap, &allocation_mutex_);
+ DCHECK_LE(min_size_in_bytes, max_size_in_bytes);
+ DCHECK_EQ(identity(), OLD_SPACE);
+
+ size_t new_node_size = 0;
+ FreeSpace new_node =
+ free_list_->Allocate(min_size_in_bytes, &new_node_size, origin);
+ if (new_node.is_null()) return {};
+ DCHECK_GE(new_node_size, min_size_in_bytes);
+
+ // The old-space-step might have finished sweeping and restarted marking.
+ // Verify that it did not turn the page of the new node into an evacuation
+ // candidate.
+ DCHECK(!MarkCompactCollector::IsOnEvacuationCandidate(new_node));
+
+ // Memory in the linear allocation area is counted as allocated. We may free
+ // a little of this again immediately - see below.
+ Page* page = Page::FromHeapObject(new_node);
+ IncreaseAllocatedBytes(new_node_size, page);
+
+ heap()->StartIncrementalMarkingIfAllocationLimitIsReachedBackground();
+
+ size_t used_size_in_bytes = Min(new_node_size, max_size_in_bytes);
+
+ Address start = new_node.address();
+ Address end = new_node.address() + new_node_size;
+ Address limit = new_node.address() + used_size_in_bytes;
+ DCHECK_LE(limit, end);
+ DCHECK_LE(min_size_in_bytes, limit - start);
+ if (limit != end) {
+ Free(limit, end - limit, SpaceAccountingMode::kSpaceAccounted);
+ }
+
+ return std::make_pair(start, used_size_in_bytes);
+}
+
+#ifdef DEBUG
+void PagedSpace::Print() {}
+#endif
+
+#ifdef VERIFY_HEAP
+void PagedSpace::Verify(Isolate* isolate, ObjectVisitor* visitor) {
+ bool allocation_pointer_found_in_space =
+ (allocation_info_.top() == allocation_info_.limit());
+ size_t external_space_bytes[kNumTypes];
+ size_t external_page_bytes[kNumTypes];
+
+ for (int i = 0; i < kNumTypes; i++) {
+ external_space_bytes[static_cast<ExternalBackingStoreType>(i)] = 0;
+ }
+
+ for (Page* page : *this) {
+ CHECK_EQ(page->owner(), this);
+
+ for (int i = 0; i < kNumTypes; i++) {
+ external_page_bytes[static_cast<ExternalBackingStoreType>(i)] = 0;
+ }
+
+ if (page == Page::FromAllocationAreaAddress(allocation_info_.top())) {
+ allocation_pointer_found_in_space = true;
+ }
+ CHECK(page->SweepingDone());
+ PagedSpaceObjectIterator it(isolate->heap(), this, page);
+ Address end_of_previous_object = page->area_start();
+ Address top = page->area_end();
+
+ for (HeapObject object = it.Next(); !object.is_null(); object = it.Next()) {
+ CHECK(end_of_previous_object <= object.address());
+
+ // The first word should be a map, and we expect all map pointers to
+ // be in map space.
+ Map map = object.map();
+ CHECK(map.IsMap());
+ CHECK(ReadOnlyHeap::Contains(map) ||
+ isolate->heap()->map_space()->Contains(map));
+
+ // Perform space-specific object verification.
+ VerifyObject(object);
+
+ // The object itself should look OK.
+ object.ObjectVerify(isolate);
+
+ if (identity() != RO_SPACE && !FLAG_verify_heap_skip_remembered_set) {
+ isolate->heap()->VerifyRememberedSetFor(object);
+ }
+
+ // All the interior pointers should be contained in the heap.
+ int size = object.Size();
+ object.IterateBody(map, size, visitor);
+ CHECK(object.address() + size <= top);
+ end_of_previous_object = object.address() + size;
+
+ if (object.IsExternalString()) {
+ ExternalString external_string = ExternalString::cast(object);
+ size_t size = external_string.ExternalPayloadSize();
+ external_page_bytes[ExternalBackingStoreType::kExternalString] += size;
+ } else if (object.IsJSArrayBuffer()) {
+ JSArrayBuffer array_buffer = JSArrayBuffer::cast(object);
+ if (ArrayBufferTracker::IsTracked(array_buffer)) {
+ size_t size =
+ ArrayBufferTracker::Lookup(isolate->heap(), array_buffer)
+ ->PerIsolateAccountingLength();
+ external_page_bytes[ExternalBackingStoreType::kArrayBuffer] += size;
+ }
+ }
+ }
+ for (int i = 0; i < kNumTypes; i++) {
+ ExternalBackingStoreType t = static_cast<ExternalBackingStoreType>(i);
+ CHECK_EQ(external_page_bytes[t], page->ExternalBackingStoreBytes(t));
+ external_space_bytes[t] += external_page_bytes[t];
+ }
+ }
+ for (int i = 0; i < kNumTypes; i++) {
+ if (V8_ARRAY_BUFFER_EXTENSION_BOOL &&
+ i == ExternalBackingStoreType::kArrayBuffer)
+ continue;
+ ExternalBackingStoreType t = static_cast<ExternalBackingStoreType>(i);
+ CHECK_EQ(external_space_bytes[t], ExternalBackingStoreBytes(t));
+ }
+ CHECK(allocation_pointer_found_in_space);
+
+ if (identity() == OLD_SPACE && V8_ARRAY_BUFFER_EXTENSION_BOOL) {
+ size_t bytes = heap()->array_buffer_sweeper()->old().BytesSlow();
+ CHECK_EQ(bytes,
+ ExternalBackingStoreBytes(ExternalBackingStoreType::kArrayBuffer));
+ }
+
+#ifdef DEBUG
+ VerifyCountersAfterSweeping(isolate->heap());
+#endif
+}
+
+void PagedSpace::VerifyLiveBytes() {
+ IncrementalMarking::MarkingState* marking_state =
+ heap()->incremental_marking()->marking_state();
+ for (Page* page : *this) {
+ CHECK(page->SweepingDone());
+ PagedSpaceObjectIterator it(heap(), this, page);
+ int black_size = 0;
+ for (HeapObject object = it.Next(); !object.is_null(); object = it.Next()) {
+ // All the interior pointers should be contained in the heap.
+ if (marking_state->IsBlack(object)) {
+ black_size += object.Size();
+ }
+ }
+ CHECK_LE(black_size, marking_state->live_bytes(page));
+ }
+}
+#endif // VERIFY_HEAP
+
+#ifdef DEBUG
+void PagedSpace::VerifyCountersAfterSweeping(Heap* heap) {
+ size_t total_capacity = 0;
+ size_t total_allocated = 0;
+ for (Page* page : *this) {
+ DCHECK(page->SweepingDone());
+ total_capacity += page->area_size();
+ PagedSpaceObjectIterator it(heap, this, page);
+ size_t real_allocated = 0;
+ for (HeapObject object = it.Next(); !object.is_null(); object = it.Next()) {
+ if (!object.IsFreeSpaceOrFiller()) {
+ real_allocated += object.Size();
+ }
+ }
+ total_allocated += page->allocated_bytes();
+    // The real size can be smaller than the accounted size if array trimming
+    // or object slack tracking happened after sweeping.
+ DCHECK_LE(real_allocated, accounting_stats_.AllocatedOnPage(page));
+ DCHECK_EQ(page->allocated_bytes(), accounting_stats_.AllocatedOnPage(page));
+ }
+ DCHECK_EQ(total_capacity, accounting_stats_.Capacity());
+ DCHECK_EQ(total_allocated, accounting_stats_.Size());
+}
+
+void PagedSpace::VerifyCountersBeforeConcurrentSweeping() {
+ // We need to refine the counters on pages that are already swept and have
+ // not been moved over to the actual space. Otherwise, the AccountingStats
+  // are just an over-approximation.
+ RefillFreeList();
+
+ size_t total_capacity = 0;
+ size_t total_allocated = 0;
+ auto marking_state =
+ heap()->incremental_marking()->non_atomic_marking_state();
+ for (Page* page : *this) {
+ size_t page_allocated =
+ page->SweepingDone()
+ ? page->allocated_bytes()
+ : static_cast<size_t>(marking_state->live_bytes(page));
+ total_capacity += page->area_size();
+ total_allocated += page_allocated;
+ DCHECK_EQ(page_allocated, accounting_stats_.AllocatedOnPage(page));
+ }
+ DCHECK_EQ(total_capacity, accounting_stats_.Capacity());
+ DCHECK_EQ(total_allocated, accounting_stats_.Size());
+}
+#endif
+
+void PagedSpace::UpdateInlineAllocationLimit(size_t min_size) {
+ Address new_limit = ComputeLimit(top(), limit(), min_size);
+ DCHECK_LE(new_limit, limit());
+ DecreaseLimit(new_limit);
+}
+
+// -----------------------------------------------------------------------------
+// OldSpace implementation
+
+void PagedSpace::PrepareForMarkCompact() {
+ // We don't have a linear allocation area while sweeping. It will be restored
+ // on the first allocation after the sweep.
+ FreeLinearAllocationArea();
+
+ // Clear the free list before a full GC---it will be rebuilt afterward.
+ free_list_->Reset();
+}
+
+size_t PagedSpace::SizeOfObjects() {
+ CHECK_GE(limit(), top());
+ DCHECK_GE(Size(), static_cast<size_t>(limit() - top()));
+ return Size() - (limit() - top());
+}
+
+bool PagedSpace::EnsureSweptAndRetryAllocation(int size_in_bytes,
+ AllocationOrigin origin) {
+ DCHECK(!is_local_space());
+ MarkCompactCollector* collector = heap()->mark_compact_collector();
+ if (collector->sweeping_in_progress()) {
+ // Wait for the sweeper threads here and complete the sweeping phase.
+ collector->EnsureSweepingCompleted();
+
+ // After waiting for the sweeper threads, there may be new free-list
+ // entries.
+ return RefillLinearAllocationAreaFromFreeList(size_in_bytes, origin);
+ }
+ return false;
+}
+
+bool PagedSpace::SlowRefillLinearAllocationArea(int size_in_bytes,
+ AllocationOrigin origin) {
+ VMState<GC> state(heap()->isolate());
+ RuntimeCallTimerScope runtime_timer(
+ heap()->isolate(), RuntimeCallCounterId::kGC_Custom_SlowAllocateRaw);
+ base::Optional<base::MutexGuard> optional_mutex;
+
+ if (FLAG_concurrent_allocation && origin != AllocationOrigin::kGC &&
+ identity() == OLD_SPACE) {
+ optional_mutex.emplace(&allocation_mutex_);
+ }
+
+ return RawSlowRefillLinearAllocationArea(size_in_bytes, origin);
+}
+
+bool CompactionSpace::SlowRefillLinearAllocationArea(int size_in_bytes,
+ AllocationOrigin origin) {
+ return RawSlowRefillLinearAllocationArea(size_in_bytes, origin);
+}
+
+bool OffThreadSpace::SlowRefillLinearAllocationArea(int size_in_bytes,
+ AllocationOrigin origin) {
+ if (RefillLinearAllocationAreaFromFreeList(size_in_bytes, origin))
+ return true;
+
+ if (heap()->CanExpandOldGenerationBackground(size_in_bytes) && Expand()) {
+ DCHECK((CountTotalPages() > 1) ||
+ (static_cast<size_t>(size_in_bytes) <= free_list_->Available()));
+ return RefillLinearAllocationAreaFromFreeList(
+ static_cast<size_t>(size_in_bytes), origin);
+ }
+
+ return false;
+}
+
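+// Common slow path shared by all paged spaces: retry the free list, help or
+// wait for the sweeper, steal a page from the main space (compaction spaces
+// only), or expand the space.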
+bool PagedSpace::RawSlowRefillLinearAllocationArea(int size_in_bytes,
+ AllocationOrigin origin) {
+ // Non-compaction local spaces are not supported.
+ DCHECK_IMPLIES(is_local_space(), is_compaction_space());
+
+ // Allocation in this space has failed.
+ DCHECK_GE(size_in_bytes, 0);
+ const int kMaxPagesToSweep = 1;
+
+ if (RefillLinearAllocationAreaFromFreeList(size_in_bytes, origin))
+ return true;
+
+ MarkCompactCollector* collector = heap()->mark_compact_collector();
+ // Sweeping is still in progress.
+ if (collector->sweeping_in_progress()) {
+ if (FLAG_concurrent_sweeping && !is_compaction_space() &&
+ !collector->sweeper()->AreSweeperTasksRunning()) {
+ collector->EnsureSweepingCompleted();
+ }
+
+ // First try to refill the free-list, concurrent sweeper threads
+ // may have freed some objects in the meantime.
+ RefillFreeList();
+
+ // Retry the free list allocation.
+ if (RefillLinearAllocationAreaFromFreeList(
+ static_cast<size_t>(size_in_bytes), origin))
+ return true;
+
+ if (SweepAndRetryAllocation(size_in_bytes, kMaxPagesToSweep, size_in_bytes,
+ origin))
+ return true;
+ }
+
+ if (is_compaction_space()) {
+ // The main thread may have acquired all swept pages. Try to steal from
+ // it. This can only happen during young generation evacuation.
+ PagedSpace* main_space = heap()->paged_space(identity());
+ Page* page = main_space->RemovePageSafe(size_in_bytes);
+ if (page != nullptr) {
+ AddPage(page);
+ if (RefillLinearAllocationAreaFromFreeList(
+ static_cast<size_t>(size_in_bytes), origin))
+ return true;
+ }
+ }
+
+ if (heap()->ShouldExpandOldGenerationOnSlowAllocation() &&
+ heap()->CanExpandOldGeneration(AreaSize())) {
+ Page* page = Expand();
+ if (page) {
+ if (!is_compaction_space()) {
+ heap()->NotifyOldGenerationExpansion(identity(), page);
+ }
+ DCHECK((CountTotalPages() > 1) ||
+ (static_cast<size_t>(size_in_bytes) <= free_list_->Available()));
+ return RefillLinearAllocationAreaFromFreeList(
+ static_cast<size_t>(size_in_bytes), origin);
+ }
+ }
+
+ if (is_compaction_space()) {
+ return SweepAndRetryAllocation(0, 0, size_in_bytes, origin);
+
+ } else {
+    // If sweeper threads are active, wait for them at that point and steal
+    // elements from their free-lists. Allocation may still fail here, which
+    // would indicate that there is not enough memory for the given allocation.
+ return EnsureSweptAndRetryAllocation(size_in_bytes, origin);
+ }
+}
+
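+// Sweeps up to max_pages pages in parallel and, if at least size_in_bytes
+// bytes were freed, retries the free-list refill.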
+bool PagedSpace::SweepAndRetryAllocation(int required_freed_bytes,
+ int max_pages, int size_in_bytes,
+ AllocationOrigin origin) {
+ // Cleanup invalidated old-to-new refs for compaction space in the
+ // final atomic pause.
+ Sweeper::FreeSpaceMayContainInvalidatedSlots invalidated_slots_in_free_space =
+ is_compaction_space() ? Sweeper::FreeSpaceMayContainInvalidatedSlots::kYes
+ : Sweeper::FreeSpaceMayContainInvalidatedSlots::kNo;
+
+ MarkCompactCollector* collector = heap()->mark_compact_collector();
+ if (collector->sweeping_in_progress()) {
+ int max_freed = collector->sweeper()->ParallelSweepSpace(
+ identity(), required_freed_bytes, max_pages,
+ invalidated_slots_in_free_space);
+ RefillFreeList();
+ if (max_freed >= size_in_bytes)
+ return RefillLinearAllocationAreaFromFreeList(size_in_bytes, origin);
+ }
+ return false;
+}
+
+// -----------------------------------------------------------------------------
+// MapSpace implementation
+
+// TODO(dmercadier): use a heap instead of sorting like that.
+// Using a heap will have multiple benefits:
+// - for now, SortFreeList is only called after sweeping, which is somewhat
+// late. With a heap, sorting could be done online: FreeListCategories would
+// be inserted into a heap (i.e., kept in sorted order).
+// - SortFreeList is a bit fragile: any change to FreeListMap (or to
+// MapSpace::free_list_) could break it.
+void MapSpace::SortFreeList() {
+ using LiveBytesPagePair = std::pair<size_t, Page*>;
+ std::vector<LiveBytesPagePair> pages;
+ pages.reserve(CountTotalPages());
+
+ for (Page* p : *this) {
+ free_list()->RemoveCategory(p->free_list_category(kFirstCategory));
+ pages.push_back(std::make_pair(p->allocated_bytes(), p));
+ }
+
+ // Sorting by least-allocated-bytes first.
+ std::sort(pages.begin(), pages.end(),
+ [](const LiveBytesPagePair& a, const LiveBytesPagePair& b) {
+ return a.first < b.first;
+ });
+
+ for (LiveBytesPagePair const& p : pages) {
+    // Since AddCategory inserts at the head position, it reverses the order
+    // produced by the sort above: the least-allocated-bytes page is added
+    // first and therefore ends up last, while the most-allocated-bytes page
+    // ends up first.
+ free_list()->AddCategory(p.second->free_list_category(kFirstCategory));
+ }
+}
+
+#ifdef VERIFY_HEAP
+void MapSpace::VerifyObject(HeapObject object) { CHECK(object.IsMap()); }
+#endif
+
+} // namespace internal
+} // namespace v8
diff --git a/chromium/v8/src/heap/paged-spaces.h b/chromium/v8/src/heap/paged-spaces.h
new file mode 100644
index 00000000000..395ff293433
--- /dev/null
+++ b/chromium/v8/src/heap/paged-spaces.h
@@ -0,0 +1,588 @@
+// Copyright 2020 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_HEAP_PAGED_SPACES_H_
+#define V8_HEAP_PAGED_SPACES_H_
+
+#include <memory>
+#include <utility>
+
+#include "src/base/bounds.h"
+#include "src/base/macros.h"
+#include "src/base/optional.h"
+#include "src/base/platform/mutex.h"
+#include "src/common/globals.h"
+#include "src/flags/flags.h"
+#include "src/heap/allocation-stats.h"
+#include "src/heap/memory-chunk.h"
+#include "src/heap/spaces.h"
+
+namespace v8 {
+namespace internal {
+
+class Heap;
+class HeapObject;
+class Isolate;
+class LocalSpace;
+class OffThreadSpace;
+class ObjectVisitor;
+
+// -----------------------------------------------------------------------------
+// Heap object iterator in old/map spaces.
+//
+// A PagedSpaceObjectIterator iterates objects from the bottom of the given
+// space to its top or from the bottom of the given page to its top.
+//
+// If objects are allocated in the page during iteration, the iterator may or
+// may not visit those objects. The caller must create a new iterator to be
+// sure of visiting them.
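+//
+// A minimal usage sketch (assuming a Heap* and PagedSpace* that are already
+// set up):
+//
+//   PagedSpaceObjectIterator it(heap, space);
+//   for (HeapObject obj = it.Next(); !obj.is_null(); obj = it.Next()) {
+//     // ... visit obj ...
+//   }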
+class V8_EXPORT_PRIVATE PagedSpaceObjectIterator : public ObjectIterator {
+ public:
+ // Creates a new object iterator in a given space.
+ PagedSpaceObjectIterator(Heap* heap, PagedSpace* space);
+ PagedSpaceObjectIterator(Heap* heap, PagedSpace* space, Page* page);
+
+ // Creates a new object iterator in a given off-thread space.
+ explicit PagedSpaceObjectIterator(OffThreadSpace* space);
+
+ // Advance to the next object, skipping free spaces and other fillers and
+ // skipping the special garbage section of which there is one per space.
+  // Returns a null HeapObject when the iteration has ended.
+ inline HeapObject Next() override;
+
+ private:
+ // Fast (inlined) path of next().
+ inline HeapObject FromCurrentPage();
+
+  // Slow path of next(): advances to the next page. Returns false if the
+  // iteration has ended.
+ bool AdvanceToNextPage();
+
+ Address cur_addr_; // Current iteration point.
+ Address cur_end_; // End iteration point.
+ PagedSpace* space_;
+ PageRange page_range_;
+ PageRange::iterator current_page_;
+};
+
+class V8_EXPORT_PRIVATE PagedSpace
+ : NON_EXPORTED_BASE(public SpaceWithLinearArea) {
+ public:
+ using iterator = PageIterator;
+ using const_iterator = ConstPageIterator;
+
+ static const size_t kCompactionMemoryWanted = 500 * KB;
+
+ // Creates a space with an id.
+ PagedSpace(Heap* heap, AllocationSpace id, Executability executable,
+ FreeList* free_list,
+ LocalSpaceKind local_space_kind = LocalSpaceKind::kNone);
+
+ ~PagedSpace() override { TearDown(); }
+
+ // Checks whether an object/address is in this space.
+ inline bool Contains(Address a) const;
+ inline bool Contains(Object o) const;
+ bool ContainsSlow(Address addr) const;
+
+ // Does the space need executable memory?
+ Executability executable() { return executable_; }
+
+ // Prepares for a mark-compact GC.
+ void PrepareForMarkCompact();
+
+ // Current capacity without growing (Size() + Available()).
+ size_t Capacity() { return accounting_stats_.Capacity(); }
+
+ // Approximate amount of physical memory committed for this space.
+ size_t CommittedPhysicalMemory() override;
+
+ // Sets the capacity, the available space and the wasted space to zero.
+ // The stats are rebuilt during sweeping by adding each page to the
+ // capacity and the size when it is encountered. As free spaces are
+  // discovered during sweeping, they are subtracted from the size and added
+ // to the available and wasted totals. The free list is cleared as well.
+ void ClearAllocatorState() {
+ accounting_stats_.ClearSize();
+ free_list_->Reset();
+ }
+
+ // Available bytes without growing. These are the bytes on the free list.
+ // The bytes in the linear allocation area are not included in this total
+ // because updating the stats would slow down allocation. New pages are
+ // immediately added to the free list so they show up here.
+ size_t Available() override { return free_list_->Available(); }
+
+ // Allocated bytes in this space. Garbage bytes that were not found due to
+ // concurrent sweeping are counted as being allocated! The bytes in the
+ // current linear allocation area (between top and limit) are also counted
+ // here.
+ size_t Size() override { return accounting_stats_.Size(); }
+
+  // As Size(), but the bytes in lazily swept pages are estimated and the
+  // bytes in the current linear allocation area are not included.
+ size_t SizeOfObjects() override;
+
+ // Wasted bytes in this space. These are just the bytes that were thrown away
+ // due to being too small to use for allocation.
+ virtual size_t Waste() { return free_list_->wasted_bytes(); }
+
+ // Allocate the requested number of bytes in the space if possible, return a
+ // failure object if not.
+ V8_WARN_UNUSED_RESULT inline AllocationResult AllocateRawUnaligned(
+ int size_in_bytes, AllocationOrigin origin = AllocationOrigin::kRuntime);
+
+ // Allocate the requested number of bytes in the space double aligned if
+ // possible, return a failure object if not.
+ V8_WARN_UNUSED_RESULT inline AllocationResult AllocateRawAligned(
+ int size_in_bytes, AllocationAlignment alignment,
+ AllocationOrigin origin = AllocationOrigin::kRuntime);
+
+ // Allocate the requested number of bytes in the space and consider allocation
+ // alignment if needed.
+ V8_WARN_UNUSED_RESULT inline AllocationResult AllocateRaw(
+ int size_in_bytes, AllocationAlignment alignment,
+ AllocationOrigin origin = AllocationOrigin::kRuntime);
+
+ // Allocate the requested number of bytes in the space from a background
+ // thread.
+ V8_WARN_UNUSED_RESULT base::Optional<std::pair<Address, size_t>>
+ SlowGetLinearAllocationAreaBackground(LocalHeap* local_heap,
+ size_t min_size_in_bytes,
+ size_t max_size_in_bytes,
+ AllocationAlignment alignment,
+ AllocationOrigin origin);
+
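+  // Frees the range [start, start + size_in_bytes): a filler object is
+  // written over it and the memory is handed back to the free list, with
+  // (kSpaceAccounted) or without updating this space's allocation accounting.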
+ size_t Free(Address start, size_t size_in_bytes, SpaceAccountingMode mode) {
+ if (size_in_bytes == 0) return 0;
+ heap()->CreateFillerObjectAtBackground(
+ start, static_cast<int>(size_in_bytes),
+ ClearFreedMemoryMode::kDontClearFreedMemory);
+ if (mode == SpaceAccountingMode::kSpaceAccounted) {
+ return AccountedFree(start, size_in_bytes);
+ } else {
+ return UnaccountedFree(start, size_in_bytes);
+ }
+ }
+
+  // Gives a block of memory to the space's free list. It might be added to
+  // the free list or accounted as waste. Returns the number of bytes that
+  // actually ended up on the free list (i.e., the size minus wasted bytes).
+ size_t AccountedFree(Address start, size_t size_in_bytes) {
+ size_t wasted = free_list_->Free(start, size_in_bytes, kLinkCategory);
+ Page* page = Page::FromAddress(start);
+ accounting_stats_.DecreaseAllocatedBytes(size_in_bytes, page);
+ DCHECK_GE(size_in_bytes, wasted);
+ return size_in_bytes - wasted;
+ }
+
+ size_t UnaccountedFree(Address start, size_t size_in_bytes) {
+ size_t wasted = free_list_->Free(start, size_in_bytes, kDoNotLinkCategory);
+ DCHECK_GE(size_in_bytes, wasted);
+ return size_in_bytes - wasted;
+ }
+
+ inline bool TryFreeLast(HeapObject object, int object_size);
+
+ void ResetFreeList();
+
+ // Empty space linear allocation area, returning unused area to free list.
+ void FreeLinearAllocationArea();
+
+ void MakeLinearAllocationAreaIterable();
+
+ void MarkLinearAllocationAreaBlack();
+ void UnmarkLinearAllocationArea();
+
+ void DecreaseAllocatedBytes(size_t bytes, Page* page) {
+ accounting_stats_.DecreaseAllocatedBytes(bytes, page);
+ }
+ void IncreaseAllocatedBytes(size_t bytes, Page* page) {
+ accounting_stats_.IncreaseAllocatedBytes(bytes, page);
+ }
+ void DecreaseCapacity(size_t bytes) {
+ accounting_stats_.DecreaseCapacity(bytes);
+ }
+ void IncreaseCapacity(size_t bytes) {
+ accounting_stats_.IncreaseCapacity(bytes);
+ }
+
+ void RefineAllocatedBytesAfterSweeping(Page* page);
+
+ Page* InitializePage(MemoryChunk* chunk);
+
+ void ReleasePage(Page* page);
+
+ // Adds the page to this space and returns the number of bytes added to the
+ // free list of the space.
+ size_t AddPage(Page* page);
+ void RemovePage(Page* page);
+ // Remove a page if it has at least |size_in_bytes| bytes available that can
+ // be used for allocation.
+ Page* RemovePageSafe(int size_in_bytes);
+
+ void SetReadable();
+ void SetReadAndExecutable();
+ void SetReadAndWritable();
+
+ void SetDefaultCodePermissions() {
+ if (FLAG_jitless) {
+ SetReadable();
+ } else {
+ SetReadAndExecutable();
+ }
+ }
+
+#ifdef VERIFY_HEAP
+ // Verify integrity of this space.
+ virtual void Verify(Isolate* isolate, ObjectVisitor* visitor);
+
+ void VerifyLiveBytes();
+
+ // Overridden by subclasses to verify space-specific object
+ // properties (e.g., only maps or free-list nodes are in map space).
+ virtual void VerifyObject(HeapObject obj) {}
+#endif
+
+#ifdef DEBUG
+ void VerifyCountersAfterSweeping(Heap* heap);
+ void VerifyCountersBeforeConcurrentSweeping();
+ // Print meta info and objects in this space.
+ void Print() override;
+
+ // Report code object related statistics
+ static void ReportCodeStatistics(Isolate* isolate);
+ static void ResetCodeStatistics(Isolate* isolate);
+#endif
+
+ bool CanExpand(size_t size);
+
+ // Returns the number of total pages in this space.
+ int CountTotalPages();
+
+ // Return size of allocatable area on a page in this space.
+ inline int AreaSize() { return static_cast<int>(area_size_); }
+
+ bool is_local_space() { return local_space_kind_ != LocalSpaceKind::kNone; }
+
+ bool is_off_thread_space() {
+ return local_space_kind_ == LocalSpaceKind::kOffThreadSpace;
+ }
+
+ bool is_compaction_space() {
+ return base::IsInRange(local_space_kind_,
+ LocalSpaceKind::kFirstCompactionSpace,
+ LocalSpaceKind::kLastCompactionSpace);
+ }
+
+ LocalSpaceKind local_space_kind() { return local_space_kind_; }
+
+ // Merges {other} into the current space. Note that this modifies {other},
+ // e.g., removes its bump pointer area and resets statistics.
+ void MergeLocalSpace(LocalSpace* other);
+
+ // Refills the free list from the corresponding free list filled by the
+ // sweeper.
+ virtual void RefillFreeList();
+
+ base::Mutex* mutex() { return &space_mutex_; }
+
+ inline void UnlinkFreeListCategories(Page* page);
+ inline size_t RelinkFreeListCategories(Page* page);
+
+ Page* first_page() { return reinterpret_cast<Page*>(Space::first_page()); }
+ const Page* first_page() const {
+ return reinterpret_cast<const Page*>(Space::first_page());
+ }
+
+ iterator begin() { return iterator(first_page()); }
+ iterator end() { return iterator(nullptr); }
+
+ const_iterator begin() const { return const_iterator(first_page()); }
+ const_iterator end() const { return const_iterator(nullptr); }
+
+ // Shrink immortal immovable pages of the space to be exactly the size needed
+ // using the high water mark.
+ void ShrinkImmortalImmovablePages();
+
+ size_t ShrinkPageToHighWaterMark(Page* page);
+
+ std::unique_ptr<ObjectIterator> GetObjectIterator(Heap* heap) override;
+
+ void SetLinearAllocationArea(Address top, Address limit);
+
+ private:
+ // Set space linear allocation area.
+ void SetTopAndLimit(Address top, Address limit) {
+ DCHECK(top == limit ||
+ Page::FromAddress(top) == Page::FromAddress(limit - 1));
+ BasicMemoryChunk::UpdateHighWaterMark(allocation_info_.top());
+ allocation_info_.Reset(top, limit);
+ }
+ void DecreaseLimit(Address new_limit);
+ void UpdateInlineAllocationLimit(size_t min_size) override;
+ bool SupportsInlineAllocation() override {
+ return identity() == OLD_SPACE && !is_local_space();
+ }
+
+ protected:
+ // PagedSpaces that should be included in snapshots have different, i.e.,
+ // smaller, initial pages.
+ virtual bool snapshotable() { return true; }
+
+ bool HasPages() { return first_page() != nullptr; }
+
+ // Cleans up the space, frees all pages in this space except those belonging
+ // to the initial chunk, uncommits addresses in the initial chunk.
+ void TearDown();
+
+  // Expands the space by allocating a fixed number of pages. Returns nullptr
+  // if it cannot allocate the requested number of pages from the OS, or if
+  // the hard heap size limit has been hit.
+ Page* Expand();
+ Page* ExpandBackground(LocalHeap* local_heap);
+ Page* AllocatePage();
+
+ // Sets up a linear allocation area that fits the given number of bytes.
+ // Returns false if there is not enough space and the caller has to retry
+ // after collecting garbage.
+ inline bool EnsureLinearAllocationArea(int size_in_bytes,
+ AllocationOrigin origin);
+ // Allocates an object from the linear allocation area. Assumes that the
+  // linear allocation area is large enough to fit the object.
+ inline HeapObject AllocateLinearly(int size_in_bytes);
+ // Tries to allocate an aligned object from the linear allocation area.
+ // Returns nullptr if the linear allocation area does not fit the object.
+ // Otherwise, returns the object pointer and writes the allocation size
+ // (object size + alignment filler size) to the size_in_bytes.
+ inline HeapObject TryAllocateLinearlyAligned(int* size_in_bytes,
+ AllocationAlignment alignment);
+
+ V8_WARN_UNUSED_RESULT bool RefillLinearAllocationAreaFromFreeList(
+ size_t size_in_bytes, AllocationOrigin origin);
+
+  // If sweeping is still in progress, try to sweep unswept pages. If that is
+ // not successful, wait for the sweeper threads and retry free-list
+ // allocation. Returns false if there is not enough space and the caller
+ // has to retry after collecting garbage.
+ V8_WARN_UNUSED_RESULT bool EnsureSweptAndRetryAllocation(
+ int size_in_bytes, AllocationOrigin origin);
+
+ V8_WARN_UNUSED_RESULT bool SweepAndRetryAllocation(int required_freed_bytes,
+ int max_pages,
+ int size_in_bytes,
+ AllocationOrigin origin);
+
+ // Slow path of AllocateRaw. This function is space-dependent. Returns false
+ // if there is not enough space and the caller has to retry after
+ // collecting garbage.
+ V8_WARN_UNUSED_RESULT virtual bool SlowRefillLinearAllocationArea(
+ int size_in_bytes, AllocationOrigin origin);
+
+ // Implementation of SlowAllocateRaw. Returns false if there is not enough
+ // space and the caller has to retry after collecting garbage.
+ V8_WARN_UNUSED_RESULT bool RawSlowRefillLinearAllocationArea(
+ int size_in_bytes, AllocationOrigin origin);
+
+ V8_WARN_UNUSED_RESULT base::Optional<std::pair<Address, size_t>>
+ TryAllocationFromFreeListBackground(LocalHeap* local_heap,
+ size_t min_size_in_bytes,
+ size_t max_size_in_bytes,
+ AllocationAlignment alignment,
+ AllocationOrigin origin);
+
+ Executability executable_;
+
+ LocalSpaceKind local_space_kind_;
+
+ size_t area_size_;
+
+ // Accounting information for this space.
+ AllocationStats accounting_stats_;
+
+ // Mutex guarding any concurrent access to the space.
+ base::Mutex space_mutex_;
+
+ // Mutex guarding concurrent allocation.
+ base::Mutex allocation_mutex_;
+
+ friend class IncrementalMarking;
+ friend class MarkCompactCollector;
+
+ // Used in cctest.
+ friend class heap::HeapTester;
+};
+
+// -----------------------------------------------------------------------------
+// Base class for compaction space and off-thread space.
+
+class V8_EXPORT_PRIVATE LocalSpace : public PagedSpace {
+ public:
+ LocalSpace(Heap* heap, AllocationSpace id, Executability executable,
+ LocalSpaceKind local_space_kind)
+ : PagedSpace(heap, id, executable, FreeList::CreateFreeList(),
+ local_space_kind) {
+ DCHECK_NE(local_space_kind, LocalSpaceKind::kNone);
+ }
+
+ protected:
+ // The space is temporary and not included in any snapshots.
+ bool snapshotable() override { return false; }
+};
+
+// -----------------------------------------------------------------------------
+// Compaction space that is used temporarily during compaction.
+
+class V8_EXPORT_PRIVATE CompactionSpace : public LocalSpace {
+ public:
+ CompactionSpace(Heap* heap, AllocationSpace id, Executability executable,
+ LocalSpaceKind local_space_kind)
+ : LocalSpace(heap, id, executable, local_space_kind) {
+ DCHECK(is_compaction_space());
+ }
+
+ protected:
+ V8_WARN_UNUSED_RESULT bool SlowRefillLinearAllocationArea(
+ int size_in_bytes, AllocationOrigin origin) override;
+};
+
+// A collection of |CompactionSpace|s used by a single compaction task.
+class CompactionSpaceCollection : public Malloced {
+ public:
+ explicit CompactionSpaceCollection(Heap* heap,
+ LocalSpaceKind local_space_kind)
+ : old_space_(heap, OLD_SPACE, Executability::NOT_EXECUTABLE,
+ local_space_kind),
+ code_space_(heap, CODE_SPACE, Executability::EXECUTABLE,
+ local_space_kind) {}
+
+ CompactionSpace* Get(AllocationSpace space) {
+ switch (space) {
+ case OLD_SPACE:
+ return &old_space_;
+ case CODE_SPACE:
+ return &code_space_;
+ default:
+ UNREACHABLE();
+ }
+ UNREACHABLE();
+ }
+
+ private:
+ CompactionSpace old_space_;
+ CompactionSpace code_space_;
+};
+
+// -----------------------------------------------------------------------------
+// Old generation regular object space.
+
+class OldSpace : public PagedSpace {
+ public:
+ // Creates an old space object. The constructor does not allocate pages
+ // from OS.
+ explicit OldSpace(Heap* heap)
+ : PagedSpace(heap, OLD_SPACE, NOT_EXECUTABLE,
+ FreeList::CreateFreeList()) {}
+
+ static bool IsAtPageStart(Address addr) {
+ return static_cast<intptr_t>(addr & kPageAlignmentMask) ==
+ MemoryChunkLayout::ObjectStartOffsetInDataPage();
+ }
+
+ size_t ExternalBackingStoreBytes(ExternalBackingStoreType type) const final {
+ if (V8_ARRAY_BUFFER_EXTENSION_BOOL &&
+ type == ExternalBackingStoreType::kArrayBuffer)
+ return heap()->OldArrayBufferBytes();
+ return external_backing_store_bytes_[type];
+ }
+};
+
+// -----------------------------------------------------------------------------
+// Old generation code object space.
+
+class CodeSpace : public PagedSpace {
+ public:
+ // Creates an old space object. The constructor does not allocate pages
+ // from OS.
+ explicit CodeSpace(Heap* heap)
+ : PagedSpace(heap, CODE_SPACE, EXECUTABLE, FreeList::CreateFreeList()) {}
+};
+
+// -----------------------------------------------------------------------------
+// Old space for all map objects
+
+class MapSpace : public PagedSpace {
+ public:
+ // Creates a map space object.
+ explicit MapSpace(Heap* heap)
+ : PagedSpace(heap, MAP_SPACE, NOT_EXECUTABLE, new FreeListMap()) {}
+
+ int RoundSizeDownToObjectAlignment(int size) override {
+ if (base::bits::IsPowerOfTwo(Map::kSize)) {
+ return RoundDown(size, Map::kSize);
+ } else {
+ return (size / Map::kSize) * Map::kSize;
+ }
+ }
+
+ void SortFreeList();
+
+#ifdef VERIFY_HEAP
+ void VerifyObject(HeapObject obj) override;
+#endif
+};
+
+// -----------------------------------------------------------------------------
+// Off-thread space that is used for folded allocation on a different thread.
+
+class V8_EXPORT_PRIVATE OffThreadSpace : public LocalSpace {
+ public:
+ explicit OffThreadSpace(Heap* heap)
+ : LocalSpace(heap, OLD_SPACE, NOT_EXECUTABLE,
+ LocalSpaceKind::kOffThreadSpace) {
+#ifdef V8_ENABLE_THIRD_PARTY_HEAP
+ // OffThreadSpace doesn't work with third-party heap.
+ UNREACHABLE();
+#endif
+ }
+
+ protected:
+ V8_WARN_UNUSED_RESULT bool SlowRefillLinearAllocationArea(
+ int size_in_bytes, AllocationOrigin origin) override;
+
+ void RefillFreeList() override;
+};
+
+// Iterates over the chunks (pages and large object pages) that can contain
+// pointers to new space or to evacuation candidates.
+class OldGenerationMemoryChunkIterator {
+ public:
+ inline explicit OldGenerationMemoryChunkIterator(Heap* heap);
+
+ // Return nullptr when the iterator is done.
+ inline MemoryChunk* next();
+
+ private:
+ enum State {
+ kOldSpaceState,
+ kMapState,
+ kCodeState,
+ kLargeObjectState,
+ kCodeLargeObjectState,
+ kFinishedState
+ };
+ Heap* heap_;
+ State state_;
+ PageIterator old_iterator_;
+ PageIterator code_iterator_;
+ PageIterator map_iterator_;
+ LargePageIterator lo_iterator_;
+ LargePageIterator code_lo_iterator_;
+};
+
+} // namespace internal
+} // namespace v8
+
+#endif // V8_HEAP_PAGED_SPACES_H_
diff --git a/chromium/v8/src/heap/read-only-heap.cc b/chromium/v8/src/heap/read-only-heap.cc
index e2387984ccd..5bea259e7de 100644
--- a/chromium/v8/src/heap/read-only-heap.cc
+++ b/chromium/v8/src/heap/read-only-heap.cc
@@ -10,6 +10,7 @@
#include "src/base/lazy-instance.h"
#include "src/base/lsan.h"
#include "src/base/platform/mutex.h"
+#include "src/heap/basic-memory-chunk.h"
#include "src/heap/heap-write-barrier-inl.h"
#include "src/heap/memory-chunk.h"
#include "src/heap/read-only-spaces.h"
@@ -137,7 +138,7 @@ ReadOnlyHeap* ReadOnlyHeap::CreateAndAttachToIsolate(
void ReadOnlyHeap::InitFromIsolate(Isolate* isolate) {
DCHECK(!init_complete_);
- read_only_space_->ShrinkImmortalImmovablePages();
+ read_only_space_->ShrinkPages();
#ifdef V8_SHARED_RO_HEAP
std::shared_ptr<ReadOnlyArtifacts> artifacts(*read_only_artifacts_.Pointer());
read_only_space()->DetachPagesAndAddToArtifacts(artifacts);
@@ -174,7 +175,7 @@ void ReadOnlyHeap::PopulateReadOnlySpaceStatistics(
if (artifacts) {
auto ro_space = artifacts->shared_read_only_space();
statistics->read_only_space_size_ = ro_space->CommittedMemory();
- statistics->read_only_space_used_size_ = ro_space->SizeOfObjects();
+ statistics->read_only_space_used_size_ = ro_space->Size();
statistics->read_only_space_physical_size_ =
ro_space->CommittedPhysicalMemory();
}
@@ -183,7 +184,7 @@ void ReadOnlyHeap::PopulateReadOnlySpaceStatistics(
// static
bool ReadOnlyHeap::Contains(Address address) {
- return MemoryChunk::FromAddress(address)->InReadOnlySpace();
+ return BasicMemoryChunk::FromAddress(address)->InReadOnlySpace();
}
// static
@@ -191,7 +192,7 @@ bool ReadOnlyHeap::Contains(HeapObject object) {
if (V8_ENABLE_THIRD_PARTY_HEAP_BOOL) {
return third_party_heap::Heap::InReadOnlySpace(object.address());
} else {
- return MemoryChunk::FromHeapObject(object)->InReadOnlySpace();
+ return BasicMemoryChunk::FromHeapObject(object)->InReadOnlySpace();
}
}
@@ -214,30 +215,33 @@ ReadOnlyHeapObjectIterator::ReadOnlyHeapObjectIterator(ReadOnlyHeap* ro_heap)
ReadOnlyHeapObjectIterator::ReadOnlyHeapObjectIterator(ReadOnlySpace* ro_space)
: ro_space_(ro_space),
- current_page_(V8_ENABLE_THIRD_PARTY_HEAP_BOOL ? nullptr
- : ro_space->first_page()),
+ current_page_(V8_ENABLE_THIRD_PARTY_HEAP_BOOL
+ ? std::vector<ReadOnlyPage*>::iterator()
+ : ro_space->pages().begin()),
current_addr_(V8_ENABLE_THIRD_PARTY_HEAP_BOOL
? Address()
- : current_page_->area_start()) {}
+ : (*current_page_)->area_start()) {}
HeapObject ReadOnlyHeapObjectIterator::Next() {
if (V8_ENABLE_THIRD_PARTY_HEAP_BOOL) {
return HeapObject(); // Unsupported
}
- if (current_page_ == nullptr) {
+ if (current_page_ == ro_space_->pages().end()) {
return HeapObject();
}
+ BasicMemoryChunk* current_page = *current_page_;
for (;;) {
- DCHECK_LE(current_addr_, current_page_->area_end());
- if (current_addr_ == current_page_->area_end()) {
+ DCHECK_LE(current_addr_, current_page->area_end());
+ if (current_addr_ == current_page->area_end()) {
// Progress to the next page.
- current_page_ = current_page_->next_page();
- if (current_page_ == nullptr) {
+ ++current_page_;
+ if (current_page_ == ro_space_->pages().end()) {
return HeapObject();
}
- current_addr_ = current_page_->area_start();
+ current_page = *current_page_;
+ current_addr_ = current_page->area_start();
}
if (current_addr_ == ro_space_->top() &&
diff --git a/chromium/v8/src/heap/read-only-heap.h b/chromium/v8/src/heap/read-only-heap.h
index ed105211296..548f73bfbbf 100644
--- a/chromium/v8/src/heap/read-only-heap.h
+++ b/chromium/v8/src/heap/read-only-heap.h
@@ -7,6 +7,7 @@
#include <memory>
#include <utility>
+#include <vector>
#include "src/base/macros.h"
#include "src/base/optional.h"
@@ -20,10 +21,12 @@ class SharedMemoryStatistics;
namespace internal {
+class BasicMemoryChunk;
class Isolate;
class Page;
class ReadOnlyArtifacts;
class ReadOnlyDeserializer;
+class ReadOnlyPage;
class ReadOnlySpace;
// This class transparently manages read-only space, roots and cache creation
@@ -116,7 +119,7 @@ class V8_EXPORT_PRIVATE ReadOnlyHeapObjectIterator {
private:
ReadOnlySpace* const ro_space_;
- Page* current_page_;
+ std::vector<ReadOnlyPage*>::const_iterator current_page_;
Address current_addr_;
};
diff --git a/chromium/v8/src/heap/read-only-spaces.cc b/chromium/v8/src/heap/read-only-spaces.cc
index a2e72952580..a88753edf99 100644
--- a/chromium/v8/src/heap/read-only-spaces.cc
+++ b/chromium/v8/src/heap/read-only-spaces.cc
@@ -4,10 +4,14 @@
#include "src/heap/read-only-spaces.h"
+#include "include/v8-internal.h"
#include "src/base/lsan.h"
+#include "src/common/globals.h"
#include "src/execution/isolate.h"
+#include "src/heap/basic-memory-chunk.h"
#include "src/heap/combined-heap.h"
#include "src/heap/heap-inl.h"
+#include "src/heap/memory-allocator.h"
#include "src/heap/memory-chunk.h"
#include "src/heap/read-only-heap.h"
#include "src/objects/objects-inl.h"
@@ -20,20 +24,29 @@ namespace internal {
// ReadOnlySpace implementation
ReadOnlySpace::ReadOnlySpace(Heap* heap)
- : PagedSpace(heap, RO_SPACE, NOT_EXECUTABLE, FreeList::CreateFreeList()),
- is_string_padding_cleared_(heap->isolate()->initialized_from_snapshot()) {
+ : BaseSpace(heap, RO_SPACE),
+ top_(kNullAddress),
+ limit_(kNullAddress),
+ is_string_padding_cleared_(heap->isolate()->initialized_from_snapshot()),
+ capacity_(0),
+ area_size_(MemoryChunkLayout::AllocatableMemoryInMemoryChunk(RO_SPACE)) {}
+
+ReadOnlySpace::~ReadOnlySpace() {
+ Unseal();
+ for (ReadOnlyPage* chunk : pages_) {
+ heap()->memory_allocator()->FreeReadOnlyPage(chunk);
+ }
+ pages_.resize(0);
+ accounting_stats_.Clear();
}
ReadOnlyArtifacts::~ReadOnlyArtifacts() {
v8::PageAllocator* page_allocator = GetPlatformPageAllocator();
- MemoryChunk* next_chunk;
- for (MemoryChunk* chunk = pages_.front(); chunk != nullptr;
- chunk = next_chunk) {
+ for (ReadOnlyPage* chunk : pages_) {
void* chunk_address = reinterpret_cast<void*>(chunk->address());
page_allocator->SetPermissions(chunk_address, chunk->size(),
PageAllocator::kReadWrite);
- next_chunk = chunk->list_node().next();
size_t size = RoundUp(chunk->size(), page_allocator->AllocatePageSize());
CHECK(page_allocator->FreePages(chunk_address, size));
}
@@ -45,17 +58,19 @@ void ReadOnlyArtifacts::set_read_only_heap(
}
SharedReadOnlySpace::~SharedReadOnlySpace() {
- // Clear the memory chunk list before the space is deleted, so that the
- // inherited destructors don't try to destroy the MemoryChunks themselves.
- memory_chunk_list_ = heap::List<MemoryChunk>();
+ // Clear the chunk list before the space is deleted, so that the inherited
+ // destructors don't try to destroy the BasicMemoryChunks themselves.
+ pages_.resize(0);
}
SharedReadOnlySpace::SharedReadOnlySpace(
Heap* heap, std::shared_ptr<ReadOnlyArtifacts> artifacts)
: ReadOnlySpace(heap) {
- artifacts->pages().ShallowCopyTo(&memory_chunk_list_);
+ pages_ = artifacts->pages();
is_marked_read_only_ = true;
accounting_stats_ = artifacts->accounting_stats();
+ top_ = kNullAddress;
+ limit_ = kNullAddress;
}
void ReadOnlySpace::DetachPagesAndAddToArtifacts(
@@ -63,14 +78,13 @@ void ReadOnlySpace::DetachPagesAndAddToArtifacts(
Heap* heap = ReadOnlySpace::heap();
Seal(SealMode::kDetachFromHeapAndForget);
artifacts->set_accounting_stats(accounting_stats_);
- artifacts->TransferPages(std::move(memory_chunk_list_));
+ artifacts->TransferPages(std::move(pages_));
artifacts->set_shared_read_only_space(
std::make_unique<SharedReadOnlySpace>(heap, artifacts));
heap->ReplaceReadOnlySpace(artifacts->shared_read_only_space());
}
void ReadOnlyPage::MakeHeaderRelocatable() {
- ReleaseAllocatedMemoryNeededForWritableChunk();
// Detached read-only space needs to have a valid marking bitmap. Instruct
// Lsan to ignore it if required.
LSAN_IGNORE_OBJECT(marking_bitmap_);
@@ -80,12 +94,13 @@ void ReadOnlyPage::MakeHeaderRelocatable() {
void ReadOnlySpace::SetPermissionsForPages(MemoryAllocator* memory_allocator,
PageAllocator::Permission access) {
- for (Page* p : *this) {
+ for (BasicMemoryChunk* chunk : pages_) {
// Read only pages don't have valid reservation object so we get proper
// page allocator manually.
v8::PageAllocator* page_allocator =
- memory_allocator->page_allocator(p->executable());
- CHECK(SetPermissions(page_allocator, p->address(), p->size(), access));
+ memory_allocator->page_allocator(NOT_EXECUTABLE);
+ CHECK(SetPermissions(page_allocator, chunk->address(), chunk->size(),
+ access));
}
}
@@ -93,27 +108,20 @@ void ReadOnlySpace::SetPermissionsForPages(MemoryAllocator* memory_allocator,
// on the heap. If there was already a free list then the elements on it
// were created with the wrong FreeSpaceMap (normally nullptr), so we need to
// fix them.
-void ReadOnlySpace::RepairFreeListsAfterDeserialization() {
- free_list_->RepairLists(heap());
+void ReadOnlySpace::RepairFreeSpacesAfterDeserialization() {
+ BasicMemoryChunk::UpdateHighWaterMark(top_);
// Each page may have a small free space that is not tracked by a free list.
// Those free spaces still contain null as their map pointer.
// Overwrite them with new fillers.
- for (Page* page : *this) {
- int size = static_cast<int>(page->wasted_memory());
- if (size == 0) {
- // If there is no wasted memory then all free space is in the free list.
- continue;
- }
- Address start = page->HighWaterMark();
- Address end = page->area_end();
- if (start < end - size) {
- // A region at the high watermark is already in free list.
- HeapObject filler = HeapObject::FromAddress(start);
- CHECK(filler.IsFreeSpaceOrFiller());
- start += filler.Size();
+ for (BasicMemoryChunk* chunk : pages_) {
+ Address start = chunk->HighWaterMark();
+ Address end = chunk->area_end();
+ // Put a filler object in the gap between the end of the allocated objects
+ // and the end of the allocatable area.
+ if (start < end) {
+ heap()->CreateFillerObjectAt(start, static_cast<int>(end - start),
+ ClearRecordedSlots::kNo);
}
- CHECK_EQ(size, static_cast<int>(end - start));
- heap()->CreateFillerObjectAt(start, size, ClearRecordedSlots::kNo);
}
}
@@ -144,29 +152,374 @@ void ReadOnlySpace::Seal(SealMode ro_mode) {
if (ro_mode == SealMode::kDetachFromHeapAndForget) {
DetachFromHeap();
- for (Page* p : *this) {
- memory_allocator->UnregisterMemory(p);
- static_cast<ReadOnlyPage*>(p)->MakeHeaderRelocatable();
- }
- } else {
- for (Page* p : *this) {
- p->ReleaseAllocatedMemoryNeededForWritableChunk();
+ for (BasicMemoryChunk* chunk : pages_) {
+ memory_allocator->UnregisterMemory(chunk);
+ static_cast<ReadOnlyPage*>(chunk)->MakeHeaderRelocatable();
}
}
- free_list_.reset();
-
SetPermissionsForPages(memory_allocator, PageAllocator::kRead);
}
void ReadOnlySpace::Unseal() {
DCHECK(is_marked_read_only_);
- if (HasPages()) {
+ if (!pages_.empty()) {
SetPermissionsForPages(heap()->memory_allocator(),
PageAllocator::kReadWrite);
}
is_marked_read_only_ = false;
}
+bool ReadOnlySpace::ContainsSlow(Address addr) {
+ BasicMemoryChunk* c = BasicMemoryChunk::FromAddress(addr);
+ for (BasicMemoryChunk* chunk : pages_) {
+ if (chunk == c) return true;
+ }
+ return false;
+}
+
+namespace {
+// Only iterates over a single chunk as the chunk iteration is done externally.
+class ReadOnlySpaceObjectIterator : public ObjectIterator {
+ public:
+ ReadOnlySpaceObjectIterator(Heap* heap, ReadOnlySpace* space,
+ BasicMemoryChunk* chunk)
+ : cur_addr_(kNullAddress), cur_end_(kNullAddress), space_(space) {}
+
+ // Advance to the next object, skipping free spaces and other fillers and
+ // skipping the special garbage section of which there is one per space.
+  // Returns a null HeapObject when the iteration has ended.
+ HeapObject Next() override {
+ HeapObject next_obj = FromCurrentPage();
+ if (!next_obj.is_null()) return next_obj;
+ return HeapObject();
+ }
+
+ private:
+ HeapObject FromCurrentPage() {
+ while (cur_addr_ != cur_end_) {
+ if (cur_addr_ == space_->top() && cur_addr_ != space_->limit()) {
+ cur_addr_ = space_->limit();
+ continue;
+ }
+ HeapObject obj = HeapObject::FromAddress(cur_addr_);
+ const int obj_size = obj.Size();
+ cur_addr_ += obj_size;
+ DCHECK_LE(cur_addr_, cur_end_);
+ if (!obj.IsFreeSpaceOrFiller()) {
+ if (obj.IsCode()) {
+ DCHECK(Code::cast(obj).is_builtin());
+ DCHECK_CODEOBJECT_SIZE(obj_size, space_);
+ } else {
+ DCHECK_OBJECT_SIZE(obj_size);
+ }
+ return obj;
+ }
+ }
+ return HeapObject();
+ }
+
+ Address cur_addr_; // Current iteration point.
+ Address cur_end_; // End iteration point.
+ ReadOnlySpace* space_;
+};
+} // namespace
+
+#ifdef VERIFY_HEAP
+namespace {
+class VerifyReadOnlyPointersVisitor : public VerifyPointersVisitor {
+ public:
+ explicit VerifyReadOnlyPointersVisitor(Heap* heap)
+ : VerifyPointersVisitor(heap) {}
+
+ protected:
+ void VerifyPointers(HeapObject host, MaybeObjectSlot start,
+ MaybeObjectSlot end) override {
+ if (!host.is_null()) {
+ CHECK(ReadOnlyHeap::Contains(host.map()));
+ }
+ VerifyPointersVisitor::VerifyPointers(host, start, end);
+
+ for (MaybeObjectSlot current = start; current < end; ++current) {
+ HeapObject heap_object;
+ if ((*current)->GetHeapObject(&heap_object)) {
+ CHECK(ReadOnlyHeap::Contains(heap_object));
+ }
+ }
+ }
+};
+} // namespace
+
+void ReadOnlySpace::Verify(Isolate* isolate) {
+ bool allocation_pointer_found_in_space = top_ == limit_;
+ VerifyReadOnlyPointersVisitor visitor(isolate->heap());
+
+ for (BasicMemoryChunk* page : pages_) {
+#ifdef V8_SHARED_RO_HEAP
+ CHECK_NULL(page->owner());
+#else
+ CHECK_EQ(page->owner(), this);
+#endif
+
+ if (page == Page::FromAllocationAreaAddress(top_)) {
+ allocation_pointer_found_in_space = true;
+ }
+ ReadOnlySpaceObjectIterator it(isolate->heap(), this, page);
+ Address end_of_previous_object = page->area_start();
+ Address top = page->area_end();
+
+ for (HeapObject object = it.Next(); !object.is_null(); object = it.Next()) {
+ CHECK(end_of_previous_object <= object.address());
+
+ Map map = object.map();
+ CHECK(map.IsMap());
+
+ // The object itself should look OK.
+ object.ObjectVerify(isolate);
+
+ // All the interior pointers should be contained in the heap.
+ int size = object.Size();
+ object.IterateBody(map, size, &visitor);
+ CHECK(object.address() + size <= top);
+ end_of_previous_object = object.address() + size;
+
+ CHECK(!object.IsExternalString());
+ CHECK(!object.IsJSArrayBuffer());
+ }
+ }
+ CHECK(allocation_pointer_found_in_space);
+
+#ifdef DEBUG
+ VerifyCounters(isolate->heap());
+#endif
+}
+
+#ifdef DEBUG
+void ReadOnlySpace::VerifyCounters(Heap* heap) {
+ size_t total_capacity = 0;
+ size_t total_allocated = 0;
+ for (BasicMemoryChunk* page : pages_) {
+ total_capacity += page->area_size();
+ ReadOnlySpaceObjectIterator it(heap, this, page);
+ size_t real_allocated = 0;
+ for (HeapObject object = it.Next(); !object.is_null(); object = it.Next()) {
+ if (!object.IsFreeSpaceOrFiller()) {
+ real_allocated += object.Size();
+ }
+ }
+ total_allocated += page->allocated_bytes();
+    // The real size can be smaller than the accounted size if array trimming
+    // or object slack tracking happened after sweeping.
+ DCHECK_LE(real_allocated, accounting_stats_.AllocatedOnPage(page));
+ DCHECK_EQ(page->allocated_bytes(), accounting_stats_.AllocatedOnPage(page));
+ }
+ DCHECK_EQ(total_capacity, accounting_stats_.Capacity());
+ DCHECK_EQ(total_allocated, accounting_stats_.Size());
+}
+#endif // DEBUG
+#endif // VERIFY_HEAP
+
+size_t ReadOnlySpace::CommittedPhysicalMemory() {
+ if (!base::OS::HasLazyCommits()) return CommittedMemory();
+ BasicMemoryChunk::UpdateHighWaterMark(top_);
+ size_t size = 0;
+ for (auto* chunk : pages_) {
+ size += chunk->size();
+ }
+
+ return size;
+}
+
+void ReadOnlySpace::FreeLinearAllocationArea() {
+ // Mark the old linear allocation area with a free space map so it can be
+ // skipped when scanning the heap.
+ if (top_ == kNullAddress) {
+ DCHECK_EQ(kNullAddress, limit_);
+ return;
+ }
+
+ // Clear the bits in the unused black area.
+ ReadOnlyPage* page = pages_.back();
+ heap()->incremental_marking()->marking_state()->bitmap(page)->ClearRange(
+ page->AddressToMarkbitIndex(top_), page->AddressToMarkbitIndex(limit_));
+
+ heap()->CreateFillerObjectAt(top_, static_cast<int>(limit_ - top_),
+ ClearRecordedSlots::kNo);
+
+ BasicMemoryChunk::UpdateHighWaterMark(top_);
+
+ top_ = kNullAddress;
+ limit_ = kNullAddress;
+}
+
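+// Allocates a fresh read-only page when the current linear allocation area
+// cannot fit size_in_bytes, and points top_/limit_ at the new page's area.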
+void ReadOnlySpace::EnsureSpaceForAllocation(int size_in_bytes) {
+ if (top_ + size_in_bytes <= limit_) {
+ return;
+ }
+
+ DCHECK_GE(size_in_bytes, 0);
+
+ FreeLinearAllocationArea();
+
+ BasicMemoryChunk* chunk =
+ heap()->memory_allocator()->AllocateReadOnlyPage(AreaSize(), this);
+ capacity_ += AreaSize();
+
+ accounting_stats_.IncreaseCapacity(chunk->area_size());
+ AccountCommitted(chunk->size());
+ CHECK_NOT_NULL(chunk);
+ pages_.push_back(static_cast<ReadOnlyPage*>(chunk));
+
+ heap()->CreateFillerObjectAt(chunk->area_start(),
+ static_cast<int>(chunk->area_size()),
+ ClearRecordedSlots::kNo);
+
+ top_ = chunk->area_start();
+ limit_ = chunk->area_end();
+ return;
+}
+
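+// Bump-pointer allocation within the current read-only page; a filler is
+// placed in front of the object when alignment requires it.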
+HeapObject ReadOnlySpace::TryAllocateLinearlyAligned(
+ int size_in_bytes, AllocationAlignment alignment) {
+ Address current_top = top_;
+ int filler_size = Heap::GetFillToAlign(current_top, alignment);
+
+ Address new_top = current_top + filler_size + size_in_bytes;
+ if (new_top > limit_) return HeapObject();
+
+ top_ = new_top;
+ if (filler_size > 0) {
+ return Heap::PrecedeWithFiller(ReadOnlyRoots(heap()),
+ HeapObject::FromAddress(current_top),
+ filler_size);
+ }
+
+ // Allocation always occurs in the last chunk for RO_SPACE.
+ BasicMemoryChunk* chunk = pages_.back();
+ int allocated_size = filler_size + size_in_bytes;
+ accounting_stats_.IncreaseAllocatedBytes(allocated_size, chunk);
+ chunk->IncreaseAllocatedBytes(allocated_size);
+
+ return HeapObject::FromAddress(current_top);
+}
+
+AllocationResult ReadOnlySpace::AllocateRawAligned(
+ int size_in_bytes, AllocationAlignment alignment) {
+ DCHECK(!IsDetached());
+ int allocation_size = size_in_bytes;
+
+ HeapObject object = TryAllocateLinearlyAligned(allocation_size, alignment);
+ if (object.is_null()) {
+ // We don't know exactly how much filler we need to align until space is
+ // allocated, so assume the worst case.
+ EnsureSpaceForAllocation(allocation_size +
+ Heap::GetMaximumFillToAlign(alignment));
+ allocation_size = size_in_bytes;
+ object = TryAllocateLinearlyAligned(size_in_bytes, alignment);
+ CHECK(!object.is_null());
+ }
+ MSAN_ALLOCATED_UNINITIALIZED_MEMORY(object.address(), size_in_bytes);
+
+ return object;
+}
+
+AllocationResult ReadOnlySpace::AllocateRawUnaligned(int size_in_bytes) {
+ DCHECK(!IsDetached());
+ EnsureSpaceForAllocation(size_in_bytes);
+ Address current_top = top_;
+ Address new_top = current_top + size_in_bytes;
+ DCHECK_LE(new_top, limit_);
+ top_ = new_top;
+ HeapObject object = HeapObject::FromAddress(current_top);
+
+ DCHECK(!object.is_null());
+ MSAN_ALLOCATED_UNINITIALIZED_MEMORY(object.address(), size_in_bytes);
+
+ // Allocation always occurs in the last chunk for RO_SPACE.
+ BasicMemoryChunk* chunk = pages_.back();
+ accounting_stats_.IncreaseAllocatedBytes(size_in_bytes, chunk);
+ chunk->IncreaseAllocatedBytes(size_in_bytes);
+
+ return object;
+}
+
+AllocationResult ReadOnlySpace::AllocateRaw(size_t size_in_bytes,
+ AllocationAlignment alignment) {
+#ifdef V8_HOST_ARCH_32_BIT
+ AllocationResult result = alignment != kWordAligned
+ ? AllocateRawAligned(size_in_bytes, alignment)
+ : AllocateRawUnaligned(size_in_bytes);
+#else
+ AllocationResult result =
+ AllocateRawUnaligned(static_cast<int>(size_in_bytes));
+#endif
+ HeapObject heap_obj;
+ if (!result.IsRetry() && result.To(&heap_obj)) {
+ DCHECK(heap()->incremental_marking()->marking_state()->IsBlack(heap_obj));
+ }
+ return result;
+}
+
+size_t ReadOnlyPage::ShrinkToHighWaterMark() {
+ // Shrink pages to high water mark. The water mark points either to a filler
+ // or the area_end.
+ HeapObject filler = HeapObject::FromAddress(HighWaterMark());
+ if (filler.address() == area_end()) return 0;
+ CHECK(filler.IsFreeSpaceOrFiller());
+ DCHECK_EQ(filler.address() + filler.Size(), area_end());
+
+ size_t unused = RoundDown(static_cast<size_t>(area_end() - filler.address()),
+ MemoryAllocator::GetCommitPageSize());
+ if (unused > 0) {
+ DCHECK_EQ(0u, unused % MemoryAllocator::GetCommitPageSize());
+ if (FLAG_trace_gc_verbose) {
+ PrintIsolate(heap()->isolate(), "Shrinking page %p: end %p -> %p\n",
+ reinterpret_cast<void*>(this),
+ reinterpret_cast<void*>(area_end()),
+ reinterpret_cast<void*>(area_end() - unused));
+ }
+ heap()->CreateFillerObjectAt(
+ filler.address(),
+ static_cast<int>(area_end() - filler.address() - unused),
+ ClearRecordedSlots::kNo);
+ heap()->memory_allocator()->PartialFreeMemory(
+ this, address() + size() - unused, unused, area_end() - unused);
+ if (filler.address() != area_end()) {
+ CHECK(filler.IsFreeSpaceOrFiller());
+ CHECK_EQ(filler.address() + filler.Size(), area_end());
+ }
+ }
+ return unused;
+}
+
+void ReadOnlySpace::ShrinkPages() {
+ BasicMemoryChunk::UpdateHighWaterMark(top_);
+ heap()->CreateFillerObjectAt(top_, static_cast<int>(limit_ - top_),
+ ClearRecordedSlots::kNo);
+
+ for (ReadOnlyPage* chunk : pages_) {
+ DCHECK(chunk->IsFlagSet(Page::NEVER_EVACUATE));
+ size_t unused = chunk->ShrinkToHighWaterMark();
+ capacity_ -= unused;
+ accounting_stats_.DecreaseCapacity(static_cast<intptr_t>(unused));
+ AccountUncommitted(unused);
+ }
+ limit_ = pages_.back()->area_end();
+}
+
+ReadOnlyPage* ReadOnlySpace::InitializePage(BasicMemoryChunk* chunk) {
+ ReadOnlyPage* page = reinterpret_cast<ReadOnlyPage*>(chunk);
+ page->allocated_bytes_ = 0;
+ page->SetFlag(BasicMemoryChunk::Flag::NEVER_EVACUATE);
+ heap()
+ ->incremental_marking()
+ ->non_atomic_marking_state()
+ ->bitmap(chunk)
+ ->MarkAllBits();
+ chunk->SetFlag(BasicMemoryChunk::READ_ONLY_HEAP);
+
+ return page;
+}
+
} // namespace internal
} // namespace v8
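
The ReadOnlySpace allocation path above is a plain bump-pointer scheme: keep top_ and limit_, insert a filler object to satisfy alignment, and when the linear area runs out, free it, attach a fresh read-only page and retry with the worst-case filler reserved. The following is a minimal standalone sketch of that idea only; BumpRegion and RoundUp are illustrative names, not V8 classes.

#include <cstddef>
#include <cstdint>

// Toy bump-pointer region: aligns the allocation, accounts for the filler,
// and reports exhaustion so the caller can attach a new page and retry.
class BumpRegion {
 public:
  BumpRegion(uintptr_t start, uintptr_t end) : top_(start), limit_(end) {}

  // Returns the aligned object address, or 0 if the region is exhausted.
  uintptr_t Allocate(size_t size, size_t alignment) {
    uintptr_t aligned = RoundUp(top_, alignment);
    if (aligned + size > limit_) return 0;  // caller grows the region, retries
    top_ = aligned + size;                  // bump past filler + object
    return aligned;
  }

 private:
  static uintptr_t RoundUp(uintptr_t value, size_t alignment) {
    return (value + alignment - 1) & ~(static_cast<uintptr_t>(alignment) - 1);
  }
  uintptr_t top_;
  uintptr_t limit_;
};

A caller that must not fail reserves size plus the maximum possible filler up front, which mirrors the Heap::GetMaximumFillToAlign() reservation made in AllocateRawAligned() before the retry.
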
diff --git a/chromium/v8/src/heap/read-only-spaces.h b/chromium/v8/src/heap/read-only-spaces.h
index dd82182b7f6..ae2e6859440 100644
--- a/chromium/v8/src/heap/read-only-spaces.h
+++ b/chromium/v8/src/heap/read-only-spaces.h
@@ -10,21 +10,27 @@
#include "include/v8-platform.h"
#include "src/base/macros.h"
+#include "src/common/globals.h"
+#include "src/heap/allocation-stats.h"
+#include "src/heap/base-space.h"
+#include "src/heap/basic-memory-chunk.h"
#include "src/heap/list.h"
#include "src/heap/memory-chunk.h"
-#include "src/heap/spaces.h"
namespace v8 {
namespace internal {
+class MemoryAllocator;
class ReadOnlyHeap;
-class ReadOnlyPage : public Page {
+class ReadOnlyPage : public BasicMemoryChunk {
public:
// Clears any pointers in the header that point out of the page that would
// otherwise make the header non-relocatable.
void MakeHeaderRelocatable();
+ size_t ShrinkToHighWaterMark();
+
private:
friend class ReadOnlySpace;
};
@@ -45,8 +51,8 @@ class ReadOnlyArtifacts {
return shared_read_only_space_.get();
}
- heap::List<MemoryChunk>& pages() { return pages_; }
- void TransferPages(heap::List<MemoryChunk>&& pages) {
+ std::vector<ReadOnlyPage*>& pages() { return pages_; }
+ void TransferPages(std::vector<ReadOnlyPage*>&& pages) {
pages_ = std::move(pages);
}
@@ -56,7 +62,7 @@ class ReadOnlyArtifacts {
ReadOnlyHeap* read_only_heap() { return read_only_heap_.get(); }
private:
- heap::List<MemoryChunk> pages_;
+ std::vector<ReadOnlyPage*> pages_;
AllocationStats stats_;
std::unique_ptr<SharedReadOnlySpace> shared_read_only_space_;
std::unique_ptr<ReadOnlyHeap> read_only_heap_;
@@ -64,22 +70,28 @@ class ReadOnlyArtifacts {
// -----------------------------------------------------------------------------
// Read Only space for all Immortal Immovable and Immutable objects
-class ReadOnlySpace : public PagedSpace {
+class ReadOnlySpace : public BaseSpace {
public:
- explicit ReadOnlySpace(Heap* heap);
+ V8_EXPORT_PRIVATE explicit ReadOnlySpace(Heap* heap);
// Detach the pages and add them to the artifacts, for use in creating a
// SharedReadOnlySpace.
void DetachPagesAndAddToArtifacts(
std::shared_ptr<ReadOnlyArtifacts> artifacts);
- ~ReadOnlySpace() override { Unseal(); }
+ V8_EXPORT_PRIVATE ~ReadOnlySpace() override;
+
+ bool IsDetached() const { return heap_ == nullptr; }
bool writable() const { return !is_marked_read_only_; }
bool Contains(Address a) = delete;
bool Contains(Object o) = delete;
+ V8_EXPORT_PRIVATE
+ AllocationResult AllocateRaw(size_t size_in_bytes,
+ AllocationAlignment alignment);
+
V8_EXPORT_PRIVATE void ClearStringPaddingIfNeeded();
enum class SealMode { kDetachFromHeapAndForget, kDoNotDetachFromHeap };
@@ -87,13 +99,35 @@ class ReadOnlySpace : public PagedSpace {
// Seal the space by marking it read-only, optionally detaching it
// from the heap and forgetting it for memory bookkeeping purposes (e.g.
// prevent space's memory from registering as leaked).
- void Seal(SealMode ro_mode);
+ V8_EXPORT_PRIVATE void Seal(SealMode ro_mode);
// During boot the free_space_map is created, and afterwards we may need
- // to write it into the free list nodes that were already created.
- void RepairFreeListsAfterDeserialization();
+ // to write it into the free space nodes that were already created.
+ void RepairFreeSpacesAfterDeserialization();
+
+ size_t Size() override { return accounting_stats_.Size(); }
+ V8_EXPORT_PRIVATE size_t CommittedPhysicalMemory() override;
+
+ const std::vector<ReadOnlyPage*>& pages() const { return pages_; }
+ Address top() const { return top_; }
+ Address limit() const { return limit_; }
+ size_t Capacity() const { return capacity_; }
+
+ bool ContainsSlow(Address addr);
+ V8_EXPORT_PRIVATE void ShrinkPages();
+#ifdef VERIFY_HEAP
+ void Verify(Isolate* isolate);
+#ifdef DEBUG
+ void VerifyCounters(Heap* heap);
+#endif // DEBUG
+#endif // VERIFY_HEAP
+
+ // Return size of allocatable area on a page in this space.
+ int AreaSize() { return static_cast<int>(area_size_); }
+
+ ReadOnlyPage* InitializePage(BasicMemoryChunk* chunk);
- size_t Available() override { return 0; }
+ Address FirstPageAddress() const { return pages_.front()->address(); }
protected:
void SetPermissionsForPages(MemoryAllocator* memory_allocator,
@@ -101,16 +135,36 @@ class ReadOnlySpace : public PagedSpace {
bool is_marked_read_only_ = false;
+ // Accounting information for this space.
+ AllocationStats accounting_stats_;
+
+ std::vector<ReadOnlyPage*> pages_;
+
+ Address top_;
+ Address limit_;
+
private:
- // Unseal the space after is has been sealed, by making it writable.
- // TODO(v8:7464): Only possible if the space hasn't been detached.
+ // Unseal the space after it has been sealed, by making it writable.
void Unseal();
- //
- // String padding must be cleared just before serialization and therefore the
- // string padding in the space will already have been cleared if the space was
- // deserialized.
+ void DetachFromHeap() { heap_ = nullptr; }
+
+ AllocationResult AllocateRawUnaligned(int size_in_bytes);
+ AllocationResult AllocateRawAligned(int size_in_bytes,
+ AllocationAlignment alignment);
+
+ HeapObject TryAllocateLinearlyAligned(int size_in_bytes,
+ AllocationAlignment alignment);
+ void EnsureSpaceForAllocation(int size_in_bytes);
+ void FreeLinearAllocationArea();
+
+ // String padding must be cleared just before serialization and therefore
+ // the string padding in the space will already have been cleared if the
+ // space was deserialized.
bool is_string_padding_cleared_;
+
+ size_t capacity_;
+ const size_t area_size_;
};
class SharedReadOnlySpace : public ReadOnlySpace {
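
With the base class switched from PagedSpace to BaseSpace, the read-only space now carries its own std::vector<ReadOnlyPage*>, top_/limit_ and AllocationStats instead of reusing the free-list machinery, and gains a ContainsSlow(Address) query. Over a small page vector such a query is a linear scan of page ranges; a hedged sketch under invented names (PageRange is not a V8 type):

#include <cstdint>
#include <vector>

struct PageRange {
  uintptr_t start;  // inclusive
  uintptr_t end;    // exclusive
};

// Linear containment check over a vector of pages; adequate for a space
// that only ever holds a handful of read-only pages.
bool ContainsSlow(const std::vector<PageRange>& pages, uintptr_t addr) {
  for (const PageRange& page : pages) {
    if (addr >= page.start && addr < page.end) return true;
  }
  return false;
}
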
diff --git a/chromium/v8/src/heap/remembered-set-inl.h b/chromium/v8/src/heap/remembered-set-inl.h
index 034e98a06fb..3790ed9e712 100644
--- a/chromium/v8/src/heap/remembered-set-inl.h
+++ b/chromium/v8/src/heap/remembered-set-inl.h
@@ -5,437 +5,53 @@
#ifndef V8_HEAP_REMEMBERED_SET_INL_H_
#define V8_HEAP_REMEMBERED_SET_INL_H_
-#include <memory>
-
-#include "src/base/bounds.h"
-#include "src/base/memory.h"
-#include "src/codegen/reloc-info.h"
-#include "src/common/globals.h"
#include "src/common/ptr-compr-inl.h"
-#include "src/heap/heap.h"
-#include "src/heap/memory-chunk.h"
-#include "src/heap/slot-set.h"
-#include "src/heap/spaces.h"
-#include "src/heap/worklist.h"
+#include "src/heap/remembered-set.h"
namespace v8 {
namespace internal {
-enum RememberedSetIterationMode { SYNCHRONIZED, NON_SYNCHRONIZED };
-
-class RememberedSetOperations {
- public:
- // Given a page and a slot in that page, this function adds the slot to the
- // remembered set.
- template <AccessMode access_mode>
- static void Insert(SlotSet* slot_set, MemoryChunk* chunk, Address slot_addr) {
- DCHECK(chunk->Contains(slot_addr));
- uintptr_t offset = slot_addr - chunk->address();
- slot_set->Insert<access_mode>(offset);
- }
-
- template <typename Callback>
- static int Iterate(SlotSet* slot_set, MemoryChunk* chunk, Callback callback,
- SlotSet::EmptyBucketMode mode) {
- int slots = 0;
- if (slot_set != nullptr) {
- slots += slot_set->Iterate(chunk->address(), 0, chunk->buckets(),
- callback, mode);
+template <typename Callback>
+SlotCallbackResult UpdateTypedSlotHelper::UpdateTypedSlot(Heap* heap,
+ SlotType slot_type,
+ Address addr,
+ Callback callback) {
+ switch (slot_type) {
+ case CODE_TARGET_SLOT: {
+ RelocInfo rinfo(addr, RelocInfo::CODE_TARGET, 0, Code());
+ return UpdateCodeTarget(&rinfo, callback);
}
- return slots;
- }
-
- static void Remove(SlotSet* slot_set, MemoryChunk* chunk, Address slot_addr) {
- if (slot_set != nullptr) {
- uintptr_t offset = slot_addr - chunk->address();
- slot_set->Remove(offset);
- }
- }
-
- static void RemoveRange(SlotSet* slot_set, MemoryChunk* chunk, Address start,
- Address end, SlotSet::EmptyBucketMode mode) {
- if (slot_set != nullptr) {
- uintptr_t start_offset = start - chunk->address();
- uintptr_t end_offset = end - chunk->address();
- DCHECK_LT(start_offset, end_offset);
- slot_set->RemoveRange(static_cast<int>(start_offset),
- static_cast<int>(end_offset), chunk->buckets(),
- mode);
- }
- }
-
- static void CheckNoneInRange(SlotSet* slot_set, MemoryChunk* chunk,
- Address start, Address end) {
- if (slot_set != nullptr) {
- size_t start_bucket = SlotSet::BucketForSlot(start - chunk->address());
- // Both 'end' and 'end_bucket' are exclusive limits, so do some index
- // juggling to make sure we get the right bucket even if the end address
- // is at the start of a bucket.
- size_t end_bucket =
- SlotSet::BucketForSlot(end - chunk->address() - kTaggedSize) + 1;
- slot_set->Iterate(
- chunk->address(), start_bucket, end_bucket,
- [start, end](MaybeObjectSlot slot) {
- CHECK(!base::IsInRange(slot.address(), start, end + 1));
- return KEEP_SLOT;
- },
- SlotSet::KEEP_EMPTY_BUCKETS);
+ case CODE_ENTRY_SLOT: {
+ return UpdateCodeEntry(addr, callback);
}
- }
-};
-
-// TODO(ulan): Investigate performance of de-templatizing this class.
-template <RememberedSetType type>
-class RememberedSet : public AllStatic {
- public:
- // Given a page and a slot in that page, this function adds the slot to the
- // remembered set.
- template <AccessMode access_mode>
- static void Insert(MemoryChunk* chunk, Address slot_addr) {
- DCHECK(chunk->Contains(slot_addr));
- SlotSet* slot_set = chunk->slot_set<type, access_mode>();
- if (slot_set == nullptr) {
- slot_set = chunk->AllocateSlotSet<type>();
+ case COMPRESSED_EMBEDDED_OBJECT_SLOT: {
+ RelocInfo rinfo(addr, RelocInfo::COMPRESSED_EMBEDDED_OBJECT, 0, Code());
+ return UpdateEmbeddedPointer(heap, &rinfo, callback);
}
- RememberedSetOperations::Insert<access_mode>(slot_set, chunk, slot_addr);
- }
-
- // Given a page and a slot in that page, this function returns true if
- // the remembered set contains the slot.
- static bool Contains(MemoryChunk* chunk, Address slot_addr) {
- DCHECK(chunk->Contains(slot_addr));
- SlotSet* slot_set = chunk->slot_set<type>();
- if (slot_set == nullptr) {
- return false;
+ case FULL_EMBEDDED_OBJECT_SLOT: {
+ RelocInfo rinfo(addr, RelocInfo::FULL_EMBEDDED_OBJECT, 0, Code());
+ return UpdateEmbeddedPointer(heap, &rinfo, callback);
}
- uintptr_t offset = slot_addr - chunk->address();
- return slot_set->Contains(offset);
- }
-
- static void CheckNoneInRange(MemoryChunk* chunk, Address start, Address end) {
- SlotSet* slot_set = chunk->slot_set<type>();
- RememberedSetOperations::CheckNoneInRange(slot_set, chunk, start, end);
- }
-
- // Given a page and a slot in that page, this function removes the slot from
- // the remembered set.
- // If the slot was never added, then the function does nothing.
- static void Remove(MemoryChunk* chunk, Address slot_addr) {
- DCHECK(chunk->Contains(slot_addr));
- SlotSet* slot_set = chunk->slot_set<type>();
- RememberedSetOperations::Remove(slot_set, chunk, slot_addr);
- }
-
- // Given a page and a range of slots in that page, this function removes the
- // slots from the remembered set.
- static void RemoveRange(MemoryChunk* chunk, Address start, Address end,
- SlotSet::EmptyBucketMode mode) {
- SlotSet* slot_set = chunk->slot_set<type>();
- RememberedSetOperations::RemoveRange(slot_set, chunk, start, end, mode);
- }
-
- // Iterates and filters the remembered set with the given callback.
- // The callback should take (Address slot) and return SlotCallbackResult.
- template <typename Callback>
- static void Iterate(Heap* heap, RememberedSetIterationMode mode,
- Callback callback) {
- IterateMemoryChunks(heap, [mode, callback](MemoryChunk* chunk) {
- if (mode == SYNCHRONIZED) chunk->mutex()->Lock();
- Iterate(chunk, callback);
- if (mode == SYNCHRONIZED) chunk->mutex()->Unlock();
- });
- }
-
- // Iterates over all memory chunks that contains non-empty slot sets.
- // The callback should take (MemoryChunk* chunk) and return void.
- template <typename Callback>
- static void IterateMemoryChunks(Heap* heap, Callback callback) {
- OldGenerationMemoryChunkIterator it(heap);
- MemoryChunk* chunk;
- while ((chunk = it.next()) != nullptr) {
- SlotSet* slot_set = chunk->slot_set<type>();
- SlotSet* sweeping_slot_set =
- type == OLD_TO_NEW ? chunk->sweeping_slot_set() : nullptr;
- TypedSlotSet* typed_slot_set = chunk->typed_slot_set<type>();
- if (slot_set != nullptr || sweeping_slot_set != nullptr ||
- typed_slot_set != nullptr ||
- chunk->invalidated_slots<type>() != nullptr) {
- callback(chunk);
+ case COMPRESSED_OBJECT_SLOT: {
+ HeapObject old_target = HeapObject::cast(Object(
+ DecompressTaggedAny(heap->isolate(), base::Memory<Tagged_t>(addr))));
+ HeapObject new_target = old_target;
+ SlotCallbackResult result = callback(FullMaybeObjectSlot(&new_target));
+ DCHECK(!HasWeakHeapObjectTag(new_target));
+ if (new_target != old_target) {
+ base::Memory<Tagged_t>(addr) = CompressTagged(new_target.ptr());
}
+ return result;
}
- }
-
- // Iterates and filters the remembered set in the given memory chunk with
- // the given callback. The callback should take (Address slot) and return
- // SlotCallbackResult.
- //
- // Notice that |mode| can only be of FREE* or PREFREE* if there are no other
- // threads concurrently inserting slots.
- template <typename Callback>
- static int Iterate(MemoryChunk* chunk, Callback callback,
- SlotSet::EmptyBucketMode mode) {
- SlotSet* slot_set = chunk->slot_set<type>();
- return RememberedSetOperations::Iterate(slot_set, chunk, callback, mode);
- }
-
- template <typename Callback>
- static int IterateAndTrackEmptyBuckets(
- MemoryChunk* chunk, Callback callback,
- Worklist<MemoryChunk*, 64>::View empty_chunks) {
- SlotSet* slot_set = chunk->slot_set<type>();
- int slots = 0;
- if (slot_set != nullptr) {
- PossiblyEmptyBuckets* possibly_empty_buckets =
- chunk->possibly_empty_buckets();
- slots += slot_set->IterateAndTrackEmptyBuckets(chunk->address(), 0,
- chunk->buckets(), callback,
- possibly_empty_buckets);
- if (!possibly_empty_buckets->IsEmpty()) empty_chunks.Push(chunk);
- }
- return slots;
- }
-
- static void FreeEmptyBuckets(MemoryChunk* chunk) {
- DCHECK(type == OLD_TO_NEW);
- SlotSet* slot_set = chunk->slot_set<type>();
- if (slot_set != nullptr && slot_set->FreeEmptyBuckets(chunk->buckets())) {
- chunk->ReleaseSlotSet<type>();
- }
- }
-
- static bool CheckPossiblyEmptyBuckets(MemoryChunk* chunk) {
- DCHECK(type == OLD_TO_NEW);
- SlotSet* slot_set = chunk->slot_set<type, AccessMode::NON_ATOMIC>();
- if (slot_set != nullptr &&
- slot_set->CheckPossiblyEmptyBuckets(chunk->buckets(),
- chunk->possibly_empty_buckets())) {
- chunk->ReleaseSlotSet<type>();
- return true;
- }
-
- return false;
- }
-
- // Given a page and a typed slot in that page, this function adds the slot
- // to the remembered set.
- static void InsertTyped(MemoryChunk* memory_chunk, SlotType slot_type,
- uint32_t offset) {
- TypedSlotSet* slot_set = memory_chunk->typed_slot_set<type>();
- if (slot_set == nullptr) {
- slot_set = memory_chunk->AllocateTypedSlotSet<type>();
- }
- slot_set->Insert(slot_type, offset);
- }
-
- static void MergeTyped(MemoryChunk* page, std::unique_ptr<TypedSlots> other) {
- TypedSlotSet* slot_set = page->typed_slot_set<type>();
- if (slot_set == nullptr) {
- slot_set = page->AllocateTypedSlotSet<type>();
+ case FULL_OBJECT_SLOT: {
+ return callback(FullMaybeObjectSlot(addr));
}
- slot_set->Merge(other.get());
- }
-
- // Given a page and a range of typed slots in that page, this function removes
- // the slots from the remembered set.
- static void RemoveRangeTyped(MemoryChunk* page, Address start, Address end) {
- TypedSlotSet* slot_set = page->typed_slot_set<type>();
- if (slot_set != nullptr) {
- slot_set->Iterate(
- [=](SlotType slot_type, Address slot_addr) {
- return start <= slot_addr && slot_addr < end ? REMOVE_SLOT
- : KEEP_SLOT;
- },
- TypedSlotSet::FREE_EMPTY_CHUNKS);
- }
- }
-
- // Iterates and filters the remembered set with the given callback.
- // The callback should take (SlotType slot_type, Address addr) and return
- // SlotCallbackResult.
- template <typename Callback>
- static void IterateTyped(Heap* heap, RememberedSetIterationMode mode,
- Callback callback) {
- IterateMemoryChunks(heap, [mode, callback](MemoryChunk* chunk) {
- if (mode == SYNCHRONIZED) chunk->mutex()->Lock();
- IterateTyped(chunk, callback);
- if (mode == SYNCHRONIZED) chunk->mutex()->Unlock();
- });
- }
-
- // Iterates and filters typed pointers in the given memory chunk with the
- // given callback. The callback should take (SlotType slot_type, Address addr)
- // and return SlotCallbackResult.
- template <typename Callback>
- static void IterateTyped(MemoryChunk* chunk, Callback callback) {
- TypedSlotSet* slot_set = chunk->typed_slot_set<type>();
- if (slot_set != nullptr) {
- int new_count =
- slot_set->Iterate(callback, TypedSlotSet::KEEP_EMPTY_CHUNKS);
- if (new_count == 0) {
- chunk->ReleaseTypedSlotSet<type>();
- }
- }
- }
-
- // Clear all old to old slots from the remembered set.
- static void ClearAll(Heap* heap) {
- STATIC_ASSERT(type == OLD_TO_OLD);
- OldGenerationMemoryChunkIterator it(heap);
- MemoryChunk* chunk;
- while ((chunk = it.next()) != nullptr) {
- chunk->ReleaseSlotSet<OLD_TO_OLD>();
- chunk->ReleaseTypedSlotSet<OLD_TO_OLD>();
- chunk->ReleaseInvalidatedSlots<OLD_TO_OLD>();
- }
- }
-};
-
-class UpdateTypedSlotHelper {
- public:
- // Updates a typed slot using an untyped slot callback where |addr| depending
- // on slot type represents either address for respective RelocInfo or address
- // of the uncompressed constant pool entry.
- // The callback accepts FullMaybeObjectSlot and returns SlotCallbackResult.
- template <typename Callback>
- static SlotCallbackResult UpdateTypedSlot(Heap* heap, SlotType slot_type,
- Address addr, Callback callback) {
- switch (slot_type) {
- case CODE_TARGET_SLOT: {
- RelocInfo rinfo(addr, RelocInfo::CODE_TARGET, 0, Code());
- return UpdateCodeTarget(&rinfo, callback);
- }
- case CODE_ENTRY_SLOT: {
- return UpdateCodeEntry(addr, callback);
- }
- case COMPRESSED_EMBEDDED_OBJECT_SLOT: {
- RelocInfo rinfo(addr, RelocInfo::COMPRESSED_EMBEDDED_OBJECT, 0, Code());
- return UpdateEmbeddedPointer(heap, &rinfo, callback);
- }
- case FULL_EMBEDDED_OBJECT_SLOT: {
- RelocInfo rinfo(addr, RelocInfo::FULL_EMBEDDED_OBJECT, 0, Code());
- return UpdateEmbeddedPointer(heap, &rinfo, callback);
- }
- case COMPRESSED_OBJECT_SLOT: {
- HeapObject old_target = HeapObject::cast(Object(DecompressTaggedAny(
- heap->isolate(), base::Memory<Tagged_t>(addr))));
- HeapObject new_target = old_target;
- SlotCallbackResult result = callback(FullMaybeObjectSlot(&new_target));
- DCHECK(!HasWeakHeapObjectTag(new_target));
- if (new_target != old_target) {
- base::Memory<Tagged_t>(addr) = CompressTagged(new_target.ptr());
- }
- return result;
- }
- case FULL_OBJECT_SLOT: {
- return callback(FullMaybeObjectSlot(addr));
- }
- case CLEARED_SLOT:
- break;
- }
- UNREACHABLE();
- }
-
- private:
- // Updates a code entry slot using an untyped slot callback.
- // The callback accepts FullMaybeObjectSlot and returns SlotCallbackResult.
- template <typename Callback>
- static SlotCallbackResult UpdateCodeEntry(Address entry_address,
- Callback callback) {
- Code code = Code::GetObjectFromEntryAddress(entry_address);
- Code old_code = code;
- SlotCallbackResult result = callback(FullMaybeObjectSlot(&code));
- DCHECK(!HasWeakHeapObjectTag(code));
- if (code != old_code) {
- base::Memory<Address>(entry_address) = code.entry();
- }
- return result;
- }
-
- // Updates a code target slot using an untyped slot callback.
- // The callback accepts FullMaybeObjectSlot and returns SlotCallbackResult.
- template <typename Callback>
- static SlotCallbackResult UpdateCodeTarget(RelocInfo* rinfo,
- Callback callback) {
- DCHECK(RelocInfo::IsCodeTargetMode(rinfo->rmode()));
- Code old_target = Code::GetCodeFromTargetAddress(rinfo->target_address());
- Code new_target = old_target;
- SlotCallbackResult result = callback(FullMaybeObjectSlot(&new_target));
- DCHECK(!HasWeakHeapObjectTag(new_target));
- if (new_target != old_target) {
- rinfo->set_target_address(Code::cast(new_target).raw_instruction_start());
- }
- return result;
- }
-
- // Updates an embedded pointer slot using an untyped slot callback.
- // The callback accepts FullMaybeObjectSlot and returns SlotCallbackResult.
- template <typename Callback>
- static SlotCallbackResult UpdateEmbeddedPointer(Heap* heap, RelocInfo* rinfo,
- Callback callback) {
- DCHECK(RelocInfo::IsEmbeddedObjectMode(rinfo->rmode()));
- HeapObject old_target = rinfo->target_object_no_host(heap->isolate());
- HeapObject new_target = old_target;
- SlotCallbackResult result = callback(FullMaybeObjectSlot(&new_target));
- DCHECK(!HasWeakHeapObjectTag(new_target));
- if (new_target != old_target) {
- rinfo->set_target_object(heap, HeapObject::cast(new_target));
- }
- return result;
- }
-};
-
-class RememberedSetSweeping {
- public:
- template <AccessMode access_mode>
- static void Insert(MemoryChunk* chunk, Address slot_addr) {
- DCHECK(chunk->Contains(slot_addr));
- SlotSet* slot_set = chunk->sweeping_slot_set<access_mode>();
- if (slot_set == nullptr) {
- slot_set = chunk->AllocateSweepingSlotSet();
- }
- RememberedSetOperations::Insert<access_mode>(slot_set, chunk, slot_addr);
- }
-
- static void Remove(MemoryChunk* chunk, Address slot_addr) {
- DCHECK(chunk->Contains(slot_addr));
- SlotSet* slot_set = chunk->sweeping_slot_set<AccessMode::ATOMIC>();
- RememberedSetOperations::Remove(slot_set, chunk, slot_addr);
- }
-
- // Given a page and a range of slots in that page, this function removes the
- // slots from the remembered set.
- static void RemoveRange(MemoryChunk* chunk, Address start, Address end,
- SlotSet::EmptyBucketMode mode) {
- SlotSet* slot_set = chunk->sweeping_slot_set();
- RememberedSetOperations::RemoveRange(slot_set, chunk, start, end, mode);
- }
-
- // Iterates and filters the remembered set in the given memory chunk with
- // the given callback. The callback should take (Address slot) and return
- // SlotCallbackResult.
- //
- // Notice that |mode| can only be of FREE* or PREFREE* if there are no other
- // threads concurrently inserting slots.
- template <typename Callback>
- static int Iterate(MemoryChunk* chunk, Callback callback,
- SlotSet::EmptyBucketMode mode) {
- SlotSet* slot_set = chunk->sweeping_slot_set();
- return RememberedSetOperations::Iterate(slot_set, chunk, callback, mode);
- }
-};
-
-inline SlotType SlotTypeForRelocInfoMode(RelocInfo::Mode rmode) {
- if (RelocInfo::IsCodeTargetMode(rmode)) {
- return CODE_TARGET_SLOT;
- } else if (RelocInfo::IsFullEmbeddedObject(rmode)) {
- return FULL_EMBEDDED_OBJECT_SLOT;
- } else if (RelocInfo::IsCompressedEmbeddedObject(rmode)) {
- return COMPRESSED_EMBEDDED_OBJECT_SLOT;
+ case CLEARED_SLOT:
+ break;
}
UNREACHABLE();
}
} // namespace internal
} // namespace v8
-
#endif // V8_HEAP_REMEMBERED_SET_INL_H_
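
This file now holds only the out-of-line definition of UpdateTypedSlotHelper::UpdateTypedSlot(); the class bodies move to the new remembered-set.h below. The pattern is the usual header/-inl.h split: declare the template member where the type lives and define it in an -inl.h, so only translation units that instantiate it pay for the heavier includes (reloc-info, pointer compression). A minimal sketch of that split, with made-up names:

// widget.h -- declares the template member only.
template <typename T>
struct Widget {
  template <typename Callback>
  static int ForEach(const T* items, int count, Callback callback);
};

// widget-inl.h -- includes widget.h plus whatever the body needs,
// and provides the out-of-class definition.
template <typename T>
template <typename Callback>
int Widget<T>::ForEach(const T* items, int count, Callback callback) {
  int visited = 0;
  for (int i = 0; i < count; ++i) {
    callback(items[i]);
    ++visited;
  }
  return visited;
}
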
diff --git a/chromium/v8/src/heap/remembered-set.h b/chromium/v8/src/heap/remembered-set.h
new file mode 100644
index 00000000000..4ded63de03a
--- /dev/null
+++ b/chromium/v8/src/heap/remembered-set.h
@@ -0,0 +1,406 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_HEAP_REMEMBERED_SET_H_
+#define V8_HEAP_REMEMBERED_SET_H_
+
+#include <memory>
+
+#include "src/base/bounds.h"
+#include "src/base/memory.h"
+#include "src/codegen/reloc-info.h"
+#include "src/common/globals.h"
+#include "src/heap/heap.h"
+#include "src/heap/memory-chunk.h"
+#include "src/heap/paged-spaces.h"
+#include "src/heap/slot-set.h"
+#include "src/heap/spaces.h"
+#include "src/heap/worklist.h"
+
+namespace v8 {
+namespace internal {
+
+enum RememberedSetIterationMode { SYNCHRONIZED, NON_SYNCHRONIZED };
+
+class RememberedSetOperations {
+ public:
+ // Given a page and a slot in that page, this function adds the slot to the
+ // remembered set.
+ template <AccessMode access_mode>
+ static void Insert(SlotSet* slot_set, MemoryChunk* chunk, Address slot_addr) {
+ DCHECK(chunk->Contains(slot_addr));
+ uintptr_t offset = slot_addr - chunk->address();
+ slot_set->Insert<access_mode>(offset);
+ }
+
+ template <typename Callback>
+ static int Iterate(SlotSet* slot_set, MemoryChunk* chunk, Callback callback,
+ SlotSet::EmptyBucketMode mode) {
+ int slots = 0;
+ if (slot_set != nullptr) {
+ slots += slot_set->Iterate(chunk->address(), 0, chunk->buckets(),
+ callback, mode);
+ }
+ return slots;
+ }
+
+ static void Remove(SlotSet* slot_set, MemoryChunk* chunk, Address slot_addr) {
+ if (slot_set != nullptr) {
+ uintptr_t offset = slot_addr - chunk->address();
+ slot_set->Remove(offset);
+ }
+ }
+
+ static void RemoveRange(SlotSet* slot_set, MemoryChunk* chunk, Address start,
+ Address end, SlotSet::EmptyBucketMode mode) {
+ if (slot_set != nullptr) {
+ uintptr_t start_offset = start - chunk->address();
+ uintptr_t end_offset = end - chunk->address();
+ DCHECK_LT(start_offset, end_offset);
+ slot_set->RemoveRange(static_cast<int>(start_offset),
+ static_cast<int>(end_offset), chunk->buckets(),
+ mode);
+ }
+ }
+
+ static void CheckNoneInRange(SlotSet* slot_set, MemoryChunk* chunk,
+ Address start, Address end) {
+ if (slot_set != nullptr) {
+ size_t start_bucket = SlotSet::BucketForSlot(start - chunk->address());
+ // Both 'end' and 'end_bucket' are exclusive limits, so do some index
+ // juggling to make sure we get the right bucket even if the end address
+ // is at the start of a bucket.
+ size_t end_bucket =
+ SlotSet::BucketForSlot(end - chunk->address() - kTaggedSize) + 1;
+ slot_set->Iterate(
+ chunk->address(), start_bucket, end_bucket,
+ [start, end](MaybeObjectSlot slot) {
+ CHECK(!base::IsInRange(slot.address(), start, end + 1));
+ return KEEP_SLOT;
+ },
+ SlotSet::KEEP_EMPTY_BUCKETS);
+ }
+ }
+};
+
+// TODO(ulan): Investigate performance of de-templatizing this class.
+template <RememberedSetType type>
+class RememberedSet : public AllStatic {
+ public:
+ // Given a page and a slot in that page, this function adds the slot to the
+ // remembered set.
+ template <AccessMode access_mode>
+ static void Insert(MemoryChunk* chunk, Address slot_addr) {
+ DCHECK(chunk->Contains(slot_addr));
+ SlotSet* slot_set = chunk->slot_set<type, access_mode>();
+ if (slot_set == nullptr) {
+ slot_set = chunk->AllocateSlotSet<type>();
+ }
+ RememberedSetOperations::Insert<access_mode>(slot_set, chunk, slot_addr);
+ }
+
+ // Given a page and a slot in that page, this function returns true if
+ // the remembered set contains the slot.
+ static bool Contains(MemoryChunk* chunk, Address slot_addr) {
+ DCHECK(chunk->Contains(slot_addr));
+ SlotSet* slot_set = chunk->slot_set<type>();
+ if (slot_set == nullptr) {
+ return false;
+ }
+ uintptr_t offset = slot_addr - chunk->address();
+ return slot_set->Contains(offset);
+ }
+
+ static void CheckNoneInRange(MemoryChunk* chunk, Address start, Address end) {
+ SlotSet* slot_set = chunk->slot_set<type>();
+ RememberedSetOperations::CheckNoneInRange(slot_set, chunk, start, end);
+ }
+
+ // Given a page and a slot in that page, this function removes the slot from
+ // the remembered set.
+ // If the slot was never added, then the function does nothing.
+ static void Remove(MemoryChunk* chunk, Address slot_addr) {
+ DCHECK(chunk->Contains(slot_addr));
+ SlotSet* slot_set = chunk->slot_set<type>();
+ RememberedSetOperations::Remove(slot_set, chunk, slot_addr);
+ }
+
+ // Given a page and a range of slots in that page, this function removes the
+ // slots from the remembered set.
+ static void RemoveRange(MemoryChunk* chunk, Address start, Address end,
+ SlotSet::EmptyBucketMode mode) {
+ SlotSet* slot_set = chunk->slot_set<type>();
+ RememberedSetOperations::RemoveRange(slot_set, chunk, start, end, mode);
+ }
+
+ // Iterates and filters the remembered set with the given callback.
+ // The callback should take (Address slot) and return SlotCallbackResult.
+ template <typename Callback>
+ static void Iterate(Heap* heap, RememberedSetIterationMode mode,
+ Callback callback) {
+ IterateMemoryChunks(heap, [mode, callback](MemoryChunk* chunk) {
+ if (mode == SYNCHRONIZED) chunk->mutex()->Lock();
+ Iterate(chunk, callback);
+ if (mode == SYNCHRONIZED) chunk->mutex()->Unlock();
+ });
+ }
+
+  // Iterates over all memory chunks that contain non-empty slot sets.
+ // The callback should take (MemoryChunk* chunk) and return void.
+ template <typename Callback>
+ static void IterateMemoryChunks(Heap* heap, Callback callback) {
+ OldGenerationMemoryChunkIterator it(heap);
+ MemoryChunk* chunk;
+ while ((chunk = it.next()) != nullptr) {
+ SlotSet* slot_set = chunk->slot_set<type>();
+ SlotSet* sweeping_slot_set =
+ type == OLD_TO_NEW ? chunk->sweeping_slot_set() : nullptr;
+ TypedSlotSet* typed_slot_set = chunk->typed_slot_set<type>();
+ if (slot_set != nullptr || sweeping_slot_set != nullptr ||
+ typed_slot_set != nullptr ||
+ chunk->invalidated_slots<type>() != nullptr) {
+ callback(chunk);
+ }
+ }
+ }
+
+ // Iterates and filters the remembered set in the given memory chunk with
+ // the given callback. The callback should take (Address slot) and return
+ // SlotCallbackResult.
+ //
+ // Notice that |mode| can only be of FREE* or PREFREE* if there are no other
+ // threads concurrently inserting slots.
+ template <typename Callback>
+ static int Iterate(MemoryChunk* chunk, Callback callback,
+ SlotSet::EmptyBucketMode mode) {
+ SlotSet* slot_set = chunk->slot_set<type>();
+ return RememberedSetOperations::Iterate(slot_set, chunk, callback, mode);
+ }
+
+ template <typename Callback>
+ static int IterateAndTrackEmptyBuckets(
+ MemoryChunk* chunk, Callback callback,
+ Worklist<MemoryChunk*, 64>::View empty_chunks) {
+ SlotSet* slot_set = chunk->slot_set<type>();
+ int slots = 0;
+ if (slot_set != nullptr) {
+ PossiblyEmptyBuckets* possibly_empty_buckets =
+ chunk->possibly_empty_buckets();
+ slots += slot_set->IterateAndTrackEmptyBuckets(chunk->address(), 0,
+ chunk->buckets(), callback,
+ possibly_empty_buckets);
+ if (!possibly_empty_buckets->IsEmpty()) empty_chunks.Push(chunk);
+ }
+ return slots;
+ }
+
+ static void FreeEmptyBuckets(MemoryChunk* chunk) {
+ DCHECK(type == OLD_TO_NEW);
+ SlotSet* slot_set = chunk->slot_set<type>();
+ if (slot_set != nullptr && slot_set->FreeEmptyBuckets(chunk->buckets())) {
+ chunk->ReleaseSlotSet<type>();
+ }
+ }
+
+ static bool CheckPossiblyEmptyBuckets(MemoryChunk* chunk) {
+ DCHECK(type == OLD_TO_NEW);
+ SlotSet* slot_set = chunk->slot_set<type, AccessMode::NON_ATOMIC>();
+ if (slot_set != nullptr &&
+ slot_set->CheckPossiblyEmptyBuckets(chunk->buckets(),
+ chunk->possibly_empty_buckets())) {
+ chunk->ReleaseSlotSet<type>();
+ return true;
+ }
+
+ return false;
+ }
+
+ // Given a page and a typed slot in that page, this function adds the slot
+ // to the remembered set.
+ static void InsertTyped(MemoryChunk* memory_chunk, SlotType slot_type,
+ uint32_t offset) {
+ TypedSlotSet* slot_set = memory_chunk->typed_slot_set<type>();
+ if (slot_set == nullptr) {
+ slot_set = memory_chunk->AllocateTypedSlotSet<type>();
+ }
+ slot_set->Insert(slot_type, offset);
+ }
+
+ static void MergeTyped(MemoryChunk* page, std::unique_ptr<TypedSlots> other) {
+ TypedSlotSet* slot_set = page->typed_slot_set<type>();
+ if (slot_set == nullptr) {
+ slot_set = page->AllocateTypedSlotSet<type>();
+ }
+ slot_set->Merge(other.get());
+ }
+
+ // Given a page and a range of typed slots in that page, this function removes
+ // the slots from the remembered set.
+ static void RemoveRangeTyped(MemoryChunk* page, Address start, Address end) {
+ TypedSlotSet* slot_set = page->typed_slot_set<type>();
+ if (slot_set != nullptr) {
+ slot_set->Iterate(
+ [=](SlotType slot_type, Address slot_addr) {
+ return start <= slot_addr && slot_addr < end ? REMOVE_SLOT
+ : KEEP_SLOT;
+ },
+ TypedSlotSet::FREE_EMPTY_CHUNKS);
+ }
+ }
+
+ // Iterates and filters the remembered set with the given callback.
+ // The callback should take (SlotType slot_type, Address addr) and return
+ // SlotCallbackResult.
+ template <typename Callback>
+ static void IterateTyped(Heap* heap, RememberedSetIterationMode mode,
+ Callback callback) {
+ IterateMemoryChunks(heap, [mode, callback](MemoryChunk* chunk) {
+ if (mode == SYNCHRONIZED) chunk->mutex()->Lock();
+ IterateTyped(chunk, callback);
+ if (mode == SYNCHRONIZED) chunk->mutex()->Unlock();
+ });
+ }
+
+ // Iterates and filters typed pointers in the given memory chunk with the
+ // given callback. The callback should take (SlotType slot_type, Address addr)
+ // and return SlotCallbackResult.
+ template <typename Callback>
+ static void IterateTyped(MemoryChunk* chunk, Callback callback) {
+ TypedSlotSet* slot_set = chunk->typed_slot_set<type>();
+ if (slot_set != nullptr) {
+ int new_count =
+ slot_set->Iterate(callback, TypedSlotSet::KEEP_EMPTY_CHUNKS);
+ if (new_count == 0) {
+ chunk->ReleaseTypedSlotSet<type>();
+ }
+ }
+ }
+
+ // Clear all old to old slots from the remembered set.
+ static void ClearAll(Heap* heap) {
+ STATIC_ASSERT(type == OLD_TO_OLD);
+ OldGenerationMemoryChunkIterator it(heap);
+ MemoryChunk* chunk;
+ while ((chunk = it.next()) != nullptr) {
+ chunk->ReleaseSlotSet<OLD_TO_OLD>();
+ chunk->ReleaseTypedSlotSet<OLD_TO_OLD>();
+ chunk->ReleaseInvalidatedSlots<OLD_TO_OLD>();
+ }
+ }
+};
+
+class UpdateTypedSlotHelper {
+ public:
+  // Updates a typed slot using an untyped slot callback, where |addr|,
+  // depending on the slot type, is either the address of the respective
+  // RelocInfo or the address of the uncompressed constant pool entry.
+ // The callback accepts FullMaybeObjectSlot and returns SlotCallbackResult.
+ template <typename Callback>
+ static SlotCallbackResult UpdateTypedSlot(Heap* heap, SlotType slot_type,
+ Address addr, Callback callback);
+
+ private:
+ // Updates a code entry slot using an untyped slot callback.
+ // The callback accepts FullMaybeObjectSlot and returns SlotCallbackResult.
+ template <typename Callback>
+ static SlotCallbackResult UpdateCodeEntry(Address entry_address,
+ Callback callback) {
+ Code code = Code::GetObjectFromEntryAddress(entry_address);
+ Code old_code = code;
+ SlotCallbackResult result = callback(FullMaybeObjectSlot(&code));
+ DCHECK(!HasWeakHeapObjectTag(code));
+ if (code != old_code) {
+ base::Memory<Address>(entry_address) = code.entry();
+ }
+ return result;
+ }
+
+ // Updates a code target slot using an untyped slot callback.
+ // The callback accepts FullMaybeObjectSlot and returns SlotCallbackResult.
+ template <typename Callback>
+ static SlotCallbackResult UpdateCodeTarget(RelocInfo* rinfo,
+ Callback callback) {
+ DCHECK(RelocInfo::IsCodeTargetMode(rinfo->rmode()));
+ Code old_target = Code::GetCodeFromTargetAddress(rinfo->target_address());
+ Code new_target = old_target;
+ SlotCallbackResult result = callback(FullMaybeObjectSlot(&new_target));
+ DCHECK(!HasWeakHeapObjectTag(new_target));
+ if (new_target != old_target) {
+ rinfo->set_target_address(Code::cast(new_target).raw_instruction_start());
+ }
+ return result;
+ }
+
+ // Updates an embedded pointer slot using an untyped slot callback.
+ // The callback accepts FullMaybeObjectSlot and returns SlotCallbackResult.
+ template <typename Callback>
+ static SlotCallbackResult UpdateEmbeddedPointer(Heap* heap, RelocInfo* rinfo,
+ Callback callback) {
+ DCHECK(RelocInfo::IsEmbeddedObjectMode(rinfo->rmode()));
+ HeapObject old_target = rinfo->target_object_no_host(heap->isolate());
+ HeapObject new_target = old_target;
+ SlotCallbackResult result = callback(FullMaybeObjectSlot(&new_target));
+ DCHECK(!HasWeakHeapObjectTag(new_target));
+ if (new_target != old_target) {
+ rinfo->set_target_object(heap, HeapObject::cast(new_target));
+ }
+ return result;
+ }
+};
+
+class RememberedSetSweeping {
+ public:
+ template <AccessMode access_mode>
+ static void Insert(MemoryChunk* chunk, Address slot_addr) {
+ DCHECK(chunk->Contains(slot_addr));
+ SlotSet* slot_set = chunk->sweeping_slot_set<access_mode>();
+ if (slot_set == nullptr) {
+ slot_set = chunk->AllocateSweepingSlotSet();
+ }
+ RememberedSetOperations::Insert<access_mode>(slot_set, chunk, slot_addr);
+ }
+
+ static void Remove(MemoryChunk* chunk, Address slot_addr) {
+ DCHECK(chunk->Contains(slot_addr));
+ SlotSet* slot_set = chunk->sweeping_slot_set<AccessMode::ATOMIC>();
+ RememberedSetOperations::Remove(slot_set, chunk, slot_addr);
+ }
+
+ // Given a page and a range of slots in that page, this function removes the
+ // slots from the remembered set.
+ static void RemoveRange(MemoryChunk* chunk, Address start, Address end,
+ SlotSet::EmptyBucketMode mode) {
+ SlotSet* slot_set = chunk->sweeping_slot_set();
+ RememberedSetOperations::RemoveRange(slot_set, chunk, start, end, mode);
+ }
+
+ // Iterates and filters the remembered set in the given memory chunk with
+ // the given callback. The callback should take (Address slot) and return
+ // SlotCallbackResult.
+ //
+ // Notice that |mode| can only be of FREE* or PREFREE* if there are no other
+ // threads concurrently inserting slots.
+ template <typename Callback>
+ static int Iterate(MemoryChunk* chunk, Callback callback,
+ SlotSet::EmptyBucketMode mode) {
+ SlotSet* slot_set = chunk->sweeping_slot_set();
+ return RememberedSetOperations::Iterate(slot_set, chunk, callback, mode);
+ }
+};
+
+inline SlotType SlotTypeForRelocInfoMode(RelocInfo::Mode rmode) {
+ if (RelocInfo::IsCodeTargetMode(rmode)) {
+ return CODE_TARGET_SLOT;
+ } else if (RelocInfo::IsFullEmbeddedObject(rmode)) {
+ return FULL_EMBEDDED_OBJECT_SLOT;
+ } else if (RelocInfo::IsCompressedEmbeddedObject(rmode)) {
+ return COMPRESSED_EMBEDDED_OBJECT_SLOT;
+ }
+ UNREACHABLE();
+}
+
+} // namespace internal
+} // namespace v8
+
+#endif // V8_HEAP_REMEMBERED_SET_H_
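
The classes above are moved verbatim from remembered-set-inl.h; the remembered set remains "per-chunk sets of slot offsets", with RememberedSetOperations translating absolute slot addresses into offsets from the chunk base. The real SlotSet is a bucketed bitmap with atomic and non-atomic access modes; the following is only a toy sketch of the offset bookkeeping, with invented names.

#include <cstdint>
#include <set>

// Toy remembered set for one chunk: records slot positions as offsets from
// the chunk base, the same translation RememberedSetOperations performs.
class SimpleSlotSet {
 public:
  explicit SimpleSlotSet(uintptr_t chunk_base) : chunk_base_(chunk_base) {}

  void Insert(uintptr_t slot_addr) { offsets_.insert(slot_addr - chunk_base_); }

  bool Contains(uintptr_t slot_addr) const {
    return offsets_.count(slot_addr - chunk_base_) != 0;
  }

  // Removes all recorded slots in [start, end).
  void RemoveRange(uintptr_t start, uintptr_t end) {
    offsets_.erase(offsets_.lower_bound(start - chunk_base_),
                   offsets_.lower_bound(end - chunk_base_));
  }

 private:
  uintptr_t chunk_base_;
  std::set<uintptr_t> offsets_;
};
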
diff --git a/chromium/v8/src/heap/safepoint.cc b/chromium/v8/src/heap/safepoint.cc
index e6ccf642c09..3012413f48c 100644
--- a/chromium/v8/src/heap/safepoint.cc
+++ b/chromium/v8/src/heap/safepoint.cc
@@ -13,13 +13,16 @@ namespace v8 {
namespace internal {
GlobalSafepoint::GlobalSafepoint(Heap* heap)
- : heap_(heap), local_heaps_head_(nullptr), is_active_(false) {}
+ : heap_(heap), local_heaps_head_(nullptr), active_safepoint_scopes_(0) {}
-void GlobalSafepoint::Start() { StopThreads(); }
+void GlobalSafepoint::Start() { EnterSafepointScope(); }
-void GlobalSafepoint::End() { ResumeThreads(); }
+void GlobalSafepoint::End() { LeaveSafepointScope(); }
-void GlobalSafepoint::StopThreads() {
+void GlobalSafepoint::EnterSafepointScope() {
+ if (!FLAG_local_heaps) return;
+
+ if (++active_safepoint_scopes_ > 1) return;
local_heaps_mutex_.Lock();
barrier_.Arm();
@@ -37,12 +40,13 @@ void GlobalSafepoint::StopThreads() {
current->state_change_.Wait(&current->state_mutex_);
}
}
-
- is_active_ = true;
}
-void GlobalSafepoint::ResumeThreads() {
- is_active_ = false;
+void GlobalSafepoint::LeaveSafepointScope() {
+ if (!FLAG_local_heaps) return;
+
+ DCHECK_GT(active_safepoint_scopes_, 0);
+ if (--active_safepoint_scopes_ > 0) return;
for (LocalHeap* current = local_heaps_head_; current;
current = current->next_) {
@@ -90,12 +94,10 @@ void GlobalSafepoint::Barrier::Wait() {
}
SafepointScope::SafepointScope(Heap* heap) : safepoint_(heap->safepoint()) {
- if (FLAG_local_heaps) safepoint_->StopThreads();
+ safepoint_->EnterSafepointScope();
}
-SafepointScope::~SafepointScope() {
- if (FLAG_local_heaps) safepoint_->ResumeThreads();
-}
+SafepointScope::~SafepointScope() { safepoint_->LeaveSafepointScope(); }
void GlobalSafepoint::AddLocalHeap(LocalHeap* local_heap) {
base::MutexGuard guard(&local_heaps_mutex_);
diff --git a/chromium/v8/src/heap/safepoint.h b/chromium/v8/src/heap/safepoint.h
index 3ba96e11d59..0d397c9adaf 100644
--- a/chromium/v8/src/heap/safepoint.h
+++ b/chromium/v8/src/heap/safepoint.h
@@ -47,7 +47,7 @@ class GlobalSafepoint {
void Start();
void End();
- bool IsActive() { return is_active_; }
+ bool IsActive() { return active_safepoint_scopes_ > 0; }
private:
class Barrier {
@@ -63,8 +63,8 @@ class GlobalSafepoint {
void Wait();
};
- void StopThreads();
- void ResumeThreads();
+ void EnterSafepointScope();
+ void LeaveSafepointScope();
void AddLocalHeap(LocalHeap* local_heap);
void RemoveLocalHeap(LocalHeap* local_heap);
@@ -75,7 +75,7 @@ class GlobalSafepoint {
base::Mutex local_heaps_mutex_;
LocalHeap* local_heaps_head_;
- bool is_active_;
+ int active_safepoint_scopes_;
friend class SafepointScope;
friend class LocalHeap;
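
The safepoint change in the two files above replaces the is_active_ boolean with a count of active safepoint scopes, so nested SafepointScopes become cheap: only the outermost scope parks the background threads and only the last one to leave resumes them. A single-threaded sketch of that counting pattern (no mutex or condition variable, unlike the real GlobalSafepoint; all names here are illustrative):

#include <cassert>

class ToySafepoint {
 public:
  void EnterScope() {
    if (++active_scopes_ == 1) StopThreads();  // only the outermost scope stops
  }
  void LeaveScope() {
    assert(active_scopes_ > 0);
    if (--active_scopes_ == 0) ResumeThreads();  // only the last scope resumes
  }
  bool IsActive() const { return active_scopes_ > 0; }

 private:
  void StopThreads() { /* park background threads */ }
  void ResumeThreads() { /* wake them up again */ }
  int active_scopes_ = 0;
};

// RAII wrapper: nesting ToyScope instances is safe and idempotent.
class ToyScope {
 public:
  explicit ToyScope(ToySafepoint* sp) : sp_(sp) { sp_->EnterScope(); }
  ~ToyScope() { sp_->LeaveScope(); }

 private:
  ToySafepoint* sp_;
};
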
diff --git a/chromium/v8/src/heap/scavenger-inl.h b/chromium/v8/src/heap/scavenger-inl.h
index 3b3cc77b312..18933a5ac78 100644
--- a/chromium/v8/src/heap/scavenger-inl.h
+++ b/chromium/v8/src/heap/scavenger-inl.h
@@ -97,7 +97,7 @@ void Scavenger::PageMemoryFence(MaybeObject object) {
// with page initialization.
HeapObject heap_object;
if (object->GetHeapObject(&heap_object)) {
- MemoryChunk::FromHeapObject(heap_object)->SynchronizedHeapLoad();
+ BasicMemoryChunk::FromHeapObject(heap_object)->SynchronizedHeapLoad();
}
#endif
}
@@ -211,7 +211,7 @@ bool Scavenger::HandleLargeObject(Map map, HeapObject object, int object_size,
// object_size > kMaxRegularHeapObjectSize
if (V8_UNLIKELY(
FLAG_young_generation_large_objects &&
- MemoryChunk::FromHeapObject(object)->InNewLargeObjectSpace())) {
+ BasicMemoryChunk::FromHeapObject(object)->InNewLargeObjectSpace())) {
DCHECK_EQ(NEW_LO_SPACE,
MemoryChunk::FromHeapObject(object)->owner_identity());
if (object.synchronized_compare_and_swap_map_word(
diff --git a/chromium/v8/src/heap/scavenger.cc b/chromium/v8/src/heap/scavenger.cc
index d0d0a30fb13..06d3af4c0ac 100644
--- a/chromium/v8/src/heap/scavenger.cc
+++ b/chromium/v8/src/heap/scavenger.cc
@@ -14,6 +14,7 @@
#include "src/heap/mark-compact-inl.h"
#include "src/heap/memory-chunk-inl.h"
#include "src/heap/objects-visiting-inl.h"
+#include "src/heap/remembered-set-inl.h"
#include "src/heap/scavenger-inl.h"
#include "src/heap/sweeper.h"
#include "src/objects/data-handler-inl.h"
@@ -524,7 +525,7 @@ void Scavenger::IterateAndScavengePromotedObject(HeapObject target, Map map,
target.IterateBodyFast(map, size, &visitor);
if (map.IsJSArrayBufferMap()) {
- DCHECK(!MemoryChunk::FromHeapObject(target)->IsLargePage());
+ DCHECK(!BasicMemoryChunk::FromHeapObject(target)->IsLargePage());
JSArrayBuffer::cast(target).YoungMarkExtensionPromoted();
}
}
diff --git a/chromium/v8/src/heap/setup-heap-internal.cc b/chromium/v8/src/heap/setup-heap-internal.cc
index b62dd5c7fd1..a4d14649d6b 100644
--- a/chromium/v8/src/heap/setup-heap-internal.cc
+++ b/chromium/v8/src/heap/setup-heap-internal.cc
@@ -393,6 +393,7 @@ bool Heap::CreateInitialMaps() {
ALLOCATE_MAP(ODDBALL_TYPE, Oddball::kSize, optimized_out);
ALLOCATE_MAP(ODDBALL_TYPE, Oddball::kSize, stale_register);
ALLOCATE_MAP(ODDBALL_TYPE, Oddball::kSize, self_reference_marker);
+ ALLOCATE_MAP(ODDBALL_TYPE, Oddball::kSize, basic_block_counters_marker);
ALLOCATE_VARSIZE_MAP(BIGINT_TYPE, bigint);
for (unsigned i = 0; i < arraysize(string_type_table); i++) {
@@ -420,16 +421,14 @@ bool Heap::CreateInitialMaps() {
#define TORQUE_ALLOCATE_MAP(NAME, Name, name) \
ALLOCATE_MAP(NAME, Name::kSize, name)
- TORQUE_INTERNAL_FIXED_INSTANCE_TYPE_LIST(TORQUE_ALLOCATE_MAP);
+ TORQUE_DEFINED_FIXED_INSTANCE_TYPE_LIST(TORQUE_ALLOCATE_MAP);
#undef TORQUE_ALLOCATE_MAP
#define TORQUE_ALLOCATE_VARSIZE_MAP(NAME, Name, name) \
ALLOCATE_VARSIZE_MAP(NAME, name)
- TORQUE_INTERNAL_VARSIZE_INSTANCE_TYPE_LIST(TORQUE_ALLOCATE_VARSIZE_MAP);
+ TORQUE_DEFINED_VARSIZE_INSTANCE_TYPE_LIST(TORQUE_ALLOCATE_VARSIZE_MAP);
#undef TORQUE_ALLOCATE_VARSIZE_MAP
- ALLOCATE_VARSIZE_MAP(FIXED_ARRAY_TYPE, sloppy_arguments_elements)
-
ALLOCATE_VARSIZE_MAP(CODE_TYPE, code)
ALLOCATE_MAP(CELL_TYPE, Cell::kSize, cell);
@@ -717,8 +716,9 @@ void Heap::CreateInitialObjects() {
handle(Smi::FromInt(-7), isolate()), "undefined",
Oddball::kStaleRegister));
- // Initialize the self-reference marker.
+ // Initialize marker objects used during compilation.
set_self_reference_marker(*factory->NewSelfReferenceMarker());
+ set_basic_block_counters_marker(*factory->NewBasicBlockCountersMarker());
set_interpreter_entry_trampoline_for_profiling(roots.undefined_value());
@@ -769,6 +769,8 @@ void Heap::CreateInitialObjects() {
set_number_string_cache(*factory->NewFixedArray(
kInitialNumberStringCacheSize * 2, AllocationType::kOld));
+ set_basic_block_profiling_data(ArrayList::cast(roots.empty_fixed_array()));
+
// Allocate cache for string split and regexp-multiple.
set_string_split_cache(*factory->NewFixedArray(
RegExpResultsCache::kRegExpResultsCacheSize, AllocationType::kOld));
@@ -780,14 +782,6 @@ void Heap::CreateInitialObjects() {
factory->NewManyClosuresCell(factory->undefined_value());
set_many_closures_cell(*many_closures_cell);
- {
- Handle<FixedArray> empty_sloppy_arguments_elements =
- factory->NewFixedArray(2, AllocationType::kReadOnly);
- empty_sloppy_arguments_elements->set_map_after_allocation(
- roots.sloppy_arguments_elements_map(), SKIP_WRITE_BARRIER);
- set_empty_sloppy_arguments_elements(*empty_sloppy_arguments_elements);
- }
-
set_detached_contexts(roots.empty_weak_array_list());
set_retaining_path_targets(roots.empty_weak_array_list());
diff --git a/chromium/v8/src/heap/spaces-inl.h b/chromium/v8/src/heap/spaces-inl.h
index cb8b0a54d74..b54b6ac1150 100644
--- a/chromium/v8/src/heap/spaces-inl.h
+++ b/chromium/v8/src/heap/spaces-inl.h
@@ -6,15 +6,15 @@
#define V8_HEAP_SPACES_INL_H_
#include "src/base/atomic-utils.h"
-#include "src/base/bounded-page-allocator.h"
#include "src/base/v8-fallthrough.h"
#include "src/common/globals.h"
#include "src/heap/heap-inl.h"
#include "src/heap/incremental-marking.h"
#include "src/heap/memory-chunk-inl.h"
+#include "src/heap/new-spaces.h"
+#include "src/heap/paged-spaces.h"
#include "src/heap/spaces.h"
#include "src/objects/code-inl.h"
-#include "src/sanitizer/msan.h"
namespace v8 {
namespace internal {
@@ -42,63 +42,6 @@ PageRange::PageRange(Address start, Address limit)
#endif // DEBUG
}
-// -----------------------------------------------------------------------------
-// SemiSpaceObjectIterator
-
-HeapObject SemiSpaceObjectIterator::Next() {
- while (current_ != limit_) {
- if (Page::IsAlignedToPageSize(current_)) {
- Page* page = Page::FromAllocationAreaAddress(current_);
- page = page->next_page();
- DCHECK(page);
- current_ = page->area_start();
- if (current_ == limit_) return HeapObject();
- }
- HeapObject object = HeapObject::FromAddress(current_);
- current_ += object.Size();
- if (!object.IsFreeSpaceOrFiller()) {
- return object;
- }
- }
- return HeapObject();
-}
-
-// -----------------------------------------------------------------------------
-// PagedSpaceObjectIterator
-
-HeapObject PagedSpaceObjectIterator::Next() {
- do {
- HeapObject next_obj = FromCurrentPage();
- if (!next_obj.is_null()) return next_obj;
- } while (AdvanceToNextPage());
- return HeapObject();
-}
-
-HeapObject PagedSpaceObjectIterator::FromCurrentPage() {
- while (cur_addr_ != cur_end_) {
- if (cur_addr_ == space_->top() && cur_addr_ != space_->limit()) {
- cur_addr_ = space_->limit();
- continue;
- }
- HeapObject obj = HeapObject::FromAddress(cur_addr_);
- const int obj_size = obj.Size();
- cur_addr_ += obj_size;
- DCHECK_LE(cur_addr_, cur_end_);
- if (!obj.IsFreeSpaceOrFiller()) {
- if (obj.IsCode()) {
- DCHECK_IMPLIES(
- space_->identity() != CODE_SPACE,
- space_->identity() == RO_SPACE && Code::cast(obj).is_builtin());
- DCHECK_CODEOBJECT_SIZE(obj_size, space_);
- } else {
- DCHECK_OBJECT_SIZE(obj_size);
- }
- return obj;
- }
- }
- return HeapObject();
-}
-
void Space::IncrementExternalBackingStoreBytes(ExternalBackingStoreType type,
size_t amount) {
base::CheckedIncrement(&external_backing_store_bytes_[type], amount);
@@ -120,93 +63,6 @@ void Space::MoveExternalBackingStoreBytes(ExternalBackingStoreType type,
base::CheckedIncrement(&(to->external_backing_store_bytes_[type]), amount);
}
-// -----------------------------------------------------------------------------
-// SemiSpace
-
-bool SemiSpace::Contains(HeapObject o) {
- MemoryChunk* memory_chunk = MemoryChunk::FromHeapObject(o);
- if (memory_chunk->IsLargePage()) return false;
- return id_ == kToSpace ? memory_chunk->IsToPage()
- : memory_chunk->IsFromPage();
-}
-
-bool SemiSpace::Contains(Object o) {
- return o.IsHeapObject() && Contains(HeapObject::cast(o));
-}
-
-bool SemiSpace::ContainsSlow(Address a) {
- for (Page* p : *this) {
- if (p == MemoryChunk::FromAddress(a)) return true;
- }
- return false;
-}
-
-// --------------------------------------------------------------------------
-// NewSpace
-
-bool NewSpace::Contains(Object o) {
- return o.IsHeapObject() && Contains(HeapObject::cast(o));
-}
-
-bool NewSpace::Contains(HeapObject o) {
- return MemoryChunk::FromHeapObject(o)->InNewSpace();
-}
-
-bool NewSpace::ContainsSlow(Address a) {
- return from_space_.ContainsSlow(a) || to_space_.ContainsSlow(a);
-}
-
-bool NewSpace::ToSpaceContainsSlow(Address a) {
- return to_space_.ContainsSlow(a);
-}
-
-bool NewSpace::ToSpaceContains(Object o) { return to_space_.Contains(o); }
-bool NewSpace::FromSpaceContains(Object o) { return from_space_.Contains(o); }
-
-bool PagedSpace::Contains(Address addr) {
- if (V8_ENABLE_THIRD_PARTY_HEAP_BOOL) {
- return true;
- }
- return Page::FromAddress(addr)->owner() == this;
-}
-
-bool PagedSpace::Contains(Object o) {
- if (!o.IsHeapObject()) return false;
- return Page::FromAddress(o.ptr())->owner() == this;
-}
-
-void PagedSpace::UnlinkFreeListCategories(Page* page) {
- DCHECK_EQ(this, page->owner());
- page->ForAllFreeListCategories([this](FreeListCategory* category) {
- free_list()->RemoveCategory(category);
- });
-}
-
-size_t PagedSpace::RelinkFreeListCategories(Page* page) {
- DCHECK_EQ(this, page->owner());
- size_t added = 0;
- page->ForAllFreeListCategories([this, &added](FreeListCategory* category) {
- added += category->available();
- category->Relink(free_list());
- });
-
- DCHECK_IMPLIES(!page->IsFlagSet(Page::NEVER_ALLOCATE_ON_PAGE),
- page->AvailableInFreeList() ==
- page->AvailableInFreeListFromAllocatedBytes());
- return added;
-}
-
-bool PagedSpace::TryFreeLast(HeapObject object, int object_size) {
- if (allocation_info_.top() != kNullAddress) {
- const Address object_address = object.address();
- if ((allocation_info_.top() - object_size) == object_address) {
- allocation_info_.set_top(object_address);
- return true;
- }
- }
- return false;
-}
-
void Page::MarkNeverAllocateForTesting() {
DCHECK(this->owner_identity() != NEW_SPACE);
DCHECK(!IsFlagSet(NEVER_ALLOCATE_ON_PAGE));
@@ -278,53 +134,6 @@ MemoryChunk* OldGenerationMemoryChunkIterator::next() {
UNREACHABLE();
}
-bool FreeListCategory::is_linked(FreeList* owner) const {
- return prev_ != nullptr || next_ != nullptr ||
- owner->categories_[type_] == this;
-}
-
-void FreeListCategory::UpdateCountersAfterAllocation(size_t allocation_size) {
- available_ -= allocation_size;
-}
-
-Page* FreeList::GetPageForCategoryType(FreeListCategoryType type) {
- FreeListCategory* category_top = top(type);
- if (category_top != nullptr) {
- DCHECK(!category_top->top().is_null());
- return Page::FromHeapObject(category_top->top());
- } else {
- return nullptr;
- }
-}
-
-Page* FreeListLegacy::GetPageForSize(size_t size_in_bytes) {
- const int minimum_category =
- static_cast<int>(SelectFreeListCategoryType(size_in_bytes));
- Page* page = GetPageForCategoryType(kHuge);
- if (!page && static_cast<int>(kLarge) >= minimum_category)
- page = GetPageForCategoryType(kLarge);
- if (!page && static_cast<int>(kMedium) >= minimum_category)
- page = GetPageForCategoryType(kMedium);
- if (!page && static_cast<int>(kSmall) >= minimum_category)
- page = GetPageForCategoryType(kSmall);
- if (!page && static_cast<int>(kTiny) >= minimum_category)
- page = GetPageForCategoryType(kTiny);
- if (!page && static_cast<int>(kTiniest) >= minimum_category)
- page = GetPageForCategoryType(kTiniest);
- return page;
-}
-
-Page* FreeListFastAlloc::GetPageForSize(size_t size_in_bytes) {
- const int minimum_category =
- static_cast<int>(SelectFreeListCategoryType(size_in_bytes));
- Page* page = GetPageForCategoryType(kHuge);
- if (!page && static_cast<int>(kLarge) >= minimum_category)
- page = GetPageForCategoryType(kLarge);
- if (!page && static_cast<int>(kMedium) >= minimum_category)
- page = GetPageForCategoryType(kMedium);
- return page;
-}
-
AllocationResult LocalAllocationBuffer::AllocateRawAligned(
int size_in_bytes, AllocationAlignment alignment) {
Address current_top = allocation_info_.top();
@@ -343,216 +152,6 @@ AllocationResult LocalAllocationBuffer::AllocateRawAligned(
return AllocationResult(HeapObject::FromAddress(current_top));
}
-bool PagedSpace::EnsureLinearAllocationArea(int size_in_bytes,
- AllocationOrigin origin) {
- if (allocation_info_.top() + size_in_bytes <= allocation_info_.limit()) {
- return true;
- }
- return SlowRefillLinearAllocationArea(size_in_bytes, origin);
-}
-
-HeapObject PagedSpace::AllocateLinearly(int size_in_bytes) {
- Address current_top = allocation_info_.top();
- Address new_top = current_top + size_in_bytes;
- DCHECK_LE(new_top, allocation_info_.limit());
- allocation_info_.set_top(new_top);
- return HeapObject::FromAddress(current_top);
-}
-
-HeapObject PagedSpace::TryAllocateLinearlyAligned(
- int* size_in_bytes, AllocationAlignment alignment) {
- Address current_top = allocation_info_.top();
- int filler_size = Heap::GetFillToAlign(current_top, alignment);
-
- Address new_top = current_top + filler_size + *size_in_bytes;
- if (new_top > allocation_info_.limit()) return HeapObject();
-
- allocation_info_.set_top(new_top);
- if (filler_size > 0) {
- *size_in_bytes += filler_size;
- return Heap::PrecedeWithFiller(ReadOnlyRoots(heap()),
- HeapObject::FromAddress(current_top),
- filler_size);
- }
-
- return HeapObject::FromAddress(current_top);
-}
-
-AllocationResult PagedSpace::AllocateRawUnaligned(int size_in_bytes,
- AllocationOrigin origin) {
- DCHECK_IMPLIES(identity() == RO_SPACE, !IsDetached());
- if (!EnsureLinearAllocationArea(size_in_bytes, origin)) {
- return AllocationResult::Retry(identity());
- }
- HeapObject object = AllocateLinearly(size_in_bytes);
- DCHECK(!object.is_null());
- MSAN_ALLOCATED_UNINITIALIZED_MEMORY(object.address(), size_in_bytes);
-
- if (FLAG_trace_allocations_origins) {
- UpdateAllocationOrigins(origin);
- }
-
- return object;
-}
-
-AllocationResult PagedSpace::AllocateRawAligned(int size_in_bytes,
- AllocationAlignment alignment,
- AllocationOrigin origin) {
- DCHECK(identity() == OLD_SPACE || identity() == RO_SPACE);
- DCHECK_IMPLIES(identity() == RO_SPACE, !IsDetached());
- int allocation_size = size_in_bytes;
- HeapObject object = TryAllocateLinearlyAligned(&allocation_size, alignment);
- if (object.is_null()) {
- // We don't know exactly how much filler we need to align until space is
- // allocated, so assume the worst case.
- int filler_size = Heap::GetMaximumFillToAlign(alignment);
- allocation_size += filler_size;
- if (!EnsureLinearAllocationArea(allocation_size, origin)) {
- return AllocationResult::Retry(identity());
- }
- allocation_size = size_in_bytes;
- object = TryAllocateLinearlyAligned(&allocation_size, alignment);
- DCHECK(!object.is_null());
- }
- MSAN_ALLOCATED_UNINITIALIZED_MEMORY(object.address(), size_in_bytes);
-
- if (FLAG_trace_allocations_origins) {
- UpdateAllocationOrigins(origin);
- }
-
- return object;
-}
-
-AllocationResult PagedSpace::AllocateRaw(int size_in_bytes,
- AllocationAlignment alignment,
- AllocationOrigin origin) {
- if (top_on_previous_step_ && top() < top_on_previous_step_ &&
- SupportsInlineAllocation()) {
- // Generated code decreased the top() pointer to do folded allocations.
- // The top_on_previous_step_ can be one byte beyond the current page.
- DCHECK_NE(top(), kNullAddress);
- DCHECK_EQ(Page::FromAllocationAreaAddress(top()),
- Page::FromAllocationAreaAddress(top_on_previous_step_ - 1));
- top_on_previous_step_ = top();
- }
- size_t bytes_since_last =
- top_on_previous_step_ ? top() - top_on_previous_step_ : 0;
-
- DCHECK_IMPLIES(!SupportsInlineAllocation(), bytes_since_last == 0);
-#ifdef V8_HOST_ARCH_32_BIT
- AllocationResult result =
- alignment != kWordAligned
- ? AllocateRawAligned(size_in_bytes, alignment, origin)
- : AllocateRawUnaligned(size_in_bytes, origin);
-#else
- AllocationResult result = AllocateRawUnaligned(size_in_bytes, origin);
-#endif
- HeapObject heap_obj;
- if (!result.IsRetry() && result.To(&heap_obj) && !is_local_space()) {
- AllocationStep(static_cast<int>(size_in_bytes + bytes_since_last),
- heap_obj.address(), size_in_bytes);
- StartNextInlineAllocationStep();
- DCHECK_IMPLIES(
- heap()->incremental_marking()->black_allocation(),
- heap()->incremental_marking()->marking_state()->IsBlack(heap_obj));
- }
- return result;
-}
-
-// -----------------------------------------------------------------------------
-// NewSpace
-
-AllocationResult NewSpace::AllocateRawAligned(int size_in_bytes,
- AllocationAlignment alignment,
- AllocationOrigin origin) {
- Address top = allocation_info_.top();
- int filler_size = Heap::GetFillToAlign(top, alignment);
- int aligned_size_in_bytes = size_in_bytes + filler_size;
-
- if (allocation_info_.limit() - top <
- static_cast<uintptr_t>(aligned_size_in_bytes)) {
- // See if we can create room.
- if (!EnsureAllocation(size_in_bytes, alignment)) {
- return AllocationResult::Retry();
- }
-
- top = allocation_info_.top();
- filler_size = Heap::GetFillToAlign(top, alignment);
- aligned_size_in_bytes = size_in_bytes + filler_size;
- }
-
- HeapObject obj = HeapObject::FromAddress(top);
- allocation_info_.set_top(top + aligned_size_in_bytes);
- DCHECK_SEMISPACE_ALLOCATION_INFO(allocation_info_, to_space_);
-
- if (filler_size > 0) {
- obj = Heap::PrecedeWithFiller(ReadOnlyRoots(heap()), obj, filler_size);
- }
-
- MSAN_ALLOCATED_UNINITIALIZED_MEMORY(obj.address(), size_in_bytes);
-
- if (FLAG_trace_allocations_origins) {
- UpdateAllocationOrigins(origin);
- }
-
- return obj;
-}
-
-AllocationResult NewSpace::AllocateRawUnaligned(int size_in_bytes,
- AllocationOrigin origin) {
- Address top = allocation_info_.top();
- if (allocation_info_.limit() < top + size_in_bytes) {
- // See if we can create room.
- if (!EnsureAllocation(size_in_bytes, kWordAligned)) {
- return AllocationResult::Retry();
- }
-
- top = allocation_info_.top();
- }
-
- HeapObject obj = HeapObject::FromAddress(top);
- allocation_info_.set_top(top + size_in_bytes);
- DCHECK_SEMISPACE_ALLOCATION_INFO(allocation_info_, to_space_);
-
- MSAN_ALLOCATED_UNINITIALIZED_MEMORY(obj.address(), size_in_bytes);
-
- if (FLAG_trace_allocations_origins) {
- UpdateAllocationOrigins(origin);
- }
-
- return obj;
-}
-
-AllocationResult NewSpace::AllocateRaw(int size_in_bytes,
- AllocationAlignment alignment,
- AllocationOrigin origin) {
- if (top() < top_on_previous_step_) {
-    // Generated code decreased the top() pointer to do folded allocations.
- DCHECK_EQ(Page::FromAllocationAreaAddress(top()),
- Page::FromAllocationAreaAddress(top_on_previous_step_));
- top_on_previous_step_ = top();
- }
-#ifdef V8_HOST_ARCH_32_BIT
- return alignment != kWordAligned
- ? AllocateRawAligned(size_in_bytes, alignment, origin)
- : AllocateRawUnaligned(size_in_bytes, origin);
-#else
-#ifdef V8_COMPRESS_POINTERS
- // TODO(ishell, v8:8875): Consider using aligned allocations once the
- // allocation alignment inconsistency is fixed. For now we keep using
- // unaligned access since both x64 and arm64 architectures (where pointer
- // compression is supported) allow unaligned access to doubles and full words.
-#endif // V8_COMPRESS_POINTERS
- return AllocateRawUnaligned(size_in_bytes, origin);
-#endif
-}
-
-V8_WARN_UNUSED_RESULT inline AllocationResult NewSpace::AllocateRawSynchronized(
- int size_in_bytes, AllocationAlignment alignment, AllocationOrigin origin) {
- base::MutexGuard guard(&mutex_);
- return AllocateRaw(size_in_bytes, alignment, origin);
-}
-
LocalAllocationBuffer LocalAllocationBuffer::FromResult(Heap* heap,
AllocationResult result,
intptr_t size) {
diff --git a/chromium/v8/src/heap/spaces.cc b/chromium/v8/src/heap/spaces.cc
index 5e8874fafde..45c1de44c20 100644
--- a/chromium/v8/src/heap/spaces.cc
+++ b/chromium/v8/src/heap/spaces.cc
@@ -9,12 +9,9 @@
#include <utility>
#include "src/base/bits.h"
+#include "src/base/bounded-page-allocator.h"
#include "src/base/macros.h"
-#include "src/base/optional.h"
-#include "src/base/platform/semaphore.h"
#include "src/common/globals.h"
-#include "src/execution/vm-state-inl.h"
-#include "src/heap/array-buffer-sweeper.h"
#include "src/heap/array-buffer-tracker-inl.h"
#include "src/heap/combined-heap.h"
#include "src/heap/concurrent-marking.h"
@@ -26,9 +23,8 @@
#include "src/heap/large-spaces.h"
#include "src/heap/mark-compact.h"
#include "src/heap/read-only-heap.h"
-#include "src/heap/remembered-set-inl.h"
+#include "src/heap/remembered-set.h"
#include "src/heap/slot-set.h"
-#include "src/heap/sweeper.h"
#include "src/init/v8.h"
#include "src/logging/counters.h"
#include "src/objects/free-space-inl.h"
@@ -49,55 +45,6 @@ namespace internal {
STATIC_ASSERT(kClearedWeakHeapObjectLower32 > 0);
STATIC_ASSERT(kClearedWeakHeapObjectLower32 < Page::kHeaderSize);
-// ----------------------------------------------------------------------------
-// PagedSpaceObjectIterator
-
-PagedSpaceObjectIterator::PagedSpaceObjectIterator(Heap* heap,
- PagedSpace* space)
- : cur_addr_(kNullAddress),
- cur_end_(kNullAddress),
- space_(space),
- page_range_(space->first_page(), nullptr),
- current_page_(page_range_.begin()) {
- heap->mark_compact_collector()->EnsureSweepingCompleted();
-}
-
-PagedSpaceObjectIterator::PagedSpaceObjectIterator(Heap* heap,
- PagedSpace* space,
- Page* page)
- : cur_addr_(kNullAddress),
- cur_end_(kNullAddress),
- space_(space),
- page_range_(page),
- current_page_(page_range_.begin()) {
- heap->mark_compact_collector()->EnsureSweepingCompleted();
-#ifdef DEBUG
- AllocationSpace owner = page->owner_identity();
- DCHECK(owner == RO_SPACE || owner == OLD_SPACE || owner == MAP_SPACE ||
- owner == CODE_SPACE);
-#endif // DEBUG
-}
-
-PagedSpaceObjectIterator::PagedSpaceObjectIterator(OffThreadSpace* space)
- : cur_addr_(kNullAddress),
- cur_end_(kNullAddress),
- space_(space),
- page_range_(space->first_page(), nullptr),
- current_page_(page_range_.begin()) {}
-
-// We have hit the end of the current page's object area and should advance
-// to the next page in the iteration range.
-bool PagedSpaceObjectIterator::AdvanceToNextPage() {
- DCHECK_EQ(cur_addr_, cur_end_);
- if (current_page_ == page_range_.end()) return false;
- Page* cur_page = *(current_page_++);
-
- cur_addr_ = cur_page->area_start();
- cur_end_ = cur_page->area_end();
- DCHECK(cur_page->SweepingDone());
- return true;
-}
-
PauseAllocationObserversScope::PauseAllocationObserversScope(Heap* heap)
: heap_(heap) {
DCHECK_EQ(heap->gc_state(), Heap::NOT_IN_GC);
@@ -113,541 +60,6 @@ PauseAllocationObserversScope::~PauseAllocationObserversScope() {
}
}
-static base::LazyInstance<CodeRangeAddressHint>::type code_range_address_hint =
- LAZY_INSTANCE_INITIALIZER;
-
-Address CodeRangeAddressHint::GetAddressHint(size_t code_range_size) {
- base::MutexGuard guard(&mutex_);
- auto it = recently_freed_.find(code_range_size);
- if (it == recently_freed_.end() || it->second.empty()) {
- return reinterpret_cast<Address>(GetRandomMmapAddr());
- }
- Address result = it->second.back();
- it->second.pop_back();
- return result;
-}
-
-void CodeRangeAddressHint::NotifyFreedCodeRange(Address code_range_start,
- size_t code_range_size) {
- base::MutexGuard guard(&mutex_);
- recently_freed_[code_range_size].push_back(code_range_start);
-}
-
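A hedged sketch of the hint cache above: freed code ranges are remembered per reservation size and handed back LIFO to later reservations of the same size, with a caller-supplied random fallback. AddressHintCache and NotifyFreed are illustrative names; V8's mutex and GetRandomMmapAddr() are left out.

#include <map>
#include <stddef.h>
#include <stdint.h>
#include <vector>

class AddressHintCache {
 public:
  // Returns the most recently freed start address for this size, or the
  // caller-provided fallback when nothing of that size has been freed yet.
  uintptr_t GetAddressHint(size_t size, uintptr_t random_fallback) {
    auto it = recently_freed_.find(size);
    if (it == recently_freed_.end() || it->second.empty()) {
      return random_fallback;
    }
    uintptr_t hint = it->second.back();  // LIFO reuse of the last freed range
    it->second.pop_back();
    return hint;
  }

  void NotifyFreed(uintptr_t start, size_t size) {
    recently_freed_[size].push_back(start);
  }

 private:
  std::map<size_t, std::vector<uintptr_t>> recently_freed_;
};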
-// -----------------------------------------------------------------------------
-// MemoryAllocator
-//
-
-MemoryAllocator::MemoryAllocator(Isolate* isolate, size_t capacity,
- size_t code_range_size)
- : isolate_(isolate),
- data_page_allocator_(isolate->page_allocator()),
- code_page_allocator_(nullptr),
- capacity_(RoundUp(capacity, Page::kPageSize)),
- size_(0),
- size_executable_(0),
- lowest_ever_allocated_(static_cast<Address>(-1ll)),
- highest_ever_allocated_(kNullAddress),
- unmapper_(isolate->heap(), this) {
- InitializeCodePageAllocator(data_page_allocator_, code_range_size);
-}
-
-void MemoryAllocator::InitializeCodePageAllocator(
- v8::PageAllocator* page_allocator, size_t requested) {
- DCHECK_NULL(code_page_allocator_instance_.get());
-
- code_page_allocator_ = page_allocator;
-
- if (requested == 0) {
- if (!isolate_->RequiresCodeRange()) return;
- // When a target requires the code range feature, we put all code objects
- // in a kMaximalCodeRangeSize range of virtual address space, so that
- // they can call each other with near calls.
- requested = kMaximalCodeRangeSize;
- } else if (requested <= kMinimumCodeRangeSize) {
- requested = kMinimumCodeRangeSize;
- }
-
- const size_t reserved_area =
- kReservedCodeRangePages * MemoryAllocator::GetCommitPageSize();
- if (requested < (kMaximalCodeRangeSize - reserved_area)) {
- requested += RoundUp(reserved_area, MemoryChunk::kPageSize);
-    // Fulfilling both the reserved pages requirement and huge code area
-    // alignments is not supported (requires re-implementation).
- DCHECK_LE(kMinExpectedOSPageSize, page_allocator->AllocatePageSize());
- }
- DCHECK(!isolate_->RequiresCodeRange() || requested <= kMaximalCodeRangeSize);
-
- Address hint =
- RoundDown(code_range_address_hint.Pointer()->GetAddressHint(requested),
- page_allocator->AllocatePageSize());
- VirtualMemory reservation(
- page_allocator, requested, reinterpret_cast<void*>(hint),
- Max(kMinExpectedOSPageSize, page_allocator->AllocatePageSize()));
- if (!reservation.IsReserved()) {
- V8::FatalProcessOutOfMemory(isolate_,
- "CodeRange setup: allocate virtual memory");
- }
- code_range_ = reservation.region();
- isolate_->AddCodeRange(code_range_.begin(), code_range_.size());
-
-  // We are sure that we have mapped the requested block of addresses.
- DCHECK_GE(reservation.size(), requested);
- Address base = reservation.address();
-
- // On some platforms, specifically Win64, we need to reserve some pages at
- // the beginning of an executable space. See
- // https://cs.chromium.org/chromium/src/components/crash/content/
- // app/crashpad_win.cc?rcl=fd680447881449fba2edcf0589320e7253719212&l=204
- // for details.
- if (reserved_area > 0) {
- if (!reservation.SetPermissions(base, reserved_area,
- PageAllocator::kReadWrite))
- V8::FatalProcessOutOfMemory(isolate_, "CodeRange setup: set permissions");
-
- base += reserved_area;
- }
- Address aligned_base = RoundUp(base, MemoryChunk::kAlignment);
- size_t size =
- RoundDown(reservation.size() - (aligned_base - base) - reserved_area,
- MemoryChunk::kPageSize);
- DCHECK(IsAligned(aligned_base, kMinExpectedOSPageSize));
-
- LOG(isolate_,
- NewEvent("CodeRange", reinterpret_cast<void*>(reservation.address()),
- requested));
-
- code_reservation_ = std::move(reservation);
- code_page_allocator_instance_ = std::make_unique<base::BoundedPageAllocator>(
- page_allocator, aligned_base, size,
- static_cast<size_t>(MemoryChunk::kAlignment));
- code_page_allocator_ = code_page_allocator_instance_.get();
-}
-
-void MemoryAllocator::TearDown() {
- unmapper()->TearDown();
-
- // Check that spaces were torn down before MemoryAllocator.
- DCHECK_EQ(size_, 0u);
- // TODO(gc) this will be true again when we fix FreeMemory.
- // DCHECK_EQ(0, size_executable_);
- capacity_ = 0;
-
- if (last_chunk_.IsReserved()) {
- last_chunk_.Free();
- }
-
- if (code_page_allocator_instance_.get()) {
- DCHECK(!code_range_.is_empty());
- code_range_address_hint.Pointer()->NotifyFreedCodeRange(code_range_.begin(),
- code_range_.size());
- code_range_ = base::AddressRegion();
- code_page_allocator_instance_.reset();
- }
- code_page_allocator_ = nullptr;
- data_page_allocator_ = nullptr;
-}
-
-class MemoryAllocator::Unmapper::UnmapFreeMemoryTask : public CancelableTask {
- public:
- explicit UnmapFreeMemoryTask(Isolate* isolate, Unmapper* unmapper)
- : CancelableTask(isolate),
- unmapper_(unmapper),
- tracer_(isolate->heap()->tracer()) {}
-
- private:
- void RunInternal() override {
- TRACE_BACKGROUND_GC(tracer_,
- GCTracer::BackgroundScope::BACKGROUND_UNMAPPER);
- unmapper_->PerformFreeMemoryOnQueuedChunks<FreeMode::kUncommitPooled>();
- unmapper_->active_unmapping_tasks_--;
- unmapper_->pending_unmapping_tasks_semaphore_.Signal();
- if (FLAG_trace_unmapper) {
- PrintIsolate(unmapper_->heap_->isolate(),
- "UnmapFreeMemoryTask Done: id=%" PRIu64 "\n", id());
- }
- }
-
- Unmapper* const unmapper_;
- GCTracer* const tracer_;
- DISALLOW_COPY_AND_ASSIGN(UnmapFreeMemoryTask);
-};
-
-void MemoryAllocator::Unmapper::FreeQueuedChunks() {
- if (!heap_->IsTearingDown() && FLAG_concurrent_sweeping) {
- if (!MakeRoomForNewTasks()) {
- // kMaxUnmapperTasks are already running. Avoid creating any more.
- if (FLAG_trace_unmapper) {
- PrintIsolate(heap_->isolate(),
- "Unmapper::FreeQueuedChunks: reached task limit (%d)\n",
- kMaxUnmapperTasks);
- }
- return;
- }
- auto task = std::make_unique<UnmapFreeMemoryTask>(heap_->isolate(), this);
- if (FLAG_trace_unmapper) {
- PrintIsolate(heap_->isolate(),
- "Unmapper::FreeQueuedChunks: new task id=%" PRIu64 "\n",
- task->id());
- }
- DCHECK_LT(pending_unmapping_tasks_, kMaxUnmapperTasks);
- DCHECK_LE(active_unmapping_tasks_, pending_unmapping_tasks_);
- DCHECK_GE(active_unmapping_tasks_, 0);
- active_unmapping_tasks_++;
- task_ids_[pending_unmapping_tasks_++] = task->id();
- V8::GetCurrentPlatform()->CallOnWorkerThread(std::move(task));
- } else {
- PerformFreeMemoryOnQueuedChunks<FreeMode::kUncommitPooled>();
- }
-}
-
-void MemoryAllocator::Unmapper::CancelAndWaitForPendingTasks() {
- for (int i = 0; i < pending_unmapping_tasks_; i++) {
- if (heap_->isolate()->cancelable_task_manager()->TryAbort(task_ids_[i]) !=
- TryAbortResult::kTaskAborted) {
- pending_unmapping_tasks_semaphore_.Wait();
- }
- }
- pending_unmapping_tasks_ = 0;
- active_unmapping_tasks_ = 0;
-
- if (FLAG_trace_unmapper) {
- PrintIsolate(
- heap_->isolate(),
- "Unmapper::CancelAndWaitForPendingTasks: no tasks remaining\n");
- }
-}
-
-void MemoryAllocator::Unmapper::PrepareForGC() {
- // Free non-regular chunks because they cannot be re-used.
- PerformFreeMemoryOnQueuedNonRegularChunks();
-}
-
-void MemoryAllocator::Unmapper::EnsureUnmappingCompleted() {
- CancelAndWaitForPendingTasks();
- PerformFreeMemoryOnQueuedChunks<FreeMode::kReleasePooled>();
-}
-
-bool MemoryAllocator::Unmapper::MakeRoomForNewTasks() {
- DCHECK_LE(pending_unmapping_tasks_, kMaxUnmapperTasks);
-
- if (active_unmapping_tasks_ == 0 && pending_unmapping_tasks_ > 0) {
- // All previous unmapping tasks have been run to completion.
- // Finalize those tasks to make room for new ones.
- CancelAndWaitForPendingTasks();
- }
- return pending_unmapping_tasks_ != kMaxUnmapperTasks;
-}
-
-void MemoryAllocator::Unmapper::PerformFreeMemoryOnQueuedNonRegularChunks() {
- MemoryChunk* chunk = nullptr;
- while ((chunk = GetMemoryChunkSafe<kNonRegular>()) != nullptr) {
- allocator_->PerformFreeMemory(chunk);
- }
-}
-
-template <MemoryAllocator::Unmapper::FreeMode mode>
-void MemoryAllocator::Unmapper::PerformFreeMemoryOnQueuedChunks() {
- MemoryChunk* chunk = nullptr;
- if (FLAG_trace_unmapper) {
- PrintIsolate(
- heap_->isolate(),
- "Unmapper::PerformFreeMemoryOnQueuedChunks: %d queued chunks\n",
- NumberOfChunks());
- }
- // Regular chunks.
- while ((chunk = GetMemoryChunkSafe<kRegular>()) != nullptr) {
- bool pooled = chunk->IsFlagSet(MemoryChunk::POOLED);
- allocator_->PerformFreeMemory(chunk);
- if (pooled) AddMemoryChunkSafe<kPooled>(chunk);
- }
- if (mode == MemoryAllocator::Unmapper::FreeMode::kReleasePooled) {
- // The previous loop uncommitted any pages marked as pooled and added them
- // to the pooled list. In case of kReleasePooled we need to free them
- // though.
- while ((chunk = GetMemoryChunkSafe<kPooled>()) != nullptr) {
- allocator_->Free<MemoryAllocator::kAlreadyPooled>(chunk);
- }
- }
- PerformFreeMemoryOnQueuedNonRegularChunks();
-}
-
-void MemoryAllocator::Unmapper::TearDown() {
- CHECK_EQ(0, pending_unmapping_tasks_);
- PerformFreeMemoryOnQueuedChunks<FreeMode::kReleasePooled>();
- for (int i = 0; i < kNumberOfChunkQueues; i++) {
- DCHECK(chunks_[i].empty());
- }
-}
-
-size_t MemoryAllocator::Unmapper::NumberOfCommittedChunks() {
- base::MutexGuard guard(&mutex_);
- return chunks_[kRegular].size() + chunks_[kNonRegular].size();
-}
-
-int MemoryAllocator::Unmapper::NumberOfChunks() {
- base::MutexGuard guard(&mutex_);
- size_t result = 0;
- for (int i = 0; i < kNumberOfChunkQueues; i++) {
- result += chunks_[i].size();
- }
- return static_cast<int>(result);
-}
-
-size_t MemoryAllocator::Unmapper::CommittedBufferedMemory() {
- base::MutexGuard guard(&mutex_);
-
- size_t sum = 0;
-  // kPooled chunks are already uncommitted. We only have to account for
-  // kRegular and kNonRegular chunks.
- for (auto& chunk : chunks_[kRegular]) {
- sum += chunk->size();
- }
- for (auto& chunk : chunks_[kNonRegular]) {
- sum += chunk->size();
- }
- return sum;
-}
-
-bool MemoryAllocator::CommitMemory(VirtualMemory* reservation) {
- Address base = reservation->address();
- size_t size = reservation->size();
- if (!reservation->SetPermissions(base, size, PageAllocator::kReadWrite)) {
- return false;
- }
- UpdateAllocatedSpaceLimits(base, base + size);
- return true;
-}
-
-bool MemoryAllocator::UncommitMemory(VirtualMemory* reservation) {
- size_t size = reservation->size();
- if (!reservation->SetPermissions(reservation->address(), size,
- PageAllocator::kNoAccess)) {
- return false;
- }
- return true;
-}
-
-void MemoryAllocator::FreeMemory(v8::PageAllocator* page_allocator,
- Address base, size_t size) {
- CHECK(FreePages(page_allocator, reinterpret_cast<void*>(base), size));
-}
-
-Address MemoryAllocator::AllocateAlignedMemory(
- size_t reserve_size, size_t commit_size, size_t alignment,
- Executability executable, void* hint, VirtualMemory* controller) {
- v8::PageAllocator* page_allocator = this->page_allocator(executable);
- DCHECK(commit_size <= reserve_size);
- VirtualMemory reservation(page_allocator, reserve_size, hint, alignment);
- if (!reservation.IsReserved()) return kNullAddress;
- Address base = reservation.address();
- size_ += reservation.size();
-
- if (executable == EXECUTABLE) {
- if (!CommitExecutableMemory(&reservation, base, commit_size,
- reserve_size)) {
- base = kNullAddress;
- }
- } else {
- if (reservation.SetPermissions(base, commit_size,
- PageAllocator::kReadWrite)) {
- UpdateAllocatedSpaceLimits(base, base + commit_size);
- } else {
- base = kNullAddress;
- }
- }
-
- if (base == kNullAddress) {
- // Failed to commit the body. Free the mapping and any partially committed
- // regions inside it.
- reservation.Free();
- size_ -= reserve_size;
- return kNullAddress;
- }
-
- *controller = std::move(reservation);
- return base;
-}
-
-void CodeObjectRegistry::RegisterNewlyAllocatedCodeObject(Address code) {
- auto result = code_object_registry_newly_allocated_.insert(code);
- USE(result);
- DCHECK(result.second);
-}
-
-void CodeObjectRegistry::RegisterAlreadyExistingCodeObject(Address code) {
- code_object_registry_already_existing_.push_back(code);
-}
-
-void CodeObjectRegistry::Clear() {
- code_object_registry_already_existing_.clear();
- code_object_registry_newly_allocated_.clear();
-}
-
-void CodeObjectRegistry::Finalize() {
- code_object_registry_already_existing_.shrink_to_fit();
-}
-
-bool CodeObjectRegistry::Contains(Address object) const {
- return (code_object_registry_newly_allocated_.find(object) !=
- code_object_registry_newly_allocated_.end()) ||
- (std::binary_search(code_object_registry_already_existing_.begin(),
- code_object_registry_already_existing_.end(),
- object));
-}
-
-Address CodeObjectRegistry::GetCodeObjectStartFromInnerAddress(
- Address address) const {
- // Let's first find the object which comes right before address in the vector
- // of already existing code objects.
- Address already_existing_set_ = 0;
- Address newly_allocated_set_ = 0;
- if (!code_object_registry_already_existing_.empty()) {
- auto it =
- std::upper_bound(code_object_registry_already_existing_.begin(),
- code_object_registry_already_existing_.end(), address);
- if (it != code_object_registry_already_existing_.begin()) {
- already_existing_set_ = *(--it);
- }
- }
-
- // Next, let's find the object which comes right before address in the set
- // of newly allocated code objects.
- if (!code_object_registry_newly_allocated_.empty()) {
- auto it = code_object_registry_newly_allocated_.upper_bound(address);
- if (it != code_object_registry_newly_allocated_.begin()) {
- newly_allocated_set_ = *(--it);
- }
- }
-
-  // The code object which contains the address has to be in one of the two
-  // data structures.
- DCHECK(already_existing_set_ != 0 || newly_allocated_set_ != 0);
-
-  // The candidate closest to the given address is the start of the code object.
- return already_existing_set_ > newly_allocated_set_ ? already_existing_set_
- : newly_allocated_set_;
-}
-
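The lookup above relies on a common idiom that a standalone sketch makes explicit: std::upper_bound finds the first start address strictly greater than the query, so stepping back one element yields the greatest start address at or below it. GreatestStartNotAbove and the sample addresses are assumptions for the example.

#include <algorithm>
#include <cstdio>
#include <stdint.h>
#include <vector>

// Returns the greatest element of `sorted_starts` that is <= address, i.e.
// the candidate object containing it, or 0 if nothing starts at or before it.
uintptr_t GreatestStartNotAbove(const std::vector<uintptr_t>& sorted_starts,
                                uintptr_t address) {
  auto it = std::upper_bound(sorted_starts.begin(), sorted_starts.end(),
                             address);
  if (it == sorted_starts.begin()) return 0;
  return *(--it);
}

int main() {
  std::vector<uintptr_t> starts = {0x1000, 0x1400, 0x2000};  // must be sorted
  // Prints 0x1400: the object starting at 0x1400 covers address 0x17ff.
  std::printf("%#llx\n", static_cast<unsigned long long>(
                             GreatestStartNotAbove(starts, 0x17ff)));
}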
-namespace {
-
-PageAllocator::Permission DefaultWritableCodePermissions() {
- return FLAG_jitless ? PageAllocator::kReadWrite
- : PageAllocator::kReadWriteExecute;
-}
-
-} // namespace
-
-MemoryChunk* MemoryChunk::Initialize(Heap* heap, Address base, size_t size,
- Address area_start, Address area_end,
- Executability executable, Space* owner,
- VirtualMemory reservation) {
- MemoryChunk* chunk = FromAddress(base);
- DCHECK_EQ(base, chunk->address());
- new (chunk) BasicMemoryChunk(size, area_start, area_end);
-
- chunk->heap_ = heap;
- chunk->set_owner(owner);
- chunk->InitializeReservedMemory();
- base::AsAtomicPointer::Release_Store(&chunk->slot_set_[OLD_TO_NEW], nullptr);
- base::AsAtomicPointer::Release_Store(&chunk->slot_set_[OLD_TO_OLD], nullptr);
- base::AsAtomicPointer::Release_Store(&chunk->sweeping_slot_set_, nullptr);
- base::AsAtomicPointer::Release_Store(&chunk->typed_slot_set_[OLD_TO_NEW],
- nullptr);
- base::AsAtomicPointer::Release_Store(&chunk->typed_slot_set_[OLD_TO_OLD],
- nullptr);
- chunk->invalidated_slots_[OLD_TO_NEW] = nullptr;
- chunk->invalidated_slots_[OLD_TO_OLD] = nullptr;
- chunk->progress_bar_ = 0;
- chunk->high_water_mark_ = static_cast<intptr_t>(area_start - base);
- chunk->set_concurrent_sweeping_state(ConcurrentSweepingState::kDone);
- chunk->page_protection_change_mutex_ = new base::Mutex();
- chunk->write_unprotect_counter_ = 0;
- chunk->mutex_ = new base::Mutex();
- chunk->allocated_bytes_ = chunk->area_size();
- chunk->wasted_memory_ = 0;
- chunk->young_generation_bitmap_ = nullptr;
- chunk->local_tracker_ = nullptr;
-
- chunk->external_backing_store_bytes_[ExternalBackingStoreType::kArrayBuffer] =
- 0;
- chunk->external_backing_store_bytes_
- [ExternalBackingStoreType::kExternalString] = 0;
-
- chunk->categories_ = nullptr;
-
- heap->incremental_marking()->non_atomic_marking_state()->SetLiveBytes(chunk,
- 0);
- if (owner->identity() == RO_SPACE) {
- heap->incremental_marking()
- ->non_atomic_marking_state()
- ->bitmap(chunk)
- ->MarkAllBits();
- chunk->SetFlag(READ_ONLY_HEAP);
- }
-
- if (executable == EXECUTABLE) {
- chunk->SetFlag(IS_EXECUTABLE);
- if (heap->write_protect_code_memory()) {
- chunk->write_unprotect_counter_ =
- heap->code_space_memory_modification_scope_depth();
- } else {
- size_t page_size = MemoryAllocator::GetCommitPageSize();
- DCHECK(IsAligned(area_start, page_size));
- size_t area_size = RoundUp(area_end - area_start, page_size);
- CHECK(reservation.SetPermissions(area_start, area_size,
- DefaultWritableCodePermissions()));
- }
- }
-
- chunk->reservation_ = std::move(reservation);
-
- if (owner->identity() == CODE_SPACE) {
- chunk->code_object_registry_ = new CodeObjectRegistry();
- } else {
- chunk->code_object_registry_ = nullptr;
- }
-
- chunk->possibly_empty_buckets_.Initialize();
-
- return chunk;
-}
-
-Page* PagedSpace::InitializePage(MemoryChunk* chunk) {
- Page* page = static_cast<Page*>(chunk);
- DCHECK_EQ(
- MemoryChunkLayout::AllocatableMemoryInMemoryChunk(page->owner_identity()),
- page->area_size());
- // Make sure that categories are initialized before freeing the area.
- page->ResetAllocationStatistics();
- page->SetOldGenerationPageFlags(!is_off_thread_space() &&
- heap()->incremental_marking()->IsMarking());
- page->AllocateFreeListCategories();
- page->InitializeFreeListCategories();
- page->list_node().Initialize();
- page->InitializationMemoryFence();
- return page;
-}
-
-Page* SemiSpace::InitializePage(MemoryChunk* chunk) {
- bool in_to_space = (id() != kFromSpace);
- chunk->SetFlag(in_to_space ? MemoryChunk::TO_PAGE : MemoryChunk::FROM_PAGE);
- Page* page = static_cast<Page*>(chunk);
- page->SetYoungGenerationPageFlags(heap()->incremental_marking()->IsMarking());
- page->AllocateLocalTracker();
- page->list_node().Initialize();
-#ifdef ENABLE_MINOR_MC
- if (FLAG_minor_mc) {
- page->AllocateYoungGenerationBitmap();
- heap()
- ->minor_mark_compact_collector()
- ->non_atomic_marking_state()
- ->ClearLiveness(page);
- }
-#endif // ENABLE_MINOR_MC
- page->InitializationMemoryFence();
- return page;
-}
-
void Page::AllocateFreeListCategories() {
DCHECK_NULL(categories_);
categories_ =
@@ -718,169 +130,6 @@ void Page::MergeOldToNewRememberedSets() {
sweeping_slot_set_ = nullptr;
}
-size_t MemoryChunk::CommittedPhysicalMemory() {
- if (!base::OS::HasLazyCommits() || owner_identity() == LO_SPACE)
- return size();
- return high_water_mark_;
-}
-
-bool MemoryChunk::InOldSpace() const { return owner_identity() == OLD_SPACE; }
-
-bool MemoryChunk::InLargeObjectSpace() const {
- return owner_identity() == LO_SPACE;
-}
-
-MemoryChunk* MemoryAllocator::AllocateChunk(size_t reserve_area_size,
- size_t commit_area_size,
- Executability executable,
- Space* owner) {
- DCHECK_LE(commit_area_size, reserve_area_size);
-
- size_t chunk_size;
- Heap* heap = isolate_->heap();
- Address base = kNullAddress;
- VirtualMemory reservation;
- Address area_start = kNullAddress;
- Address area_end = kNullAddress;
- void* address_hint =
- AlignedAddress(heap->GetRandomMmapAddr(), MemoryChunk::kAlignment);
-
- //
- // MemoryChunk layout:
- //
- // Executable
- // +----------------------------+<- base aligned with MemoryChunk::kAlignment
- // | Header |
- // +----------------------------+<- base + CodePageGuardStartOffset
- // | Guard |
- // +----------------------------+<- area_start_
- // | Area |
- // +----------------------------+<- area_end_ (area_start + commit_area_size)
- // | Committed but not used |
- // +----------------------------+<- aligned at OS page boundary
- // | Reserved but not committed |
- // +----------------------------+<- aligned at OS page boundary
- // | Guard |
- // +----------------------------+<- base + chunk_size
- //
- // Non-executable
- // +----------------------------+<- base aligned with MemoryChunk::kAlignment
- // | Header |
- // +----------------------------+<- area_start_ (base + area_start_)
- // | Area |
- // +----------------------------+<- area_end_ (area_start + commit_area_size)
- // | Committed but not used |
- // +----------------------------+<- aligned at OS page boundary
- // | Reserved but not committed |
- // +----------------------------+<- base + chunk_size
- //
-
- if (executable == EXECUTABLE) {
- chunk_size = ::RoundUp(MemoryChunkLayout::ObjectStartOffsetInCodePage() +
- reserve_area_size +
- MemoryChunkLayout::CodePageGuardSize(),
- GetCommitPageSize());
-
- // Size of header (not executable) plus area (executable).
- size_t commit_size = ::RoundUp(
- MemoryChunkLayout::CodePageGuardStartOffset() + commit_area_size,
- GetCommitPageSize());
- base =
- AllocateAlignedMemory(chunk_size, commit_size, MemoryChunk::kAlignment,
- executable, address_hint, &reservation);
- if (base == kNullAddress) return nullptr;
- // Update executable memory size.
- size_executable_ += reservation.size();
-
- if (Heap::ShouldZapGarbage()) {
- ZapBlock(base, MemoryChunkLayout::CodePageGuardStartOffset(), kZapValue);
- ZapBlock(base + MemoryChunkLayout::ObjectStartOffsetInCodePage(),
- commit_area_size, kZapValue);
- }
-
- area_start = base + MemoryChunkLayout::ObjectStartOffsetInCodePage();
- area_end = area_start + commit_area_size;
- } else {
- chunk_size = ::RoundUp(
- MemoryChunkLayout::ObjectStartOffsetInDataPage() + reserve_area_size,
- GetCommitPageSize());
- size_t commit_size = ::RoundUp(
- MemoryChunkLayout::ObjectStartOffsetInDataPage() + commit_area_size,
- GetCommitPageSize());
- base =
- AllocateAlignedMemory(chunk_size, commit_size, MemoryChunk::kAlignment,
- executable, address_hint, &reservation);
-
- if (base == kNullAddress) return nullptr;
-
- if (Heap::ShouldZapGarbage()) {
- ZapBlock(
- base,
- MemoryChunkLayout::ObjectStartOffsetInDataPage() + commit_area_size,
- kZapValue);
- }
-
- area_start = base + MemoryChunkLayout::ObjectStartOffsetInDataPage();
- area_end = area_start + commit_area_size;
- }
-
-  // Use chunk_size for statistics because we treat reserved but not-yet
-  // committed memory regions of chunks as allocated.
- LOG(isolate_,
- NewEvent("MemoryChunk", reinterpret_cast<void*>(base), chunk_size));
-
- // We cannot use the last chunk in the address space because we would
- // overflow when comparing top and limit if this chunk is used for a
- // linear allocation area.
- if ((base + chunk_size) == 0u) {
- CHECK(!last_chunk_.IsReserved());
- last_chunk_ = std::move(reservation);
- UncommitMemory(&last_chunk_);
- size_ -= chunk_size;
- if (executable == EXECUTABLE) {
- size_executable_ -= chunk_size;
- }
- CHECK(last_chunk_.IsReserved());
- return AllocateChunk(reserve_area_size, commit_area_size, executable,
- owner);
- }
-
- MemoryChunk* chunk =
- MemoryChunk::Initialize(heap, base, chunk_size, area_start, area_end,
- executable, owner, std::move(reservation));
-
- if (chunk->executable()) RegisterExecutableMemoryChunk(chunk);
- return chunk;
-}
-
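As a rough illustration of the size computation above, the sketch below rounds header + area (+ trailing guard for executable chunks) up to the commit page size. All constants are made up for the example; the real offsets come from MemoryChunkLayout.

#include <cstdio>
#include <stddef.h>

size_t RoundUpTo(size_t value, size_t granularity) {
  return ((value + granularity - 1) / granularity) * granularity;
}

int main() {
  const size_t commit_page = 4096;      // assumed commit page size
  const size_t code_header = 3 * 4096;  // stand-in for ObjectStartOffsetInCodePage
  const size_t data_header = 2 * 4096;  // stand-in for ObjectStartOffsetInDataPage
  const size_t guard = 4096;            // stand-in for CodePageGuardSize
  const size_t area = 100 * 1024;       // requested reserve_area_size

  // Executable chunks reserve header + area + trailing guard; data chunks
  // only header + area. Both are rounded up to the commit page size.
  size_t code_chunk = RoundUpTo(code_header + area + guard, commit_page);
  size_t data_chunk = RoundUpTo(data_header + area, commit_page);
  std::printf("code chunk: %zu bytes, data chunk: %zu bytes\n", code_chunk,
              data_chunk);
}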
-void MemoryChunk::SetOldGenerationPageFlags(bool is_marking) {
- if (is_marking) {
- SetFlag(MemoryChunk::POINTERS_TO_HERE_ARE_INTERESTING);
- SetFlag(MemoryChunk::POINTERS_FROM_HERE_ARE_INTERESTING);
- SetFlag(MemoryChunk::INCREMENTAL_MARKING);
- } else {
- ClearFlag(MemoryChunk::POINTERS_TO_HERE_ARE_INTERESTING);
- SetFlag(MemoryChunk::POINTERS_FROM_HERE_ARE_INTERESTING);
- ClearFlag(MemoryChunk::INCREMENTAL_MARKING);
- }
-}
-
-void MemoryChunk::SetYoungGenerationPageFlags(bool is_marking) {
- SetFlag(MemoryChunk::POINTERS_TO_HERE_ARE_INTERESTING);
- if (is_marking) {
- SetFlag(MemoryChunk::POINTERS_FROM_HERE_ARE_INTERESTING);
- SetFlag(MemoryChunk::INCREMENTAL_MARKING);
- } else {
- ClearFlag(MemoryChunk::POINTERS_FROM_HERE_ARE_INTERESTING);
- ClearFlag(MemoryChunk::INCREMENTAL_MARKING);
- }
-}
-
-void Page::ResetAllocationStatistics() {
- allocated_bytes_ = area_size();
- wasted_memory_ = 0;
-}
-
void Page::AllocateLocalTracker() {
DCHECK_NULL(local_tracker_);
local_tracker_ = new LocalArrayBufferTracker(this);
@@ -972,6 +221,19 @@ void Page::CreateBlackArea(Address start, Address end) {
marking_state->IncrementLiveBytes(this, static_cast<intptr_t>(end - start));
}
+void Page::CreateBlackAreaBackground(Address start, Address end) {
+ DCHECK(heap()->incremental_marking()->black_allocation());
+ DCHECK_EQ(Page::FromAddress(start), this);
+ DCHECK_LT(start, end);
+ DCHECK_EQ(Page::FromAddress(end - 1), this);
+ IncrementalMarking::AtomicMarkingState* marking_state =
+ heap()->incremental_marking()->atomic_marking_state();
+ marking_state->bitmap(this)->SetRange(AddressToMarkbitIndex(start),
+ AddressToMarkbitIndex(end));
+ heap()->incremental_marking()->IncrementLiveBytesBackground(
+ this, static_cast<intptr_t>(end - start));
+}
+
void Page::DestroyBlackArea(Address start, Address end) {
DCHECK(heap()->incremental_marking()->black_allocation());
DCHECK_EQ(Page::FromAddress(start), this);
@@ -984,441 +246,17 @@ void Page::DestroyBlackArea(Address start, Address end) {
marking_state->IncrementLiveBytes(this, -static_cast<intptr_t>(end - start));
}
-void MemoryAllocator::PartialFreeMemory(MemoryChunk* chunk, Address start_free,
- size_t bytes_to_free,
- Address new_area_end) {
- VirtualMemory* reservation = chunk->reserved_memory();
- DCHECK(reservation->IsReserved());
- chunk->set_size(chunk->size() - bytes_to_free);
- chunk->set_area_end(new_area_end);
- if (chunk->IsFlagSet(MemoryChunk::IS_EXECUTABLE)) {
- // Add guard page at the end.
- size_t page_size = GetCommitPageSize();
- DCHECK_EQ(0, chunk->area_end() % static_cast<Address>(page_size));
- DCHECK_EQ(chunk->address() + chunk->size(),
- chunk->area_end() + MemoryChunkLayout::CodePageGuardSize());
- reservation->SetPermissions(chunk->area_end(), page_size,
- PageAllocator::kNoAccess);
- }
-  // On e.g. Windows, a reservation may be larger than a page; releasing it
-  // partially, starting at |start_free|, will also release the potentially
-  // unused part behind the current page.
- const size_t released_bytes = reservation->Release(start_free);
- DCHECK_GE(size_, released_bytes);
- size_ -= released_bytes;
-}
-
-void MemoryAllocator::UnregisterMemory(MemoryChunk* chunk) {
- DCHECK(!chunk->IsFlagSet(MemoryChunk::UNREGISTERED));
- VirtualMemory* reservation = chunk->reserved_memory();
- const size_t size =
- reservation->IsReserved() ? reservation->size() : chunk->size();
- DCHECK_GE(size_, static_cast<size_t>(size));
- size_ -= size;
- if (chunk->executable() == EXECUTABLE) {
- DCHECK_GE(size_executable_, size);
- size_executable_ -= size;
- }
-
- if (chunk->executable()) UnregisterExecutableMemoryChunk(chunk);
- chunk->SetFlag(MemoryChunk::UNREGISTERED);
-}
-
-void MemoryAllocator::PreFreeMemory(MemoryChunk* chunk) {
- DCHECK(!chunk->IsFlagSet(MemoryChunk::PRE_FREED));
- LOG(isolate_, DeleteEvent("MemoryChunk", chunk));
- UnregisterMemory(chunk);
- isolate_->heap()->RememberUnmappedPage(reinterpret_cast<Address>(chunk),
- chunk->IsEvacuationCandidate());
- chunk->SetFlag(MemoryChunk::PRE_FREED);
-}
-
-void MemoryAllocator::PerformFreeMemory(MemoryChunk* chunk) {
- DCHECK(chunk->IsFlagSet(MemoryChunk::UNREGISTERED));
- DCHECK(chunk->IsFlagSet(MemoryChunk::PRE_FREED));
- chunk->ReleaseAllAllocatedMemory();
-
- VirtualMemory* reservation = chunk->reserved_memory();
- if (chunk->IsFlagSet(MemoryChunk::POOLED)) {
- UncommitMemory(reservation);
- } else {
- if (reservation->IsReserved()) {
- reservation->Free();
- } else {
-      // Only read-only pages can have a non-initialized reservation object.
- DCHECK_EQ(RO_SPACE, chunk->owner_identity());
- FreeMemory(page_allocator(chunk->executable()), chunk->address(),
- chunk->size());
- }
- }
-}
-
-template <MemoryAllocator::FreeMode mode>
-void MemoryAllocator::Free(MemoryChunk* chunk) {
- switch (mode) {
- case kFull:
- PreFreeMemory(chunk);
- PerformFreeMemory(chunk);
- break;
- case kAlreadyPooled:
-      // Pooled pages cannot be touched anymore as their memory is uncommitted.
-      // Pooled pages are not executable.
- FreeMemory(data_page_allocator(), chunk->address(),
- static_cast<size_t>(MemoryChunk::kPageSize));
- break;
- case kPooledAndQueue:
- DCHECK_EQ(chunk->size(), static_cast<size_t>(MemoryChunk::kPageSize));
- DCHECK_EQ(chunk->executable(), NOT_EXECUTABLE);
- chunk->SetFlag(MemoryChunk::POOLED);
- V8_FALLTHROUGH;
- case kPreFreeAndQueue:
- PreFreeMemory(chunk);
- // The chunks added to this queue will be freed by a concurrent thread.
- unmapper()->AddMemoryChunkSafe(chunk);
- break;
- }
-}
-
-template EXPORT_TEMPLATE_DEFINE(V8_EXPORT_PRIVATE) void MemoryAllocator::Free<
- MemoryAllocator::kFull>(MemoryChunk* chunk);
-
-template EXPORT_TEMPLATE_DEFINE(V8_EXPORT_PRIVATE) void MemoryAllocator::Free<
- MemoryAllocator::kAlreadyPooled>(MemoryChunk* chunk);
-
-template EXPORT_TEMPLATE_DEFINE(V8_EXPORT_PRIVATE) void MemoryAllocator::Free<
- MemoryAllocator::kPreFreeAndQueue>(MemoryChunk* chunk);
-
-template EXPORT_TEMPLATE_DEFINE(V8_EXPORT_PRIVATE) void MemoryAllocator::Free<
- MemoryAllocator::kPooledAndQueue>(MemoryChunk* chunk);
-
-template <MemoryAllocator::AllocationMode alloc_mode, typename SpaceType>
-Page* MemoryAllocator::AllocatePage(size_t size, SpaceType* owner,
- Executability executable) {
- MemoryChunk* chunk = nullptr;
- if (alloc_mode == kPooled) {
- DCHECK_EQ(size, static_cast<size_t>(
- MemoryChunkLayout::AllocatableMemoryInMemoryChunk(
- owner->identity())));
- DCHECK_EQ(executable, NOT_EXECUTABLE);
- chunk = AllocatePagePooled(owner);
- }
- if (chunk == nullptr) {
- chunk = AllocateChunk(size, size, executable, owner);
- }
- if (chunk == nullptr) return nullptr;
- return owner->InitializePage(chunk);
-}
-
-template EXPORT_TEMPLATE_DEFINE(V8_EXPORT_PRIVATE)
- Page* MemoryAllocator::AllocatePage<MemoryAllocator::kRegular, PagedSpace>(
- size_t size, PagedSpace* owner, Executability executable);
-template EXPORT_TEMPLATE_DEFINE(V8_EXPORT_PRIVATE)
- Page* MemoryAllocator::AllocatePage<MemoryAllocator::kRegular, SemiSpace>(
- size_t size, SemiSpace* owner, Executability executable);
-template EXPORT_TEMPLATE_DEFINE(V8_EXPORT_PRIVATE)
- Page* MemoryAllocator::AllocatePage<MemoryAllocator::kPooled, SemiSpace>(
- size_t size, SemiSpace* owner, Executability executable);
-
-LargePage* MemoryAllocator::AllocateLargePage(size_t size,
- LargeObjectSpace* owner,
- Executability executable) {
- MemoryChunk* chunk = AllocateChunk(size, size, executable, owner);
- if (chunk == nullptr) return nullptr;
- return LargePage::Initialize(isolate_->heap(), chunk, executable);
-}
-
-template <typename SpaceType>
-MemoryChunk* MemoryAllocator::AllocatePagePooled(SpaceType* owner) {
- MemoryChunk* chunk = unmapper()->TryGetPooledMemoryChunkSafe();
- if (chunk == nullptr) return nullptr;
- const int size = MemoryChunk::kPageSize;
- const Address start = reinterpret_cast<Address>(chunk);
- const Address area_start =
- start +
- MemoryChunkLayout::ObjectStartOffsetInMemoryChunk(owner->identity());
- const Address area_end = start + size;
- // Pooled pages are always regular data pages.
- DCHECK_NE(CODE_SPACE, owner->identity());
- VirtualMemory reservation(data_page_allocator(), start, size);
- if (!CommitMemory(&reservation)) return nullptr;
- if (Heap::ShouldZapGarbage()) {
- ZapBlock(start, size, kZapValue);
- }
- MemoryChunk::Initialize(isolate_->heap(), start, size, area_start, area_end,
- NOT_EXECUTABLE, owner, std::move(reservation));
- size_ += size;
- return chunk;
-}
-
-void MemoryAllocator::ZapBlock(Address start, size_t size,
- uintptr_t zap_value) {
- DCHECK(IsAligned(start, kTaggedSize));
- DCHECK(IsAligned(size, kTaggedSize));
- MemsetTagged(ObjectSlot(start), Object(static_cast<Address>(zap_value)),
- size >> kTaggedSizeLog2);
-}
-
-intptr_t MemoryAllocator::GetCommitPageSize() {
- if (FLAG_v8_os_page_size != 0) {
- DCHECK(base::bits::IsPowerOfTwo(FLAG_v8_os_page_size));
- return FLAG_v8_os_page_size * KB;
- } else {
- return CommitPageSize();
- }
-}
-
-base::AddressRegion MemoryAllocator::ComputeDiscardMemoryArea(Address addr,
- size_t size) {
- size_t page_size = MemoryAllocator::GetCommitPageSize();
- if (size < page_size + FreeSpace::kSize) {
- return base::AddressRegion(0, 0);
- }
- Address discardable_start = RoundUp(addr + FreeSpace::kSize, page_size);
- Address discardable_end = RoundDown(addr + size, page_size);
- if (discardable_start >= discardable_end) return base::AddressRegion(0, 0);
- return base::AddressRegion(discardable_start,
- discardable_end - discardable_start);
-}
-
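A worked sketch of the rounding above: only whole commit pages strictly inside the free block, past the free-space header, can be discarded, so the start is rounded up and the end rounded down. The 16-byte header and the Region struct are assumptions for the example, not FreeSpace::kSize.

#include <cstdio>
#include <stddef.h>
#include <stdint.h>

struct Region {
  uintptr_t start;
  size_t size;
};

// Round the discardable start up and the end down to the commit page size.
Region ComputeDiscardArea(uintptr_t addr, size_t size, size_t page_size,
                          size_t header_size) {
  if (size < page_size + header_size) return {0, 0};
  uintptr_t start =
      ((addr + header_size + page_size - 1) / page_size) * page_size;
  uintptr_t end = ((addr + size) / page_size) * page_size;
  if (start >= end) return {0, 0};
  return {start, static_cast<size_t>(end - start)};
}

int main() {
  // A three-page free block whose first bytes hold the free-space header:
  Region r = ComputeDiscardArea(0x10000, 3 * 4096, 4096, 16);
  std::printf("discard %#llx + %zu bytes\n",
              static_cast<unsigned long long>(r.start), r.size);  // two pages
}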
-bool MemoryAllocator::CommitExecutableMemory(VirtualMemory* vm, Address start,
- size_t commit_size,
- size_t reserved_size) {
- const size_t page_size = GetCommitPageSize();
- // All addresses and sizes must be aligned to the commit page size.
- DCHECK(IsAligned(start, page_size));
- DCHECK_EQ(0, commit_size % page_size);
- DCHECK_EQ(0, reserved_size % page_size);
- const size_t guard_size = MemoryChunkLayout::CodePageGuardSize();
- const size_t pre_guard_offset = MemoryChunkLayout::CodePageGuardStartOffset();
- const size_t code_area_offset =
- MemoryChunkLayout::ObjectStartOffsetInCodePage();
- // reserved_size includes two guard regions, commit_size does not.
- DCHECK_LE(commit_size, reserved_size - 2 * guard_size);
- const Address pre_guard_page = start + pre_guard_offset;
- const Address code_area = start + code_area_offset;
- const Address post_guard_page = start + reserved_size - guard_size;
- // Commit the non-executable header, from start to pre-code guard page.
- if (vm->SetPermissions(start, pre_guard_offset, PageAllocator::kReadWrite)) {
- // Create the pre-code guard page, following the header.
- if (vm->SetPermissions(pre_guard_page, page_size,
- PageAllocator::kNoAccess)) {
- // Commit the executable code body.
- if (vm->SetPermissions(code_area, commit_size - pre_guard_offset,
- PageAllocator::kReadWrite)) {
- // Create the post-code guard page.
- if (vm->SetPermissions(post_guard_page, page_size,
- PageAllocator::kNoAccess)) {
- UpdateAllocatedSpaceLimits(start, code_area + commit_size);
- return true;
- }
- vm->SetPermissions(code_area, commit_size, PageAllocator::kNoAccess);
- }
- }
- vm->SetPermissions(start, pre_guard_offset, PageAllocator::kNoAccess);
- }
- return false;
-}
-
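To make the permission layout above concrete, the sketch below prints the regions a reservation is split into: a read-write header, a no-access pre-guard page, the committed code body, and a no-access post-guard page at the very end. All offsets are invented for the example; the real ones come from MemoryChunkLayout and GetCommitPageSize().

#include <cstdio>
#include <stddef.h>

int main() {
  const size_t page_size = 4096;
  const size_t reserved_size = 16 * page_size;
  const size_t pre_guard_offset = page_size;      // read-write header
  const size_t code_area_offset = 2 * page_size;  // header + pre-guard page
  const size_t guard_size = page_size;
  const size_t commit_size = 6 * page_size;       // header + code body

  const size_t post_guard_page = reserved_size - guard_size;
  const size_t code_bytes = commit_size - pre_guard_offset;

  std::printf("header      [0, %zu) rw\n", pre_guard_offset);
  std::printf("pre guard   [%zu, %zu) no access\n", pre_guard_offset,
              code_area_offset);
  std::printf("code body   [%zu, %zu) rw\n", code_area_offset,
              code_area_offset + code_bytes);
  std::printf("post guard  [%zu, %zu) no access\n", post_guard_page,
              reserved_size);
}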
-
-// -----------------------------------------------------------------------------
-// MemoryChunk implementation
-
-void MemoryChunk::ReleaseAllocatedMemoryNeededForWritableChunk() {
- if (mutex_ != nullptr) {
- delete mutex_;
- mutex_ = nullptr;
- }
- if (page_protection_change_mutex_ != nullptr) {
- delete page_protection_change_mutex_;
- page_protection_change_mutex_ = nullptr;
- }
- if (code_object_registry_ != nullptr) {
- delete code_object_registry_;
- code_object_registry_ = nullptr;
- }
-
- possibly_empty_buckets_.Release();
- ReleaseSlotSet<OLD_TO_NEW>();
- ReleaseSweepingSlotSet();
- ReleaseSlotSet<OLD_TO_OLD>();
- ReleaseTypedSlotSet<OLD_TO_NEW>();
- ReleaseTypedSlotSet<OLD_TO_OLD>();
- ReleaseInvalidatedSlots<OLD_TO_NEW>();
- ReleaseInvalidatedSlots<OLD_TO_OLD>();
-
- if (local_tracker_ != nullptr) ReleaseLocalTracker();
- if (young_generation_bitmap_ != nullptr) ReleaseYoungGenerationBitmap();
-
- if (!IsLargePage()) {
- Page* page = static_cast<Page*>(this);
- page->ReleaseFreeListCategories();
- }
-}
-
-void MemoryChunk::ReleaseAllAllocatedMemory() {
- ReleaseAllocatedMemoryNeededForWritableChunk();
- if (marking_bitmap_ != nullptr) ReleaseMarkingBitmap();
-}
-
-template V8_EXPORT_PRIVATE SlotSet* MemoryChunk::AllocateSlotSet<OLD_TO_NEW>();
-template V8_EXPORT_PRIVATE SlotSet* MemoryChunk::AllocateSlotSet<OLD_TO_OLD>();
-
-template <RememberedSetType type>
-SlotSet* MemoryChunk::AllocateSlotSet() {
- return AllocateSlotSet(&slot_set_[type]);
-}
-
-SlotSet* MemoryChunk::AllocateSweepingSlotSet() {
- return AllocateSlotSet(&sweeping_slot_set_);
-}
-
-SlotSet* MemoryChunk::AllocateSlotSet(SlotSet** slot_set) {
- SlotSet* new_slot_set = SlotSet::Allocate(buckets());
- SlotSet* old_slot_set = base::AsAtomicPointer::AcquireRelease_CompareAndSwap(
- slot_set, nullptr, new_slot_set);
- if (old_slot_set != nullptr) {
- SlotSet::Delete(new_slot_set, buckets());
- new_slot_set = old_slot_set;
- }
- DCHECK(new_slot_set);
- return new_slot_set;
-}
-
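The compare-and-swap above follows an allocate-or-adopt pattern that a standalone sketch shows more plainly: build a fresh set, try to publish it atomically, and if another thread won the race, delete the local copy and use the winner's. The sketch uses std::atomic and a stand-in SlotData type rather than V8's AsAtomicPointer and SlotSet.

#include <atomic>

struct SlotData {};  // stand-in for V8's SlotSet

// Several threads may race to install a set for the same chunk: exactly one
// compare-exchange wins; the losers delete their copy and adopt the winner's.
SlotData* GetOrAllocate(std::atomic<SlotData*>* slot) {
  SlotData* fresh = new SlotData();
  SlotData* expected = nullptr;
  if (slot->compare_exchange_strong(expected, fresh,
                                    std::memory_order_acq_rel)) {
    return fresh;  // we installed our copy
  }
  delete fresh;     // lost the race
  return expected;  // compare_exchange_strong loaded the current value here
}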
-template void MemoryChunk::ReleaseSlotSet<OLD_TO_NEW>();
-template void MemoryChunk::ReleaseSlotSet<OLD_TO_OLD>();
-
-template <RememberedSetType type>
-void MemoryChunk::ReleaseSlotSet() {
- ReleaseSlotSet(&slot_set_[type]);
-}
-
-void MemoryChunk::ReleaseSweepingSlotSet() {
- ReleaseSlotSet(&sweeping_slot_set_);
-}
-
-void MemoryChunk::ReleaseSlotSet(SlotSet** slot_set) {
- if (*slot_set) {
- SlotSet::Delete(*slot_set, buckets());
- *slot_set = nullptr;
- }
-}
-
-template TypedSlotSet* MemoryChunk::AllocateTypedSlotSet<OLD_TO_NEW>();
-template TypedSlotSet* MemoryChunk::AllocateTypedSlotSet<OLD_TO_OLD>();
-
-template <RememberedSetType type>
-TypedSlotSet* MemoryChunk::AllocateTypedSlotSet() {
- TypedSlotSet* typed_slot_set = new TypedSlotSet(address());
- TypedSlotSet* old_value = base::AsAtomicPointer::Release_CompareAndSwap(
- &typed_slot_set_[type], nullptr, typed_slot_set);
- if (old_value != nullptr) {
- delete typed_slot_set;
- typed_slot_set = old_value;
- }
- DCHECK(typed_slot_set);
- return typed_slot_set;
-}
-
-template void MemoryChunk::ReleaseTypedSlotSet<OLD_TO_NEW>();
-template void MemoryChunk::ReleaseTypedSlotSet<OLD_TO_OLD>();
-
-template <RememberedSetType type>
-void MemoryChunk::ReleaseTypedSlotSet() {
- TypedSlotSet* typed_slot_set = typed_slot_set_[type];
- if (typed_slot_set) {
- typed_slot_set_[type] = nullptr;
- delete typed_slot_set;
- }
-}
-
-template InvalidatedSlots* MemoryChunk::AllocateInvalidatedSlots<OLD_TO_NEW>();
-template InvalidatedSlots* MemoryChunk::AllocateInvalidatedSlots<OLD_TO_OLD>();
-
-template <RememberedSetType type>
-InvalidatedSlots* MemoryChunk::AllocateInvalidatedSlots() {
- DCHECK_NULL(invalidated_slots_[type]);
- invalidated_slots_[type] = new InvalidatedSlots();
- return invalidated_slots_[type];
-}
-
-template void MemoryChunk::ReleaseInvalidatedSlots<OLD_TO_NEW>();
-template void MemoryChunk::ReleaseInvalidatedSlots<OLD_TO_OLD>();
-
-template <RememberedSetType type>
-void MemoryChunk::ReleaseInvalidatedSlots() {
- if (invalidated_slots_[type]) {
- delete invalidated_slots_[type];
- invalidated_slots_[type] = nullptr;
- }
-}
-
-template V8_EXPORT_PRIVATE void
-MemoryChunk::RegisterObjectWithInvalidatedSlots<OLD_TO_NEW>(HeapObject object);
-template V8_EXPORT_PRIVATE void
-MemoryChunk::RegisterObjectWithInvalidatedSlots<OLD_TO_OLD>(HeapObject object);
-
-template <RememberedSetType type>
-void MemoryChunk::RegisterObjectWithInvalidatedSlots(HeapObject object) {
- bool skip_slot_recording;
-
- if (type == OLD_TO_NEW) {
- skip_slot_recording = InYoungGeneration();
- } else {
- skip_slot_recording = ShouldSkipEvacuationSlotRecording();
- }
-
- if (skip_slot_recording) {
- return;
- }
-
- if (invalidated_slots<type>() == nullptr) {
- AllocateInvalidatedSlots<type>();
- }
-
- invalidated_slots<type>()->insert(object);
-}
-
-void MemoryChunk::InvalidateRecordedSlots(HeapObject object) {
- if (V8_DISABLE_WRITE_BARRIERS_BOOL) return;
- if (heap()->incremental_marking()->IsCompacting()) {
- // We cannot check slot_set_[OLD_TO_OLD] here, since the
- // concurrent markers might insert slots concurrently.
- RegisterObjectWithInvalidatedSlots<OLD_TO_OLD>(object);
- }
-
- if (!FLAG_always_promote_young_mc || slot_set_[OLD_TO_NEW] != nullptr)
- RegisterObjectWithInvalidatedSlots<OLD_TO_NEW>(object);
-}
-
-template bool MemoryChunk::RegisteredObjectWithInvalidatedSlots<OLD_TO_NEW>(
- HeapObject object);
-template bool MemoryChunk::RegisteredObjectWithInvalidatedSlots<OLD_TO_OLD>(
- HeapObject object);
-
-template <RememberedSetType type>
-bool MemoryChunk::RegisteredObjectWithInvalidatedSlots(HeapObject object) {
- if (invalidated_slots<type>() == nullptr) {
- return false;
- }
- return invalidated_slots<type>()->find(object) !=
- invalidated_slots<type>()->end();
-}
-
-void MemoryChunk::ReleaseLocalTracker() {
- DCHECK_NOT_NULL(local_tracker_);
- delete local_tracker_;
- local_tracker_ = nullptr;
-}
-
-void MemoryChunk::AllocateYoungGenerationBitmap() {
- DCHECK_NULL(young_generation_bitmap_);
- young_generation_bitmap_ = static_cast<Bitmap*>(calloc(1, Bitmap::kSize));
-}
-
-void MemoryChunk::ReleaseYoungGenerationBitmap() {
- DCHECK_NOT_NULL(young_generation_bitmap_);
- free(young_generation_bitmap_);
- young_generation_bitmap_ = nullptr;
+void Page::DestroyBlackAreaBackground(Address start, Address end) {
+ DCHECK(heap()->incremental_marking()->black_allocation());
+ DCHECK_EQ(Page::FromAddress(start), this);
+ DCHECK_LT(start, end);
+ DCHECK_EQ(Page::FromAddress(end - 1), this);
+ IncrementalMarking::AtomicMarkingState* marking_state =
+ heap()->incremental_marking()->atomic_marking_state();
+ marking_state->bitmap(this)->ClearRange(AddressToMarkbitIndex(start),
+ AddressToMarkbitIndex(end));
+ heap()->incremental_marking()->IncrementLiveBytesBackground(
+ this, -static_cast<intptr_t>(end - start));
}
// -----------------------------------------------------------------------------
@@ -1481,293 +319,6 @@ intptr_t Space::GetNextInlineAllocationStepSize() {
return next_step;
}
-PagedSpace::PagedSpace(Heap* heap, AllocationSpace space,
- Executability executable, FreeList* free_list,
- LocalSpaceKind local_space_kind)
- : SpaceWithLinearArea(heap, space, free_list),
- executable_(executable),
- local_space_kind_(local_space_kind) {
- area_size_ = MemoryChunkLayout::AllocatableMemoryInMemoryChunk(space);
- accounting_stats_.Clear();
-}
-
-void PagedSpace::TearDown() {
- while (!memory_chunk_list_.Empty()) {
- MemoryChunk* chunk = memory_chunk_list_.front();
- memory_chunk_list_.Remove(chunk);
- heap()->memory_allocator()->Free<MemoryAllocator::kFull>(chunk);
- }
- accounting_stats_.Clear();
-}
-
-void PagedSpace::RefillFreeList() {
- // Any PagedSpace might invoke RefillFreeList. We filter all but our old
- // generation spaces out.
- if (identity() != OLD_SPACE && identity() != CODE_SPACE &&
- identity() != MAP_SPACE && identity() != RO_SPACE) {
- return;
- }
- DCHECK_NE(local_space_kind(), LocalSpaceKind::kOffThreadSpace);
- DCHECK_IMPLIES(is_local_space(), is_compaction_space());
- DCHECK(!IsDetached());
- MarkCompactCollector* collector = heap()->mark_compact_collector();
- size_t added = 0;
-
- {
- Page* p = nullptr;
- while ((p = collector->sweeper()->GetSweptPageSafe(this)) != nullptr) {
- // We regularly sweep NEVER_ALLOCATE_ON_PAGE pages. We drop the freelist
- // entries here to make them unavailable for allocations.
- if (p->IsFlagSet(Page::NEVER_ALLOCATE_ON_PAGE)) {
- p->ForAllFreeListCategories([this](FreeListCategory* category) {
- category->Reset(free_list());
- });
- }
-
- // Also merge old-to-new remembered sets if not scavenging because of
- // data races: One thread might iterate remembered set, while another
- // thread merges them.
- if (local_space_kind() != LocalSpaceKind::kCompactionSpaceForScavenge) {
- p->MergeOldToNewRememberedSets();
- }
-
-      // Only during compaction can pages actually change ownership. This is
-      // safe because there is no other competing action on the page links
-      // during compaction.
- if (is_compaction_space()) {
- DCHECK_NE(this, p->owner());
- PagedSpace* owner = reinterpret_cast<PagedSpace*>(p->owner());
- base::MutexGuard guard(owner->mutex());
- owner->RefineAllocatedBytesAfterSweeping(p);
- owner->RemovePage(p);
- added += AddPage(p);
- } else {
- base::MutexGuard guard(mutex());
- DCHECK_EQ(this, p->owner());
- RefineAllocatedBytesAfterSweeping(p);
- added += RelinkFreeListCategories(p);
- }
- added += p->wasted_memory();
- if (is_compaction_space() && (added > kCompactionMemoryWanted)) break;
- }
- }
-}
-
-void OffThreadSpace::RefillFreeList() {
- // We should never try to refill the free list in off-thread space, because
- // we know it will always be fully linear.
- UNREACHABLE();
-}
-
-void PagedSpace::MergeLocalSpace(LocalSpace* other) {
- base::MutexGuard guard(mutex());
-
- DCHECK(identity() == other->identity());
-
- // Unmerged fields:
- // area_size_
- other->FreeLinearAllocationArea();
-
- for (int i = static_cast<int>(AllocationOrigin::kFirstAllocationOrigin);
- i <= static_cast<int>(AllocationOrigin::kLastAllocationOrigin); i++) {
- allocations_origins_[i] += other->allocations_origins_[i];
- }
-
- // The linear allocation area of {other} should be destroyed now.
- DCHECK_EQ(kNullAddress, other->top());
- DCHECK_EQ(kNullAddress, other->limit());
-
- bool merging_from_off_thread = other->is_off_thread_space();
-
- // Move over pages.
- for (auto it = other->begin(); it != other->end();) {
- Page* p = *(it++);
-
- if (merging_from_off_thread) {
- DCHECK_NULL(p->sweeping_slot_set());
- p->SetOldGenerationPageFlags(heap()->incremental_marking()->IsMarking());
- if (heap()->incremental_marking()->black_allocation()) {
- p->CreateBlackArea(p->area_start(), p->HighWaterMark());
- }
- } else {
- p->MergeOldToNewRememberedSets();
- }
-
- // Relinking requires the category to be unlinked.
- other->RemovePage(p);
- AddPage(p);
- // These code pages were allocated by the CompactionSpace.
- if (identity() == CODE_SPACE) heap()->isolate()->AddCodeMemoryChunk(p);
- DCHECK_IMPLIES(
- !p->IsFlagSet(Page::NEVER_ALLOCATE_ON_PAGE),
- p->AvailableInFreeList() == p->AvailableInFreeListFromAllocatedBytes());
-
-    // TODO(leszeks): Here we should do an allocation step, but:
- // 1. Allocation groups are currently not handled properly by the sampling
- // allocation profiler, and
- // 2. Observers might try to take the space lock, which isn't reentrant.
- // We'll have to come up with a better solution for allocation stepping
- // before shipping, which will likely be using LocalHeap.
- }
-
- DCHECK_EQ(0u, other->Size());
- DCHECK_EQ(0u, other->Capacity());
-}
-
-size_t PagedSpace::CommittedPhysicalMemory() {
- if (!base::OS::HasLazyCommits()) return CommittedMemory();
- MemoryChunk::UpdateHighWaterMark(allocation_info_.top());
- size_t size = 0;
- for (Page* page : *this) {
- size += page->CommittedPhysicalMemory();
- }
- return size;
-}
-
-bool PagedSpace::ContainsSlow(Address addr) {
- Page* p = Page::FromAddress(addr);
- for (Page* page : *this) {
- if (page == p) return true;
- }
- return false;
-}
-
-void PagedSpace::RefineAllocatedBytesAfterSweeping(Page* page) {
- CHECK(page->SweepingDone());
- auto marking_state =
- heap()->incremental_marking()->non_atomic_marking_state();
-  // The live bytes on the page were accounted for in the space's allocated
-  // bytes counter. After sweeping, allocated_bytes() contains the
-  // accurate live byte count on the page.
- size_t old_counter = marking_state->live_bytes(page);
- size_t new_counter = page->allocated_bytes();
- DCHECK_GE(old_counter, new_counter);
- if (old_counter > new_counter) {
- DecreaseAllocatedBytes(old_counter - new_counter, page);
- // Give the heap a chance to adjust counters in response to the
- // more precise and smaller old generation size.
- heap()->NotifyRefinedOldGenerationSize(old_counter - new_counter);
- }
- marking_state->SetLiveBytes(page, 0);
-}
-
-Page* PagedSpace::RemovePageSafe(int size_in_bytes) {
- base::MutexGuard guard(mutex());
- Page* page = free_list()->GetPageForSize(size_in_bytes);
- if (!page) return nullptr;
- RemovePage(page);
- return page;
-}
-
-size_t PagedSpace::AddPage(Page* page) {
- CHECK(page->SweepingDone());
- page->set_owner(this);
- memory_chunk_list_.PushBack(page);
- AccountCommitted(page->size());
- IncreaseCapacity(page->area_size());
- IncreaseAllocatedBytes(page->allocated_bytes(), page);
- for (size_t i = 0; i < ExternalBackingStoreType::kNumTypes; i++) {
- ExternalBackingStoreType t = static_cast<ExternalBackingStoreType>(i);
- IncrementExternalBackingStoreBytes(t, page->ExternalBackingStoreBytes(t));
- }
- return RelinkFreeListCategories(page);
-}
-
-void PagedSpace::RemovePage(Page* page) {
- CHECK(page->SweepingDone());
- memory_chunk_list_.Remove(page);
- UnlinkFreeListCategories(page);
- DecreaseAllocatedBytes(page->allocated_bytes(), page);
- DecreaseCapacity(page->area_size());
- AccountUncommitted(page->size());
- for (size_t i = 0; i < ExternalBackingStoreType::kNumTypes; i++) {
- ExternalBackingStoreType t = static_cast<ExternalBackingStoreType>(i);
- DecrementExternalBackingStoreBytes(t, page->ExternalBackingStoreBytes(t));
- }
-}
-
-size_t PagedSpace::ShrinkPageToHighWaterMark(Page* page) {
- size_t unused = page->ShrinkToHighWaterMark();
- accounting_stats_.DecreaseCapacity(static_cast<intptr_t>(unused));
- AccountUncommitted(unused);
- return unused;
-}
-
-void PagedSpace::ResetFreeList() {
- for (Page* page : *this) {
- free_list_->EvictFreeListItems(page);
- }
- DCHECK(free_list_->IsEmpty());
-}
-
-void PagedSpace::ShrinkImmortalImmovablePages() {
- DCHECK(!heap()->deserialization_complete());
- MemoryChunk::UpdateHighWaterMark(allocation_info_.top());
- FreeLinearAllocationArea();
- ResetFreeList();
- for (Page* page : *this) {
- DCHECK(page->IsFlagSet(Page::NEVER_EVACUATE));
- ShrinkPageToHighWaterMark(page);
- }
-}
-
-bool PagedSpace::Expand() {
- // Always lock against the main space as we can only adjust capacity and
- // pages concurrently for the main paged space.
- base::MutexGuard guard(heap()->paged_space(identity())->mutex());
-
- const int size = AreaSize();
-
- if (!heap()->CanExpandOldGeneration(size)) return false;
-
- Page* page =
- heap()->memory_allocator()->AllocatePage(size, this, executable());
- if (page == nullptr) return false;
- // Pages created during bootstrapping may contain immortal immovable objects.
- if (!heap()->deserialization_complete()) page->MarkNeverEvacuate();
- AddPage(page);
- // If this is a non-compaction code space, this is a previously unseen page.
- if (identity() == CODE_SPACE && !is_compaction_space()) {
- heap()->isolate()->AddCodeMemoryChunk(page);
- }
- Free(page->area_start(), page->area_size(),
- SpaceAccountingMode::kSpaceAccounted);
- heap()->NotifyOldGenerationExpansion();
- return true;
-}
-
-int PagedSpace::CountTotalPages() {
- int count = 0;
- for (Page* page : *this) {
- count++;
- USE(page);
- }
- return count;
-}
-
-void PagedSpace::SetLinearAllocationArea(Address top, Address limit) {
- SetTopAndLimit(top, limit);
- if (top != kNullAddress && top != limit && !is_off_thread_space() &&
- heap()->incremental_marking()->black_allocation()) {
- Page::FromAllocationAreaAddress(top)->CreateBlackArea(top, limit);
- }
-}
-
-void PagedSpace::DecreaseLimit(Address new_limit) {
- Address old_limit = limit();
- DCHECK_LE(top(), new_limit);
- DCHECK_GE(old_limit, new_limit);
- if (new_limit != old_limit) {
- SetTopAndLimit(top(), new_limit);
- Free(new_limit, old_limit - new_limit,
- SpaceAccountingMode::kSpaceAccounted);
- if (heap()->incremental_marking()->black_allocation()) {
- Page::FromAllocationAreaAddress(new_limit)->DestroyBlackArea(new_limit,
- old_limit);
- }
- }
-}
-
Address SpaceWithLinearArea::ComputeLimit(Address start, Address end,
size_t min_size) {
DCHECK_GE(end - start, min_size);
@@ -1802,560 +353,6 @@ void SpaceWithLinearArea::PrintAllocationsOrigins() {
allocations_origins_[2]);
}
-void PagedSpace::MarkLinearAllocationAreaBlack() {
- DCHECK(heap()->incremental_marking()->black_allocation());
- Address current_top = top();
- Address current_limit = limit();
- if (current_top != kNullAddress && current_top != current_limit) {
- Page::FromAllocationAreaAddress(current_top)
- ->CreateBlackArea(current_top, current_limit);
- }
-}
-
-void PagedSpace::UnmarkLinearAllocationArea() {
- Address current_top = top();
- Address current_limit = limit();
- if (current_top != kNullAddress && current_top != current_limit) {
- Page::FromAllocationAreaAddress(current_top)
- ->DestroyBlackArea(current_top, current_limit);
- }
-}
-
-void PagedSpace::FreeLinearAllocationArea() {
- // Mark the old linear allocation area with a free space map so it can be
- // skipped when scanning the heap.
- Address current_top = top();
- Address current_limit = limit();
- if (current_top == kNullAddress) {
- DCHECK_EQ(kNullAddress, current_limit);
- return;
- }
-
- if (!is_off_thread_space() &&
- heap()->incremental_marking()->black_allocation()) {
- Page* page = Page::FromAllocationAreaAddress(current_top);
-
- // Clear the bits in the unused black area.
- if (current_top != current_limit) {
- IncrementalMarking::MarkingState* marking_state =
- heap()->incremental_marking()->marking_state();
- marking_state->bitmap(page)->ClearRange(
- page->AddressToMarkbitIndex(current_top),
- page->AddressToMarkbitIndex(current_limit));
- marking_state->IncrementLiveBytes(
- page, -static_cast<int>(current_limit - current_top));
- }
- }
-
- InlineAllocationStep(current_top, kNullAddress, kNullAddress, 0);
- SetTopAndLimit(kNullAddress, kNullAddress);
- DCHECK_GE(current_limit, current_top);
-
- // The code page of the linear allocation area needs to be unprotected
- // because we are going to write a filler into that memory area below.
- if (identity() == CODE_SPACE) {
- heap()->UnprotectAndRegisterMemoryChunk(
- MemoryChunk::FromAddress(current_top));
- }
- Free(current_top, current_limit - current_top,
- SpaceAccountingMode::kSpaceAccounted);
-}
-
-void PagedSpace::ReleasePage(Page* page) {
- DCHECK_EQ(
- 0, heap()->incremental_marking()->non_atomic_marking_state()->live_bytes(
- page));
- DCHECK_EQ(page->owner(), this);
-
- free_list_->EvictFreeListItems(page);
-
- if (Page::FromAllocationAreaAddress(allocation_info_.top()) == page) {
- DCHECK(!top_on_previous_step_);
- allocation_info_.Reset(kNullAddress, kNullAddress);
- }
-
- heap()->isolate()->RemoveCodeMemoryChunk(page);
-
- AccountUncommitted(page->size());
- accounting_stats_.DecreaseCapacity(page->area_size());
- heap()->memory_allocator()->Free<MemoryAllocator::kPreFreeAndQueue>(page);
-}
-
-void PagedSpace::SetReadable() {
- DCHECK(identity() == CODE_SPACE);
- for (Page* page : *this) {
- CHECK(heap()->memory_allocator()->IsMemoryChunkExecutable(page));
- page->SetReadable();
- }
-}
-
-void PagedSpace::SetReadAndExecutable() {
- DCHECK(identity() == CODE_SPACE);
- for (Page* page : *this) {
- CHECK(heap()->memory_allocator()->IsMemoryChunkExecutable(page));
- page->SetReadAndExecutable();
- }
-}
-
-void PagedSpace::SetReadAndWritable() {
- DCHECK(identity() == CODE_SPACE);
- for (Page* page : *this) {
- CHECK(heap()->memory_allocator()->IsMemoryChunkExecutable(page));
- page->SetReadAndWritable();
- }
-}
-
-std::unique_ptr<ObjectIterator> PagedSpace::GetObjectIterator(Heap* heap) {
- return std::unique_ptr<ObjectIterator>(
- new PagedSpaceObjectIterator(heap, this));
-}
-
-bool PagedSpace::RefillLinearAllocationAreaFromFreeList(
- size_t size_in_bytes, AllocationOrigin origin) {
- DCHECK(IsAligned(size_in_bytes, kTaggedSize));
- DCHECK_LE(top(), limit());
-#ifdef DEBUG
- if (top() != limit()) {
- DCHECK_EQ(Page::FromAddress(top()), Page::FromAddress(limit() - 1));
- }
-#endif
- // Don't allocate from the free list if there is linear space available.
- DCHECK_LT(static_cast<size_t>(limit() - top()), size_in_bytes);
-
- // Mark the old linear allocation area with a free space map so it can be
- // skipped when scanning the heap. This also puts it back in the free list
- // if it is big enough.
- FreeLinearAllocationArea();
-
- if (!is_local_space()) {
- heap()->StartIncrementalMarkingIfAllocationLimitIsReached(
- heap()->GCFlagsForIncrementalMarking(),
- kGCCallbackScheduleIdleGarbageCollection);
- }
-
- size_t new_node_size = 0;
- FreeSpace new_node =
- free_list_->Allocate(size_in_bytes, &new_node_size, origin);
- if (new_node.is_null()) return false;
- DCHECK_GE(new_node_size, size_in_bytes);
-
- // The old-space-step might have finished sweeping and restarted marking.
- // Verify that it did not turn the page of the new node into an evacuation
- // candidate.
- DCHECK(!MarkCompactCollector::IsOnEvacuationCandidate(new_node));
-
- // Memory in the linear allocation area is counted as allocated. We may free
- // a little of this again immediately - see below.
- Page* page = Page::FromHeapObject(new_node);
- IncreaseAllocatedBytes(new_node_size, page);
-
- Address start = new_node.address();
- Address end = new_node.address() + new_node_size;
- Address limit = ComputeLimit(start, end, size_in_bytes);
- DCHECK_LE(limit, end);
- DCHECK_LE(size_in_bytes, limit - start);
- if (limit != end) {
- if (identity() == CODE_SPACE) {
- heap()->UnprotectAndRegisterMemoryChunk(page);
- }
- Free(limit, end - limit, SpaceAccountingMode::kSpaceAccounted);
- }
- SetLinearAllocationArea(start, limit);
-
- return true;
-}
-
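// A minimal sketch (not V8 code) of the carve-out performed above: the free
// list hands back a whole node [start, end), ComputeLimit() picks a limit
// inside it, [start, limit) becomes the new linear allocation area, and the
// unused tail [limit, end) is handed straight back via Free(). The sizes in
// the example comment are hypothetical.
#include <cstddef>
static size_t FreedTailBytesSketch(size_t node_size, size_t lab_bytes) {
  // e.g. a 4096-byte node with a 1024-byte linear area frees 3072 bytes back.
  return node_size - lab_bytes;
}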
-base::Optional<std::pair<Address, size_t>>
-PagedSpace::SlowGetLinearAllocationAreaBackground(LocalHeap* local_heap,
- size_t min_size_in_bytes,
- size_t max_size_in_bytes,
- AllocationAlignment alignment,
- AllocationOrigin origin) {
- DCHECK(!is_local_space() && identity() == OLD_SPACE);
- DCHECK_EQ(origin, AllocationOrigin::kRuntime);
- base::MutexGuard lock(&allocation_mutex_);
-
- auto result = TryAllocationFromFreeListBackground(
- min_size_in_bytes, max_size_in_bytes, alignment, origin);
- if (result) return result;
-
- MarkCompactCollector* collector = heap()->mark_compact_collector();
- // Sweeping is still in progress.
- if (collector->sweeping_in_progress()) {
- // First try to refill the free-list, concurrent sweeper threads
- // may have freed some objects in the meantime.
- RefillFreeList();
-
- // Retry the free list allocation.
- auto result = TryAllocationFromFreeListBackground(
- min_size_in_bytes, max_size_in_bytes, alignment, origin);
- if (result) return result;
-
- Sweeper::FreeSpaceMayContainInvalidatedSlots
- invalidated_slots_in_free_space =
- Sweeper::FreeSpaceMayContainInvalidatedSlots::kNo;
-
- const int kMaxPagesToSweep = 1;
- int max_freed = collector->sweeper()->ParallelSweepSpace(
- identity(), static_cast<int>(min_size_in_bytes), kMaxPagesToSweep,
- invalidated_slots_in_free_space);
- RefillFreeList();
- if (static_cast<size_t>(max_freed) >= min_size_in_bytes)
- return TryAllocationFromFreeListBackground(
- min_size_in_bytes, max_size_in_bytes, alignment, origin);
- }
-
- if (heap()->ShouldExpandOldGenerationOnSlowAllocation(local_heap) &&
- Expand()) {
- DCHECK((CountTotalPages() > 1) ||
- (min_size_in_bytes <= free_list_->Available()));
- return TryAllocationFromFreeListBackground(
- min_size_in_bytes, max_size_in_bytes, alignment, origin);
- }
-
- // TODO(dinfuehr): Complete sweeping here and try allocation again.
-
- return {};
-}
-
-base::Optional<std::pair<Address, size_t>>
-PagedSpace::TryAllocationFromFreeListBackground(size_t min_size_in_bytes,
- size_t max_size_in_bytes,
- AllocationAlignment alignment,
- AllocationOrigin origin) {
- DCHECK_LE(min_size_in_bytes, max_size_in_bytes);
- DCHECK_EQ(identity(), OLD_SPACE);
-
- size_t new_node_size = 0;
- FreeSpace new_node =
- free_list_->Allocate(min_size_in_bytes, &new_node_size, origin);
- if (new_node.is_null()) return {};
- DCHECK_GE(new_node_size, min_size_in_bytes);
-
- // The old-space-step might have finished sweeping and restarted marking.
- // Verify that it did not turn the page of the new node into an evacuation
- // candidate.
- DCHECK(!MarkCompactCollector::IsOnEvacuationCandidate(new_node));
-
- // Memory in the linear allocation area is counted as allocated. We may free
- // a little of this again immediately - see below.
- Page* page = Page::FromHeapObject(new_node);
- IncreaseAllocatedBytes(new_node_size, page);
-
- heap()->StartIncrementalMarkingIfAllocationLimitIsReachedBackground();
-
- size_t used_size_in_bytes = Min(new_node_size, max_size_in_bytes);
-
- Address start = new_node.address();
- Address end = new_node.address() + new_node_size;
- Address limit = new_node.address() + used_size_in_bytes;
- DCHECK_LE(limit, end);
- DCHECK_LE(min_size_in_bytes, limit - start);
- if (limit != end) {
- Free(limit, end - limit, SpaceAccountingMode::kSpaceAccounted);
- }
-
- return std::make_pair(start, used_size_in_bytes);
-}
-
-#ifdef DEBUG
-void PagedSpace::Print() {}
-#endif
-
-#ifdef VERIFY_HEAP
-void PagedSpace::Verify(Isolate* isolate, ObjectVisitor* visitor) {
- bool allocation_pointer_found_in_space =
- (allocation_info_.top() == allocation_info_.limit());
- size_t external_space_bytes[kNumTypes];
- size_t external_page_bytes[kNumTypes];
-
- for (int i = 0; i < kNumTypes; i++) {
- external_space_bytes[static_cast<ExternalBackingStoreType>(i)] = 0;
- }
-
- for (Page* page : *this) {
-#ifdef V8_SHARED_RO_HEAP
- if (identity() == RO_SPACE) {
- CHECK_NULL(page->owner());
- } else {
- CHECK_EQ(page->owner(), this);
- }
-#else
- CHECK_EQ(page->owner(), this);
-#endif
-
- for (int i = 0; i < kNumTypes; i++) {
- external_page_bytes[static_cast<ExternalBackingStoreType>(i)] = 0;
- }
-
- if (page == Page::FromAllocationAreaAddress(allocation_info_.top())) {
- allocation_pointer_found_in_space = true;
- }
- CHECK(page->SweepingDone());
- PagedSpaceObjectIterator it(isolate->heap(), this, page);
- Address end_of_previous_object = page->area_start();
- Address top = page->area_end();
-
- for (HeapObject object = it.Next(); !object.is_null(); object = it.Next()) {
- CHECK(end_of_previous_object <= object.address());
-
- // The first word should be a map, and we expect all map pointers to
- // be in map space or read-only space.
- Map map = object.map();
- CHECK(map.IsMap());
- CHECK(ReadOnlyHeap::Contains(map) ||
- isolate->heap()->map_space()->Contains(map));
-
- // Perform space-specific object verification.
- VerifyObject(object);
-
- // The object itself should look OK.
- object.ObjectVerify(isolate);
-
- if (!FLAG_verify_heap_skip_remembered_set) {
- isolate->heap()->VerifyRememberedSetFor(object);
- }
-
- // All the interior pointers should be contained in the heap.
- int size = object.Size();
- object.IterateBody(map, size, visitor);
- CHECK(object.address() + size <= top);
- end_of_previous_object = object.address() + size;
-
- if (object.IsExternalString()) {
- ExternalString external_string = ExternalString::cast(object);
- size_t size = external_string.ExternalPayloadSize();
- external_page_bytes[ExternalBackingStoreType::kExternalString] += size;
- } else if (object.IsJSArrayBuffer()) {
- JSArrayBuffer array_buffer = JSArrayBuffer::cast(object);
- if (ArrayBufferTracker::IsTracked(array_buffer)) {
- size_t size =
- ArrayBufferTracker::Lookup(isolate->heap(), array_buffer)
- ->PerIsolateAccountingLength();
- external_page_bytes[ExternalBackingStoreType::kArrayBuffer] += size;
- }
- }
- }
- for (int i = 0; i < kNumTypes; i++) {
- ExternalBackingStoreType t = static_cast<ExternalBackingStoreType>(i);
- CHECK_EQ(external_page_bytes[t], page->ExternalBackingStoreBytes(t));
- external_space_bytes[t] += external_page_bytes[t];
- }
- }
- for (int i = 0; i < kNumTypes; i++) {
- if (V8_ARRAY_BUFFER_EXTENSION_BOOL &&
- i == ExternalBackingStoreType::kArrayBuffer)
- continue;
- ExternalBackingStoreType t = static_cast<ExternalBackingStoreType>(i);
- CHECK_EQ(external_space_bytes[t], ExternalBackingStoreBytes(t));
- }
- CHECK(allocation_pointer_found_in_space);
-
- if (identity() == OLD_SPACE && V8_ARRAY_BUFFER_EXTENSION_BOOL) {
- size_t bytes = heap()->array_buffer_sweeper()->old().BytesSlow();
- CHECK_EQ(bytes,
- ExternalBackingStoreBytes(ExternalBackingStoreType::kArrayBuffer));
- }
-
-#ifdef DEBUG
- VerifyCountersAfterSweeping(isolate->heap());
-#endif
-}
-
-void PagedSpace::VerifyLiveBytes() {
- DCHECK_NE(identity(), RO_SPACE);
- IncrementalMarking::MarkingState* marking_state =
- heap()->incremental_marking()->marking_state();
- for (Page* page : *this) {
- CHECK(page->SweepingDone());
- PagedSpaceObjectIterator it(heap(), this, page);
- int black_size = 0;
- for (HeapObject object = it.Next(); !object.is_null(); object = it.Next()) {
- // All the interior pointers should be contained in the heap.
- if (marking_state->IsBlack(object)) {
- black_size += object.Size();
- }
- }
- CHECK_LE(black_size, marking_state->live_bytes(page));
- }
-}
-#endif // VERIFY_HEAP
-
-#ifdef DEBUG
-void PagedSpace::VerifyCountersAfterSweeping(Heap* heap) {
- size_t total_capacity = 0;
- size_t total_allocated = 0;
- for (Page* page : *this) {
- DCHECK(page->SweepingDone());
- total_capacity += page->area_size();
- PagedSpaceObjectIterator it(heap, this, page);
- size_t real_allocated = 0;
- for (HeapObject object = it.Next(); !object.is_null(); object = it.Next()) {
- if (!object.IsFreeSpaceOrFiller()) {
- real_allocated += object.Size();
- }
- }
- total_allocated += page->allocated_bytes();
- // The real size can be smaller than the accounted size if array trimming or
- // object slack tracking happened after sweeping.
- DCHECK_LE(real_allocated, accounting_stats_.AllocatedOnPage(page));
- DCHECK_EQ(page->allocated_bytes(), accounting_stats_.AllocatedOnPage(page));
- }
- DCHECK_EQ(total_capacity, accounting_stats_.Capacity());
- DCHECK_EQ(total_allocated, accounting_stats_.Size());
-}
-
-void PagedSpace::VerifyCountersBeforeConcurrentSweeping() {
- // We need to refine the counters on pages that are already swept and have
- // not been moved over to the actual space. Otherwise, the AccountingStats
- // are just an over-approximation.
- RefillFreeList();
-
- size_t total_capacity = 0;
- size_t total_allocated = 0;
- auto marking_state =
- heap()->incremental_marking()->non_atomic_marking_state();
- for (Page* page : *this) {
- size_t page_allocated =
- page->SweepingDone()
- ? page->allocated_bytes()
- : static_cast<size_t>(marking_state->live_bytes(page));
- total_capacity += page->area_size();
- total_allocated += page_allocated;
- DCHECK_EQ(page_allocated, accounting_stats_.AllocatedOnPage(page));
- }
- DCHECK_EQ(total_capacity, accounting_stats_.Capacity());
- DCHECK_EQ(total_allocated, accounting_stats_.Size());
-}
-#endif
-
-// -----------------------------------------------------------------------------
-// NewSpace implementation
-
-NewSpace::NewSpace(Heap* heap, v8::PageAllocator* page_allocator,
- size_t initial_semispace_capacity,
- size_t max_semispace_capacity)
- : SpaceWithLinearArea(heap, NEW_SPACE, new NoFreeList()),
- to_space_(heap, kToSpace),
- from_space_(heap, kFromSpace) {
- DCHECK(initial_semispace_capacity <= max_semispace_capacity);
-
- to_space_.SetUp(initial_semispace_capacity, max_semispace_capacity);
- from_space_.SetUp(initial_semispace_capacity, max_semispace_capacity);
- if (!to_space_.Commit()) {
- V8::FatalProcessOutOfMemory(heap->isolate(), "New space setup");
- }
- DCHECK(!from_space_.is_committed()); // No need to use memory yet.
- ResetLinearAllocationArea();
-}
-
-void NewSpace::TearDown() {
- allocation_info_.Reset(kNullAddress, kNullAddress);
-
- to_space_.TearDown();
- from_space_.TearDown();
-}
-
-void NewSpace::Flip() { SemiSpace::Swap(&from_space_, &to_space_); }
-
-
-void NewSpace::Grow() {
- // Double the semispace size but only up to maximum capacity.
- DCHECK(TotalCapacity() < MaximumCapacity());
- size_t new_capacity =
- Min(MaximumCapacity(),
- static_cast<size_t>(FLAG_semi_space_growth_factor) * TotalCapacity());
- if (to_space_.GrowTo(new_capacity)) {
- // Only grow from space if we managed to grow to-space.
- if (!from_space_.GrowTo(new_capacity)) {
- // If we managed to grow to-space but couldn't grow from-space,
- // attempt to shrink to-space.
- if (!to_space_.ShrinkTo(from_space_.current_capacity())) {
- // We are in an inconsistent state because we could not
- // commit/uncommit memory from new space.
- FATAL("inconsistent state");
- }
- }
- }
- DCHECK_SEMISPACE_ALLOCATION_INFO(allocation_info_, to_space_);
-}
-
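// A minimal sketch (not V8 code) of the growth arithmetic above, assuming the
// common growth factor of 2: a semispace with 4 MB total capacity and a 16 MB
// maximum grows 4 -> 8 -> 16 and then stays at 16 MB.
#include <cstddef>
static size_t NextSemiSpaceCapacitySketch(size_t total_capacity,
                                          size_t maximum_capacity,
                                          size_t growth_factor) {
  size_t grown = growth_factor * total_capacity;
  return grown < maximum_capacity ? grown : maximum_capacity;
}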
-
-void NewSpace::Shrink() {
- size_t new_capacity = Max(InitialTotalCapacity(), 2 * Size());
- size_t rounded_new_capacity = ::RoundUp(new_capacity, Page::kPageSize);
- if (rounded_new_capacity < TotalCapacity() &&
- to_space_.ShrinkTo(rounded_new_capacity)) {
- // Only shrink from-space if we managed to shrink to-space.
- from_space_.Reset();
- if (!from_space_.ShrinkTo(rounded_new_capacity)) {
- // If we managed to shrink to-space but couldn't shrink from
- // space, attempt to grow to-space again.
- if (!to_space_.GrowTo(from_space_.current_capacity())) {
- // We are in an inconsistent state because we could not
- // commit/uncommit memory from new space.
- FATAL("inconsistent state");
- }
- }
- }
- DCHECK_SEMISPACE_ALLOCATION_INFO(allocation_info_, to_space_);
-}
-
-bool NewSpace::Rebalance() {
- // Order here is important to make use of the page pool.
- return to_space_.EnsureCurrentCapacity() &&
- from_space_.EnsureCurrentCapacity();
-}
-
-bool SemiSpace::EnsureCurrentCapacity() {
- if (is_committed()) {
- const int expected_pages =
- static_cast<int>(current_capacity_ / Page::kPageSize);
- MemoryChunk* current_page = first_page();
- int actual_pages = 0;
-
- // First iterate through the page list up to expected_pages, if that many
- // pages exist.
- while (current_page != nullptr && actual_pages < expected_pages) {
- actual_pages++;
- current_page = current_page->list_node().next();
- }
-
- // Free all overallocated pages which are behind current_page.
- while (current_page) {
- MemoryChunk* next_current = current_page->list_node().next();
- memory_chunk_list_.Remove(current_page);
- // Clear new space flags to avoid this page being treated as a new
- // space page that is potentially being swept.
- current_page->SetFlags(0, Page::kIsInYoungGenerationMask);
- heap()->memory_allocator()->Free<MemoryAllocator::kPooledAndQueue>(
- current_page);
- current_page = next_current;
- }
-
- // Add more pages if we have less than expected_pages.
- IncrementalMarking::NonAtomicMarkingState* marking_state =
- heap()->incremental_marking()->non_atomic_marking_state();
- while (actual_pages < expected_pages) {
- actual_pages++;
- current_page =
- heap()->memory_allocator()->AllocatePage<MemoryAllocator::kPooled>(
- MemoryChunkLayout::AllocatableMemoryInDataPage(), this,
- NOT_EXECUTABLE);
- if (current_page == nullptr) return false;
- DCHECK_NOT_NULL(current_page);
- memory_chunk_list_.PushBack(current_page);
- marking_state->ClearLiveness(current_page);
- current_page->SetFlags(first_page()->GetFlags(),
- static_cast<uintptr_t>(Page::kCopyAllFlags));
- heap()->CreateFillerObjectAt(current_page->area_start(),
- static_cast<int>(current_page->area_size()),
- ClearRecordedSlots::kNo);
- }
- }
- return true;
-}
-
LinearAllocationArea LocalAllocationBuffer::CloseAndMakeIterable() {
if (IsValid()) {
MakeIterable();
@@ -2400,110 +397,6 @@ LocalAllocationBuffer& LocalAllocationBuffer::operator=(
other.allocation_info_.Reset(kNullAddress, kNullAddress);
return *this;
}
-
-void NewSpace::UpdateLinearAllocationArea() {
- // Make sure there are no unaccounted allocations.
- DCHECK(!AllocationObserversActive() || top_on_previous_step_ == top());
-
- Address new_top = to_space_.page_low();
- MemoryChunk::UpdateHighWaterMark(allocation_info_.top());
- allocation_info_.Reset(new_top, to_space_.page_high());
- // The order of the following two stores is important.
- // See the corresponding loads in ConcurrentMarking::Run.
- original_limit_.store(limit(), std::memory_order_relaxed);
- original_top_.store(top(), std::memory_order_release);
- StartNextInlineAllocationStep();
- DCHECK_SEMISPACE_ALLOCATION_INFO(allocation_info_, to_space_);
-}
-
-void NewSpace::ResetLinearAllocationArea() {
- // Do a step to account for memory allocated so far before resetting.
- InlineAllocationStep(top(), top(), kNullAddress, 0);
- to_space_.Reset();
- UpdateLinearAllocationArea();
- // Clear all mark-bits in the to-space.
- IncrementalMarking::NonAtomicMarkingState* marking_state =
- heap()->incremental_marking()->non_atomic_marking_state();
- for (Page* p : to_space_) {
- marking_state->ClearLiveness(p);
- // Concurrent marking may have local live bytes for this page.
- heap()->concurrent_marking()->ClearMemoryChunkData(p);
- }
-}
-
-void NewSpace::UpdateInlineAllocationLimit(size_t min_size) {
- Address new_limit = ComputeLimit(top(), to_space_.page_high(), min_size);
- allocation_info_.set_limit(new_limit);
- DCHECK_SEMISPACE_ALLOCATION_INFO(allocation_info_, to_space_);
-}
-
-void PagedSpace::UpdateInlineAllocationLimit(size_t min_size) {
- Address new_limit = ComputeLimit(top(), limit(), min_size);
- DCHECK_LE(new_limit, limit());
- DecreaseLimit(new_limit);
-}
-
-bool NewSpace::AddFreshPage() {
- Address top = allocation_info_.top();
- DCHECK(!OldSpace::IsAtPageStart(top));
-
- // Do a step to account for memory allocated on previous page.
- InlineAllocationStep(top, top, kNullAddress, 0);
-
- if (!to_space_.AdvancePage()) {
- // No more pages left to advance.
- return false;
- }
-
- // Clear remainder of current page.
- Address limit = Page::FromAllocationAreaAddress(top)->area_end();
- int remaining_in_page = static_cast<int>(limit - top);
- heap()->CreateFillerObjectAt(top, remaining_in_page, ClearRecordedSlots::kNo);
- UpdateLinearAllocationArea();
-
- return true;
-}
-
-
-bool NewSpace::AddFreshPageSynchronized() {
- base::MutexGuard guard(&mutex_);
- return AddFreshPage();
-}
-
-
-bool NewSpace::EnsureAllocation(int size_in_bytes,
- AllocationAlignment alignment) {
- Address old_top = allocation_info_.top();
- Address high = to_space_.page_high();
- int filler_size = Heap::GetFillToAlign(old_top, alignment);
- int aligned_size_in_bytes = size_in_bytes + filler_size;
-
- if (old_top + aligned_size_in_bytes > high) {
- // Not enough room in the page, try to allocate a new one.
- if (!AddFreshPage()) {
- return false;
- }
-
- old_top = allocation_info_.top();
- high = to_space_.page_high();
- filler_size = Heap::GetFillToAlign(old_top, alignment);
- }
-
- DCHECK(old_top + aligned_size_in_bytes <= high);
-
- if (allocation_info_.limit() < high) {
- // Either the limit has been lowered because linear allocation was disabled
- // or because incremental marking wants to get a chance to do a step,
- // or because the idle scavenge job wants to get a chance to post a task.
- // Set the new limit accordingly.
- Address new_top = old_top + aligned_size_in_bytes;
- Address soon_object = old_top + filler_size;
- InlineAllocationStep(new_top, new_top, soon_object, size_in_bytes);
- UpdateInlineAllocationLimit(aligned_size_in_bytes);
- }
- return true;
-}
-
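// A minimal sketch (not V8's Heap::GetFillToAlign) of the filler-size
// computation used by EnsureAllocation() above, assuming the requested
// alignment is a power of two (e.g. 8 bytes for double alignment).
#include <cstdint>
static inline int FillToAlignSketch(uintptr_t address, uintptr_t alignment) {
  uintptr_t misalignment = address & (alignment - 1);
  return misalignment == 0 ? 0 : static_cast<int>(alignment - misalignment);
}
// Example: FillToAlignSketch(0x1004, 8) == 4, i.e. a 4-byte filler is placed
// so that the object that follows starts at an 8-byte boundary.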
void SpaceWithLinearArea::StartNextInlineAllocationStep() {
if (heap()->allocation_step_in_progress()) {
// If we are mid-way through an existing step, don't start a new one.
@@ -2570,1043 +463,6 @@ void SpaceWithLinearArea::InlineAllocationStep(Address top,
}
}
-std::unique_ptr<ObjectIterator> NewSpace::GetObjectIterator(Heap* heap) {
- return std::unique_ptr<ObjectIterator>(new SemiSpaceObjectIterator(this));
-}
-
-#ifdef VERIFY_HEAP
-// We do not use the SemiSpaceObjectIterator because verification doesn't assume
-// that it works (it depends on the invariants we are checking).
-void NewSpace::Verify(Isolate* isolate) {
- // The allocation pointer should be in the space or at the very end.
- DCHECK_SEMISPACE_ALLOCATION_INFO(allocation_info_, to_space_);
-
- // There should be objects packed in from the low address up to the
- // allocation pointer.
- Address current = to_space_.first_page()->area_start();
- CHECK_EQ(current, to_space_.space_start());
-
- size_t external_space_bytes[kNumTypes];
- for (int i = 0; i < kNumTypes; i++) {
- external_space_bytes[static_cast<ExternalBackingStoreType>(i)] = 0;
- }
-
- while (current != top()) {
- if (!Page::IsAlignedToPageSize(current)) {
- // The allocation pointer should not be in the middle of an object.
- CHECK(!Page::FromAllocationAreaAddress(current)->ContainsLimit(top()) ||
- current < top());
-
- HeapObject object = HeapObject::FromAddress(current);
-
- // The first word should be a map, and we expect all map pointers to
- // be in map space or read-only space.
- Map map = object.map();
- CHECK(map.IsMap());
- CHECK(ReadOnlyHeap::Contains(map) || heap()->map_space()->Contains(map));
-
- // The object should not be code or a map.
- CHECK(!object.IsMap());
- CHECK(!object.IsAbstractCode());
-
- // The object itself should look OK.
- object.ObjectVerify(isolate);
-
- // All the interior pointers should be contained in the heap.
- VerifyPointersVisitor visitor(heap());
- int size = object.Size();
- object.IterateBody(map, size, &visitor);
-
- if (object.IsExternalString()) {
- ExternalString external_string = ExternalString::cast(object);
- size_t size = external_string.ExternalPayloadSize();
- external_space_bytes[ExternalBackingStoreType::kExternalString] += size;
- } else if (object.IsJSArrayBuffer()) {
- JSArrayBuffer array_buffer = JSArrayBuffer::cast(object);
- if (ArrayBufferTracker::IsTracked(array_buffer)) {
- size_t size = ArrayBufferTracker::Lookup(heap(), array_buffer)
- ->PerIsolateAccountingLength();
- external_space_bytes[ExternalBackingStoreType::kArrayBuffer] += size;
- }
- }
-
- current += size;
- } else {
- // At end of page, switch to next page.
- Page* page = Page::FromAllocationAreaAddress(current)->next_page();
- current = page->area_start();
- }
- }
-
- for (int i = 0; i < kNumTypes; i++) {
- if (V8_ARRAY_BUFFER_EXTENSION_BOOL &&
- i == ExternalBackingStoreType::kArrayBuffer)
- continue;
- ExternalBackingStoreType t = static_cast<ExternalBackingStoreType>(i);
- CHECK_EQ(external_space_bytes[t], ExternalBackingStoreBytes(t));
- }
-
- if (V8_ARRAY_BUFFER_EXTENSION_BOOL) {
- size_t bytes = heap()->array_buffer_sweeper()->young().BytesSlow();
- CHECK_EQ(bytes,
- ExternalBackingStoreBytes(ExternalBackingStoreType::kArrayBuffer));
- }
-
- // Check semi-spaces.
- CHECK_EQ(from_space_.id(), kFromSpace);
- CHECK_EQ(to_space_.id(), kToSpace);
- from_space_.Verify();
- to_space_.Verify();
-}
-#endif
-
-// -----------------------------------------------------------------------------
-// SemiSpace implementation
-
-void SemiSpace::SetUp(size_t initial_capacity, size_t maximum_capacity) {
- DCHECK_GE(maximum_capacity, static_cast<size_t>(Page::kPageSize));
- minimum_capacity_ = RoundDown(initial_capacity, Page::kPageSize);
- current_capacity_ = minimum_capacity_;
- maximum_capacity_ = RoundDown(maximum_capacity, Page::kPageSize);
- committed_ = false;
-}
-
-
-void SemiSpace::TearDown() {
- // Properly uncommit memory to keep the allocator counters in sync.
- if (is_committed()) {
- Uncommit();
- }
- current_capacity_ = maximum_capacity_ = 0;
-}
-
-
-bool SemiSpace::Commit() {
- DCHECK(!is_committed());
- const int num_pages = static_cast<int>(current_capacity_ / Page::kPageSize);
- for (int pages_added = 0; pages_added < num_pages; pages_added++) {
- // Pages in the new space can be moved to the old space by the full
- // collector. Therefore, they must be initialized with the same FreeList as
- // old pages.
- Page* new_page =
- heap()->memory_allocator()->AllocatePage<MemoryAllocator::kPooled>(
- MemoryChunkLayout::AllocatableMemoryInDataPage(), this,
- NOT_EXECUTABLE);
- if (new_page == nullptr) {
- if (pages_added) RewindPages(pages_added);
- return false;
- }
- memory_chunk_list_.PushBack(new_page);
- }
- Reset();
- AccountCommitted(current_capacity_);
- if (age_mark_ == kNullAddress) {
- age_mark_ = first_page()->area_start();
- }
- committed_ = true;
- return true;
-}
-
-
-bool SemiSpace::Uncommit() {
- DCHECK(is_committed());
- while (!memory_chunk_list_.Empty()) {
- MemoryChunk* chunk = memory_chunk_list_.front();
- memory_chunk_list_.Remove(chunk);
- heap()->memory_allocator()->Free<MemoryAllocator::kPooledAndQueue>(chunk);
- }
- current_page_ = nullptr;
- AccountUncommitted(current_capacity_);
- committed_ = false;
- heap()->memory_allocator()->unmapper()->FreeQueuedChunks();
- return true;
-}
-
-
-size_t SemiSpace::CommittedPhysicalMemory() {
- if (!is_committed()) return 0;
- size_t size = 0;
- for (Page* p : *this) {
- size += p->CommittedPhysicalMemory();
- }
- return size;
-}
-
-bool SemiSpace::GrowTo(size_t new_capacity) {
- if (!is_committed()) {
- if (!Commit()) return false;
- }
- DCHECK_EQ(new_capacity & kPageAlignmentMask, 0u);
- DCHECK_LE(new_capacity, maximum_capacity_);
- DCHECK_GT(new_capacity, current_capacity_);
- const size_t delta = new_capacity - current_capacity_;
- DCHECK(IsAligned(delta, AllocatePageSize()));
- const int delta_pages = static_cast<int>(delta / Page::kPageSize);
- DCHECK(last_page());
- IncrementalMarking::NonAtomicMarkingState* marking_state =
- heap()->incremental_marking()->non_atomic_marking_state();
- for (int pages_added = 0; pages_added < delta_pages; pages_added++) {
- Page* new_page =
- heap()->memory_allocator()->AllocatePage<MemoryAllocator::kPooled>(
- MemoryChunkLayout::AllocatableMemoryInDataPage(), this,
- NOT_EXECUTABLE);
- if (new_page == nullptr) {
- if (pages_added) RewindPages(pages_added);
- return false;
- }
- memory_chunk_list_.PushBack(new_page);
- marking_state->ClearLiveness(new_page);
- // Duplicate the flags that were set on the old page.
- new_page->SetFlags(last_page()->GetFlags(), Page::kCopyOnFlipFlagsMask);
- }
- AccountCommitted(delta);
- current_capacity_ = new_capacity;
- return true;
-}
-
-void SemiSpace::RewindPages(int num_pages) {
- DCHECK_GT(num_pages, 0);
- DCHECK(last_page());
- while (num_pages > 0) {
- MemoryChunk* last = last_page();
- memory_chunk_list_.Remove(last);
- heap()->memory_allocator()->Free<MemoryAllocator::kPooledAndQueue>(last);
- num_pages--;
- }
-}
-
-bool SemiSpace::ShrinkTo(size_t new_capacity) {
- DCHECK_EQ(new_capacity & kPageAlignmentMask, 0u);
- DCHECK_GE(new_capacity, minimum_capacity_);
- DCHECK_LT(new_capacity, current_capacity_);
- if (is_committed()) {
- const size_t delta = current_capacity_ - new_capacity;
- DCHECK(IsAligned(delta, Page::kPageSize));
- int delta_pages = static_cast<int>(delta / Page::kPageSize);
- RewindPages(delta_pages);
- AccountUncommitted(delta);
- heap()->memory_allocator()->unmapper()->FreeQueuedChunks();
- }
- current_capacity_ = new_capacity;
- return true;
-}
-
-void SemiSpace::FixPagesFlags(intptr_t flags, intptr_t mask) {
- for (Page* page : *this) {
- page->set_owner(this);
- page->SetFlags(flags, mask);
- if (id_ == kToSpace) {
- page->ClearFlag(MemoryChunk::FROM_PAGE);
- page->SetFlag(MemoryChunk::TO_PAGE);
- page->ClearFlag(MemoryChunk::NEW_SPACE_BELOW_AGE_MARK);
- heap()->incremental_marking()->non_atomic_marking_state()->SetLiveBytes(
- page, 0);
- } else {
- page->SetFlag(MemoryChunk::FROM_PAGE);
- page->ClearFlag(MemoryChunk::TO_PAGE);
- }
- DCHECK(page->InYoungGeneration());
- }
-}
-
-
-void SemiSpace::Reset() {
- DCHECK(first_page());
- DCHECK(last_page());
- current_page_ = first_page();
- pages_used_ = 0;
-}
-
-void SemiSpace::RemovePage(Page* page) {
- if (current_page_ == page) {
- if (page->prev_page()) {
- current_page_ = page->prev_page();
- }
- }
- memory_chunk_list_.Remove(page);
- for (size_t i = 0; i < ExternalBackingStoreType::kNumTypes; i++) {
- ExternalBackingStoreType t = static_cast<ExternalBackingStoreType>(i);
- DecrementExternalBackingStoreBytes(t, page->ExternalBackingStoreBytes(t));
- }
-}
-
-void SemiSpace::PrependPage(Page* page) {
- page->SetFlags(current_page()->GetFlags(),
- static_cast<uintptr_t>(Page::kCopyAllFlags));
- page->set_owner(this);
- memory_chunk_list_.PushFront(page);
- pages_used_++;
- for (size_t i = 0; i < ExternalBackingStoreType::kNumTypes; i++) {
- ExternalBackingStoreType t = static_cast<ExternalBackingStoreType>(i);
- IncrementExternalBackingStoreBytes(t, page->ExternalBackingStoreBytes(t));
- }
-}
-
-void SemiSpace::Swap(SemiSpace* from, SemiSpace* to) {
- // We won't be swapping semispaces without data in them.
- DCHECK(from->first_page());
- DCHECK(to->first_page());
-
- intptr_t saved_to_space_flags = to->current_page()->GetFlags();
-
- // We swap all properties but id_.
- std::swap(from->current_capacity_, to->current_capacity_);
- std::swap(from->maximum_capacity_, to->maximum_capacity_);
- std::swap(from->minimum_capacity_, to->minimum_capacity_);
- std::swap(from->age_mark_, to->age_mark_);
- std::swap(from->committed_, to->committed_);
- std::swap(from->memory_chunk_list_, to->memory_chunk_list_);
- std::swap(from->current_page_, to->current_page_);
- std::swap(from->external_backing_store_bytes_,
- to->external_backing_store_bytes_);
-
- to->FixPagesFlags(saved_to_space_flags, Page::kCopyOnFlipFlagsMask);
- from->FixPagesFlags(0, 0);
-}
-
-void SemiSpace::set_age_mark(Address mark) {
- DCHECK_EQ(Page::FromAllocationAreaAddress(mark)->owner(), this);
- age_mark_ = mark;
- // Mark all pages up to the one containing mark.
- for (Page* p : PageRange(space_start(), mark)) {
- p->SetFlag(MemoryChunk::NEW_SPACE_BELOW_AGE_MARK);
- }
-}
-
-std::unique_ptr<ObjectIterator> SemiSpace::GetObjectIterator(Heap* heap) {
- // Use the NewSpace::NewObjectIterator to iterate the ToSpace.
- UNREACHABLE();
-}
-
-#ifdef DEBUG
-void SemiSpace::Print() {}
-#endif
-
-#ifdef VERIFY_HEAP
-void SemiSpace::Verify() {
- bool is_from_space = (id_ == kFromSpace);
- size_t external_backing_store_bytes[kNumTypes];
-
- for (int i = 0; i < kNumTypes; i++) {
- external_backing_store_bytes[static_cast<ExternalBackingStoreType>(i)] = 0;
- }
-
- for (Page* page : *this) {
- CHECK_EQ(page->owner(), this);
- CHECK(page->InNewSpace());
- CHECK(page->IsFlagSet(is_from_space ? MemoryChunk::FROM_PAGE
- : MemoryChunk::TO_PAGE));
- CHECK(!page->IsFlagSet(is_from_space ? MemoryChunk::TO_PAGE
- : MemoryChunk::FROM_PAGE));
- CHECK(page->IsFlagSet(MemoryChunk::POINTERS_TO_HERE_ARE_INTERESTING));
- if (!is_from_space) {
- // The pointers-from-here-are-interesting flag isn't updated dynamically
- // on from-space pages, so it might be out of sync with the marking state.
- if (page->heap()->incremental_marking()->IsMarking()) {
- CHECK(page->IsFlagSet(MemoryChunk::POINTERS_FROM_HERE_ARE_INTERESTING));
- } else {
- CHECK(
- !page->IsFlagSet(MemoryChunk::POINTERS_FROM_HERE_ARE_INTERESTING));
- }
- }
- for (int i = 0; i < kNumTypes; i++) {
- ExternalBackingStoreType t = static_cast<ExternalBackingStoreType>(i);
- external_backing_store_bytes[t] += page->ExternalBackingStoreBytes(t);
- }
-
- CHECK_IMPLIES(page->list_node().prev(),
- page->list_node().prev()->list_node().next() == page);
- }
- for (int i = 0; i < kNumTypes; i++) {
- ExternalBackingStoreType t = static_cast<ExternalBackingStoreType>(i);
- CHECK_EQ(external_backing_store_bytes[t], ExternalBackingStoreBytes(t));
- }
-}
-#endif
-
-#ifdef DEBUG
-void SemiSpace::AssertValidRange(Address start, Address end) {
- // Addresses must belong to the same semi-space.
- Page* page = Page::FromAllocationAreaAddress(start);
- Page* end_page = Page::FromAllocationAreaAddress(end);
- SemiSpace* space = reinterpret_cast<SemiSpace*>(page->owner());
- DCHECK_EQ(space, end_page->owner());
- // Start address is before end address, either on same page,
- // or end address is on a later page in the linked list of
- // semi-space pages.
- if (page == end_page) {
- DCHECK_LE(start, end);
- } else {
- while (page != end_page) {
- page = page->next_page();
- }
- DCHECK(page);
- }
-}
-#endif
-
-// -----------------------------------------------------------------------------
-// SemiSpaceObjectIterator implementation.
-
-SemiSpaceObjectIterator::SemiSpaceObjectIterator(NewSpace* space) {
- Initialize(space->first_allocatable_address(), space->top());
-}
-
-void SemiSpaceObjectIterator::Initialize(Address start, Address end) {
- SemiSpace::AssertValidRange(start, end);
- current_ = start;
- limit_ = end;
-}
-
-size_t NewSpace::CommittedPhysicalMemory() {
- if (!base::OS::HasLazyCommits()) return CommittedMemory();
- MemoryChunk::UpdateHighWaterMark(allocation_info_.top());
- size_t size = to_space_.CommittedPhysicalMemory();
- if (from_space_.is_committed()) {
- size += from_space_.CommittedPhysicalMemory();
- }
- return size;
-}
-
-
-// -----------------------------------------------------------------------------
-// Free lists for old object spaces implementation
-
-void FreeListCategory::Reset(FreeList* owner) {
- if (is_linked(owner) && !top().is_null()) {
- owner->DecreaseAvailableBytes(available_);
- }
- set_top(FreeSpace());
- set_prev(nullptr);
- set_next(nullptr);
- available_ = 0;
-}
-
-FreeSpace FreeListCategory::PickNodeFromList(size_t minimum_size,
- size_t* node_size) {
- FreeSpace node = top();
- DCHECK(!node.is_null());
- DCHECK(Page::FromHeapObject(node)->CanAllocate());
- if (static_cast<size_t>(node.Size()) < minimum_size) {
- *node_size = 0;
- return FreeSpace();
- }
- set_top(node.next());
- *node_size = node.Size();
- UpdateCountersAfterAllocation(*node_size);
- return node;
-}
-
-FreeSpace FreeListCategory::SearchForNodeInList(size_t minimum_size,
- size_t* node_size) {
- FreeSpace prev_non_evac_node;
- for (FreeSpace cur_node = top(); !cur_node.is_null();
- cur_node = cur_node.next()) {
- DCHECK(Page::FromHeapObject(cur_node)->CanAllocate());
- size_t size = cur_node.size();
- if (size >= minimum_size) {
- DCHECK_GE(available_, size);
- UpdateCountersAfterAllocation(size);
- if (cur_node == top()) {
- set_top(cur_node.next());
- }
- if (!prev_non_evac_node.is_null()) {
- MemoryChunk* chunk = MemoryChunk::FromHeapObject(prev_non_evac_node);
- if (chunk->owner_identity() == CODE_SPACE) {
- chunk->heap()->UnprotectAndRegisterMemoryChunk(chunk);
- }
- prev_non_evac_node.set_next(cur_node.next());
- }
- *node_size = size;
- return cur_node;
- }
-
- prev_non_evac_node = cur_node;
- }
- return FreeSpace();
-}
-
-void FreeListCategory::Free(Address start, size_t size_in_bytes, FreeMode mode,
- FreeList* owner) {
- FreeSpace free_space = FreeSpace::cast(HeapObject::FromAddress(start));
- free_space.set_next(top());
- set_top(free_space);
- available_ += size_in_bytes;
- if (mode == kLinkCategory) {
- if (is_linked(owner)) {
- owner->IncreaseAvailableBytes(size_in_bytes);
- } else {
- owner->AddCategory(this);
- }
- }
-}
-
-void FreeListCategory::RepairFreeList(Heap* heap) {
- Map free_space_map = ReadOnlyRoots(heap).free_space_map();
- FreeSpace n = top();
- while (!n.is_null()) {
- ObjectSlot map_slot = n.map_slot();
- if (map_slot.contains_value(kNullAddress)) {
- map_slot.store(free_space_map);
- } else {
- DCHECK(map_slot.contains_value(free_space_map.ptr()));
- }
- n = n.next();
- }
-}
-
-void FreeListCategory::Relink(FreeList* owner) {
- DCHECK(!is_linked(owner));
- owner->AddCategory(this);
-}
-
-// ------------------------------------------------
-// Generic FreeList methods (alloc/free related)
-
-FreeList* FreeList::CreateFreeList() {
- switch (FLAG_gc_freelist_strategy) {
- case 0:
- return new FreeListLegacy();
- case 1:
- return new FreeListFastAlloc();
- case 2:
- return new FreeListMany();
- case 3:
- return new FreeListManyCached();
- case 4:
- return new FreeListManyCachedFastPath();
- case 5:
- return new FreeListManyCachedOrigin();
- default:
- FATAL("Invalid FreeList strategy");
- }
-}
-
-FreeSpace FreeList::TryFindNodeIn(FreeListCategoryType type,
- size_t minimum_size, size_t* node_size) {
- FreeListCategory* category = categories_[type];
- if (category == nullptr) return FreeSpace();
- FreeSpace node = category->PickNodeFromList(minimum_size, node_size);
- if (!node.is_null()) {
- DecreaseAvailableBytes(*node_size);
- DCHECK(IsVeryLong() || Available() == SumFreeLists());
- }
- if (category->is_empty()) {
- RemoveCategory(category);
- }
- return node;
-}
-
-FreeSpace FreeList::SearchForNodeInList(FreeListCategoryType type,
- size_t minimum_size,
- size_t* node_size) {
- FreeListCategoryIterator it(this, type);
- FreeSpace node;
- while (it.HasNext()) {
- FreeListCategory* current = it.Next();
- node = current->SearchForNodeInList(minimum_size, node_size);
- if (!node.is_null()) {
- DecreaseAvailableBytes(*node_size);
- DCHECK(IsVeryLong() || Available() == SumFreeLists());
- if (current->is_empty()) {
- RemoveCategory(current);
- }
- return node;
- }
- }
- return node;
-}
-
-size_t FreeList::Free(Address start, size_t size_in_bytes, FreeMode mode) {
- Page* page = Page::FromAddress(start);
- page->DecreaseAllocatedBytes(size_in_bytes);
-
- // Blocks have to be a minimum size to hold free list items.
- if (size_in_bytes < min_block_size_) {
- page->add_wasted_memory(size_in_bytes);
- wasted_bytes_ += size_in_bytes;
- return size_in_bytes;
- }
-
- // Insert other blocks at the head of a free list of the appropriate
- // magnitude.
- FreeListCategoryType type = SelectFreeListCategoryType(size_in_bytes);
- page->free_list_category(type)->Free(start, size_in_bytes, mode, this);
- DCHECK_EQ(page->AvailableInFreeList(),
- page->AvailableInFreeListFromAllocatedBytes());
- return 0;
-}
-
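// A minimal sketch (not V8 code) of the size-to-category mapping that
// SelectFreeListCategoryType() performs for Free() above. The category
// boundaries here are made up; the real ones depend on the FreeList
// implementation (see e.g. FreeListMany::categories_min below). Blocks
// smaller than the first minimum never reach this point, because Free()
// already counts them as wasted memory.
#include <cstddef>
static int SelectCategorySketch(size_t size_in_bytes) {
  static const size_t kCategoryMin[] = {24, 32, 64, 128, 256, 512, 1024};
  static const int kNumCategories =
      static_cast<int>(sizeof(kCategoryMin) / sizeof(kCategoryMin[0]));
  int category = 0;
  // Pick the last category whose minimum block size still fits the block.
  for (int i = 0; i < kNumCategories; i++) {
    if (size_in_bytes >= kCategoryMin[i]) category = i;
  }
  return category;
}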
-// ------------------------------------------------
-// FreeListLegacy implementation
-
-FreeListLegacy::FreeListLegacy() {
- // Initializing base (FreeList) fields
- number_of_categories_ = kHuge + 1;
- last_category_ = kHuge;
- min_block_size_ = kMinBlockSize;
- categories_ = new FreeListCategory*[number_of_categories_]();
-
- Reset();
-}
-
-FreeListLegacy::~FreeListLegacy() { delete[] categories_; }
-
-FreeSpace FreeListLegacy::Allocate(size_t size_in_bytes, size_t* node_size,
- AllocationOrigin origin) {
- DCHECK_GE(kMaxBlockSize, size_in_bytes);
- FreeSpace node;
- // First try the allocation fast path: try to allocate the minimum element
- // size of a free list category. This operation is constant time.
- FreeListCategoryType type =
- SelectFastAllocationFreeListCategoryType(size_in_bytes);
- for (int i = type; i < kHuge && node.is_null(); i++) {
- node = TryFindNodeIn(static_cast<FreeListCategoryType>(i), size_in_bytes,
- node_size);
- }
-
- if (node.is_null()) {
- // Next search the huge list for free list nodes. This takes linear time in
- // the number of huge elements.
- node = SearchForNodeInList(kHuge, size_in_bytes, node_size);
- }
-
- if (node.is_null() && type != kHuge) {
- // We didn't find anything in the huge list.
- type = SelectFreeListCategoryType(size_in_bytes);
-
- if (type == kTiniest) {
- // For this tiniest object, the tiny list hasn't been searched yet.
- // Now searching the tiny list.
- node = TryFindNodeIn(kTiny, size_in_bytes, node_size);
- }
-
- if (node.is_null()) {
- // Now search the best fitting free list for a node that has at least the
- // requested size.
- node = TryFindNodeIn(type, size_in_bytes, node_size);
- }
- }
-
- if (!node.is_null()) {
- Page::FromHeapObject(node)->IncreaseAllocatedBytes(*node_size);
- }
-
- DCHECK(IsVeryLong() || Available() == SumFreeLists());
- return node;
-}
-
-// ------------------------------------------------
-// FreeListFastAlloc implementation
-
-FreeListFastAlloc::FreeListFastAlloc() {
- // Initializing base (FreeList) fields
- number_of_categories_ = kHuge + 1;
- last_category_ = kHuge;
- min_block_size_ = kMinBlockSize;
- categories_ = new FreeListCategory*[number_of_categories_]();
-
- Reset();
-}
-
-FreeListFastAlloc::~FreeListFastAlloc() { delete[] categories_; }
-
-FreeSpace FreeListFastAlloc::Allocate(size_t size_in_bytes, size_t* node_size,
- AllocationOrigin origin) {
- DCHECK_GE(kMaxBlockSize, size_in_bytes);
- FreeSpace node;
- // Try to allocate the biggest element possible (to make the most of later
- // bump-pointer allocations).
- FreeListCategoryType type = SelectFreeListCategoryType(size_in_bytes);
- for (int i = kHuge; i >= type && node.is_null(); i--) {
- node = TryFindNodeIn(static_cast<FreeListCategoryType>(i), size_in_bytes,
- node_size);
- }
-
- if (!node.is_null()) {
- Page::FromHeapObject(node)->IncreaseAllocatedBytes(*node_size);
- }
-
- DCHECK(IsVeryLong() || Available() == SumFreeLists());
- return node;
-}
-
-// ------------------------------------------------
-// FreeListMany implementation
-
-constexpr unsigned int FreeListMany::categories_min[kNumberOfCategories];
-
-FreeListMany::FreeListMany() {
- // Initializing base (FreeList) fields
- number_of_categories_ = kNumberOfCategories;
- last_category_ = number_of_categories_ - 1;
- min_block_size_ = kMinBlockSize;
- categories_ = new FreeListCategory*[number_of_categories_]();
-
- Reset();
-}
-
-FreeListMany::~FreeListMany() { delete[] categories_; }
-
-size_t FreeListMany::GuaranteedAllocatable(size_t maximum_freed) {
- if (maximum_freed < categories_min[0]) {
- return 0;
- }
- for (int cat = kFirstCategory + 1; cat <= last_category_; cat++) {
- if (maximum_freed < categories_min[cat]) {
- return categories_min[cat - 1];
- }
- }
- return maximum_freed;
-}
-
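// A minimal sketch (not V8 code) mirroring GuaranteedAllocatable() above with
// made-up category minima {24, 32, 64, 128}: a 100-byte freed block guarantees
// a 64-byte allocation, a 16-byte block guarantees nothing, and a 200-byte
// block, above every boundary, is returned unchanged.
#include <cstddef>
static size_t GuaranteedAllocatableSketch(size_t maximum_freed) {
  static const size_t kMin[] = {24, 32, 64, 128};  // assumed boundaries
  if (maximum_freed < kMin[0]) return 0;
  for (size_t cat = 1; cat < sizeof(kMin) / sizeof(kMin[0]); cat++) {
    if (maximum_freed < kMin[cat]) return kMin[cat - 1];
  }
  return maximum_freed;
}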
-Page* FreeListMany::GetPageForSize(size_t size_in_bytes) {
- FreeListCategoryType minimum_category =
- SelectFreeListCategoryType(size_in_bytes);
- Page* page = nullptr;
- for (int cat = minimum_category + 1; !page && cat <= last_category_; cat++) {
- page = GetPageForCategoryType(cat);
- }
- if (!page) {
- // Might return a page in which |size_in_bytes| will not fit.
- page = GetPageForCategoryType(minimum_category);
- }
- return page;
-}
-
-FreeSpace FreeListMany::Allocate(size_t size_in_bytes, size_t* node_size,
- AllocationOrigin origin) {
- DCHECK_GE(kMaxBlockSize, size_in_bytes);
- FreeSpace node;
- FreeListCategoryType type = SelectFreeListCategoryType(size_in_bytes);
- for (int i = type; i < last_category_ && node.is_null(); i++) {
- node = TryFindNodeIn(static_cast<FreeListCategoryType>(i), size_in_bytes,
- node_size);
- }
-
- if (node.is_null()) {
- // Searching each element of the last category.
- node = SearchForNodeInList(last_category_, size_in_bytes, node_size);
- }
-
- if (!node.is_null()) {
- Page::FromHeapObject(node)->IncreaseAllocatedBytes(*node_size);
- }
-
- DCHECK(IsVeryLong() || Available() == SumFreeLists());
- return node;
-}
-
-// ------------------------------------------------
-// FreeListManyCached implementation
-
-FreeListManyCached::FreeListManyCached() { ResetCache(); }
-
-void FreeListManyCached::Reset() {
- ResetCache();
- FreeListMany::Reset();
-}
-
-bool FreeListManyCached::AddCategory(FreeListCategory* category) {
- bool was_added = FreeList::AddCategory(category);
-
- // Updating cache
- if (was_added) {
- UpdateCacheAfterAddition(category->type_);
- }
-
-#ifdef DEBUG
- CheckCacheIntegrity();
-#endif
-
- return was_added;
-}
-
-void FreeListManyCached::RemoveCategory(FreeListCategory* category) {
- FreeList::RemoveCategory(category);
-
- // Updating cache
- int type = category->type_;
- if (categories_[type] == nullptr) {
- UpdateCacheAfterRemoval(type);
- }
-
-#ifdef DEBUG
- CheckCacheIntegrity();
-#endif
-}
-
-size_t FreeListManyCached::Free(Address start, size_t size_in_bytes,
- FreeMode mode) {
- Page* page = Page::FromAddress(start);
- page->DecreaseAllocatedBytes(size_in_bytes);
-
- // Blocks have to be a minimum size to hold free list items.
- if (size_in_bytes < min_block_size_) {
- page->add_wasted_memory(size_in_bytes);
- wasted_bytes_ += size_in_bytes;
- return size_in_bytes;
- }
-
- // Insert other blocks at the head of a free list of the appropriate
- // magnitude.
- FreeListCategoryType type = SelectFreeListCategoryType(size_in_bytes);
- page->free_list_category(type)->Free(start, size_in_bytes, mode, this);
-
- // Updating cache
- if (mode == kLinkCategory) {
- UpdateCacheAfterAddition(type);
-
-#ifdef DEBUG
- CheckCacheIntegrity();
-#endif
- }
-
- DCHECK_EQ(page->AvailableInFreeList(),
- page->AvailableInFreeListFromAllocatedBytes());
- return 0;
-}
-
-FreeSpace FreeListManyCached::Allocate(size_t size_in_bytes, size_t* node_size,
- AllocationOrigin origin) {
- USE(origin);
- DCHECK_GE(kMaxBlockSize, size_in_bytes);
-
- FreeSpace node;
- FreeListCategoryType type = SelectFreeListCategoryType(size_in_bytes);
- type = next_nonempty_category[type];
- for (; type < last_category_; type = next_nonempty_category[type + 1]) {
- node = TryFindNodeIn(type, size_in_bytes, node_size);
- if (!node.is_null()) break;
- }
-
- if (node.is_null()) {
- // Searching each element of the last category.
- type = last_category_;
- node = SearchForNodeInList(type, size_in_bytes, node_size);
- }
-
- // Updating cache
- if (!node.is_null() && categories_[type] == nullptr) {
- UpdateCacheAfterRemoval(type);
- }
-
-#ifdef DEBUG
- CheckCacheIntegrity();
-#endif
-
- if (!node.is_null()) {
- Page::FromHeapObject(node)->IncreaseAllocatedBytes(*node_size);
- }
-
- DCHECK(IsVeryLong() || Available() == SumFreeLists());
- return node;
-}
-
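// A minimal sketch (not V8's UpdateCacheAfterAddition/UpdateCacheAfterRemoval,
// which are declared elsewhere) of one way to maintain the
// next_nonempty_category cache consulted by Allocate() above: next[i] names
// the smallest non-empty category >= i, so empty categories are skipped in
// O(1) per step. The category count is an assumption, and a full rebuild is
// used here only to keep the sketch short; the cache can also be updated
// incrementally.
struct NextNonEmptyCacheSketch {
  static const int kCats = 24;  // assumed number of categories
  bool non_empty[kCats] = {};
  int next[kCats + 1];          // next[kCats] acts as the "none left" sentinel

  NextNonEmptyCacheSketch() { Rebuild(); }
  void Rebuild() {
    next[kCats] = kCats;
    for (int i = kCats - 1; i >= 0; i--) {
      next[i] = non_empty[i] ? i : next[i + 1];
    }
  }
  void AfterAddition(int cat) { non_empty[cat] = true; Rebuild(); }
  void AfterRemoval(int cat) { non_empty[cat] = false; Rebuild(); }
};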
-// ------------------------------------------------
-// FreeListManyCachedFastPath implementation
-
-FreeSpace FreeListManyCachedFastPath::Allocate(size_t size_in_bytes,
- size_t* node_size,
- AllocationOrigin origin) {
- USE(origin);
- DCHECK_GE(kMaxBlockSize, size_in_bytes);
- FreeSpace node;
-
- // Fast path part 1: searching the last categories
- FreeListCategoryType first_category =
- SelectFastAllocationFreeListCategoryType(size_in_bytes);
- FreeListCategoryType type = first_category;
- for (type = next_nonempty_category[type]; type <= last_category_;
- type = next_nonempty_category[type + 1]) {
- node = TryFindNodeIn(type, size_in_bytes, node_size);
- if (!node.is_null()) break;
- }
-
- // Fast path part 2: searching the medium categories for tiny objects
- if (node.is_null()) {
- if (size_in_bytes <= kTinyObjectMaxSize) {
- for (type = next_nonempty_category[kFastPathFallBackTiny];
- type < kFastPathFirstCategory;
- type = next_nonempty_category[type + 1]) {
- node = TryFindNodeIn(type, size_in_bytes, node_size);
- if (!node.is_null()) break;
- }
- }
- }
-
- // Searching the last category
- if (node.is_null()) {
- // Searching each element of the last category.
- type = last_category_;
- node = SearchForNodeInList(type, size_in_bytes, node_size);
- }
-
- // Finally, search the most precise category
- if (node.is_null()) {
- type = SelectFreeListCategoryType(size_in_bytes);
- for (type = next_nonempty_category[type]; type < first_category;
- type = next_nonempty_category[type + 1]) {
- node = TryFindNodeIn(type, size_in_bytes, node_size);
- if (!node.is_null()) break;
- }
- }
-
- // Updating cache
- if (!node.is_null() && categories_[type] == nullptr) {
- UpdateCacheAfterRemoval(type);
- }
-
-#ifdef DEBUG
- CheckCacheIntegrity();
-#endif
-
- if (!node.is_null()) {
- Page::FromHeapObject(node)->IncreaseAllocatedBytes(*node_size);
- }
-
- DCHECK(IsVeryLong() || Available() == SumFreeLists());
- return node;
-}
-
-// ------------------------------------------------
-// FreeListManyCachedOrigin implementation
-
-FreeSpace FreeListManyCachedOrigin::Allocate(size_t size_in_bytes,
- size_t* node_size,
- AllocationOrigin origin) {
- if (origin == AllocationOrigin::kGC) {
- return FreeListManyCached::Allocate(size_in_bytes, node_size, origin);
- } else {
- return FreeListManyCachedFastPath::Allocate(size_in_bytes, node_size,
- origin);
- }
-}
-
-// ------------------------------------------------
-// FreeListMap implementation
-
-FreeListMap::FreeListMap() {
- // Initializing base (FreeList) fields
- number_of_categories_ = 1;
- last_category_ = kOnlyCategory;
- min_block_size_ = kMinBlockSize;
- categories_ = new FreeListCategory*[number_of_categories_]();
-
- Reset();
-}
-
-size_t FreeListMap::GuaranteedAllocatable(size_t maximum_freed) {
- return maximum_freed;
-}
-
-Page* FreeListMap::GetPageForSize(size_t size_in_bytes) {
- return GetPageForCategoryType(kOnlyCategory);
-}
-
-FreeListMap::~FreeListMap() { delete[] categories_; }
-
-FreeSpace FreeListMap::Allocate(size_t size_in_bytes, size_t* node_size,
- AllocationOrigin origin) {
- DCHECK_GE(kMaxBlockSize, size_in_bytes);
-
- // The following DCHECK ensures that maps are allocated one by one (i.e.,
- // without folding). This assumption currently holds. However, if it were to
- // become untrue in the future, you'll get an error here. To fix it, I would
- // suggest removing the DCHECK, and replacing TryFindNodeIn by
- // SearchForNodeInList below.
- DCHECK_EQ(size_in_bytes, Map::kSize);
-
- FreeSpace node = TryFindNodeIn(kOnlyCategory, size_in_bytes, node_size);
-
- if (!node.is_null()) {
- Page::FromHeapObject(node)->IncreaseAllocatedBytes(*node_size);
- }
-
- DCHECK_IMPLIES(node.is_null(), IsEmpty());
- return node;
-}
-
-// ------------------------------------------------
-// Generic FreeList methods (non alloc/free related)
-
-void FreeList::Reset() {
- ForAllFreeListCategories(
- [this](FreeListCategory* category) { category->Reset(this); });
- for (int i = kFirstCategory; i < number_of_categories_; i++) {
- categories_[i] = nullptr;
- }
- wasted_bytes_ = 0;
- available_ = 0;
-}
-
-size_t FreeList::EvictFreeListItems(Page* page) {
- size_t sum = 0;
- page->ForAllFreeListCategories([this, &sum](FreeListCategory* category) {
- sum += category->available();
- RemoveCategory(category);
- category->Reset(this);
- });
- return sum;
-}
-
-void FreeList::RepairLists(Heap* heap) {
- ForAllFreeListCategories(
- [heap](FreeListCategory* category) { category->RepairFreeList(heap); });
-}
-
-bool FreeList::AddCategory(FreeListCategory* category) {
- FreeListCategoryType type = category->type_;
- DCHECK_LT(type, number_of_categories_);
- FreeListCategory* top = categories_[type];
-
- if (category->is_empty()) return false;
- DCHECK_NE(top, category);
-
- // Common double-linked list insertion.
- if (top != nullptr) {
- top->set_prev(category);
- }
- category->set_next(top);
- categories_[type] = category;
-
- IncreaseAvailableBytes(category->available());
- return true;
-}
-
-void FreeList::RemoveCategory(FreeListCategory* category) {
- FreeListCategoryType type = category->type_;
- DCHECK_LT(type, number_of_categories_);
- FreeListCategory* top = categories_[type];
-
- if (category->is_linked(this)) {
- DecreaseAvailableBytes(category->available());
- }
-
- // Common double-linked list removal.
- if (top == category) {
- categories_[type] = category->next();
- }
- if (category->prev() != nullptr) {
- category->prev()->set_next(category->next());
- }
- if (category->next() != nullptr) {
- category->next()->set_prev(category->prev());
- }
- category->set_next(nullptr);
- category->set_prev(nullptr);
-}
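AddCategory and RemoveCategory above are a standard head insertion and unlink on an intrusive doubly-linked list, with the categories_ array entry acting as the list head. A minimal standalone sketch of that pattern (hypothetical Node type, not V8 code):

#include <cassert>

struct Node {
  Node* prev = nullptr;
  Node* next = nullptr;
};

// Insert |n| at the head of the list rooted at |*head| (cf. AddCategory).
void PushFront(Node** head, Node* n) {
  if (*head != nullptr) (*head)->prev = n;
  n->next = *head;
  *head = n;
}

// Unlink |n| from the list rooted at |*head| (cf. RemoveCategory).
void Remove(Node** head, Node* n) {
  if (*head == n) *head = n->next;
  if (n->prev != nullptr) n->prev->next = n->next;
  if (n->next != nullptr) n->next->prev = n->prev;
  n->prev = n->next = nullptr;
}

int main() {
  Node a, b, c;
  Node* head = nullptr;
  PushFront(&head, &a);
  PushFront(&head, &b);
  PushFront(&head, &c);  // list is now: c -> b -> a
  Remove(&head, &b);     // list is now: c -> a
  assert(head == &c && c.next == &a && a.prev == &c);
  return 0;
}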
-
-void FreeList::PrintCategories(FreeListCategoryType type) {
- FreeListCategoryIterator it(this, type);
- PrintF("FreeList[%p, top=%p, %d] ", static_cast<void*>(this),
- static_cast<void*>(categories_[type]), type);
- while (it.HasNext()) {
- FreeListCategory* current = it.Next();
- PrintF("%p -> ", static_cast<void*>(current));
- }
- PrintF("null\n");
-}
int MemoryChunk::FreeListsLength() {
int length = 0;
@@ -3619,250 +475,5 @@ int MemoryChunk::FreeListsLength() {
return length;
}
-size_t FreeListCategory::SumFreeList() {
- size_t sum = 0;
- FreeSpace cur = top();
- while (!cur.is_null()) {
- // We can't use "cur->map()" here because both cur's map and the
- // root can be null during bootstrapping.
- DCHECK(cur.map_slot().contains_value(Page::FromHeapObject(cur)
- ->heap()
- ->isolate()
- ->root(RootIndex::kFreeSpaceMap)
- .ptr()));
- sum += cur.relaxed_read_size();
- cur = cur.next();
- }
- return sum;
-}
-int FreeListCategory::FreeListLength() {
- int length = 0;
- FreeSpace cur = top();
- while (!cur.is_null()) {
- length++;
- cur = cur.next();
- }
- return length;
-}
-
-#ifdef DEBUG
-bool FreeList::IsVeryLong() {
- int len = 0;
- for (int i = kFirstCategory; i < number_of_categories_; i++) {
- FreeListCategoryIterator it(this, static_cast<FreeListCategoryType>(i));
- while (it.HasNext()) {
- len += it.Next()->FreeListLength();
- if (len >= FreeListCategory::kVeryLongFreeList) return true;
- }
- }
- return false;
-}
-
-
-// This can take a very long time because it is linear in the number of entries
-// on the free list, so it should not be called if FreeListLength returns
-// kVeryLongFreeList.
-size_t FreeList::SumFreeLists() {
- size_t sum = 0;
- ForAllFreeListCategories(
- [&sum](FreeListCategory* category) { sum += category->SumFreeList(); });
- return sum;
-}
-#endif
-
-
-// -----------------------------------------------------------------------------
-// OldSpace implementation
-
-void PagedSpace::PrepareForMarkCompact() {
- // We don't have a linear allocation area while sweeping. It will be restored
- // on the first allocation after the sweep.
- FreeLinearAllocationArea();
-
- // Clear the free list before a full GC---it will be rebuilt afterward.
- free_list_->Reset();
-}
-
-size_t PagedSpace::SizeOfObjects() {
- CHECK_GE(limit(), top());
- DCHECK_GE(Size(), static_cast<size_t>(limit() - top()));
- return Size() - (limit() - top());
-}
-
-bool PagedSpace::EnsureSweptAndRetryAllocation(int size_in_bytes,
- AllocationOrigin origin) {
- DCHECK(!is_local_space());
- MarkCompactCollector* collector = heap()->mark_compact_collector();
- if (collector->sweeping_in_progress()) {
- // Wait for the sweeper threads here and complete the sweeping phase.
- collector->EnsureSweepingCompleted();
-
- // After waiting for the sweeper threads, there may be new free-list
- // entries.
- return RefillLinearAllocationAreaFromFreeList(size_in_bytes, origin);
- }
- return false;
-}
-
-bool PagedSpace::SlowRefillLinearAllocationArea(int size_in_bytes,
- AllocationOrigin origin) {
- VMState<GC> state(heap()->isolate());
- RuntimeCallTimerScope runtime_timer(
- heap()->isolate(), RuntimeCallCounterId::kGC_Custom_SlowAllocateRaw);
- base::Optional<base::MutexGuard> optional_mutex;
-
- if (FLAG_concurrent_allocation && origin != AllocationOrigin::kGC &&
- identity() == OLD_SPACE) {
- optional_mutex.emplace(&allocation_mutex_);
- }
-
- return RawSlowRefillLinearAllocationArea(size_in_bytes, origin);
-}
-
-bool CompactionSpace::SlowRefillLinearAllocationArea(int size_in_bytes,
- AllocationOrigin origin) {
- return RawSlowRefillLinearAllocationArea(size_in_bytes, origin);
-}
-
-bool OffThreadSpace::SlowRefillLinearAllocationArea(int size_in_bytes,
- AllocationOrigin origin) {
- if (RefillLinearAllocationAreaFromFreeList(size_in_bytes, origin))
- return true;
-
- if (Expand()) {
- DCHECK((CountTotalPages() > 1) ||
- (static_cast<size_t>(size_in_bytes) <= free_list_->Available()));
- return RefillLinearAllocationAreaFromFreeList(
- static_cast<size_t>(size_in_bytes), origin);
- }
-
- return false;
-}
-
-bool PagedSpace::RawSlowRefillLinearAllocationArea(int size_in_bytes,
- AllocationOrigin origin) {
- // Non-compaction local spaces are not supported.
- DCHECK_IMPLIES(is_local_space(), is_compaction_space());
-
- // Allocation in this space has failed.
- DCHECK_GE(size_in_bytes, 0);
- const int kMaxPagesToSweep = 1;
-
- if (RefillLinearAllocationAreaFromFreeList(size_in_bytes, origin))
- return true;
-
- MarkCompactCollector* collector = heap()->mark_compact_collector();
- // Sweeping is still in progress.
- if (collector->sweeping_in_progress()) {
- if (FLAG_concurrent_sweeping && !is_compaction_space() &&
- !collector->sweeper()->AreSweeperTasksRunning()) {
- collector->EnsureSweepingCompleted();
- }
-
- // First try to refill the free-list, concurrent sweeper threads
- // may have freed some objects in the meantime.
- RefillFreeList();
-
- // Retry the free list allocation.
- if (RefillLinearAllocationAreaFromFreeList(
- static_cast<size_t>(size_in_bytes), origin))
- return true;
-
- if (SweepAndRetryAllocation(size_in_bytes, kMaxPagesToSweep, size_in_bytes,
- origin))
- return true;
- }
-
- if (is_compaction_space()) {
- // The main thread may have acquired all swept pages. Try to steal from
- // it. This can only happen during young generation evacuation.
- PagedSpace* main_space = heap()->paged_space(identity());
- Page* page = main_space->RemovePageSafe(size_in_bytes);
- if (page != nullptr) {
- AddPage(page);
- if (RefillLinearAllocationAreaFromFreeList(
- static_cast<size_t>(size_in_bytes), origin))
- return true;
- }
- }
-
- if (heap()->ShouldExpandOldGenerationOnSlowAllocation() && Expand()) {
- DCHECK((CountTotalPages() > 1) ||
- (static_cast<size_t>(size_in_bytes) <= free_list_->Available()));
- return RefillLinearAllocationAreaFromFreeList(
- static_cast<size_t>(size_in_bytes), origin);
- }
-
- if (is_compaction_space()) {
- return SweepAndRetryAllocation(0, 0, size_in_bytes, origin);
-
- } else {
- // If sweeper threads are active, wait for them at that point and steal
- // elements from their free-lists. Allocation may still fail here, which
- // would indicate that there is not enough memory for the given allocation.
- return EnsureSweptAndRetryAllocation(size_in_bytes, origin);
- }
-}
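RawSlowRefillLinearAllocationArea above walks through progressively more expensive fallbacks. A condensed sketch of that retry order, with stub booleans standing in for the real refill, sweep, steal and expand calls (illustrative only, not the V8 interfaces):

#include <cstdio>

struct Attempts {
  bool free_list, after_refill, after_sweep_some, stolen_page, expanded, final_sweep;
};

// Mirrors the order of fallbacks in RawSlowRefillLinearAllocationArea.
bool SlowAllocate(const Attempts& a, bool sweeping_in_progress, bool is_compaction_space) {
  if (a.free_list) return true;                           // 1. plain free-list refill
  if (sweeping_in_progress) {
    if (a.after_refill) return true;                      // 2. refill after concurrent sweeping progress
    if (a.after_sweep_some) return true;                  // 3. sweep up to one page, then retry
  }
  if (is_compaction_space && a.stolen_page) return true;  // 4. steal a swept page from the main space
  if (a.expanded) return true;                            // 5. grow the space by a new page
  return a.final_sweep;                                   // 6. finish (or contribute to) sweeping and retry
}

int main() {
  Attempts a{false, false, false, false, true, false};
  std::printf("%d\n", SlowAllocate(a, true, false));  // prints 1: succeeds at the expand step
  return 0;
}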
-
-bool PagedSpace::SweepAndRetryAllocation(int required_freed_bytes,
- int max_pages, int size_in_bytes,
- AllocationOrigin origin) {
- // Cleanup invalidated old-to-new refs for compaction space in the
- // final atomic pause.
- Sweeper::FreeSpaceMayContainInvalidatedSlots invalidated_slots_in_free_space =
- is_compaction_space() ? Sweeper::FreeSpaceMayContainInvalidatedSlots::kYes
- : Sweeper::FreeSpaceMayContainInvalidatedSlots::kNo;
-
- MarkCompactCollector* collector = heap()->mark_compact_collector();
- if (collector->sweeping_in_progress()) {
- int max_freed = collector->sweeper()->ParallelSweepSpace(
- identity(), required_freed_bytes, max_pages,
- invalidated_slots_in_free_space);
- RefillFreeList();
- if (max_freed >= size_in_bytes)
- return RefillLinearAllocationAreaFromFreeList(size_in_bytes, origin);
- }
- return false;
-}
-
-// -----------------------------------------------------------------------------
-// MapSpace implementation
-
-// TODO(dmercadier): use a heap instead of sorting like that.
-// Using a heap will have multiple benefits:
-// - for now, SortFreeList is only called after sweeping, which is somewhat
-// late. Using a heap, sorting could be done online: FreeListCategories would
- // be inserted in a heap (i.e., in a sorted manner).
-// - SortFreeList is a bit fragile: any change to FreeListMap (or to
-// MapSpace::free_list_) could break it.
-void MapSpace::SortFreeList() {
- using LiveBytesPagePair = std::pair<size_t, Page*>;
- std::vector<LiveBytesPagePair> pages;
- pages.reserve(CountTotalPages());
-
- for (Page* p : *this) {
- free_list()->RemoveCategory(p->free_list_category(kFirstCategory));
- pages.push_back(std::make_pair(p->allocated_bytes(), p));
- }
-
- // Sorting by least-allocated-bytes first.
- std::sort(pages.begin(), pages.end(),
- [](const LiveBytesPagePair& a, const LiveBytesPagePair& b) {
- return a.first < b.first;
- });
-
- for (LiveBytesPagePair const& p : pages) {
- // Since AddCategory inserts at the head, it reverses the order produced by
- // the sort above: the least-allocated-bytes page is added first and
- // therefore ends up last (and the most-allocated-bytes page ends up first).
- free_list()->AddCategory(p.second->free_list_category(kFirstCategory));
- }
-}
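The loop comment above relies on the fact that repeated head insertion reverses an ascending order. A tiny self-contained demonstration of that effect (plain C++, no V8 types):

#include <algorithm>
#include <cstdio>
#include <forward_list>
#include <vector>

int main() {
  std::vector<int> allocated_bytes = {300, 100, 200};
  std::sort(allocated_bytes.begin(), allocated_bytes.end());  // 100, 200, 300

  // Stand-in for the per-type category list: push_front mimics AddCategory.
  std::forward_list<int> category_list;
  for (int bytes : allocated_bytes) category_list.push_front(bytes);

  for (int bytes : category_list) std::printf("%d ", bytes);  // prints: 300 200 100
  std::printf("\n");
  return 0;
}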
-
-#ifdef VERIFY_HEAP
-void MapSpace::VerifyObject(HeapObject object) { CHECK(object.IsMap()); }
-#endif
-
} // namespace internal
} // namespace v8
diff --git a/chromium/v8/src/heap/spaces.h b/chromium/v8/src/heap/spaces.h
index 72ae96cadd2..31fb4b22f5b 100644
--- a/chromium/v8/src/heap/spaces.h
+++ b/chromium/v8/src/heap/spaces.h
@@ -6,34 +6,19 @@
#define V8_HEAP_SPACES_H_
#include <atomic>
-#include <list>
-#include <map>
#include <memory>
-#include <unordered_map>
-#include <unordered_set>
#include <vector>
-#include "src/base/atomic-utils.h"
-#include "src/base/bounded-page-allocator.h"
-#include "src/base/export-template.h"
#include "src/base/iterator.h"
#include "src/base/macros.h"
-#include "src/base/optional.h"
-#include "src/base/platform/mutex.h"
#include "src/common/globals.h"
-#include "src/flags/flags.h"
+#include "src/heap/base-space.h"
#include "src/heap/basic-memory-chunk.h"
+#include "src/heap/free-list.h"
#include "src/heap/heap.h"
-#include "src/heap/invalidated-slots.h"
#include "src/heap/list.h"
-#include "src/heap/marking.h"
#include "src/heap/memory-chunk.h"
-#include "src/heap/slot-set.h"
-#include "src/objects/free-space.h"
-#include "src/objects/heap-object.h"
-#include "src/objects/map.h"
#include "src/objects/objects.h"
-#include "src/tasks/cancelable-task.h"
#include "src/utils/allocation.h"
#include "src/utils/utils.h"
#include "testing/gtest/include/gtest/gtest_prod.h" // nogncheck
@@ -47,26 +32,15 @@ class TestCodePageAllocatorScope;
} // namespace heap
class AllocationObserver;
-class CompactionSpace;
-class CompactionSpaceCollection;
class FreeList;
class Isolate;
class LargeObjectSpace;
class LargePage;
class LinearAllocationArea;
class LocalArrayBufferTracker;
-class LocalSpace;
-class MemoryAllocator;
-class MemoryChunk;
-class MemoryChunkLayout;
-class OffThreadSpace;
class Page;
class PagedSpace;
class SemiSpace;
-class SlotsBuffer;
-class SlotSet;
-class TypedSlotSet;
-class Space;
// -----------------------------------------------------------------------------
// Heap structures:
@@ -130,272 +104,14 @@ class Space;
#define DCHECK_CODEOBJECT_SIZE(size, code_space) \
DCHECK((0 < size) && (size <= code_space->AreaSize()))
-using FreeListCategoryType = int32_t;
-
-static const FreeListCategoryType kFirstCategory = 0;
-static const FreeListCategoryType kInvalidCategory = -1;
-
-enum FreeMode { kLinkCategory, kDoNotLinkCategory };
-
-enum class SpaceAccountingMode { kSpaceAccounted, kSpaceUnaccounted };
-
-// A free list category maintains a linked list of free memory blocks.
-class FreeListCategory {
- public:
- void Initialize(FreeListCategoryType type) {
- type_ = type;
- available_ = 0;
- prev_ = nullptr;
- next_ = nullptr;
- }
-
- void Reset(FreeList* owner);
-
- void RepairFreeList(Heap* heap);
-
- // Relinks the category into the currently owning free list. Requires that the
- // category is currently unlinked.
- void Relink(FreeList* owner);
-
- void Free(Address address, size_t size_in_bytes, FreeMode mode,
- FreeList* owner);
-
- // Performs a single try to pick a node of at least |minimum_size| from the
- // category. Stores the actual size in |node_size|. Returns nullptr if no
- // node is found.
- FreeSpace PickNodeFromList(size_t minimum_size, size_t* node_size);
-
- // Picks a node of at least |minimum_size| from the category. Stores the
- // actual size in |node_size|. Returns nullptr if no node is found.
- FreeSpace SearchForNodeInList(size_t minimum_size, size_t* node_size);
-
- inline bool is_linked(FreeList* owner) const;
- bool is_empty() { return top().is_null(); }
- uint32_t available() const { return available_; }
-
- size_t SumFreeList();
- int FreeListLength();
-
- private:
- // For debug builds we accurately compute free list lengths up until
- // {kVeryLongFreeList} by manually walking the list.
- static const int kVeryLongFreeList = 500;
-
- // Updates |available_| and free_list_->Available() after an allocation of
- // size |allocation_size|.
- inline void UpdateCountersAfterAllocation(size_t allocation_size);
-
- FreeSpace top() { return top_; }
- void set_top(FreeSpace top) { top_ = top; }
- FreeListCategory* prev() { return prev_; }
- void set_prev(FreeListCategory* prev) { prev_ = prev; }
- FreeListCategory* next() { return next_; }
- void set_next(FreeListCategory* next) { next_ = next; }
-
- // |type_|: The type of this free list category.
- FreeListCategoryType type_ = kInvalidCategory;
-
- // |available_|: Total available bytes in all blocks of this free list
- // category.
- uint32_t available_ = 0;
-
- // |top_|: Points to the top FreeSpace in the free list category.
- FreeSpace top_;
-
- FreeListCategory* prev_ = nullptr;
- FreeListCategory* next_ = nullptr;
-
- friend class FreeList;
- friend class FreeListManyCached;
- friend class PagedSpace;
- friend class MapSpace;
-};
-
-// A free list maintains free blocks of memory. The free list is organized in
-// a way to encourage objects allocated around the same time to be near each
-// other. The normal way to allocate is intended to be by bumping a 'top'
-// pointer until it hits a 'limit' pointer. When the limit is hit we need to
-// find a new space to allocate from. This is done with the free list, which is
-// divided up into rough categories to cut down on waste. Having finer
-// categories would scatter allocation more.
-class FreeList {
- public:
- // Creates a FreeList of the default class (FreeListLegacy for now).
- V8_EXPORT_PRIVATE static FreeList* CreateFreeList();
-
- virtual ~FreeList() = default;
-
- // Returns how much memory can be allocated after freeing maximum_freed
- // memory.
- virtual size_t GuaranteedAllocatable(size_t maximum_freed) = 0;
-
- // Adds a node on the free list. The block of size {size_in_bytes} starting
- // at {start} is placed on the free list. The return value is the number of
- // bytes that were not added to the free list, because the freed memory block
- // was too small. Bookkeeping information will be written to the block, i.e.,
- // its contents will be destroyed. The start address should be word aligned,
- // and the size should be a non-zero multiple of the word size.
- virtual size_t Free(Address start, size_t size_in_bytes, FreeMode mode);
-
- // Allocates a free space node from the free list of at least size_in_bytes
- // bytes. Returns the actual node size in node_size which can be bigger than
- // size_in_bytes. This method returns null if the allocation request cannot be
- // handled by the free list.
- virtual V8_WARN_UNUSED_RESULT FreeSpace Allocate(size_t size_in_bytes,
- size_t* node_size,
- AllocationOrigin origin) = 0;
-
- // Returns a page containing an entry for a given type, or nullptr otherwise.
- V8_EXPORT_PRIVATE virtual Page* GetPageForSize(size_t size_in_bytes) = 0;
-
- virtual void Reset();
-
- // Return the number of bytes available on the free list.
- size_t Available() {
- DCHECK(available_ == SumFreeLists());
- return available_;
- }
-
- // Updates the number of available bytes on the free lists.
- void IncreaseAvailableBytes(size_t bytes) { available_ += bytes; }
- void DecreaseAvailableBytes(size_t bytes) { available_ -= bytes; }
-
- bool IsEmpty() {
- bool empty = true;
- ForAllFreeListCategories([&empty](FreeListCategory* category) {
- if (!category->is_empty()) empty = false;
- });
- return empty;
- }
-
- // Used after booting the VM.
- void RepairLists(Heap* heap);
-
- V8_EXPORT_PRIVATE size_t EvictFreeListItems(Page* page);
-
- int number_of_categories() { return number_of_categories_; }
- FreeListCategoryType last_category() { return last_category_; }
-
- size_t wasted_bytes() { return wasted_bytes_; }
-
- template <typename Callback>
- void ForAllFreeListCategories(FreeListCategoryType type, Callback callback) {
- FreeListCategory* current = categories_[type];
- while (current != nullptr) {
- FreeListCategory* next = current->next();
- callback(current);
- current = next;
- }
- }
-
- template <typename Callback>
- void ForAllFreeListCategories(Callback callback) {
- for (int i = kFirstCategory; i < number_of_categories(); i++) {
- ForAllFreeListCategories(static_cast<FreeListCategoryType>(i), callback);
- }
- }
-
- virtual bool AddCategory(FreeListCategory* category);
- virtual V8_EXPORT_PRIVATE void RemoveCategory(FreeListCategory* category);
- void PrintCategories(FreeListCategoryType type);
-
- protected:
- class FreeListCategoryIterator final {
- public:
- FreeListCategoryIterator(FreeList* free_list, FreeListCategoryType type)
- : current_(free_list->categories_[type]) {}
-
- bool HasNext() const { return current_ != nullptr; }
-
- FreeListCategory* Next() {
- DCHECK(HasNext());
- FreeListCategory* tmp = current_;
- current_ = current_->next();
- return tmp;
- }
-
- private:
- FreeListCategory* current_;
- };
-
-#ifdef DEBUG
- V8_EXPORT_PRIVATE size_t SumFreeLists();
- bool IsVeryLong();
-#endif
-
- // Tries to retrieve a node from the first category in a given |type|.
- // Returns nullptr if the category is empty or the top entry is smaller
- // than minimum_size.
- FreeSpace TryFindNodeIn(FreeListCategoryType type, size_t minimum_size,
- size_t* node_size);
-
- // Searches a given |type| for a node of at least |minimum_size|.
- FreeSpace SearchForNodeInList(FreeListCategoryType type, size_t minimum_size,
- size_t* node_size);
-
- // Returns the smallest category in which an object of |size_in_bytes| could
- // fit.
- virtual FreeListCategoryType SelectFreeListCategoryType(
- size_t size_in_bytes) = 0;
-
- FreeListCategory* top(FreeListCategoryType type) const {
- return categories_[type];
- }
-
- inline Page* GetPageForCategoryType(FreeListCategoryType type);
-
- int number_of_categories_ = 0;
- FreeListCategoryType last_category_ = 0;
- size_t min_block_size_ = 0;
-
- std::atomic<size_t> wasted_bytes_{0};
- FreeListCategory** categories_ = nullptr;
-
- // |available_|: The number of bytes in this freelist.
- size_t available_ = 0;
-
- friend class FreeListCategory;
- friend class Page;
- friend class MemoryChunk;
- friend class ReadOnlyPage;
- friend class MapSpace;
-};
-
-// FreeList used for spaces that don't have freelists
-// (only the LargeObject space for now).
-class NoFreeList final : public FreeList {
- public:
- size_t GuaranteedAllocatable(size_t maximum_freed) final {
- FATAL("NoFreeList can't be used as a standard FreeList. ");
- }
- size_t Free(Address start, size_t size_in_bytes, FreeMode mode) final {
- FATAL("NoFreeList can't be used as a standard FreeList.");
- }
- V8_WARN_UNUSED_RESULT FreeSpace Allocate(size_t size_in_bytes,
- size_t* node_size,
- AllocationOrigin origin) final {
- FATAL("NoFreeList can't be used as a standard FreeList.");
- }
- Page* GetPageForSize(size_t size_in_bytes) final {
- FATAL("NoFreeList can't be used as a standard FreeList.");
- }
-
- private:
- FreeListCategoryType SelectFreeListCategoryType(size_t size_in_bytes) final {
- FATAL("NoFreeList can't be used as a standard FreeList.");
- }
-};
-
// ----------------------------------------------------------------------------
-// Space is the abstract superclass for all allocation spaces.
-class V8_EXPORT_PRIVATE Space : public Malloced {
+// Space is the abstract superclass for all allocation spaces that are not
+// sealed after startup (i.e. not ReadOnlySpace).
+class V8_EXPORT_PRIVATE Space : public BaseSpace {
public:
Space(Heap* heap, AllocationSpace id, FreeList* free_list)
- : allocation_observers_paused_(false),
- heap_(heap),
- id_(id),
- committed_(0),
- max_committed_(0),
+ : BaseSpace(heap, id),
+ allocation_observers_paused_(false),
free_list_(std::unique_ptr<FreeList>(free_list)) {
external_backing_store_bytes_ =
new std::atomic<size_t>[ExternalBackingStoreType::kNumTypes];
@@ -407,22 +123,11 @@ class V8_EXPORT_PRIVATE Space : public Malloced {
static inline void MoveExternalBackingStoreBytes(
ExternalBackingStoreType type, Space* from, Space* to, size_t amount);
- virtual ~Space() {
+ ~Space() override {
delete[] external_backing_store_bytes_;
external_backing_store_bytes_ = nullptr;
}
- Heap* heap() const {
- DCHECK_NOT_NULL(heap_);
- return heap_;
- }
-
- bool IsDetached() const { return heap_ == nullptr; }
-
- AllocationSpace identity() { return id_; }
-
- const char* name() { return Heap::GetSpaceName(id_); }
-
virtual void AddAllocationObserver(AllocationObserver* observer);
virtual void RemoveAllocationObserver(AllocationObserver* observer);
@@ -440,22 +145,10 @@ class V8_EXPORT_PRIVATE Space : public Malloced {
// single allocation-folding group.
void AllocationStepAfterMerge(Address first_object_in_chunk, int size);
- // Return the total amount committed memory for this space, i.e., allocatable
- // memory and page headers.
- virtual size_t CommittedMemory() { return committed_; }
-
- virtual size_t MaximumCommittedMemory() { return max_committed_; }
-
- // Returns allocated size.
- virtual size_t Size() = 0;
-
// Returns size of objects. Can differ from the allocated size
// (e.g. see OldLargeObjectSpace).
virtual size_t SizeOfObjects() { return Size(); }
- // Approximate amount of physical memory committed for this space.
- virtual size_t CommittedPhysicalMemory() = 0;
-
// Return the available bytes without growing.
virtual size_t Available() = 0;
@@ -469,19 +162,6 @@ class V8_EXPORT_PRIVATE Space : public Malloced {
virtual std::unique_ptr<ObjectIterator> GetObjectIterator(Heap* heap) = 0;
- void AccountCommitted(size_t bytes) {
- DCHECK_GE(committed_ + bytes, committed_);
- committed_ += bytes;
- if (committed_ > max_committed_) {
- max_committed_ = committed_;
- }
- }
-
- void AccountUncommitted(size_t bytes) {
- DCHECK_GE(committed_, committed_ - bytes);
- committed_ -= bytes;
- }
-
inline void IncrementExternalBackingStoreBytes(ExternalBackingStoreType type,
size_t amount);
@@ -494,15 +174,18 @@ class V8_EXPORT_PRIVATE Space : public Malloced {
return external_backing_store_bytes_[type];
}
- void* GetRandomMmapAddr();
-
MemoryChunk* first_page() { return memory_chunk_list_.front(); }
MemoryChunk* last_page() { return memory_chunk_list_.back(); }
+ const MemoryChunk* first_page() const { return memory_chunk_list_.front(); }
+ const MemoryChunk* last_page() const { return memory_chunk_list_.back(); }
+
heap::List<MemoryChunk>& memory_chunk_list() { return memory_chunk_list_; }
FreeList* free_list() { return free_list_.get(); }
+ Address FirstPageAddress() const { return first_page()->address(); }
+
#ifdef DEBUG
virtual void Print() = 0;
#endif
@@ -513,8 +196,6 @@ class V8_EXPORT_PRIVATE Space : public Malloced {
return !allocation_observers_paused_ && !allocation_observers_.empty();
}
- void DetachFromHeap() { heap_ = nullptr; }
-
std::vector<AllocationObserver*> allocation_observers_;
// The List manages the pages that belong to the given space.
@@ -524,36 +205,12 @@ class V8_EXPORT_PRIVATE Space : public Malloced {
std::atomic<size_t>* external_backing_store_bytes_;
bool allocation_observers_paused_;
- Heap* heap_;
- AllocationSpace id_;
-
- // Keeps track of committed memory in a space.
- std::atomic<size_t> committed_;
- size_t max_committed_;
std::unique_ptr<FreeList> free_list_;
DISALLOW_COPY_AND_ASSIGN(Space);
};
-// The CodeObjectRegistry holds all start addresses of code objects of a given
-// MemoryChunk. Each MemoryChunk owns a separate CodeObjectRegistry. The
-// CodeObjectRegistry allows fast lookup from an inner pointer of a code object
-// to the actual code object.
-class V8_EXPORT_PRIVATE CodeObjectRegistry {
- public:
- void RegisterNewlyAllocatedCodeObject(Address code);
- void RegisterAlreadyExistingCodeObject(Address code);
- void Clear();
- void Finalize();
- bool Contains(Address code) const;
- Address GetCodeObjectStartFromInnerAddress(Address address) const;
-
- private:
- std::vector<Address> code_object_registry_already_existing_;
- std::set<Address> code_object_registry_newly_allocated_;
-};
-
STATIC_ASSERT(sizeof(std::atomic<intptr_t>) == kSystemPointerSize);
// -----------------------------------------------------------------------------
@@ -609,6 +266,13 @@ class Page : public MemoryChunk {
Page* next_page() { return static_cast<Page*>(list_node_.next()); }
Page* prev_page() { return static_cast<Page*>(list_node_.prev()); }
+ const Page* next_page() const {
+ return static_cast<const Page*>(list_node_.next());
+ }
+ const Page* prev_page() const {
+ return static_cast<const Page*>(list_node_.prev());
+ }
+
template <typename Callback>
inline void ForAllFreeListCategories(Callback callback) {
for (int i = kFirstCategory;
@@ -617,17 +281,6 @@ class Page : public MemoryChunk {
}
}
- // Returns the offset of a given address to this page.
- inline size_t Offset(Address a) { return static_cast<size_t>(a - address()); }
-
- // Returns the address for a given offset in this page.
- Address OffsetToAddress(size_t offset) {
- Address address_in_page = address() + offset;
- DCHECK_GE(address_in_page, area_start());
- DCHECK_LT(address_in_page, area_end());
- return address_in_page;
- }
-
void AllocateLocalTracker();
inline LocalArrayBufferTracker* local_tracker() { return local_tracker_; }
bool contains_array_buffers();
@@ -643,25 +296,12 @@ class Page : public MemoryChunk {
return categories_[type];
}
- size_t wasted_memory() { return wasted_memory_; }
- void add_wasted_memory(size_t waste) { wasted_memory_ += waste; }
- size_t allocated_bytes() { return allocated_bytes_; }
- void IncreaseAllocatedBytes(size_t bytes) {
- DCHECK_LE(bytes, area_size());
- allocated_bytes_ += bytes;
- }
- void DecreaseAllocatedBytes(size_t bytes) {
- DCHECK_LE(bytes, area_size());
- DCHECK_GE(allocated_bytes(), bytes);
- allocated_bytes_ -= bytes;
- }
-
- void ResetAllocationStatistics();
-
size_t ShrinkToHighWaterMark();
V8_EXPORT_PRIVATE void CreateBlackArea(Address start, Address end);
+ V8_EXPORT_PRIVATE void CreateBlackAreaBackground(Address start, Address end);
void DestroyBlackArea(Address start, Address end);
+ void DestroyBlackAreaBackground(Address start, Address end);
void InitializeFreeListCategories();
void AllocateFreeListCategories();
@@ -679,403 +319,6 @@ STATIC_ASSERT(sizeof(BasicMemoryChunk) <= BasicMemoryChunk::kHeaderSize);
STATIC_ASSERT(sizeof(MemoryChunk) <= MemoryChunk::kHeaderSize);
STATIC_ASSERT(sizeof(Page) <= MemoryChunk::kHeaderSize);
-// The process-wide singleton that keeps track of code range regions with the
-// intention to reuse free code range regions as a workaround for CFG memory
-// leaks (see crbug.com/870054).
-class CodeRangeAddressHint {
- public:
- // Returns the most recently freed code range start address for the given
- // size. If there is no such entry, then a random address is returned.
- V8_EXPORT_PRIVATE Address GetAddressHint(size_t code_range_size);
-
- V8_EXPORT_PRIVATE void NotifyFreedCodeRange(Address code_range_start,
- size_t code_range_size);
-
- private:
- base::Mutex mutex_;
- // A map from code range size to an array of recently freed code range
- // addresses. There should be O(1) different code range sizes.
- // The length of each array is limited by the peak number of code ranges,
- // which should be also O(1).
- std::unordered_map<size_t, std::vector<Address>> recently_freed_;
-};
-
-// ----------------------------------------------------------------------------
-// A space acquires chunks of memory from the operating system. The memory
-// allocator allocates and deallocates pages for the paged heap spaces and large
-// pages for large object space.
-class MemoryAllocator {
- public:
- // Unmapper takes care of concurrently unmapping and uncommitting memory
- // chunks.
- class Unmapper {
- public:
- class UnmapFreeMemoryTask;
-
- Unmapper(Heap* heap, MemoryAllocator* allocator)
- : heap_(heap),
- allocator_(allocator),
- pending_unmapping_tasks_semaphore_(0),
- pending_unmapping_tasks_(0),
- active_unmapping_tasks_(0) {
- chunks_[kRegular].reserve(kReservedQueueingSlots);
- chunks_[kPooled].reserve(kReservedQueueingSlots);
- }
-
- void AddMemoryChunkSafe(MemoryChunk* chunk) {
- if (!chunk->IsLargePage() && chunk->executable() != EXECUTABLE) {
- AddMemoryChunkSafe<kRegular>(chunk);
- } else {
- AddMemoryChunkSafe<kNonRegular>(chunk);
- }
- }
-
- MemoryChunk* TryGetPooledMemoryChunkSafe() {
- // Procedure:
- // (1) Try to get a chunk that was declared as pooled and already has
- // been uncommitted.
- // (2) Try to steal any memory chunk of kPageSize that would've been
- // unmapped.
- MemoryChunk* chunk = GetMemoryChunkSafe<kPooled>();
- if (chunk == nullptr) {
- chunk = GetMemoryChunkSafe<kRegular>();
- if (chunk != nullptr) {
- // For stolen chunks we need to manually free any allocated memory.
- chunk->ReleaseAllAllocatedMemory();
- }
- }
- return chunk;
- }
-
- V8_EXPORT_PRIVATE void FreeQueuedChunks();
- void CancelAndWaitForPendingTasks();
- void PrepareForGC();
- V8_EXPORT_PRIVATE void EnsureUnmappingCompleted();
- V8_EXPORT_PRIVATE void TearDown();
- size_t NumberOfCommittedChunks();
- V8_EXPORT_PRIVATE int NumberOfChunks();
- size_t CommittedBufferedMemory();
-
- private:
- static const int kReservedQueueingSlots = 64;
- static const int kMaxUnmapperTasks = 4;
-
- enum ChunkQueueType {
- kRegular, // Pages of kPageSize that do not live in a CodeRange and
- // can thus be used for stealing.
- kNonRegular, // Large chunks and executable chunks.
- kPooled, // Pooled chunks, already uncommitted and ready for reuse.
- kNumberOfChunkQueues,
- };
-
- enum class FreeMode {
- kUncommitPooled,
- kReleasePooled,
- };
-
- template <ChunkQueueType type>
- void AddMemoryChunkSafe(MemoryChunk* chunk) {
- base::MutexGuard guard(&mutex_);
- chunks_[type].push_back(chunk);
- }
-
- template <ChunkQueueType type>
- MemoryChunk* GetMemoryChunkSafe() {
- base::MutexGuard guard(&mutex_);
- if (chunks_[type].empty()) return nullptr;
- MemoryChunk* chunk = chunks_[type].back();
- chunks_[type].pop_back();
- return chunk;
- }
-
- bool MakeRoomForNewTasks();
-
- template <FreeMode mode>
- void PerformFreeMemoryOnQueuedChunks();
-
- void PerformFreeMemoryOnQueuedNonRegularChunks();
-
- Heap* const heap_;
- MemoryAllocator* const allocator_;
- base::Mutex mutex_;
- std::vector<MemoryChunk*> chunks_[kNumberOfChunkQueues];
- CancelableTaskManager::Id task_ids_[kMaxUnmapperTasks];
- base::Semaphore pending_unmapping_tasks_semaphore_;
- intptr_t pending_unmapping_tasks_;
- std::atomic<intptr_t> active_unmapping_tasks_;
-
- friend class MemoryAllocator;
- };
-
- enum AllocationMode {
- kRegular,
- kPooled,
- };
-
- enum FreeMode {
- kFull,
- kAlreadyPooled,
- kPreFreeAndQueue,
- kPooledAndQueue,
- };
-
- V8_EXPORT_PRIVATE static intptr_t GetCommitPageSize();
-
- // Computes the memory area of discardable memory within a given memory area
- // [addr, addr+size) and returns the result as base::AddressRegion. If the
- // memory is not discardable, an empty base::AddressRegion is returned.
- V8_EXPORT_PRIVATE static base::AddressRegion ComputeDiscardMemoryArea(
- Address addr, size_t size);
-
- V8_EXPORT_PRIVATE MemoryAllocator(Isolate* isolate, size_t max_capacity,
- size_t code_range_size);
-
- V8_EXPORT_PRIVATE void TearDown();
-
- // Allocates a Page from the allocator. AllocationMode is used to indicate
- // whether pooled allocation, which only works for MemoryChunk::kPageSize,
- // should be tried first.
- template <MemoryAllocator::AllocationMode alloc_mode = kRegular,
- typename SpaceType>
- EXPORT_TEMPLATE_DECLARE(V8_EXPORT_PRIVATE)
- Page* AllocatePage(size_t size, SpaceType* owner, Executability executable);
-
- LargePage* AllocateLargePage(size_t size, LargeObjectSpace* owner,
- Executability executable);
-
- template <MemoryAllocator::FreeMode mode = kFull>
- EXPORT_TEMPLATE_DECLARE(V8_EXPORT_PRIVATE)
- void Free(MemoryChunk* chunk);
-
- // Returns allocated spaces in bytes.
- size_t Size() { return size_; }
-
- // Returns allocated executable spaces in bytes.
- size_t SizeExecutable() { return size_executable_; }
-
- // Returns the maximum available bytes of heaps.
- size_t Available() {
- const size_t size = Size();
- return capacity_ < size ? 0 : capacity_ - size;
- }
-
- // Returns an indication of whether a pointer is in a space that has
- // been allocated by this MemoryAllocator.
- V8_INLINE bool IsOutsideAllocatedSpace(Address address) {
- return address < lowest_ever_allocated_ ||
- address >= highest_ever_allocated_;
- }
-
- // Returns a MemoryChunk in which the memory region from commit_area_size to
- // reserve_area_size of the chunk area is reserved but not committed. It
- // can be committed later by calling MemoryChunk::CommitArea.
- V8_EXPORT_PRIVATE MemoryChunk* AllocateChunk(size_t reserve_area_size,
- size_t commit_area_size,
- Executability executable,
- Space* space);
-
- Address AllocateAlignedMemory(size_t reserve_size, size_t commit_size,
- size_t alignment, Executability executable,
- void* hint, VirtualMemory* controller);
-
- void FreeMemory(v8::PageAllocator* page_allocator, Address addr, size_t size);
-
- // Partially release |bytes_to_free| bytes starting at |start_free|. Note that
- // internally memory is freed from |start_free| to the end of the reservation.
- // Additional memory beyond the page is not accounted for, though, so
- // |bytes_to_free| is computed by the caller.
- void PartialFreeMemory(MemoryChunk* chunk, Address start_free,
- size_t bytes_to_free, Address new_area_end);
-
- // Checks if an allocated MemoryChunk was intended to be used for executable
- // memory.
- bool IsMemoryChunkExecutable(MemoryChunk* chunk) {
- return executable_memory_.find(chunk) != executable_memory_.end();
- }
-
- // Commit memory region owned by given reservation object. Returns true if
- // it succeeded and false otherwise.
- bool CommitMemory(VirtualMemory* reservation);
-
- // Uncommit memory region owned by given reservation object. Returns true if
- // it succeeded and false otherwise.
- bool UncommitMemory(VirtualMemory* reservation);
-
- // Zaps a contiguous block of memory [start..(start+size)[ with
- // a given zap value.
- void ZapBlock(Address start, size_t size, uintptr_t zap_value);
-
- V8_WARN_UNUSED_RESULT bool CommitExecutableMemory(VirtualMemory* vm,
- Address start,
- size_t commit_size,
- size_t reserved_size);
-
- // Page allocator instance for allocating non-executable pages.
- // Guaranteed to be a valid pointer.
- v8::PageAllocator* data_page_allocator() { return data_page_allocator_; }
-
- // Page allocator instance for allocating executable pages.
- // Guaranteed to be a valid pointer.
- v8::PageAllocator* code_page_allocator() { return code_page_allocator_; }
-
- // Returns page allocator suitable for allocating pages with requested
- // executability.
- v8::PageAllocator* page_allocator(Executability executable) {
- return executable == EXECUTABLE ? code_page_allocator_
- : data_page_allocator_;
- }
-
- // A region of memory that may contain executable code, including a reserved
- // OS page with read-write access at the beginning.
- const base::AddressRegion& code_range() const {
- // |code_range_| >= |optional RW pages| + |code_page_allocator_instance_|
- DCHECK_IMPLIES(!code_range_.is_empty(), code_page_allocator_instance_);
- DCHECK_IMPLIES(!code_range_.is_empty(),
- code_range_.contains(code_page_allocator_instance_->begin(),
- code_page_allocator_instance_->size()));
- return code_range_;
- }
-
- Unmapper* unmapper() { return &unmapper_; }
-
- // Performs all necessary bookkeeping to free the memory, but does not free
- // it.
- void UnregisterMemory(MemoryChunk* chunk);
-
- private:
- void InitializeCodePageAllocator(v8::PageAllocator* page_allocator,
- size_t requested);
-
- // PreFreeMemory logically frees the object, i.e., it unregisters the memory,
- // logs a delete event and adds the chunk to remembered unmapped pages.
- void PreFreeMemory(MemoryChunk* chunk);
-
- // PerformFreeMemory can be called concurrently, provided PreFreeMemory was
- // executed beforehand.
- void PerformFreeMemory(MemoryChunk* chunk);
-
- // See AllocatePage for public interface. Note that currently we only support
- // pools for NOT_EXECUTABLE pages of size MemoryChunk::kPageSize.
- template <typename SpaceType>
- MemoryChunk* AllocatePagePooled(SpaceType* owner);
-
- // Initializes pages in a chunk. Returns the first page address.
- // This function and GetChunkId() are provided for the mark-compact
- // collector to rebuild page headers in the from space, which is
- // used as a marking stack and its page headers are destroyed.
- Page* InitializePagesInChunk(int chunk_id, int pages_in_chunk,
- PagedSpace* owner);
-
- void UpdateAllocatedSpaceLimits(Address low, Address high) {
- // The use of atomic primitives does not guarantee correctness (wrt.
- // desired semantics) by default. The loop here ensures that we update the
- // values only if they did not change in between.
- Address ptr = lowest_ever_allocated_.load(std::memory_order_relaxed);
- while ((low < ptr) && !lowest_ever_allocated_.compare_exchange_weak(
- ptr, low, std::memory_order_acq_rel)) {
- }
- ptr = highest_ever_allocated_.load(std::memory_order_relaxed);
- while ((high > ptr) && !highest_ever_allocated_.compare_exchange_weak(
- ptr, high, std::memory_order_acq_rel)) {
- }
- }
-
- void RegisterExecutableMemoryChunk(MemoryChunk* chunk) {
- DCHECK(chunk->IsFlagSet(MemoryChunk::IS_EXECUTABLE));
- DCHECK_EQ(executable_memory_.find(chunk), executable_memory_.end());
- executable_memory_.insert(chunk);
- }
-
- void UnregisterExecutableMemoryChunk(MemoryChunk* chunk) {
- DCHECK_NE(executable_memory_.find(chunk), executable_memory_.end());
- executable_memory_.erase(chunk);
- chunk->heap()->UnregisterUnprotectedMemoryChunk(chunk);
- }
-
- Isolate* isolate_;
-
- // This object controls virtual space reserved for code on the V8 heap. This
- // is only valid for 64-bit architectures where kRequiresCodeRange.
- VirtualMemory code_reservation_;
-
- // Page allocator used for allocating data pages. Depending on the
- // configuration it may be a page allocator instance provided by v8::Platform
- // or a BoundedPageAllocator (when pointer compression is enabled).
- v8::PageAllocator* data_page_allocator_;
-
- // Page allocator used for allocating code pages. Depending on the
- // configuration it may be a page allocator instance provided by v8::Platform
- // or a BoundedPageAllocator (when pointer compression is enabled or
- // on those 64-bit architectures where pc-relative 32-bit displacement
- // can be used for call and jump instructions).
- v8::PageAllocator* code_page_allocator_;
-
- // A part of the |code_reservation_| that may contain executable code
- // including reserved page with read-write access in the beginning.
- // See details below.
- base::AddressRegion code_range_;
-
- // This unique pointer owns the instance of bounded code allocator
- // that controls executable page allocation. It does not control the
- // optional page at the beginning of the |code_range_|.
- // To summarize, the following conditions hold:
- // 1) |code_reservation_| >= |code_range_|
- // 2) |code_range_| >= |optional RW pages| + |code_page_allocator_instance_|.
- // 3) |code_reservation_| is AllocatePageSize()-aligned
- // 4) |code_page_allocator_instance_| is MemoryChunk::kAlignment-aligned
- // 5) |code_range_| is CommitPageSize()-aligned
- std::unique_ptr<base::BoundedPageAllocator> code_page_allocator_instance_;
-
- // Maximum space size in bytes.
- size_t capacity_;
-
- // Allocated space size in bytes.
- std::atomic<size_t> size_;
- // Allocated executable space size in bytes.
- std::atomic<size_t> size_executable_;
-
- // We keep the lowest and highest addresses allocated as a quick way
- // of determining that pointers are outside the heap. The estimate is
- // conservative, i.e. not all addresses in 'allocated' space are allocated
- // to our heap. The range is [lowest, highest[, inclusive on the low end
- // and exclusive on the high end.
- std::atomic<Address> lowest_ever_allocated_;
- std::atomic<Address> highest_ever_allocated_;
-
- VirtualMemory last_chunk_;
- Unmapper unmapper_;
-
- // Data structure to remember allocated executable memory chunks.
- std::unordered_set<MemoryChunk*> executable_memory_;
-
- friend class heap::TestCodePageAllocatorScope;
-
- DISALLOW_IMPLICIT_CONSTRUCTORS(MemoryAllocator);
-};
-
-extern template EXPORT_TEMPLATE_DECLARE(V8_EXPORT_PRIVATE)
- Page* MemoryAllocator::AllocatePage<MemoryAllocator::kRegular, PagedSpace>(
- size_t size, PagedSpace* owner, Executability executable);
-extern template EXPORT_TEMPLATE_DECLARE(V8_EXPORT_PRIVATE)
- Page* MemoryAllocator::AllocatePage<MemoryAllocator::kRegular, SemiSpace>(
- size_t size, SemiSpace* owner, Executability executable);
-extern template EXPORT_TEMPLATE_DECLARE(V8_EXPORT_PRIVATE)
- Page* MemoryAllocator::AllocatePage<MemoryAllocator::kPooled, SemiSpace>(
- size_t size, SemiSpace* owner, Executability executable);
-
-extern template EXPORT_TEMPLATE_DECLARE(
- V8_EXPORT_PRIVATE) void MemoryAllocator::
- Free<MemoryAllocator::kFull>(MemoryChunk* chunk);
-extern template EXPORT_TEMPLATE_DECLARE(
- V8_EXPORT_PRIVATE) void MemoryAllocator::
- Free<MemoryAllocator::kAlreadyPooled>(MemoryChunk* chunk);
-extern template EXPORT_TEMPLATE_DECLARE(
- V8_EXPORT_PRIVATE) void MemoryAllocator::
- Free<MemoryAllocator::kPreFreeAndQueue>(MemoryChunk* chunk);
-extern template EXPORT_TEMPLATE_DECLARE(
- V8_EXPORT_PRIVATE) void MemoryAllocator::
- Free<MemoryAllocator::kPooledAndQueue>(MemoryChunk* chunk);
-
// -----------------------------------------------------------------------------
// Interface for heap object iterator to be implemented by all object space
// object iterators.
@@ -1107,6 +350,7 @@ class PageIteratorImpl
};
using PageIterator = PageIteratorImpl<Page>;
+using ConstPageIterator = PageIteratorImpl<const Page>;
using LargePageIterator = PageIteratorImpl<LargePage>;
class PageRange {
@@ -1125,44 +369,6 @@ class PageRange {
};
// -----------------------------------------------------------------------------
-// Heap object iterator in new/old/map spaces.
-//
-// A PagedSpaceObjectIterator iterates objects from the bottom of the given
-// space to its top or from the bottom of the given page to its top.
-//
-// If objects are allocated in the page during iteration the iterator may
-// or may not iterate over those objects. The caller must create a new
-// iterator in order to be sure to visit these new objects.
-class V8_EXPORT_PRIVATE PagedSpaceObjectIterator : public ObjectIterator {
- public:
- // Creates a new object iterator in a given space.
- PagedSpaceObjectIterator(Heap* heap, PagedSpace* space);
- PagedSpaceObjectIterator(Heap* heap, PagedSpace* space, Page* page);
-
- // Creates a new object iterator in a given off-thread space.
- explicit PagedSpaceObjectIterator(OffThreadSpace* space);
-
- // Advance to the next object, skipping free spaces and other fillers and
- // skipping the special garbage section of which there is one per space.
- // Returns nullptr when the iteration has ended.
- inline HeapObject Next() override;
-
- private:
- // Fast (inlined) path of next().
- inline HeapObject FromCurrentPage();
-
- // Slow path of next(), goes into the next page. Returns false if the
- // iteration has ended.
- bool AdvanceToNextPage();
-
- Address cur_addr_; // Current iteration point.
- Address cur_end_; // End iteration point.
- PagedSpace* space_;
- PageRange page_range_;
- PageRange::iterator current_page_;
-};
-
-// -----------------------------------------------------------------------------
// A space has a circular list of pages. The next page can be accessed via
// Page::next_page() call.
@@ -1211,477 +417,6 @@ class LinearAllocationArea {
Address limit_;
};
-// An abstraction of the accounting statistics of a page-structured space.
-//
-// The stats are only set by functions that ensure they stay balanced. These
-// functions increase or decrease one of the non-capacity stats in conjunction
-// with capacity, or else they always balance increases and decreases to the
-// non-capacity stats.
-class AllocationStats {
- public:
- AllocationStats() { Clear(); }
-
- AllocationStats& operator=(const AllocationStats& stats) V8_NOEXCEPT {
- capacity_ = stats.capacity_.load();
- max_capacity_ = stats.max_capacity_;
- size_.store(stats.size_);
-#ifdef DEBUG
- allocated_on_page_ = stats.allocated_on_page_;
-#endif
- return *this;
- }
-
- // Zero out all the allocation statistics (i.e., no capacity).
- void Clear() {
- capacity_ = 0;
- max_capacity_ = 0;
- ClearSize();
- }
-
- void ClearSize() {
- size_ = 0;
-#ifdef DEBUG
- allocated_on_page_.clear();
-#endif
- }
-
- // Accessors for the allocation statistics.
- size_t Capacity() { return capacity_; }
- size_t MaxCapacity() { return max_capacity_; }
- size_t Size() { return size_; }
-#ifdef DEBUG
- size_t AllocatedOnPage(Page* page) { return allocated_on_page_[page]; }
-#endif
-
- void IncreaseAllocatedBytes(size_t bytes, Page* page) {
-#ifdef DEBUG
- size_t size = size_;
- DCHECK_GE(size + bytes, size);
-#endif
- size_.fetch_add(bytes);
-#ifdef DEBUG
- allocated_on_page_[page] += bytes;
-#endif
- }
-
- void DecreaseAllocatedBytes(size_t bytes, Page* page) {
- DCHECK_GE(size_, bytes);
- size_.fetch_sub(bytes);
-#ifdef DEBUG
- DCHECK_GE(allocated_on_page_[page], bytes);
- allocated_on_page_[page] -= bytes;
-#endif
- }
-
- void DecreaseCapacity(size_t bytes) {
- DCHECK_GE(capacity_, bytes);
- DCHECK_GE(capacity_ - bytes, size_);
- capacity_ -= bytes;
- }
-
- void IncreaseCapacity(size_t bytes) {
- DCHECK_GE(capacity_ + bytes, capacity_);
- capacity_ += bytes;
- if (capacity_ > max_capacity_) {
- max_capacity_ = capacity_;
- }
- }
-
- private:
- // |capacity_|: The number of object-area bytes (i.e., not including page
- // bookkeeping structures) currently in the space.
- // During evacuation capacity of the main spaces is accessed from multiple
- // threads to check the old generation hard limit.
- std::atomic<size_t> capacity_;
-
- // |max_capacity_|: The maximum capacity ever observed.
- size_t max_capacity_;
-
- // |size_|: The number of allocated bytes.
- std::atomic<size_t> size_;
-
-#ifdef DEBUG
- std::unordered_map<Page*, size_t, Page::Hasher> allocated_on_page_;
-#endif
-};
-
-// The free list is organized in categories as follows:
-// kMinBlockSize-10 words (tiniest): The tiniest blocks are only used for
-// allocation, when categories >= small do not have entries anymore.
-// 11-31 words (tiny): The tiny blocks are only used for allocation, when
-// categories >= small do not have entries anymore.
-// 32-255 words (small): Used for allocating free space between 1-31 words in
-// size.
-// 256-2047 words (medium): Used for allocating free space between 32-255 words
-// in size.
- // 2048-16383 words (large): Used for allocating free space between 256-2047
-// words in size.
-// At least 16384 words (huge): This list is for objects of 2048 words or
-// larger. Empty pages are also added to this list.
-class V8_EXPORT_PRIVATE FreeListLegacy : public FreeList {
- public:
- size_t GuaranteedAllocatable(size_t maximum_freed) override {
- if (maximum_freed <= kTiniestListMax) {
- // Since we are not iterating over all list entries, we cannot guarantee
- // that we can find the maximum freed block in that free list.
- return 0;
- } else if (maximum_freed <= kTinyListMax) {
- return kTinyAllocationMax;
- } else if (maximum_freed <= kSmallListMax) {
- return kSmallAllocationMax;
- } else if (maximum_freed <= kMediumListMax) {
- return kMediumAllocationMax;
- } else if (maximum_freed <= kLargeListMax) {
- return kLargeAllocationMax;
- }
- return maximum_freed;
- }
-
- inline Page* GetPageForSize(size_t size_in_bytes) override;
-
- FreeListLegacy();
- ~FreeListLegacy() override;
-
- V8_WARN_UNUSED_RESULT FreeSpace Allocate(size_t size_in_bytes,
- size_t* node_size,
- AllocationOrigin origin) override;
-
- private:
- enum { kTiniest, kTiny, kSmall, kMedium, kLarge, kHuge };
-
- static const size_t kMinBlockSize = 3 * kTaggedSize;
-
- // This is a conservative upper bound. The actual maximum block size takes
- // padding and alignment of data and code pages into account.
- static const size_t kMaxBlockSize = Page::kPageSize;
-
- static const size_t kTiniestListMax = 0xa * kTaggedSize;
- static const size_t kTinyListMax = 0x1f * kTaggedSize;
- static const size_t kSmallListMax = 0xff * kTaggedSize;
- static const size_t kMediumListMax = 0x7ff * kTaggedSize;
- static const size_t kLargeListMax = 0x1fff * kTaggedSize;
- static const size_t kTinyAllocationMax = kTiniestListMax;
- static const size_t kSmallAllocationMax = kTinyListMax;
- static const size_t kMediumAllocationMax = kSmallListMax;
- static const size_t kLargeAllocationMax = kMediumListMax;
-
- FreeListCategoryType SelectFreeListCategoryType(
- size_t size_in_bytes) override {
- if (size_in_bytes <= kTiniestListMax) {
- return kTiniest;
- } else if (size_in_bytes <= kTinyListMax) {
- return kTiny;
- } else if (size_in_bytes <= kSmallListMax) {
- return kSmall;
- } else if (size_in_bytes <= kMediumListMax) {
- return kMedium;
- } else if (size_in_bytes <= kLargeListMax) {
- return kLarge;
- }
- return kHuge;
- }
-
- // Returns the category to be used to allocate |size_in_bytes| in the fast
- // path. The tiny categories are not used for fast allocation.
- FreeListCategoryType SelectFastAllocationFreeListCategoryType(
- size_t size_in_bytes) {
- if (size_in_bytes <= kSmallAllocationMax) {
- return kSmall;
- } else if (size_in_bytes <= kMediumAllocationMax) {
- return kMedium;
- } else if (size_in_bytes <= kLargeAllocationMax) {
- return kLarge;
- }
- return kHuge;
- }
-
- friend class FreeListCategory;
- friend class heap::HeapTester;
-};
-
-// Inspired by FreeListLegacy.
-// Only has 3 categories: Medium, Large and Huge.
-// Any block that would have belonged to tiniest, tiny or small in
-// FreeListLegacy is considered wasted.
-// Allocation is done only in Huge, Medium and Large (in that order),
-// using a first-fit strategy (only the first block of each free list is ever
-// considered, though). Performance is expected to be better than with
-// FreeListLegacy, but memory usage should be higher (because fragmentation
-// will probably be higher).
-class V8_EXPORT_PRIVATE FreeListFastAlloc : public FreeList {
- public:
- size_t GuaranteedAllocatable(size_t maximum_freed) override {
- if (maximum_freed <= kMediumListMax) {
- // Since we are not iterating over all list entries, we cannot guarantee
- // that we can find the maximum freed block in that free list.
- return 0;
- } else if (maximum_freed <= kLargeListMax) {
- return kLargeAllocationMax;
- }
- return kHugeAllocationMax;
- }
-
- inline Page* GetPageForSize(size_t size_in_bytes) override;
-
- FreeListFastAlloc();
- ~FreeListFastAlloc() override;
-
- V8_WARN_UNUSED_RESULT FreeSpace Allocate(size_t size_in_bytes,
- size_t* node_size,
- AllocationOrigin origin) override;
-
- private:
- enum { kMedium, kLarge, kHuge };
-
- static const size_t kMinBlockSize = 0xff * kTaggedSize;
-
- // This is a conservative upper bound. The actual maximum block size takes
- // padding and alignment of data and code pages into account.
- static const size_t kMaxBlockSize = Page::kPageSize;
-
- static const size_t kMediumListMax = 0x7ff * kTaggedSize;
- static const size_t kLargeListMax = 0x1fff * kTaggedSize;
- static const size_t kMediumAllocationMax = kMinBlockSize;
- static const size_t kLargeAllocationMax = kMediumListMax;
- static const size_t kHugeAllocationMax = kLargeListMax;
-
- // Returns the category used to hold an object of size |size_in_bytes|.
- FreeListCategoryType SelectFreeListCategoryType(
- size_t size_in_bytes) override {
- if (size_in_bytes <= kMediumListMax) {
- return kMedium;
- } else if (size_in_bytes <= kLargeListMax) {
- return kLarge;
- }
- return kHuge;
- }
-};
-
-// Uses 24 free lists: one per 16 bytes between 24 and 256, plus a few more for
-// larger sizes. See the variable |categories_min| for the minimum size of each
-// free list. Allocation is done using a best-fit strategy (considering only
-// the first element of each category, though).
-// Performance is expected to be worse than with FreeListLegacy, but memory
-// consumption should be lower (since fragmentation should be lower).
-class V8_EXPORT_PRIVATE FreeListMany : public FreeList {
- public:
- size_t GuaranteedAllocatable(size_t maximum_freed) override;
-
- Page* GetPageForSize(size_t size_in_bytes) override;
-
- FreeListMany();
- ~FreeListMany() override;
-
- V8_WARN_UNUSED_RESULT FreeSpace Allocate(size_t size_in_bytes,
- size_t* node_size,
- AllocationOrigin origin) override;
-
- protected:
- static const size_t kMinBlockSize = 3 * kTaggedSize;
-
- // This is a conservative upper bound. The actual maximum block size takes
- // padding and alignment of data and code pages into account.
- static const size_t kMaxBlockSize = Page::kPageSize;
- // Largest size for which categories are still precise, and for which we can
- // therefore compute the category in constant time.
- static const size_t kPreciseCategoryMaxSize = 256;
-
- // Categories boundaries generated with:
- // perl -E '
- // @cat = (24, map {$_*16} 2..16, 48, 64);
- // while ($cat[-1] <= 32768) {
- // push @cat, $cat[-1]*2
- // }
- // say join ", ", @cat;
- // say "\n", scalar @cat'
- static const int kNumberOfCategories = 24;
- static constexpr unsigned int categories_min[kNumberOfCategories] = {
- 24, 32, 48, 64, 80, 96, 112, 128, 144, 160, 176, 192,
- 208, 224, 240, 256, 512, 1024, 2048, 4096, 8192, 16384, 32768, 65536};
-
- // Return the smallest category that could hold |size_in_bytes| bytes.
- FreeListCategoryType SelectFreeListCategoryType(
- size_t size_in_bytes) override {
- if (size_in_bytes <= kPreciseCategoryMaxSize) {
- if (size_in_bytes < categories_min[1]) return 0;
- return static_cast<FreeListCategoryType>(size_in_bytes >> 4) - 1;
- }
- for (int cat = (kPreciseCategoryMaxSize >> 4) - 1; cat < last_category_;
- cat++) {
- if (size_in_bytes < categories_min[cat + 1]) {
- return cat;
- }
- }
- return last_category_;
- }
-
- FRIEND_TEST(SpacesTest, FreeListManySelectFreeListCategoryType);
- FRIEND_TEST(SpacesTest, FreeListManyGuaranteedAllocatable);
-};
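A standalone re-implementation of the category lookup above may make the arithmetic easier to follow. The table is copied from |categories_min|; everything else is an illustrative sketch rather than the real class:

#include <cassert>
#include <cstddef>

constexpr unsigned kMin[24] = {24,  32,  48,   64,   80,   96,    112,   128,
                               144, 160, 176,  192,  208,  224,   240,   256,
                               512, 1024, 2048, 4096, 8192, 16384, 32768, 65536};
constexpr int kLast = 23;
constexpr size_t kPreciseMax = 256;

int SelectCategory(size_t size) {
  if (size <= kPreciseMax) {
    if (size < kMin[1]) return 0;
    return static_cast<int>(size >> 4) - 1;  // 16-byte-wide buckets up to 256
  }
  for (int cat = (kPreciseMax >> 4) - 1; cat < kLast; cat++) {
    if (size < kMin[cat + 1]) return cat;
  }
  return kLast;
}

int main() {
  assert(SelectCategory(200) == 11);      // bucket [192, 208)
  assert(SelectCategory(256) == 15);      // bucket [256, 512)
  assert(SelectCategory(300) == 15);      // still [256, 512)
  assert(SelectCategory(5000) == 19);     // bucket [4096, 8192)
  assert(SelectCategory(1 << 20) == 23);  // everything >= 65536
  return 0;
}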
-
-// Same as FreeListMany but uses a cache to know which categories are empty.
-// The cache (|next_nonempty_category|) is maintained in a way such that for
-// each category c, next_nonempty_category[c] contains the first non-empty
-// category greater than or equal to c that may hold an object of size c.
-// Allocation is done using the same strategy as FreeListMany (i.e., best fit).
-class V8_EXPORT_PRIVATE FreeListManyCached : public FreeListMany {
- public:
- FreeListManyCached();
-
- V8_WARN_UNUSED_RESULT FreeSpace Allocate(size_t size_in_bytes,
- size_t* node_size,
- AllocationOrigin origin) override;
-
- size_t Free(Address start, size_t size_in_bytes, FreeMode mode) override;
-
- void Reset() override;
-
- bool AddCategory(FreeListCategory* category) override;
- void RemoveCategory(FreeListCategory* category) override;
-
- protected:
- // Updates the cache after adding something in the category |cat|.
- void UpdateCacheAfterAddition(FreeListCategoryType cat) {
- for (int i = cat; i >= kFirstCategory && next_nonempty_category[i] > cat;
- i--) {
- next_nonempty_category[i] = cat;
- }
- }
-
- // Updates the cache after emptying category |cat|.
- void UpdateCacheAfterRemoval(FreeListCategoryType cat) {
- for (int i = cat; i >= kFirstCategory && next_nonempty_category[i] == cat;
- i--) {
- next_nonempty_category[i] = next_nonempty_category[cat + 1];
- }
- }
-
-#ifdef DEBUG
- void CheckCacheIntegrity() {
- for (int i = 0; i <= last_category_; i++) {
- DCHECK(next_nonempty_category[i] == last_category_ + 1 ||
- categories_[next_nonempty_category[i]] != nullptr);
- for (int j = i; j < next_nonempty_category[i]; j++) {
- DCHECK(categories_[j] == nullptr);
- }
- }
- }
-#endif
-
- // The cache is overallocated by one so that the last element is always
- // defined, and when updating the cache, we can always use cache[i+1] as long
- // as i is < kNumberOfCategories.
- int next_nonempty_category[kNumberOfCategories + 1];
-
- private:
- void ResetCache() {
- for (int i = 0; i < kNumberOfCategories; i++) {
- next_nonempty_category[i] = kNumberOfCategories;
- }
- // Setting the after-last element as well, as explained in the cache's
- // declaration.
- next_nonempty_category[kNumberOfCategories] = kNumberOfCategories;
- }
-};
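The invariant of |next_nonempty_category| is easiest to see on a plain array. The following standalone sketch (an illustration only, not part of this change) replays the two update routines above on a small scenario and checks the resulting cache state.

#include <cassert>

constexpr int kNumberOfCategories = 24;
constexpr int kFirstCategory = 0;

struct CacheModel {
  // cache[c] == first non-empty category >= c; kNumberOfCategories means
  // "no non-empty category at or above c".
  int next_nonempty_category[kNumberOfCategories + 1];

  CacheModel() {
    for (int i = 0; i <= kNumberOfCategories; i++) {
      next_nonempty_category[i] = kNumberOfCategories;
    }
  }

  // Mirrors UpdateCacheAfterAddition: |cat| just became non-empty.
  void AfterAddition(int cat) {
    for (int i = cat; i >= kFirstCategory && next_nonempty_category[i] > cat;
         i--) {
      next_nonempty_category[i] = cat;
    }
  }

  // Mirrors UpdateCacheAfterRemoval: |cat| just became empty.
  void AfterRemoval(int cat) {
    for (int i = cat; i >= kFirstCategory && next_nonempty_category[i] == cat;
         i--) {
      next_nonempty_category[i] = next_nonempty_category[cat + 1];
    }
  }
};

int main() {
  CacheModel cache;
  cache.AfterAddition(5);                         // category 5 becomes non-empty
  cache.AfterAddition(10);                        // category 10 becomes non-empty
  assert(cache.next_nonempty_category[0] == 5);   // first hit from 0 is 5
  assert(cache.next_nonempty_category[6] == 10);  // first hit from 6 is 10
  cache.AfterRemoval(5);                          // category 5 becomes empty again
  assert(cache.next_nonempty_category[0] == 10);
  assert(cache.next_nonempty_category[11] == kNumberOfCategories);  // nothing left
  return 0;
}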
-
-// Same as FreeListManyCached but uses a fast path.
-// The fast path overallocates by at least 1.85k bytes. The idea of this 1.85k
-// is: we want the fast path to always overallocate, even for larger
-// categories. Therefore, we have two choices: either overallocate by
-// "size_in_bytes * something" or overallocate by "size_in_bytes +
-// something". We choose the latter, as the former will tend to overallocate too
-// much for larger objects. The 1.85k (= 2048 - 128) has been chosen such that
-// for tiny objects (size <= 128 bytes), the first category considered is the
-// 18th (which holds objects of 2k to 4k), while for larger objects, the first
-// category considered will be one that guarantees a 1.85k+ bytes
-// overallocation. Using 2k rather than 1.85k would have resulted in either a
-// more complex logic for SelectFastAllocationFreeListCategoryType, or the 36th
-// category (2k to 3k) not being used; both of which are undesirable.
-// A secondary fast path is used for tiny objects (size <= 128), in order to
-// consider categories from 256 to 2048 bytes for them.
-// Note that this class uses a precise GetPageForSize (inherited from
-// FreeListMany), which makes its fast path less fast in the Scavenger. This is
-// done on purpose, since this class's only purpose is to be used by
-// FreeListManyCachedOrigin, which is precise for the scavenger.
-class V8_EXPORT_PRIVATE FreeListManyCachedFastPath : public FreeListManyCached {
- public:
- V8_WARN_UNUSED_RESULT FreeSpace Allocate(size_t size_in_bytes,
- size_t* node_size,
- AllocationOrigin origin) override;
-
- protected:
- // Objects in the 18th category are at least 2048 bytes
- static const FreeListCategoryType kFastPathFirstCategory = 18;
- static const size_t kFastPathStart = 2048;
- static const size_t kTinyObjectMaxSize = 128;
- static const size_t kFastPathOffset = kFastPathStart - kTinyObjectMaxSize;
- // Objects in the 15th category are at least 256 bytes
- static const FreeListCategoryType kFastPathFallBackTiny = 15;
-
- STATIC_ASSERT(categories_min[kFastPathFirstCategory] == kFastPathStart);
- STATIC_ASSERT(categories_min[kFastPathFallBackTiny] ==
- kTinyObjectMaxSize * 2);
-
- FreeListCategoryType SelectFastAllocationFreeListCategoryType(
- size_t size_in_bytes) {
- DCHECK(size_in_bytes < kMaxBlockSize);
-
- if (size_in_bytes >= categories_min[last_category_]) return last_category_;
-
- size_in_bytes += kFastPathOffset;
- for (int cat = kFastPathFirstCategory; cat < last_category_; cat++) {
- if (size_in_bytes <= categories_min[cat]) {
- return cat;
- }
- }
- return last_category_;
- }
-
- FRIEND_TEST(
- SpacesTest,
- FreeListManyCachedFastPathSelectFastAllocationFreeListCategoryType);
-};
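The effect of kFastPathOffset is clearer with concrete numbers. The sketch below (an illustration only, not part of this change; constants copied from the removed classes above) shows which category the fast path tries first for a few request sizes, and in particular why requests of up to 128 bytes land in the 2048-byte category.

#include <cstddef>
#include <cstdio>

constexpr unsigned categories_min[24] = {
    24,  32,  48,  64,  80,   96,   112,  128,  144,   160,   176,   192,
    208, 224, 240, 256, 512, 1024, 2048, 4096, 8192, 16384, 32768, 65536};
constexpr int kLastCategory = 23;
constexpr int kFastPathFirstCategory = 18;  // blocks of at least 2048 bytes
constexpr size_t kFastPathStart = 2048;
constexpr size_t kTinyObjectMaxSize = 128;
constexpr size_t kFastPathOffset = kFastPathStart - kTinyObjectMaxSize;  // 1920

// Mirrors SelectFastAllocationFreeListCategoryType.
int SelectFastCategory(size_t size_in_bytes) {
  if (size_in_bytes >= categories_min[kLastCategory]) return kLastCategory;
  size_in_bytes += kFastPathOffset;  // guarantees the over-allocation discussed above
  for (int cat = kFastPathFirstCategory; cat < kLastCategory; cat++) {
    if (size_in_bytes <= categories_min[cat]) return cat;
  }
  return kLastCategory;
}

int main() {
  for (size_t size : {16u, 128u, 129u, 3000u, 70000u}) {
    int cat = SelectFastCategory(size);
    printf("request %6zu -> first category tried %2d (blocks of %u+ bytes)\n",
           size, cat, categories_min[cat]);
  }
  return 0;
}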
-
-// Uses FreeListManyCached if in the GC; FreeListManyCachedFastPath otherwise.
-// The reasoning behind this FreeList is the following: the GC runs in
-// parallel, and therefore, more expensive allocations there are less
-// noticeable. On the other hand, the generated code and runtime need to be very
-// fast. Therefore, the strategy for the former is one that is not very
-// efficient, but reduces fragmentation (FreeListManyCached), while the strategy
-// for the latter is one that is very efficient, but introduces some
-// fragmentation (FreeListManyCachedFastPath).
-class V8_EXPORT_PRIVATE FreeListManyCachedOrigin
- : public FreeListManyCachedFastPath {
- public:
- V8_WARN_UNUSED_RESULT FreeSpace Allocate(size_t size_in_bytes,
- size_t* node_size,
- AllocationOrigin origin) override;
-};
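For completeness, a standalone sketch of the origin-based dispatch described in the comment above (an illustration only, not the real override; the enum below is a simplified stand-in): GC-originated allocations take the best-fit path, everything else takes the fast path.

#include <cstdio>

enum class AllocationOrigin { kGeneratedCode, kRuntime, kGC };

// GC allocations run in parallel and tolerate the slower, less fragmenting
// best-fit search; runtime and generated code take the fast path.
const char* StrategyFor(AllocationOrigin origin) {
  return origin == AllocationOrigin::kGC ? "FreeListManyCached (best fit)"
                                         : "FreeListManyCachedFastPath";
}

int main() {
  printf("GC allocation        -> %s\n", StrategyFor(AllocationOrigin::kGC));
  printf("runtime allocation   -> %s\n", StrategyFor(AllocationOrigin::kRuntime));
  printf("generated code alloc -> %s\n",
         StrategyFor(AllocationOrigin::kGeneratedCode));
  return 0;
}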
-
-// FreeList for maps: since maps are all the same size, uses a single freelist.
-class V8_EXPORT_PRIVATE FreeListMap : public FreeList {
- public:
- size_t GuaranteedAllocatable(size_t maximum_freed) override;
-
- Page* GetPageForSize(size_t size_in_bytes) override;
-
- FreeListMap();
- ~FreeListMap() override;
-
- V8_WARN_UNUSED_RESULT FreeSpace Allocate(size_t size_in_bytes,
- size_t* node_size,
- AllocationOrigin origin) override;
-
- private:
- static const size_t kMinBlockSize = Map::kSize;
- static const size_t kMaxBlockSize = Page::kPageSize;
- static const FreeListCategoryType kOnlyCategory = 0;
-
- FreeListCategoryType SelectFreeListCategoryType(
- size_t size_in_bytes) override {
- return kOnlyCategory;
- }
-};
// LocalAllocationBuffer represents a linear allocation area that is created
// from a given {AllocationResult} and can be used to allocate memory without
@@ -1740,6 +475,9 @@ class LocalAllocationBuffer {
V8_EXPORT_PRIVATE LinearAllocationArea CloseAndMakeIterable();
void MakeIterable();
+ Address top() const { return allocation_info_.top(); }
+ Address limit() const { return allocation_info_.limit(); }
+
private:
V8_EXPORT_PRIVATE LocalAllocationBuffer(
Heap* heap, LinearAllocationArea allocation_info) V8_NOEXCEPT;
@@ -1811,794 +549,6 @@ class SpaceWithLinearArea : public Space {
AllocationOrigin::kNumberOfAllocationOrigins)] = {0};
};
-class V8_EXPORT_PRIVATE PagedSpace
- : NON_EXPORTED_BASE(public SpaceWithLinearArea) {
- public:
- using iterator = PageIterator;
-
- static const size_t kCompactionMemoryWanted = 500 * KB;
-
- // Creates a space with an id.
- PagedSpace(Heap* heap, AllocationSpace id, Executability executable,
- FreeList* free_list,
- LocalSpaceKind local_space_kind = LocalSpaceKind::kNone);
-
- ~PagedSpace() override { TearDown(); }
-
- // Checks whether an object/address is in this space.
- inline bool Contains(Address a);
- inline bool Contains(Object o);
- bool ContainsSlow(Address addr);
-
- // Does the space need executable memory?
- Executability executable() { return executable_; }
-
- // Prepares for a mark-compact GC.
- void PrepareForMarkCompact();
-
- // Current capacity without growing (Size() + Available()).
- size_t Capacity() { return accounting_stats_.Capacity(); }
-
- // Approximate amount of physical memory committed for this space.
- size_t CommittedPhysicalMemory() override;
-
- // Sets the capacity, the available space and the wasted space to zero.
- // The stats are rebuilt during sweeping by adding each page to the
- // capacity and the size when it is encountered. As free spaces are
- // discovered during the sweeping they are subtracted from the size and added
- // to the available and wasted totals. The free list is cleared as well.
- void ClearAllocatorState() {
- accounting_stats_.ClearSize();
- free_list_->Reset();
- }
-
- // Available bytes without growing. These are the bytes on the free list.
- // The bytes in the linear allocation area are not included in this total
- // because updating the stats would slow down allocation. New pages are
- // immediately added to the free list so they show up here.
- size_t Available() override { return free_list_->Available(); }
-
- // Allocated bytes in this space. Garbage bytes that were not found due to
- // concurrent sweeping are counted as being allocated! The bytes in the
- // current linear allocation area (between top and limit) are also counted
- // here.
- size_t Size() override { return accounting_stats_.Size(); }
-
- // As size, but the bytes in lazily swept pages are estimated and the bytes
- // in the current linear allocation area are not included.
- size_t SizeOfObjects() override;
-
- // Wasted bytes in this space. These are just the bytes that were thrown away
- // due to being too small to use for allocation.
- virtual size_t Waste() { return free_list_->wasted_bytes(); }
-
- // Allocate the requested number of bytes in the space if possible, return a
- // failure object if not.
- V8_WARN_UNUSED_RESULT inline AllocationResult AllocateRawUnaligned(
- int size_in_bytes, AllocationOrigin origin = AllocationOrigin::kRuntime);
-
- // Allocate the requested number of bytes in the space double aligned if
- // possible, return a failure object if not.
- V8_WARN_UNUSED_RESULT inline AllocationResult AllocateRawAligned(
- int size_in_bytes, AllocationAlignment alignment,
- AllocationOrigin origin = AllocationOrigin::kRuntime);
-
- // Allocate the requested number of bytes in the space and consider allocation
- // alignment if needed.
- V8_WARN_UNUSED_RESULT inline AllocationResult AllocateRaw(
- int size_in_bytes, AllocationAlignment alignment,
- AllocationOrigin origin = AllocationOrigin::kRuntime);
-
- // Allocate the requested number of bytes in the space from a background
- // thread.
- V8_WARN_UNUSED_RESULT base::Optional<std::pair<Address, size_t>>
- SlowGetLinearAllocationAreaBackground(LocalHeap* local_heap,
- size_t min_size_in_bytes,
- size_t max_size_in_bytes,
- AllocationAlignment alignment,
- AllocationOrigin origin);
-
- size_t Free(Address start, size_t size_in_bytes, SpaceAccountingMode mode) {
- if (size_in_bytes == 0) return 0;
- heap()->CreateFillerObjectAt(start, static_cast<int>(size_in_bytes),
- ClearRecordedSlots::kNo);
- if (mode == SpaceAccountingMode::kSpaceAccounted) {
- return AccountedFree(start, size_in_bytes);
- } else {
- return UnaccountedFree(start, size_in_bytes);
- }
- }
-
- // Give a block of memory to the space's free list. It might be added to
- // the free list or accounted as waste.
-  // AccountedFree also updates the space's accounting stats, while
-  // UnaccountedFree leaves them untouched.
- size_t AccountedFree(Address start, size_t size_in_bytes) {
- size_t wasted = free_list_->Free(start, size_in_bytes, kLinkCategory);
- Page* page = Page::FromAddress(start);
- accounting_stats_.DecreaseAllocatedBytes(size_in_bytes, page);
- DCHECK_GE(size_in_bytes, wasted);
- return size_in_bytes - wasted;
- }
-
- size_t UnaccountedFree(Address start, size_t size_in_bytes) {
- size_t wasted = free_list_->Free(start, size_in_bytes, kDoNotLinkCategory);
- DCHECK_GE(size_in_bytes, wasted);
- return size_in_bytes - wasted;
- }
-
- inline bool TryFreeLast(HeapObject object, int object_size);
-
- void ResetFreeList();
-
- // Empty space linear allocation area, returning unused area to free list.
- void FreeLinearAllocationArea();
-
- void MarkLinearAllocationAreaBlack();
- void UnmarkLinearAllocationArea();
-
- void DecreaseAllocatedBytes(size_t bytes, Page* page) {
- accounting_stats_.DecreaseAllocatedBytes(bytes, page);
- }
- void IncreaseAllocatedBytes(size_t bytes, Page* page) {
- accounting_stats_.IncreaseAllocatedBytes(bytes, page);
- }
- void DecreaseCapacity(size_t bytes) {
- accounting_stats_.DecreaseCapacity(bytes);
- }
- void IncreaseCapacity(size_t bytes) {
- accounting_stats_.IncreaseCapacity(bytes);
- }
-
- void RefineAllocatedBytesAfterSweeping(Page* page);
-
- Page* InitializePage(MemoryChunk* chunk);
-
- void ReleasePage(Page* page);
-
- // Adds the page to this space and returns the number of bytes added to the
- // free list of the space.
- size_t AddPage(Page* page);
- void RemovePage(Page* page);
- // Remove a page if it has at least |size_in_bytes| bytes available that can
- // be used for allocation.
- Page* RemovePageSafe(int size_in_bytes);
-
- void SetReadable();
- void SetReadAndExecutable();
- void SetReadAndWritable();
-
- void SetDefaultCodePermissions() {
- if (FLAG_jitless) {
- SetReadable();
- } else {
- SetReadAndExecutable();
- }
- }
-
-#ifdef VERIFY_HEAP
- // Verify integrity of this space.
- virtual void Verify(Isolate* isolate, ObjectVisitor* visitor);
-
- void VerifyLiveBytes();
-
- // Overridden by subclasses to verify space-specific object
- // properties (e.g., only maps or free-list nodes are in map space).
- virtual void VerifyObject(HeapObject obj) {}
-#endif
-
-#ifdef DEBUG
- void VerifyCountersAfterSweeping(Heap* heap);
- void VerifyCountersBeforeConcurrentSweeping();
- // Print meta info and objects in this space.
- void Print() override;
-
- // Report code object related statistics
- static void ReportCodeStatistics(Isolate* isolate);
- static void ResetCodeStatistics(Isolate* isolate);
-#endif
-
- bool CanExpand(size_t size);
-
- // Returns the number of total pages in this space.
- int CountTotalPages();
-
- // Return size of allocatable area on a page in this space.
- inline int AreaSize() { return static_cast<int>(area_size_); }
-
- bool is_local_space() { return local_space_kind_ != LocalSpaceKind::kNone; }
-
- bool is_off_thread_space() {
- return local_space_kind_ == LocalSpaceKind::kOffThreadSpace;
- }
-
- bool is_compaction_space() {
- return base::IsInRange(local_space_kind_,
- LocalSpaceKind::kFirstCompactionSpace,
- LocalSpaceKind::kLastCompactionSpace);
- }
-
- LocalSpaceKind local_space_kind() { return local_space_kind_; }
-
- // Merges {other} into the current space. Note that this modifies {other},
- // e.g., removes its bump pointer area and resets statistics.
- void MergeLocalSpace(LocalSpace* other);
-
- // Refills the free list from the corresponding free list filled by the
- // sweeper.
- virtual void RefillFreeList();
-
- base::Mutex* mutex() { return &space_mutex_; }
-
- inline void UnlinkFreeListCategories(Page* page);
- inline size_t RelinkFreeListCategories(Page* page);
-
- Page* first_page() { return reinterpret_cast<Page*>(Space::first_page()); }
-
- iterator begin() { return iterator(first_page()); }
- iterator end() { return iterator(nullptr); }
-
- // Shrink immortal immovable pages of the space to be exactly the size needed
- // using the high water mark.
- void ShrinkImmortalImmovablePages();
-
- size_t ShrinkPageToHighWaterMark(Page* page);
-
- std::unique_ptr<ObjectIterator> GetObjectIterator(Heap* heap) override;
-
- void SetLinearAllocationArea(Address top, Address limit);
-
- private:
- // Set space linear allocation area.
- void SetTopAndLimit(Address top, Address limit) {
- DCHECK(top == limit ||
- Page::FromAddress(top) == Page::FromAddress(limit - 1));
- MemoryChunk::UpdateHighWaterMark(allocation_info_.top());
- allocation_info_.Reset(top, limit);
- }
- void DecreaseLimit(Address new_limit);
- void UpdateInlineAllocationLimit(size_t min_size) override;
- bool SupportsInlineAllocation() override {
- return identity() == OLD_SPACE && !is_local_space();
- }
-
- protected:
- // PagedSpaces that should be included in snapshots have different, i.e.,
- // smaller, initial pages.
- virtual bool snapshotable() { return true; }
-
- bool HasPages() { return first_page() != nullptr; }
-
- // Cleans up the space, frees all pages in this space except those belonging
- // to the initial chunk, uncommits addresses in the initial chunk.
- void TearDown();
-
- // Expands the space by allocating a fixed number of pages. Returns false if
-  // it cannot allocate the requested number of pages from the OS, or if the
-  // hard heap size limit has been hit.
- bool Expand();
-
- // Sets up a linear allocation area that fits the given number of bytes.
- // Returns false if there is not enough space and the caller has to retry
- // after collecting garbage.
- inline bool EnsureLinearAllocationArea(int size_in_bytes,
- AllocationOrigin origin);
- // Allocates an object from the linear allocation area. Assumes that the
-  // linear allocation area is large enough to fit the object.
- inline HeapObject AllocateLinearly(int size_in_bytes);
- // Tries to allocate an aligned object from the linear allocation area.
- // Returns nullptr if the linear allocation area does not fit the object.
- // Otherwise, returns the object pointer and writes the allocation size
-  // (object size + alignment filler size) to |size_in_bytes|.
- inline HeapObject TryAllocateLinearlyAligned(int* size_in_bytes,
- AllocationAlignment alignment);
-
- V8_WARN_UNUSED_RESULT bool RefillLinearAllocationAreaFromFreeList(
- size_t size_in_bytes, AllocationOrigin origin);
-
- // If sweeping is still in progress try to sweep unswept pages. If that is
- // not successful, wait for the sweeper threads and retry free-list
- // allocation. Returns false if there is not enough space and the caller
- // has to retry after collecting garbage.
- V8_WARN_UNUSED_RESULT bool EnsureSweptAndRetryAllocation(
- int size_in_bytes, AllocationOrigin origin);
-
- V8_WARN_UNUSED_RESULT bool SweepAndRetryAllocation(int required_freed_bytes,
- int max_pages,
- int size_in_bytes,
- AllocationOrigin origin);
-
- // Slow path of AllocateRaw. This function is space-dependent. Returns false
- // if there is not enough space and the caller has to retry after
- // collecting garbage.
- V8_WARN_UNUSED_RESULT virtual bool SlowRefillLinearAllocationArea(
- int size_in_bytes, AllocationOrigin origin);
-
- // Implementation of SlowAllocateRaw. Returns false if there is not enough
- // space and the caller has to retry after collecting garbage.
- V8_WARN_UNUSED_RESULT bool RawSlowRefillLinearAllocationArea(
- int size_in_bytes, AllocationOrigin origin);
-
- V8_WARN_UNUSED_RESULT base::Optional<std::pair<Address, size_t>>
- TryAllocationFromFreeListBackground(size_t min_size_in_bytes,
- size_t max_size_in_bytes,
- AllocationAlignment alignment,
- AllocationOrigin origin);
-
- Executability executable_;
-
- LocalSpaceKind local_space_kind_;
-
- size_t area_size_;
-
- // Accounting information for this space.
- AllocationStats accounting_stats_;
-
- // Mutex guarding any concurrent access to the space.
- base::Mutex space_mutex_;
-
- // Mutex guarding concurrent allocation.
- base::Mutex allocation_mutex_;
-
- friend class IncrementalMarking;
- friend class MarkCompactCollector;
-
- // Used in cctest.
- friend class heap::HeapTester;
-};
-
-enum SemiSpaceId { kFromSpace = 0, kToSpace = 1 };
-
-// -----------------------------------------------------------------------------
-// SemiSpace in young generation
-//
-// A SemiSpace is a contiguous chunk of memory holding page-like memory chunks.
-// The mark-compact collector uses the memory of the first page in the from
-// space as a marking stack when tracing live objects.
-class SemiSpace : public Space {
- public:
- using iterator = PageIterator;
-
- static void Swap(SemiSpace* from, SemiSpace* to);
-
- SemiSpace(Heap* heap, SemiSpaceId semispace)
- : Space(heap, NEW_SPACE, new NoFreeList()),
- current_capacity_(0),
- maximum_capacity_(0),
- minimum_capacity_(0),
- age_mark_(kNullAddress),
- committed_(false),
- id_(semispace),
- current_page_(nullptr),
- pages_used_(0) {}
-
- inline bool Contains(HeapObject o);
- inline bool Contains(Object o);
- inline bool ContainsSlow(Address a);
-
- void SetUp(size_t initial_capacity, size_t maximum_capacity);
- void TearDown();
-
- bool Commit();
- bool Uncommit();
- bool is_committed() { return committed_; }
-
- // Grow the semispace to the new capacity. The new capacity requested must
- // be larger than the current capacity and less than the maximum capacity.
- bool GrowTo(size_t new_capacity);
-
- // Shrinks the semispace to the new capacity. The new capacity requested
- // must be more than the amount of used memory in the semispace and less
- // than the current capacity.
- bool ShrinkTo(size_t new_capacity);
-
- bool EnsureCurrentCapacity();
-
- Address space_end() { return memory_chunk_list_.back()->area_end(); }
-
- // Returns the start address of the first page of the space.
- Address space_start() {
- DCHECK_NE(memory_chunk_list_.front(), nullptr);
- return memory_chunk_list_.front()->area_start();
- }
-
- Page* current_page() { return current_page_; }
- int pages_used() { return pages_used_; }
-
- // Returns the start address of the current page of the space.
- Address page_low() { return current_page_->area_start(); }
-
- // Returns one past the end address of the current page of the space.
- Address page_high() { return current_page_->area_end(); }
-
- bool AdvancePage() {
- Page* next_page = current_page_->next_page();
-    // We cannot expand if we have already reached the maximum number of
-    // pages. Note that this check has to account for the next page as well,
-    // since we could potentially fill the whole page after advancing.
- const bool reached_max_pages = (pages_used_ + 1) == max_pages();
- if (next_page == nullptr || reached_max_pages) {
- return false;
- }
- current_page_ = next_page;
- pages_used_++;
- return true;
- }
-
- // Resets the space to using the first page.
- void Reset();
-
- void RemovePage(Page* page);
- void PrependPage(Page* page);
-
- Page* InitializePage(MemoryChunk* chunk);
-
- // Age mark accessors.
- Address age_mark() { return age_mark_; }
- void set_age_mark(Address mark);
-
- // Returns the current capacity of the semispace.
- size_t current_capacity() { return current_capacity_; }
-
- // Returns the maximum capacity of the semispace.
- size_t maximum_capacity() { return maximum_capacity_; }
-
- // Returns the initial capacity of the semispace.
- size_t minimum_capacity() { return minimum_capacity_; }
-
- SemiSpaceId id() { return id_; }
-
- // Approximate amount of physical memory committed for this space.
- size_t CommittedPhysicalMemory() override;
-
- // If we don't have these here then SemiSpace will be abstract. However
- // they should never be called:
-
- size_t Size() override { UNREACHABLE(); }
-
- size_t SizeOfObjects() override { return Size(); }
-
- size_t Available() override { UNREACHABLE(); }
-
- Page* first_page() { return reinterpret_cast<Page*>(Space::first_page()); }
- Page* last_page() { return reinterpret_cast<Page*>(Space::last_page()); }
-
- iterator begin() { return iterator(first_page()); }
- iterator end() { return iterator(nullptr); }
-
- std::unique_ptr<ObjectIterator> GetObjectIterator(Heap* heap) override;
-
-#ifdef DEBUG
- V8_EXPORT_PRIVATE void Print() override;
-  // Validate a range of addresses in a SemiSpace.
- // The "from" address must be on a page prior to the "to" address,
- // in the linked page order, or it must be earlier on the same page.
- static void AssertValidRange(Address from, Address to);
-#else
- // Do nothing.
- inline static void AssertValidRange(Address from, Address to) {}
-#endif
-
-#ifdef VERIFY_HEAP
- virtual void Verify();
-#endif
-
- private:
- void RewindPages(int num_pages);
-
- inline int max_pages() {
- return static_cast<int>(current_capacity_ / Page::kPageSize);
- }
-
- // Copies the flags into the masked positions on all pages in the space.
- void FixPagesFlags(intptr_t flags, intptr_t flag_mask);
-
- // The currently committed space capacity.
- size_t current_capacity_;
-
- // The maximum capacity that can be used by this space. A space cannot grow
- // beyond that size.
- size_t maximum_capacity_;
-
- // The minimum capacity for the space. A space cannot shrink below this size.
- size_t minimum_capacity_;
-
- // Used to govern object promotion during mark-compact collection.
- Address age_mark_;
-
- bool committed_;
- SemiSpaceId id_;
-
- Page* current_page_;
-
- int pages_used_;
-
- friend class NewSpace;
- friend class SemiSpaceObjectIterator;
-};
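A standalone model of the AdvancePage() bound above (an illustration only; reducing the page list to two counters is an assumption of the sketch): the next page already counts against max_pages() because it may be filled completely after advancing.

#include <cassert>

struct SemiSpaceModel {
  int pages_used;       // pages already used for allocation
  int max_pages;        // current_capacity_ / Page::kPageSize
  int pages_committed;  // length of the page list

  // Mirrors SemiSpace::AdvancePage for this simplified model.
  bool AdvancePage() {
    const bool has_next_page = pages_used + 1 < pages_committed;
    const bool reached_max_pages = (pages_used + 1) == max_pages;
    if (!has_next_page || reached_max_pages) return false;
    pages_used++;
    return true;
  }
};

int main() {
  SemiSpaceModel space{/*pages_used=*/0, /*max_pages=*/2, /*pages_committed=*/4};
  assert(space.AdvancePage());   // page 0 -> page 1
  assert(!space.AdvancePage());  // a third page would exceed max_pages
  return 0;
}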
-
-// A SemiSpaceObjectIterator is an ObjectIterator that iterates over the active
-// semispace of the heap's new space. It iterates over the objects in the
-// semispace from a given start address (defaulting to the bottom of the
-// semispace) to the top of the semispace. New objects allocated after the
-// iterator is created are not iterated.
-class SemiSpaceObjectIterator : public ObjectIterator {
- public:
- // Create an iterator over the allocated objects in the given to-space.
- explicit SemiSpaceObjectIterator(NewSpace* space);
-
- inline HeapObject Next() override;
-
- private:
- void Initialize(Address start, Address end);
-
- // The current iteration point.
- Address current_;
- // The end of iteration.
- Address limit_;
-};
-
-// -----------------------------------------------------------------------------
-// The young generation space.
-//
-// The new space consists of a contiguous pair of semispaces. It simply
-// forwards most functions to the appropriate semispace.
-
-class V8_EXPORT_PRIVATE NewSpace
- : NON_EXPORTED_BASE(public SpaceWithLinearArea) {
- public:
- using iterator = PageIterator;
-
- NewSpace(Heap* heap, v8::PageAllocator* page_allocator,
- size_t initial_semispace_capacity, size_t max_semispace_capacity);
-
- ~NewSpace() override { TearDown(); }
-
- inline bool ContainsSlow(Address a);
- inline bool Contains(Object o);
- inline bool Contains(HeapObject o);
-
- // Tears down the space. Heap memory was not allocated by the space, so it
- // is not deallocated here.
- void TearDown();
-
- // Flip the pair of spaces.
- void Flip();
-
- // Grow the capacity of the semispaces. Assumes that they are not at
- // their maximum capacity.
- void Grow();
-
- // Shrink the capacity of the semispaces.
- void Shrink();
-
- // Return the allocated bytes in the active semispace.
- size_t Size() final {
- DCHECK_GE(top(), to_space_.page_low());
- return to_space_.pages_used() *
- MemoryChunkLayout::AllocatableMemoryInDataPage() +
- static_cast<size_t>(top() - to_space_.page_low());
- }
-
- size_t SizeOfObjects() final { return Size(); }
-
- // Return the allocatable capacity of a semispace.
- size_t Capacity() {
- SLOW_DCHECK(to_space_.current_capacity() == from_space_.current_capacity());
- return (to_space_.current_capacity() / Page::kPageSize) *
- MemoryChunkLayout::AllocatableMemoryInDataPage();
- }
-
- // Return the current size of a semispace, allocatable and non-allocatable
- // memory.
- size_t TotalCapacity() {
- DCHECK(to_space_.current_capacity() == from_space_.current_capacity());
- return to_space_.current_capacity();
- }
-
- // Committed memory for NewSpace is the committed memory of both semi-spaces
- // combined.
- size_t CommittedMemory() final {
- return from_space_.CommittedMemory() + to_space_.CommittedMemory();
- }
-
- size_t MaximumCommittedMemory() final {
- return from_space_.MaximumCommittedMemory() +
- to_space_.MaximumCommittedMemory();
- }
-
- // Approximate amount of physical memory committed for this space.
- size_t CommittedPhysicalMemory() final;
-
- // Return the available bytes without growing.
- size_t Available() final {
- DCHECK_GE(Capacity(), Size());
- return Capacity() - Size();
- }
-
- size_t ExternalBackingStoreBytes(ExternalBackingStoreType type) const final {
- if (V8_ARRAY_BUFFER_EXTENSION_BOOL &&
- type == ExternalBackingStoreType::kArrayBuffer)
- return heap()->YoungArrayBufferBytes();
- DCHECK_EQ(0, from_space_.ExternalBackingStoreBytes(type));
- return to_space_.ExternalBackingStoreBytes(type);
- }
-
- size_t ExternalBackingStoreBytes() {
- size_t result = 0;
- for (int i = 0; i < ExternalBackingStoreType::kNumTypes; i++) {
- result +=
- ExternalBackingStoreBytes(static_cast<ExternalBackingStoreType>(i));
- }
- return result;
- }
-
- size_t AllocatedSinceLastGC() {
- const Address age_mark = to_space_.age_mark();
- DCHECK_NE(age_mark, kNullAddress);
- DCHECK_NE(top(), kNullAddress);
- Page* const age_mark_page = Page::FromAllocationAreaAddress(age_mark);
- Page* const last_page = Page::FromAllocationAreaAddress(top());
- Page* current_page = age_mark_page;
- size_t allocated = 0;
- if (current_page != last_page) {
- DCHECK_EQ(current_page, age_mark_page);
- DCHECK_GE(age_mark_page->area_end(), age_mark);
- allocated += age_mark_page->area_end() - age_mark;
- current_page = current_page->next_page();
- } else {
- DCHECK_GE(top(), age_mark);
- return top() - age_mark;
- }
- while (current_page != last_page) {
- DCHECK_NE(current_page, age_mark_page);
- allocated += MemoryChunkLayout::AllocatableMemoryInDataPage();
- current_page = current_page->next_page();
- }
- DCHECK_GE(top(), current_page->area_start());
- allocated += top() - current_page->area_start();
- DCHECK_LE(allocated, Size());
- return allocated;
- }
-
- void MovePageFromSpaceToSpace(Page* page) {
- DCHECK(page->IsFromPage());
- from_space_.RemovePage(page);
- to_space_.PrependPage(page);
- }
-
- bool Rebalance();
-
- // Return the maximum capacity of a semispace.
- size_t MaximumCapacity() {
- DCHECK(to_space_.maximum_capacity() == from_space_.maximum_capacity());
- return to_space_.maximum_capacity();
- }
-
- bool IsAtMaximumCapacity() { return TotalCapacity() == MaximumCapacity(); }
-
- // Returns the initial capacity of a semispace.
- size_t InitialTotalCapacity() {
- DCHECK(to_space_.minimum_capacity() == from_space_.minimum_capacity());
- return to_space_.minimum_capacity();
- }
-
- void ResetOriginalTop() {
- DCHECK_GE(top(), original_top_);
- DCHECK_LE(top(), original_limit_);
- original_top_.store(top(), std::memory_order_release);
- }
-
- Address original_top_acquire() {
- return original_top_.load(std::memory_order_acquire);
- }
- Address original_limit_relaxed() {
- return original_limit_.load(std::memory_order_relaxed);
- }
-
- // Return the address of the first allocatable address in the active
- // semispace. This may be the address where the first object resides.
- Address first_allocatable_address() { return to_space_.space_start(); }
-
- // Get the age mark of the inactive semispace.
- Address age_mark() { return from_space_.age_mark(); }
- // Set the age mark in the active semispace.
- void set_age_mark(Address mark) { to_space_.set_age_mark(mark); }
-
- V8_WARN_UNUSED_RESULT V8_INLINE AllocationResult
- AllocateRawAligned(int size_in_bytes, AllocationAlignment alignment,
- AllocationOrigin origin = AllocationOrigin::kRuntime);
-
- V8_WARN_UNUSED_RESULT V8_INLINE AllocationResult AllocateRawUnaligned(
- int size_in_bytes, AllocationOrigin origin = AllocationOrigin::kRuntime);
-
- V8_WARN_UNUSED_RESULT V8_INLINE AllocationResult
- AllocateRaw(int size_in_bytes, AllocationAlignment alignment,
- AllocationOrigin origin = AllocationOrigin::kRuntime);
-
- V8_WARN_UNUSED_RESULT inline AllocationResult AllocateRawSynchronized(
- int size_in_bytes, AllocationAlignment alignment,
- AllocationOrigin origin = AllocationOrigin::kRuntime);
-
- // Reset the allocation pointer to the beginning of the active semispace.
- void ResetLinearAllocationArea();
-
- // When inline allocation stepping is active, either because of incremental
- // marking, idle scavenge, or allocation statistics gathering, we 'interrupt'
- // inline allocation every once in a while. This is done by setting
-  // allocation_info_.limit to be lower than the actual limit and increasing
- // it in steps to guarantee that the observers are notified periodically.
- void UpdateInlineAllocationLimit(size_t size_in_bytes) override;
-
- inline bool ToSpaceContainsSlow(Address a);
- inline bool ToSpaceContains(Object o);
- inline bool FromSpaceContains(Object o);
-
- // Try to switch the active semispace to a new, empty, page.
- // Returns false if this isn't possible or reasonable (i.e., there
- // are no pages, or the current page is already empty), or true
- // if successful.
- bool AddFreshPage();
- bool AddFreshPageSynchronized();
-
-#ifdef VERIFY_HEAP
- // Verify the active semispace.
- virtual void Verify(Isolate* isolate);
-#endif
-
-#ifdef DEBUG
- // Print the active semispace.
- void Print() override { to_space_.Print(); }
-#endif
-
- // Return whether the operation succeeded.
- bool CommitFromSpaceIfNeeded() {
- if (from_space_.is_committed()) return true;
- return from_space_.Commit();
- }
-
- bool UncommitFromSpace() {
- if (!from_space_.is_committed()) return true;
- return from_space_.Uncommit();
- }
-
- bool IsFromSpaceCommitted() { return from_space_.is_committed(); }
-
- SemiSpace* active_space() { return &to_space_; }
-
- Page* first_page() { return to_space_.first_page(); }
- Page* last_page() { return to_space_.last_page(); }
-
- iterator begin() { return to_space_.begin(); }
- iterator end() { return to_space_.end(); }
-
- std::unique_ptr<ObjectIterator> GetObjectIterator(Heap* heap) override;
-
- SemiSpace& from_space() { return from_space_; }
- SemiSpace& to_space() { return to_space_; }
-
- private:
- // Update linear allocation area to match the current to-space page.
- void UpdateLinearAllocationArea();
-
- base::Mutex mutex_;
-
- // The top and the limit at the time of setting the linear allocation area.
- // These values can be accessed by background tasks.
- std::atomic<Address> original_top_;
- std::atomic<Address> original_limit_;
-
- // The semispaces.
- SemiSpace to_space_;
- SemiSpace from_space_;
- VirtualMemory reservation_;
-
- bool EnsureAllocation(int size_in_bytes, AllocationAlignment alignment);
- bool SupportsInlineAllocation() override { return true; }
-
- friend class SemiSpaceObjectIterator;
-};
-
class V8_EXPORT_PRIVATE PauseAllocationObserversScope {
public:
explicit PauseAllocationObserversScope(Heap* heap);
@@ -2609,180 +559,6 @@ class V8_EXPORT_PRIVATE PauseAllocationObserversScope {
DISALLOW_COPY_AND_ASSIGN(PauseAllocationObserversScope);
};
-// -----------------------------------------------------------------------------
-// Base class for compaction space and off-thread space.
-
-class V8_EXPORT_PRIVATE LocalSpace : public PagedSpace {
- public:
- LocalSpace(Heap* heap, AllocationSpace id, Executability executable,
- LocalSpaceKind local_space_kind)
- : PagedSpace(heap, id, executable, FreeList::CreateFreeList(),
- local_space_kind) {
- DCHECK_NE(local_space_kind, LocalSpaceKind::kNone);
- }
-
- protected:
- // The space is temporary and not included in any snapshots.
- bool snapshotable() override { return false; }
-};
-
-// -----------------------------------------------------------------------------
-// Compaction space that is used temporarily during compaction.
-
-class V8_EXPORT_PRIVATE CompactionSpace : public LocalSpace {
- public:
- CompactionSpace(Heap* heap, AllocationSpace id, Executability executable,
- LocalSpaceKind local_space_kind)
- : LocalSpace(heap, id, executable, local_space_kind) {
- DCHECK(is_compaction_space());
- }
-
- protected:
- V8_WARN_UNUSED_RESULT bool SlowRefillLinearAllocationArea(
- int size_in_bytes, AllocationOrigin origin) override;
-};
-
-// A collection of |CompactionSpace|s used by a single compaction task.
-class CompactionSpaceCollection : public Malloced {
- public:
- explicit CompactionSpaceCollection(Heap* heap,
- LocalSpaceKind local_space_kind)
- : old_space_(heap, OLD_SPACE, Executability::NOT_EXECUTABLE,
- local_space_kind),
- code_space_(heap, CODE_SPACE, Executability::EXECUTABLE,
- local_space_kind) {}
-
- CompactionSpace* Get(AllocationSpace space) {
- switch (space) {
- case OLD_SPACE:
- return &old_space_;
- case CODE_SPACE:
- return &code_space_;
- default:
- UNREACHABLE();
- }
- UNREACHABLE();
- }
-
- private:
- CompactionSpace old_space_;
- CompactionSpace code_space_;
-};
-
-// -----------------------------------------------------------------------------
-// Old generation regular object space.
-
-class OldSpace : public PagedSpace {
- public:
- // Creates an old space object. The constructor does not allocate pages
- // from OS.
- explicit OldSpace(Heap* heap)
- : PagedSpace(heap, OLD_SPACE, NOT_EXECUTABLE,
- FreeList::CreateFreeList()) {}
-
- static bool IsAtPageStart(Address addr) {
- return static_cast<intptr_t>(addr & kPageAlignmentMask) ==
- MemoryChunkLayout::ObjectStartOffsetInDataPage();
- }
-
- size_t ExternalBackingStoreBytes(ExternalBackingStoreType type) const final {
- if (V8_ARRAY_BUFFER_EXTENSION_BOOL &&
- type == ExternalBackingStoreType::kArrayBuffer)
- return heap()->OldArrayBufferBytes();
- return external_backing_store_bytes_[type];
- }
-};
-
-// -----------------------------------------------------------------------------
-// Old generation code object space.
-
-class CodeSpace : public PagedSpace {
- public:
-  // Creates a code space object. The constructor does not allocate pages
- // from OS.
- explicit CodeSpace(Heap* heap)
- : PagedSpace(heap, CODE_SPACE, EXECUTABLE, FreeList::CreateFreeList()) {}
-};
-
-// For contiguous spaces, top should be in the space (or at the end) and limit
-// should be the end of the space.
-#define DCHECK_SEMISPACE_ALLOCATION_INFO(info, space) \
- SLOW_DCHECK((space).page_low() <= (info).top() && \
- (info).top() <= (space).page_high() && \
- (info).limit() <= (space).page_high())
-
-// -----------------------------------------------------------------------------
-// Old space for all map objects
-
-class MapSpace : public PagedSpace {
- public:
- // Creates a map space object.
- explicit MapSpace(Heap* heap)
- : PagedSpace(heap, MAP_SPACE, NOT_EXECUTABLE, new FreeListMap()) {}
-
- int RoundSizeDownToObjectAlignment(int size) override {
- if (base::bits::IsPowerOfTwo(Map::kSize)) {
- return RoundDown(size, Map::kSize);
- } else {
- return (size / Map::kSize) * Map::kSize;
- }
- }
-
- void SortFreeList();
-
-#ifdef VERIFY_HEAP
- void VerifyObject(HeapObject obj) override;
-#endif
-};
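The two branches of RoundSizeDownToObjectAlignment differ only in how the rounding is computed. A standalone sketch follows (an illustration only; the 80-byte object size is a made-up example, not the real Map::kSize).

#include <cstdio>

// Rounds |size| down to a multiple of |object_size|, taking the cheap
// bit-mask path when |object_size| is a power of two.
int RoundSizeDownToObjectAlignment(int size, int object_size) {
  const bool is_power_of_two = (object_size & (object_size - 1)) == 0;
  if (is_power_of_two) {
    return size & ~(object_size - 1);
  }
  return (size / object_size) * object_size;
}

int main() {
  printf("%d\n", RoundSizeDownToObjectAlignment(1000, 80));  // 960 (divide path)
  printf("%d\n", RoundSizeDownToObjectAlignment(1000, 64));  // 960 (mask path)
  return 0;
}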
-
-// -----------------------------------------------------------------------------
-// Off-thread space that is used for folded allocation on a different thread.
-
-class V8_EXPORT_PRIVATE OffThreadSpace : public LocalSpace {
- public:
- explicit OffThreadSpace(Heap* heap)
- : LocalSpace(heap, OLD_SPACE, NOT_EXECUTABLE,
- LocalSpaceKind::kOffThreadSpace) {
-#ifdef V8_ENABLE_THIRD_PARTY_HEAP
- // OffThreadSpace doesn't work with third-party heap.
- UNREACHABLE();
-#endif
- }
-
- protected:
- V8_WARN_UNUSED_RESULT bool SlowRefillLinearAllocationArea(
- int size_in_bytes, AllocationOrigin origin) override;
-
- void RefillFreeList() override;
-};
-
-// Iterates over the chunks (pages and large object pages) that can contain
-// pointers to new space or to evacuation candidates.
-class OldGenerationMemoryChunkIterator {
- public:
- inline explicit OldGenerationMemoryChunkIterator(Heap* heap);
-
- // Return nullptr when the iterator is done.
- inline MemoryChunk* next();
-
- private:
- enum State {
- kOldSpaceState,
- kMapState,
- kCodeState,
- kLargeObjectState,
- kCodeLargeObjectState,
- kFinishedState
- };
- Heap* heap_;
- State state_;
- PageIterator old_iterator_;
- PageIterator code_iterator_;
- PageIterator map_iterator_;
- LargePageIterator lo_iterator_;
- LargePageIterator code_lo_iterator_;
-};
-
} // namespace internal
} // namespace v8
diff --git a/chromium/v8/src/heap/sweeper.cc b/chromium/v8/src/heap/sweeper.cc
index 155b970ef64..c6019b0c086 100644
--- a/chromium/v8/src/heap/sweeper.cc
+++ b/chromium/v8/src/heap/sweeper.cc
@@ -6,10 +6,12 @@
#include "src/execution/vm-state-inl.h"
#include "src/heap/array-buffer-tracker-inl.h"
+#include "src/heap/code-object-registry.h"
+#include "src/heap/free-list-inl.h"
#include "src/heap/gc-tracer.h"
#include "src/heap/invalidated-slots-inl.h"
#include "src/heap/mark-compact-inl.h"
-#include "src/heap/remembered-set-inl.h"
+#include "src/heap/remembered-set.h"
#include "src/objects/objects-inl.h"
namespace v8 {
@@ -245,6 +247,13 @@ void Sweeper::EnsureCompleted() {
sweeping_in_progress_ = false;
}
+void Sweeper::SupportConcurrentSweeping() {
+ ForAllSweepingSpaces([this](AllocationSpace space) {
+ const int kMaxPagesToSweepPerSpace = 1;
+ ParallelSweepSpace(space, 0, kMaxPagesToSweepPerSpace);
+ });
+}
+
bool Sweeper::AreSweeperTasksRunning() { return num_sweeping_tasks_ != 0; }
V8_INLINE size_t Sweeper::FreeAndProcessFreedMemory(
@@ -257,14 +266,15 @@ V8_INLINE size_t Sweeper::FreeAndProcessFreedMemory(
if (free_space_mode == ZAP_FREE_SPACE) {
ZapCode(free_start, size);
}
+ ClearFreedMemoryMode clear_memory_mode =
+ (free_list_mode == REBUILD_FREE_LIST)
+ ? ClearFreedMemoryMode::kDontClearFreedMemory
+ : ClearFreedMemoryMode::kClearFreedMemory;
+ page->heap()->CreateFillerObjectAtBackground(
+ free_start, static_cast<int>(size), clear_memory_mode);
if (free_list_mode == REBUILD_FREE_LIST) {
- freed_bytes = reinterpret_cast<PagedSpace*>(space)->Free(
- free_start, size, SpaceAccountingMode::kSpaceUnaccounted);
-
- } else {
- Heap::CreateFillerObjectAt(ReadOnlyRoots(page->heap()), free_start,
- static_cast<int>(size),
- ClearFreedMemoryMode::kClearFreedMemory);
+ freed_bytes =
+ reinterpret_cast<PagedSpace*>(space)->UnaccountedFree(free_start, size);
}
if (should_reduce_memory_) page->DiscardUnusedMemory(free_start, size);
diff --git a/chromium/v8/src/heap/sweeper.h b/chromium/v8/src/heap/sweeper.h
index 3bc199a92d2..7cd1bafd4fb 100644
--- a/chromium/v8/src/heap/sweeper.h
+++ b/chromium/v8/src/heap/sweeper.h
@@ -47,7 +47,7 @@ class Sweeper {
// after exiting this scope.
class FilterSweepingPagesScope final {
public:
- explicit FilterSweepingPagesScope(
+ FilterSweepingPagesScope(
Sweeper* sweeper, const PauseOrCompleteScope& pause_or_complete_scope);
~FilterSweepingPagesScope();
@@ -108,6 +108,9 @@ class Sweeper {
void EnsureCompleted();
bool AreSweeperTasksRunning();
+  // Supports concurrent sweepers from the main thread.
+ void SupportConcurrentSweeping();
+
Page* GetSweptPageSafe(PagedSpace* space);
void AddPageForIterability(Page* page);
diff --git a/chromium/v8/src/ic/accessor-assembler.cc b/chromium/v8/src/ic/accessor-assembler.cc
index fb82a23c32f..18287ecc482 100644
--- a/chromium/v8/src/ic/accessor-assembler.cc
+++ b/chromium/v8/src/ic/accessor-assembler.cc
@@ -1334,12 +1334,13 @@ void AccessorAssembler::OverwriteExistingFastDataProperty(
if (do_transitioning_store) {
StoreMap(object, object_map);
} else {
- Label if_mutable(this);
- GotoIfNot(IsPropertyDetailsConst(details), &if_mutable);
+ Label store_value(this);
+ GotoIfNot(IsPropertyDetailsConst(details), &store_value);
TNode<Float64T> current_value =
LoadObjectField<Float64T>(object, field_offset);
- BranchIfSameNumberValue(current_value, double_value, &done, slow);
- BIND(&if_mutable);
+ BranchIfSameNumberValue(current_value, double_value, &store_value,
+ slow);
+ BIND(&store_value);
}
StoreObjectFieldNoWriteBarrier(object, field_offset, double_value);
} else {
@@ -1351,11 +1352,12 @@ void AccessorAssembler::OverwriteExistingFastDataProperty(
} else {
TNode<HeapNumber> heap_number =
CAST(LoadObjectField(object, field_offset));
- Label if_mutable(this);
- GotoIfNot(IsPropertyDetailsConst(details), &if_mutable);
+ Label store_value(this);
+ GotoIfNot(IsPropertyDetailsConst(details), &store_value);
TNode<Float64T> current_value = LoadHeapNumberValue(heap_number);
- BranchIfSameNumberValue(current_value, double_value, &done, slow);
- BIND(&if_mutable);
+ BranchIfSameNumberValue(current_value, double_value, &store_value,
+ slow);
+ BIND(&store_value);
StoreHeapNumberValue(heap_number, double_value);
}
}
@@ -1968,23 +1970,21 @@ TNode<PropertyArray> AccessorAssembler::ExtendPropertiesBackingStore(
// Previous property deletion could have left behind unused backing store
// capacity even for a map that thinks it doesn't have any unused fields.
// Perform a bounds check to see if we actually have to grow the array.
- GotoIf(UintPtrLessThan(index, ParameterToIntPtr(var_length.value(), mode)),
+ GotoIf(UintPtrLessThan(index, ParameterToIntPtr(var_length.value())),
&done);
TNode<BInt> delta = BIntConstant(JSObject::kFieldsAdded);
- Node* new_capacity = IntPtrOrSmiAdd(var_length.value(), delta, mode);
+ TNode<BInt> new_capacity = IntPtrOrSmiAdd(var_length.value(), delta);
// Grow properties array.
DCHECK(kMaxNumberOfDescriptors + JSObject::kFieldsAdded <
FixedArrayBase::GetMaxLengthForNewSpaceAllocation(PACKED_ELEMENTS));
// The size of a new properties backing store is guaranteed to be small
// enough that the new backing store will be allocated in new space.
- CSA_ASSERT(this,
- UintPtrOrSmiLessThan(
- new_capacity,
- IntPtrOrSmiConstant(
- kMaxNumberOfDescriptors + JSObject::kFieldsAdded, mode),
- mode));
+ CSA_ASSERT(this, UintPtrOrSmiLessThan(
+ new_capacity,
+ IntPtrOrSmiConstant<BInt>(kMaxNumberOfDescriptors +
+ JSObject::kFieldsAdded)));
TNode<PropertyArray> new_properties =
AllocatePropertyArray(new_capacity, mode);
@@ -2002,7 +2002,7 @@ TNode<PropertyArray> AccessorAssembler::ExtendPropertiesBackingStore(
// TODO(gsathya): Clean up the type conversions by creating smarter
// helpers that do the correct op based on the mode.
TNode<Int32T> new_capacity_int32 =
- TruncateIntPtrToInt32(ParameterToIntPtr(new_capacity, mode));
+ TruncateIntPtrToInt32(ParameterToIntPtr(new_capacity));
TNode<Int32T> new_length_and_hash_int32 =
Word32Or(var_encoded_hash.value(), new_capacity_int32);
StoreObjectField(new_properties, PropertyArray::kLengthAndHashOffset,
diff --git a/chromium/v8/src/ic/binary-op-assembler.cc b/chromium/v8/src/ic/binary-op-assembler.cc
index 25c2181ab2d..8cba7172a2e 100644
--- a/chromium/v8/src/ic/binary-op-assembler.cc
+++ b/chromium/v8/src/ic/binary-op-assembler.cc
@@ -69,6 +69,8 @@ TNode<Object> BinaryOpAssembler::Generate_AddWithFeedback(
// Not overflowed.
{
var_type_feedback = SmiConstant(BinaryOperationFeedback::kSignedSmall);
+ UpdateFeedback(var_type_feedback.value(), maybe_feedback_vector,
+ slot_id);
var_result = smi_result;
Goto(&end);
}
@@ -116,6 +118,7 @@ TNode<Object> BinaryOpAssembler::Generate_AddWithFeedback(
BIND(&do_fadd);
{
var_type_feedback = SmiConstant(BinaryOperationFeedback::kNumber);
+ UpdateFeedback(var_type_feedback.value(), maybe_feedback_vector, slot_id);
TNode<Float64T> value =
Float64Add(var_fadd_lhs.value(), var_fadd_rhs.value());
TNode<HeapNumber> result = AllocateHeapNumberWithValue(value);
@@ -166,6 +169,8 @@ TNode<Object> BinaryOpAssembler::Generate_AddWithFeedback(
&call_with_any_feedback);
var_type_feedback = SmiConstant(BinaryOperationFeedback::kString);
+ UpdateFeedback(var_type_feedback.value(), maybe_feedback_vector,
+ slot_id);
var_result =
CallBuiltin(Builtins::kStringAdd_CheckNone, context, lhs, rhs);
@@ -194,6 +199,7 @@ TNode<Object> BinaryOpAssembler::Generate_AddWithFeedback(
GotoIf(TaggedIsSmi(var_result.value()), &bigint_too_big);
var_type_feedback = SmiConstant(BinaryOperationFeedback::kBigInt);
+ UpdateFeedback(var_type_feedback.value(), maybe_feedback_vector, slot_id);
Goto(&end);
BIND(&bigint_too_big);
@@ -219,12 +225,12 @@ TNode<Object> BinaryOpAssembler::Generate_AddWithFeedback(
BIND(&call_add_stub);
{
+ UpdateFeedback(var_type_feedback.value(), maybe_feedback_vector, slot_id);
var_result = CallBuiltin(Builtins::kAdd, context, lhs, rhs);
Goto(&end);
}
BIND(&end);
- UpdateFeedback(var_type_feedback.value(), maybe_feedback_vector, slot_id);
return var_result.value();
}
@@ -279,6 +285,7 @@ TNode<Object> BinaryOpAssembler::Generate_BinaryOperationWithFeedback(
{
Comment("perform smi operation");
var_result = smiOperation(lhs_smi, CAST(rhs), &var_type_feedback);
+ UpdateFeedback(var_type_feedback.value(), maybe_feedback_vector, slot_id);
Goto(&end);
}
}
@@ -321,6 +328,7 @@ TNode<Object> BinaryOpAssembler::Generate_BinaryOperationWithFeedback(
BIND(&do_float_operation);
{
var_type_feedback = SmiConstant(BinaryOperationFeedback::kNumber);
+ UpdateFeedback(var_type_feedback.value(), maybe_feedback_vector, slot_id);
TNode<Float64T> lhs_value = var_float_lhs.value();
TNode<Float64T> rhs_value = var_float_rhs.value();
TNode<Float64T> value = floatOperation(lhs_value, rhs_value);
@@ -384,6 +392,7 @@ TNode<Object> BinaryOpAssembler::Generate_BinaryOperationWithFeedback(
BIND(&if_both_bigint);
{
var_type_feedback = SmiConstant(BinaryOperationFeedback::kBigInt);
+ UpdateFeedback(var_type_feedback.value(), maybe_feedback_vector, slot_id);
if (op == Operation::kSubtract) {
Label bigint_too_big(this);
var_result =
@@ -415,6 +424,7 @@ TNode<Object> BinaryOpAssembler::Generate_BinaryOperationWithFeedback(
BIND(&call_stub);
{
+ UpdateFeedback(var_type_feedback.value(), maybe_feedback_vector, slot_id);
TNode<Object> result;
switch (op) {
case Operation::kSubtract:
@@ -437,7 +447,6 @@ TNode<Object> BinaryOpAssembler::Generate_BinaryOperationWithFeedback(
}
BIND(&end);
- UpdateFeedback(var_type_feedback.value(), maybe_feedback_vector, slot_id);
return var_result.value();
}
diff --git a/chromium/v8/src/ic/binary-op-assembler.h b/chromium/v8/src/ic/binary-op-assembler.h
index 79b8cd221b9..4bd0d516089 100644
--- a/chromium/v8/src/ic/binary-op-assembler.h
+++ b/chromium/v8/src/ic/binary-op-assembler.h
@@ -50,6 +50,72 @@ class BinaryOpAssembler : public CodeStubAssembler {
TNode<UintPtrT> slot, TNode<HeapObject> maybe_feedback_vector,
bool rhs_known_smi);
+ TNode<Object> Generate_BitwiseOrWithFeedback(
+ TNode<Context> context, TNode<Object> left, TNode<Object> right,
+ TNode<UintPtrT> slot, TNode<HeapObject> maybe_feedback_vector,
+ bool /* unused */) {
+ TVARIABLE(Smi, feedback);
+ TNode<Object> result = Generate_BitwiseBinaryOpWithFeedback(
+ Operation::kBitwiseOr, left, right, context, &feedback);
+ UpdateFeedback(feedback.value(), maybe_feedback_vector, slot);
+ return result;
+ }
+
+ TNode<Object> Generate_BitwiseXorWithFeedback(
+ TNode<Context> context, TNode<Object> left, TNode<Object> right,
+ TNode<UintPtrT> slot, TNode<HeapObject> maybe_feedback_vector,
+ bool /* unused */) {
+ TVARIABLE(Smi, feedback);
+ TNode<Object> result = Generate_BitwiseBinaryOpWithFeedback(
+ Operation::kBitwiseXor, left, right, context, &feedback);
+ UpdateFeedback(feedback.value(), maybe_feedback_vector, slot);
+ return result;
+ }
+
+ TNode<Object> Generate_BitwiseAndWithFeedback(
+ TNode<Context> context, TNode<Object> left, TNode<Object> right,
+ TNode<UintPtrT> slot, TNode<HeapObject> maybe_feedback_vector,
+ bool /* unused */) {
+ TVARIABLE(Smi, feedback);
+ TNode<Object> result = Generate_BitwiseBinaryOpWithFeedback(
+ Operation::kBitwiseAnd, left, right, context, &feedback);
+ UpdateFeedback(feedback.value(), maybe_feedback_vector, slot);
+ return result;
+ }
+
+ TNode<Object> Generate_ShiftLeftWithFeedback(
+ TNode<Context> context, TNode<Object> left, TNode<Object> right,
+ TNode<UintPtrT> slot, TNode<HeapObject> maybe_feedback_vector,
+ bool /* unused */) {
+ TVARIABLE(Smi, feedback);
+ TNode<Object> result = Generate_BitwiseBinaryOpWithFeedback(
+ Operation::kShiftLeft, left, right, context, &feedback);
+ UpdateFeedback(feedback.value(), maybe_feedback_vector, slot);
+ return result;
+ }
+
+ TNode<Object> Generate_ShiftRightWithFeedback(
+ TNode<Context> context, TNode<Object> left, TNode<Object> right,
+ TNode<UintPtrT> slot, TNode<HeapObject> maybe_feedback_vector,
+ bool /* unused */) {
+ TVARIABLE(Smi, feedback);
+ TNode<Object> result = Generate_BitwiseBinaryOpWithFeedback(
+ Operation::kShiftRight, left, right, context, &feedback);
+ UpdateFeedback(feedback.value(), maybe_feedback_vector, slot);
+ return result;
+ }
+
+ TNode<Object> Generate_ShiftRightLogicalWithFeedback(
+ TNode<Context> context, TNode<Object> left, TNode<Object> right,
+ TNode<UintPtrT> slot, TNode<HeapObject> maybe_feedback_vector,
+ bool /* unused */) {
+ TVARIABLE(Smi, feedback);
+ TNode<Object> result = Generate_BitwiseBinaryOpWithFeedback(
+ Operation::kShiftRightLogical, left, right, context, &feedback);
+ UpdateFeedback(feedback.value(), maybe_feedback_vector, slot);
+ return result;
+ }
+
TNode<Object> Generate_BitwiseBinaryOpWithFeedback(Operation bitwise_op,
TNode<Object> left,
TNode<Object> right,
diff --git a/chromium/v8/src/ic/ic.cc b/chromium/v8/src/ic/ic.cc
index 452275d13f2..9251d772ddc 100644
--- a/chromium/v8/src/ic/ic.cc
+++ b/chromium/v8/src/ic/ic.cc
@@ -8,6 +8,7 @@
#include "src/api/api.h"
#include "src/ast/ast.h"
#include "src/base/bits.h"
+#include "src/base/logging.h"
#include "src/builtins/accessors.h"
#include "src/codegen/code-factory.h"
#include "src/execution/arguments-inl.h"
@@ -947,7 +948,9 @@ Handle<Object> LoadIC::ComputeHandler(LookupIterator* lookup) {
TRACE_HANDLER_STATS(isolate(), LoadIC_LoadNormalDH);
if (receiver_is_holder) return smi_handler;
TRACE_HANDLER_STATS(isolate(), LoadIC_LoadNormalFromPrototypeDH);
-
+ } else if (lookup->IsElement(*holder)) {
+ TRACE_HANDLER_STATS(isolate(), LoadIC_SlowStub);
+ return LoadHandler::LoadSlow(isolate());
} else {
DCHECK_EQ(kField, lookup->property_details().location());
FieldIndex field = lookup->GetFieldIndex();
@@ -1769,6 +1772,12 @@ MaybeObjectHandle StoreIC::ComputeHandler(LookupIterator* lookup) {
return MaybeObjectHandle(StoreHandler::StoreNormal(isolate()));
}
+ // -------------- Elements (for TypedArrays) -------------
+ if (lookup->IsElement(*holder)) {
+ TRACE_HANDLER_STATS(isolate(), StoreIC_SlowStub);
+ return MaybeObjectHandle(StoreHandler::StoreSlow(isolate()));
+ }
+
// -------------- Fields --------------
if (lookup->property_details().location() == kField) {
TRACE_HANDLER_STATS(isolate(), StoreIC_StoreFieldDH);
@@ -1856,6 +1865,12 @@ void KeyedStoreIC::UpdateStoreElement(Handle<Map> receiver_map,
if (receiver_map.is_identical_to(previous_receiver_map) &&
new_receiver_map.is_identical_to(receiver_map) &&
old_store_mode == STANDARD_STORE && store_mode != STANDARD_STORE) {
+ if (receiver_map->IsJSArrayMap() &&
+ JSArray::MayHaveReadOnlyLength(*receiver_map)) {
+ set_slow_stub_reason(
+ "can't generalize store mode (potentially read-only length)");
+ return;
+ }
// A "normal" IC that handles stores can switch to a version that can
// grow at the end of the array, handle OOB accesses or copy COW arrays
// and still stay MONOMORPHIC.
@@ -1900,13 +1915,18 @@ void KeyedStoreIC::UpdateStoreElement(Handle<Map> receiver_map,
}
// If the store mode isn't the standard mode, make sure that all polymorphic
- // receivers are either external arrays, or all "normal" arrays. Otherwise,
- // use the megamorphic stub.
+ // receivers are either external arrays, or all "normal" arrays with writable
+ // length. Otherwise, use the megamorphic stub.
if (store_mode != STANDARD_STORE) {
size_t external_arrays = 0;
for (MapAndHandler map_and_handler : target_maps_and_handlers) {
Handle<Map> map = map_and_handler.first;
- if (map->has_typed_array_elements()) {
+ if (map->IsJSArrayMap() && JSArray::MayHaveReadOnlyLength(*map)) {
+ set_slow_stub_reason(
+ "unsupported combination of arrays (potentially read-only length)");
+ return;
+
+ } else if (map->has_typed_array_elements()) {
DCHECK(!IsStoreInArrayLiteralICKind(kind()));
external_arrays++;
}
diff --git a/chromium/v8/src/ic/unary-op-assembler.cc b/chromium/v8/src/ic/unary-op-assembler.cc
new file mode 100644
index 00000000000..6580601a1f4
--- /dev/null
+++ b/chromium/v8/src/ic/unary-op-assembler.cc
@@ -0,0 +1,283 @@
+// Copyright 2020 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/ic/unary-op-assembler.h"
+
+#include "src/common/globals.h"
+
+namespace v8 {
+namespace internal {
+
+namespace {
+
+class UnaryOpAssemblerImpl final : public CodeStubAssembler {
+ public:
+ explicit UnaryOpAssemblerImpl(compiler::CodeAssemblerState* state)
+ : CodeStubAssembler(state) {}
+
+ TNode<Object> BitwiseNot(TNode<Context> context, TNode<Object> value,
+ TNode<UintPtrT> slot,
+ TNode<HeapObject> maybe_feedback_vector) {
+ // TODO(jgruber): Make this implementation more consistent with other unary
+ // ops (i.e. have them all use UnaryOpWithFeedback or some other common
+ // mechanism).
+ TVARIABLE(Word32T, var_word32);
+ TVARIABLE(Smi, var_feedback);
+ TVARIABLE(BigInt, var_bigint);
+ TVARIABLE(Object, var_result);
+ Label if_number(this), if_bigint(this, Label::kDeferred), out(this);
+ TaggedToWord32OrBigIntWithFeedback(context, value, &if_number, &var_word32,
+ &if_bigint, &var_bigint, &var_feedback);
+
+ // Number case.
+ BIND(&if_number);
+ var_result =
+ ChangeInt32ToTagged(Signed(Word32BitwiseNot(var_word32.value())));
+ TNode<Smi> result_type = SelectSmiConstant(
+ TaggedIsSmi(var_result.value()), BinaryOperationFeedback::kSignedSmall,
+ BinaryOperationFeedback::kNumber);
+ UpdateFeedback(SmiOr(result_type, var_feedback.value()),
+ maybe_feedback_vector, slot);
+ Goto(&out);
+
+ // BigInt case.
+ BIND(&if_bigint);
+ UpdateFeedback(SmiConstant(BinaryOperationFeedback::kBigInt),
+ maybe_feedback_vector, slot);
+ var_result =
+ CallRuntime(Runtime::kBigIntUnaryOp, context, var_bigint.value(),
+ SmiConstant(Operation::kBitwiseNot));
+ Goto(&out);
+
+ BIND(&out);
+ return var_result.value();
+ }
+
+ TNode<Object> Decrement(TNode<Context> context, TNode<Object> value,
+ TNode<UintPtrT> slot,
+ TNode<HeapObject> maybe_feedback_vector) {
+ return IncrementOrDecrement<Operation::kDecrement>(context, value, slot,
+ maybe_feedback_vector);
+ }
+
+ TNode<Object> Increment(TNode<Context> context, TNode<Object> value,
+ TNode<UintPtrT> slot,
+ TNode<HeapObject> maybe_feedback_vector) {
+ return IncrementOrDecrement<Operation::kIncrement>(context, value, slot,
+ maybe_feedback_vector);
+ }
+
+ TNode<Object> Negate(TNode<Context> context, TNode<Object> value,
+ TNode<UintPtrT> slot,
+ TNode<HeapObject> maybe_feedback_vector) {
+ SmiOperation smi_op = [=](TNode<Smi> smi_value,
+ TVariable<Smi>* var_feedback, Label* do_float_op,
+ TVariable<Float64T>* var_float) {
+ TVARIABLE(Number, var_result);
+ Label if_zero(this), if_min_smi(this), end(this);
+ // Return -0 if operand is 0.
+ GotoIf(SmiEqual(smi_value, SmiConstant(0)), &if_zero);
+
+ // Special-case the minimum Smi to avoid overflow.
+ GotoIf(SmiEqual(smi_value, SmiConstant(Smi::kMinValue)), &if_min_smi);
+
+ // Else simply subtract operand from 0.
+ CombineFeedback(var_feedback, BinaryOperationFeedback::kSignedSmall);
+ var_result = SmiSub(SmiConstant(0), smi_value);
+ Goto(&end);
+
+ BIND(&if_zero);
+ CombineFeedback(var_feedback, BinaryOperationFeedback::kNumber);
+ var_result = MinusZeroConstant();
+ Goto(&end);
+
+ BIND(&if_min_smi);
+ *var_float = SmiToFloat64(smi_value);
+ Goto(do_float_op);
+
+ BIND(&end);
+ return var_result.value();
+ };
+ FloatOperation float_op = [=](TNode<Float64T> float_value) {
+ return Float64Neg(float_value);
+ };
+ BigIntOperation bigint_op = [=](TNode<Context> context,
+ TNode<HeapObject> bigint_value) {
+ return CAST(CallRuntime(Runtime::kBigIntUnaryOp, context, bigint_value,
+ SmiConstant(Operation::kNegate)));
+ };
+ return UnaryOpWithFeedback(context, value, slot, maybe_feedback_vector,
+ smi_op, float_op, bigint_op);
+ }
+
+ private:
+ using SmiOperation = std::function<TNode<Number>(
+ TNode<Smi> /* smi_value */, TVariable<Smi>* /* var_feedback */,
+ Label* /* do_float_op */, TVariable<Float64T>* /* var_float */)>;
+ using FloatOperation =
+ std::function<TNode<Float64T>(TNode<Float64T> /* float_value */)>;
+ using BigIntOperation = std::function<TNode<HeapObject>(
+ TNode<Context> /* context */, TNode<HeapObject> /* bigint_value */)>;
+
+ TNode<Object> UnaryOpWithFeedback(TNode<Context> context, TNode<Object> value,
+ TNode<UintPtrT> slot,
+ TNode<HeapObject> maybe_feedback_vector,
+ const SmiOperation& smi_op,
+ const FloatOperation& float_op,
+ const BigIntOperation& bigint_op) {
+ TVARIABLE(Object, var_value, value);
+ TVARIABLE(Object, var_result);
+ TVARIABLE(Float64T, var_float_value);
+ TVARIABLE(Smi, var_feedback, SmiConstant(BinaryOperationFeedback::kNone));
+ Label start(this, {&var_value, &var_feedback}), end(this);
+ Label do_float_op(this, &var_float_value);
+ Goto(&start);
+ // We might have to try again after ToNumeric conversion.
+ BIND(&start);
+ {
+ Label if_smi(this), if_heapnumber(this), if_oddball(this);
+ Label if_bigint(this, Label::kDeferred);
+ Label if_other(this, Label::kDeferred);
+ TNode<Object> value = var_value.value();
+ GotoIf(TaggedIsSmi(value), &if_smi);
+
+ TNode<HeapObject> value_heap_object = CAST(value);
+ TNode<Map> map = LoadMap(value_heap_object);
+ GotoIf(IsHeapNumberMap(map), &if_heapnumber);
+ TNode<Uint16T> instance_type = LoadMapInstanceType(map);
+ GotoIf(IsBigIntInstanceType(instance_type), &if_bigint);
+ Branch(InstanceTypeEqual(instance_type, ODDBALL_TYPE), &if_oddball,
+ &if_other);
+
+ BIND(&if_smi);
+ {
+ var_result =
+ smi_op(CAST(value), &var_feedback, &do_float_op, &var_float_value);
+ Goto(&end);
+ }
+
+ BIND(&if_heapnumber);
+ {
+ var_float_value = LoadHeapNumberValue(value_heap_object);
+ Goto(&do_float_op);
+ }
+
+ BIND(&if_bigint);
+ {
+ var_result = bigint_op(context, value_heap_object);
+ CombineFeedback(&var_feedback, BinaryOperationFeedback::kBigInt);
+ Goto(&end);
+ }
+
+ BIND(&if_oddball);
+ {
+ // We do not require an Or with earlier feedback here because once we
+ // convert the value to a number, we cannot reach this path. We can
+ // only reach this path on the first pass when the feedback is kNone.
+ CSA_ASSERT(this, SmiEqual(var_feedback.value(),
+ SmiConstant(BinaryOperationFeedback::kNone)));
+ OverwriteFeedback(&var_feedback,
+ BinaryOperationFeedback::kNumberOrOddball);
+ var_value =
+ LoadObjectField(value_heap_object, Oddball::kToNumberOffset);
+ Goto(&start);
+ }
+
+ BIND(&if_other);
+ {
+ // We do not require an Or with earlier feedback here because once we
+ // convert the value to a number, we cannot reach this path. We can
+ // only reach this path on the first pass when the feedback is kNone.
+ CSA_ASSERT(this, SmiEqual(var_feedback.value(),
+ SmiConstant(BinaryOperationFeedback::kNone)));
+ OverwriteFeedback(&var_feedback, BinaryOperationFeedback::kAny);
+ var_value = CallBuiltin(Builtins::kNonNumberToNumeric, context,
+ value_heap_object);
+ Goto(&start);
+ }
+ }
+
+ BIND(&do_float_op);
+ {
+ CombineFeedback(&var_feedback, BinaryOperationFeedback::kNumber);
+ var_result =
+ AllocateHeapNumberWithValue(float_op(var_float_value.value()));
+ Goto(&end);
+ }
+
+ BIND(&end);
+ UpdateFeedback(var_feedback.value(), maybe_feedback_vector, slot);
+ return var_result.value();
+ }
+
+ template <Operation kOperation>
+ TNode<Object> IncrementOrDecrement(TNode<Context> context,
+ TNode<Object> value, TNode<UintPtrT> slot,
+ TNode<HeapObject> maybe_feedback_vector) {
+ STATIC_ASSERT(kOperation == Operation::kIncrement ||
+ kOperation == Operation::kDecrement);
+ static constexpr int kAddValue =
+ (kOperation == Operation::kIncrement) ? 1 : -1;
+
+ SmiOperation smi_op = [=](TNode<Smi> smi_value,
+ TVariable<Smi>* var_feedback, Label* do_float_op,
+ TVariable<Float64T>* var_float) {
+ Label if_overflow(this), out(this);
+ TNode<Smi> result =
+ TrySmiAdd(smi_value, SmiConstant(kAddValue), &if_overflow);
+ CombineFeedback(var_feedback, BinaryOperationFeedback::kSignedSmall);
+ Goto(&out);
+
+ BIND(&if_overflow);
+ *var_float = SmiToFloat64(smi_value);
+ Goto(do_float_op);
+
+ BIND(&out);
+ return result;
+ };
+ FloatOperation float_op = [=](TNode<Float64T> float_value) {
+ return Float64Add(float_value, Float64Constant(kAddValue));
+ };
+ BigIntOperation bigint_op = [=](TNode<Context> context,
+ TNode<HeapObject> bigint_value) {
+ return CAST(CallRuntime(Runtime::kBigIntUnaryOp, context, bigint_value,
+ SmiConstant(kOperation)));
+ };
+ return UnaryOpWithFeedback(context, value, slot, maybe_feedback_vector,
+ smi_op, float_op, bigint_op);
+ }
+};
+
+} // namespace
+
+TNode<Object> UnaryOpAssembler::Generate_BitwiseNotWithFeedback(
+ TNode<Context> context, TNode<Object> value, TNode<UintPtrT> slot,
+ TNode<HeapObject> maybe_feedback_vector) {
+ UnaryOpAssemblerImpl a(state_);
+ return a.BitwiseNot(context, value, slot, maybe_feedback_vector);
+}
+
+TNode<Object> UnaryOpAssembler::Generate_DecrementWithFeedback(
+ TNode<Context> context, TNode<Object> value, TNode<UintPtrT> slot,
+ TNode<HeapObject> maybe_feedback_vector) {
+ UnaryOpAssemblerImpl a(state_);
+ return a.Decrement(context, value, slot, maybe_feedback_vector);
+}
+
+TNode<Object> UnaryOpAssembler::Generate_IncrementWithFeedback(
+ TNode<Context> context, TNode<Object> value, TNode<UintPtrT> slot,
+ TNode<HeapObject> maybe_feedback_vector) {
+ UnaryOpAssemblerImpl a(state_);
+ return a.Increment(context, value, slot, maybe_feedback_vector);
+}
+
+TNode<Object> UnaryOpAssembler::Generate_NegateWithFeedback(
+ TNode<Context> context, TNode<Object> value, TNode<UintPtrT> slot,
+ TNode<HeapObject> maybe_feedback_vector) {
+ UnaryOpAssemblerImpl a(state_);
+ return a.Negate(context, value, slot, maybe_feedback_vector);
+}
+
+} // namespace internal
+} // namespace v8
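
Negate's Smi fast path above special-cases two inputs: zero, because JavaScript requires -(+0) to be -0 and no Smi can represent that, and Smi::kMinValue, whose negation overflows the Smi range. A rough standalone sketch of the same branching in plain C++ (int32_t and double standing in for Smi and HeapNumber; this is not CodeStubAssembler code):

#include <cstdint>
#include <cstdio>
#include <limits>

// Stand-in for the Smi fast path in Negate(): the two early exits mirror the
// if_zero and if_min_smi labels above.
double NegateSmallInt(int32_t v) {
  if (v == 0) {
    // -(+0) must be -0, which no small integer can represent, so the result
    // is promoted to a (heap) number.
    return -0.0;
  }
  if (v == std::numeric_limits<int32_t>::min()) {
    // Negating the minimum value overflows the small-integer range, so the
    // operand is widened and the float path takes over.
    return -static_cast<double>(v);
  }
  return static_cast<double>(0 - v);  // SmiSub(0, value) stays in range.
}

int main() {
  std::printf("%g %g %g\n", NegateSmallInt(0), NegateSmallInt(INT32_MIN),
              NegateSmallInt(42));  // -0 2.14748e+09 -42
}
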
diff --git a/chromium/v8/src/ic/unary-op-assembler.h b/chromium/v8/src/ic/unary-op-assembler.h
new file mode 100644
index 00000000000..b22322ddf2a
--- /dev/null
+++ b/chromium/v8/src/ic/unary-op-assembler.h
@@ -0,0 +1,45 @@
+// Copyright 2020 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_IC_UNARY_OP_ASSEMBLER_H_
+#define V8_IC_UNARY_OP_ASSEMBLER_H_
+
+#include "src/codegen/code-stub-assembler.h"
+
+namespace v8 {
+namespace internal {
+
+namespace compiler {
+class CodeAssemblerState;
+}
+
+class UnaryOpAssembler final {
+ public:
+ explicit UnaryOpAssembler(compiler::CodeAssemblerState* state)
+ : state_(state) {}
+
+ TNode<Object> Generate_BitwiseNotWithFeedback(
+ TNode<Context> context, TNode<Object> value, TNode<UintPtrT> slot,
+ TNode<HeapObject> maybe_feedback_vector);
+
+ TNode<Object> Generate_DecrementWithFeedback(
+ TNode<Context> context, TNode<Object> value, TNode<UintPtrT> slot,
+ TNode<HeapObject> maybe_feedback_vector);
+
+ TNode<Object> Generate_IncrementWithFeedback(
+ TNode<Context> context, TNode<Object> value, TNode<UintPtrT> slot,
+ TNode<HeapObject> maybe_feedback_vector);
+
+ TNode<Object> Generate_NegateWithFeedback(
+ TNode<Context> context, TNode<Object> value, TNode<UintPtrT> slot,
+ TNode<HeapObject> maybe_feedback_vector);
+
+ private:
+ compiler::CodeAssemblerState* const state_;
+};
+
+} // namespace internal
+} // namespace v8
+
+#endif // V8_IC_UNARY_OP_ASSEMBLER_H_
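
The header keeps UnaryOpAssembler down to a single CodeAssemblerState pointer; each Generate_* method builds a short-lived impl object in the .cc file, so CodeStubAssembler never appears in the public interface. A minimal sketch of that shape with stand-in types (none of these names are real V8 classes):

#include <cstdio>

namespace sketch {

struct CodeAssemblerStateLike {};

namespace {
class UnaryOpAssemblerImplLike {
 public:
  // The real impl derives from CodeStubAssembler and consumes the state; the
  // sketch only models the call shape.
  explicit UnaryOpAssemblerImplLike(CodeAssemblerStateLike*) {}
  int BitwiseNot(int value) { return ~value; }  // Placeholder for codegen.
};
}  // namespace

class UnaryOpAssemblerLike {
 public:
  explicit UnaryOpAssemblerLike(CodeAssemblerStateLike* state)
      : state_(state) {}
  int Generate_BitwiseNot(int value) {
    return UnaryOpAssemblerImplLike(state_).BitwiseNot(value);
  }

 private:
  CodeAssemblerStateLike* const state_;
};

}  // namespace sketch

int main() {
  sketch::CodeAssemblerStateLike state;
  std::printf("%d\n",
              sketch::UnaryOpAssemblerLike(&state).Generate_BitwiseNot(0));
}
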
diff --git a/chromium/v8/src/init/bootstrapper.cc b/chromium/v8/src/init/bootstrapper.cc
index f4049e328ec..2f71c258e88 100644
--- a/chromium/v8/src/init/bootstrapper.cc
+++ b/chromium/v8/src/init/bootstrapper.cc
@@ -34,7 +34,6 @@
#ifdef V8_INTL_SUPPORT
#include "src/objects/intl-objects.h"
#endif // V8_INTL_SUPPORT
-#include "src/objects/js-aggregate-error.h"
#include "src/objects/js-array-buffer-inl.h"
#include "src/objects/js-array-inl.h"
#ifdef V8_INTL_SUPPORT
@@ -919,13 +918,14 @@ void Genesis::CreateAsyncIteratorMaps(Handle<JSFunction> empty) {
Handle<JSObject> async_from_sync_iterator_prototype = factory()->NewJSObject(
isolate()->object_function(), AllocationType::kOld);
SimpleInstallFunction(isolate(), async_from_sync_iterator_prototype, "next",
- Builtins::kAsyncFromSyncIteratorPrototypeNext, 1, true);
+ Builtins::kAsyncFromSyncIteratorPrototypeNext, 1,
+ false);
SimpleInstallFunction(isolate(), async_from_sync_iterator_prototype, "return",
Builtins::kAsyncFromSyncIteratorPrototypeReturn, 1,
- true);
+ false);
SimpleInstallFunction(isolate(), async_from_sync_iterator_prototype, "throw",
Builtins::kAsyncFromSyncIteratorPrototypeThrow, 1,
- true);
+ false);
InstallToStringTag(isolate(), async_from_sync_iterator_prototype,
"Async-from-Sync Iterator");
@@ -1319,18 +1319,16 @@ static void InstallError(
Isolate* isolate, Handle<JSObject> global, Handle<String> name,
int context_index,
Builtins::Name error_constructor = Builtins::kErrorConstructor,
- InstanceType error_type = JS_ERROR_TYPE, int error_function_length = 1,
- int header_size = JSObject::kHeaderSize) {
+ int error_function_length = 1, int in_object_properties = 2) {
Factory* factory = isolate->factory();
// Most Error objects consist of a message and a stack trace.
// Reserve two in-object properties for these.
- const int kInObjectPropertiesCount = 2;
const int kErrorObjectSize =
- header_size + kInObjectPropertiesCount * kTaggedSize;
+ JSObject::kHeaderSize + in_object_properties * kTaggedSize;
Handle<JSFunction> error_fun = InstallFunction(
- isolate, global, name, error_type, kErrorObjectSize,
- kInObjectPropertiesCount, factory->the_hole_value(), error_constructor);
+ isolate, global, name, JS_ERROR_TYPE, kErrorObjectSize,
+ in_object_properties, factory->the_hole_value(), error_constructor);
error_fun->shared().DontAdaptArguments();
error_fun->shared().set_length(error_function_length);
@@ -2778,7 +2776,7 @@ void Genesis::InitializeGlobal(Handle<JSGlobalObject> global_object,
Handle<JSObject> prototype(
JSObject::cast(date_time_format_constructor->prototype()), isolate_);
- InstallToStringTag(isolate_, prototype, factory->Object_string());
+ InstallToStringTag(isolate_, prototype, "Intl.DateTimeFormat");
SimpleInstallFunction(isolate_, prototype, "resolvedOptions",
Builtins::kDateTimeFormatPrototypeResolvedOptions,
@@ -2817,7 +2815,7 @@ void Genesis::InitializeGlobal(Handle<JSGlobalObject> global_object,
Handle<JSObject> prototype(
JSObject::cast(number_format_constructor->prototype()), isolate_);
- InstallToStringTag(isolate_, prototype, factory->Object_string());
+ InstallToStringTag(isolate_, prototype, "Intl.NumberFormat");
SimpleInstallFunction(isolate_, prototype, "resolvedOptions",
Builtins::kNumberFormatPrototypeResolvedOptions, 0,
@@ -2845,7 +2843,7 @@ void Genesis::InitializeGlobal(Handle<JSGlobalObject> global_object,
Handle<JSObject> prototype(
JSObject::cast(collator_constructor->prototype()), isolate_);
- InstallToStringTag(isolate_, prototype, factory->Object_string());
+ InstallToStringTag(isolate_, prototype, "Intl.Collator");
SimpleInstallFunction(isolate_, prototype, "resolvedOptions",
Builtins::kCollatorPrototypeResolvedOptions, 0,
@@ -2908,7 +2906,7 @@ void Genesis::InitializeGlobal(Handle<JSGlobalObject> global_object,
Handle<JSObject> prototype(
JSObject::cast(plural_rules_constructor->prototype()), isolate_);
- InstallToStringTag(isolate_, prototype, factory->Object_string());
+ InstallToStringTag(isolate_, prototype, "Intl.PluralRules");
SimpleInstallFunction(isolate_, prototype, "resolvedOptions",
Builtins::kPluralRulesPrototypeResolvedOptions, 0,
@@ -4234,8 +4232,7 @@ void Genesis::InitializeGlobal_harmony_promise_any() {
InstallError(isolate_, global, factory->AggregateError_string(),
Context::AGGREGATE_ERROR_FUNCTION_INDEX,
- Builtins::kAggregateErrorConstructor, JS_AGGREGATE_ERROR_TYPE, 2,
- JSAggregateError::kHeaderSize);
+ Builtins::kAggregateErrorConstructor, 2, 2);
// Setup %AggregateErrorPrototype%.
Handle<JSFunction> aggregate_error_function(
@@ -4244,26 +4241,13 @@ void Genesis::InitializeGlobal_harmony_promise_any() {
JSObject::cast(aggregate_error_function->instance_prototype()),
isolate());
- Handle<String> getter_name =
- Name::ToFunctionName(isolate_, factory->errors_string(),
- isolate_->factory()->get_string())
- .ToHandleChecked();
-
- Handle<JSFunction> getter = SimpleCreateFunction(
- isolate(), getter_name, Builtins::kAggregateErrorPrototypeErrorsGetter, 0,
- true);
-
- JSObject::DefineAccessor(prototype, factory->errors_string(), getter,
- factory->undefined_value(), DONT_ENUM);
-
Handle<JSFunction> promise_fun(
JSFunction::cast(
isolate()->native_context()->get(Context::PROMISE_FUNCTION_INDEX)),
isolate());
- InstallFunctionWithBuiltinId(isolate_, promise_fun, "any",
- Builtins::kPromiseAny, 1, true);
-
- DCHECK(promise_fun->HasFastProperties());
+ Handle<JSFunction> promise_any = InstallFunctionWithBuiltinId(
+ isolate_, promise_fun, "any", Builtins::kPromiseAny, 1, true);
+ native_context()->set_promise_any(*promise_any);
}
void Genesis::InitializeGlobal_harmony_promise_all_settled() {
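
InstallError now takes the in-object property count as a parameter and always builds a JS_ERROR_TYPE object, sized as the header plus one tagged slot per reserved property. A small sketch of that arithmetic with assumed placeholder constants (the real values come from JSObject::kHeaderSize and kTaggedSize):

#include <cstdio>

constexpr int kAssumedHeaderSize = 24;  // Hypothetical JSObject::kHeaderSize.
constexpr int kAssumedTaggedSize = 8;   // Hypothetical kTaggedSize (64-bit).

constexpr int ErrorObjectSize(int in_object_properties) {
  return kAssumedHeaderSize + in_object_properties * kAssumedTaggedSize;
}

int main() {
  // The callers in this hunk reserve two in-object slots (message, stack).
  std::printf("%d\n", ErrorObjectSize(2));  // 40 with the assumed constants.
}
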
diff --git a/chromium/v8/src/init/heap-symbols.h b/chromium/v8/src/init/heap-symbols.h
index 3ac1420f75e..6048f5e2e96 100644
--- a/chromium/v8/src/init/heap-symbols.h
+++ b/chromium/v8/src/init/heap-symbols.h
@@ -243,7 +243,7 @@
V(_, NFD_string, "NFD") \
V(_, NFKC_string, "NFKC") \
V(_, NFKD_string, "NFKD") \
- V(_, not_equal, "not-equal") \
+ V(_, not_equal_string, "not-equal") \
V(_, null_string, "null") \
V(_, null_to_string, "[object Null]") \
V(_, Number_string, "Number") \
@@ -253,7 +253,7 @@
V(_, object_string, "object") \
V(_, object_to_string, "[object Object]") \
V(_, of_string, "of") \
- V(_, ok, "ok") \
+ V(_, ok_string, "ok") \
V(_, one_string, "1") \
V(_, other_string, "other") \
V(_, ownKeys_string, "ownKeys") \
@@ -305,7 +305,7 @@
V(_, this_function_string, ".this_function") \
V(_, this_string, "this") \
V(_, throw_string, "throw") \
- V(_, timed_out, "timed-out") \
+ V(_, timed_out_string, "timed-out") \
V(_, toJSON_string, "toJSON") \
V(_, toString_string, "toString") \
V(_, true_string, "true") \
diff --git a/chromium/v8/src/init/v8.cc b/chromium/v8/src/init/v8.cc
index 52676447372..2b2194ca735 100644
--- a/chromium/v8/src/init/v8.cc
+++ b/chromium/v8/src/init/v8.cc
@@ -98,9 +98,7 @@ void V8::InitializeOncePerProcessImpl() {
// The --jitless and --interpreted-frames-native-stack flags are incompatible
// since the latter requires code generation while the former prohibits code
// generation.
- CHECK_WITH_MSG(!FLAG_interpreted_frames_native_stack || !FLAG_jitless,
- "The --jitless and --interpreted-frames-native-stack flags "
- "are incompatible.");
+ CHECK(!FLAG_interpreted_frames_native_stack || !FLAG_jitless);
base::OS::Initialize(FLAG_hard_abort, FLAG_gc_fake_mmap);
diff --git a/chromium/v8/src/inspector/v8-console.cc b/chromium/v8/src/inspector/v8-console.cc
index 4fd33e346ae..6dda6ef90c3 100644
--- a/chromium/v8/src/inspector/v8-console.cc
+++ b/chromium/v8/src/inspector/v8-console.cc
@@ -411,7 +411,7 @@ static void timeEndFunction(const v8::debug::ConsoleCallArguments& info,
helper.consoleMessageStorage()->timeEnd(helper.contextId(), title);
}
String16 message =
- protocolTitle + ": " + String16::fromDouble(elapsed) + "ms";
+ protocolTitle + ": " + String16::fromDouble(elapsed) + " ms";
if (timeLog)
helper.reportCallAndReplaceFirstArgument(ConsoleAPIType::kLog, message);
else
diff --git a/chromium/v8/src/inspector/v8-debugger-agent-impl.cc b/chromium/v8/src/inspector/v8-debugger-agent-impl.cc
index afefd4e14c7..840f9c66d82 100644
--- a/chromium/v8/src/inspector/v8-debugger-agent-impl.cc
+++ b/chromium/v8/src/inspector/v8-debugger-agent-impl.cc
@@ -1581,8 +1581,6 @@ void V8DebuggerAgentImpl::didParseSource(
return;
}
- // TODO(herhut, dgozman): Report correct length for Wasm if needed for
- // coverage. Or do not send the length at all and change coverage instead.
if (scriptRef->isSourceLoadedLazily()) {
m_frontend.scriptParsed(
scriptId, scriptURL, 0, 0, 0, 0, contextId, scriptRef->hash(),
@@ -1842,7 +1840,6 @@ void V8DebuggerAgentImpl::reset() {
m_scripts.clear();
m_cachedScriptIds.clear();
m_cachedScriptSize = 0;
- m_breakpointIdToDebuggerBreakpointIds.clear();
}
void V8DebuggerAgentImpl::ScriptCollected(const V8DebuggerScript* script) {
diff --git a/chromium/v8/src/inspector/v8-debugger-script.cc b/chromium/v8/src/inspector/v8-debugger-script.cc
index 7905341481b..6e54656d40b 100644
--- a/chromium/v8/src/inspector/v8-debugger-script.cc
+++ b/chromium/v8/src/inspector/v8-debugger-script.cc
@@ -148,9 +148,14 @@ class ActualScript : public V8DebuggerScript {
}
bool isSourceLoadedLazily() const override { return false; }
int length() const override {
+ auto script = this->script();
+ if (script->IsWasm()) {
+ return static_cast<int>(
+ v8::debug::WasmScript::Cast(*script)->Bytecode().size());
+ }
v8::HandleScope scope(m_isolate);
v8::Local<v8::String> v8Source;
- return script()->Source().ToLocal(&v8Source) ? v8Source->Length() : 0;
+ return script->Source().ToLocal(&v8Source) ? v8Source->Length() : 0;
}
const String16& sourceMappingURL() const override {
@@ -290,6 +295,12 @@ class ActualScript : public V8DebuggerScript {
} else {
m_endColumn = source_length + m_startColumn;
}
+ } else if (script->IsWasm()) {
+ DCHECK_EQ(0, m_startLine);
+ DCHECK_EQ(0, m_startColumn);
+ m_endLine = 0;
+ m_endColumn = static_cast<int>(
+ v8::debug::WasmScript::Cast(*script)->Bytecode().size());
} else {
m_endLine = m_startLine;
m_endColumn = m_startColumn;
diff --git a/chromium/v8/src/inspector/v8-inspector-impl.cc b/chromium/v8/src/inspector/v8-inspector-impl.cc
index 18c592ef118..7bb8029a3d3 100644
--- a/chromium/v8/src/inspector/v8-inspector-impl.cc
+++ b/chromium/v8/src/inspector/v8-inspector-impl.cc
@@ -131,8 +131,7 @@ void V8InspectorImpl::unmuteExceptions(int contextGroupId) {
V8ConsoleMessageStorage* V8InspectorImpl::ensureConsoleMessageStorage(
int contextGroupId) {
- ConsoleStorageMap::iterator storageIt =
- m_consoleStorageMap.find(contextGroupId);
+ auto storageIt = m_consoleStorageMap.find(contextGroupId);
if (storageIt == m_consoleStorageMap.end())
storageIt = m_consoleStorageMap
.insert(std::make_pair(
@@ -144,8 +143,7 @@ V8ConsoleMessageStorage* V8InspectorImpl::ensureConsoleMessageStorage(
}
bool V8InspectorImpl::hasConsoleMessageStorage(int contextGroupId) {
- ConsoleStorageMap::iterator storageIt =
- m_consoleStorageMap.find(contextGroupId);
+ auto storageIt = m_consoleStorageMap.find(contextGroupId);
return storageIt != m_consoleStorageMap.end();
}
@@ -174,10 +172,10 @@ InspectedContext* V8InspectorImpl::getContext(int groupId,
int contextId) const {
if (!groupId || !contextId) return nullptr;
- ContextsByGroupMap::const_iterator contextGroupIt = m_contexts.find(groupId);
+ auto contextGroupIt = m_contexts.find(groupId);
if (contextGroupIt == m_contexts.end()) return nullptr;
- ContextByIdMap::iterator contextIt = contextGroupIt->second->find(contextId);
+ auto contextIt = contextGroupIt->second->find(contextId);
if (contextIt == contextGroupIt->second->end()) return nullptr;
return contextIt->second.get();
@@ -194,10 +192,10 @@ v8::MaybeLocal<v8::Context> V8InspectorImpl::contextById(int contextId) {
void V8InspectorImpl::contextCreated(const V8ContextInfo& info) {
int contextId = ++m_lastContextId;
- InspectedContext* context = new InspectedContext(this, info, contextId);
+ auto* context = new InspectedContext(this, info, contextId);
m_contextIdToGroupIdMap[contextId] = info.contextGroupId;
- ContextsByGroupMap::iterator contextIt = m_contexts.find(info.contextGroupId);
+ auto contextIt = m_contexts.find(info.contextGroupId);
if (contextIt == m_contexts.end())
contextIt = m_contexts
.insert(std::make_pair(
@@ -224,7 +222,7 @@ void V8InspectorImpl::contextDestroyed(v8::Local<v8::Context> context) {
void V8InspectorImpl::contextCollected(int groupId, int contextId) {
m_contextIdToGroupIdMap.erase(contextId);
- ConsoleStorageMap::iterator storageIt = m_consoleStorageMap.find(groupId);
+ auto storageIt = m_consoleStorageMap.find(groupId);
if (storageIt != m_consoleStorageMap.end())
storageIt->second->contextDestroyed(contextId);
@@ -330,7 +328,7 @@ void V8InspectorImpl::allAsyncTasksCanceled() {
V8Inspector::Counters::Counters(v8::Isolate* isolate) : m_isolate(isolate) {
CHECK(m_isolate);
- V8InspectorImpl* inspector =
+ auto* inspector =
static_cast<V8InspectorImpl*>(v8::debug::GetInspector(m_isolate));
CHECK(inspector);
CHECK(!inspector->m_counters);
@@ -339,7 +337,7 @@ V8Inspector::Counters::Counters(v8::Isolate* isolate) : m_isolate(isolate) {
}
V8Inspector::Counters::~Counters() {
- V8InspectorImpl* inspector =
+ auto* inspector =
static_cast<V8InspectorImpl*>(v8::debug::GetInspector(m_isolate));
CHECK(inspector);
inspector->m_counters = nullptr;
diff --git a/chromium/v8/src/inspector/v8-runtime-agent-impl.cc b/chromium/v8/src/inspector/v8-runtime-agent-impl.cc
index 5a2a49154c1..ac505be5ccc 100644
--- a/chromium/v8/src/inspector/v8-runtime-agent-impl.cc
+++ b/chromium/v8/src/inspector/v8-runtime-agent-impl.cc
@@ -237,6 +237,7 @@ void V8RuntimeAgentImpl::evaluate(
Maybe<bool> generatePreview, Maybe<bool> userGesture,
Maybe<bool> maybeAwaitPromise, Maybe<bool> throwOnSideEffect,
Maybe<double> timeout, Maybe<bool> disableBreaks, Maybe<bool> maybeReplMode,
+ Maybe<bool> allowUnsafeEvalBlockedByCSP,
std::unique_ptr<EvaluateCallback> callback) {
TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("devtools.timeline"),
"EvaluateScript");
@@ -262,8 +263,10 @@ void V8RuntimeAgentImpl::evaluate(
const bool replMode = maybeReplMode.fromMaybe(false);
- // Temporarily enable allow evals for inspector.
- scope.allowCodeGenerationFromStrings();
+ if (allowUnsafeEvalBlockedByCSP.fromMaybe(true)) {
+ // Temporarily enable allow evals for inspector.
+ scope.allowCodeGenerationFromStrings();
+ }
v8::MaybeLocal<v8::Value> maybeResultValue;
{
V8InspectorImpl::EvaluateScope evaluateScope(scope);
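
Runtime.evaluate gains an allowUnsafeEvalBlockedByCSP parameter; when the client omits it, fromMaybe(true) preserves the old behavior of temporarily lifting the CSP eval restriction. A sketch of the defaulting, with std::optional standing in for protocol::Maybe<bool> (an assumed analogy, not the real inspector type):

#include <cstdio>
#include <optional>

void EvaluateSketch(std::optional<bool> allow_unsafe_eval_blocked_by_csp) {
  if (allow_unsafe_eval_blocked_by_csp.value_or(true)) {
    std::puts("allowCodeGenerationFromStrings()");  // Old default path.
  } else {
    std::puts("respect the page's CSP");            // New opt-out.
  }
}

int main() {
  EvaluateSketch(std::nullopt);  // Absent parameter: defaults to true.
  EvaluateSketch(false);         // Explicit opt-out by newer clients.
}
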
diff --git a/chromium/v8/src/inspector/v8-runtime-agent-impl.h b/chromium/v8/src/inspector/v8-runtime-agent-impl.h
index c99cfcef195..d0491eac5ab 100644
--- a/chromium/v8/src/inspector/v8-runtime-agent-impl.h
+++ b/chromium/v8/src/inspector/v8-runtime-agent-impl.h
@@ -68,7 +68,7 @@ class V8RuntimeAgentImpl : public protocol::Runtime::Backend {
Maybe<bool> generatePreview, Maybe<bool> userGesture,
Maybe<bool> awaitPromise, Maybe<bool> throwOnSideEffect,
Maybe<double> timeout, Maybe<bool> disableBreaks,
- Maybe<bool> replMode,
+ Maybe<bool> replMode, Maybe<bool> allowUnsafeEvalBlockedByCSP,
std::unique_ptr<EvaluateCallback>) override;
void awaitPromise(const String16& promiseObjectId, Maybe<bool> returnByValue,
Maybe<bool> generatePreview,
diff --git a/chromium/v8/src/inspector/value-mirror.cc b/chromium/v8/src/inspector/value-mirror.cc
index 78e7417b8fb..2d830026114 100644
--- a/chromium/v8/src/inspector/value-mirror.cc
+++ b/chromium/v8/src/inspector/value-mirror.cc
@@ -39,6 +39,26 @@ V8InternalValueType v8InternalValueTypeFrom(v8::Local<v8::Context> context,
return inspectedContext->getInternalType(value.As<v8::Object>());
}
+template <typename ResultType>
+ResultType unpackWasmValue(v8::Local<v8::Context> context,
+ v8::Local<v8::Array> array) {
+ ResultType result;
+ constexpr int kSize = sizeof(result);
+ uint8_t buffer[kSize];
+ for (int i = 0; i < kSize; i++) {
+ v8::Local<v8::Int32> i32 =
+ array->Get(context, i).ToLocalChecked().As<v8::Int32>();
+ buffer[i] = static_cast<uint8_t>(i32->Value());
+ }
+ memcpy(&result, buffer, kSize);
+ return result;
+}
+
+// Partial list of Wasm's ValueType, copied here to avoid including internal
+// header. Using an unscoped enumeration here to allow implicit conversions from
+// int. Keep in sync with ValueType::Kind in wasm/value-type.h.
+enum WasmValueType { kStmt, kI32, kI64, kF32, kF64, kS128, kExternRef };
+
Response toProtocolValue(v8::Local<v8::Context> context,
v8::Local<v8::Value> value, int maxDepth,
std::unique_ptr<protocol::Value>* result) {
@@ -128,6 +148,49 @@ Response toProtocolValue(v8::Local<v8::Context> context,
*result = std::move(jsonObject);
return Response::Success();
}
+
+ if (v8::debug::WasmValue::IsWasmValue(value)) {
+ auto wasmValue = value.As<v8::debug::WasmValue>();
+
+ // Convert serializable Wasm values (i32, f32, f64) into protocol values.
+ // Not all i64 values are representable by double, so always represent it as
+ // a String here.
+ switch (wasmValue->value_type()) {
+ case kI32: {
+ *result = protocol::FundamentalValue::create(
+ unpackWasmValue<int32_t>(context, wasmValue->bytes()));
+ break;
+ }
+ case kI64: {
+ *result = protocol::StringValue::create(String16::fromInteger64(
+ unpackWasmValue<int64_t>(context, wasmValue->bytes())));
+ break;
+ }
+ case kF32: {
+ *result = protocol::FundamentalValue::create(
+ unpackWasmValue<float>(context, wasmValue->bytes()));
+ break;
+ }
+ case kF64: {
+ *result = protocol::FundamentalValue::create(
+ unpackWasmValue<double>(context, wasmValue->bytes()));
+ break;
+ }
+ case kExternRef: {
+ std::unique_ptr<protocol::Value> externrefValue;
+ Response response = toProtocolValue(context, wasmValue->ref(), maxDepth,
+ &externrefValue);
+ if (!response.IsSuccess()) return response;
+ *result = std::move(externrefValue);
+ break;
+ }
+ default: {
+ UNIMPLEMENTED();
+ }
+ }
+ return Response::Success();
+ }
+
return Response::ServerError("Object couldn't be returned by value");
}
@@ -398,6 +461,112 @@ class PrimitiveValueMirror final : public ValueMirror {
String16 m_subtype;
};
+class WasmValueMirror final : public ValueMirror {
+ public:
+ explicit WasmValueMirror(v8::Local<v8::debug::WasmValue> value)
+ : m_value(value) {}
+
+ v8::Local<v8::Value> v8Value() const override { return m_value; }
+
+ Response buildRemoteObject(
+ v8::Local<v8::Context> context, WrapMode mode,
+ std::unique_ptr<RemoteObject>* result) const override {
+ bool serializable;
+ String16 descriptionValue = description(context, &serializable);
+ *result = RemoteObject::create()
+ .setType(RemoteObject::TypeEnum::Wasm)
+ .setSubtype(subtype())
+ .setDescription(descriptionValue)
+ .build();
+ if (serializable) {
+ std::unique_ptr<protocol::Value> protocolValue;
+ toProtocolValue(context, m_value, &protocolValue);
+ (*result)->setValue(std::move(protocolValue));
+ } else {
+ (*result)->setUnserializableValue(descriptionValue);
+ }
+ return Response::Success();
+ }
+
+ void buildPropertyPreview(
+ v8::Local<v8::Context> context, const String16& name,
+ std::unique_ptr<PropertyPreview>* result) const override {
+ bool serializable;
+ *result = PropertyPreview::create()
+ .setName(name)
+ .setType(RemoteObject::TypeEnum::Wasm)
+ .setSubtype(subtype())
+ .setValue(description(context, &serializable))
+ .build();
+ }
+
+ void buildEntryPreview(
+ v8::Local<v8::Context> context, int* nameLimit, int* indexLimit,
+ std::unique_ptr<ObjectPreview>* preview) const override {
+ bool serializable;
+ *preview =
+ ObjectPreview::create()
+ .setType(RemoteObject::TypeEnum::Wasm)
+ .setSubtype(subtype())
+ .setDescription(description(context, &serializable))
+ .setOverflow(false)
+ .setProperties(std::make_unique<protocol::Array<PropertyPreview>>())
+ .build();
+ }
+
+ private:
+ String16 subtype() const {
+ switch (m_value->value_type()) {
+ case kI32:
+ return RemoteObject::SubtypeEnum::I32;
+ case kI64:
+ return RemoteObject::SubtypeEnum::I64;
+ case kF32:
+ return RemoteObject::SubtypeEnum::F32;
+ case kF64:
+ return RemoteObject::SubtypeEnum::F64;
+ case kExternRef:
+ return RemoteObject::SubtypeEnum::Externref;
+ default:
+ UNREACHABLE();
+ }
+ }
+
+ String16 description(v8::Local<v8::Context> context,
+ bool* serializable) const {
+ *serializable = true;
+ switch (m_value->value_type()) {
+ case kI32: {
+ return String16::fromInteger(
+ unpackWasmValue<int32_t>(context, m_value->bytes()));
+ }
+ case kI64: {
+ *serializable = false;
+ return String16::fromInteger64(
+ unpackWasmValue<int64_t>(context, m_value->bytes()));
+ }
+ case kF32: {
+ return String16::fromDouble(
+ unpackWasmValue<float>(context, m_value->bytes()));
+ }
+ case kF64: {
+ return String16::fromDouble(
+ unpackWasmValue<double>(context, m_value->bytes()));
+ }
+ case kExternRef: {
+ return descriptionForObject(context->GetIsolate(),
+ m_value->ref().As<v8::Object>());
+ }
+ default: {
+ *serializable = false;
+ return String16("Unknown");
+ }
+ }
+ }
+
+ v8::Local<v8::debug::WasmValue> m_value;
+};
+
class NumberMirror final : public ValueMirror {
public:
explicit NumberMirror(v8::Local<v8::Number> value) : m_value(value) {}
@@ -727,11 +896,11 @@ struct EntryMirror {
class PreviewPropertyAccumulator : public ValueMirror::PropertyAccumulator {
public:
- PreviewPropertyAccumulator(const std::vector<String16>& blacklist,
+ PreviewPropertyAccumulator(const std::vector<String16>& blocklist,
int skipIndex, int* nameLimit, int* indexLimit,
bool* overflow,
std::vector<PropertyMirror>* mirrors)
- : m_blacklist(blacklist),
+ : m_blocklist(blocklist),
m_skipIndex(skipIndex),
m_nameLimit(nameLimit),
m_indexLimit(indexLimit),
@@ -745,8 +914,8 @@ class PreviewPropertyAccumulator : public ValueMirror::PropertyAccumulator {
return true;
}
if (!mirror.isOwn) return true;
- if (std::find(m_blacklist.begin(), m_blacklist.end(), mirror.name) !=
- m_blacklist.end()) {
+ if (std::find(m_blocklist.begin(), m_blocklist.end(), mirror.name) !=
+ m_blocklist.end()) {
return true;
}
if (mirror.isIndex && m_skipIndex > 0) {
@@ -764,7 +933,7 @@ class PreviewPropertyAccumulator : public ValueMirror::PropertyAccumulator {
}
private:
- std::vector<String16> m_blacklist;
+ std::vector<String16> m_blocklist;
int m_skipIndex;
int* m_nameLimit;
int* m_indexLimit;
@@ -776,27 +945,27 @@ bool getPropertiesForPreview(v8::Local<v8::Context> context,
v8::Local<v8::Object> object, int* nameLimit,
int* indexLimit, bool* overflow,
std::vector<PropertyMirror>* properties) {
- std::vector<String16> blacklist;
+ std::vector<String16> blocklist;
size_t length = 0;
if (object->IsArray() || isArrayLike(context, object, &length) ||
object->IsStringObject()) {
- blacklist.push_back("length");
+ blocklist.push_back("length");
} else {
auto clientSubtype = clientFor(context)->valueSubtype(object);
if (clientSubtype && toString16(clientSubtype->string()) == "array") {
- blacklist.push_back("length");
+ blocklist.push_back("length");
}
}
if (object->IsArrayBuffer() || object->IsSharedArrayBuffer()) {
- blacklist.push_back("[[Int8Array]]");
- blacklist.push_back("[[Uint8Array]]");
- blacklist.push_back("[[Int16Array]]");
- blacklist.push_back("[[Int32Array]]");
+ blocklist.push_back("[[Int8Array]]");
+ blocklist.push_back("[[Uint8Array]]");
+ blocklist.push_back("[[Int16Array]]");
+ blocklist.push_back("[[Int32Array]]");
}
int skipIndex = object->IsStringObject()
? object.As<v8::StringObject>()->ValueOf()->Length() + 1
: -1;
- PreviewPropertyAccumulator accumulator(blacklist, skipIndex, nameLimit,
+ PreviewPropertyAccumulator accumulator(blocklist, skipIndex, nameLimit,
indexLimit, overflow, properties);
return ValueMirror::getProperties(context, object, false, false,
&accumulator);
@@ -808,20 +977,20 @@ void getInternalPropertiesForPreview(
std::vector<InternalPropertyMirror>* properties) {
std::vector<InternalPropertyMirror> mirrors;
ValueMirror::getInternalProperties(context, object, &mirrors);
- std::vector<String16> whitelist;
+ std::vector<String16> allowlist;
if (object->IsBooleanObject() || object->IsNumberObject() ||
object->IsStringObject() || object->IsSymbolObject() ||
object->IsBigIntObject()) {
- whitelist.emplace_back("[[PrimitiveValue]]");
+ allowlist.emplace_back("[[PrimitiveValue]]");
} else if (object->IsPromise()) {
- whitelist.emplace_back("[[PromiseStatus]]");
- whitelist.emplace_back("[[PromiseValue]]");
+ allowlist.emplace_back("[[PromiseState]]");
+ allowlist.emplace_back("[[PromiseResult]]");
} else if (object->IsGeneratorObject()) {
- whitelist.emplace_back("[[GeneratorStatus]]");
+ allowlist.emplace_back("[[GeneratorState]]");
}
for (auto& mirror : mirrors) {
- if (std::find(whitelist.begin(), whitelist.end(), mirror.name) ==
- whitelist.end()) {
+ if (std::find(allowlist.begin(), allowlist.end(), mirror.name) ==
+ allowlist.end()) {
continue;
}
if (!*nameLimit) {
@@ -839,7 +1008,6 @@ void getPrivatePropertiesForPreview(
protocol::Array<PropertyPreview>* privateProperties) {
std::vector<PrivatePropertyMirror> mirrors =
ValueMirror::getPrivateProperties(context, object);
- std::vector<String16> whitelist;
for (auto& mirror : mirrors) {
std::unique_ptr<PropertyPreview> propertyPreview;
if (mirror.value) {
@@ -1603,8 +1771,7 @@ std::unique_ptr<ValueMirror> ValueMirror::create(v8::Local<v8::Context> context,
return std::make_unique<SymbolMirror>(value.As<v8::Symbol>());
}
if (v8::debug::WasmValue::IsWasmValue(value)) {
- // TODO(v8:10347) WasmValue is not created anywhere yet.
- UNIMPLEMENTED();
+ return std::make_unique<WasmValueMirror>(value.As<v8::debug::WasmValue>());
}
auto clientSubtype = (value->IsUndefined() || value->IsObject())
? clientFor(context)->valueSubtype(value)
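
unpackWasmValue rebuilds a typed value from the byte array the debug API hands over; memcpy into the destination type is the portable way to do that reinterpretation in C++. A standalone sketch, assuming a little-endian host for the literal byte patterns:

#include <cstdint>
#include <cstdio>
#include <cstring>

template <typename ResultType>
ResultType UnpackBytes(const uint8_t* bytes) {
  ResultType result;
  std::memcpy(&result, bytes, sizeof(result));  // Portable type punning.
  return result;
}

int main() {
  const uint8_t f32_bytes[4] = {0x00, 0x00, 0x80, 0x3f};  // 1.0f.
  const uint8_t i64_bytes[8] = {0xff, 0xff, 0xff, 0xff,
                                0xff, 0xff, 0xff, 0x7f};  // INT64_MAX.
  std::printf("%f\n", UnpackBytes<float>(f32_bytes));
  // i64 values can exceed the range doubles represent exactly, which is why
  // the protocol layer serializes them as strings rather than JSON numbers.
  std::printf("%lld\n",
              static_cast<long long>(UnpackBytes<int64_t>(i64_bytes)));
}
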
diff --git a/chromium/v8/src/interpreter/bytecode-array-writer.cc b/chromium/v8/src/interpreter/bytecode-array-writer.cc
index a1b9d9d5f67..34a19b4b5ff 100644
--- a/chromium/v8/src/interpreter/bytecode-array-writer.cc
+++ b/chromium/v8/src/interpreter/bytecode-array-writer.cc
@@ -27,7 +27,7 @@ BytecodeArrayWriter::BytecodeArrayWriter(
SourcePositionTableBuilder::RecordingMode source_position_mode)
: bytecodes_(zone),
unbound_jumps_(0),
- source_position_table_builder_(source_position_mode),
+ source_position_table_builder_(zone, source_position_mode),
constant_array_builder_(constant_array_builder),
last_bytecode_(Bytecode::kIllegal),
last_bytecode_offset_(0),
diff --git a/chromium/v8/src/interpreter/bytecode-generator.cc b/chromium/v8/src/interpreter/bytecode-generator.cc
index 4a1c045927e..9a758370635 100644
--- a/chromium/v8/src/interpreter/bytecode-generator.cc
+++ b/chromium/v8/src/interpreter/bytecode-generator.cc
@@ -4886,7 +4886,8 @@ void BytecodeGenerator::VisitCall(Call* expr) {
break;
}
case Call::NAMED_OPTIONAL_CHAIN_PROPERTY_CALL:
- case Call::KEYED_OPTIONAL_CHAIN_PROPERTY_CALL: {
+ case Call::KEYED_OPTIONAL_CHAIN_PROPERTY_CALL:
+ case Call::PRIVATE_OPTIONAL_CHAIN_CALL: {
OptionalChain* chain = callee_expr->AsOptionalChain();
Property* property = chain->expression()->AsProperty();
BuildOptionalChain([&]() {
diff --git a/chromium/v8/src/interpreter/interpreter-assembler.cc b/chromium/v8/src/interpreter/interpreter-assembler.cc
index 49adee5bf75..1d15ed77a09 100644
--- a/chromium/v8/src/interpreter/interpreter-assembler.cc
+++ b/chromium/v8/src/interpreter/interpreter-assembler.cc
@@ -683,8 +683,9 @@ TNode<Uint32T> InterpreterAssembler::BytecodeOperandIntrinsicId(
TNode<Object> InterpreterAssembler::LoadConstantPoolEntry(TNode<WordT> index) {
TNode<FixedArray> constant_pool = CAST(LoadObjectField(
BytecodeArrayTaggedPointer(), BytecodeArray::kConstantPoolOffset));
- return UnsafeLoadFixedArrayElement(
- constant_pool, UncheckedCast<IntPtrT>(index), LoadSensitivity::kCritical);
+ return UnsafeLoadFixedArrayElement(constant_pool,
+ UncheckedCast<IntPtrT>(index), 0,
+ LoadSensitivity::kCritical);
}
TNode<IntPtrT> InterpreterAssembler::LoadAndUntagConstantPoolEntry(
diff --git a/chromium/v8/src/interpreter/interpreter-generator.cc b/chromium/v8/src/interpreter/interpreter-generator.cc
index 9fef9ac0a02..481c90ab0f8 100644
--- a/chromium/v8/src/interpreter/interpreter-generator.cc
+++ b/chromium/v8/src/interpreter/interpreter-generator.cc
@@ -14,6 +14,7 @@
#include "src/ic/accessor-assembler.h"
#include "src/ic/binary-op-assembler.h"
#include "src/ic/ic.h"
+#include "src/ic/unary-op-assembler.h"
#include "src/interpreter/bytecode-flags.h"
#include "src/interpreter/bytecodes.h"
#include "src/interpreter/interpreter-assembler.h"
@@ -1083,38 +1084,17 @@ IGNITION_HANDLER(BitwiseAndSmi, InterpreterBitwiseBinaryOpAssembler) {
//
// Perform bitwise-not on the accumulator.
IGNITION_HANDLER(BitwiseNot, InterpreterAssembler) {
- TNode<Object> operand = GetAccumulator();
+ TNode<Object> value = GetAccumulator();
+ TNode<Context> context = GetContext();
TNode<UintPtrT> slot_index = BytecodeOperandIdx(0);
TNode<HeapObject> maybe_feedback_vector = LoadFeedbackVector();
- TNode<Context> context = GetContext();
- TVARIABLE(Word32T, var_word32);
- TVARIABLE(Smi, var_feedback);
- TVARIABLE(BigInt, var_bigint);
- Label if_number(this), if_bigint(this);
- TaggedToWord32OrBigIntWithFeedback(context, operand, &if_number, &var_word32,
- &if_bigint, &var_bigint, &var_feedback);
+ UnaryOpAssembler unary_op_asm(state());
+ TNode<Object> result = unary_op_asm.Generate_BitwiseNotWithFeedback(
+ context, value, slot_index, maybe_feedback_vector);
- // Number case.
- BIND(&if_number);
- TNode<Number> result =
- ChangeInt32ToTagged(Signed(Word32BitwiseNot(var_word32.value())));
- TNode<Smi> result_type = SelectSmiConstant(
- TaggedIsSmi(result), BinaryOperationFeedback::kSignedSmall,
- BinaryOperationFeedback::kNumber);
- UpdateFeedback(SmiOr(result_type, var_feedback.value()),
- maybe_feedback_vector, slot_index);
SetAccumulator(result);
Dispatch();
-
- // BigInt case.
- BIND(&if_bigint);
- UpdateFeedback(SmiConstant(BinaryOperationFeedback::kBigInt),
- maybe_feedback_vector, slot_index);
- SetAccumulator(CallRuntime(Runtime::kBigIntUnaryOp, context,
- var_bigint.value(),
- SmiConstant(Operation::kBitwiseNot)));
- Dispatch();
}
// ShiftLeftSmi <imm>
@@ -1144,162 +1124,22 @@ IGNITION_HANDLER(ShiftRightLogicalSmi, InterpreterBitwiseBinaryOpAssembler) {
BitwiseBinaryOpWithSmi(Operation::kShiftRightLogical);
}
-class UnaryNumericOpAssembler : public InterpreterAssembler {
- public:
- UnaryNumericOpAssembler(CodeAssemblerState* state, Bytecode bytecode,
- OperandScale operand_scale)
- : InterpreterAssembler(state, bytecode, operand_scale) {}
-
- virtual ~UnaryNumericOpAssembler() = default;
-
- // Must return a tagged value.
- virtual TNode<Number> SmiOp(TNode<Smi> smi_value,
- TVariable<Smi>* var_feedback, Label* do_float_op,
- TVariable<Float64T>* var_float) = 0;
- // Must return a Float64 value.
- virtual TNode<Float64T> FloatOp(TNode<Float64T> float_value) = 0;
- // Must return a tagged value.
- virtual TNode<HeapObject> BigIntOp(TNode<HeapObject> bigint_value) = 0;
-
- void UnaryOpWithFeedback() {
- TVARIABLE(Object, var_value, GetAccumulator());
- TVARIABLE(Object, var_result);
- TVARIABLE(Float64T, var_float_value);
- TVARIABLE(Smi, var_feedback, SmiConstant(BinaryOperationFeedback::kNone));
- Label start(this, {&var_value, &var_feedback}), end(this);
- Label do_float_op(this, &var_float_value);
- Goto(&start);
- // We might have to try again after ToNumeric conversion.
- BIND(&start);
- {
- Label if_smi(this), if_heapnumber(this), if_oddball(this);
- Label if_bigint(this, Label::kDeferred);
- Label if_other(this, Label::kDeferred);
- TNode<Object> value = var_value.value();
- GotoIf(TaggedIsSmi(value), &if_smi);
-
- TNode<HeapObject> value_heap_object = CAST(value);
- TNode<Map> map = LoadMap(value_heap_object);
- GotoIf(IsHeapNumberMap(map), &if_heapnumber);
- TNode<Uint16T> instance_type = LoadMapInstanceType(map);
- GotoIf(IsBigIntInstanceType(instance_type), &if_bigint);
- Branch(InstanceTypeEqual(instance_type, ODDBALL_TYPE), &if_oddball,
- &if_other);
-
- BIND(&if_smi);
- {
- var_result =
- SmiOp(CAST(value), &var_feedback, &do_float_op, &var_float_value);
- Goto(&end);
- }
-
- BIND(&if_heapnumber);
- {
- var_float_value = LoadHeapNumberValue(value_heap_object);
- Goto(&do_float_op);
- }
-
- BIND(&if_bigint);
- {
- var_result = BigIntOp(value_heap_object);
- CombineFeedback(&var_feedback, BinaryOperationFeedback::kBigInt);
- Goto(&end);
- }
-
- BIND(&if_oddball);
- {
- // We do not require an Or with earlier feedback here because once we
- // convert the value to a number, we cannot reach this path. We can
- // only reach this path on the first pass when the feedback is kNone.
- CSA_ASSERT(this, SmiEqual(var_feedback.value(),
- SmiConstant(BinaryOperationFeedback::kNone)));
- OverwriteFeedback(&var_feedback,
- BinaryOperationFeedback::kNumberOrOddball);
- var_value =
- LoadObjectField(value_heap_object, Oddball::kToNumberOffset);
- Goto(&start);
- }
-
- BIND(&if_other);
- {
- // We do not require an Or with earlier feedback here because once we
- // convert the value to a number, we cannot reach this path. We can
- // only reach this path on the first pass when the feedback is kNone.
- CSA_ASSERT(this, SmiEqual(var_feedback.value(),
- SmiConstant(BinaryOperationFeedback::kNone)));
- OverwriteFeedback(&var_feedback, BinaryOperationFeedback::kAny);
- var_value = CallBuiltin(Builtins::kNonNumberToNumeric, GetContext(),
- value_heap_object);
- Goto(&start);
- }
- }
-
- BIND(&do_float_op);
- {
- CombineFeedback(&var_feedback, BinaryOperationFeedback::kNumber);
- var_result =
- AllocateHeapNumberWithValue(FloatOp(var_float_value.value()));
- Goto(&end);
- }
-
- BIND(&end);
- TNode<UintPtrT> slot_index = BytecodeOperandIdx(0);
- TNode<HeapObject> maybe_feedback_vector = LoadFeedbackVector();
- UpdateFeedback(var_feedback.value(), maybe_feedback_vector, slot_index);
- SetAccumulator(var_result.value());
- Dispatch();
- }
-};
-
-class NegateAssemblerImpl : public UnaryNumericOpAssembler {
- public:
- explicit NegateAssemblerImpl(CodeAssemblerState* state, Bytecode bytecode,
- OperandScale operand_scale)
- : UnaryNumericOpAssembler(state, bytecode, operand_scale) {}
-
- TNode<Number> SmiOp(TNode<Smi> smi_value, TVariable<Smi>* var_feedback,
- Label* do_float_op,
- TVariable<Float64T>* var_float) override {
- TVARIABLE(Number, var_result);
- Label if_zero(this), if_min_smi(this), end(this);
- // Return -0 if operand is 0.
- GotoIf(SmiEqual(smi_value, SmiConstant(0)), &if_zero);
-
- // Special-case the minimum Smi to avoid overflow.
- GotoIf(SmiEqual(smi_value, SmiConstant(Smi::kMinValue)), &if_min_smi);
-
- // Else simply subtract operand from 0.
- CombineFeedback(var_feedback, BinaryOperationFeedback::kSignedSmall);
- var_result = SmiSub(SmiConstant(0), smi_value);
- Goto(&end);
-
- BIND(&if_zero);
- CombineFeedback(var_feedback, BinaryOperationFeedback::kNumber);
- var_result = MinusZeroConstant();
- Goto(&end);
-
- BIND(&if_min_smi);
- *var_float = SmiToFloat64(smi_value);
- Goto(do_float_op);
-
- BIND(&end);
- return var_result.value();
- }
-
- TNode<Float64T> FloatOp(TNode<Float64T> float_value) override {
- return Float64Neg(float_value);
- }
-
- TNode<HeapObject> BigIntOp(TNode<HeapObject> bigint_value) override {
- return CAST(CallRuntime(Runtime::kBigIntUnaryOp, GetContext(), bigint_value,
- SmiConstant(Operation::kNegate)));
- }
-};
-
// Negate <feedback_slot>
//
// Perform arithmetic negation on the accumulator.
-IGNITION_HANDLER(Negate, NegateAssemblerImpl) { UnaryOpWithFeedback(); }
+IGNITION_HANDLER(Negate, InterpreterAssembler) {
+ TNode<Object> value = GetAccumulator();
+ TNode<Context> context = GetContext();
+ TNode<UintPtrT> slot_index = BytecodeOperandIdx(0);
+ TNode<HeapObject> maybe_feedback_vector = LoadFeedbackVector();
+
+ UnaryOpAssembler unary_op_asm(state());
+ TNode<Object> result = unary_op_asm.Generate_NegateWithFeedback(
+ context, value, slot_index, maybe_feedback_vector);
+
+ SetAccumulator(result);
+ Dispatch();
+}
// ToName <dst>
//
@@ -1345,72 +1185,39 @@ IGNITION_HANDLER(ToString, InterpreterAssembler) {
Dispatch();
}
-class IncDecAssembler : public UnaryNumericOpAssembler {
- public:
- explicit IncDecAssembler(CodeAssemblerState* state, Bytecode bytecode,
- OperandScale operand_scale)
- : UnaryNumericOpAssembler(state, bytecode, operand_scale) {}
-
- Operation op() {
- DCHECK(op_ == Operation::kIncrement || op_ == Operation::kDecrement);
- return op_;
- }
-
- TNode<Number> SmiOp(TNode<Smi> value, TVariable<Smi>* var_feedback,
- Label* do_float_op,
- TVariable<Float64T>* var_float) override {
- TNode<Smi> one = SmiConstant(1);
- Label if_overflow(this), if_notoverflow(this);
- TNode<Smi> result = op() == Operation::kIncrement
- ? TrySmiAdd(value, one, &if_overflow)
- : TrySmiSub(value, one, &if_overflow);
- Goto(&if_notoverflow);
-
- BIND(&if_overflow);
- {
- *var_float = SmiToFloat64(value);
- Goto(do_float_op);
- }
-
- BIND(&if_notoverflow);
- CombineFeedback(var_feedback, BinaryOperationFeedback::kSignedSmall);
- return result;
- }
-
- TNode<Float64T> FloatOp(TNode<Float64T> float_value) override {
- return op() == Operation::kIncrement
- ? Float64Add(float_value, Float64Constant(1.0))
- : Float64Sub(float_value, Float64Constant(1.0));
- }
-
- TNode<HeapObject> BigIntOp(TNode<HeapObject> bigint_value) override {
- return CAST(CallRuntime(Runtime::kBigIntUnaryOp, GetContext(), bigint_value,
- SmiConstant(op())));
- }
-
- void IncWithFeedback() {
- op_ = Operation::kIncrement;
- UnaryOpWithFeedback();
- }
-
- void DecWithFeedback() {
- op_ = Operation::kDecrement;
- UnaryOpWithFeedback();
- }
-
- private:
- Operation op_ = Operation::kEqual; // Dummy initialization.
-};
-
// Inc
//
// Increments value in the accumulator by one.
-IGNITION_HANDLER(Inc, IncDecAssembler) { IncWithFeedback(); }
+IGNITION_HANDLER(Inc, InterpreterAssembler) {
+ TNode<Object> value = GetAccumulator();
+ TNode<Context> context = GetContext();
+ TNode<UintPtrT> slot_index = BytecodeOperandIdx(0);
+ TNode<HeapObject> maybe_feedback_vector = LoadFeedbackVector();
+
+ UnaryOpAssembler unary_op_asm(state());
+ TNode<Object> result = unary_op_asm.Generate_IncrementWithFeedback(
+ context, value, slot_index, maybe_feedback_vector);
+
+ SetAccumulator(result);
+ Dispatch();
+}
// Dec
//
// Decrements value in the accumulator by one.
-IGNITION_HANDLER(Dec, IncDecAssembler) { DecWithFeedback(); }
+IGNITION_HANDLER(Dec, InterpreterAssembler) {
+ TNode<Object> value = GetAccumulator();
+ TNode<Context> context = GetContext();
+ TNode<UintPtrT> slot_index = BytecodeOperandIdx(0);
+ TNode<HeapObject> maybe_feedback_vector = LoadFeedbackVector();
+
+ UnaryOpAssembler unary_op_asm(state());
+ TNode<Object> result = unary_op_asm.Generate_DecrementWithFeedback(
+ context, value, slot_index, maybe_feedback_vector);
+
+ SetAccumulator(result);
+ Dispatch();
+}
// ToBooleanLogicalNot
//
@@ -2602,10 +2409,9 @@ IGNITION_HANDLER(CloneObject, InterpreterAssembler) {
TNode<HeapObject> maybe_feedback_vector = LoadFeedbackVector();
TNode<Context> context = GetContext();
- TVARIABLE(Object, var_result);
- var_result = CallBuiltin(Builtins::kCloneObjectIC, context, source, smi_flags,
- slot, maybe_feedback_vector);
- SetAccumulator(var_result.value());
+ TNode<Object> result = CallBuiltin(Builtins::kCloneObjectIC, context, source,
+ smi_flags, slot, maybe_feedback_vector);
+ SetAccumulator(result);
Dispatch();
}
@@ -2615,41 +2421,18 @@ IGNITION_HANDLER(CloneObject, InterpreterAssembler) {
// accumulator, creating and caching the site object on-demand as per the
// specification.
IGNITION_HANDLER(GetTemplateObject, InterpreterAssembler) {
- TNode<HeapObject> maybe_feedback_vector = LoadFeedbackVector();
+ TNode<Context> context = GetContext();
+ TNode<JSFunction> closure = CAST(LoadRegister(Register::function_closure()));
+ TNode<SharedFunctionInfo> shared_info = LoadObjectField<SharedFunctionInfo>(
+ closure, JSFunction::kSharedFunctionInfoOffset);
+ TNode<Object> description = LoadConstantPoolEntryAtOperandIndex(0);
TNode<UintPtrT> slot = BytecodeOperandIdx(1);
-
- Label call_runtime(this, Label::kDeferred);
- GotoIf(IsUndefined(maybe_feedback_vector), &call_runtime);
-
- TNode<Object> cached_value =
- CAST(LoadFeedbackVectorSlot(CAST(maybe_feedback_vector), slot));
-
- GotoIf(TaggedEqual(cached_value, SmiConstant(0)), &call_runtime);
-
- SetAccumulator(cached_value);
+ TNode<HeapObject> maybe_feedback_vector = LoadFeedbackVector();
+ TNode<Object> result =
+ CallBuiltin(Builtins::kGetTemplateObject, context, shared_info,
+ description, slot, maybe_feedback_vector);
+ SetAccumulator(result);
Dispatch();
-
- BIND(&call_runtime);
- {
- TNode<Object> description = LoadConstantPoolEntryAtOperandIndex(0);
- TNode<Smi> slot_smi = SmiTag(Signed(slot));
- TNode<JSFunction> closure =
- CAST(LoadRegister(Register::function_closure()));
- TNode<SharedFunctionInfo> shared_info = LoadObjectField<SharedFunctionInfo>(
- closure, JSFunction::kSharedFunctionInfoOffset);
- TNode<Context> context = GetContext();
- TNode<Object> result = CallRuntime(Runtime::kGetTemplateObject, context,
- description, shared_info, slot_smi);
-
- Label end(this);
- GotoIf(IsUndefined(maybe_feedback_vector), &end);
- StoreFeedbackVectorSlot(CAST(maybe_feedback_vector), slot, result);
- Goto(&end);
-
- Bind(&end);
- SetAccumulator(result);
- Dispatch();
- }
}
// CreateClosure <index> <slot> <flags>
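
This hunk deletes the UnaryNumericOpAssembler hierarchy, whose subclasses overrode virtual SmiOp/FloatOp/BigIntOp hooks, in favor of the shared UnaryOpAssembler driver that receives the per-operation pieces as callables. A minimal sketch of that shape change (ints and doubles stand in for Smis and HeapNumbers; feedback collection is omitted):

#include <cstdio>
#include <functional>

using SmiOperation = std::function<double(int)>;
using FloatOperation = std::function<double(double)>;

double UnaryOpWithFeedbackSketch(double value, const SmiOperation& smi_op,
                                 const FloatOperation& float_op) {
  int as_int = static_cast<int>(value);
  if (static_cast<double>(as_int) == value) return smi_op(as_int);  // "Smi".
  return float_op(value);  // "HeapNumber" path.
}

int main() {
  const int kAddValue = 1;  // kIncrement; kDecrement would pass -1.
  SmiOperation smi_op = [=](int v) {
    return static_cast<double>(v + kAddValue);
  };
  FloatOperation float_op = [=](double v) { return v + kAddValue; };
  std::printf("%g %g\n", UnaryOpWithFeedbackSketch(41, smi_op, float_op),
              UnaryOpWithFeedbackSketch(0.5, smi_op, float_op));  // 42 1.5
}
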
diff --git a/chromium/v8/src/interpreter/interpreter.cc b/chromium/v8/src/interpreter/interpreter.cc
index 42f0c561625..3174e749199 100644
--- a/chromium/v8/src/interpreter/interpreter.cc
+++ b/chromium/v8/src/interpreter/interpreter.cc
@@ -194,14 +194,14 @@ void InterpreterCompilationJob::CheckAndPrintBytecodeMismatch(
MaybeHandle<String> maybe_name = parse_info()->literal()->GetName(isolate);
Handle<String> name;
if (maybe_name.ToHandle(&name) && name->length() != 0) {
- name->StringPrint(std::cerr);
+ name->PrintUC16(std::cerr);
} else {
std::cerr << "anonymous";
}
Object script_name = script->GetNameOrSourceURL();
if (script_name.IsString()) {
std::cerr << " ";
- String::cast(script_name).StringPrint(std::cerr);
+ String::cast(script_name).PrintUC16(std::cerr);
std::cerr << ":" << parse_info()->literal()->start_position();
}
#endif
diff --git a/chromium/v8/src/json/json-parser.cc b/chromium/v8/src/json/json-parser.cc
index da2f60d3209..d099fa36cba 100644
--- a/chromium/v8/src/json/json-parser.cc
+++ b/chromium/v8/src/json/json-parser.cc
@@ -335,7 +335,7 @@ uc32 JsonParser<Char>::ScanUnicodeCharacter() {
uc32 value = 0;
for (int i = 0; i < 4; i++) {
int digit = HexValue(NextCharacter());
- if (V8_UNLIKELY(digit < 0)) return -1;
+ if (V8_UNLIKELY(digit < 0)) return kInvalidUnicodeCharacter;
value = value * 16 + digit;
}
return value;
@@ -1173,7 +1173,7 @@ JsonString JsonParser<Char>::ScanJsonString(bool needs_internalization) {
case EscapeKind::kUnicode: {
uc32 value = ScanUnicodeCharacter();
- if (value == -1) {
+ if (value == kInvalidUnicodeCharacter) {
AllowHeapAllocation allow_before_exception;
ReportUnexpectedCharacter(CurrentCharacter());
return JsonString();
diff --git a/chromium/v8/src/json/json-parser.h b/chromium/v8/src/json/json-parser.h
index 5ee1499b364..6219cd3b5d1 100644
--- a/chromium/v8/src/json/json-parser.h
+++ b/chromium/v8/src/json/json-parser.h
@@ -151,7 +151,8 @@ class JsonParser final {
return result;
}
- static const int kEndOfString = -1;
+ static constexpr uc32 kEndOfString = static_cast<uc32>(-1);
+ static constexpr uc32 kInvalidUnicodeCharacter = static_cast<uc32>(-1);
private:
struct JsonContinuation {
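
The parser change replaces the bare -1 sentinel with a named kInvalidUnicodeCharacter; since uc32 appears to be unsigned (note the static_cast<uc32>(-1) above), the named constant keeps the comparison in ScanJsonString free of signed/unsigned mismatches. A standalone sketch of the \uXXXX scan with simplified stand-ins for HexValue and the input cursor:

#include <cstdint>
#include <cstdio>

using uc32 = uint32_t;
constexpr uc32 kInvalidUnicodeCharacter = static_cast<uc32>(-1);

int HexValue(char c) {
  if (c >= '0' && c <= '9') return c - '0';
  if (c >= 'a' && c <= 'f') return c - 'a' + 10;
  if (c >= 'A' && c <= 'F') return c - 'A' + 10;
  return -1;
}

uc32 ScanUnicodeEscape(const char* digits) {
  uc32 value = 0;
  for (int i = 0; i < 4; i++) {
    int digit = HexValue(digits[i]);
    if (digit < 0) return kInvalidUnicodeCharacter;
    value = value * 16 + digit;
  }
  return value;
}

int main() {
  std::printf("%x\n", static_cast<unsigned>(ScanUnicodeEscape("0041")));  // 41
  std::printf("%d\n", ScanUnicodeEscape("00g1") == kInvalidUnicodeCharacter);
}
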
diff --git a/chromium/v8/src/libplatform/default-foreground-task-runner.h b/chromium/v8/src/libplatform/default-foreground-task-runner.h
index a923f3f867a..3cddff5632c 100644
--- a/chromium/v8/src/libplatform/default-foreground-task-runner.h
+++ b/chromium/v8/src/libplatform/default-foreground-task-runner.h
@@ -25,11 +25,10 @@ class V8_PLATFORM_EXPORT DefaultForegroundTaskRunner
explicit RunTaskScope(
std::shared_ptr<DefaultForegroundTaskRunner> task_runner);
~RunTaskScope();
-
- private:
RunTaskScope(const RunTaskScope&) = delete;
RunTaskScope& operator=(const RunTaskScope&) = delete;
+ private:
std::shared_ptr<DefaultForegroundTaskRunner> task_runner_;
};
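
Moving the deleted copy operations into the public section is a conventional C++ tweak: an attempted copy is then diagnosed as a call to a deleted function rather than as an access error. A minimal sketch with a stand-in type:

#include <memory>

class RunTaskScopeSketch {
 public:
  explicit RunTaskScopeSketch(std::shared_ptr<int> runner)
      : runner_(std::move(runner)) {}
  RunTaskScopeSketch(const RunTaskScopeSketch&) = delete;
  RunTaskScopeSketch& operator=(const RunTaskScopeSketch&) = delete;

 private:
  std::shared_ptr<int> runner_;
};

int main() {
  RunTaskScopeSketch scope(std::make_shared<int>(0));
  // RunTaskScopeSketch copy = scope;  // error: call to deleted constructor
  (void)scope;
}
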
diff --git a/chromium/v8/src/libplatform/default-platform.cc b/chromium/v8/src/libplatform/default-platform.cc
index e380161eb07..649a47f629f 100644
--- a/chromium/v8/src/libplatform/default-platform.cc
+++ b/chromium/v8/src/libplatform/default-platform.cc
@@ -45,6 +45,13 @@ std::unique_ptr<v8::Platform> NewDefaultPlatform(
return platform;
}
+V8_PLATFORM_EXPORT std::unique_ptr<JobHandle> NewDefaultJobHandle(
+ Platform* platform, TaskPriority priority,
+ std::unique_ptr<JobTask> job_task, size_t num_worker_threads) {
+ return std::make_unique<DefaultJobHandle>(std::make_shared<DefaultJobState>(
+ platform, std::move(job_task), priority, num_worker_threads));
+}
+
bool PumpMessageLoop(v8::Platform* platform, v8::Isolate* isolate,
MessageLoopBehavior behavior) {
return static_cast<DefaultPlatform*>(platform)->PumpMessageLoop(isolate,
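
NewDefaultJobHandle wraps shared job state in a handle, so the state can be co-owned by the platform's workers and outlive any single handle. A sketch of that ownership shape with assumed stand-in types (not the real DefaultJobState/DefaultJobHandle):

#include <cstddef>
#include <memory>
#include <utility>

struct JobStateSketch {
  size_t num_worker_threads;
};

class JobHandleSketch {
 public:
  explicit JobHandleSketch(std::shared_ptr<JobStateSketch> state)
      : state_(std::move(state)) {}

 private:
  std::shared_ptr<JobStateSketch> state_;  // Co-owned with the workers.
};

std::unique_ptr<JobHandleSketch> NewJobHandleSketch(size_t num_worker_threads) {
  return std::make_unique<JobHandleSketch>(
      std::make_shared<JobStateSketch>(JobStateSketch{num_worker_threads}));
}

int main() {
  auto handle = NewJobHandleSketch(4);
  (void)handle;
}
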
diff --git a/chromium/v8/src/libsampler/sampler.h b/chromium/v8/src/libsampler/sampler.h
index c606add82aa..e81ba9c90e1 100644
--- a/chromium/v8/src/libsampler/sampler.h
+++ b/chromium/v8/src/libsampler/sampler.h
@@ -72,7 +72,7 @@ class V8_EXPORT_PRIVATE Sampler {
protected:
// Counts stack samples taken in various VM states.
- bool is_counting_samples_ = 0;
+ bool is_counting_samples_ = false;
unsigned js_sample_count_ = 0;
unsigned external_sample_count_ = 0;
diff --git a/chromium/v8/src/logging/counters-inl.h b/chromium/v8/src/logging/counters-inl.h
index 3b4acf1f93d..3e067c7c060 100644
--- a/chromium/v8/src/logging/counters-inl.h
+++ b/chromium/v8/src/logging/counters-inl.h
@@ -6,6 +6,7 @@
#define V8_LOGGING_COUNTERS_INL_H_
#include "src/logging/counters.h"
+#include "src/logging/tracing-flags.h"
namespace v8 {
namespace internal {
diff --git a/chromium/v8/src/logging/counters.cc b/chromium/v8/src/logging/counters.cc
index 21248389b2c..386bb8a75a1 100644
--- a/chromium/v8/src/logging/counters.cc
+++ b/chromium/v8/src/logging/counters.cc
@@ -17,11 +17,6 @@
namespace v8 {
namespace internal {
-std::atomic_uint TracingFlags::runtime_stats{0};
-std::atomic_uint TracingFlags::gc{0};
-std::atomic_uint TracingFlags::gc_stats{0};
-std::atomic_uint TracingFlags::ic_stats{0};
-
StatsTable::StatsTable(Counters* counters)
: lookup_function_(nullptr),
create_histogram_function_(nullptr),
diff --git a/chromium/v8/src/logging/counters.h b/chromium/v8/src/logging/counters.h
index 02a6feee2e3..5002d6e9a10 100644
--- a/chromium/v8/src/logging/counters.h
+++ b/chromium/v8/src/logging/counters.h
@@ -16,6 +16,7 @@
#include "src/execution/isolate.h"
#include "src/init/heap-symbols.h"
#include "src/logging/counters-definitions.h"
+#include "src/logging/tracing-flags.h"
#include "src/objects/objects.h"
#include "src/runtime/runtime.h"
#include "src/tracing/trace-event.h"
@@ -26,33 +27,6 @@
namespace v8 {
namespace internal {
-// This struct contains a set of flags that can be modified from multiple
-// threads at runtime unlike the normal FLAG_-like flags which are not modified
-// after V8 instance is initialized.
-
-struct TracingFlags {
- static V8_EXPORT_PRIVATE std::atomic_uint runtime_stats;
- static V8_EXPORT_PRIVATE std::atomic_uint gc;
- static V8_EXPORT_PRIVATE std::atomic_uint gc_stats;
- static V8_EXPORT_PRIVATE std::atomic_uint ic_stats;
-
- static bool is_runtime_stats_enabled() {
- return runtime_stats.load(std::memory_order_relaxed) != 0;
- }
-
- static bool is_gc_enabled() {
- return gc.load(std::memory_order_relaxed) != 0;
- }
-
- static bool is_gc_stats_enabled() {
- return gc_stats.load(std::memory_order_relaxed) != 0;
- }
-
- static bool is_ic_stats_enabled() {
- return ic_stats.load(std::memory_order_relaxed) != 0;
- }
-};
-
// StatsCounters is an interface for plugging into external
// counters for monitoring. Counters can be looked up and
// manipulated by name.
diff --git a/chromium/v8/src/logging/log-utils.h b/chromium/v8/src/logging/log-utils.h
index e89a449f3b4..bae665cd456 100644
--- a/chromium/v8/src/logging/log-utils.h
+++ b/chromium/v8/src/logging/log-utils.h
@@ -37,7 +37,7 @@ class Log {
FLAG_log_suspect || FLAG_ll_prof || FLAG_perf_basic_prof ||
FLAG_perf_prof || FLAG_log_source_code || FLAG_gdbjit ||
FLAG_log_internal_timer_events || FLAG_prof_cpp || FLAG_trace_ic ||
- FLAG_log_function_events;
+ FLAG_log_function_events || FLAG_trace_zone_stats;
}
// Frees all resources acquired in Initialize and Open... functions.
diff --git a/chromium/v8/src/logging/log.h b/chromium/v8/src/logging/log.h
index 98723a533a0..70f562dee3f 100644
--- a/chromium/v8/src/logging/log.h
+++ b/chromium/v8/src/logging/log.h
@@ -72,13 +72,13 @@ class Ticker;
#undef LOG
#define LOG(isolate, Call) \
do { \
- auto* logger = (isolate)->logger(); \
+ auto&& logger = (isolate)->logger(); \
if (logger->is_logging()) logger->Call; \
} while (false)
#define LOG_CODE_EVENT(isolate, Call) \
do { \
- auto* logger = (isolate)->logger(); \
+ auto&& logger = (isolate)->logger(); \
if (logger->is_listening_to_code_events()) logger->Call; \
} while (false)
diff --git a/chromium/v8/src/logging/off-thread-logger.h b/chromium/v8/src/logging/off-thread-logger.h
index fab58c311bf..f55e429e31c 100644
--- a/chromium/v8/src/logging/off-thread-logger.h
+++ b/chromium/v8/src/logging/off-thread-logger.h
@@ -19,6 +19,7 @@ class OffThreadLogger {
void ScriptEvent(Logger::ScriptEventType type, int script_id) {
UNREACHABLE();
}
+ void ScriptDetails(Script script) { UNREACHABLE(); }
void CodeLinePosInfoRecordEvent(Address code_start,
ByteArray source_position_table) {
UNREACHABLE();
diff --git a/chromium/v8/src/logging/tracing-flags.cc b/chromium/v8/src/logging/tracing-flags.cc
new file mode 100644
index 00000000000..930469fde74
--- /dev/null
+++ b/chromium/v8/src/logging/tracing-flags.cc
@@ -0,0 +1,17 @@
+// Copyright 2020 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/logging/tracing-flags.h"
+
+namespace v8 {
+namespace internal {
+
+std::atomic_uint TracingFlags::runtime_stats{0};
+std::atomic_uint TracingFlags::gc{0};
+std::atomic_uint TracingFlags::gc_stats{0};
+std::atomic_uint TracingFlags::ic_stats{0};
+std::atomic_uint TracingFlags::zone_stats{0};
+
+} // namespace internal
+} // namespace v8
diff --git a/chromium/v8/src/logging/tracing-flags.h b/chromium/v8/src/logging/tracing-flags.h
new file mode 100644
index 00000000000..b23ed03a20a
--- /dev/null
+++ b/chromium/v8/src/logging/tracing-flags.h
@@ -0,0 +1,50 @@
+// Copyright 2020 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_LOGGING_TRACING_FLAGS_H_
+#define V8_LOGGING_TRACING_FLAGS_H_
+
+#include <atomic>
+
+#include "src/base/macros.h"
+
+namespace v8 {
+namespace internal {
+
+// This struct contains a set of flags that can be modified from multiple
+// threads at runtime, unlike the normal FLAG_-like flags, which are not
+// modified after the V8 instance is initialized.
+
+struct TracingFlags {
+ static V8_EXPORT_PRIVATE std::atomic_uint runtime_stats;
+ static V8_EXPORT_PRIVATE std::atomic_uint gc;
+ static V8_EXPORT_PRIVATE std::atomic_uint gc_stats;
+ static V8_EXPORT_PRIVATE std::atomic_uint ic_stats;
+ static V8_EXPORT_PRIVATE std::atomic_uint zone_stats;
+
+ static bool is_runtime_stats_enabled() {
+ return runtime_stats.load(std::memory_order_relaxed) != 0;
+ }
+
+ static bool is_gc_enabled() {
+ return gc.load(std::memory_order_relaxed) != 0;
+ }
+
+ static bool is_gc_stats_enabled() {
+ return gc_stats.load(std::memory_order_relaxed) != 0;
+ }
+
+ static bool is_ic_stats_enabled() {
+ return ic_stats.load(std::memory_order_relaxed) != 0;
+ }
+
+ static bool is_zone_stats_enabled() {
+ return zone_stats.load(std::memory_order_relaxed) != 0;
+ }
+};
+
+} // namespace internal
+} // namespace v8
+
+#endif // V8_LOGGING_TRACING_FLAGS_H_
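The new header above isolates the runtime-togglable tracing flags in their own translation unit. As a minimal, self-contained sketch of the pattern it relies on (not V8 code; DemoTracingFlags and the main() harness are illustrative only): a static atomic counter that any thread may bump, read with relaxed ordering because callers only need an eventually visible on/off signal rather than synchronization.

#include <atomic>
#include <cstdio>

struct DemoTracingFlags {
  // Non-zero means "enabled"; several subsystems may increment it.
  static std::atomic_uint zone_stats;

  static bool is_zone_stats_enabled() {
    // Relaxed ordering: readers tolerate a slightly stale value.
    return zone_stats.load(std::memory_order_relaxed) != 0;
  }
};

std::atomic_uint DemoTracingFlags::zone_stats{0};

int main() {
  std::printf("before: %d\n", DemoTracingFlags::is_zone_stats_enabled());
  DemoTracingFlags::zone_stats.fetch_add(1, std::memory_order_relaxed);
  std::printf("after: %d\n", DemoTracingFlags::is_zone_stats_enabled());
  return 0;
}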
diff --git a/chromium/v8/src/objects/api-callbacks.tq b/chromium/v8/src/objects/api-callbacks.tq
index 8a8aab59f8d..102ffd7ab21 100644
--- a/chromium/v8/src/objects/api-callbacks.tq
+++ b/chromium/v8/src/objects/api-callbacks.tq
@@ -40,7 +40,6 @@ extern class AccessCheckInfo extends Struct {
data: Object;
}
-type PropertyAttributes extends int32 constexpr 'PropertyAttributes';
type SideEffectType extends int32 constexpr 'SideEffectType';
bitfield struct AccessorInfoFlags extends uint31 {
diff --git a/chromium/v8/src/objects/arguments-inl.h b/chromium/v8/src/objects/arguments-inl.h
index 8c49d909d2f..494a8960bd8 100644
--- a/chromium/v8/src/objects/arguments-inl.h
+++ b/chromium/v8/src/objects/arguments-inl.h
@@ -5,9 +5,8 @@
#ifndef V8_OBJECTS_ARGUMENTS_INL_H_
#define V8_OBJECTS_ARGUMENTS_INL_H_
-#include "src/objects/arguments.h"
-
#include "src/execution/isolate-inl.h"
+#include "src/objects/arguments.h"
#include "src/objects/contexts-inl.h"
#include "src/objects/fixed-array-inl.h"
#include "src/objects/objects-inl.h"
@@ -18,38 +17,9 @@
namespace v8 {
namespace internal {
-OBJECT_CONSTRUCTORS_IMPL(SloppyArgumentsElements, FixedArray)
TQ_OBJECT_CONSTRUCTORS_IMPL(JSArgumentsObject)
TQ_OBJECT_CONSTRUCTORS_IMPL(AliasedArgumentsEntry)
-CAST_ACCESSOR(SloppyArgumentsElements)
-
-DEF_GETTER(SloppyArgumentsElements, context, Context) {
- return TaggedField<Context>::load(isolate, *this,
- OffsetOfElementAt(kContextIndex));
-}
-
-DEF_GETTER(SloppyArgumentsElements, arguments, FixedArray) {
- return TaggedField<FixedArray>::load(isolate, *this,
- OffsetOfElementAt(kArgumentsIndex));
-}
-
-void SloppyArgumentsElements::set_arguments(FixedArray arguments) {
- set(kArgumentsIndex, arguments);
-}
-
-uint32_t SloppyArgumentsElements::parameter_map_length() {
- return length() - kParameterMapStart;
-}
-
-Object SloppyArgumentsElements::get_mapped_entry(uint32_t entry) {
- return get(entry + kParameterMapStart);
-}
-
-void SloppyArgumentsElements::set_mapped_entry(uint32_t entry, Object object) {
- set(entry + kParameterMapStart, object);
-}
-
} // namespace internal
} // namespace v8
diff --git a/chromium/v8/src/objects/arguments.h b/chromium/v8/src/objects/arguments.h
index ed2d31d1dd4..41eaaed1a17 100644
--- a/chromium/v8/src/objects/arguments.h
+++ b/chromium/v8/src/objects/arguments.h
@@ -58,49 +58,6 @@ class JSStrictArgumentsObject : public JSArgumentsObject {
DISALLOW_IMPLICIT_CONSTRUCTORS(JSStrictArgumentsObject);
};
-// Helper class to access FAST_ and SLOW_SLOPPY_ARGUMENTS_ELEMENTS
-//
-// +---+-----------------------+
-// | 0 | Context context |
-// +---------------------------+
-// | 1 | FixedArray arguments +----+ HOLEY_ELEMENTS
-// +---------------------------+ v-----+-----------+
-// | 2 | Object param_1_map | | 0 | the_hole |
-// |...| ... | | ... | ... |
-// |n+1| Object param_n_map | | n-1 | the_hole |
-// +---------------------------+ | n | element_1 |
-// | ... | ... |
-// |n+m-1| element_m |
-// +-----------------+
-//
-// Parameter maps give the index into the provided context. If a map entry is
-// the_hole it means that the given entry has been deleted from the arguments
-// object.
-// The arguments backing store kind depends on the ElementsKind of the outer
-// JSArgumentsObject:
-// - FAST_SLOPPY_ARGUMENTS_ELEMENTS: HOLEY_ELEMENTS
-// - SLOW_SLOPPY_ARGUMENTS_ELEMENTS: DICTIONARY_ELEMENTS
-class SloppyArgumentsElements : public FixedArray {
- public:
- static const int kContextIndex = 0;
- static const int kArgumentsIndex = 1;
- static const uint32_t kParameterMapStart = 2;
-
- DECL_GETTER(context, Context)
- DECL_GETTER(arguments, FixedArray)
- inline void set_arguments(FixedArray arguments);
- inline uint32_t parameter_map_length();
- inline Object get_mapped_entry(uint32_t entry);
- inline void set_mapped_entry(uint32_t entry, Object object);
-
- DECL_CAST(SloppyArgumentsElements)
-#ifdef VERIFY_HEAP
- void SloppyArgumentsElementsVerify(Isolate* isolate, JSObject holder);
-#endif
-
- OBJECT_CONSTRUCTORS(SloppyArgumentsElements, FixedArray);
-};
-
// Representation of a slow alias as part of a sloppy arguments objects.
// For fast aliases (if HasSloppyArgumentsElements()):
// - the parameter map contains an index into the context
diff --git a/chromium/v8/src/objects/arguments.tq b/chromium/v8/src/objects/arguments.tq
index 5211707eae9..e1637056f7f 100644
--- a/chromium/v8/src/objects/arguments.tq
+++ b/chromium/v8/src/objects/arguments.tq
@@ -26,7 +26,69 @@ extern shape JSStrictArgumentsObject extends JSArgumentsObject {
length: JSAny;
}
-type SloppyArgumentsElements extends FixedArray;
+// Helper class to access FAST_ and SLOW_SLOPPY_ARGUMENTS_ELEMENTS, dividing
+// arguments into two types for a given SloppyArgumentsElements object:
+// mapped and unmapped.
+//
+// For clarity SloppyArgumentsElements fields are qualified with "elements."
+// below.
+//
+// Mapped arguments are actual arguments. Unmapped arguments are values added
+// to the arguments object after it was created for the call. Mapped arguments
+// are stored in the context at indexes given by elements.mapped_entries[key].
+// Unmapped arguments are stored as regular indexed properties in the arguments
+// array which can be accessed from elements.arguments.
+//
+// elements.length is min(number_of_actual_arguments,
+// number_of_formal_arguments) for a concrete call to a function.
+//
+// Once a SloppyArgumentsElements is generated, lookup of an argument with index
+// |key| in |elements| works as follows:
+//
+// If key >= elements.length then attempt to look in the unmapped arguments
+// array and return the value at key, missing to the runtime if the unmapped
+// arguments array is not a fixed array or if key >= elements.arguments.length.
+//
+// Otherwise, t = elements.mapped_entries[key]. If t is the hole, then the
+// entry has been deleted from the arguments object, and value is looked up in
+// the unmapped arguments array, as described above. Otherwise, t is a Smi
+// index into the context array specified at elements.context, and the return
+// value is elements.context[t].
+//
+// A graphic representation of a SloppyArgumentsElements object and a
+// corresponding unmapped arguments FixedArray:
+//
+// SloppyArgumentsElements
+// +---+-----------------------+
+// | Context context |
+// +---------------------------+
+// | FixedArray arguments +----+ HOLEY_ELEMENTS
+// +---------------------------+ v-----+-----------+
+// | 0 | Object mapped_entries | | 0 | the_hole |
+// |...| ... | | ... | ... |
+// |n-1| Object mapped_entries | | n-1 | the_hole |
+// +---------------------------+ | n | element_1 |
+// | ... | ... |
+// |n+m-1| element_m |
+// +-----------------+
+//
+// The elements.arguments backing store kind depends on the ElementsKind of
+// the outer JSArgumentsObject:
+// - FAST_SLOPPY_ARGUMENTS_ELEMENTS: HOLEY_ELEMENTS
+// - SLOW_SLOPPY_ARGUMENTS_ELEMENTS: DICTIONARY_ELEMENTS
+@export
+class SloppyArgumentsElements extends FixedArrayBase {
+ context: Context;
+ arguments: FixedArray;
+ mapped_entries[length]: Smi|TheHole;
+}
+
+macro NewSloppyArgumentsElements<Iterator: type>(
+ length: Smi, context: Context, arguments: FixedArray,
+ it: Iterator): SloppyArgumentsElements {
+ return new
+ SloppyArgumentsElements{length, context, arguments, mapped_entries: ...it};
+}
@generateCppClass
@generatePrint
@@ -49,7 +111,7 @@ macro NewJSStrictArgumentsObject(implicit context: Context)(
}
macro NewJSSloppyArgumentsObject(implicit context: Context)(
- elements: FixedArray, callee: JSFunction): JSSloppyArgumentsObject {
+ elements: FixedArrayBase, callee: JSFunction): JSSloppyArgumentsObject {
const map = GetSloppyArgumentsMap();
return new JSSloppyArgumentsObject{
map,
@@ -61,7 +123,7 @@ macro NewJSSloppyArgumentsObject(implicit context: Context)(
}
macro NewJSFastAliasedArgumentsObject(implicit context: Context)(
- elements: FixedArray, length: Smi,
+ elements: FixedArrayBase, length: Smi,
callee: JSFunction): JSSloppyArgumentsObject {
// TODO(danno): FastAliasedArguments should really be a type for itself
const map = GetFastAliasedArgumentsMap();
@@ -75,28 +137,17 @@ macro NewJSFastAliasedArgumentsObject(implicit context: Context)(
}
struct ParameterMapIterator {
- macro Next(): Object labels NoMore {
- const currentMapSlotCopy = this.currentMapSlot++;
- if (currentMapSlotCopy > 1) {
- if (this.currentIndex == this.endInterationIndex) goto NoMore;
- this.currentIndex--;
- return Convert<Smi>(this.currentIndex);
- } else if (currentMapSlotCopy == 0) {
- return this.context;
- } else {
- assert(currentMapSlotCopy == 1);
- return this.elements;
- }
+ macro Next(): Smi labels NoMore {
+ if (this.currentIndex == this.endInterationIndex) goto NoMore;
+ this.currentIndex--;
+ return Convert<Smi>(this.currentIndex);
}
- const context: Context;
- const elements: FixedArray;
currentIndex: intptr;
const endInterationIndex: intptr;
- currentMapSlot: intptr;
}
macro NewParameterMapIterator(
- context: Context, elements: FixedArray, formalParameterCount: intptr,
+ context: Context, formalParameterCount: intptr,
mappedCount: intptr): ParameterMapIterator {
const flags = context.scope_info.flags;
let contextHeaderSize: intptr = MIN_CONTEXT_SLOTS;
@@ -112,11 +163,8 @@ macro NewParameterMapIterator(
const afterLastContextIndex = contextHeaderSize + formalParameterCount;
const firstContextIndex = afterLastContextIndex - mappedCount;
return ParameterMapIterator{
- context,
- elements,
currentIndex: afterLastContextIndex,
- endInterationIndex: firstContextIndex,
- currentMapSlot: 0
+ endInterationIndex: firstContextIndex
};
}
@@ -188,17 +236,16 @@ macro NewSloppyArguments(implicit context: Context)(
const mappedCount = IntPtrMin(formalParameterCount, argumentCount);
const it = NewParameterValueIterator(mappedCount, arguments);
const parameterValues = NewFixedArray(argumentCount, it);
- let paramIter = NewParameterMapIterator(
- context, parameterValues, formalParameterCount, mappedCount);
- const elementsLength =
- Convert<Smi>(mappedCount + kSloppyArgumentsParameterMapStart);
- const map = kSloppyArgumentsElementsMap;
- const elements = new
- FixedArray{map, length: elementsLength, objects: ...paramIter};
+ let paramIter =
+ NewParameterMapIterator(context, formalParameterCount, mappedCount);
+ const elementsLength = Convert<Smi>(mappedCount);
+ const elements = NewSloppyArgumentsElements(
+ elementsLength, context, parameterValues, paramIter);
const length = Convert<Smi>(argumentCount);
return NewJSFastAliasedArgumentsObject(elements, length, callee);
}
-}
+
+} // namespace arguments
@export
macro EmitFastNewAllArguments(implicit context: Context)(
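The comment added to arguments.tq above spells out how a lookup distinguishes mapped and unmapped entries. A minimal sketch of that lookup rule, assuming stand-in types (SloppyElementsSketch, Slot and Lookup are hypothetical helpers, not V8 classes), where nullopt plays the role of the_hole and of a runtime miss:

#include <cstddef>
#include <optional>
#include <vector>

using Slot = std::optional<int>;

struct SloppyElementsSketch {
  std::vector<int> context;          // stand-in for the Context slots
  std::vector<Slot> mapped_entries;  // size == elements.length
  std::vector<Slot> arguments;       // unmapped arguments backing store
};

Slot Lookup(const SloppyElementsSketch& e, std::size_t key) {
  if (key < e.mapped_entries.size()) {
    if (Slot t = e.mapped_entries[key]) {
      // Mapped: t is an index into the context array.
      return e.context[static_cast<std::size_t>(*t)];
    }
    // The hole: the entry was deleted, fall back to the unmapped array.
  }
  if (key < e.arguments.size()) return e.arguments[key];  // unmapped lookup
  return std::nullopt;  // would miss to the runtime in the real implementation
}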
diff --git a/chromium/v8/src/objects/backing-store.cc b/chromium/v8/src/objects/backing-store.cc
index bd9f39b7d3a..0913d829937 100644
--- a/chromium/v8/src/objects/backing-store.cc
+++ b/chromium/v8/src/objects/backing-store.cc
@@ -428,10 +428,20 @@ std::unique_ptr<BackingStore> BackingStore::AllocateWasmMemory(
auto backing_store =
TryAllocateWasmMemory(isolate, initial_pages, maximum_pages, shared);
- if (!backing_store && maximum_pages > initial_pages) {
- // If reserving {maximum_pages} failed, try with maximum = initial.
+ if (maximum_pages == initial_pages) {
+ // If the initial and maximum page counts are equal, there is nothing more
+ // to do; return early.
+ return backing_store;
+ }
+
+ // Retry with a smaller maximum page count on each attempt.
+ const int kAllocationTries = 3;
+ auto delta = (maximum_pages - initial_pages) / (kAllocationTries + 1);
+ size_t sizes[] = {maximum_pages - delta, maximum_pages - 2 * delta,
+ maximum_pages - 3 * delta, initial_pages};
+
+ for (size_t i = 0; i < arraysize(sizes) && !backing_store; i++) {
backing_store =
- TryAllocateWasmMemory(isolate, initial_pages, initial_pages, shared);
+ TryAllocateWasmMemory(isolate, initial_pages, sizes[i], shared);
}
return backing_store;
}
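The retry loop above steps the requested maximum down in equal increments before falling back to the initial size. A small standalone sketch of the schedule it produces, using hypothetical page counts (4 initial, 16 maximum; this is not V8 code):

#include <cstddef>
#include <cstdio>

int main() {
  std::size_t initial_pages = 4, maximum_pages = 16;
  const int kAllocationTries = 3;
  std::size_t delta = (maximum_pages - initial_pages) / (kAllocationTries + 1);
  std::size_t sizes[] = {maximum_pages - delta, maximum_pages - 2 * delta,
                         maximum_pages - 3 * delta, initial_pages};
  // Prints 13, 10, 7 and finally 4 (the initial size).
  for (std::size_t s : sizes) std::printf("try maximum = %zu pages\n", s);
  return 0;
}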
@@ -646,7 +656,7 @@ SharedWasmMemoryData* BackingStore::get_shared_wasm_memory_data() {
namespace {
// Implementation details of GlobalBackingStoreRegistry.
struct GlobalBackingStoreRegistryImpl {
- GlobalBackingStoreRegistryImpl() {}
+ GlobalBackingStoreRegistryImpl() = default;
base::Mutex mutex_;
std::unordered_map<const void*, std::weak_ptr<BackingStore>> map_;
};
diff --git a/chromium/v8/src/objects/bigint.cc b/chromium/v8/src/objects/bigint.cc
index dfc302e77c8..2f8337db979 100644
--- a/chromium/v8/src/objects/bigint.cc
+++ b/chromium/v8/src/objects/bigint.cc
@@ -1125,7 +1125,7 @@ double MutableBigInt::ToDouble(Handle<BigIntBase> x) {
return bit_cast<double>(double_bits);
}
-// This is its own function to keep control flow sane. The meaning of the
+// This is its own function to simplify control flow. The meaning of the
// parameters is defined by {ToDouble}'s local variable usage.
MutableBigInt::Rounding MutableBigInt::DecideRounding(Handle<BigIntBase> x,
int mantissa_bits_unset,
diff --git a/chromium/v8/src/objects/class-definitions-tq-deps-inl.h b/chromium/v8/src/objects/class-definitions-tq-deps-inl.h
index de81ccfeb6a..dafba941ea4 100644
--- a/chromium/v8/src/objects/class-definitions-tq-deps-inl.h
+++ b/chromium/v8/src/objects/class-definitions-tq-deps-inl.h
@@ -12,7 +12,6 @@
#include "src/objects/arguments-inl.h"
#include "src/objects/embedder-data-array-inl.h"
#include "src/objects/free-space-inl.h"
-#include "src/objects/js-aggregate-error-inl.h"
#include "src/objects/js-collection-inl.h"
#include "src/objects/js-generator-inl.h"
#include "src/objects/js-regexp-inl.h"
diff --git a/chromium/v8/src/objects/code.cc b/chromium/v8/src/objects/code.cc
index 2e8f808262e..cb95761d2d8 100644
--- a/chromium/v8/src/objects/code.cc
+++ b/chromium/v8/src/objects/code.cc
@@ -239,7 +239,8 @@ const char* AbstractCode::Kind2String(Kind kind) {
bool Code::IsIsolateIndependent(Isolate* isolate) {
constexpr int all_real_modes_mask =
- (1 << (RelocInfo::LAST_REAL_RELOC_MODE + 1)) - 1;
+ (1 << (RelocInfo::LAST_REAL_RELOC_MODE + 1)) -
+ (1 << (RelocInfo::FIRST_REAL_RELOC_MODE - 1)) - 1;
constexpr int mode_mask = all_real_modes_mask &
~RelocInfo::ModeMask(RelocInfo::CONST_POOL) &
~RelocInfo::ModeMask(RelocInfo::OFF_HEAP_TARGET) &
diff --git a/chromium/v8/src/objects/compilation-cache.h b/chromium/v8/src/objects/compilation-cache.h
index dd507f3c2d9..0074fc7b147 100644
--- a/chromium/v8/src/objects/compilation-cache.h
+++ b/chromium/v8/src/objects/compilation-cache.h
@@ -42,7 +42,7 @@ class CompilationCacheShape : public BaseShape<HashTableKey*> {
class InfoCellPair {
public:
- InfoCellPair() {}
+ InfoCellPair() = default;
inline InfoCellPair(SharedFunctionInfo shared, FeedbackCell feedback_cell);
FeedbackCell feedback_cell() const {
diff --git a/chromium/v8/src/objects/contexts.cc b/chromium/v8/src/objects/contexts.cc
index 686a3c689ef..76c52b27135 100644
--- a/chromium/v8/src/objects/contexts.cc
+++ b/chromium/v8/src/objects/contexts.cc
@@ -159,13 +159,13 @@ static Maybe<bool> UnscopableLookup(LookupIterator* it, bool is_with_context) {
isolate->factory()->unscopables_symbol()),
Nothing<bool>());
if (!unscopables->IsJSReceiver()) return Just(true);
- Handle<Object> blacklist;
+ Handle<Object> blocklist;
ASSIGN_RETURN_ON_EXCEPTION_VALUE(
- isolate, blacklist,
+ isolate, blocklist,
JSReceiver::GetProperty(isolate, Handle<JSReceiver>::cast(unscopables),
it->name()),
Nothing<bool>());
- return Just(!blacklist->BooleanValue(isolate));
+ return Just(!blocklist->BooleanValue(isolate));
}
static PropertyAttributes GetAttributesForMode(VariableMode mode) {
@@ -377,12 +377,12 @@ Handle<Object> Context::Lookup(Handle<Context> context, Handle<String> name,
}
}
- // Check blacklist. Names that are listed, cannot be resolved further.
- Object blacklist = context->get(BLACK_LIST_INDEX);
- if (blacklist.IsStringSet() &&
- StringSet::cast(blacklist).Has(isolate, name)) {
+ // Check the blocklist. Names that are listed cannot be resolved further.
+ Object blocklist = context->get(BLOCK_LIST_INDEX);
+ if (blocklist.IsStringSet() &&
+ StringSet::cast(blocklist).Has(isolate, name)) {
if (FLAG_trace_contexts) {
- PrintF(" - name is blacklisted. Aborting.\n");
+ PrintF(" - name is blocklisted. Aborting.\n");
}
break;
}
diff --git a/chromium/v8/src/objects/contexts.h b/chromium/v8/src/objects/contexts.h
index 06f742281ad..d6386f3c49f 100644
--- a/chromium/v8/src/objects/contexts.h
+++ b/chromium/v8/src/objects/contexts.h
@@ -309,6 +309,7 @@ enum ContextLookupFlags {
V(OBJECT_TO_STRING, JSFunction, object_to_string) \
V(OBJECT_VALUE_OF_FUNCTION_INDEX, JSFunction, object_value_of_function) \
V(PROMISE_ALL_INDEX, JSFunction, promise_all) \
+ V(PROMISE_ANY_INDEX, JSFunction, promise_any) \
V(PROMISE_CATCH_INDEX, JSFunction, promise_catch) \
V(PROMISE_FUNCTION_INDEX, JSFunction, promise_function) \
V(RANGE_ERROR_FUNCTION_INDEX, JSFunction, range_error_function) \
@@ -508,7 +509,7 @@ class Context : public HeapObject {
// These slots hold values in debug evaluate contexts.
WRAPPED_CONTEXT_INDEX = MIN_CONTEXT_EXTENDED_SLOTS,
- BLACK_LIST_INDEX = MIN_CONTEXT_EXTENDED_SLOTS + 1
+ BLOCK_LIST_INDEX = MIN_CONTEXT_EXTENDED_SLOTS + 1
};
static const int kExtensionSize =
diff --git a/chromium/v8/src/objects/contexts.tq b/chromium/v8/src/objects/contexts.tq
index bae4fd60df9..1b0ae080c3e 100644
--- a/chromium/v8/src/objects/contexts.tq
+++ b/chromium/v8/src/objects/contexts.tq
@@ -36,11 +36,13 @@ extern enum NativeContextSlot extends intptr constexpr 'Context::Field' {
JS_ARRAY_PACKED_SMI_ELEMENTS_MAP_INDEX,
MATH_RANDOM_CACHE_INDEX,
MATH_RANDOM_INDEX_INDEX,
+ NUMBER_FUNCTION_INDEX,
PROXY_REVOCABLE_RESULT_MAP_INDEX,
REFLECT_APPLY_INDEX,
REGEXP_FUNCTION_INDEX,
REGEXP_LAST_MATCH_INFO_INDEX,
INITIAL_STRING_ITERATOR_MAP_INDEX,
+ INITIAL_ARRAY_ITERATOR_MAP_INDEX,
SLOW_OBJECT_WITH_NULL_PROTOTYPE_MAP,
STRICT_ARGUMENTS_MAP_INDEX,
SLOPPY_ARGUMENTS_MAP_INDEX,
@@ -52,6 +54,9 @@ extern enum NativeContextSlot extends intptr constexpr 'Context::Field' {
STRICT_FUNCTION_WITHOUT_PROTOTYPE_MAP_INDEX,
CONTINUATION_PRESERVED_EMBEDDER_DATA_INDEX,
+
+ BOUND_FUNCTION_WITH_CONSTRUCTOR_MAP_INDEX,
+ BOUND_FUNCTION_WITHOUT_CONSTRUCTOR_MAP_INDEX,
...
}
diff --git a/chromium/v8/src/objects/debug-objects.cc b/chromium/v8/src/objects/debug-objects.cc
index 48dc2d5b8d4..839bc097d13 100644
--- a/chromium/v8/src/objects/debug-objects.cc
+++ b/chromium/v8/src/objects/debug-objects.cc
@@ -274,10 +274,13 @@ void BreakPointInfo::SetBreakPoint(Isolate* isolate,
break_point_info->set_break_points(*break_point);
return;
}
- // If the break point object is the same as before just ignore.
- if (break_point_info->break_points() == *break_point) return;
// If there was one break point object before replace with array.
if (!break_point_info->break_points().IsFixedArray()) {
+ if (IsEqual(BreakPoint::cast(break_point_info->break_points()),
+ *break_point)) {
+ return;
+ }
+
Handle<FixedArray> array = isolate->factory()->NewFixedArray(2);
array->set(0, break_point_info->break_points());
array->set(1, *break_point);
diff --git a/chromium/v8/src/objects/debug-objects.h b/chromium/v8/src/objects/debug-objects.h
index 415f456b1b4..22eea25481b 100644
--- a/chromium/v8/src/objects/debug-objects.h
+++ b/chromium/v8/src/objects/debug-objects.h
@@ -28,7 +28,6 @@ class DebugInfo : public TorqueGeneratedDebugInfo<DebugInfo, Struct> {
public:
NEVER_READ_ONLY_SPACE
DEFINE_TORQUE_GENERATED_DEBUG_INFO_FLAGS()
- using Flags = base::Flags<Flag>;
// DebugInfo can be detached from the SharedFunctionInfo iff it is empty.
bool IsEmpty() const;
diff --git a/chromium/v8/src/objects/debug-objects.tq b/chromium/v8/src/objects/debug-objects.tq
index 8544e793664..ee8201e615c 100644
--- a/chromium/v8/src/objects/debug-objects.tq
+++ b/chromium/v8/src/objects/debug-objects.tq
@@ -74,11 +74,13 @@ extern class CoverageInfo extends HeapObject {
@generateCppClass
@generatePrint
extern class WasmValue extends Struct {
+ // TODO(7748): Name and comment are outdated.
// The type, should map to ValueType::Kind values in value-type.h.
value_type: SmiTagged<WasmValueType>;
// Holds the actual value. For example, if this holds a Wasm i32, this will
// be of length 4, for s128, it will have length 16. These values are
// represented by the respective C++ types, and memcpy-ed in.
- // When value_type is a anyref, it holds the object that anyref points to.
+ // When value_type is an externref, it holds the object that the externref
+ // points to.
bytes_or_ref: Object|ByteArray;
}
diff --git a/chromium/v8/src/objects/descriptor-array-inl.h b/chromium/v8/src/objects/descriptor-array-inl.h
index 357a6732e22..d9e3408dd96 100644
--- a/chromium/v8/src/objects/descriptor-array-inl.h
+++ b/chromium/v8/src/objects/descriptor-array-inl.h
@@ -55,17 +55,19 @@ void DescriptorArray::CopyEnumCacheFrom(DescriptorArray array) {
set_enum_cache(array.enum_cache());
}
-InternalIndex DescriptorArray::Search(Name name, int valid_descriptors) {
+InternalIndex DescriptorArray::Search(Name name, int valid_descriptors,
+ bool concurrent_search) {
DCHECK(name.IsUniqueName());
- return InternalIndex(
- internal::Search<VALID_ENTRIES>(this, name, valid_descriptors, nullptr));
+ return InternalIndex(internal::Search<VALID_ENTRIES>(
+ this, name, valid_descriptors, nullptr, concurrent_search));
}
-InternalIndex DescriptorArray::Search(Name name, Map map) {
+InternalIndex DescriptorArray::Search(Name name, Map map,
+ bool concurrent_search) {
DCHECK(name.IsUniqueName());
int number_of_own_descriptors = map.NumberOfOwnDescriptors();
if (number_of_own_descriptors == 0) return InternalIndex::NotFound();
- return Search(name, number_of_own_descriptors);
+ return Search(name, number_of_own_descriptors, concurrent_search);
}
InternalIndex DescriptorArray::SearchWithCache(Isolate* isolate, Name name,
diff --git a/chromium/v8/src/objects/descriptor-array.h b/chromium/v8/src/objects/descriptor-array.h
index 61da8dc240c..f6894819290 100644
--- a/chromium/v8/src/objects/descriptor-array.h
+++ b/chromium/v8/src/objects/descriptor-array.h
@@ -115,9 +115,13 @@ class DescriptorArray
// Sort the instance descriptors by the hash codes of their keys.
void Sort();
- // Search the instance descriptors for given name.
- V8_INLINE InternalIndex Search(Name name, int number_of_own_descriptors);
- V8_INLINE InternalIndex Search(Name name, Map map);
+ // Search the instance descriptors for the given name. {concurrent_search}
+ // signals whether the search runs on a background thread; if so, we
+ // sacrifice speed for thread-safety.
+ V8_INLINE InternalIndex Search(Name name, int number_of_own_descriptors,
+ bool concurrent_search = false);
+ V8_INLINE InternalIndex Search(Name name, Map map,
+ bool concurrent_search = false);
// As the above, but uses DescriptorLookupCache and updates it when
// necessary.
@@ -189,7 +193,7 @@ class DescriptorArray
#ifdef DEBUG
// Is the descriptor array sorted and without duplicates?
- V8_EXPORT_PRIVATE bool IsSortedNoDuplicates(int valid_descriptors = -1);
+ V8_EXPORT_PRIVATE bool IsSortedNoDuplicates();
// Are two DescriptorArrays equal?
bool IsEqualTo(DescriptorArray other);
diff --git a/chromium/v8/src/objects/elements.cc b/chromium/v8/src/objects/elements.cc
index d0c680d287d..7994c12ec1a 100644
--- a/chromium/v8/src/objects/elements.cc
+++ b/chromium/v8/src/objects/elements.cc
@@ -22,6 +22,8 @@
#include "src/objects/slots-atomic-inl.h"
#include "src/objects/slots.h"
#include "src/utils/utils.h"
+#include "torque-generated/exported-class-definitions-tq-inl.h"
+#include "torque-generated/exported-class-definitions-tq.h"
// Each concrete ElementsAccessor can handle exactly one ElementsKind,
// several abstract ElementsAccessor classes are used to allow sharing
@@ -443,6 +445,8 @@ void CopyDictionaryToDoubleElements(Isolate* isolate, FixedArrayBase from_base,
void SortIndices(Isolate* isolate, Handle<FixedArray> indices,
uint32_t sort_size) {
+ if (sort_size == 0) return;
+
// Use AtomicSlot wrapper to ensure that std::sort uses atomic load and
// store operations that are safe for concurrent marking.
AtomicSlot start(indices->GetFirstElementAddress());
@@ -3882,11 +3886,11 @@ class SloppyArgumentsElementsAccessor
InternalIndex entry) {
Handle<SloppyArgumentsElements> elements(
SloppyArgumentsElements::cast(parameters), isolate);
- uint32_t length = elements->parameter_map_length();
+ uint32_t length = elements->length();
if (entry.as_uint32() < length) {
// Read context mapped entry.
DisallowHeapAllocation no_gc;
- Object probe = elements->get_mapped_entry(entry.as_uint32());
+ Object probe = elements->mapped_entries(entry.as_uint32());
DCHECK(!probe.IsTheHole(isolate));
Context context = elements->context();
int context_entry = Smi::ToInt(probe);
@@ -3918,13 +3922,13 @@ class SloppyArgumentsElementsAccessor
static inline void SetImpl(FixedArrayBase store, InternalIndex entry,
Object value) {
SloppyArgumentsElements elements = SloppyArgumentsElements::cast(store);
- uint32_t length = elements.parameter_map_length();
+ uint32_t length = elements.length();
if (entry.as_uint32() < length) {
// Store context mapped entry.
DisallowHeapAllocation no_gc;
- Object probe = elements.get_mapped_entry(entry.as_uint32());
+ Object probe = elements.mapped_entries(entry.as_uint32());
DCHECK(!probe.IsTheHole());
- Context context = elements.context();
+ Context context = Context::cast(elements.context());
int context_entry = Smi::ToInt(probe);
DCHECK(!context.get(context_entry).IsTheHole());
context.set(context_entry, value);
@@ -3935,7 +3939,7 @@ class SloppyArgumentsElementsAccessor
ArgumentsAccessor::GetRaw(arguments, entry.adjust_down(length));
if (current.IsAliasedArgumentsEntry()) {
AliasedArgumentsEntry alias = AliasedArgumentsEntry::cast(current);
- Context context = elements.context();
+ Context context = Context::cast(elements.context());
int context_entry = alias.aliased_context_slot();
DCHECK(!context.get(context_entry).IsTheHole());
context.set(context_entry, value);
@@ -3955,7 +3959,7 @@ class SloppyArgumentsElementsAccessor
static uint32_t GetCapacityImpl(JSObject holder, FixedArrayBase store) {
SloppyArgumentsElements elements = SloppyArgumentsElements::cast(store);
FixedArray arguments = elements.arguments();
- return elements.parameter_map_length() +
+ return elements.length() +
ArgumentsAccessor::GetCapacityImpl(holder, arguments);
}
@@ -3967,7 +3971,7 @@ class SloppyArgumentsElementsAccessor
size_t max_entries =
ArgumentsAccessor::GetMaxNumberOfEntries(holder, arguments);
DCHECK_LE(max_entries, std::numeric_limits<uint32_t>::max());
- return elements.parameter_map_length() + static_cast<uint32_t>(max_entries);
+ return elements.length() + static_cast<uint32_t>(max_entries);
}
static uint32_t NumberOfElementsImpl(JSObject receiver,
@@ -3977,7 +3981,7 @@ class SloppyArgumentsElementsAccessor
SloppyArgumentsElements::cast(backing_store);
FixedArrayBase arguments = elements.arguments();
uint32_t nof_elements = 0;
- uint32_t length = elements.parameter_map_length();
+ uint32_t length = elements.length();
for (uint32_t index = 0; index < length; index++) {
if (HasParameterMapArg(isolate, elements, index)) nof_elements++;
}
@@ -4004,7 +4008,7 @@ class SloppyArgumentsElementsAccessor
InternalIndex entry) {
SloppyArgumentsElements elements =
SloppyArgumentsElements::cast(parameters);
- uint32_t length = elements.parameter_map_length();
+ uint32_t length = elements.length();
if (entry.raw_value() < length) {
return HasParameterMapArg(isolate, elements, entry.raw_value());
}
@@ -4035,13 +4039,13 @@ class SloppyArgumentsElementsAccessor
if (entry.is_not_found()) return entry;
// Arguments entries could overlap with the dictionary entries, hence offset
// them by the number of context mapped entries.
- return entry.adjust_up(elements.parameter_map_length());
+ return entry.adjust_up(elements.length());
}
static PropertyDetails GetDetailsImpl(JSObject holder, InternalIndex entry) {
SloppyArgumentsElements elements =
SloppyArgumentsElements::cast(holder.elements());
- uint32_t length = elements.parameter_map_length();
+ uint32_t length = elements.length();
if (entry.as_uint32() < length) {
return PropertyDetails(kData, NONE, PropertyCellType::kNoCell);
}
@@ -4053,16 +4057,16 @@ class SloppyArgumentsElementsAccessor
static bool HasParameterMapArg(Isolate* isolate,
SloppyArgumentsElements elements,
size_t index) {
- uint32_t length = elements.parameter_map_length();
+ uint32_t length = elements.length();
if (index >= length) return false;
- return !elements.get_mapped_entry(static_cast<uint32_t>(index))
+ return !elements.mapped_entries(static_cast<uint32_t>(index))
.IsTheHole(isolate);
}
static void DeleteImpl(Handle<JSObject> obj, InternalIndex entry) {
Handle<SloppyArgumentsElements> elements(
SloppyArgumentsElements::cast(obj->elements()), obj->GetIsolate());
- uint32_t length = elements->parameter_map_length();
+ uint32_t length = elements->length();
InternalIndex delete_or_entry = entry;
if (entry.as_uint32() < length) {
delete_or_entry = InternalIndex::NotFound();
@@ -4071,8 +4075,8 @@ class SloppyArgumentsElementsAccessor
// SloppyDeleteImpl allocates a new dictionary elements store. For making
// heap verification happy we postpone clearing out the mapped entry.
if (entry.as_uint32() < length) {
- elements->set_mapped_entry(entry.as_uint32(),
- obj->GetReadOnlyRoots().the_hole_value());
+ elements->set_mapped_entries(entry.as_uint32(),
+ obj->GetReadOnlyRoots().the_hole_value());
}
}
@@ -4107,10 +4111,10 @@ class SloppyArgumentsElementsAccessor
uint32_t insertion_index = 0) {
Handle<SloppyArgumentsElements> elements =
Handle<SloppyArgumentsElements>::cast(backing_store);
- uint32_t length = elements->parameter_map_length();
+ uint32_t length = elements->length();
for (uint32_t i = 0; i < length; ++i) {
- if (elements->get_mapped_entry(i).IsTheHole(isolate)) continue;
+ if (elements->mapped_entries(i).IsTheHole(isolate)) continue;
if (convert == GetKeysConversion::kConvertToString) {
Handle<String> index_string = isolate->factory()->Uint32ToString(i);
list->set(insertion_index, *index_string);
@@ -4238,7 +4242,7 @@ class SlowSloppyArgumentsElementsAccessor
Isolate* isolate = obj->GetIsolate();
Handle<NumberDictionary> dict(NumberDictionary::cast(elements->arguments()),
isolate);
- uint32_t length = elements->parameter_map_length();
+ uint32_t length = elements->length();
dict =
NumberDictionary::DeleteEntry(isolate, dict, entry.adjust_down(length));
elements->set_arguments(*dict);
@@ -4271,9 +4275,9 @@ class SlowSloppyArgumentsElementsAccessor
Isolate* isolate = object->GetIsolate();
Handle<SloppyArgumentsElements> elements =
Handle<SloppyArgumentsElements>::cast(store);
- uint32_t length = elements->parameter_map_length();
+ uint32_t length = elements->length();
if (entry.as_uint32() < length) {
- Object probe = elements->get_mapped_entry(entry.as_uint32());
+ Object probe = elements->mapped_entries(entry.as_uint32());
DCHECK(!probe.IsTheHole(isolate));
Context context = elements->context();
int context_entry = Smi::ToInt(probe);
@@ -4281,8 +4285,8 @@ class SlowSloppyArgumentsElementsAccessor
context.set(context_entry, *value);
// Redefining attributes of an aliased element destroys fast aliasing.
- elements->set_mapped_entry(entry.as_uint32(),
- ReadOnlyRoots(isolate).the_hole_value());
+ elements->set_mapped_entries(entry.as_uint32(),
+ ReadOnlyRoots(isolate).the_hole_value());
// For elements that are still writable we re-establish slow aliasing.
if ((attributes & READ_ONLY) == 0) {
value = isolate->factory()->NewAliasedArgumentsEntry(context_entry);
@@ -4339,7 +4343,7 @@ class FastSloppyArgumentsElementsAccessor
// kMaxUInt32 indicates that a context mapped element got deleted. In this
// case we only normalize the elements (aka. migrate to SLOW_SLOPPY).
if (entry->is_not_found()) return dictionary;
- uint32_t length = elements->parameter_map_length();
+ uint32_t length = elements->length();
if (entry->as_uint32() >= length) {
*entry =
dictionary
diff --git a/chromium/v8/src/objects/feedback-vector-inl.h b/chromium/v8/src/objects/feedback-vector-inl.h
index 72b6e14883a..42c2fa51530 100644
--- a/chromium/v8/src/objects/feedback-vector-inl.h
+++ b/chromium/v8/src/objects/feedback-vector-inl.h
@@ -233,32 +233,47 @@ BinaryOperationHint BinaryOperationHintFromFeedback(int type_feedback) {
}
// Helper function to transform the feedback to CompareOperationHint.
+template <CompareOperationFeedback::Type Feedback>
+bool Is(int type_feedback) {
+ return !(type_feedback & ~Feedback);
+}
+
CompareOperationHint CompareOperationHintFromFeedback(int type_feedback) {
- switch (type_feedback) {
- case CompareOperationFeedback::kNone:
- return CompareOperationHint::kNone;
- case CompareOperationFeedback::kSignedSmall:
- return CompareOperationHint::kSignedSmall;
- case CompareOperationFeedback::kNumber:
- return CompareOperationHint::kNumber;
- case CompareOperationFeedback::kNumberOrOddball:
- return CompareOperationHint::kNumberOrOddball;
- case CompareOperationFeedback::kInternalizedString:
- return CompareOperationHint::kInternalizedString;
- case CompareOperationFeedback::kString:
- return CompareOperationHint::kString;
- case CompareOperationFeedback::kSymbol:
- return CompareOperationHint::kSymbol;
- case CompareOperationFeedback::kBigInt:
- return CompareOperationHint::kBigInt;
- case CompareOperationFeedback::kReceiver:
- return CompareOperationHint::kReceiver;
- case CompareOperationFeedback::kReceiverOrNullOrUndefined:
- return CompareOperationHint::kReceiverOrNullOrUndefined;
- default:
- return CompareOperationHint::kAny;
+ if (Is<CompareOperationFeedback::kNone>(type_feedback)) {
+ return CompareOperationHint::kNone;
}
- UNREACHABLE();
+
+ if (Is<CompareOperationFeedback::kSignedSmall>(type_feedback)) {
+ return CompareOperationHint::kSignedSmall;
+ } else if (Is<CompareOperationFeedback::kNumber>(type_feedback)) {
+ return CompareOperationHint::kNumber;
+ } else if (Is<CompareOperationFeedback::kNumberOrBoolean>(type_feedback)) {
+ return CompareOperationHint::kNumberOrBoolean;
+ }
+
+ if (Is<CompareOperationFeedback::kInternalizedString>(type_feedback)) {
+ return CompareOperationHint::kInternalizedString;
+ } else if (Is<CompareOperationFeedback::kString>(type_feedback)) {
+ return CompareOperationHint::kString;
+ }
+
+ if (Is<CompareOperationFeedback::kReceiver>(type_feedback)) {
+ return CompareOperationHint::kReceiver;
+ } else if (Is<CompareOperationFeedback::kReceiverOrNullOrUndefined>(
+ type_feedback)) {
+ return CompareOperationHint::kReceiverOrNullOrUndefined;
+ }
+
+ if (Is<CompareOperationFeedback::kBigInt>(type_feedback)) {
+ return CompareOperationHint::kBigInt;
+ }
+
+ if (Is<CompareOperationFeedback::kSymbol>(type_feedback)) {
+ return CompareOperationHint::kSymbol;
+ }
+
+ DCHECK(Is<CompareOperationFeedback::kAny>(type_feedback));
+ return CompareOperationHint::kAny;
}
// Helper function to transform the feedback to ForInHint.
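The Is<Feedback> helper introduced above treats the recorded feedback as a bit set and asks whether every recorded bit fits inside a candidate lattice element. A minimal sketch of the idiom with hypothetical bit constants (not V8's actual CompareOperationFeedback values):

#include <cassert>

constexpr int kSignedSmallBit = 1 << 0;
constexpr int kOtherNumberBit = 1 << 1;
constexpr int kNumberBits = kSignedSmallBit | kOtherNumberBit;

template <int Feedback>
constexpr bool Is(int type_feedback) {
  // True iff every bit set in type_feedback lies within Feedback.
  return !(type_feedback & ~Feedback);
}

int main() {
  assert(Is<kNumberBits>(kSignedSmallBit));   // Smi feedback fits Number
  assert(!Is<kSignedSmallBit>(kNumberBits));  // Number feedback does not fit Smi
  assert(Is<kNumberBits>(0));                 // empty feedback fits anything
  return 0;
}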
diff --git a/chromium/v8/src/objects/feedback-vector.cc b/chromium/v8/src/objects/feedback-vector.cc
index dc4581f40ee..ce638b23399 100644
--- a/chromium/v8/src/objects/feedback-vector.cc
+++ b/chromium/v8/src/objects/feedback-vector.cc
@@ -235,10 +235,14 @@ Handle<ClosureFeedbackCellArray> ClosureFeedbackCellArray::New(
// static
Handle<FeedbackVector> FeedbackVector::New(
Isolate* isolate, Handle<SharedFunctionInfo> shared,
- Handle<ClosureFeedbackCellArray> closure_feedback_cell_array) {
+ Handle<ClosureFeedbackCellArray> closure_feedback_cell_array,
+ IsCompiledScope* is_compiled_scope) {
+ DCHECK(is_compiled_scope->is_compiled());
Factory* factory = isolate->factory();
- const int slot_count = shared->feedback_metadata().slot_count();
+ Handle<FeedbackMetadata> feedback_metadata(shared->feedback_metadata(),
+ isolate);
+ const int slot_count = feedback_metadata->slot_count();
Handle<FeedbackVector> vector =
factory->NewFeedbackVector(shared, closure_feedback_cell_array);
@@ -260,7 +264,7 @@ Handle<FeedbackVector> FeedbackVector::New(
*uninitialized_sentinel);
for (int i = 0; i < slot_count;) {
FeedbackSlot slot(i);
- FeedbackSlotKind kind = shared->feedback_metadata().GetKind(slot);
+ FeedbackSlotKind kind = feedback_metadata->GetKind(slot);
int index = FeedbackVector::GetIndex(slot);
int entry_size = FeedbackMetadata::GetSlotSize(kind);
@@ -320,6 +324,43 @@ Handle<FeedbackVector> FeedbackVector::New(
return result;
}
+namespace {
+
+Handle<FeedbackVector> NewFeedbackVectorForTesting(
+ Isolate* isolate, const FeedbackVectorSpec* spec) {
+ Handle<FeedbackMetadata> metadata = FeedbackMetadata::New(isolate, spec);
+ Handle<SharedFunctionInfo> shared =
+ isolate->factory()->NewSharedFunctionInfoForBuiltin(
+ isolate->factory()->empty_string(), Builtins::kIllegal);
+ // Set the raw feedback metadata to circumvent checks that we are not
+ // overwriting existing metadata.
+ shared->set_raw_outer_scope_info_or_feedback_metadata(*metadata);
+ Handle<ClosureFeedbackCellArray> closure_feedback_cell_array =
+ ClosureFeedbackCellArray::New(isolate, shared);
+
+ IsCompiledScope is_compiled_scope(shared->is_compiled_scope());
+ return FeedbackVector::New(isolate, shared, closure_feedback_cell_array,
+ &is_compiled_scope);
+}
+
+} // namespace
+
+// static
+Handle<FeedbackVector> FeedbackVector::NewWithOneBinarySlotForTesting(
+ Zone* zone, Isolate* isolate) {
+ FeedbackVectorSpec one_slot(zone);
+ one_slot.AddBinaryOpICSlot();
+ return NewFeedbackVectorForTesting(isolate, &one_slot);
+}
+
+// static
+Handle<FeedbackVector> FeedbackVector::NewWithOneCompareSlotForTesting(
+ Zone* zone, Isolate* isolate) {
+ FeedbackVectorSpec one_slot(zone);
+ one_slot.AddCompareICSlot();
+ return NewFeedbackVectorForTesting(isolate, &one_slot);
+}
+
// static
void FeedbackVector::AddToVectorsForProfilingTools(
Isolate* isolate, Handle<FeedbackVector> vector) {
diff --git a/chromium/v8/src/objects/feedback-vector.h b/chromium/v8/src/objects/feedback-vector.h
index 24025ad16a4..4789206102a 100644
--- a/chromium/v8/src/objects/feedback-vector.h
+++ b/chromium/v8/src/objects/feedback-vector.h
@@ -23,6 +23,8 @@
namespace v8 {
namespace internal {
+class IsCompiledScope;
+
enum class FeedbackSlotKind {
// This kind means that the slot points to the middle of other slot
// which occupies more than one feedback vector element.
@@ -149,11 +151,9 @@ using MaybeObjectHandles = std::vector<MaybeObjectHandle>;
class FeedbackMetadata;
// ClosureFeedbackCellArray is a FixedArray that contains feedback cells used
-// when creating closures from a function. Along with the feedback
-// cells, the first slot (slot 0) is used to hold a budget to measure the
-// hotness of the function. This is created once the function is compiled and is
-// either held by the feedback vector (if allocated) or by the FeedbackCell of
-// the closure.
+// when creating closures from a function. This is created once the function is
+// compiled and is either held by the feedback vector (if allocated) or by the
+// FeedbackCell of the closure.
class ClosureFeedbackCellArray : public FixedArray {
public:
NEVER_READ_ONLY_SPACE
@@ -262,7 +262,13 @@ class FeedbackVector : public HeapObject {
V8_EXPORT_PRIVATE static Handle<FeedbackVector> New(
Isolate* isolate, Handle<SharedFunctionInfo> shared,
- Handle<ClosureFeedbackCellArray> closure_feedback_cell_array);
+ Handle<ClosureFeedbackCellArray> closure_feedback_cell_array,
+ IsCompiledScope* is_compiled_scope);
+
+ V8_EXPORT_PRIVATE static Handle<FeedbackVector>
+ NewWithOneBinarySlotForTesting(Zone* zone, Isolate* isolate);
+ V8_EXPORT_PRIVATE static Handle<FeedbackVector>
+ NewWithOneCompareSlotForTesting(Zone* zone, Isolate* isolate);
#define DEFINE_SLOT_KIND_PREDICATE(Name) \
bool Name(FeedbackSlot slot) const { return Name##Kind(GetKind(slot)); }
diff --git a/chromium/v8/src/objects/fixed-array-inl.h b/chromium/v8/src/objects/fixed-array-inl.h
index 174d4abc5b4..a49483ebc64 100644
--- a/chromium/v8/src/objects/fixed-array-inl.h
+++ b/chromium/v8/src/objects/fixed-array-inl.h
@@ -209,8 +209,13 @@ inline int WeakArrayList::AllocatedSize() {
template <SearchMode search_mode, typename T>
int BinarySearch(T* array, Name name, int valid_entries,
int* out_insertion_index) {
- DCHECK(search_mode == ALL_ENTRIES || out_insertion_index == nullptr);
+ DCHECK_IMPLIES(search_mode == VALID_ENTRIES, out_insertion_index == nullptr);
int low = 0;
+ // We have to search all entries, even when search_mode == VALID_ENTRIES.
+ // This is because the InternalIndex might differ from the SortedIndex
+ // (i.e. the first added item in {array} could be the last in the sorted
+ // index). After doing the binary search and getting the correct internal
+ // index, we check that the index is lower than valid_entries, if needed.
int high = array->number_of_entries() - 1;
uint32_t hash = name.hash_field();
int limit = high;
@@ -234,6 +239,11 @@ int BinarySearch(T* array, Name name, int valid_entries,
Name entry = array->GetKey(InternalIndex(sort_index));
uint32_t current_hash = entry.hash_field();
if (current_hash != hash) {
+ // 'search_mode == ALL_ENTRIES' here and below is not needed since
+ // 'out_insertion_index != nullptr' implies 'search_mode == ALL_ENTRIES'.
+ // Having said that, when creating the template for <VALID_ENTRIES> these
+ // ifs can be elided by the C++ compiler if we add 'search_mode ==
+ // ALL_ENTRIES'.
if (search_mode == ALL_ENTRIES && out_insertion_index != nullptr) {
*out_insertion_index = sort_index + (current_hash > hash ? 0 : 1);
}
@@ -284,8 +294,9 @@ int LinearSearch(T* array, Name name, int valid_entries,
}
template <SearchMode search_mode, typename T>
-int Search(T* array, Name name, int valid_entries, int* out_insertion_index) {
- SLOW_DCHECK(array->IsSortedNoDuplicates());
+int Search(T* array, Name name, int valid_entries, int* out_insertion_index,
+ bool concurrent_search) {
+ SLOW_DCHECK_IMPLIES(!concurrent_search, array->IsSortedNoDuplicates());
if (valid_entries == 0) {
if (search_mode == ALL_ENTRIES && out_insertion_index != nullptr) {
@@ -294,14 +305,14 @@ int Search(T* array, Name name, int valid_entries, int* out_insertion_index) {
return T::kNotFound;
}
- // Fast case: do linear search for small arrays.
+ // Do a linear search for small arrays, and for searches on a background
+ // thread.
const int kMaxElementsForLinearSearch = 8;
- if (valid_entries <= kMaxElementsForLinearSearch) {
+ if (valid_entries <= kMaxElementsForLinearSearch || concurrent_search) {
return LinearSearch<search_mode>(array, name, valid_entries,
out_insertion_index);
}
- // Slow case: perform binary search.
return BinarySearch<search_mode>(array, name, valid_entries,
out_insertion_index);
}
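The dispatch above picks a linear scan for small arrays and for concurrent callers. A tiny standalone sketch of that decision rule (PickSearchStrategy is a hypothetical stand-in, not V8's Search signature):

enum class Strategy { kLinear, kBinary };

Strategy PickSearchStrategy(int valid_entries, bool concurrent_search) {
  const int kMaxElementsForLinearSearch = 8;
  // Concurrent callers take the linear path, trading speed for thread-safety,
  // as described in the descriptor-array.h comment above.
  if (valid_entries <= kMaxElementsForLinearSearch || concurrent_search) {
    return Strategy::kLinear;
  }
  return Strategy::kBinary;
}

int main() {
  bool small_main_thread = PickSearchStrategy(5, false) == Strategy::kLinear;
  bool large_background = PickSearchStrategy(100, true) == Strategy::kLinear;
  return (small_main_thread && large_background) ? 0 : 1;
}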
diff --git a/chromium/v8/src/objects/fixed-array.h b/chromium/v8/src/objects/fixed-array.h
index 63c3c5360b9..ccb954c6e25 100644
--- a/chromium/v8/src/objects/fixed-array.h
+++ b/chromium/v8/src/objects/fixed-array.h
@@ -465,7 +465,8 @@ enum SearchMode { ALL_ENTRIES, VALID_ENTRIES };
template <SearchMode search_mode, typename T>
inline int Search(T* array, Name name, int valid_entries = 0,
- int* out_insertion_index = nullptr);
+ int* out_insertion_index = nullptr,
+ bool concurrent_search = false);
// ByteArray represents fixed sized byte arrays. Used for the relocation info
// that is attached to code objects.
diff --git a/chromium/v8/src/objects/fixed-array.tq b/chromium/v8/src/objects/fixed-array.tq
index 5c22149ebb8..5e206cb5afe 100644
--- a/chromium/v8/src/objects/fixed-array.tq
+++ b/chromium/v8/src/objects/fixed-array.tq
@@ -36,11 +36,13 @@ extern class ByteArray extends FixedArrayBase {
@hasSameInstanceTypeAsParent
@generateCppClass
+@doNotGenerateCast
extern class ArrayList extends FixedArray {
}
@hasSameInstanceTypeAsParent
@generateCppClass
+@doNotGenerateCast
extern class TemplateList extends FixedArray {
}
@@ -101,6 +103,9 @@ operator '[]=' macro StoreFixedArrayDirect(a: FixedArray, i: Smi, v: Object) {
a.objects[i] = v;
}
+extern macro AllocateFixedArray(
+ constexpr ElementsKind, intptr, constexpr AllocationFlag): FixedArrayBase;
+
extern macro AllocateZeroedFixedArray(intptr): FixedArray;
extern macro AllocateZeroedFixedDoubleArray(intptr): FixedDoubleArray;
extern macro CalculateNewElementsCapacity(Smi): Smi;
diff --git a/chromium/v8/src/objects/frame-array.h b/chromium/v8/src/objects/frame-array.h
index 9ad4fb96a9f..bc4676fc7b8 100644
--- a/chromium/v8/src/objects/frame-array.h
+++ b/chromium/v8/src/objects/frame-array.h
@@ -52,7 +52,8 @@ class FrameArray : public FixedArray {
kIsConstructor = 1 << 3,
kAsmJsAtNumberConversion = 1 << 4,
kIsAsync = 1 << 5,
- kIsPromiseAll = 1 << 6
+ kIsPromiseAll = 1 << 6,
+ kIsPromiseAny = 1 << 7
};
static Handle<FrameArray> AppendJSFrame(Handle<FrameArray> in,
diff --git a/chromium/v8/src/objects/heap-object.h b/chromium/v8/src/objects/heap-object.h
index b19d429320b..0f8efb5c78f 100644
--- a/chromium/v8/src/objects/heap-object.h
+++ b/chromium/v8/src/objects/heap-object.h
@@ -191,7 +191,7 @@ class HeapObject : public Object {
bool CanBeRehashed() const;
// Rehash the object based on the layout inferred from its map.
- void RehashBasedOnMap(ReadOnlyRoots root);
+ void RehashBasedOnMap(LocalIsolateWrapper isolate);
// Layout description.
#define HEAP_OBJECT_FIELDS(V) \
diff --git a/chromium/v8/src/objects/heap-object.tq b/chromium/v8/src/objects/heap-object.tq
index e2f1fe4240e..ca794032f39 100644
--- a/chromium/v8/src/objects/heap-object.tq
+++ b/chromium/v8/src/objects/heap-object.tq
@@ -3,6 +3,7 @@
// found in the LICENSE file.
@abstract
+@doNotGenerateCast
extern class HeapObject extends StrongTagged {
const map: Map;
}
diff --git a/chromium/v8/src/objects/instance-type-inl.h b/chromium/v8/src/objects/instance-type-inl.h
index 45b858209d3..dc41ed6b32d 100644
--- a/chromium/v8/src/objects/instance-type-inl.h
+++ b/chromium/v8/src/objects/instance-type-inl.h
@@ -30,18 +30,14 @@ struct InstanceRangeChecker {
template <InstanceType upper_limit>
struct InstanceRangeChecker<FIRST_TYPE, upper_limit> {
static constexpr bool Check(InstanceType value) {
-#if V8_HAS_CXX14_CONSTEXPR
- DCHECK_LE(FIRST_TYPE, value);
-#endif
+ CONSTEXPR_DCHECK(FIRST_TYPE <= value);
return value <= upper_limit;
}
};
template <InstanceType lower_limit>
struct InstanceRangeChecker<lower_limit, LAST_TYPE> {
static constexpr bool Check(InstanceType value) {
-#if V8_HAS_CXX14_CONSTEXPR
- DCHECK_GE(LAST_TYPE, value);
-#endif
+ CONSTEXPR_DCHECK(LAST_TYPE >= value);
return value >= lower_limit;
}
};
diff --git a/chromium/v8/src/objects/instance-type.h b/chromium/v8/src/objects/instance-type.h
index 5de264b1de4..077eb307012 100644
--- a/chromium/v8/src/objects/instance-type.h
+++ b/chromium/v8/src/objects/instance-type.h
@@ -243,6 +243,11 @@ TYPED_ARRAYS(TYPED_ARRAY_IS_TYPE_FUNCTION_DECL)
#undef IS_TYPE_FUNCTION_DECL
} // namespace InstanceTypeChecker
+// This list must contain only maps that are shared by all objects of their
+// instance type.
+#define UNIQUE_INSTANCE_TYPE_MAP_LIST_GENERATOR(V, _) \
+ TORQUE_DEFINED_MAP_CSA_LIST_GENERATOR(V, _)
+
} // namespace internal
} // namespace v8
diff --git a/chromium/v8/src/objects/internal-index.h b/chromium/v8/src/objects/internal-index.h
index f00c65bbdd7..7cc9fa67387 100644
--- a/chromium/v8/src/objects/internal-index.h
+++ b/chromium/v8/src/objects/internal-index.h
@@ -40,9 +40,7 @@ class InternalIndex {
return static_cast<uint32_t>(entry_);
}
constexpr int as_int() const {
-#if V8_HAS_CXX14_CONSTEXPR
- DCHECK_LE(entry_, std::numeric_limits<int>::max());
-#endif
+ CONSTEXPR_DCHECK(entry_ <= std::numeric_limits<int>::max());
return static_cast<int>(entry_);
}
diff --git a/chromium/v8/src/objects/intl-objects.cc b/chromium/v8/src/objects/intl-objects.cc
index d358e2780e2..b58dc83264a 100644
--- a/chromium/v8/src/objects/intl-objects.cc
+++ b/chromium/v8/src/objects/intl-objects.cc
@@ -89,9 +89,7 @@ inline constexpr uint16_t ToLatin1Lower(uint16_t ch) {
// Does not work for U+00DF (sharp-s), U+00B5 (micron), U+00FF.
inline constexpr uint16_t ToLatin1Upper(uint16_t ch) {
-#if V8_HAS_CXX14_CONSTEXPR
- DCHECK(ch != 0xDF && ch != 0xB5 && ch != 0xFF);
-#endif
+ CONSTEXPR_DCHECK(ch != 0xDF && ch != 0xB5 && ch != 0xFF);
return ch &
~((IsAsciiLower(ch) || (((ch & 0xE0) == 0xE0) && ch != 0xF7)) << 5);
}
@@ -1471,7 +1469,7 @@ class Iterator : public icu::Locale::Iterator {
Iterator(std::vector<std::string>::const_iterator begin,
std::vector<std::string>::const_iterator end)
: iter_(begin), end_(end) {}
- virtual ~Iterator() {}
+ ~Iterator() override = default;
UBool hasNext() const override { return iter_ != end_; }
@@ -1893,8 +1891,8 @@ Maybe<Intl::ResolvedLocale> Intl::ResolveLocale(
Handle<Managed<icu::UnicodeString>> Intl::SetTextToBreakIterator(
Isolate* isolate, Handle<String> text, icu::BreakIterator* break_iterator) {
text = String::Flatten(isolate, text);
- icu::UnicodeString* u_text =
- (icu::UnicodeString*)(Intl::ToICUUnicodeString(isolate, text).clone());
+ icu::UnicodeString* u_text = static_cast<icu::UnicodeString*>(
+ Intl::ToICUUnicodeString(isolate, text).clone());
Handle<Managed<icu::UnicodeString>> new_u_text =
Managed<icu::UnicodeString>::FromRawPtr(isolate, 0, u_text);
diff --git a/chromium/v8/src/objects/intl-objects.h b/chromium/v8/src/objects/intl-objects.h
index 944a1f103ed..494b576364c 100644
--- a/chromium/v8/src/objects/intl-objects.h
+++ b/chromium/v8/src/objects/intl-objects.h
@@ -21,7 +21,7 @@
#include "unicode/locid.h"
#include "unicode/uversion.h"
-#define V8_MINIMUM_ICU_VERSION 65
+#define V8_MINIMUM_ICU_VERSION 67
namespace U_ICU_NAMESPACE {
class BreakIterator;
@@ -287,7 +287,7 @@ class Intl {
std::vector<std::string> all_locales;
const char* loc;
- while ((loc = uenum_next(uenum, NULL, &status)) != nullptr) {
+ while ((loc = uenum_next(uenum, nullptr, &status)) != nullptr) {
DCHECK(U_SUCCESS(status));
std::string locstr(loc);
std::replace(locstr.begin(), locstr.end(), '_', '-');
@@ -299,7 +299,6 @@ class Intl {
set_ = Intl::BuildLocaleSet(all_locales, C::path(), C::key());
}
- virtual ~AvailableLocales() {}
const std::set<std::string>& Get() const { return set_; }
private:
diff --git a/chromium/v8/src/objects/js-aggregate-error-inl.h b/chromium/v8/src/objects/js-aggregate-error-inl.h
deleted file mode 100644
index 552012c37fe..00000000000
--- a/chromium/v8/src/objects/js-aggregate-error-inl.h
+++ /dev/null
@@ -1,25 +0,0 @@
-// Copyright 2020 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef V8_OBJECTS_JS_AGGREGATE_ERROR_INL_H_
-#define V8_OBJECTS_JS_AGGREGATE_ERROR_INL_H_
-
-#include "src/objects/js-aggregate-error.h"
-
-#include "src/objects/objects-inl.h" // Needed for write barriers
-
-// Has to be the last include (doesn't have include guards):
-#include "src/objects/object-macros.h"
-
-namespace v8 {
-namespace internal {
-
-TQ_OBJECT_CONSTRUCTORS_IMPL(JSAggregateError)
-
-} // namespace internal
-} // namespace v8
-
-#include "src/objects/object-macros-undef.h"
-
-#endif // V8_OBJECTS_JS_AGGREGATE_ERROR_INL_H_
diff --git a/chromium/v8/src/objects/js-aggregate-error.h b/chromium/v8/src/objects/js-aggregate-error.h
deleted file mode 100644
index c77633d44ed..00000000000
--- a/chromium/v8/src/objects/js-aggregate-error.h
+++ /dev/null
@@ -1,27 +0,0 @@
-// Copyright 2020 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef V8_OBJECTS_JS_AGGREGATE_ERROR_H_
-#define V8_OBJECTS_JS_AGGREGATE_ERROR_H_
-
-#include "src/objects/js-objects.h"
-#include "torque-generated/builtin-definitions-tq.h"
-
-// Has to be the last include (doesn't have include guards):
-#include "src/objects/object-macros.h"
-
-namespace v8 {
-namespace internal {
-
-class JSAggregateError
- : public TorqueGeneratedJSAggregateError<JSAggregateError, JSObject> {
- public:
- DECL_PRINTER(JSAggregateError)
- TQ_OBJECT_CONSTRUCTORS(JSAggregateError)
-};
-
-} // namespace internal
-} // namespace v8
-
-#endif // V8_OBJECTS_JS_AGGREGATE_ERROR_H_
diff --git a/chromium/v8/src/objects/js-aggregate-error.tq b/chromium/v8/src/objects/js-aggregate-error.tq
deleted file mode 100644
index efa416e9fb4..00000000000
--- a/chromium/v8/src/objects/js-aggregate-error.tq
+++ /dev/null
@@ -1,81 +0,0 @@
-// Copyright 2020 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include 'src/objects/js-aggregate-error.h'
-
-@generateCppClass
-extern class JSAggregateError extends JSObject {
- // Only Undefined during AggregateError object creation. In order to make the
- // field type FixedArray, we'd need to initialize it in ErrorUtils::Construct
- // (after it, it's too late) which we don't want.
- errors: FixedArray|Undefined;
-}
-
-namespace error {
-
-transitioning javascript builtin AggregateErrorConstructor(
- js-implicit context: NativeContext, target: JSFunction,
- newTarget: JSAny)(...arguments): JSAny {
- // This function is implementing the spec as suggested by
- // https://github.com/tc39/proposal-promise-any/pull/59 . FIXME(marja):
- // change this if the PR is declined.
-
- // 1. If NewTarget is undefined, let newTarget be the active function
- // object, else let newTarget be NewTarget.
- // 2. Let O be ? OrdinaryCreateFromConstructor(newTarget,
- // "%AggregateError.prototype%", « [[ErrorData]], [[AggregateErrors]] »).
- // 3. If _message_ is not _undefined_, then
- // a. Let msg be ? ToString(_message_).
- // b. Let msgDesc be the PropertyDescriptor { [[Value]]: _msg_,
- // [[Writable]]: *true*, [[Enumerable]]: *false*, [[Configurable]]: *true*
- // c. Perform ! DefinePropertyOrThrow(_O_, *"message"*, _msgDesc_).
- const message: JSAny = arguments[1];
- const obj: JSAggregateError =
- ConstructAggregateErrorHelper(context, target, newTarget, message);
-
- // 4. Let errorsList be ? IterableToList(errors).
- const errors: JSAny = arguments[0];
- const errorsArray =
- iterator::IterableToFixedArrayWithSymbolLookupSlow(errors);
- // errorsArray must be marked copy-on-write, since the "errors" getter
- // creates a thin JSArray wrapper around it.
- MakeFixedArrayCOW(errorsArray);
-
- // 5. Set O.[[AggregateErrors]] to errorsList.
- obj.errors = errorsArray;
-
- // 6. Return O.
- return obj;
-}
-
-transitioning javascript builtin AggregateErrorPrototypeErrorsGetter(
- js-implicit context: NativeContext, receiver: JSAny)(): JSAny {
- // 1. Let E be the this value.
- // 2. If Type(E) is not Object, throw a TypeError exception.
- // 3. If E does not have an [[ErrorData]] internal slot, throw a TypeError
- // exception.
- // 4. If E does not have an [[AggregateErrors]] internal slot, throw a
- // TypeError exception.
- // 5. Return ! CreateArrayFromList(E.[[AggregateErrors]]).
- typeswitch (receiver) {
- case (receiver: JSAggregateError): {
- return array::CreateJSArrayWithElements(
- UnsafeCast<FixedArray>(receiver.errors));
- }
- case (Object): {
- ThrowTypeError(
- MessageTemplate::kNotGeneric, 'JSAggregateError.prototype.errors.get',
- 'AggregateError');
- }
- }
-}
-
-extern runtime ConstructAggregateErrorHelper(
- Context, JSFunction, JSAny, Object): JSAggregateError;
-
-extern runtime ConstructInternalAggregateErrorHelper(
- Context, Object): JSAggregateError;
-
-extern macro MakeFixedArrayCOW(FixedArray);
-}
diff --git a/chromium/v8/src/objects/js-array.h b/chromium/v8/src/objects/js-array.h
index 86d14f1924b..6cf54df896f 100644
--- a/chromium/v8/src/objects/js-array.h
+++ b/chromium/v8/src/objects/js-array.h
@@ -30,6 +30,7 @@ class JSArray : public JSObject {
// is set to a smi. This matches the set function on FixedArray.
inline void set_length(Smi length);
+ static bool MayHaveReadOnlyLength(Map js_array_map);
static bool HasReadOnlyLength(Handle<JSArray> array);
static bool WouldChangeReadOnlyLength(Handle<JSArray> array, uint32_t index);
diff --git a/chromium/v8/src/objects/js-array.tq b/chromium/v8/src/objects/js-array.tq
index 0cba7203a5c..dcffc68cba0 100644
--- a/chromium/v8/src/objects/js-array.tq
+++ b/chromium/v8/src/objects/js-array.tq
@@ -2,10 +2,28 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
+extern enum IterationKind extends uint31
+constexpr 'IterationKind' { kKeys, kValues, kEntries }
+
extern class JSArrayIterator extends JSObject {
iterated_object: JSReceiver;
next_index: Number;
- kind: Smi;
+ kind: SmiTagged<IterationKind>;
+}
+
+// Perform CreateArrayIterator (ES #sec-createarrayiterator).
+@export
+macro CreateArrayIterator(implicit context: NativeContext)(
+ array: JSReceiver, kind: constexpr IterationKind): JSArrayIterator {
+ return new JSArrayIterator{
+ map: UnsafeCast<Map>(
+ context[NativeContextSlot::INITIAL_ARRAY_ITERATOR_MAP_INDEX]),
+ properties_or_hash: kEmptyFixedArray,
+ elements: kEmptyFixedArray,
+ iterated_object: array,
+ next_index: 0,
+ kind: SmiTag<IterationKind>(kind)
+ };
}
extern class JSArray extends JSObject {
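The new kind field stores the IterationKind enum as a SmiTagged value, i.e. the enum payload lives in the payload bits of a Smi rather than in a bare Smi number. Below is a minimal standalone C++ sketch of that idea, assuming the simplest Smi scheme (payload shifted left by one, tag bit 0 clear); V8's real encoding depends on the build configuration, so this is an approximation only, not part of the patch.

#include <cassert>
#include <cstdint>

// Hypothetical mirror of the Torque enum IterationKind {kKeys, kValues, kEntries}.
enum class IterationKind : uint32_t { kKeys = 0, kValues = 1, kEntries = 2 };

// Simplified Smi tagging: payload in the upper bits, tag bit 0 == 0.
inline uint32_t SmiTag(IterationKind kind) {
  return static_cast<uint32_t>(kind) << 1;
}

inline IterationKind SmiUntag(uint32_t tagged) {
  assert((tagged & 1u) == 0u);  // value must carry the Smi tag
  return static_cast<IterationKind>(tagged >> 1);
}

int main() {
  uint32_t field = SmiTag(IterationKind::kEntries);
  assert(SmiUntag(field) == IterationKind::kEntries);
  return 0;
}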
diff --git a/chromium/v8/src/objects/js-collator.cc b/chromium/v8/src/objects/js-collator.cc
index ea9120cbed9..4cc7ce3f217 100644
--- a/chromium/v8/src/objects/js-collator.cc
+++ b/chromium/v8/src/objects/js-collator.cc
@@ -508,7 +508,7 @@ class CollatorAvailableLocales {
set_ = Intl::BuildLocaleSet(locales, U_ICUDATA_COLL, nullptr);
#undef U_ICUDATA_COLL
}
- virtual ~CollatorAvailableLocales() {}
+ virtual ~CollatorAvailableLocales() = default;
const std::set<std::string>& Get() const { return set_; }
private:
diff --git a/chromium/v8/src/objects/js-collection.h b/chromium/v8/src/objects/js-collection.h
index 17f9c3e198b..a0350726c02 100644
--- a/chromium/v8/src/objects/js-collection.h
+++ b/chromium/v8/src/objects/js-collection.h
@@ -30,6 +30,7 @@ class JSSet : public TorqueGeneratedJSSet<JSSet, JSCollection> {
public:
static void Initialize(Handle<JSSet> set, Isolate* isolate);
static void Clear(Isolate* isolate, Handle<JSSet> set);
+ void Rehash(Isolate* isolate);
// Dispatched behavior.
DECL_PRINTER(JSSet)
@@ -56,6 +57,7 @@ class JSMap : public TorqueGeneratedJSMap<JSMap, JSCollection> {
public:
static void Initialize(Handle<JSMap> map, Isolate* isolate);
static void Clear(Isolate* isolate, Handle<JSMap> map);
+ void Rehash(Isolate* isolate);
// Dispatched behavior.
DECL_PRINTER(JSMap)
diff --git a/chromium/v8/src/objects/js-date-time-format.cc b/chromium/v8/src/objects/js-date-time-format.cc
index 669dfd88ab2..7a2c9036ad2 100644
--- a/chromium/v8/src/objects/js-date-time-format.cc
+++ b/chromium/v8/src/objects/js-date-time-format.cc
@@ -59,6 +59,21 @@ JSDateTimeFormat::HourCycle ToHourCycle(const std::string& hc) {
return JSDateTimeFormat::HourCycle::kUndefined;
}
+JSDateTimeFormat::HourCycle ToHourCycle(UDateFormatHourCycle hc) {
+ switch (hc) {
+ case UDAT_HOUR_CYCLE_11:
+ return JSDateTimeFormat::HourCycle::kH11;
+ case UDAT_HOUR_CYCLE_12:
+ return JSDateTimeFormat::HourCycle::kH12;
+ case UDAT_HOUR_CYCLE_23:
+ return JSDateTimeFormat::HourCycle::kH23;
+ case UDAT_HOUR_CYCLE_24:
+ return JSDateTimeFormat::HourCycle::kH24;
+ default:
+ return JSDateTimeFormat::HourCycle::kUndefined;
+ }
+}
+
Maybe<JSDateTimeFormat::HourCycle> GetHourCycle(Isolate* isolate,
Handle<JSReceiver> options,
const char* method) {
@@ -161,7 +176,7 @@ static std::vector<PatternItem> BuildPatternItems() {
class PatternItems {
public:
PatternItems() : data(BuildPatternItems()) {}
- virtual ~PatternItems() {}
+ virtual ~PatternItems() = default;
const std::vector<PatternItem>& Get() const { return data; }
private:
@@ -225,7 +240,7 @@ const std::vector<PatternData> CreateData(const char* digit2,
class Pattern {
public:
Pattern(const char* d1, const char* d2) : data(CreateData(d1, d2)) {}
- virtual ~Pattern() {}
+ virtual ~Pattern() = default;
virtual const std::vector<PatternData>& Get() const { return data; }
private:
@@ -892,21 +907,58 @@ MaybeHandle<JSObject> JSDateTimeFormat::ToDateTimeOptions(
needs_default &= maybe_needs_default.FromJust();
}
- // 6. If needDefaults is true and defaults is either "date" or "all", then
+ // 6. Let dateStyle be ? Get(options, "dateStyle").
+ Maybe<bool> maybe_datestyle_undefined =
+ IsPropertyUndefined(isolate, options, factory->dateStyle_string());
+ MAYBE_RETURN(maybe_datestyle_undefined, Handle<JSObject>());
+ // 7. Let timeStyle be ? Get(options, "timeStyle").
+ Maybe<bool> maybe_timestyle_undefined =
+ IsPropertyUndefined(isolate, options, factory->timeStyle_string());
+ MAYBE_RETURN(maybe_timestyle_undefined, Handle<JSObject>());
+ // 8. If dateStyle is not undefined or timeStyle is not undefined, let
+ // needDefaults be false.
+ if (!maybe_datestyle_undefined.FromJust() ||
+ !maybe_timestyle_undefined.FromJust()) {
+ needs_default = false;
+ }
+ // 9. If required is "date" and timeStyle is not undefined,
+ if (required == RequiredOption::kDate &&
+ !maybe_timestyle_undefined.FromJust()) {
+ // a. Throw a TypeError exception.
+ THROW_NEW_ERROR(
+ isolate,
+ NewTypeError(MessageTemplate::kInvalid,
+ factory->NewStringFromStaticChars("option"),
+ factory->NewStringFromStaticChars("timeStyle")),
+ JSObject);
+ }
+ // 10. If required is "time" and dateStyle is not undefined,
+ if (required == RequiredOption::kTime &&
+ !maybe_datestyle_undefined.FromJust()) {
+ // a. Throw a TypeError exception.
+ THROW_NEW_ERROR(
+ isolate,
+ NewTypeError(MessageTemplate::kInvalid,
+ factory->NewStringFromStaticChars("option"),
+ factory->NewStringFromStaticChars("dateStyle")),
+ JSObject);
+ }
+
+ // 11. If needDefaults is true and defaults is either "date" or "all", then
if (needs_default) {
if (defaults == DefaultsOption::kAll || defaults == DefaultsOption::kDate) {
// a. For each of the property names "year", "month", "day", do)
const std::vector<std::string> list({"year", "month", "day"});
MAYBE_RETURN(CreateDefault(isolate, options, list), Handle<JSObject>());
}
- // 7. If needDefaults is true and defaults is either "time" or "all", then
+ // 12. If needDefaults is true and defaults is either "time" or "all", then
if (defaults == DefaultsOption::kAll || defaults == DefaultsOption::kTime) {
// a. For each of the property names "hour", "minute", "second", do
const std::vector<std::string> list({"hour", "minute", "second"});
MAYBE_RETURN(CreateDefault(isolate, options, list), Handle<JSObject>());
}
}
- // 8. Return options.
+ // 13. Return options.
return options;
}
@@ -1275,7 +1327,7 @@ icu::UnicodeString ReplaceSkeleton(const icu::UnicodeString input,
std::unique_ptr<icu::SimpleDateFormat> DateTimeStylePattern(
JSDateTimeFormat::DateTimeStyle date_style,
- JSDateTimeFormat::DateTimeStyle time_style, const icu::Locale& icu_locale,
+ JSDateTimeFormat::DateTimeStyle time_style, icu::Locale& icu_locale,
JSDateTimeFormat::HourCycle hc, icu::DateTimePatternGenerator* generator) {
std::unique_ptr<icu::SimpleDateFormat> result;
if (date_style != JSDateTimeFormat::DateTimeStyle::kUndefined) {
@@ -1290,7 +1342,9 @@ std::unique_ptr<icu::SimpleDateFormat> DateTimeStylePattern(
icu_locale)));
// For instance without time, we do not need to worry about the hour cycle
// impact so we can return directly.
- return result;
+ if (result.get() != nullptr) {
+ return result;
+ }
}
} else {
if (time_style != JSDateTimeFormat::DateTimeStyle::kUndefined) {
@@ -1305,28 +1359,27 @@ std::unique_ptr<icu::SimpleDateFormat> DateTimeStylePattern(
UErrorCode status = U_ZERO_ERROR;
// Somehow we fail to create the instance.
if (result.get() == nullptr) {
- icu::Locale modified_locale(icu_locale);
// Fallback to the locale without "nu".
if (!icu_locale.getUnicodeKeywordValue<std::string>("nu", status).empty()) {
status = U_ZERO_ERROR;
- modified_locale.setUnicodeKeywordValue("nu", nullptr, status);
- return DateTimeStylePattern(date_style, time_style, modified_locale, hc,
+ icu_locale.setUnicodeKeywordValue("nu", nullptr, status);
+ return DateTimeStylePattern(date_style, time_style, icu_locale, hc,
generator);
}
status = U_ZERO_ERROR;
// Fallback to the locale without "hc".
if (!icu_locale.getUnicodeKeywordValue<std::string>("hc", status).empty()) {
status = U_ZERO_ERROR;
- modified_locale.setUnicodeKeywordValue("hc", nullptr, status);
- return DateTimeStylePattern(date_style, time_style, modified_locale, hc,
+ icu_locale.setUnicodeKeywordValue("hc", nullptr, status);
+ return DateTimeStylePattern(date_style, time_style, icu_locale, hc,
generator);
}
status = U_ZERO_ERROR;
// Fallback to the locale without "ca".
if (!icu_locale.getUnicodeKeywordValue<std::string>("ca", status).empty()) {
status = U_ZERO_ERROR;
- modified_locale.setUnicodeKeywordValue("ca", nullptr, status);
- return DateTimeStylePattern(date_style, time_style, modified_locale, hc,
+ icu_locale.setUnicodeKeywordValue("ca", nullptr, status);
+ return DateTimeStylePattern(date_style, time_style, icu_locale, hc,
generator);
}
return nullptr;
@@ -1508,34 +1561,6 @@ MaybeHandle<JSDateTimeFormat> JSDateTimeFormat::New(
CHECK(U_SUCCESS(status));
}
- // 17. Let timeZone be ? Get(options, "timeZone").
- std::unique_ptr<char[]> timezone = nullptr;
- Maybe<bool> maybe_timezone = Intl::GetStringOption(
- isolate, options, "timeZone", empty_values, service, &timezone);
- MAYBE_RETURN(maybe_timezone, Handle<JSDateTimeFormat>());
-
- std::unique_ptr<icu::TimeZone> tz = CreateTimeZone(timezone.get());
- if (tz.get() == nullptr) {
- THROW_NEW_ERROR(
- isolate,
- NewRangeError(MessageTemplate::kInvalidTimeZone,
- factory->NewStringFromAsciiChecked(timezone.get())),
- JSDateTimeFormat);
- }
-
- std::unique_ptr<icu::Calendar> calendar(
- CreateCalendar(isolate, icu_locale, tz.release()));
-
- // 18.b If the result of IsValidTimeZoneName(timeZone) is false, then
- // i. Throw a RangeError exception.
- if (calendar.get() == nullptr) {
- THROW_NEW_ERROR(
- isolate,
- NewRangeError(MessageTemplate::kInvalidTimeZone,
- factory->NewStringFromAsciiChecked(timezone.get())),
- JSDateTimeFormat);
- }
-
static base::LazyInstance<DateTimePatternGeneratorCache>::type
generator_cache = LAZY_INSTANCE_INITIALIZER;
@@ -1543,9 +1568,8 @@ MaybeHandle<JSDateTimeFormat> JSDateTimeFormat::New(
generator_cache.Pointer()->CreateGenerator(icu_locale));
// 15.Let hcDefault be dataLocaleData.[[hourCycle]].
- icu::UnicodeString hour_pattern = generator->getBestPattern("jjmm", status);
+ HourCycle hc_default = ToHourCycle(generator->getDefaultHourCycle(status));
CHECK(U_SUCCESS(status));
- HourCycle hc_default = HourCycleFromPattern(hour_pattern);
// 16.Let hc be r.[[hc]].
HourCycle hc = HourCycle::kUndefined;
@@ -1590,11 +1614,87 @@ MaybeHandle<JSDateTimeFormat> JSDateTimeFormat::New(
}
}
+ // 17. Let timeZone be ? Get(options, "timeZone").
+ std::unique_ptr<char[]> timezone = nullptr;
+ Maybe<bool> maybe_timezone = Intl::GetStringOption(
+ isolate, options, "timeZone", empty_values, service, &timezone);
+ MAYBE_RETURN(maybe_timezone, Handle<JSDateTimeFormat>());
+
+ std::unique_ptr<icu::TimeZone> tz = CreateTimeZone(timezone.get());
+ if (tz.get() == nullptr) {
+ THROW_NEW_ERROR(
+ isolate,
+ NewRangeError(MessageTemplate::kInvalidTimeZone,
+ factory->NewStringFromAsciiChecked(timezone.get())),
+ JSDateTimeFormat);
+ }
+
+ std::unique_ptr<icu::Calendar> calendar(
+ CreateCalendar(isolate, icu_locale, tz.release()));
+
+ // 18.b If the result of IsValidTimeZoneName(timeZone) is false, then
+ // i. Throw a RangeError exception.
+ if (calendar.get() == nullptr) {
+ THROW_NEW_ERROR(
+ isolate,
+ NewRangeError(MessageTemplate::kInvalidTimeZone,
+ factory->NewStringFromAsciiChecked(timezone.get())),
+ JSDateTimeFormat);
+ }
+
DateTimeStyle date_style = DateTimeStyle::kUndefined;
DateTimeStyle time_style = DateTimeStyle::kUndefined;
std::unique_ptr<icu::SimpleDateFormat> icu_date_format;
- // 28. Let dateStyle be ? GetOption(options, "dateStyle", "string", «
+ // 28. For each row of Table 1, except the header row, do
+ bool has_hour_option = false;
+ std::string skeleton;
+ for (const PatternData& item : GetPatternData(hc)) {
+ std::unique_ptr<char[]> input;
+ // i. Let prop be the name given in the Property column of the row.
+ // ii. Let value be ? GetOption(options, prop, "string", « the strings
+ // given in the Values column of the row », undefined).
+ Maybe<bool> maybe_get_option =
+ Intl::GetStringOption(isolate, options, item.property.c_str(),
+ item.allowed_values, service, &input);
+ MAYBE_RETURN(maybe_get_option, Handle<JSDateTimeFormat>());
+ if (maybe_get_option.FromJust()) {
+ if (item.property == "hour") {
+ has_hour_option = true;
+ }
+ DCHECK_NOT_NULL(input.get());
+ // iii. Set opt.[[<prop>]] to value.
+ skeleton += item.map.find(input.get())->second;
+ }
+ }
+ if (FLAG_harmony_intl_dateformat_fractional_second_digits) {
+ Maybe<int> maybe_fsd = Intl::GetNumberOption(
+ isolate, options, factory->fractionalSecondDigits_string(), 0, 3, 0);
+ MAYBE_RETURN(maybe_fsd, MaybeHandle<JSDateTimeFormat>());
+ // Convert fractionalSecondDigits to skeleton.
+ int fsd = maybe_fsd.FromJust();
+ for (int i = 0; i < fsd; i++) {
+ skeleton += "S";
+ }
+ }
+
+ // 29. Let matcher be ? GetOption(options, "formatMatcher", "string", «
+ // "basic", "best fit" », "best fit").
+ enum FormatMatcherOption { kBestFit, kBasic };
+ // We implement only best fit algorithm, but still need to check
+ // if the formatMatcher values are in range.
+ // c. Let matcher be ? GetOption(options, "formatMatcher", "string",
+ // « "basic", "best fit" », "best fit").
+ Maybe<FormatMatcherOption> maybe_format_matcher =
+ Intl::GetStringOption<FormatMatcherOption>(
+ isolate, options, "formatMatcher", service, {"best fit", "basic"},
+ {FormatMatcherOption::kBestFit, FormatMatcherOption::kBasic},
+ FormatMatcherOption::kBestFit);
+ MAYBE_RETURN(maybe_format_matcher, MaybeHandle<JSDateTimeFormat>());
+ // TODO(ftang): uncomment the following line and handle format_matcher.
+ // FormatMatcherOption format_matcher = maybe_format_matcher.FromJust();
+
+ // 32. Let dateStyle be ? GetOption(options, "dateStyle", "string", «
// "full", "long", "medium", "short" », undefined).
Maybe<DateTimeStyle> maybe_date_style = Intl::GetStringOption<DateTimeStyle>(
isolate, options, "dateStyle", service,
@@ -1603,11 +1703,10 @@ MaybeHandle<JSDateTimeFormat> JSDateTimeFormat::New(
DateTimeStyle::kShort},
DateTimeStyle::kUndefined);
MAYBE_RETURN(maybe_date_style, MaybeHandle<JSDateTimeFormat>());
- // 29. If dateStyle is not undefined, set dateTimeFormat.[[DateStyle]] to
- // dateStyle.
+ // 33. Set dateTimeFormat.[[DateStyle]] to dateStyle.
date_style = maybe_date_style.FromJust();
- // 30. Let timeStyle be ? GetOption(options, "timeStyle", "string", «
+ // 34. Let timeStyle be ? GetOption(options, "timeStyle", "string", «
// "full", "long", "medium", "short" »).
Maybe<DateTimeStyle> maybe_time_style = Intl::GetStringOption<DateTimeStyle>(
isolate, options, "timeStyle", service,
@@ -1617,88 +1716,68 @@ MaybeHandle<JSDateTimeFormat> JSDateTimeFormat::New(
DateTimeStyle::kUndefined);
MAYBE_RETURN(maybe_time_style, MaybeHandle<JSDateTimeFormat>());
- // 31. If timeStyle is not undefined, set dateTimeFormat.[[TimeStyle]] to
- // timeStyle.
+ // 35. Set dateTimeFormat.[[TimeStyle]] to timeStyle.
time_style = maybe_time_style.FromJust();
- // 32. If dateStyle or timeStyle are not undefined, then
+ // 36. If timeStyle is not undefined, then
+ HourCycle dateTimeFormatHourCycle = HourCycle::kUndefined;
+ if (time_style != DateTimeStyle::kUndefined) {
+ // a. Set dateTimeFormat.[[HourCycle]] to hc.
+ dateTimeFormatHourCycle = hc;
+ }
+
+ // 37. If dateStyle or timeStyle are not undefined, then
if (date_style != DateTimeStyle::kUndefined ||
time_style != DateTimeStyle::kUndefined) {
- // Track newer feature dateStyle/timeStyle option.
+ // a. For each row in Table 1, except the header row, do
+ // i. Let prop be the name given in the Property column of the row.
+ // ii. Let p be opt.[[<prop>]].
+ // iii. If p is not undefined, then
+ // 1. Throw a TypeError exception.
+ if (skeleton.length() > 0) {
+ THROW_NEW_ERROR(isolate,
+ NewTypeError(MessageTemplate::kInvalid,
+ factory->NewStringFromStaticChars("option"),
+ date_style != DateTimeStyle::kUndefined
+ ? factory->dateStyle_string()
+ : factory->timeStyle_string()),
+ JSDateTimeFormat);
+ }
+ // b. Let pattern be DateTimeStylePattern(dateStyle, timeStyle,
+ // dataLocaleData, hc).
isolate->CountUsage(
v8::Isolate::UseCounterFeature::kDateTimeFormatDateTimeStyle);
- icu_date_format = DateTimeStylePattern(date_style, time_style, icu_locale,
- hc, generator.get());
- }
-
- // 33. Else,
- if (icu_date_format.get() == nullptr) {
- bool has_hour_option = false;
- // b. For each row of Table 5, except the header row, do
- std::string skeleton;
- for (const PatternData& item : GetPatternData(hc)) {
- std::unique_ptr<char[]> input;
- // i. Let prop be the name given in the Property column of the row.
- // ii. Let value be ? GetOption(options, prop, "string", « the strings
- // given in the Values column of the row », undefined).
- Maybe<bool> maybe_get_option =
- Intl::GetStringOption(isolate, options, item.property.c_str(),
- item.allowed_values, service, &input);
- MAYBE_RETURN(maybe_get_option, Handle<JSDateTimeFormat>());
- if (maybe_get_option.FromJust()) {
- if (item.property == "hour") {
- has_hour_option = true;
- }
- DCHECK_NOT_NULL(input.get());
- // iii. Set opt.[[<prop>]] to value.
- skeleton += item.map.find(input.get())->second;
- }
+ icu_date_format =
+ DateTimeStylePattern(date_style, time_style, icu_locale,
+ dateTimeFormatHourCycle, generator.get());
+ if (icu_date_format.get() == nullptr) {
+ THROW_NEW_ERROR(isolate, NewRangeError(MessageTemplate::kIcuError),
+ JSDateTimeFormat);
}
- if (FLAG_harmony_intl_dateformat_fractional_second_digits) {
- Maybe<int> maybe_fsd = Intl::GetNumberOption(
- isolate, options, factory->fractionalSecondDigits_string(), 0, 3, 0);
- MAYBE_RETURN(maybe_fsd, MaybeHandle<JSDateTimeFormat>());
- // Convert fractionalSecondDigits to skeleton.
- int fsd = maybe_fsd.FromJust();
- for (int i = 0; i < fsd; i++) {
- skeleton += "S";
- }
+ } else {
+ // e. If dateTimeFormat.[[Hour]] is not undefined, then
+ if (has_hour_option) {
+ // v. Set dateTimeFormat.[[HourCycle]] to hc.
+ dateTimeFormatHourCycle = hc;
+ } else {
+ // f. Else,
+ // Set dateTimeFormat.[[HourCycle]] to undefined.
+ dateTimeFormatHourCycle = HourCycle::kUndefined;
}
-
- enum FormatMatcherOption { kBestFit, kBasic };
- // We implement only best fit algorithm, but still need to check
- // if the formatMatcher values are in range.
- // c. Let matcher be ? GetOption(options, "formatMatcher", "string",
- // « "basic", "best fit" », "best fit").
- Maybe<FormatMatcherOption> maybe_format_matcher =
- Intl::GetStringOption<FormatMatcherOption>(
- isolate, options, "formatMatcher", service, {"best fit", "basic"},
- {FormatMatcherOption::kBestFit, FormatMatcherOption::kBasic},
- FormatMatcherOption::kBestFit);
- MAYBE_RETURN(maybe_format_matcher, MaybeHandle<JSDateTimeFormat>());
- // TODO(ftang): uncomment the following line and handle format_matcher.
- // FormatMatcherOption format_matcher = maybe_format_matcher.FromJust();
-
icu::UnicodeString skeleton_ustr(skeleton.c_str());
- icu_date_format = CreateICUDateFormatFromCache(icu_locale, skeleton_ustr,
- generator.get(), hc);
+ icu_date_format = CreateICUDateFormatFromCache(
+ icu_locale, skeleton_ustr, generator.get(), dateTimeFormatHourCycle);
if (icu_date_format.get() == nullptr) {
// Remove extensions and try again.
icu_locale = icu::Locale(icu_locale.getBaseName());
- icu_date_format = CreateICUDateFormatFromCache(icu_locale, skeleton_ustr,
- generator.get(), hc);
+ icu_date_format = CreateICUDateFormatFromCache(
+ icu_locale, skeleton_ustr, generator.get(), dateTimeFormatHourCycle);
if (icu_date_format.get() == nullptr) {
THROW_NEW_ERROR(isolate, NewRangeError(MessageTemplate::kIcuError),
JSDateTimeFormat);
}
}
-
- // g. If dateTimeFormat.[[Hour]] is not undefined, then
- if (!has_hour_option) {
- // h. Else, i. Set dateTimeFormat.[[HourCycle]] to undefined.
- hc = HourCycle::kUndefined;
- }
}
// The creation of Calendar depends on timeZone so we have to put 13 after 17.
@@ -1723,7 +1802,8 @@ MaybeHandle<JSDateTimeFormat> JSDateTimeFormat::New(
maybe_hour_cycle.FromJust() != HourCycle::kUndefined) {
auto hc_extension_it = r.extensions.find("hc");
if (hc_extension_it != r.extensions.end()) {
- if (hc != ToHourCycle(hc_extension_it->second.c_str())) {
+ if (dateTimeFormatHourCycle !=
+ ToHourCycle(hc_extension_it->second.c_str())) {
// Remove -hc- if it does not agree with what we used.
UErrorCode status = U_ZERO_ERROR;
resolved_locale.setUnicodeKeywordValue("hc", nullptr, status);
@@ -1757,12 +1837,8 @@ MaybeHandle<JSDateTimeFormat> JSDateTimeFormat::New(
}
if (time_style != DateTimeStyle::kUndefined) {
date_time_format->set_time_style(time_style);
- date_time_format->set_hour_cycle(hc);
- }
- if ((date_style == DateTimeStyle::kUndefined) &&
- (time_style == DateTimeStyle::kUndefined)) {
- date_time_format->set_hour_cycle(hc);
}
+ date_time_format->set_hour_cycle(dateTimeFormatHourCycle);
date_time_format->set_locale(*locale_str);
date_time_format->set_icu_locale(*managed_locale);
date_time_format->set_icu_simple_date_format(*managed_format);
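The reordered hunks above move the option-to-skeleton loop (plus the fractionalSecondDigits handling) ahead of the dateStyle/timeStyle branch, so a non-empty skeleton can be detected and rejected when a style option is also given. For illustration, a standalone C++ sketch of the skeleton-building step; the option table below is a hypothetical, heavily reduced stand-in for V8's GetPatternData(hc) rows, not the real mapping.

#include <iostream>
#include <map>
#include <string>

std::string BuildSkeleton(const std::map<std::string, std::string>& options,
                          int fractional_second_digits) {
  // Illustrative subset of the property -> (value -> skeleton letters) table.
  static const std::map<std::string, std::map<std::string, std::string>> table = {
      {"year",   {{"numeric", "y"}, {"2-digit", "yy"}}},
      {"month",  {{"numeric", "M"}, {"2-digit", "MM"}, {"short", "MMM"}}},
      {"day",    {{"numeric", "d"}, {"2-digit", "dd"}}},
      {"hour",   {{"numeric", "j"}, {"2-digit", "jj"}}},
      {"minute", {{"numeric", "m"}, {"2-digit", "mm"}}},
      {"second", {{"numeric", "s"}, {"2-digit", "ss"}}},
  };
  std::string skeleton;
  for (const auto& row : table) {
    auto it = options.find(row.first);
    if (it == options.end()) continue;  // option not requested
    auto mapped = row.second.find(it->second);
    if (mapped != row.second.end()) skeleton += mapped->second;
  }
  // fractionalSecondDigits (0..3) appends that many 'S' characters.
  for (int i = 0; i < fractional_second_digits; i++) skeleton += "S";
  return skeleton;
}

int main() {
  std::cout << BuildSkeleton({{"hour", "2-digit"}, {"minute", "2-digit"}}, 2)
            << "\n";  // prints "jjmmSS"
  return 0;
}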
diff --git a/chromium/v8/src/objects/js-number-format.cc b/chromium/v8/src/objects/js-number-format.cc
index c5b3d06fcaf..0abcd0cbcfc 100644
--- a/chromium/v8/src/objects/js-number-format.cc
+++ b/chromium/v8/src/objects/js-number-format.cc
@@ -207,7 +207,7 @@ std::map<const std::string, icu::MeasureUnit> CreateUnitMap() {
class UnitFactory {
public:
UnitFactory() : map_(CreateUnitMap()) {}
- virtual ~UnitFactory() {}
+ virtual ~UnitFactory() = default;
// ecma402 #sec-issanctionedsimpleunitidentifier
icu::MeasureUnit create(const std::string& unitIdentifier) {
@@ -482,16 +482,16 @@ Handle<String> SignDisplayString(Isolate* isolate,
} // anonymous namespace
// Return the minimum integer digits by counting the number of '0' after
-// "integer-width/+" in the skeleton.
+// "integer-width/*" in the skeleton.
// Ex: Return 15 for skeleton as
-// “currency/TWD .00 rounding-mode-half-up integer-width/+000000000000000”
+// “currency/TWD .00 rounding-mode-half-up integer-width/*000000000000000”
// 1
// 123456789012345
-// Return default value as 1 if there are no "integer-width/+".
+// Return default value as 1 if there are no "integer-width/*".
int32_t JSNumberFormat::MinimumIntegerDigitsFromSkeleton(
const icu::UnicodeString& skeleton) {
- // count the number of 0 after "integer-width/+"
- icu::UnicodeString search("integer-width/+");
+ // count the number of 0 after "integer-width/*"
+ icu::UnicodeString search("integer-width/*");
int32_t index = skeleton.indexOf(search);
if (index < 0) return 1; // return 1 if cannot find it.
index += search.length();
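The hunk above only switches the searched-for marker from "integer-width/+" to "integer-width/*"; the counting rule stays as the comment documents it. A self-contained C++ sketch of that rule, reproducing the 15-zero example from the comment (treat it as an illustration, not the V8 implementation):

#include <cassert>
#include <string>

// Count the '0's that follow "integer-width/*" in a number skeleton,
// defaulting to 1 when the token is absent.
int MinimumIntegerDigitsFromSkeleton(const std::string& skeleton) {
  const std::string search = "integer-width/*";
  size_t index = skeleton.find(search);
  if (index == std::string::npos) return 1;  // default when not present
  index += search.size();
  int matched = 0;
  while (index < skeleton.size() && skeleton[index] == '0') {
    matched++;
    index++;
  }
  return matched;
}

int main() {
  assert(MinimumIntegerDigitsFromSkeleton(
             "currency/TWD .00 rounding-mode-half-up "
             "integer-width/*000000000000000") == 15);
  assert(MinimumIntegerDigitsFromSkeleton(".00 rounding-mode-half-up") == 1);
  return 0;
}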
diff --git a/chromium/v8/src/objects/js-objects-inl.h b/chromium/v8/src/objects/js-objects-inl.h
index 300b40d9d74..192d0ed61d1 100644
--- a/chromium/v8/src/objects/js-objects-inl.h
+++ b/chromium/v8/src/objects/js-objects-inl.h
@@ -5,8 +5,6 @@
#ifndef V8_OBJECTS_JS_OBJECTS_INL_H_
#define V8_OBJECTS_JS_OBJECTS_INL_H_
-#include "src/objects/js-objects.h"
-
#include "src/diagnostics/code-tracer.h"
#include "src/heap/heap-write-barrier.h"
#include "src/objects/elements.h"
@@ -16,6 +14,7 @@
#include "src/objects/field-index-inl.h"
#include "src/objects/hash-table-inl.h"
#include "src/objects/heap-number-inl.h"
+#include "src/objects/js-objects.h"
#include "src/objects/keys.h"
#include "src/objects/lookup-inl.h"
#include "src/objects/property-array-inl.h"
@@ -775,9 +774,8 @@ DEF_GETTER(JSObject, GetElementsKind, ElementsKind) {
DCHECK(kind > DICTIONARY_ELEMENTS ||
IsAnyNonextensibleElementsKind(kind));
}
- DCHECK(
- !IsSloppyArgumentsElementsKind(kind) ||
- (elements(isolate).IsFixedArray() && elements(isolate).length() >= 2));
+ DCHECK(!IsSloppyArgumentsElementsKind(kind) ||
+ elements(isolate).IsSloppyArgumentsElements());
}
#endif
return kind;
diff --git a/chromium/v8/src/objects/js-objects.cc b/chromium/v8/src/objects/js-objects.cc
index a77d2dadfc8..77254ff9979 100644
--- a/chromium/v8/src/objects/js-objects.cc
+++ b/chromium/v8/src/objects/js-objects.cc
@@ -27,7 +27,6 @@
#include "src/objects/field-type.h"
#include "src/objects/fixed-array.h"
#include "src/objects/heap-number.h"
-#include "src/objects/js-aggregate-error.h"
#include "src/objects/js-array-buffer.h"
#include "src/objects/js-array-inl.h"
#include "src/objects/layout-descriptor.h"
@@ -72,6 +71,8 @@
#include "src/strings/string-stream.h"
#include "src/utils/ostreams.h"
#include "src/wasm/wasm-objects.h"
+#include "torque-generated/exported-class-definitions-tq-inl.h"
+#include "torque-generated/exported-class-definitions-tq.h"
namespace v8 {
namespace internal {
@@ -2080,8 +2081,6 @@ int JSObject::GetHeaderSize(InstanceType type,
return JSObject::kHeaderSize;
case JS_GENERATOR_OBJECT_TYPE:
return JSGeneratorObject::kHeaderSize;
- case JS_AGGREGATE_ERROR_TYPE:
- return JSAggregateError::kHeaderSize;
case JS_ASYNC_FUNCTION_OBJECT_TYPE:
return JSAsyncFunctionObject::kHeaderSize;
case JS_ASYNC_GENERATOR_OBJECT_TYPE:
@@ -4995,9 +4994,10 @@ void JSFunction::EnsureClosureFeedbackCellArray(Handle<JSFunction> function) {
}
// static
-void JSFunction::EnsureFeedbackVector(Handle<JSFunction> function) {
+void JSFunction::EnsureFeedbackVector(Handle<JSFunction> function,
+ IsCompiledScope* is_compiled_scope) {
Isolate* const isolate = function->GetIsolate();
- DCHECK(function->shared().is_compiled());
+ DCHECK(is_compiled_scope->is_compiled());
DCHECK(function->shared().HasFeedbackMetadata());
if (function->has_feedback_vector()) return;
if (function->shared().HasAsmWasmData()) return;
@@ -5008,8 +5008,8 @@ void JSFunction::EnsureFeedbackVector(Handle<JSFunction> function) {
EnsureClosureFeedbackCellArray(function);
Handle<ClosureFeedbackCellArray> closure_feedback_cell_array =
handle(function->closure_feedback_cell_array(), isolate);
- Handle<HeapObject> feedback_vector =
- FeedbackVector::New(isolate, shared, closure_feedback_cell_array);
+ Handle<HeapObject> feedback_vector = FeedbackVector::New(
+ isolate, shared, closure_feedback_cell_array, is_compiled_scope);
// EnsureClosureFeedbackCellArray should handle the special case where we need
// to allocate a new feedback cell. Please look at comment in that function
// for more details.
@@ -5020,7 +5020,8 @@ void JSFunction::EnsureFeedbackVector(Handle<JSFunction> function) {
}
// static
-void JSFunction::InitializeFeedbackCell(Handle<JSFunction> function) {
+void JSFunction::InitializeFeedbackCell(Handle<JSFunction> function,
+ IsCompiledScope* is_compiled_scope) {
Isolate* const isolate = function->GetIsolate();
if (function->has_feedback_vector()) {
@@ -5038,7 +5039,7 @@ void JSFunction::InitializeFeedbackCell(Handle<JSFunction> function) {
if (FLAG_always_opt) needs_feedback_vector = true;
if (needs_feedback_vector) {
- EnsureFeedbackVector(function);
+ EnsureFeedbackVector(function, is_compiled_scope);
} else {
EnsureClosureFeedbackCellArray(function);
}
@@ -5160,8 +5161,16 @@ void JSFunction::EnsureHasInitialMap(Handle<JSFunction> function) {
if (function->has_initial_map()) return;
Isolate* isolate = function->GetIsolate();
- // First create a new map with the size and number of in-object properties
- // suggested by the function.
+ int expected_nof_properties =
+ CalculateExpectedNofProperties(isolate, function);
+
+ // {CalculateExpectedNofProperties} can have had the side effect of creating
+ // the initial map (e.g. it could have triggered an optimized compilation
+ // whose dependency installation reentered {EnsureHasInitialMap}).
+ if (function->has_initial_map()) return;
+
+ // Create a new map with the size and number of in-object properties suggested
+ // by the function.
InstanceType instance_type;
if (IsResumableFunction(function->shared().kind())) {
instance_type = IsAsyncGeneratorFunction(function->shared().kind())
@@ -5173,8 +5182,6 @@ void JSFunction::EnsureHasInitialMap(Handle<JSFunction> function) {
int instance_size;
int inobject_properties;
- int expected_nof_properties =
- CalculateExpectedNofProperties(isolate, function);
CalculateInstanceSizeHelper(instance_type, false, 0, expected_nof_properties,
&instance_size, &inobject_properties);
@@ -5202,7 +5209,6 @@ namespace {
bool CanSubclassHaveInobjectProperties(InstanceType instance_type) {
switch (instance_type) {
- case JS_AGGREGATE_ERROR_TYPE:
case JS_API_OBJECT_TYPE:
case JS_ARRAY_BUFFER_TYPE:
case JS_ARRAY_TYPE:
@@ -5577,7 +5583,7 @@ int JSFunction::CalculateExpectedNofProperties(Isolate* isolate,
&is_compiled_scope)) {
DCHECK(shared->is_compiled());
int count = shared->expected_nof_properties();
- // Check that the estimate is sane.
+ // Check that the estimate is sensible.
if (expected_nof_properties <= JSObject::kMaxInObjectProperties - count) {
expected_nof_properties += count;
} else {
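The EnsureHasInitialMap change above guards against CalculateExpectedNofProperties reentering the function through compilation side effects: compute the estimate first, then re-check before building the map. A minimal C++ sketch of that compute-then-recheck pattern; the names (LazyThing, ComputeEstimate) are hypothetical and only illustrate the guard, not V8's code.

#include <iostream>
#include <optional>

struct LazyThing {
  std::optional<int> initial_size;

  int ComputeEstimate() {
    // Imagine this calls back into EnsureInitialized() on the same object
    // (in the patch: an optimized compilation reenters EnsureHasInitialMap).
    EnsureInitialized(/*estimate=*/8);
    return 16;
  }

  void EnsureInitialized(int estimate) {
    if (initial_size) return;  // fast path: already initialized
    initial_size = estimate;
  }

  void EnsureInitializedWithEstimate() {
    if (initial_size) return;          // 1. fast path
    int estimate = ComputeEstimate();  // 2. may reenter and initialize us
    if (initial_size) return;          // 3. re-check before overwriting
    initial_size = estimate;
  }
};

int main() {
  LazyThing t;
  t.EnsureInitializedWithEstimate();
  std::cout << *t.initial_size << "\n";  // 8: the reentrant init is kept
  return 0;
}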
diff --git a/chromium/v8/src/objects/js-objects.h b/chromium/v8/src/objects/js-objects.h
index 9e9f8e31283..41f371cd0bf 100644
--- a/chromium/v8/src/objects/js-objects.h
+++ b/chromium/v8/src/objects/js-objects.h
@@ -27,6 +27,7 @@ enum InstanceType : uint16_t;
class JSGlobalObject;
class JSGlobalProxy;
class NativeContext;
+class IsCompiledScope;
// JSReceiver includes types on which properties can be defined, i.e.,
// JSObject and JSProxy.
@@ -973,6 +974,8 @@ class JSFunction : public JSFunctionOrBoundFunction {
static const int kNameDescriptorIndex = 1;
// Home object descriptor index when function has a [[HomeObject]] slot.
static const int kMaybeHomeObjectDescriptorIndex = 2;
+ // Fast binding requires length and name accessors.
+ static const int kMinDescriptorsForFastBind = 2;
// [context]: The context for this function.
inline Context context();
@@ -1062,7 +1065,7 @@ class JSFunction : public JSFunctionOrBoundFunction {
inline FeedbackVector feedback_vector() const;
inline bool has_feedback_vector() const;
V8_EXPORT_PRIVATE static void EnsureFeedbackVector(
- Handle<JSFunction> function);
+ Handle<JSFunction> function, IsCompiledScope* compiled_scope);
// Functions related to clousre feedback cell array that holds feedback cells
// used to create closures from this function. We allocate closure feedback
@@ -1076,7 +1079,8 @@ class JSFunction : public JSFunctionOrBoundFunction {
// initialized to the closure feedback cell array that holds the feedback
// cells for create closure calls from this function. In the regular mode,
// this allocates feedback vector.
- static void InitializeFeedbackCell(Handle<JSFunction> function);
+ static void InitializeFeedbackCell(Handle<JSFunction> function,
+ IsCompiledScope* compiled_scope);
// Unconditionally clear the type feedback vector.
void ClearTypeFeedbackInfo();
@@ -1132,6 +1136,7 @@ class JSFunction : public JSFunctionOrBoundFunction {
DECL_CAST(JSFunction)
// Calculate the instance size and in-object properties count.
+ // {CalculateExpectedNofProperties} can trigger compilation.
static V8_WARN_UNUSED_RESULT int CalculateExpectedNofProperties(
Isolate* isolate, Handle<JSFunction> function);
static void CalculateInstanceSizeHelper(InstanceType instance_type,
diff --git a/chromium/v8/src/objects/js-regexp.h b/chromium/v8/src/objects/js-regexp.h
index 3d584b9f1ab..b32ea4d9402 100644
--- a/chromium/v8/src/objects/js-regexp.h
+++ b/chromium/v8/src/objects/js-regexp.h
@@ -6,6 +6,7 @@
#define V8_OBJECTS_JS_REGEXP_H_
#include "src/objects/js-array.h"
+#include "torque-generated/bit-fields-tq.h"
// Has to be the last include (doesn't have include guards):
#include "src/objects/object-macros.h"
@@ -36,40 +37,18 @@ class JSRegExp : public TorqueGeneratedJSRegExp<JSRegExp, JSObject> {
// ATOM: A simple string to match against using an indexOf operation.
// IRREGEXP: Compiled with Irregexp.
enum Type { NOT_COMPILED, ATOM, IRREGEXP };
- struct FlagShiftBit {
- static constexpr int kGlobal = 0;
- static constexpr int kIgnoreCase = 1;
- static constexpr int kMultiline = 2;
- static constexpr int kSticky = 3;
- static constexpr int kUnicode = 4;
- static constexpr int kDotAll = 5;
- static constexpr int kInvalid = 6;
- };
- enum Flag : uint8_t {
- kNone = 0,
- kGlobal = 1 << FlagShiftBit::kGlobal,
- kIgnoreCase = 1 << FlagShiftBit::kIgnoreCase,
- kMultiline = 1 << FlagShiftBit::kMultiline,
- kSticky = 1 << FlagShiftBit::kSticky,
- kUnicode = 1 << FlagShiftBit::kUnicode,
- kDotAll = 1 << FlagShiftBit::kDotAll,
- // Update FlagCount when adding new flags.
- kInvalid = 1 << FlagShiftBit::kInvalid, // Not included in FlagCount.
- };
- using Flags = base::Flags<Flag>;
-
- static constexpr int kFlagCount = 6;
-
- static constexpr Flag FlagFromChar(char c) {
+ DEFINE_TORQUE_GENERATED_JS_REG_EXP_FLAGS()
+
+ static constexpr base::Optional<Flag> FlagFromChar(char c) {
STATIC_ASSERT(kFlagCount == 6);
// clang-format off
- return c == 'g' ? kGlobal
- : c == 'i' ? kIgnoreCase
- : c == 'm' ? kMultiline
- : c == 'y' ? kSticky
- : c == 'u' ? kUnicode
- : c == 's' ? kDotAll
- : kInvalid;
+ return c == 'g' ? base::Optional<Flag>(kGlobal)
+ : c == 'i' ? base::Optional<Flag>(kIgnoreCase)
+ : c == 'm' ? base::Optional<Flag>(kMultiline)
+ : c == 'y' ? base::Optional<Flag>(kSticky)
+ : c == 'u' ? base::Optional<Flag>(kUnicode)
+ : c == 's' ? base::Optional<Flag>(kDotAll)
+ : base::Optional<Flag>();
// clang-format on
}
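FlagFromChar now reports an unknown character as an empty base::Optional instead of the removed kInvalid sentinel, which moves invalid/duplicate detection to the caller (see the FlagsFromString hunk further down). A self-contained C++ sketch of that contract, with std::optional standing in for v8::base::Optional and ParseFlags as a hypothetical stand-in for the caller-side check:

#include <cassert>
#include <cstdint>
#include <optional>
#include <string>

enum Flag : uint8_t {
  kGlobal = 1 << 0, kIgnoreCase = 1 << 1, kMultiline = 1 << 2,
  kSticky = 1 << 3, kUnicode = 1 << 4, kDotAll = 1 << 5,
};

// Unknown characters yield an empty optional instead of a sentinel bit.
std::optional<Flag> FlagFromChar(char c) {
  switch (c) {
    case 'g': return kGlobal;
    case 'i': return kIgnoreCase;
    case 'm': return kMultiline;
    case 'y': return kSticky;
    case 'u': return kUnicode;
    case 's': return kDotAll;
    default:  return std::nullopt;
  }
}

// Reject unknown flag characters and duplicates, as FlagsFromString now does.
std::optional<uint8_t> ParseFlags(const std::string& flags) {
  uint8_t value = 0;
  for (char c : flags) {
    std::optional<Flag> flag = FlagFromChar(c);
    if (!flag) return std::nullopt;           // invalid flag character
    if (value & *flag) return std::nullopt;   // duplicate flag
    value |= *flag;
  }
  return value;
}

int main() {
  assert(ParseFlags("gimsuy").has_value());
  assert(!ParseFlags("gg").has_value());  // duplicate
  assert(!ParseFlags("x").has_value());   // unknown
  return 0;
}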
diff --git a/chromium/v8/src/objects/js-regexp.tq b/chromium/v8/src/objects/js-regexp.tq
index f2e216f282d..35e77114ba0 100644
--- a/chromium/v8/src/objects/js-regexp.tq
+++ b/chromium/v8/src/objects/js-regexp.tq
@@ -2,11 +2,20 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
+bitfield struct JSRegExpFlags extends uint31 {
+ global: bool: 1 bit;
+ ignore_case: bool: 1 bit;
+ multiline: bool: 1 bit;
+ sticky: bool: 1 bit;
+ unicode: bool: 1 bit;
+ dot_all: bool: 1 bit;
+}
+
@generateCppClass
extern class JSRegExp extends JSObject {
data: FixedArray|Undefined;
source: String|Undefined;
- flags: Smi|Undefined;
+ flags: SmiTagged<JSRegExpFlags>|Undefined;
}
// Note: Although a condition for a FastJSRegExp is having a positive smi
diff --git a/chromium/v8/src/objects/js-relative-time-format.cc b/chromium/v8/src/objects/js-relative-time-format.cc
index 0cb6b117dff..a6fe9a5864c 100644
--- a/chromium/v8/src/objects/js-relative-time-format.cc
+++ b/chromium/v8/src/objects/js-relative-time-format.cc
@@ -18,8 +18,10 @@
#include "src/objects/js-number-format.h"
#include "src/objects/js-relative-time-format-inl.h"
#include "src/objects/objects-inl.h"
+#include "unicode/decimfmt.h"
#include "unicode/numfmt.h"
#include "unicode/reldatefmt.h"
+#include "unicode/unum.h"
namespace v8 {
namespace internal {
@@ -193,6 +195,10 @@ MaybeHandle<JSRelativeTimeFormat> JSRelativeTimeFormat::New(
}
}
+ icu::DecimalFormat* decimal_format =
+ static_cast<icu::DecimalFormat*>(number_format);
+ decimal_format->setMinimumGroupingDigits(-2);
+
// Change UDISPCTX_CAPITALIZATION_NONE to other values if
// ECMA402 later include option to change capitalization.
// Ref: https://github.com/tc39/proposal-intl-relative-time/issues/11
diff --git a/chromium/v8/src/objects/keys.cc b/chromium/v8/src/objects/keys.cc
index 16008e39d87..66373f3a546 100644
--- a/chromium/v8/src/objects/keys.cc
+++ b/chromium/v8/src/objects/keys.cc
@@ -949,7 +949,7 @@ Maybe<bool> KeyAccumulator::CollectOwnKeys(Handle<JSReceiver> receiver,
if (mode_ == KeyCollectionMode::kIncludePrototypes) {
return Just(false);
}
- // ...whereas [[OwnPropertyKeys]] shall return whitelisted properties.
+ // ...whereas [[OwnPropertyKeys]] shall return allowlisted properties.
DCHECK_EQ(KeyCollectionMode::kOwnOnly, mode_);
Handle<AccessCheckInfo> access_check_info;
{
diff --git a/chromium/v8/src/objects/lookup.cc b/chromium/v8/src/objects/lookup.cc
index d5fbf7c894a..0361b6dfd78 100644
--- a/chromium/v8/src/objects/lookup.cc
+++ b/chromium/v8/src/objects/lookup.cc
@@ -14,6 +14,8 @@
#include "src/objects/hash-table-inl.h"
#include "src/objects/heap-number-inl.h"
#include "src/objects/struct-inl.h"
+#include "torque-generated/exported-class-definitions-tq-inl.h"
+#include "torque-generated/exported-class-definitions-tq.h"
namespace v8 {
namespace internal {
@@ -763,13 +765,14 @@ void LookupIterator::TransitionToAccessorPair(Handle<Object> pair,
receiver->RequireSlowElements(*dictionary);
if (receiver->HasSlowArgumentsElements(isolate_)) {
- FixedArray parameter_map = FixedArray::cast(receiver->elements(isolate_));
- uint32_t length = parameter_map.length() - 2;
+ SloppyArgumentsElements parameter_map =
+ SloppyArgumentsElements::cast(receiver->elements(isolate_));
+ uint32_t length = parameter_map.length();
if (number_.is_found() && number_.as_uint32() < length) {
- parameter_map.set(number_.as_int() + 2,
- ReadOnlyRoots(isolate_).the_hole_value());
+ parameter_map.set_mapped_entries(
+ number_.as_int(), ReadOnlyRoots(isolate_).the_hole_value());
}
- FixedArray::cast(receiver->elements(isolate_)).set(1, *dictionary);
+ parameter_map.set_arguments(*dictionary);
} else {
receiver->set_elements(*dictionary);
}
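The lookup.cc hunk replaces raw FixedArray indexing (arguments backing store at slot 1, mapped entries at i + 2) with the named SloppyArgumentsElements accessors. Below is a small C++ sketch of the same refactoring shape on a plain vector; the layout constants simply restate the offsets visible in the removed lines, and the wrapper class itself is hypothetical.

#include <cassert>
#include <string>
#include <vector>

// Slot 0 = context, slot 1 = arguments store, slots 2.. = mapped entries
// (the convention the removed code hard-coded with "index 1" and "+ 2").
class ParameterMapView {
 public:
  explicit ParameterMapView(std::vector<std::string>& backing)
      : backing_(backing) {}

  size_t length() const { return backing_.size() - 2; }  // mapped entries only
  std::string& arguments() { return backing_[1]; }
  std::string& mapped_entry(size_t i) { return backing_[i + 2]; }

 private:
  std::vector<std::string>& backing_;
};

int main() {
  std::vector<std::string> raw = {"context", "arguments", "a", "b"};
  ParameterMapView view(raw);
  assert(view.length() == 2);
  view.mapped_entry(0) = "the_hole";  // analogous to set_mapped_entries(i, hole)
  view.arguments() = "dictionary";    // analogous to set_arguments(*dictionary)
  assert(raw[2] == "the_hole" && raw[1] == "dictionary");
  return 0;
}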
diff --git a/chromium/v8/src/objects/map-inl.h b/chromium/v8/src/objects/map-inl.h
index d529a8bbc97..04ac97c8dbd 100644
--- a/chromium/v8/src/objects/map-inl.h
+++ b/chromium/v8/src/objects/map-inl.h
@@ -55,7 +55,7 @@ ACCESSORS_CHECKED(Map, prototype_info, Object,
// |bit_field| fields.
// Concurrent access to |has_prototype_slot| and |has_non_instance_prototype|
-// is explicitly whitelisted here. The former is never modified after the map
+// is explicitly allowlisted here. The former is never modified after the map
// is setup but it's being read by concurrent marker when pointer compression
// is enabled. The latter bit can be modified on a live objects.
BIT_FIELD_ACCESSORS(Map, relaxed_bit_field, has_non_instance_prototype,
@@ -227,8 +227,6 @@ FixedArrayBase Map::GetInitialElements() const {
if (has_fast_elements() || has_fast_string_wrapper_elements() ||
has_any_nonextensible_elements()) {
result = GetReadOnlyRoots().empty_fixed_array();
- } else if (has_fast_sloppy_arguments_elements()) {
- result = GetReadOnlyRoots().empty_sloppy_arguments_elements();
} else if (has_typed_array_elements()) {
result = GetReadOnlyRoots().empty_byte_array();
} else if (has_dictionary_elements()) {
diff --git a/chromium/v8/src/objects/map-updater.cc b/chromium/v8/src/objects/map-updater.cc
index 8c9b94014f8..e51bcfc7601 100644
--- a/chromium/v8/src/objects/map-updater.cc
+++ b/chromium/v8/src/objects/map-updater.cc
@@ -713,16 +713,18 @@ MapUpdater::State MapUpdater::ConstructNewMap() {
TransitionsAccessor transitions(isolate_, split_map);
// Invalidate a transition target at |key|.
- Map maybe_transition = transitions.SearchTransition(
- GetKey(split_index), split_details.kind(), split_details.attributes());
- if (!maybe_transition.is_null()) {
- maybe_transition.DeprecateTransitionTree(isolate_);
+ Handle<Map> maybe_transition(
+ transitions.SearchTransition(GetKey(split_index), split_details.kind(),
+ split_details.attributes()),
+ isolate_);
+ if (!maybe_transition->is_null()) {
+ maybe_transition->DeprecateTransitionTree(isolate_);
}
// If |maybe_transition| is not nullptr then the transition array already
// contains entry for given descriptor. This means that the transition
// could be inserted regardless of whether transitions array is full or not.
- if (maybe_transition.is_null() && !transitions.CanHaveMoreTransitions()) {
+ if (maybe_transition->is_null() && !transitions.CanHaveMoreTransitions()) {
return Normalize("Normalize_CantHaveMoreTransitions");
}
diff --git a/chromium/v8/src/objects/map.cc b/chromium/v8/src/objects/map.cc
index bb13ace4bb0..d85d5893c43 100644
--- a/chromium/v8/src/objects/map.cc
+++ b/chromium/v8/src/objects/map.cc
@@ -25,6 +25,8 @@
#include "src/roots/roots.h"
#include "src/utils/ostreams.h"
#include "src/zone/zone-containers.h"
+#include "torque-generated/exported-class-definitions-tq-inl.h"
+#include "torque-generated/exported-class-definitions-tq.h"
#include "torque-generated/field-offsets-tq.h"
namespace v8 {
@@ -89,7 +91,7 @@ Map Map::GetInstanceTypeMap(ReadOnlyRoots roots, InstanceType type) {
case TYPE: \
map = roots.name##_map(); \
break;
- TORQUE_INTERNAL_INSTANCE_TYPE_LIST(MAKE_CASE)
+ TORQUE_DEFINED_INSTANCE_TYPE_LIST(MAKE_CASE)
#undef MAKE_CASE
default:
UNREACHABLE();
@@ -268,7 +270,6 @@ VisitorId Map::GetVisitorId(Map map) {
case JS_OBJECT_TYPE:
case JS_ERROR_TYPE:
- case JS_AGGREGATE_ERROR_TYPE:
case JS_ARGUMENTS_OBJECT_TYPE:
case JS_ASYNC_FROM_SYNC_ITERATOR_TYPE:
case JS_CONTEXT_EXTENSION_OBJECT_TYPE:
@@ -1437,8 +1438,9 @@ bool Map::MayHaveReadOnlyElementsInPrototypeChain(Isolate* isolate) {
}
if (IsSlowArgumentsElementsKind(elements_kind)) {
- FixedArray parameter_map = FixedArray::cast(current.elements(isolate));
- Object arguments = parameter_map.get(isolate, 1);
+ SloppyArgumentsElements elements =
+ SloppyArgumentsElements::cast(current.elements(isolate));
+ Object arguments = elements.arguments();
if (NumberDictionary::cast(arguments).requires_slow_elements()) {
return true;
}
diff --git a/chromium/v8/src/objects/map.h b/chromium/v8/src/objects/map.h
index 9876d85d3ec..5e1298461d8 100644
--- a/chromium/v8/src/objects/map.h
+++ b/chromium/v8/src/objects/map.h
@@ -252,7 +252,7 @@ class Map : public HeapObject {
// Bit field.
//
DECL_PRIMITIVE_ACCESSORS(bit_field, byte)
- // Atomic accessors, used for whitelisting legitimate concurrent accesses.
+ // Atomic accessors, used for allowlisting legitimate concurrent accesses.
DECL_PRIMITIVE_ACCESSORS(relaxed_bit_field, byte)
// Bit positions for |bit_field|.
@@ -594,6 +594,7 @@ class Map : public HeapObject {
WriteBarrierMode mode = UPDATE_WRITE_BARRIER);
// [instance descriptors]: describes the object.
+ DECL_GETTER(synchronized_instance_descriptors, DescriptorArray)
DECL_GETTER(instance_descriptors, DescriptorArray)
V8_EXPORT_PRIVATE void SetInstanceDescriptors(Isolate* isolate,
DescriptorArray descriptors,
@@ -976,7 +977,8 @@ class Map : public HeapObject {
MaybeHandle<Object> new_value);
// Use the high-level instance_descriptors/SetInstanceDescriptors instead.
- DECL_ACCESSORS(synchronized_instance_descriptors, DescriptorArray)
+ inline void set_synchronized_instance_descriptors(
+ DescriptorArray value, WriteBarrierMode mode = UPDATE_WRITE_BARRIER);
static const int kFastPropertiesSoftLimit = 12;
static const int kMaxFastProperties = 128;
diff --git a/chromium/v8/src/objects/map.tq b/chromium/v8/src/objects/map.tq
index 93c56278955..e7333a38634 100644
--- a/chromium/v8/src/objects/map.tq
+++ b/chromium/v8/src/objects/map.tq
@@ -80,3 +80,16 @@ extern class Map extends HeapObject {
macro LoadMapPrototypeInfo(m: Map): PrototypeInfo labels HasNoPrototypeInfo {
return m.PrototypeInfo() otherwise HasNoPrototypeInfo;
}
+
+// Returns true if the map corresponds to non-special fast or dictionary
+// object.
+@export
+macro IsSimpleObjectMap(map: Map): bool {
+ if (IsSpecialReceiverInstanceType(map.instance_type)) {
+ return false;
+ }
+ const bitField = map.bit_field;
+ return !bitField.has_named_interceptor & !bitField.is_access_check_needed;
+}
+
+extern macro IsSpecialReceiverInstanceType(InstanceType): bool;
diff --git a/chromium/v8/src/objects/maybe-object-inl.h b/chromium/v8/src/objects/maybe-object-inl.h
index 7c236a8ff94..05ef21c3dfc 100644
--- a/chromium/v8/src/objects/maybe-object-inl.h
+++ b/chromium/v8/src/objects/maybe-object-inl.h
@@ -6,6 +6,7 @@
#define V8_OBJECTS_MAYBE_OBJECT_INL_H_
#include "src/common/ptr-compr-inl.h"
+#include "src/execution/local-isolate-wrapper.h"
#include "src/objects/maybe-object.h"
#include "src/objects/smi-inl.h"
#include "src/objects/tagged-impl-inl.h"
@@ -88,6 +89,13 @@ HeapObjectReference HeapObjectReference::ClearedValue(
return HeapObjectReference(raw_value);
}
+// static
+HeapObjectReference HeapObjectReference::ClearedValue(
+ LocalIsolateWrapper isolate) {
+ return isolate.is_off_thread() ? ClearedValue(isolate.off_thread())
+ : ClearedValue(isolate.main_thread());
+}
+
template <typename THeapObjectSlot>
void HeapObjectReference::Update(THeapObjectSlot slot, HeapObject value) {
static_assert(std::is_same<THeapObjectSlot, FullHeapObjectSlot>::value ||
diff --git a/chromium/v8/src/objects/maybe-object.h b/chromium/v8/src/objects/maybe-object.h
index 0bb312692a4..92f68204138 100644
--- a/chromium/v8/src/objects/maybe-object.h
+++ b/chromium/v8/src/objects/maybe-object.h
@@ -5,6 +5,7 @@
#ifndef V8_OBJECTS_MAYBE_OBJECT_H_
#define V8_OBJECTS_MAYBE_OBJECT_H_
+#include "src/execution/local-isolate-wrapper.h"
#include "src/objects/tagged-impl.h"
namespace v8 {
@@ -52,6 +53,9 @@ class HeapObjectReference : public MaybeObject {
V8_INLINE static HeapObjectReference ClearedValue(
const OffThreadIsolate* isolate);
+ V8_INLINE static HeapObjectReference ClearedValue(
+ LocalIsolateWrapper isolate);
+
template <typename THeapObjectSlot>
V8_INLINE static void Update(THeapObjectSlot slot, HeapObject value);
};
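The new ClearedValue overload takes a LocalIsolateWrapper and forwards to the main-thread or off-thread variant based on is_off_thread(). A minimal C++ sketch of that wrapper-dispatch shape, using hypothetical isolate types rather than V8's:

#include <cassert>
#include <string>

struct MainThreadIsolate { std::string Name() const { return "main"; } };
struct OffThreadIsolate  { std::string Name() const { return "off-thread"; } };

// Holds exactly one of the two pointers so callers can branch once at the API
// boundary, in the spirit of LocalIsolateWrapper.
class IsolateWrapper {
 public:
  explicit IsolateWrapper(MainThreadIsolate* i) : main_(i), off_(nullptr) {}
  explicit IsolateWrapper(OffThreadIsolate* i) : main_(nullptr), off_(i) {}

  bool is_off_thread() const { return off_ != nullptr; }
  MainThreadIsolate* main_thread() const { return main_; }
  OffThreadIsolate* off_thread() const { return off_; }

 private:
  MainThreadIsolate* main_;
  OffThreadIsolate* off_;
};

// Mirrors the shape of the added ClearedValue(LocalIsolateWrapper) overload.
std::string ClearedValueSource(IsolateWrapper isolate) {
  return isolate.is_off_thread() ? isolate.off_thread()->Name()
                                 : isolate.main_thread()->Name();
}

int main() {
  MainThreadIsolate main_isolate;
  OffThreadIsolate off_isolate;
  assert(ClearedValueSource(IsolateWrapper(&main_isolate)) == "main");
  assert(ClearedValueSource(IsolateWrapper(&off_isolate)) == "off-thread");
  return 0;
}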
diff --git a/chromium/v8/src/objects/module-inl.h b/chromium/v8/src/objects/module-inl.h
index ce03c395005..e627aedf18a 100644
--- a/chromium/v8/src/objects/module-inl.h
+++ b/chromium/v8/src/objects/module-inl.h
@@ -36,8 +36,9 @@ ACCESSORS(Module, exception, Object, kExceptionOffset)
SMI_ACCESSORS(Module, status, kStatusOffset)
SMI_ACCESSORS(Module, hash, kHashOffset)
-BOOL_ACCESSORS(SourceTextModule, flags, async, kAsyncBit)
-BOOL_ACCESSORS(SourceTextModule, flags, async_evaluating, kAsyncEvaluatingBit)
+BOOL_ACCESSORS(SourceTextModule, flags, async, AsyncBit::kShift)
+BOOL_ACCESSORS(SourceTextModule, flags, async_evaluating,
+ AsyncEvaluatingBit::kShift)
ACCESSORS(SourceTextModule, async_parent_modules, ArrayList,
kAsyncParentModulesOffset)
ACCESSORS(SourceTextModule, top_level_capability, HeapObject,
diff --git a/chromium/v8/src/objects/name.tq b/chromium/v8/src/objects/name.tq
index 70bb6aea184..4870e2a3f7a 100644
--- a/chromium/v8/src/objects/name.tq
+++ b/chromium/v8/src/objects/name.tq
@@ -5,8 +5,16 @@
@abstract
@generateCppClass
extern class Name extends PrimitiveHeapObject {
- hash_field: uint32;
+ hash_field: NameHash;
}
+
+bitfield struct NameHash extends uint32 {
+ hash_not_computed: bool: 1 bit;
+ is_not_integer_index_mask: bool: 1 bit;
+ array_index_value: uint32: 24 bit;
+ array_index_length: uint32: 6 bit;
+}
+
// This is the same as Name, but with the information that there are no other
// kinds of names.
type AnyName = PrivateSymbol|PublicSymbol|String;
@@ -29,5 +37,12 @@ extern class Symbol extends Name {
type PublicSymbol extends Symbol;
type PrivateSymbol extends Symbol;
-const kNameEmptyHashField:
- constexpr uint32 generates 'Name::kEmptyHashField';
+const kNameEmptyHashField: NameHash = NameHash{
+ hash_not_computed: true,
+ is_not_integer_index_mask: true,
+ array_index_value: 0,
+ array_index_length: 0
+};
+
+const kMaxCachedArrayIndexLength: constexpr uint32
+ generates 'Name::kMaxCachedArrayIndexLength';
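The NameHash bitfield above splits the 32-bit hash field into 1 + 1 + 24 + 6 bits. A standalone C++ sketch of that packing with plain shifts and masks follows; the bit widths are taken from the declaration above, while the helper names and example values are illustrative only.

#include <cassert>
#include <cstdint>

// Bit layout mirroring the NameHash bitfield:
//   bit 0      : hash_not_computed
//   bit 1      : is_not_integer_index_mask
//   bits 2..25 : array_index_value  (24 bits)
//   bits 26..31: array_index_length (6 bits)
struct NameHash {
  static constexpr uint32_t kHashNotComputedBit = 1u << 0;
  static constexpr uint32_t kIsNotIntegerIndexBit = 1u << 1;
  static constexpr int kArrayIndexValueShift = 2;
  static constexpr uint32_t kArrayIndexValueMask = (1u << 24) - 1;
  static constexpr int kArrayIndexLengthShift = 26;
  static constexpr uint32_t kArrayIndexLengthMask = (1u << 6) - 1;

  static constexpr uint32_t Encode(bool not_computed, bool not_integer_index,
                                   uint32_t index_value, uint32_t index_length) {
    return (not_computed ? kHashNotComputedBit : 0u) |
           (not_integer_index ? kIsNotIntegerIndexBit : 0u) |
           ((index_value & kArrayIndexValueMask) << kArrayIndexValueShift) |
           ((index_length & kArrayIndexLengthMask) << kArrayIndexLengthShift);
  }
};

// The empty hash field from the diff: both flag bits set, index fields zero.
constexpr uint32_t kNameEmptyHashField = NameHash::Encode(true, true, 0, 0);

int main() {
  static_assert(kNameEmptyHashField == 0b11, "only the two low flag bits set");
  uint32_t h = NameHash::Encode(false, false, 42, 2);
  assert(((h >> NameHash::kArrayIndexValueShift) &
          NameHash::kArrayIndexValueMask) == 42);
  return 0;
}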
diff --git a/chromium/v8/src/objects/object-list-macros.h b/chromium/v8/src/objects/object-list-macros.h
index 34b3ae26efc..2b779955dcb 100644
--- a/chromium/v8/src/objects/object-list-macros.h
+++ b/chromium/v8/src/objects/object-list-macros.h
@@ -126,7 +126,6 @@ class ZoneForwardList;
V(HandlerTable) \
V(HeapNumber) \
V(InternalizedString) \
- V(JSAggregateError) \
V(JSArgumentsObject) \
V(JSArray) \
V(JSArrayBuffer) \
@@ -202,7 +201,6 @@ class ZoneForwardList;
V(SharedFunctionInfo) \
V(SimpleNumberDictionary) \
V(SlicedString) \
- V(SloppyArgumentsElements) \
V(SmallOrderedHashMap) \
V(SmallOrderedHashSet) \
V(SmallOrderedNameDictionary) \
@@ -238,7 +236,7 @@ class ZoneForwardList;
V(WeakFixedArray) \
V(WeakArrayList) \
V(WeakCell) \
- TORQUE_INTERNAL_CLASS_LIST(V)
+ TORQUE_DEFINED_CLASS_LIST(V)
#ifdef V8_INTL_SUPPORT
#define HEAP_OBJECT_ORDINARY_TYPE_LIST(V) \
diff --git a/chromium/v8/src/objects/objects-body-descriptors-inl.h b/chromium/v8/src/objects/objects-body-descriptors-inl.h
index 58b4106e882..bb25adaa353 100644
--- a/chromium/v8/src/objects/objects-body-descriptors-inl.h
+++ b/chromium/v8/src/objects/objects-body-descriptors-inl.h
@@ -811,7 +811,7 @@ class WasmArray::BodyDescriptor final : public BodyDescriptorBase {
template <typename ObjectVisitor>
static inline void IterateBody(Map map, HeapObject obj, int object_size,
ObjectVisitor* v) {
- if (!WasmArray::type(map)->element_type().IsReferenceType()) return;
+ if (!WasmArray::type(map)->element_type().is_reference_type()) return;
IteratePointers(obj, WasmArray::kHeaderSize, object_size, v);
}
@@ -835,7 +835,7 @@ class WasmStruct::BodyDescriptor final : public BodyDescriptorBase {
WasmStruct wasm_struct = WasmStruct::cast(obj);
wasm::StructType* type = WasmStruct::GcSafeType(map);
for (uint32_t i = 0; i < type->field_count(); i++) {
- if (!type->field(i).IsReferenceType()) continue;
+ if (!type->field(i).is_reference_type()) continue;
int offset =
WasmStruct::kHeaderSize + static_cast<int>(type->field_offset(i));
v->VisitPointer(wasm_struct, wasm_struct.RawField(offset));
@@ -981,7 +981,6 @@ ReturnType BodyDescriptorApply(InstanceType type, T1 p1, T2 p2, T3 p3, T4 p4) {
case JS_ASYNC_GENERATOR_OBJECT_TYPE:
case JS_PRIMITIVE_WRAPPER_TYPE:
case JS_DATE_TYPE:
- case JS_AGGREGATE_ERROR_TYPE:
case JS_ARRAY_TYPE:
case JS_ARRAY_ITERATOR_TYPE:
case JS_MODULE_NAMESPACE_TYPE:
diff --git a/chromium/v8/src/objects/objects-definitions.h b/chromium/v8/src/objects/objects-definitions.h
index 8a990cbc631..eaa3d9e9cc2 100644
--- a/chromium/v8/src/objects/objects-definitions.h
+++ b/chromium/v8/src/objects/objects-definitions.h
@@ -151,7 +151,6 @@ namespace internal {
V(_, TUPLE2_TYPE, Tuple2, tuple2) \
V(_, WASM_CAPI_FUNCTION_DATA_TYPE, WasmCapiFunctionData, \
wasm_capi_function_data) \
- V(_, WASM_DEBUG_INFO_TYPE, WasmDebugInfo, wasm_debug_info) \
V(_, WASM_EXCEPTION_TAG_TYPE, WasmExceptionTag, wasm_exception_tag) \
V(_, WASM_EXPORTED_FUNCTION_DATA_TYPE, WasmExportedFunctionData, \
wasm_exported_function_data) \
diff --git a/chromium/v8/src/objects/objects-inl.h b/chromium/v8/src/objects/objects-inl.h
index 9877b67c19d..56465b179ee 100644
--- a/chromium/v8/src/objects/objects-inl.h
+++ b/chromium/v8/src/objects/objects-inl.h
@@ -67,10 +67,6 @@ int PropertyDetails::field_width_in_words() const {
return representation().IsDouble() ? kDoubleSize / kTaggedSize : 1;
}
-DEF_GETTER(HeapObject, IsSloppyArgumentsElements, bool) {
- return IsFixedArrayExact(isolate);
-}
-
DEF_GETTER(HeapObject, IsClassBoilerplate, bool) {
return IsFixedArrayExact(isolate);
}
diff --git a/chromium/v8/src/objects/objects.cc b/chromium/v8/src/objects/objects.cc
index 53693149e14..3ef1067d9f9 100644
--- a/chromium/v8/src/objects/objects.cc
+++ b/chromium/v8/src/objects/objects.cc
@@ -2306,9 +2306,8 @@ bool HeapObject::NeedsRehashing() const {
case TRANSITION_ARRAY_TYPE:
return TransitionArray::cast(*this).number_of_entries() > 1;
case ORDERED_HASH_MAP_TYPE:
- return OrderedHashMap::cast(*this).NumberOfElements() > 0;
case ORDERED_HASH_SET_TYPE:
- return OrderedHashSet::cast(*this).NumberOfElements() > 0;
+ return false; // We'll rehash from the JSMap or JSSet referencing them.
case NAME_DICTIONARY_TYPE:
case GLOBAL_DICTIONARY_TYPE:
case NUMBER_DICTIONARY_TYPE:
@@ -2318,6 +2317,8 @@ bool HeapObject::NeedsRehashing() const {
case SMALL_ORDERED_HASH_MAP_TYPE:
case SMALL_ORDERED_HASH_SET_TYPE:
case SMALL_ORDERED_NAME_DICTIONARY_TYPE:
+ case JS_MAP_TYPE:
+ case JS_SET_TYPE:
return true;
default:
return false;
@@ -2327,10 +2328,13 @@ bool HeapObject::NeedsRehashing() const {
bool HeapObject::CanBeRehashed() const {
DCHECK(NeedsRehashing());
switch (map().instance_type()) {
+ case JS_MAP_TYPE:
+ case JS_SET_TYPE:
+ return true;
case ORDERED_HASH_MAP_TYPE:
case ORDERED_HASH_SET_TYPE:
+ UNREACHABLE(); // We'll rehash from the JSMap or JSSet referencing them.
case ORDERED_NAME_DICTIONARY_TYPE:
- // TODO(yangguo): actually support rehashing OrderedHash{Map,Set}.
return false;
case NAME_DICTIONARY_TYPE:
case GLOBAL_DICTIONARY_TYPE:
@@ -2354,7 +2358,8 @@ bool HeapObject::CanBeRehashed() const {
return false;
}
-void HeapObject::RehashBasedOnMap(ReadOnlyRoots roots) {
+void HeapObject::RehashBasedOnMap(LocalIsolateWrapper isolate) {
+ ReadOnlyRoots roots = ReadOnlyRoots(isolate);
switch (map().instance_type()) {
case HASH_TABLE_TYPE:
UNREACHABLE();
@@ -2386,6 +2391,19 @@ void HeapObject::RehashBasedOnMap(ReadOnlyRoots roots) {
case SMALL_ORDERED_HASH_SET_TYPE:
DCHECK_EQ(0, SmallOrderedHashSet::cast(*this).NumberOfElements());
break;
+ case ORDERED_HASH_MAP_TYPE:
+ case ORDERED_HASH_SET_TYPE:
+ UNREACHABLE(); // We'll rehash from the JSMap or JSSet referencing them.
+ case JS_MAP_TYPE: {
+ DCHECK(isolate.is_main_thread());
+ JSMap::cast(*this).Rehash(isolate.main_thread());
+ break;
+ }
+ case JS_SET_TYPE: {
+ DCHECK(isolate.is_main_thread());
+ JSSet::cast(*this).Rehash(isolate.main_thread());
+ break;
+ }
case SMALL_ORDERED_NAME_DICTIONARY_TYPE:
DCHECK_EQ(0, SmallOrderedNameDictionary::cast(*this).NumberOfElements());
break;
@@ -5509,13 +5527,21 @@ int SharedFunctionInfo::StartPosition() const {
if (info.HasPositionInfo()) {
return info.StartPosition();
}
- } else if (HasUncompiledData()) {
+ }
+ if (HasUncompiledData()) {
// Works with or without scope.
return uncompiled_data().start_position();
- } else if (IsApiFunction() || HasBuiltinId()) {
+ }
+ if (IsApiFunction() || HasBuiltinId()) {
DCHECK_IMPLIES(HasBuiltinId(), builtin_id() != Builtins::kCompileLazy);
return 0;
}
+ if (HasWasmExportedFunctionData()) {
+ WasmInstanceObject instance = wasm_exported_function_data().instance();
+ int func_index = wasm_exported_function_data().function_index();
+ auto& function = instance.module()->functions[func_index];
+ return static_cast<int>(function.code.offset());
+ }
return kNoSourcePosition;
}
@@ -5526,13 +5552,21 @@ int SharedFunctionInfo::EndPosition() const {
if (info.HasPositionInfo()) {
return info.EndPosition();
}
- } else if (HasUncompiledData()) {
+ }
+ if (HasUncompiledData()) {
// Works with or without scope.
return uncompiled_data().end_position();
- } else if (IsApiFunction() || HasBuiltinId()) {
+ }
+ if (IsApiFunction() || HasBuiltinId()) {
DCHECK_IMPLIES(HasBuiltinId(), builtin_id() != Builtins::kCompileLazy);
return 0;
}
+ if (HasWasmExportedFunctionData()) {
+ WasmInstanceObject instance = wasm_exported_function_data().instance();
+ int func_index = wasm_exported_function_data().function_index();
+ auto& function = instance.module()->functions[func_index];
+ return static_cast<int>(function.code.end_offset());
+ }
return kNoSourcePosition;
}
@@ -5717,17 +5751,27 @@ const char* AllocationSite::PretenureDecisionName(PretenureDecision decision) {
return nullptr;
}
+// static
+bool JSArray::MayHaveReadOnlyLength(Map js_array_map) {
+ DCHECK(js_array_map.IsJSArrayMap());
+ if (js_array_map.is_dictionary_map()) return true;
+
+ // Fast path: "length" is the first fast property of arrays with non
+ // dictionary properties. Since it's not configurable, it's guaranteed to be
+ // the first in the descriptor array.
+ InternalIndex first(0);
+ DCHECK(js_array_map.instance_descriptors().GetKey(first) ==
+ js_array_map.GetReadOnlyRoots().length_string());
+ return js_array_map.instance_descriptors().GetDetails(first).IsReadOnly();
+}
+
bool JSArray::HasReadOnlyLength(Handle<JSArray> array) {
Map map = array->map();
- // Fast path: "length" is the first fast property of arrays. Since it's not
- // configurable, it's guaranteed to be the first in the descriptor array.
- if (!map.is_dictionary_map()) {
- InternalIndex first(0);
- DCHECK(map.instance_descriptors().GetKey(first) ==
- array->GetReadOnlyRoots().length_string());
- return map.instance_descriptors().GetDetails(first).IsReadOnly();
- }
+ // If map guarantees that there can't be a read-only length, we are done.
+ if (!MayHaveReadOnlyLength(map)) return false;
+
+ // Look at the object.
Isolate* isolate = array->GetIsolate();
LookupIterator it(isolate, array, isolate->factory()->length_string(), array,
LookupIterator::OWN_SKIP_INTERCEPTOR);
@@ -5758,7 +5802,7 @@ void Dictionary<Derived, Shape>::Print(std::ostream& os) {
if (!dictionary.ToKey(roots, i, &k)) continue;
os << "\n ";
if (k.IsString()) {
- String::cast(k).StringPrint(os);
+ String::cast(k).PrintUC16(os);
} else {
os << Brief(k);
}
@@ -5796,10 +5840,8 @@ void Symbol::SymbolShortPrint(std::ostream& os) {
os << "<Symbol:";
if (!description().IsUndefined()) {
os << " ";
- HeapStringAllocator allocator;
- StringStream accumulator(&allocator);
- String::cast(description()).StringShortPrint(&accumulator, false);
- os << accumulator.ToCString().get();
+ String description_as_string = String::cast(description());
+ description_as_string.PrintUC16(os, 0, description_as_string.length());
} else {
os << " (" << PrivateSymbolToName() << ")";
}
@@ -6176,12 +6218,12 @@ Handle<Object> JSPromise::TriggerPromiseReactions(Isolate* isolate,
// static
JSRegExp::Flags JSRegExp::FlagsFromString(Isolate* isolate,
Handle<String> flags, bool* success) {
- STATIC_ASSERT(JSRegExp::FlagFromChar('g') == JSRegExp::kGlobal);
- STATIC_ASSERT(JSRegExp::FlagFromChar('i') == JSRegExp::kIgnoreCase);
- STATIC_ASSERT(JSRegExp::FlagFromChar('m') == JSRegExp::kMultiline);
- STATIC_ASSERT(JSRegExp::FlagFromChar('s') == JSRegExp::kDotAll);
- STATIC_ASSERT(JSRegExp::FlagFromChar('u') == JSRegExp::kUnicode);
- STATIC_ASSERT(JSRegExp::FlagFromChar('y') == JSRegExp::kSticky);
+ STATIC_ASSERT(*JSRegExp::FlagFromChar('g') == JSRegExp::kGlobal);
+ STATIC_ASSERT(*JSRegExp::FlagFromChar('i') == JSRegExp::kIgnoreCase);
+ STATIC_ASSERT(*JSRegExp::FlagFromChar('m') == JSRegExp::kMultiline);
+ STATIC_ASSERT(*JSRegExp::FlagFromChar('s') == JSRegExp::kDotAll);
+ STATIC_ASSERT(*JSRegExp::FlagFromChar('u') == JSRegExp::kUnicode);
+ STATIC_ASSERT(*JSRegExp::FlagFromChar('y') == JSRegExp::kSticky);
int length = flags->length();
if (length == 0) {
@@ -6190,14 +6232,16 @@ JSRegExp::Flags JSRegExp::FlagsFromString(Isolate* isolate,
}
// A longer flags string cannot be valid.
if (length > JSRegExp::kFlagCount) return JSRegExp::Flags(0);
- // Initialize {value} to {kInvalid} to allow 2-in-1 duplicate/invalid check.
- JSRegExp::Flags value = JSRegExp::kInvalid;
+ JSRegExp::Flags value(0);
if (flags->IsSeqOneByteString()) {
DisallowHeapAllocation no_gc;
SeqOneByteString seq_flags = SeqOneByteString::cast(*flags);
for (int i = 0; i < length; i++) {
- JSRegExp::Flag flag = JSRegExp::FlagFromChar(seq_flags.Get(i));
- // Duplicate or invalid flag.
+ base::Optional<JSRegExp::Flag> maybe_flag =
+ JSRegExp::FlagFromChar(seq_flags.Get(i));
+ if (!maybe_flag.has_value()) return JSRegExp::Flags(0);
+ JSRegExp::Flag flag = *maybe_flag;
+ // Duplicate flag.
if (value & flag) return JSRegExp::Flags(0);
value |= flag;
}
@@ -6206,15 +6250,16 @@ JSRegExp::Flags JSRegExp::FlagsFromString(Isolate* isolate,
DisallowHeapAllocation no_gc;
String::FlatContent flags_content = flags->GetFlatContent(no_gc);
for (int i = 0; i < length; i++) {
- JSRegExp::Flag flag = JSRegExp::FlagFromChar(flags_content.Get(i));
- // Duplicate or invalid flag.
+ base::Optional<JSRegExp::Flag> maybe_flag =
+ JSRegExp::FlagFromChar(flags_content.Get(i));
+ if (!maybe_flag.has_value()) return JSRegExp::Flags(0);
+ JSRegExp::Flag flag = *maybe_flag;
+ // Duplicate flag.
if (value & flag) return JSRegExp::Flags(0);
value |= flag;
}
}
*success = true;
- // Drop the initially set {kInvalid} bit.
- value ^= JSRegExp::kInvalid;
return value;
}
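// The hunk above switches FlagFromChar to an optional return, so invalid
// characters are rejected explicitly instead of via a kInvalid sentinel bit.
// A minimal standalone sketch of that pattern, using std::optional and plain
// enum bits rather than V8's JSRegExp types (all names here are illustrative):
#include <cstdint>
#include <optional>
#include <string>

enum Flag : uint32_t {
  kGlobal = 1 << 0,
  kIgnoreCase = 1 << 1,
  kMultiline = 1 << 2,
  kDotAll = 1 << 3,
  kUnicode = 1 << 4,
  kSticky = 1 << 5,
};

// Maps a flag character to its bit, or nullopt for an unknown character.
std::optional<Flag> FlagFromChar(char c) {
  switch (c) {
    case 'g': return kGlobal;
    case 'i': return kIgnoreCase;
    case 'm': return kMultiline;
    case 's': return kDotAll;
    case 'u': return kUnicode;
    case 'y': return kSticky;
    default: return std::nullopt;
  }
}

// Parses a flags string; nullopt signals an invalid or duplicated flag.
std::optional<uint32_t> ParseFlags(const std::string& flags) {
  uint32_t value = 0;
  for (char c : flags) {
    std::optional<Flag> flag = FlagFromChar(c);
    if (!flag.has_value()) return std::nullopt;  // invalid flag
    if (value & *flag) return std::nullopt;      // duplicate flag
    value |= *flag;
  }
  return value;
}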
@@ -7852,6 +7897,13 @@ void JSSet::Clear(Isolate* isolate, Handle<JSSet> set) {
set->set_table(*table);
}
+void JSSet::Rehash(Isolate* isolate) {
+ Handle<OrderedHashSet> table_handle(OrderedHashSet::cast(table()), isolate);
+ Handle<OrderedHashSet> new_table =
+ OrderedHashSet::Rehash(isolate, table_handle).ToHandleChecked();
+ set_table(*new_table);
+}
+
void JSMap::Initialize(Handle<JSMap> map, Isolate* isolate) {
Handle<OrderedHashMap> table = isolate->factory()->NewOrderedHashMap();
map->set_table(*table);
@@ -7863,6 +7915,13 @@ void JSMap::Clear(Isolate* isolate, Handle<JSMap> map) {
map->set_table(*table);
}
+void JSMap::Rehash(Isolate* isolate) {
+ Handle<OrderedHashMap> table_handle(OrderedHashMap::cast(table()), isolate);
+ Handle<OrderedHashMap> new_table =
+ OrderedHashMap::Rehash(isolate, table_handle).ToHandleChecked();
+ set_table(*new_table);
+}
+
void JSWeakCollection::Initialize(Handle<JSWeakCollection> weak_collection,
Isolate* isolate) {
Handle<EphemeronHashTable> table = EphemeronHashTable::New(isolate, 0);
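// JSMap::Rehash and JSSet::Rehash above swap in a freshly rehashed backing
// table; conceptually this re-derives every bucket from current identity
// hashes while keeping insertion order intact. A rough sketch of that idea
// with STL containers (OrderedMap is a made-up stand-in, not V8's
// OrderedHashMap):
#include <cstddef>
#include <string>
#include <unordered_map>
#include <utility>
#include <vector>

struct OrderedMap {
  // Entries hold the data in insertion order; the index is the hashed part.
  std::vector<std::pair<std::string, int>> entries;
  std::unordered_map<std::string, size_t> index;

  // Rebuilds only the hashed index; iteration order is untouched.
  void Rehash() {
    index.clear();
    for (size_t i = 0; i < entries.size(); ++i) {
      index.emplace(entries[i].first, i);
    }
  }
};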
diff --git a/chromium/v8/src/objects/oddball.h b/chromium/v8/src/objects/oddball.h
index 4f32e157941..5f0c7ce0015 100644
--- a/chromium/v8/src/objects/oddball.h
+++ b/chromium/v8/src/objects/oddball.h
@@ -47,6 +47,7 @@ class Oddball : public TorqueGeneratedOddball<Oddball, PrimitiveHeapObject> {
static const byte kOptimizedOut = 9;
static const byte kStaleRegister = 10;
static const byte kSelfReferenceMarker = 10;
+ static const byte kBasicBlockCountersMarker = 11;
static_assert(kStartOfWeakFieldsOffset == kEndOfWeakFieldsOffset,
"Ensure BodyDescriptor does not need to handle weak fields.");
diff --git a/chromium/v8/src/objects/ordered-hash-table.cc b/chromium/v8/src/objects/ordered-hash-table.cc
index cbf3ba373b9..d3250bd92db 100644
--- a/chromium/v8/src/objects/ordered-hash-table.cc
+++ b/chromium/v8/src/objects/ordered-hash-table.cc
@@ -196,6 +196,13 @@ HeapObject OrderedHashMap::GetEmpty(ReadOnlyRoots ro_roots) {
template <class Derived, int entrysize>
MaybeHandle<Derived> OrderedHashTable<Derived, entrysize>::Rehash(
+ Isolate* isolate, Handle<Derived> table) {
+ return OrderedHashTable<Derived, entrysize>::Rehash(isolate, table,
+ table->Capacity());
+}
+
+template <class Derived, int entrysize>
+MaybeHandle<Derived> OrderedHashTable<Derived, entrysize>::Rehash(
Isolate* isolate, Handle<Derived> table, int new_capacity) {
DCHECK(!table->IsObsolete());
@@ -250,6 +257,20 @@ MaybeHandle<OrderedHashSet> OrderedHashSet::Rehash(Isolate* isolate,
new_capacity);
}
+MaybeHandle<OrderedHashSet> OrderedHashSet::Rehash(
+ Isolate* isolate, Handle<OrderedHashSet> table) {
+ return OrderedHashTable<
+ OrderedHashSet, OrderedHashSet::kEntrySizeWithoutChain>::Rehash(isolate,
+ table);
+}
+
+MaybeHandle<OrderedHashMap> OrderedHashMap::Rehash(
+ Isolate* isolate, Handle<OrderedHashMap> table) {
+ return OrderedHashTable<
+ OrderedHashMap, OrderedHashMap::kEntrySizeWithoutChain>::Rehash(isolate,
+ table);
+}
+
MaybeHandle<OrderedHashMap> OrderedHashMap::Rehash(Isolate* isolate,
Handle<OrderedHashMap> table,
int new_capacity) {
diff --git a/chromium/v8/src/objects/ordered-hash-table.h b/chromium/v8/src/objects/ordered-hash-table.h
index b587960432c..5f3c45a110a 100644
--- a/chromium/v8/src/objects/ordered-hash-table.h
+++ b/chromium/v8/src/objects/ordered-hash-table.h
@@ -138,6 +138,7 @@ class OrderedHashTable : public FixedArray {
// The extra +1 is for linking the bucket chains together.
static const int kEntrySize = entrysize + 1;
+ static const int kEntrySizeWithoutChain = entrysize;
static const int kChainOffset = entrysize;
static const int kNotFound = -1;
@@ -200,6 +201,8 @@ class OrderedHashTable : public FixedArray {
static MaybeHandle<Derived> Allocate(
Isolate* isolate, int capacity,
AllocationType allocation = AllocationType::kYoung);
+
+ static MaybeHandle<Derived> Rehash(Isolate* isolate, Handle<Derived> table);
static MaybeHandle<Derived> Rehash(Isolate* isolate, Handle<Derived> table,
int new_capacity);
@@ -244,6 +247,8 @@ class V8_EXPORT_PRIVATE OrderedHashSet
static MaybeHandle<OrderedHashSet> Rehash(Isolate* isolate,
Handle<OrderedHashSet> table,
int new_capacity);
+ static MaybeHandle<OrderedHashSet> Rehash(Isolate* isolate,
+ Handle<OrderedHashSet> table);
static MaybeHandle<OrderedHashSet> Allocate(
Isolate* isolate, int capacity,
AllocationType allocation = AllocationType::kYoung);
@@ -273,6 +278,8 @@ class V8_EXPORT_PRIVATE OrderedHashMap
static MaybeHandle<OrderedHashMap> Rehash(Isolate* isolate,
Handle<OrderedHashMap> table,
int new_capacity);
+ static MaybeHandle<OrderedHashMap> Rehash(Isolate* isolate,
+ Handle<OrderedHashMap> table);
Object ValueAt(int entry);
// This takes and returns raw Address values containing tagged Object
diff --git a/chromium/v8/src/objects/property-descriptor.cc b/chromium/v8/src/objects/property-descriptor.cc
index 9c9a71849f4..a14601bc74b 100644
--- a/chromium/v8/src/objects/property-descriptor.cc
+++ b/chromium/v8/src/objects/property-descriptor.cc
@@ -43,29 +43,29 @@ bool GetPropertyIfPresent(Handle<JSReceiver> receiver, Handle<String> name,
bool ToPropertyDescriptorFastPath(Isolate* isolate, Handle<JSReceiver> obj,
PropertyDescriptor* desc) {
if (!obj->IsJSObject()) return false;
- Map map = Handle<JSObject>::cast(obj)->map();
- if (map.instance_type() != JS_OBJECT_TYPE) return false;
- if (map.is_access_check_needed()) return false;
- if (map.prototype() != *isolate->initial_object_prototype()) return false;
+ Handle<Map> map(Handle<JSObject>::cast(obj)->map(), isolate);
+ if (map->instance_type() != JS_OBJECT_TYPE) return false;
+ if (map->is_access_check_needed()) return false;
+ if (map->prototype() != *isolate->initial_object_prototype()) return false;
// During bootstrapping, the object_function_prototype_map hasn't been
// set up yet.
if (isolate->bootstrapper()->IsActive()) return false;
- if (JSObject::cast(map.prototype()).map() !=
+ if (JSObject::cast(map->prototype()).map() !=
isolate->native_context()->object_function_prototype_map()) {
return false;
}
// TODO(jkummerow): support dictionary properties?
- if (map.is_dictionary_map()) return false;
+ if (map->is_dictionary_map()) return false;
Handle<DescriptorArray> descs =
- Handle<DescriptorArray>(map.instance_descriptors(), isolate);
- for (InternalIndex i : map.IterateOwnDescriptors()) {
+ Handle<DescriptorArray>(map->instance_descriptors(), isolate);
+ for (InternalIndex i : map->IterateOwnDescriptors()) {
PropertyDetails details = descs->GetDetails(i);
Handle<Object> value;
if (details.location() == kField) {
if (details.kind() == kData) {
value = JSObject::FastPropertyAt(Handle<JSObject>::cast(obj),
details.representation(),
- FieldIndex::ForDescriptor(map, i));
+ FieldIndex::ForDescriptor(*map, i));
} else {
DCHECK_EQ(kAccessor, details.kind());
// Bail out to slow path.
diff --git a/chromium/v8/src/objects/prototype-info-inl.h b/chromium/v8/src/objects/prototype-info-inl.h
index 80efa862c28..8c93b21f24b 100644
--- a/chromium/v8/src/objects/prototype-info-inl.h
+++ b/chromium/v8/src/objects/prototype-info-inl.h
@@ -20,9 +20,7 @@
namespace v8 {
namespace internal {
-OBJECT_CONSTRUCTORS_IMPL(PrototypeInfo, Struct)
-
-CAST_ACCESSOR(PrototypeInfo)
+TQ_OBJECT_CONSTRUCTORS_IMPL(PrototypeInfo)
Map PrototypeInfo::ObjectCreateMap() {
return Map::cast(object_create_map()->GetHeapObjectAssumeWeak());
@@ -39,14 +37,8 @@ bool PrototypeInfo::HasObjectCreateMap() {
return cache->IsWeak();
}
-ACCESSORS(PrototypeInfo, module_namespace, Object, kJsModuleNamespaceOffset)
-ACCESSORS(PrototypeInfo, prototype_users, Object, kPrototypeUsersOffset)
-ACCESSORS(PrototypeInfo, prototype_chain_enum_cache, Object,
- kPrototypeChainEnumCacheOffset)
-WEAK_ACCESSORS(PrototypeInfo, object_create_map, kObjectCreateMapOffset)
-SMI_ACCESSORS(PrototypeInfo, registry_slot, kRegistrySlotOffset)
-SMI_ACCESSORS(PrototypeInfo, bit_field, kBitFieldOffset)
-BOOL_ACCESSORS(PrototypeInfo, bit_field, should_be_fast_map, kShouldBeFastBit)
+BOOL_ACCESSORS(PrototypeInfo, bit_field, should_be_fast_map,
+ ShouldBeFastBit::kShift)
void PrototypeUsers::MarkSlotEmpty(WeakArrayList array, int index) {
DCHECK_GT(index, 0);
diff --git a/chromium/v8/src/objects/prototype-info.h b/chromium/v8/src/objects/prototype-info.h
index ab238bd9c2f..4e665a39c8d 100644
--- a/chromium/v8/src/objects/prototype-info.h
+++ b/chromium/v8/src/objects/prototype-info.h
@@ -8,6 +8,7 @@
#include "src/objects/fixed-array.h"
#include "src/objects/objects.h"
#include "src/objects/struct.h"
+#include "torque-generated/bit-fields-tq.h"
// Has to be the last include (doesn't have include guards):
#include "src/objects/object-macros.h"
@@ -16,57 +17,29 @@ namespace v8 {
namespace internal {
// Container for metadata stored on each prototype map.
-class PrototypeInfo : public Struct {
+class PrototypeInfo
+ : public TorqueGeneratedPrototypeInfo<PrototypeInfo, Struct> {
public:
static const int UNREGISTERED = -1;
- // [module_namespace]: A backpointer to JSModuleNamespace from its
- // PrototypeInfo (or undefined). This field is only used for JSModuleNamespace
- // maps. TODO(jkummerow): Figure out if there's a way to store the namespace
- // pointer elsewhere to save memory.
- DECL_ACCESSORS(module_namespace, Object)
-
- // [prototype_users]: WeakArrayList containing weak references to maps using
- // this prototype, or Smi(0) if uninitialized.
- DECL_ACCESSORS(prototype_users, Object)
-
- DECL_ACCESSORS(prototype_chain_enum_cache, Object)
-
// [object_create_map]: A field caching the map for Object.create(prototype).
static inline void SetObjectCreateMap(Handle<PrototypeInfo> info,
Handle<Map> map);
inline Map ObjectCreateMap();
inline bool HasObjectCreateMap();
- // [registry_slot]: Slot in prototype's user registry where this user
- // is stored. Returns UNREGISTERED if this prototype has not been registered.
- inline int registry_slot() const;
- inline void set_registry_slot(int slot);
-
- // [bit_field]
- inline int bit_field() const;
- inline void set_bit_field(int bit_field);
-
DECL_BOOLEAN_ACCESSORS(should_be_fast_map)
- DECL_CAST(PrototypeInfo)
-
// Dispatched behavior.
DECL_PRINTER(PrototypeInfo)
DECL_VERIFIER(PrototypeInfo)
- DEFINE_FIELD_OFFSET_CONSTANTS(HeapObject::kHeaderSize,
- TORQUE_GENERATED_PROTOTYPE_INFO_FIELDS)
-
// Bit field usage.
- static const int kShouldBeFastBit = 0;
+ DEFINE_TORQUE_GENERATED_PROTOTYPE_INFO_FLAGS()
class BodyDescriptor;
- private:
- DECL_ACCESSORS(object_create_map, MaybeObject)
-
- OBJECT_CONSTRUCTORS(PrototypeInfo, Struct);
+ TQ_OBJECT_CONSTRUCTORS(PrototypeInfo)
};
// A growing array with an additional API for marking slots "empty". When adding
diff --git a/chromium/v8/src/objects/prototype-info.tq b/chromium/v8/src/objects/prototype-info.tq
index 77ffa4358d7..96f65a053ee 100644
--- a/chromium/v8/src/objects/prototype-info.tq
+++ b/chromium/v8/src/objects/prototype-info.tq
@@ -2,20 +2,30 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
+bitfield struct PrototypeInfoFlags extends uint31 {
+ should_be_fast: bool: 1 bit;
+}
+
+@generateCppClass
extern class PrototypeInfo extends Struct {
- js_module_namespace: JSModuleNamespace|Undefined;
+ // [module_namespace]: A backpointer to JSModuleNamespace from its
+ // PrototypeInfo (or undefined). This field is only used for JSModuleNamespace
+ // maps. TODO(jkummerow): Figure out if there's a way to store the namespace
+ // pointer elsewhere to save memory.
+ module_namespace: JSModuleNamespace|Undefined;
+
+ // [prototype_users]: WeakArrayList containing weak references to maps using
+ // this prototype, or Smi(0) if uninitialized.
prototype_users: WeakArrayList|Zero;
- prototype_chain_enum_cache: FixedArray|Object|Undefined;
+
+ prototype_chain_enum_cache: FixedArray|Zero|Undefined;
+
+ // [registry_slot]: Slot in prototype's user registry where this user
+ // is stored. Returns UNREGISTERED if this prototype has not been registered.
registry_slot: Smi;
- validity_cell: Object;
- object_create_map: Weak<Map>|Undefined;
- bit_field: Smi;
-}
-extern macro PrototypeInfoMapConstant(): Map;
-const kPrototypeInfoMap: Map = PrototypeInfoMapConstant();
+ // [object_create_map]: A field caching the map for Object.create(prototype).
+ object_create_map: Weak<Map>|Undefined;
-Cast<PrototypeInfo>(o: HeapObject): PrototypeInfo labels CastError {
- if (o.map != kPrototypeInfoMap) goto CastError;
- return %RawDownCast<PrototypeInfo>(o);
+ bit_field: SmiTagged<PrototypeInfoFlags>;
}
diff --git a/chromium/v8/src/objects/regexp-match-info.tq b/chromium/v8/src/objects/regexp-match-info.tq
index 6940ce45836..33825c7c8a9 100644
--- a/chromium/v8/src/objects/regexp-match-info.tq
+++ b/chromium/v8/src/objects/regexp-match-info.tq
@@ -3,6 +3,7 @@
// found in the LICENSE file.
@hasSameInstanceTypeAsParent
+@doNotGenerateCast
extern class RegExpMatchInfo extends FixedArray {
macro GetStartOfCapture(implicit context: Context)(captureIndex:
constexpr int31): Smi {
diff --git a/chromium/v8/src/objects/scope-info.cc b/chromium/v8/src/objects/scope-info.cc
index 4dcd67905c6..b303e039301 100644
--- a/chromium/v8/src/objects/scope-info.cc
+++ b/chromium/v8/src/objects/scope-info.cc
@@ -215,7 +215,7 @@ Handle<ScopeInfo> ScopeInfo::Create(LocalIsolate* isolate, Zone* zone,
scope->private_name_lookup_skips_outer_class()) |
HasContextExtensionSlotBit::encode(scope->HasContextExtensionSlot()) |
IsReplModeScopeBit::encode(scope->is_repl_mode_scope()) |
- HasLocalsBlackListBit::encode(false);
+ HasLocalsBlockListBit::encode(false);
scope_info.SetFlags(flags);
scope_info.SetParameterCount(parameter_count);
@@ -415,7 +415,7 @@ Handle<ScopeInfo> ScopeInfo::CreateForWithScope(
ForceContextAllocationBit::encode(false) |
PrivateNameLookupSkipsOuterClassBit::encode(false) |
HasContextExtensionSlotBit::encode(true) |
- IsReplModeScopeBit::encode(false) | HasLocalsBlackListBit::encode(false);
+ IsReplModeScopeBit::encode(false) | HasLocalsBlockListBit::encode(false);
scope_info->SetFlags(flags);
scope_info->SetParameterCount(0);
@@ -495,7 +495,7 @@ Handle<ScopeInfo> ScopeInfo::CreateForBootstrapping(Isolate* isolate,
ForceContextAllocationBit::encode(false) |
PrivateNameLookupSkipsOuterClassBit::encode(false) |
HasContextExtensionSlotBit::encode(is_native_context) |
- IsReplModeScopeBit::encode(false) | HasLocalsBlackListBit::encode(false);
+ IsReplModeScopeBit::encode(false) | HasLocalsBlockListBit::encode(false);
scope_info->SetFlags(flags);
scope_info->SetParameterCount(parameter_count);
scope_info->SetContextLocalCount(context_local_count);
@@ -552,34 +552,34 @@ Handle<ScopeInfo> ScopeInfo::CreateForBootstrapping(Isolate* isolate,
}
// static
-Handle<ScopeInfo> ScopeInfo::RecreateWithBlackList(
- Isolate* isolate, Handle<ScopeInfo> original, Handle<StringSet> blacklist) {
+Handle<ScopeInfo> ScopeInfo::RecreateWithBlockList(
+ Isolate* isolate, Handle<ScopeInfo> original, Handle<StringSet> blocklist) {
DCHECK(!original.is_null());
- if (original->HasLocalsBlackList()) return original;
+ if (original->HasLocalsBlockList()) return original;
Handle<ScopeInfo> scope_info =
isolate->factory()->NewScopeInfo(original->length() + 1);
// Copy the static part first and update the flags to include the
- // blacklist field, so {LocalsBlackListIndex} returns the correct value.
+ // blocklist field, so {LocalsBlockListIndex} returns the correct value.
scope_info->CopyElements(isolate, 0, *original, 0, kVariablePartIndex,
WriteBarrierMode::UPDATE_WRITE_BARRIER);
scope_info->SetFlags(
- HasLocalsBlackListBit::update(scope_info->Flags(), true));
+ HasLocalsBlockListBit::update(scope_info->Flags(), true));
- // Copy the dynamic part including the provided blacklist:
- // 1) copy all the fields up to the blacklist index
- // 2) add the blacklist
+ // Copy the dynamic part including the provided blocklist:
+ // 1) copy all the fields up to the blocklist index
+ // 2) add the blocklist
// 3) copy the remaining fields
scope_info->CopyElements(
isolate, kVariablePartIndex, *original, kVariablePartIndex,
- scope_info->LocalsBlackListIndex() - kVariablePartIndex,
+ scope_info->LocalsBlockListIndex() - kVariablePartIndex,
WriteBarrierMode::UPDATE_WRITE_BARRIER);
- scope_info->set(scope_info->LocalsBlackListIndex(), *blacklist);
+ scope_info->set(scope_info->LocalsBlockListIndex(), *blocklist);
scope_info->CopyElements(
- isolate, scope_info->LocalsBlackListIndex() + 1, *original,
- scope_info->LocalsBlackListIndex(),
- scope_info->length() - scope_info->LocalsBlackListIndex() - 1,
+ isolate, scope_info->LocalsBlockListIndex() + 1, *original,
+ scope_info->LocalsBlockListIndex(),
+ scope_info->length() - scope_info->LocalsBlockListIndex() - 1,
WriteBarrierMode::UPDATE_WRITE_BARRIER);
return scope_info;
}
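// RecreateWithBlockList grows the flat ScopeInfo array by one slot using a
// three-step copy. The same pattern with std::vector, where slot_index plays
// the role of LocalsBlockListIndex (names here are illustrative):
#include <cstddef>
#include <string>
#include <vector>

std::vector<std::string> RecreateWithExtraSlot(
    const std::vector<std::string>& original, size_t slot_index,
    const std::string& blocklist) {
  std::vector<std::string> copy;
  copy.reserve(original.size() + 1);
  // 1) copy all the fields up to the blocklist index,
  copy.insert(copy.end(), original.begin(), original.begin() + slot_index);
  // 2) add the blocklist,
  copy.push_back(blocklist);
  // 3) copy the remaining fields.
  copy.insert(copy.end(), original.begin() + slot_index, original.end());
  return copy;
}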
@@ -735,14 +735,14 @@ bool ScopeInfo::IsReplModeScope() const {
return IsReplModeScopeBit::decode(Flags());
}
-bool ScopeInfo::HasLocalsBlackList() const {
+bool ScopeInfo::HasLocalsBlockList() const {
if (length() == 0) return false;
- return HasLocalsBlackListBit::decode(Flags());
+ return HasLocalsBlockListBit::decode(Flags());
}
-StringSet ScopeInfo::LocalsBlackList() const {
- DCHECK(HasLocalsBlackList());
- return StringSet::cast(get(LocalsBlackListIndex()));
+StringSet ScopeInfo::LocalsBlockList() const {
+ DCHECK(HasLocalsBlockList());
+ return StringSet::cast(get(LocalsBlockListIndex()));
}
bool ScopeInfo::HasContext() const { return ContextLength() > 0; }
@@ -984,12 +984,12 @@ int ScopeInfo::OuterScopeInfoIndex() const {
return PositionInfoIndex() + (HasPositionInfo() ? kPositionInfoEntries : 0);
}
-int ScopeInfo::LocalsBlackListIndex() const {
+int ScopeInfo::LocalsBlockListIndex() const {
return OuterScopeInfoIndex() + (HasOuterScopeInfo() ? 1 : 0);
}
int ScopeInfo::ModuleInfoIndex() const {
- return LocalsBlackListIndex() + (HasLocalsBlackList() ? 1 : 0);
+ return LocalsBlockListIndex() + (HasLocalsBlockList() ? 1 : 0);
}
int ScopeInfo::ModuleVariableCountIndex() const {
diff --git a/chromium/v8/src/objects/scope-info.h b/chromium/v8/src/objects/scope-info.h
index 2e7df22562a..73f039e3f60 100644
--- a/chromium/v8/src/objects/scope-info.h
+++ b/chromium/v8/src/objects/scope-info.h
@@ -200,13 +200,13 @@ class ScopeInfo : public FixedArray {
bool is_script_scope() const;
- // Returns true if this ScopeInfo has a black list attached containing
- // stack allocated local variables.
- V8_EXPORT_PRIVATE bool HasLocalsBlackList() const;
+ // Returns true if this ScopeInfo has a blocklist attached containing stack
+ // allocated local variables.
+ V8_EXPORT_PRIVATE bool HasLocalsBlockList() const;
// Returns a list of stack-allocated locals of parent scopes.
  // Used during local debug-evaluate to decide whether a context lookup
// can continue upwards after checking this scope.
- V8_EXPORT_PRIVATE StringSet LocalsBlackList() const;
+ V8_EXPORT_PRIVATE StringSet LocalsBlockList() const;
// Returns true if this ScopeInfo was created for a scope that skips the
// closest outer class when resolving private names.
@@ -231,12 +231,12 @@ class ScopeInfo : public FixedArray {
static Handle<ScopeInfo> CreateForNativeContext(Isolate* isolate);
static Handle<ScopeInfo> CreateGlobalThisBinding(Isolate* isolate);
- // Creates a copy of a {ScopeInfo} but with the provided locals blacklist
+ // Creates a copy of a {ScopeInfo} but with the provided locals blocklist
// attached. Does nothing if the original {ScopeInfo} already has a field
- // for a blacklist reserved.
- V8_EXPORT_PRIVATE static Handle<ScopeInfo> RecreateWithBlackList(
+ // for a blocklist reserved.
+ V8_EXPORT_PRIVATE static Handle<ScopeInfo> RecreateWithBlockList(
Isolate* isolate, Handle<ScopeInfo> original,
- Handle<StringSet> blacklist);
+ Handle<StringSet> blocklist);
// Serializes empty scope info.
V8_EXPORT_PRIVATE static ScopeInfo Empty(Isolate* isolate);
@@ -302,7 +302,7 @@ class ScopeInfo : public FixedArray {
// the scope belongs to a function or script.
// 8. OuterScopeInfoIndex:
// The outer scope's ScopeInfo or the hole if there's none.
- // 9. LocalsBlackList: List of stack allocated local variables. Used by
+ // 9. LocalsBlockList: List of stack allocated local variables. Used by
// debug evaluate to properly abort variable lookup when a name clashes
// with a stack allocated local that can't be materialized.
// 10. SourceTextModuleInfo, ModuleVariableCount, and ModuleVariables:
@@ -317,7 +317,7 @@ class ScopeInfo : public FixedArray {
int InferredFunctionNameIndex() const;
int PositionInfoIndex() const;
int OuterScopeInfoIndex() const;
- V8_EXPORT_PRIVATE int LocalsBlackListIndex() const;
+ V8_EXPORT_PRIVATE int LocalsBlockListIndex() const;
int ModuleInfoIndex() const;
int ModuleVariableCountIndex() const;
int ModuleVariablesIndex() const;
@@ -354,7 +354,7 @@ class ScopeInfo : public FixedArray {
friend std::ostream& operator<<(std::ostream& os, VariableAllocationInfo var);
OBJECT_CONSTRUCTORS(ScopeInfo, FixedArray);
- FRIEND_TEST(TestWithNativeContext, RecreateScopeInfoWithLocalsBlacklistWorks);
+ FRIEND_TEST(TestWithNativeContext, RecreateScopeInfoWithLocalsBlocklistWorks);
};
std::ostream& operator<<(std::ostream& os, VariableAllocationInfo var);
diff --git a/chromium/v8/src/objects/scope-info.tq b/chromium/v8/src/objects/scope-info.tq
index cb8ead30abb..746c8711717 100644
--- a/chromium/v8/src/objects/scope-info.tq
+++ b/chromium/v8/src/objects/scope-info.tq
@@ -40,5 +40,5 @@ bitfield struct ScopeFlags extends uint32 {
private_name_lookup_skips_outer_class: bool: 1 bit;
has_context_extension_slot: bool: 1 bit;
is_repl_mode_scope: bool: 1 bit;
- has_locals_black_list: bool: 1 bit;
+ has_locals_block_list: bool: 1 bit;
}
diff --git a/chromium/v8/src/objects/script-inl.h b/chromium/v8/src/objects/script-inl.h
index 1e8b83798cb..ce0bd80a6dc 100644
--- a/chromium/v8/src/objects/script-inl.h
+++ b/chromium/v8/src/objects/script-inl.h
@@ -17,29 +17,16 @@
namespace v8 {
namespace internal {
-OBJECT_CONSTRUCTORS_IMPL(Script, Struct)
+TQ_OBJECT_CONSTRUCTORS_IMPL(Script)
NEVER_READ_ONLY_SPACE_IMPL(Script)
-CAST_ACCESSOR(Script)
-
-ACCESSORS(Script, source, Object, kSourceOffset)
-ACCESSORS(Script, name, Object, kNameOffset)
-SMI_ACCESSORS(Script, id, kIdOffset)
-SMI_ACCESSORS(Script, line_offset, kLineOffsetOffset)
-SMI_ACCESSORS(Script, column_offset, kColumnOffsetOffset)
-ACCESSORS(Script, context_data, Object, kContextOffset)
SMI_ACCESSORS(Script, type, kScriptTypeOffset)
-ACCESSORS(Script, line_ends, Object, kLineEndsOffset)
ACCESSORS_CHECKED(Script, eval_from_shared_or_wrapped_arguments, Object,
kEvalFromSharedOrWrappedArgumentsOffset,
this->type() != TYPE_WASM)
SMI_ACCESSORS_CHECKED(Script, eval_from_position, kEvalFromPositionOffset,
this->type() != TYPE_WASM)
-SMI_ACCESSORS(Script, flags, kFlagsOffset)
-ACCESSORS(Script, source_url, Object, kSourceUrlOffset)
-ACCESSORS(Script, source_mapping_url, Object, kSourceMappingUrlOffset)
-ACCESSORS(Script, host_defined_options, FixedArray, kHostDefinedOptionsOffset)
ACCESSORS_CHECKED(Script, wasm_breakpoint_infos, FixedArray,
kEvalFromSharedOrWrappedArgumentsOffset,
this->type() == TYPE_WASM)
@@ -100,39 +87,30 @@ wasm::NativeModule* Script::wasm_native_module() const {
}
Script::CompilationType Script::compilation_type() {
- return BooleanBit::get(flags(), kCompilationTypeBit) ? COMPILATION_TYPE_EVAL
- : COMPILATION_TYPE_HOST;
+ return CompilationTypeBit::decode(flags());
}
void Script::set_compilation_type(CompilationType type) {
- set_flags(BooleanBit::set(flags(), kCompilationTypeBit,
- type == COMPILATION_TYPE_EVAL));
+ set_flags(CompilationTypeBit::update(flags(), type));
}
Script::CompilationState Script::compilation_state() {
- return BooleanBit::get(flags(), kCompilationStateBit)
- ? COMPILATION_STATE_COMPILED
- : COMPILATION_STATE_INITIAL;
+ return CompilationStateBit::decode(flags());
}
void Script::set_compilation_state(CompilationState state) {
- set_flags(BooleanBit::set(flags(), kCompilationStateBit,
- state == COMPILATION_STATE_COMPILED));
+ set_flags(CompilationStateBit::update(flags(), state));
}
-bool Script::is_repl_mode() const {
- return BooleanBit::get(flags(), kREPLModeBit);
-}
+bool Script::is_repl_mode() const { return IsReplModeBit::decode(flags()); }
void Script::set_is_repl_mode(bool value) {
- set_flags(BooleanBit::set(flags(), kREPLModeBit, value));
+ set_flags(IsReplModeBit::update(flags(), value));
}
ScriptOriginOptions Script::origin_options() {
- return ScriptOriginOptions((flags() & kOriginOptionsMask) >>
- kOriginOptionsShift);
+ return ScriptOriginOptions(OriginOptionsBits::decode(flags()));
}
void Script::set_origin_options(ScriptOriginOptions origin_options) {
- DCHECK(!(origin_options.Flags() & ~((1 << kOriginOptionsSize) - 1)));
- set_flags((flags() & ~kOriginOptionsMask) |
- (origin_options.Flags() << kOriginOptionsShift));
+ DCHECK(!(origin_options.Flags() & ~((1 << OriginOptionsBits::kSize) - 1)));
+ set_flags(OriginOptionsBits::update(flags(), origin_options.Flags()));
}
bool Script::HasValidSource() {
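// The flag accessors above trade hand-written shifts and masks for
// BitField-style encode/decode/update helpers. A tiny stand-in with the same
// shape (V8's real helper is base::BitField; the aliases below only roughly
// mirror the ScriptFlags layout):
#include <cstdint>

template <typename T, int kShift, int kSize>
struct BitField {
  static constexpr uint32_t kMask = ((uint32_t{1} << kSize) - 1) << kShift;
  static constexpr uint32_t encode(T value) {
    return (static_cast<uint32_t>(value) << kShift) & kMask;
  }
  static constexpr T decode(uint32_t flags) {
    return static_cast<T>((flags & kMask) >> kShift);
  }
  static constexpr uint32_t update(uint32_t flags, T value) {
    return (flags & ~kMask) | encode(value);
  }
};

using CompilationTypeBit = BitField<bool, 0, 1>;   // eval vs. host
using CompilationStateBit = BitField<bool, 1, 1>;  // compiled vs. initial
using IsReplModeBit = BitField<bool, 2, 1>;
using OriginOptionsBits = BitField<int, 3, 4>;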
diff --git a/chromium/v8/src/objects/script.h b/chromium/v8/src/objects/script.h
index d5876de2e0a..9ce44c770fd 100644
--- a/chromium/v8/src/objects/script.h
+++ b/chromium/v8/src/objects/script.h
@@ -11,6 +11,7 @@
#include "src/objects/fixed-array.h"
#include "src/objects/objects.h"
#include "src/objects/struct.h"
+#include "torque-generated/bit-fields-tq.h"
// Has to be the last include (doesn't have include guards):
#include "src/objects/object-macros.h"
@@ -20,7 +21,7 @@ namespace v8 {
namespace internal {
// Script describes a script which has been added to the VM.
-class Script : public Struct {
+class Script : public TorqueGeneratedScript<Script, Struct> {
public:
// Script ID used for temporary scripts, which shouldn't be added to the
// script list.
@@ -45,31 +46,9 @@ class Script : public Struct {
COMPILATION_STATE_COMPILED = 1
};
- // [source]: the script source.
- DECL_ACCESSORS(source, Object)
-
- // [name]: the script name.
- DECL_ACCESSORS(name, Object)
-
- // [id]: the script id.
- DECL_INT_ACCESSORS(id)
-
- // [line_offset]: script line offset in resource from where it was extracted.
- DECL_INT_ACCESSORS(line_offset)
-
- // [column_offset]: script column offset in resource from where it was
- // extracted.
- DECL_INT_ACCESSORS(column_offset)
-
- // [context_data]: context data for the context this script was compiled in.
- DECL_ACCESSORS(context_data, Object)
-
// [type]: the script type.
DECL_INT_ACCESSORS(type)
- // [line_ends]: FixedArray of line ends positions.
- DECL_ACCESSORS(line_ends, Object)
-
DECL_ACCESSORS(eval_from_shared_or_wrapped_arguments, Object)
// [eval_from_shared]: for eval scripts the shared function info for the
@@ -95,15 +74,6 @@ class Script : public Struct {
// function infos created from this script.
DECL_ACCESSORS(shared_function_infos, WeakFixedArray)
- // [flags]: Holds an exciting bitfield.
- DECL_INT_ACCESSORS(flags)
-
- // [source_url]: sourceURL from magic comment
- DECL_ACCESSORS(source_url, Object)
-
- // [source_mapping_url]: sourceMappingURL magic comment
- DECL_ACCESSORS(source_mapping_url, Object)
-
// [wasm_breakpoint_infos]: the list of {BreakPointInfo} objects describing
// all WebAssembly breakpoints for modules/instances managed via this script.
// This must only be called if the type of this script is TYPE_WASM.
@@ -120,9 +90,6 @@ class Script : public Struct {
// This must only be called if the type of this script is TYPE_WASM.
DECL_ACCESSORS(wasm_weak_instance_list, WeakArrayList)
- // [host_defined_options]: Options defined by the embedder.
- DECL_ACCESSORS(host_defined_options, FixedArray)
-
  // [compilation_type]: how the script was compiled. Encoded in the
// 'flags' field.
inline CompilationType compilation_type();
@@ -144,8 +111,6 @@ class Script : public Struct {
inline v8::ScriptOriginOptions origin_options();
inline void set_origin_options(ScriptOriginOptions origin_options);
- DECL_CAST(Script)
-
// If script source is an external string, check that the underlying
// resource is accessible. Otherwise, always return true.
inline bool HasValidSource();
@@ -218,20 +183,11 @@ class Script : public Struct {
DECL_PRINTER(Script)
DECL_VERIFIER(Script)
- DEFINE_FIELD_OFFSET_CONSTANTS(HeapObject::kHeaderSize,
- TORQUE_GENERATED_SCRIPT_FIELDS)
-
private:
// Bit positions in the flags field.
- static const int kCompilationTypeBit = 0;
- static const int kCompilationStateBit = 1;
- static const int kREPLModeBit = 2;
- static const int kOriginOptionsShift = 3;
- static const int kOriginOptionsSize = 4;
- static const int kOriginOptionsMask = ((1 << kOriginOptionsSize) - 1)
- << kOriginOptionsShift;
-
- OBJECT_CONSTRUCTORS(Script, Struct);
+ DEFINE_TORQUE_GENERATED_SCRIPT_FLAGS()
+
+ TQ_OBJECT_CONSTRUCTORS(Script)
};
} // namespace internal
diff --git a/chromium/v8/src/objects/script.tq b/chromium/v8/src/objects/script.tq
index 5e68c870239..cac5ceb3ba4 100644
--- a/chromium/v8/src/objects/script.tq
+++ b/chromium/v8/src/objects/script.tq
@@ -2,20 +2,56 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
+type CompilationType extends int32 constexpr 'Script::CompilationType';
+type CompilationState extends int32 constexpr 'Script::CompilationState';
+
+bitfield struct ScriptFlags extends uint31 {
+ compilation_type: CompilationType: 1 bit;
+ compilation_state: CompilationState: 1 bit;
+ is_repl_mode: bool: 1 bit;
+ origin_options: int32: 4 bit;
+}
+
+@generateCppClass
extern class Script extends Struct {
- source: Object;
+ // [source]: the script source.
+ source: String|Undefined;
+
+ // [name]: the script name.
name: Object;
+
+ // [line_offset]: script line offset in resource from where it was extracted.
line_offset: Smi;
+
+ // [column_offset]: script column offset in resource from where it was
+ // extracted.
column_offset: Smi;
- context: Object;
+
+ // [context_data]: context data for the context this script was compiled in.
+ context_data: Smi|Undefined|Symbol;
+
script_type: Smi;
- line_ends: Object;
+
+ // [line_ends]: FixedArray of line ends positions.
+ line_ends: FixedArray|Undefined;
+
+ // [id]: the script id.
id: Smi;
- eval_from_shared_or_wrapped_arguments: Object;
+
+ eval_from_shared_or_wrapped_arguments: SharedFunctionInfo|FixedArray|
+ Undefined;
eval_from_position: Smi|Foreign; // Smi or Managed<wasm::NativeModule>
- shared_function_infos: Object;
- flags: Smi;
- source_url: Object;
+ shared_function_infos: WeakFixedArray|WeakArrayList;
+
+ // [flags]: Holds an exciting bitfield.
+ flags: SmiTagged<ScriptFlags>;
+
+ // [source_url]: sourceURL from magic comment
+ source_url: String|Undefined;
+
+ // [source_mapping_url]: sourceMappingURL magic comment
source_mapping_url: Object;
- host_defined_options: Object;
+
+ // [host_defined_options]: Options defined by the embedder.
+ host_defined_options: FixedArray;
}
diff --git a/chromium/v8/src/objects/shared-function-info-inl.h b/chromium/v8/src/objects/shared-function-info-inl.h
index 169e3c0c156..28274b71c17 100644
--- a/chromium/v8/src/objects/shared-function-info-inl.h
+++ b/chromium/v8/src/objects/shared-function-info-inl.h
@@ -97,6 +97,8 @@ NEVER_READ_ONLY_SPACE_IMPL(SharedFunctionInfo)
CAST_ACCESSOR(SharedFunctionInfo)
DEFINE_DEOPT_ELEMENT_ACCESSORS(SharedFunctionInfo, Object)
+SYNCHRONIZED_ACCESSORS(SharedFunctionInfo, function_data, Object,
+ kFunctionDataOffset)
ACCESSORS(SharedFunctionInfo, name_or_scope_info, Object,
kNameOrScopeInfoOffset)
ACCESSORS(SharedFunctionInfo, script_or_debug_info, HeapObject,
@@ -168,15 +170,6 @@ AbstractCode SharedFunctionInfo::abstract_code() {
}
}
-Object SharedFunctionInfo::function_data() const {
- return ACQUIRE_READ_FIELD(*this, kFunctionDataOffset);
-}
-
-void SharedFunctionInfo::set_function_data(Object data, WriteBarrierMode mode) {
- RELEASE_WRITE_FIELD(*this, kFunctionDataOffset, data);
- CONDITIONAL_WRITE_BARRIER(*this, kFunctionDataOffset, data, mode);
-}
-
int SharedFunctionInfo::function_token_position() const {
int offset = raw_function_token_offset();
if (offset == kFunctionTokenOutOfRange) {
diff --git a/chromium/v8/src/objects/shared-function-info.tq b/chromium/v8/src/objects/shared-function-info.tq
index f37cc250bc2..c29060714a7 100644
--- a/chromium/v8/src/objects/shared-function-info.tq
+++ b/chromium/v8/src/objects/shared-function-info.tq
@@ -77,3 +77,12 @@ extern class UncompiledDataWithoutPreparseData extends UncompiledData {
extern class UncompiledDataWithPreparseData extends UncompiledData {
preparse_data: PreparseData;
}
+
+@export
+class OnHeapBasicBlockProfilerData extends HeapObject {
+ block_rpo_numbers: ByteArray; // Stored as 4-byte ints
+ counts: ByteArray; // Stored as 4-byte ints
+ name: String;
+ schedule: String;
+ code: String;
+}
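// OnHeapBasicBlockProfilerData keeps its counters as raw bytes ("stored as
// 4-byte ints"). A sketch of decoding such a buffer back into 32-bit values,
// assuming the ints were written in host byte order (DecodeCounts is a
// hypothetical helper, not a V8 API):
#include <cstdint>
#include <cstring>
#include <vector>

std::vector<uint32_t> DecodeCounts(const std::vector<uint8_t>& bytes) {
  std::vector<uint32_t> counts(bytes.size() / sizeof(uint32_t));
  if (!counts.empty()) {
    // memcpy sidesteps alignment and strict-aliasing concerns.
    std::memcpy(counts.data(), bytes.data(), counts.size() * sizeof(uint32_t));
  }
  return counts;
}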
diff --git a/chromium/v8/src/objects/smi.h b/chromium/v8/src/objects/smi.h
index 9468f56e527..44cd5f7446e 100644
--- a/chromium/v8/src/objects/smi.h
+++ b/chromium/v8/src/objects/smi.h
@@ -26,9 +26,7 @@ class Smi : public Object {
// in that we want them to be constexprs.
constexpr Smi() : Object() {}
explicit constexpr Smi(Address ptr) : Object(ptr) {
-#if V8_HAS_CXX14_CONSTEXPR
- DCHECK(HAS_SMI_TAG(ptr));
-#endif
+ CONSTEXPR_DCHECK(HAS_SMI_TAG(ptr));
}
// Returns the integer value.
@@ -45,9 +43,7 @@ class Smi : public Object {
// Convert a value to a Smi object.
static inline constexpr Smi FromInt(int value) {
-#if V8_HAS_CXX14_CONSTEXPR
- DCHECK(Smi::IsValid(value));
-#endif
+ CONSTEXPR_DCHECK(Smi::IsValid(value));
return Smi(Internals::IntToSmi(value));
}
@@ -73,10 +69,8 @@ class Smi : public Object {
// Returns whether value can be represented in a Smi.
static inline bool constexpr IsValid(intptr_t value) {
-#if V8_HAS_CXX14_CONSTEXPR
- DCHECK_EQ(Internals::IsValidSmi(value),
- value >= kMinValue && value <= kMaxValue);
-#endif
+ CONSTEXPR_DCHECK(Internals::IsValidSmi(value) ==
+ (value >= kMinValue && value <= kMaxValue));
return Internals::IsValidSmi(value);
}
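// CONSTEXPR_DCHECK collapses the old '#if V8_HAS_CXX14_CONSTEXPR' guards into
// a single macro: a debug check that is still allowed inside constexpr
// functions. A rough stand-in built on assert(), which compiles away under
// NDEBUG and, when the condition holds, does not block constant evaluation:
#include <cassert>

#define CONSTEXPR_DCHECK(cond) assert(cond)

constexpr int CheckedHalf(int value) {
  CONSTEXPR_DCHECK(value % 2 == 0);  // fails constant evaluation for odd input
  return value / 2;
}

static_assert(CheckedHalf(8) == 4, "evaluated at compile time");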
diff --git a/chromium/v8/src/objects/source-text-module.h b/chromium/v8/src/objects/source-text-module.h
index 7e64668a7ed..91970812543 100644
--- a/chromium/v8/src/objects/source-text-module.h
+++ b/chromium/v8/src/objects/source-text-module.h
@@ -7,6 +7,7 @@
#include "src/objects/module.h"
#include "src/objects/promise.h"
+#include "torque-generated/bit-fields-tq.h"
// Has to be the last include (doesn't have include guards):
#include "src/objects/object-macros.h"
@@ -91,8 +92,7 @@ class SourceTextModule
inline void DecrementPendingAsyncDependencies();
// Bits for flags.
- static const int kAsyncBit = 0;
- static const int kAsyncEvaluatingBit = 1;
+ DEFINE_TORQUE_GENERATED_SOURCE_TEXT_MODULE_FLAGS()
// async_evaluating, top_level_capability, pending_async_dependencies, and
// async_parent_modules are used exclusively during evaluation of async
diff --git a/chromium/v8/src/objects/source-text-module.tq b/chromium/v8/src/objects/source-text-module.tq
index fda0138695f..185443414dd 100644
--- a/chromium/v8/src/objects/source-text-module.tq
+++ b/chromium/v8/src/objects/source-text-module.tq
@@ -4,6 +4,11 @@
type SourceTextModuleInfo extends FixedArray;
+bitfield struct SourceTextModuleFlags extends uint31 {
+ async: bool: 1 bit;
+ async_evaluating: bool: 1 bit;
+}
+
@generateCppClass
extern class SourceTextModule extends Module {
// The code representing this module, or an abstraction thereof.
@@ -39,7 +44,7 @@ extern class SourceTextModule extends Module {
// The number of currently evaluating async dependencies of this module.
pending_async_dependencies: Smi;
- flags: Smi;
+ flags: SmiTagged<SourceTextModuleFlags>;
}
@generateCppClass
diff --git a/chromium/v8/src/objects/stack-frame-info-inl.h b/chromium/v8/src/objects/stack-frame-info-inl.h
index 36236f576ba..820d4324a2f 100644
--- a/chromium/v8/src/objects/stack-frame-info-inl.h
+++ b/chromium/v8/src/objects/stack-frame-info-inl.h
@@ -18,37 +18,22 @@
namespace v8 {
namespace internal {
-OBJECT_CONSTRUCTORS_IMPL(StackFrameInfo, Struct)
+TQ_OBJECT_CONSTRUCTORS_IMPL(StackFrameInfo)
NEVER_READ_ONLY_SPACE_IMPL(StackFrameInfo)
-CAST_ACCESSOR(StackFrameInfo)
-
-SMI_ACCESSORS(StackFrameInfo, line_number, kLineNumberOffset)
-SMI_ACCESSORS(StackFrameInfo, column_number, kColumnNumberOffset)
-SMI_ACCESSORS(StackFrameInfo, script_id, kScriptIdOffset)
-SMI_ACCESSORS(StackFrameInfo, wasm_function_index, kWasmFunctionIndexOffset)
-SMI_ACCESSORS(StackFrameInfo, promise_all_index, kPromiseAllIndexOffset)
-SMI_ACCESSORS_CHECKED(StackFrameInfo, function_offset, kPromiseAllIndexOffset,
- is_wasm())
-ACCESSORS(StackFrameInfo, script_name, Object, kScriptNameOffset)
-ACCESSORS(StackFrameInfo, script_name_or_source_url, Object,
- kScriptNameOrSourceUrlOffset)
-ACCESSORS(StackFrameInfo, function_name, Object, kFunctionNameOffset)
-ACCESSORS(StackFrameInfo, method_name, Object, kMethodNameOffset)
-ACCESSORS(StackFrameInfo, type_name, Object, kTypeNameOffset)
-ACCESSORS(StackFrameInfo, eval_origin, Object, kEvalOriginOffset)
-ACCESSORS(StackFrameInfo, wasm_module_name, Object, kWasmModuleNameOffset)
-ACCESSORS(StackFrameInfo, wasm_instance, Object, kWasmInstanceOffset)
-SMI_ACCESSORS(StackFrameInfo, flag, kFlagOffset)
-BOOL_ACCESSORS(StackFrameInfo, flag, is_eval, kIsEvalBit)
-BOOL_ACCESSORS(StackFrameInfo, flag, is_constructor, kIsConstructorBit)
-BOOL_ACCESSORS(StackFrameInfo, flag, is_wasm, kIsWasmBit)
-BOOL_ACCESSORS(StackFrameInfo, flag, is_asmjs_wasm, kIsAsmJsWasmBit)
-BOOL_ACCESSORS(StackFrameInfo, flag, is_user_java_script, kIsUserJavaScriptBit)
-BOOL_ACCESSORS(StackFrameInfo, flag, is_toplevel, kIsToplevelBit)
-BOOL_ACCESSORS(StackFrameInfo, flag, is_async, kIsAsyncBit)
-BOOL_ACCESSORS(StackFrameInfo, flag, is_promise_all, kIsPromiseAllBit)
+SMI_ACCESSORS_CHECKED(StackFrameInfo, function_offset,
+ kPromiseCombinatorIndexOffset, is_wasm())
+BOOL_ACCESSORS(StackFrameInfo, flag, is_eval, IsEvalBit::kShift)
+BOOL_ACCESSORS(StackFrameInfo, flag, is_constructor, IsConstructorBit::kShift)
+BOOL_ACCESSORS(StackFrameInfo, flag, is_wasm, IsWasmBit::kShift)
+BOOL_ACCESSORS(StackFrameInfo, flag, is_asmjs_wasm, IsAsmJsWasmBit::kShift)
+BOOL_ACCESSORS(StackFrameInfo, flag, is_user_java_script,
+ IsUserJavaScriptBit::kShift)
+BOOL_ACCESSORS(StackFrameInfo, flag, is_toplevel, IsToplevelBit::kShift)
+BOOL_ACCESSORS(StackFrameInfo, flag, is_async, IsAsyncBit::kShift)
+BOOL_ACCESSORS(StackFrameInfo, flag, is_promise_all, IsPromiseAllBit::kShift)
+BOOL_ACCESSORS(StackFrameInfo, flag, is_promise_any, IsPromiseAnyBit::kShift)
TQ_OBJECT_CONSTRUCTORS_IMPL(StackTraceFrame)
NEVER_READ_ONLY_SPACE_IMPL(StackTraceFrame)
diff --git a/chromium/v8/src/objects/stack-frame-info.cc b/chromium/v8/src/objects/stack-frame-info.cc
index c15ad1031cf..a6797599871 100644
--- a/chromium/v8/src/objects/stack-frame-info.cc
+++ b/chromium/v8/src/objects/stack-frame-info.cc
@@ -47,8 +47,8 @@ int StackTraceFrame::GetScriptId(Handle<StackTraceFrame> frame) {
}
// static
-int StackTraceFrame::GetPromiseAllIndex(Handle<StackTraceFrame> frame) {
- return GetFrameInfo(frame)->promise_all_index();
+int StackTraceFrame::GetPromiseCombinatorIndex(Handle<StackTraceFrame> frame) {
+ return GetFrameInfo(frame)->promise_combinator_index();
}
// static
@@ -169,6 +169,11 @@ bool StackTraceFrame::IsPromiseAll(Handle<StackTraceFrame> frame) {
}
// static
+bool StackTraceFrame::IsPromiseAny(Handle<StackTraceFrame> frame) {
+ return GetFrameInfo(frame)->is_promise_any();
+}
+
+// static
Handle<StackFrameInfo> StackTraceFrame::GetFrameInfo(
Handle<StackTraceFrame> frame) {
if (frame->frame_info().IsUndefined()) InitializeFrameInfo(frame);
@@ -326,6 +331,7 @@ void SerializeJSStackFrame(Isolate* isolate, Handle<StackTraceFrame> frame,
const bool is_toplevel = StackTraceFrame::IsToplevel(frame);
const bool is_async = StackTraceFrame::IsAsync(frame);
const bool is_promise_all = StackTraceFrame::IsPromiseAll(frame);
+ const bool is_promise_any = StackTraceFrame::IsPromiseAny(frame);
const bool is_constructor = StackTraceFrame::IsConstructor(frame);
// Note: Keep the {is_method_call} predicate in sync with the corresponding
// predicate in factory.cc where the StackFrameInfo is created.
@@ -338,7 +344,13 @@ void SerializeJSStackFrame(Isolate* isolate, Handle<StackTraceFrame> frame,
}
if (is_promise_all) {
builder->AppendCString("Promise.all (index ");
- builder->AppendInt(StackTraceFrame::GetPromiseAllIndex(frame));
+ builder->AppendInt(StackTraceFrame::GetPromiseCombinatorIndex(frame));
+ builder->AppendCString(")");
+ return;
+ }
+ if (is_promise_any) {
+ builder->AppendCString("Promise.any (index ");
+ builder->AppendInt(StackTraceFrame::GetPromiseCombinatorIndex(frame));
builder->AppendCString(")");
return;
}
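// Promise.any frames reuse the combinator-index slot that Promise.all already
// used, so serialization only differs in the label. The formatting boils down
// to this (std::string instead of V8's IncrementalStringBuilder; the function
// name is illustrative):
#include <string>

std::string PromiseCombinatorPrefix(bool is_promise_all, bool is_promise_any,
                                    int combinator_index) {
  if (!is_promise_all && !is_promise_any) return std::string();
  std::string out =
      is_promise_all ? "Promise.all (index " : "Promise.any (index ";
  out += std::to_string(combinator_index);
  out += ")";
  return out;
}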
diff --git a/chromium/v8/src/objects/stack-frame-info.h b/chromium/v8/src/objects/stack-frame-info.h
index 83a24c047a9..0da16a80c30 100644
--- a/chromium/v8/src/objects/stack-frame-info.h
+++ b/chromium/v8/src/objects/stack-frame-info.h
@@ -6,6 +6,7 @@
#define V8_OBJECTS_STACK_FRAME_INFO_H_
#include "src/objects/struct.h"
+#include "torque-generated/bit-fields-tq.h"
// Has to be the last include (doesn't have include guards):
#include "src/objects/object-macros.h"
@@ -16,24 +17,12 @@ namespace internal {
class FrameArray;
class WasmInstanceObject;
-class StackFrameInfo : public Struct {
+class StackFrameInfo
+ : public TorqueGeneratedStackFrameInfo<StackFrameInfo, Struct> {
public:
NEVER_READ_ONLY_SPACE
- DECL_INT_ACCESSORS(line_number)
- DECL_INT_ACCESSORS(column_number)
- DECL_INT_ACCESSORS(script_id)
- DECL_INT_ACCESSORS(wasm_function_index)
- DECL_INT_ACCESSORS(promise_all_index)
- // Wasm frames only: function_offset instead of promise_all_index.
+ // Wasm frames only: function_offset instead of promise_combinator_index.
DECL_INT_ACCESSORS(function_offset)
- DECL_ACCESSORS(script_name, Object)
- DECL_ACCESSORS(script_name_or_source_url, Object)
- DECL_ACCESSORS(function_name, Object)
- DECL_ACCESSORS(method_name, Object)
- DECL_ACCESSORS(type_name, Object)
- DECL_ACCESSORS(eval_origin, Object)
- DECL_ACCESSORS(wasm_module_name, Object)
- DECL_ACCESSORS(wasm_instance, Object)
DECL_BOOLEAN_ACCESSORS(is_eval)
DECL_BOOLEAN_ACCESSORS(is_constructor)
DECL_BOOLEAN_ACCESSORS(is_wasm)
@@ -42,29 +31,16 @@ class StackFrameInfo : public Struct {
DECL_BOOLEAN_ACCESSORS(is_toplevel)
DECL_BOOLEAN_ACCESSORS(is_async)
DECL_BOOLEAN_ACCESSORS(is_promise_all)
- DECL_INT_ACCESSORS(flag)
-
- DECL_CAST(StackFrameInfo)
+ DECL_BOOLEAN_ACCESSORS(is_promise_any)
// Dispatched behavior.
DECL_PRINTER(StackFrameInfo)
- DECL_VERIFIER(StackFrameInfo)
-
- DEFINE_FIELD_OFFSET_CONSTANTS(Struct::kHeaderSize,
- TORQUE_GENERATED_STACK_FRAME_INFO_FIELDS)
private:
// Bit position in the flag, from least significant bit position.
- static const int kIsEvalBit = 0;
- static const int kIsConstructorBit = 1;
- static const int kIsWasmBit = 2;
- static const int kIsAsmJsWasmBit = 3;
- static const int kIsUserJavaScriptBit = 4;
- static const int kIsToplevelBit = 5;
- static const int kIsAsyncBit = 6;
- static const int kIsPromiseAllBit = 7;
-
- OBJECT_CONSTRUCTORS(StackFrameInfo, Struct);
+ DEFINE_TORQUE_GENERATED_STACK_FRAME_INFO_FLAGS()
+
+ TQ_OBJECT_CONSTRUCTORS(StackFrameInfo)
};
// This class is used to lazily initialize a StackFrameInfo object from
@@ -85,7 +61,7 @@ class StackTraceFrame
static int GetColumnNumber(Handle<StackTraceFrame> frame);
static int GetOneBasedColumnNumber(Handle<StackTraceFrame> frame);
static int GetScriptId(Handle<StackTraceFrame> frame);
- static int GetPromiseAllIndex(Handle<StackTraceFrame> frame);
+ static int GetPromiseCombinatorIndex(Handle<StackTraceFrame> frame);
static int GetFunctionOffset(Handle<StackTraceFrame> frame);
static int GetWasmFunctionIndex(Handle<StackTraceFrame> frame);
@@ -107,6 +83,7 @@ class StackTraceFrame
static bool IsToplevel(Handle<StackTraceFrame> frame);
static bool IsAsync(Handle<StackTraceFrame> frame);
static bool IsPromiseAll(Handle<StackTraceFrame> frame);
+ static bool IsPromiseAny(Handle<StackTraceFrame> frame);
private:
static Handle<StackFrameInfo> GetFrameInfo(Handle<StackTraceFrame> frame);
diff --git a/chromium/v8/src/objects/stack-frame-info.tq b/chromium/v8/src/objects/stack-frame-info.tq
index 801e2bc5a0d..b8b218bce14 100644
--- a/chromium/v8/src/objects/stack-frame-info.tq
+++ b/chromium/v8/src/objects/stack-frame-info.tq
@@ -2,21 +2,34 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
+bitfield struct StackFrameInfoFlags extends uint31 {
+ is_eval: bool: 1 bit;
+ is_constructor: bool: 1 bit;
+ is_wasm: bool: 1 bit;
+ is_asm_js_wasm: bool: 1 bit;
+ is_user_java_script: bool: 1 bit;
+ is_toplevel: bool: 1 bit;
+ is_async: bool: 1 bit;
+ is_promise_all: bool: 1 bit;
+ is_promise_any: bool: 1 bit;
+}
+
+@generateCppClass
extern class StackFrameInfo extends Struct {
line_number: Smi;
column_number: Smi;
- promise_all_index: Smi;
+ promise_combinator_index: Smi;
script_id: Smi;
wasm_function_index: Smi;
- script_name: String|Null|Undefined;
- script_name_or_source_url: String|Null|Undefined;
+ script_name: Object;
+ script_name_or_source_url: Object;
function_name: String|Null|Undefined;
method_name: String|Null|Undefined;
type_name: String|Null|Undefined;
eval_origin: String|Null|Undefined;
wasm_module_name: String|Null|Undefined;
wasm_instance: WasmInstanceObject|Null|Undefined;
- flag: Smi;
+ flag: SmiTagged<StackFrameInfoFlags>;
}
@generateCppClass
diff --git a/chromium/v8/src/objects/string-table.h b/chromium/v8/src/objects/string-table.h
index 76f29a01e38..418eee0281d 100644
--- a/chromium/v8/src/objects/string-table.h
+++ b/chromium/v8/src/objects/string-table.h
@@ -113,7 +113,7 @@ class StringSet : public HashTable<StringSet, StringSetShape> {
public:
V8_EXPORT_PRIVATE static Handle<StringSet> New(Isolate* isolate);
V8_EXPORT_PRIVATE static Handle<StringSet> Add(Isolate* isolate,
- Handle<StringSet> blacklist,
+ Handle<StringSet> stringset,
Handle<String> name);
V8_EXPORT_PRIVATE bool Has(Isolate* isolate, Handle<String> name);
diff --git a/chromium/v8/src/objects/string.cc b/chromium/v8/src/objects/string.cc
index 90abd00ebac..9d07740e19c 100644
--- a/chromium/v8/src/objects/string.cc
+++ b/chromium/v8/src/objects/string.cc
@@ -298,69 +298,60 @@ bool String::SupportsExternalization() {
return !isolate->heap()->IsInGCPostProcessing();
}
-void String::StringShortPrint(StringStream* accumulator, bool show_details) {
- const char* internalized_marker = this->IsInternalizedString() ? "#" : "";
-
- int len = length();
- if (len > kMaxShortPrintLength) {
- accumulator->Add("<Very long string[%s%u]>", internalized_marker, len);
- return;
+const char* String::PrefixForDebugPrint() const {
+ StringShape shape(*this);
+ if (IsTwoByteRepresentation()) {
+ StringShape shape(*this);
+ if (shape.IsInternalized()) {
+ return "u#";
+ } else if (shape.IsCons()) {
+ return "uc\"";
+ } else if (shape.IsThin()) {
+ return "u>\"";
+ } else {
+ return "u\"";
+ }
+ } else {
+ StringShape shape(*this);
+ if (shape.IsInternalized()) {
+ return "#";
+ } else if (shape.IsCons()) {
+ return "c\"";
+ } else if (shape.IsThin()) {
+ return ">\"";
+ } else {
+ return "\"";
+ }
}
+ UNREACHABLE();
+}
+const char* String::SuffixForDebugPrint() const {
+ StringShape shape(*this);
+ if (shape.IsInternalized()) return "";
+ return "\"";
+}
+
+void String::StringShortPrint(StringStream* accumulator) {
if (!LooksValid()) {
accumulator->Add("<Invalid String>");
return;
}
- StringCharacterStream stream(*this);
+ const int len = length();
+ accumulator->Add("<String[%u]: ", len);
+ accumulator->Add(PrefixForDebugPrint());
- bool truncated = false;
if (len > kMaxShortPrintLength) {
- len = kMaxShortPrintLength;
- truncated = true;
+ accumulator->Add("...<truncated>>");
+ accumulator->Add(SuffixForDebugPrint());
+ accumulator->Put('>');
+ return;
}
- bool one_byte = true;
- for (int i = 0; i < len; i++) {
- uint16_t c = stream.GetNext();
- if (c < 32 || c >= 127) {
- one_byte = false;
- }
- }
- stream.Reset(*this);
- if (one_byte) {
- if (show_details)
- accumulator->Add("<String[%s%u]: ", internalized_marker, length());
- for (int i = 0; i < len; i++) {
- accumulator->Put(static_cast<char>(stream.GetNext()));
- }
- if (show_details) accumulator->Put('>');
- } else {
- // Backslash indicates that the string contains control
- // characters and that backslashes are therefore escaped.
- if (show_details)
- accumulator->Add("<String[%s%u]\\: ", internalized_marker, length());
- for (int i = 0; i < len; i++) {
- uint16_t c = stream.GetNext();
- if (c == '\n') {
- accumulator->Add("\\n");
- } else if (c == '\r') {
- accumulator->Add("\\r");
- } else if (c == '\\') {
- accumulator->Add("\\\\");
- } else if (c < 32 || c > 126) {
- accumulator->Add("\\x%02x", c);
- } else {
- accumulator->Put(static_cast<char>(c));
- }
- }
- if (truncated) {
- accumulator->Put('.');
- accumulator->Put('.');
- accumulator->Put('.');
- }
- if (show_details) accumulator->Put('>');
- }
+ PrintUC16(accumulator, 0, len);
+ accumulator->Add(SuffixForDebugPrint());
+ accumulator->Put('>');
}
void String::PrintUC16(std::ostream& os, int start, int end) { // NOLINT
@@ -371,6 +362,25 @@ void String::PrintUC16(std::ostream& os, int start, int end) { // NOLINT
}
}
+void String::PrintUC16(StringStream* accumulator, int start, int end) {
+ if (end < 0) end = length();
+ StringCharacterStream stream(*this, start);
+ for (int i = start; i < end && stream.HasMore(); i++) {
+ uint16_t c = stream.GetNext();
+ if (c == '\n') {
+ accumulator->Add("\\n");
+ } else if (c == '\r') {
+ accumulator->Add("\\r");
+ } else if (c == '\\') {
+ accumulator->Add("\\\\");
+ } else if (!std::isprint(c)) {
+ accumulator->Add("\\x%02x", c);
+ } else {
+ accumulator->Put(static_cast<char>(c));
+ }
+ }
+}
+
// static
Handle<String> String::Trim(Isolate* isolate, Handle<String> string,
TrimMode mode) {
@@ -410,9 +420,9 @@ int32_t String::ToArrayIndex(Address addr) {
bool String::LooksValid() {
// TODO(leszeks): Maybe remove this check entirely, Heap::Contains uses
// basically the same logic as the way we access the heap in the first place.
- MemoryChunk* chunk = MemoryChunk::FromHeapObject(*this);
// RO_SPACE objects should always be valid.
if (ReadOnlyHeap::Contains(*this)) return true;
+ BasicMemoryChunk* chunk = BasicMemoryChunk::FromHeapObject(*this);
if (chunk->heap() == nullptr) return false;
return chunk->heap()->Contains(*this);
}
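// The new PrintUC16(StringStream*, ...) overload prints code units verbatim
// and escapes \n, \r, backslashes and non-printable characters as \xNN. The
// same escaping over a plain code-unit buffer, writing to std::ostream
// (standalone sketch, not the V8 implementation):
#include <cstdint>
#include <cstdio>
#include <ostream>
#include <vector>

void PrintEscaped(std::ostream& os, const std::vector<uint16_t>& chars) {
  for (uint16_t c : chars) {
    if (c == '\n') {
      os << "\\n";
    } else if (c == '\r') {
      os << "\\r";
    } else if (c == '\\') {
      os << "\\\\";
    } else if (c < 0x20 || c > 0x7E) {
      char buf[8];  // "\x" + up to four hex digits + NUL
      std::snprintf(buf, sizeof(buf), "\\x%02x", static_cast<unsigned>(c));
      os << buf;
    } else {
      os << static_cast<char>(c);
    }
  }
}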
diff --git a/chromium/v8/src/objects/string.h b/chromium/v8/src/objects/string.h
index f9f92a53dd1..7c275cc6cbf 100644
--- a/chromium/v8/src/objects/string.h
+++ b/chromium/v8/src/objects/string.h
@@ -352,9 +352,20 @@ class String : public TorqueGeneratedString<String, Name> {
// For use during stack traces. Performs rudimentary sanity check.
bool LooksValid();
- // Dispatched behavior.
- void StringShortPrint(StringStream* accumulator, bool show_details = true);
+ // Printing utility functions.
+ // - PrintUC16 prints the raw string contents to the given stream.
+ // Non-printable characters are formatted as hex, but otherwise the string
+ // is printed as-is.
+ // - StringShortPrint and StringPrint have extra formatting: they add a
+ // prefix and suffix depending on the string kind, may add other information
+ // such as the string heap object address, may truncate long strings, etc.
+ const char* PrefixForDebugPrint() const;
+ const char* SuffixForDebugPrint() const;
+ void StringShortPrint(StringStream* accumulator);
void PrintUC16(std::ostream& os, int start = 0, int end = -1); // NOLINT
+ void PrintUC16(StringStream* accumulator, int start, int end);
+
+ // Dispatched behavior.
#if defined(DEBUG) || defined(OBJECT_PRINT)
char* ToAsciiArray();
#endif
diff --git a/chromium/v8/src/objects/string.tq b/chromium/v8/src/objects/string.tq
index 7d3f250964d..6c4ff691c0c 100644
--- a/chromium/v8/src/objects/string.tq
+++ b/chromium/v8/src/objects/string.tq
@@ -11,6 +11,7 @@ extern class String extends Name {
@generateCppClass
@generateBodyDescriptor
+@doNotGenerateCast
extern class ConsString extends String {
first: String;
second: String;
@@ -18,35 +19,46 @@ extern class ConsString extends String {
@abstract
@generateBodyDescriptor
+@doNotGenerateCast
extern class ExternalString extends String {
resource: ExternalPointer;
resource_data: ExternalPointer;
}
-extern class ExternalOneByteString extends ExternalString {}
-extern class ExternalTwoByteString extends ExternalString {}
+@doNotGenerateCast
+extern class ExternalOneByteString extends ExternalString {
+}
+
+@doNotGenerateCast
+extern class ExternalTwoByteString extends ExternalString {
+}
@generateCppClass
+@doNotGenerateCast
extern class InternalizedString extends String {
}
@abstract
@generateCppClass
+@doNotGenerateCast
extern class SeqString extends String {
}
@generateCppClass
@generateBodyDescriptor
+@doNotGenerateCast
extern class SeqOneByteString extends SeqString {
chars[length]: char8;
}
@generateCppClass
@generateBodyDescriptor
+@doNotGenerateCast
extern class SeqTwoByteString extends SeqString {
chars[length]: char16;
}
@generateCppClass
@generateBodyDescriptor
+@doNotGenerateCast
extern class SlicedString extends String {
parent: String;
offset: Smi;
@@ -54,6 +66,7 @@ extern class SlicedString extends String {
@generateCppClass
@generateBodyDescriptor
+@doNotGenerateCast
extern class ThinString extends String {
actual: String;
}
diff --git a/chromium/v8/src/objects/tagged-impl.h b/chromium/v8/src/objects/tagged-impl.h
index 036075fc2d6..9ef8b58d991 100644
--- a/chromium/v8/src/objects/tagged-impl.h
+++ b/chromium/v8/src/objects/tagged-impl.h
@@ -88,9 +88,8 @@ class TaggedImpl {
// Returns true if this tagged value is a strong pointer to a HeapObject.
constexpr inline bool IsStrong() const {
-#if V8_HAS_CXX14_CONSTEXPR
- DCHECK_IMPLIES(!kCanBeWeak, !IsSmi() == HAS_STRONG_HEAP_OBJECT_TAG(ptr_));
-#endif
+ CONSTEXPR_DCHECK(kCanBeWeak ||
+ (!IsSmi() == HAS_STRONG_HEAP_OBJECT_TAG(ptr_)));
return kCanBeWeak ? HAS_STRONG_HEAP_OBJECT_TAG(ptr_) : !IsSmi();
}
diff --git a/chromium/v8/src/objects/tagged-index.h b/chromium/v8/src/objects/tagged-index.h
index c7d6a85a0d1..e8cfbc76087 100644
--- a/chromium/v8/src/objects/tagged-index.h
+++ b/chromium/v8/src/objects/tagged-index.h
@@ -38,9 +38,7 @@ class TaggedIndex : public Object {
// special in that we want them to be constexprs.
constexpr TaggedIndex() : Object() {}
explicit constexpr TaggedIndex(Address ptr) : Object(ptr) {
-#if V8_HAS_CXX14_CONSTEXPR
- DCHECK(HAS_SMI_TAG(ptr));
-#endif
+ CONSTEXPR_DCHECK(HAS_SMI_TAG(ptr));
}
// Returns the integer value.
@@ -51,9 +49,7 @@ class TaggedIndex : public Object {
// Convert a value to a TaggedIndex object.
static inline TaggedIndex FromIntptr(intptr_t value) {
-#if V8_HAS_CXX14_CONSTEXPR
- DCHECK(TaggedIndex::IsValid(value));
-#endif
+ CONSTEXPR_DCHECK(TaggedIndex::IsValid(value));
return TaggedIndex((static_cast<Address>(value) << kSmiTagSize) | kSmiTag);
}
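
Editor's note: the two hunks above fold the `#if V8_HAS_CXX14_CONSTEXPR` / `DCHECK` / `#endif` pattern into a single CONSTEXPR_DCHECK. A hedged sketch of the idea (the real macro lives in V8's src/base/logging.h and may differ; DCHECK is modelled here with assert, and C++14 relaxed constexpr is assumed):

```cpp
#include <cassert>

// Sketch only. V8's real CONSTEXPR_DCHECK compiles the check away when
// relaxed constexpr is not available, which is the same effect the
// removed #if blocks had.
#define CONSTEXPR_DCHECK_SKETCH(cond) assert(cond)

class TaggedIndexLike {
 public:
  explicit constexpr TaggedIndexLike(long ptr) : ptr_(ptr) {
    // Runs during constant evaluation too; a failing condition turns the
    // constant expression into a compile error instead of a runtime crash.
    CONSTEXPR_DCHECK_SKETCH((ptr & 1) == 0);  // stand-in for HAS_SMI_TAG
  }
  constexpr long value() const { return ptr_ >> 1; }

 private:
  long ptr_;
};

int main() {
  constexpr TaggedIndexLike idx(42 << 1);
  static_assert(idx.value() == 42, "round-trips through the tag shift");
}
```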
diff --git a/chromium/v8/src/objects/template.tq b/chromium/v8/src/objects/template.tq
index d3e251aa165..1336fb19ba7 100644
--- a/chromium/v8/src/objects/template.tq
+++ b/chromium/v8/src/objects/template.tq
@@ -5,46 +5,56 @@
@abstract
@generateCppClass
extern class TemplateInfo extends Struct {
- tag: Object;
- serial_number: Object;
+ tag: Smi;
+ serial_number: Smi;
number_of_properties: Smi;
- property_list: Object;
- property_accessors: Object;
+ property_list: TemplateList|Undefined;
+ property_accessors: TemplateList|Undefined;
}
@generateCppClass
+@generatePrint
extern class FunctionTemplateRareData extends Struct {
// See DECL_RARE_ACCESSORS in FunctionTemplateInfo.
- prototype_template: Object;
- prototype_provider_template: Object;
- parent_template: Object;
- named_property_handler: Object;
- indexed_property_handler: Object;
- instance_template: Object;
- instance_call_handler: Object;
- access_check_info: Object;
- c_function: Foreign|Smi;
- c_signature: Foreign|Smi;
+ prototype_template: ObjectTemplateInfo|Undefined;
+ prototype_provider_template: FunctionTemplateInfo|Undefined;
+ parent_template: FunctionTemplateInfo|Undefined;
+ named_property_handler: InterceptorInfo|Undefined;
+ indexed_property_handler: InterceptorInfo|Undefined;
+ instance_template: ObjectTemplateInfo|Undefined;
+ instance_call_handler: CallHandlerInfo|Undefined;
+ access_check_info: AccessCheckInfo|Undefined;
+ c_function: Foreign|Zero;
+ c_signature: Foreign|Zero;
+}
+
+bitfield struct FunctionTemplateInfoFlags extends uint31 {
+ undetectable: bool: 1 bit;
+ needs_access_check: bool: 1 bit;
+ read_only_prototype: bool: 1 bit;
+ remove_prototype: bool: 1 bit;
+ do_not_cache: bool: 1 bit;
+ accept_any_receiver: bool: 1 bit;
}
@generateCppClass
extern class FunctionTemplateInfo extends TemplateInfo {
// Handler invoked when calling an instance of this FunctionTemplateInfo.
// Either CallHandlerInfo or Undefined.
- call_code: Object;
- class_name: Object;
+ call_code: CallHandlerInfo|Undefined;
+ class_name: String|Undefined;
// If the signature is a FunctionTemplateInfo it is used to check whether the
// receiver calling the associated JSFunction is a compatible receiver, i.e.
// it is an instance of the signature FunctionTemplateInfo or any of the
// receiver's prototypes are.
- signature: Object;
+ signature: FunctionTemplateInfo|Undefined;
// If any of the setters declared by DECL_RARE_ACCESSORS are used then a
// FunctionTemplateRareData will be stored here. Until then this contains
// undefined.
- rare_data: HeapObject;
- shared_function_info: Object;
+ rare_data: FunctionTemplateRareData|Undefined;
+ shared_function_info: SharedFunctionInfo|Undefined;
// Internal field to store a flag bitfield.
- flag: Smi;
+ flag: SmiTagged<FunctionTemplateInfoFlags>;
// "length" property of the final JSFunction.
length: Smi;
// Either the_hole or a private symbol. Used to cache the result on
@@ -53,8 +63,13 @@ extern class FunctionTemplateInfo extends TemplateInfo {
cached_property_name: Object;
}
+bitfield struct ObjectTemplateInfoFlags extends uint31 {
+ is_immutable_prototype: bool: 1 bit;
+ embedder_field_count: int32: 29 bit;
+}
+
@generateCppClass
extern class ObjectTemplateInfo extends TemplateInfo {
- constructor: Object;
- data: Object;
+ constructor: FunctionTemplateInfo|Undefined;
+ data: SmiTagged<ObjectTemplateInfoFlags>;
}
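
Editor's note: the Torque `bitfield struct` declarations above replace hand-written `base::BitField` usage on a Smi-encoded flags word. A minimal C++ sketch of the decode/update pattern those generated bitfields follow (the template here is an illustration, not V8's actual base::BitField):

```cpp
#include <cstdint>
#include <iostream>

// Each field knows its shift and width within a flags word and can
// decode or update its value without disturbing the other fields.
template <typename T, int kShift, int kSize>
struct BitFieldSketch {
  static constexpr uint32_t kMask = ((uint32_t{1} << kSize) - 1) << kShift;
  static constexpr T decode(uint32_t word) {
    return static_cast<T>((word & kMask) >> kShift);
  }
  static constexpr uint32_t update(uint32_t word, T value) {
    return (word & ~kMask) | ((static_cast<uint32_t>(value) << kShift) & kMask);
  }
};

// Mirrors ObjectTemplateInfoFlags: 1 bit + 29 bits packed into one word.
using IsImmutablePrototypeBit = BitFieldSketch<bool, 0, 1>;
using EmbedderFieldCountBits = BitFieldSketch<int, 1, 29>;

int main() {
  uint32_t data = 0;
  data = IsImmutablePrototypeBit::update(data, true);
  data = EmbedderFieldCountBits::update(data, 7);
  std::cout << IsImmutablePrototypeBit::decode(data) << " "
            << EmbedderFieldCountBits::decode(data) << "\n";  // 1 7
}
```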
diff --git a/chromium/v8/src/objects/templates-inl.h b/chromium/v8/src/objects/templates-inl.h
index 988230b7ae4..8dd5aa6e2db 100644
--- a/chromium/v8/src/objects/templates-inl.h
+++ b/chromium/v8/src/objects/templates-inl.h
@@ -24,16 +24,17 @@ TQ_OBJECT_CONSTRUCTORS_IMPL(FunctionTemplateRareData)
NEVER_READ_ONLY_SPACE_IMPL(TemplateInfo)
-BOOL_ACCESSORS(FunctionTemplateInfo, flag, undetectable, kUndetectableBit)
+BOOL_ACCESSORS(FunctionTemplateInfo, flag, undetectable,
+ UndetectableBit::kShift)
BOOL_ACCESSORS(FunctionTemplateInfo, flag, needs_access_check,
- kNeedsAccessCheckBit)
+ NeedsAccessCheckBit::kShift)
BOOL_ACCESSORS(FunctionTemplateInfo, flag, read_only_prototype,
- kReadOnlyPrototypeBit)
+ ReadOnlyPrototypeBit::kShift)
BOOL_ACCESSORS(FunctionTemplateInfo, flag, remove_prototype,
- kRemovePrototypeBit)
-BOOL_ACCESSORS(FunctionTemplateInfo, flag, do_not_cache, kDoNotCacheBit)
+ RemovePrototypeBit::kShift)
+BOOL_ACCESSORS(FunctionTemplateInfo, flag, do_not_cache, DoNotCacheBit::kShift)
BOOL_ACCESSORS(FunctionTemplateInfo, flag, accept_any_receiver,
- kAcceptAnyReceiver)
+ AcceptAnyReceiverBit::kShift)
// static
FunctionTemplateRareData FunctionTemplateInfo::EnsureFunctionTemplateRareData(
@@ -61,16 +62,18 @@ FunctionTemplateRareData FunctionTemplateInfo::EnsureFunctionTemplateRareData(
rare_data.set_##Name(*Name); \
}
-RARE_ACCESSORS(prototype_template, PrototypeTemplate, Object, undefined)
-RARE_ACCESSORS(prototype_provider_template, PrototypeProviderTemplate, Object,
+RARE_ACCESSORS(prototype_template, PrototypeTemplate, HeapObject, undefined)
+RARE_ACCESSORS(prototype_provider_template, PrototypeProviderTemplate,
+ HeapObject, undefined)
+RARE_ACCESSORS(parent_template, ParentTemplate, HeapObject, undefined)
+RARE_ACCESSORS(named_property_handler, NamedPropertyHandler, HeapObject,
undefined)
-RARE_ACCESSORS(parent_template, ParentTemplate, Object, undefined)
-RARE_ACCESSORS(named_property_handler, NamedPropertyHandler, Object, undefined)
-RARE_ACCESSORS(indexed_property_handler, IndexedPropertyHandler, Object,
+RARE_ACCESSORS(indexed_property_handler, IndexedPropertyHandler, HeapObject,
undefined)
-RARE_ACCESSORS(instance_template, InstanceTemplate, Object, undefined)
-RARE_ACCESSORS(instance_call_handler, InstanceCallHandler, Object, undefined)
-RARE_ACCESSORS(access_check_info, AccessCheckInfo, Object, undefined)
+RARE_ACCESSORS(instance_template, InstanceTemplate, HeapObject, undefined)
+RARE_ACCESSORS(instance_call_handler, InstanceCallHandler, HeapObject,
+ undefined)
+RARE_ACCESSORS(access_check_info, AccessCheckInfo, HeapObject, undefined)
RARE_ACCESSORS(c_function, CFunction, Object, Smi(0))
RARE_ACCESSORS(c_signature, CSignature, Object, Smi(0))
#undef RARE_ACCESSORS
@@ -110,26 +113,20 @@ ObjectTemplateInfo ObjectTemplateInfo::GetParent(Isolate* isolate) {
}
int ObjectTemplateInfo::embedder_field_count() const {
- Object value = data();
- DCHECK(value.IsSmi());
- return EmbedderFieldCount::decode(Smi::ToInt(value));
+ return EmbedderFieldCountBits::decode(data());
}
void ObjectTemplateInfo::set_embedder_field_count(int count) {
DCHECK_LE(count, JSObject::kMaxEmbedderFields);
- return set_data(
- Smi::FromInt(EmbedderFieldCount::update(Smi::ToInt(data()), count)));
+ return set_data(EmbedderFieldCountBits::update(data(), count));
}
bool ObjectTemplateInfo::immutable_proto() const {
- Object value = data();
- DCHECK(value.IsSmi());
- return IsImmutablePrototype::decode(Smi::ToInt(value));
+ return IsImmutablePrototypeBit::decode(data());
}
void ObjectTemplateInfo::set_immutable_proto(bool immutable) {
- return set_data(Smi::FromInt(
- IsImmutablePrototype::update(Smi::ToInt(data()), immutable)));
+ return set_data(IsImmutablePrototypeBit::update(data(), immutable));
}
bool FunctionTemplateInfo::IsTemplateFor(JSObject object) {
diff --git a/chromium/v8/src/objects/templates.h b/chromium/v8/src/objects/templates.h
index 01d40eb29ae..a3b509928be 100644
--- a/chromium/v8/src/objects/templates.h
+++ b/chromium/v8/src/objects/templates.h
@@ -6,6 +6,7 @@
#define V8_OBJECTS_TEMPLATES_H_
#include "src/objects/struct.h"
+#include "torque-generated/bit-fields-tq.h"
// Has to be the last include (doesn't have include guards):
#include "src/objects/object-macros.h"
@@ -32,9 +33,6 @@ class FunctionTemplateRareData
: public TorqueGeneratedFunctionTemplateRareData<FunctionTemplateRareData,
Struct> {
public:
- // Dispatched behavior.
- DECL_PRINTER(FunctionTemplateRareData)
-
TQ_OBJECT_CONSTRUCTORS(FunctionTemplateRareData)
};
@@ -51,36 +49,37 @@ class FunctionTemplateInfo
// ObjectTemplateInfo or Undefined, used for the prototype property of the
// resulting JSFunction instance of this FunctionTemplate.
- DECL_RARE_ACCESSORS(prototype_template, PrototypeTemplate, Object)
+ DECL_RARE_ACCESSORS(prototype_template, PrototypeTemplate, HeapObject)
// In the case the prototype_template is Undefined we use the
// prototype_provider_template to retrieve the instance prototype. Either
- // contains an ObjectTemplateInfo or Undefined.
+ // contains a FunctionTemplateInfo or Undefined.
DECL_RARE_ACCESSORS(prototype_provider_template, PrototypeProviderTemplate,
- Object)
+ HeapObject)
// Used to create prototype chains. The parent_template's prototype is set as
// __proto__ of this FunctionTemplate's instance prototype. Is either a
// FunctionTemplateInfo or Undefined.
- DECL_RARE_ACCESSORS(parent_template, ParentTemplate, Object)
+ DECL_RARE_ACCESSORS(parent_template, ParentTemplate, HeapObject)
// Returns an InterceptorInfo or Undefined for named properties.
- DECL_RARE_ACCESSORS(named_property_handler, NamedPropertyHandler, Object)
+ DECL_RARE_ACCESSORS(named_property_handler, NamedPropertyHandler, HeapObject)
// Returns an InterceptorInfo or Undefined for indexed properties/elements.
- DECL_RARE_ACCESSORS(indexed_property_handler, IndexedPropertyHandler, Object)
+ DECL_RARE_ACCESSORS(indexed_property_handler, IndexedPropertyHandler,
+ HeapObject)
// An ObjectTemplateInfo that is used when instantiating the JSFunction
// associated with this FunctionTemplateInfo. Contains either an
// ObjectTemplateInfo or Undefined. A default instance_template is assigned
// upon first instantiation if it's Undefined.
- DECL_RARE_ACCESSORS(instance_template, InstanceTemplate, Object)
+ DECL_RARE_ACCESSORS(instance_template, InstanceTemplate, HeapObject)
// Either a CallHandlerInfo or Undefined. If an instance_call_handler is
// provided the instances created from the associated JSFunction are marked as
// callable.
- DECL_RARE_ACCESSORS(instance_call_handler, InstanceCallHandler, Object)
+ DECL_RARE_ACCESSORS(instance_call_handler, InstanceCallHandler, HeapObject)
- DECL_RARE_ACCESSORS(access_check_info, AccessCheckInfo, Object)
+ DECL_RARE_ACCESSORS(access_check_info, AccessCheckInfo, HeapObject)
DECL_RARE_ACCESSORS(c_function, CFunction, Object)
DECL_RARE_ACCESSORS(c_signature, CSignature, Object)
@@ -138,12 +137,7 @@ class FunctionTemplateInfo
Handle<Object> getter);
// Bit position in the flag, from least significant bit position.
- static const int kUndetectableBit = 0;
- static const int kNeedsAccessCheckBit = 1;
- static const int kReadOnlyPrototypeBit = 2;
- static const int kRemovePrototypeBit = 3;
- static const int kDoNotCacheBit = 4;
- static const int kAcceptAnyReceiver = 5;
+ DEFINE_TORQUE_GENERATED_FUNCTION_TEMPLATE_INFO_FLAGS()
private:
static inline FunctionTemplateRareData EnsureFunctionTemplateRareData(
@@ -170,8 +164,7 @@ class ObjectTemplateInfo
inline ObjectTemplateInfo GetParent(Isolate* isolate);
private:
- using IsImmutablePrototype = base::BitField<bool, 0, 1>;
- using EmbedderFieldCount = IsImmutablePrototype::Next<int, 29>;
+ DEFINE_TORQUE_GENERATED_OBJECT_TEMPLATE_INFO_FLAGS()
TQ_OBJECT_CONSTRUCTORS(ObjectTemplateInfo)
};
diff --git a/chromium/v8/src/objects/transitions.cc b/chromium/v8/src/objects/transitions.cc
index e0ba40ce7d0..1309ca82be5 100644
--- a/chromium/v8/src/objects/transitions.cc
+++ b/chromium/v8/src/objects/transitions.cc
@@ -36,6 +36,7 @@ bool TransitionsAccessor::HasSimpleTransitionTo(Map map) {
void TransitionsAccessor::Insert(Handle<Name> name, Handle<Map> target,
SimpleTransitionFlag flag) {
DCHECK(!map_handle_.is_null());
+ DCHECK_NE(kPrototypeInfo, encoding());
target->SetBackPointer(map_);
// If the map doesn't have any transitions at all yet, install the new one.
@@ -49,23 +50,25 @@ void TransitionsAccessor::Insert(Handle<Name> name, Handle<Map> target,
isolate_->factory()->NewTransitionArray(0, 1);
ReplaceTransitions(MaybeObject::FromObject(*result));
Reload();
+ DCHECK_EQ(kFullTransitionArray, encoding());
}
- bool is_special_transition = flag == SPECIAL_TRANSITION;
// If the map has a simple transition, check if it should be overwritten.
Map simple_transition = GetSimpleTransition();
if (!simple_transition.is_null()) {
- Name key = GetSimpleTransitionKey(simple_transition);
- PropertyDetails old_details = GetSimpleTargetDetails(simple_transition);
- PropertyDetails new_details = is_special_transition
- ? PropertyDetails::Empty()
- : GetTargetDetails(*name, *target);
- if (flag == SIMPLE_PROPERTY_TRANSITION && key.Equals(*name) &&
- old_details.kind() == new_details.kind() &&
- old_details.attributes() == new_details.attributes()) {
- ReplaceTransitions(HeapObjectReference::Weak(*target));
- return;
+ DCHECK_EQ(kWeakRef, encoding());
+
+ if (flag == SIMPLE_PROPERTY_TRANSITION) {
+ Name key = GetSimpleTransitionKey(simple_transition);
+ PropertyDetails old_details = GetSimpleTargetDetails(simple_transition);
+ PropertyDetails new_details = GetTargetDetails(*name, *target);
+ if (key.Equals(*name) && old_details.kind() == new_details.kind() &&
+ old_details.attributes() == new_details.attributes()) {
+ ReplaceTransitions(HeapObjectReference::Weak(*target));
+ return;
+ }
}
+
// Otherwise allocate a full TransitionArray with slack for a new entry.
Handle<Map> map(simple_transition, isolate_);
Handle<TransitionArray> result =
@@ -75,12 +78,9 @@ void TransitionsAccessor::Insert(Handle<Name> name, Handle<Map> target,
simple_transition = GetSimpleTransition();
if (!simple_transition.is_null()) {
DCHECK_EQ(*map, simple_transition);
- if (encoding_ == kWeakRef) {
- result->Set(0, GetSimpleTransitionKey(simple_transition),
- HeapObjectReference::Weak(simple_transition));
- } else {
- UNREACHABLE();
- }
+ DCHECK_EQ(kWeakRef, encoding());
+ result->Set(0, GetSimpleTransitionKey(simple_transition),
+ HeapObjectReference::Weak(simple_transition));
} else {
result->SetNumberOfTransitions(0);
}
@@ -94,6 +94,7 @@ void TransitionsAccessor::Insert(Handle<Name> name, Handle<Map> target,
int number_of_transitions = 0;
int new_nof = 0;
int insertion_index = kNotFound;
+ const bool is_special_transition = flag == SPECIAL_TRANSITION;
DCHECK_EQ(is_special_transition,
IsSpecialTransition(ReadOnlyRoots(isolate_), *name));
PropertyDetails details = is_special_transition
@@ -104,7 +105,6 @@ void TransitionsAccessor::Insert(Handle<Name> name, Handle<Map> target,
DisallowHeapAllocation no_gc;
TransitionArray array = transitions();
number_of_transitions = array.number_of_transitions();
- new_nof = number_of_transitions;
int index = is_special_transition
? array.SearchSpecial(Symbol::cast(*name), &insertion_index)
@@ -116,19 +116,20 @@ void TransitionsAccessor::Insert(Handle<Name> name, Handle<Map> target,
return;
}
- ++new_nof;
+ new_nof = number_of_transitions + 1;
CHECK_LE(new_nof, kMaxNumberOfTransitions);
- DCHECK(insertion_index >= 0 && insertion_index <= number_of_transitions);
+ DCHECK_GE(insertion_index, 0);
+ DCHECK_LE(insertion_index, number_of_transitions);
// If there is enough capacity, insert new entry into the existing array.
if (new_nof <= array.Capacity()) {
array.SetNumberOfTransitions(new_nof);
- for (index = number_of_transitions; index > insertion_index; --index) {
- array.SetKey(index, array.GetKey(index - 1));
- array.SetRawTarget(index, array.GetRawTarget(index - 1));
+ for (int i = number_of_transitions; i > insertion_index; --i) {
+ array.SetKey(i, array.GetKey(i - 1));
+ array.SetRawTarget(i, array.GetRawTarget(i - 1));
}
- array.SetKey(index, *name);
- array.SetRawTarget(index, HeapObjectReference::Weak(*target));
+ array.SetKey(insertion_index, *name);
+ array.SetRawTarget(insertion_index, HeapObjectReference::Weak(*target));
SLOW_DCHECK(array.IsSortedNoDuplicates());
return;
}
@@ -146,23 +147,19 @@ void TransitionsAccessor::Insert(Handle<Name> name, Handle<Map> target,
DisallowHeapAllocation no_gc;
TransitionArray array = transitions();
if (array.number_of_transitions() != number_of_transitions) {
- DCHECK(array.number_of_transitions() < number_of_transitions);
+ DCHECK_LT(array.number_of_transitions(), number_of_transitions);
- number_of_transitions = array.number_of_transitions();
- new_nof = number_of_transitions;
-
- insertion_index = kNotFound;
int index = is_special_transition
? array.SearchSpecial(Symbol::cast(*name), &insertion_index)
: array.Search(details.kind(), *name, details.attributes(),
&insertion_index);
- if (index == kNotFound) {
- ++new_nof;
- } else {
- insertion_index = index;
- }
- DCHECK(insertion_index >= 0 && insertion_index <= number_of_transitions);
+ CHECK_EQ(index, kNotFound);
+ USE(index);
+ DCHECK_GE(insertion_index, 0);
+ DCHECK_LE(insertion_index, number_of_transitions);
+ number_of_transitions = array.number_of_transitions();
+ new_nof = number_of_transitions + 1;
result->SetNumberOfTransitions(new_nof);
}
@@ -405,26 +402,14 @@ Map TransitionsAccessor::GetMigrationTarget() {
return Map();
}
-void TransitionArray::Zap(Isolate* isolate) {
- MemsetTagged(ObjectSlot(RawFieldOfElementAt(kPrototypeTransitionsIndex)),
- ReadOnlyRoots(isolate).the_hole_value(),
- length() - kPrototypeTransitionsIndex);
- SetNumberOfTransitions(0);
-}
-
void TransitionsAccessor::ReplaceTransitions(MaybeObject new_transitions) {
if (encoding() == kFullTransitionArray) {
- TransitionArray old_transitions = transitions();
#if DEBUG
+ TransitionArray old_transitions = transitions();
CheckNewTransitionsAreConsistent(
old_transitions, new_transitions->GetHeapObjectAssumeStrong());
DCHECK(old_transitions != new_transitions->GetHeapObjectAssumeStrong());
#endif
- // Transition arrays are not shared. When one is replaced, it should not
- // keep referenced objects alive, so we zap it.
- // When there is another reference to the array somewhere (e.g. a handle),
- // not zapping turns from a waste of memory into a source of crashes.
- old_transitions.Zap(isolate_);
}
map_.set_raw_transitions(new_transitions);
MarkNeedsReload();
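
Editor's note: the reworked insertion path above shifts existing entries up by one before writing the new key/target at insertion_index. A standalone sketch of that shift-and-insert step on a plain array, under the same precondition that insertion_index lies in [0, number_of_entries]:

```cpp
#include <cassert>
#include <iostream>
#include <vector>

// Insert `value` at `insertion_index`, shifting later entries up by one,
// mirroring the SetKey/SetRawTarget loop in TransitionsAccessor::Insert.
void ShiftInsert(std::vector<int>& entries, int insertion_index, int value) {
  int number_of_entries = static_cast<int>(entries.size());
  assert(insertion_index >= 0 && insertion_index <= number_of_entries);
  entries.push_back(0);  // grow by one slot (SetNumberOfTransitions)
  for (int i = number_of_entries; i > insertion_index; --i) {
    entries[i] = entries[i - 1];
  }
  entries[insertion_index] = value;
}

int main() {
  std::vector<int> keys = {10, 20, 40};
  ShiftInsert(keys, 2, 30);
  for (int k : keys) std::cout << k << " ";  // 10 20 30 40
  std::cout << "\n";
}
```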
diff --git a/chromium/v8/src/objects/transitions.h b/chromium/v8/src/objects/transitions.h
index 5a7db13e516..7bc4d70a35d 100644
--- a/chromium/v8/src/objects/transitions.h
+++ b/chromium/v8/src/objects/transitions.h
@@ -228,7 +228,7 @@ class TransitionArray : public WeakFixedArray {
int GetSortedKeyIndex(int transition_number) { return transition_number; }
inline int number_of_entries() const;
#ifdef DEBUG
- V8_EXPORT_PRIVATE bool IsSortedNoDuplicates(int valid_entries = -1);
+ V8_EXPORT_PRIVATE bool IsSortedNoDuplicates();
#endif
void Sort();
@@ -338,8 +338,6 @@ class TransitionArray : public WeakFixedArray {
inline void Set(int transition_number, Name key, MaybeObject target);
- void Zap(Isolate* isolate);
-
OBJECT_CONSTRUCTORS(TransitionArray, WeakFixedArray);
};
diff --git a/chromium/v8/src/objects/type-hints.cc b/chromium/v8/src/objects/type-hints.cc
index cb0a6a4ea9b..da63443b4ca 100644
--- a/chromium/v8/src/objects/type-hints.cc
+++ b/chromium/v8/src/objects/type-hints.cc
@@ -39,6 +39,8 @@ std::ostream& operator<<(std::ostream& os, CompareOperationHint hint) {
return os << "SignedSmall";
case CompareOperationHint::kNumber:
return os << "Number";
+ case CompareOperationHint::kNumberOrBoolean:
+ return os << "NumberOrBoolean";
case CompareOperationHint::kNumberOrOddball:
return os << "NumberOrOddball";
case CompareOperationHint::kInternalizedString:
diff --git a/chromium/v8/src/objects/type-hints.h b/chromium/v8/src/objects/type-hints.h
index 1aa27096652..d61ebef8457 100644
--- a/chromium/v8/src/objects/type-hints.h
+++ b/chromium/v8/src/objects/type-hints.h
@@ -35,6 +35,7 @@ enum class CompareOperationHint : uint8_t {
kNone,
kSignedSmall,
kNumber,
+ kNumberOrBoolean,
kNumberOrOddball,
kInternalizedString,
kString,
diff --git a/chromium/v8/src/parsing/parser-base.h b/chromium/v8/src/parsing/parser-base.h
index 903ce2bb7f8..3519599a882 100644
--- a/chromium/v8/src/parsing/parser-base.h
+++ b/chromium/v8/src/parsing/parser-base.h
@@ -786,7 +786,7 @@ class ParserBase {
// should automatically use scope() as parent, and be fine with
// NewScope(ScopeType) above.
Scope* NewScopeWithParent(Scope* parent, ScopeType scope_type) const {
- // Must always use the specific constructors for the blacklisted scope
+ // Must always use the specific constructors for the blocklisted scope
// types.
DCHECK_NE(FUNCTION_SCOPE, scope_type);
DCHECK_NE(SCRIPT_SCOPE, scope_type);
@@ -2755,8 +2755,7 @@ ParserBase<Impl>::ParseAssignmentExpressionCoverGrammar() {
Token::Value op = peek();
if (!Token::IsArrowOrAssignmentOp(op)) return expression;
- if ((op == Token::ASSIGN_NULLISH || op == Token::ASSIGN_OR ||
- op == Token::ASSIGN_AND) &&
+ if (Token::IsLogicalAssignmentOp(op) &&
!flags().allow_harmony_logical_assignment()) {
return expression;
}
@@ -2830,13 +2829,8 @@ ParserBase<Impl>::ParseAssignmentExpressionCoverGrammar() {
ExpressionT right = ParseAssignmentExpression();
- if (op == Token::ASSIGN) {
- // We try to estimate the set of properties set by constructors. We define a
- // new property whenever there is an assignment to a property of 'this'. We
- // should probably only add properties if we haven't seen them before.
- // Otherwise we'll probably overestimate the number of properties.
- if (impl()->IsThisProperty(expression)) function_state_->AddProperty();
-
+ // Anonymous function name inference applies to =, ||=, &&=, and ??=.
+ if (op == Token::ASSIGN || Token::IsLogicalAssignmentOp(op)) {
impl()->CheckAssigningFunctionLiteralToProperty(expression, right);
// Check if the right hand side is a call to avoid inferring a
@@ -2850,10 +2844,20 @@ ParserBase<Impl>::ParseAssignmentExpressionCoverGrammar() {
impl()->SetFunctionNameFromIdentifierRef(right, expression);
} else {
+ fni_.RemoveLastFunction();
+ }
+
+ if (op == Token::ASSIGN) {
+ // We try to estimate the set of properties set by constructors. We define a
+ // new property whenever there is an assignment to a property of 'this'. We
+ // should probably only add properties if we haven't seen them before.
+ // Otherwise we'll probably overestimate the number of properties.
+ if (impl()->IsThisProperty(expression)) function_state_->AddProperty();
+ } else {
+ // Only initializers (i.e. no compound assignments) are allowed in patterns.
expression_scope()->RecordPatternError(
Scanner::Location(lhs_beg_pos, end_position()),
MessageTemplate::kInvalidDestructuringTarget);
- fni_.RemoveLastFunction();
}
return factory()->NewAssignment(op, expression, right, op_position);
diff --git a/chromium/v8/src/parsing/parser.cc b/chromium/v8/src/parsing/parser.cc
index 63b8b9c6f94..9577b373973 100644
--- a/chromium/v8/src/parsing/parser.cc
+++ b/chromium/v8/src/parsing/parser.cc
@@ -357,8 +357,8 @@ Expression* Parser::NewV8Intrinsic(const AstRawString* name,
const Runtime::Function* function =
Runtime::FunctionForName(name->raw_data(), name->length());
- // Be more premissive when fuzzing. Intrinsics are not supported.
- if (FLAG_allow_natives_for_fuzzing) {
+ // Be more permissive when fuzzing. Intrinsics are not supported.
+ if (FLAG_fuzzing) {
return NewV8RuntimeFunctionForFuzzing(function, args, pos);
}
@@ -392,13 +392,13 @@ Expression* Parser::NewV8Intrinsic(const AstRawString* name,
Expression* Parser::NewV8RuntimeFunctionForFuzzing(
const Runtime::Function* function, const ScopedPtrList<Expression>& args,
int pos) {
- CHECK(FLAG_allow_natives_for_fuzzing);
+ CHECK(FLAG_fuzzing);
- // Intrinsics are not supported for fuzzing. Only allow whitelisted runtime
+ // Intrinsics are not supported for fuzzing. Only allow allowlisted runtime
// functions. Also prevent later errors due to too few arguments and just
// ignore this call.
if (function == nullptr ||
- !Runtime::IsWhitelistedForFuzzing(function->function_id) ||
+ !Runtime::IsAllowListedForFuzzing(function->function_id) ||
function->nargs > args.length()) {
return factory()->NewUndefinedLiteral(kNoSourcePosition);
}
diff --git a/chromium/v8/src/parsing/parser.h b/chromium/v8/src/parsing/parser.h
index 472c9a71ab4..431ed5a37e9 100644
--- a/chromium/v8/src/parsing/parser.h
+++ b/chromium/v8/src/parsing/parser.h
@@ -170,10 +170,10 @@ class V8_EXPORT_PRIVATE Parser : public NON_EXPORTED_BASE(ParserBase<Parser>) {
friend class i::ArrowHeadParsingScope<ParserTypes<Parser>>;
friend bool v8::internal::parsing::ParseProgram(
ParseInfo*, Handle<Script>, MaybeHandle<ScopeInfo> maybe_outer_scope_info,
- Isolate*, parsing::ReportErrorsAndStatisticsMode stats_mode);
+ Isolate*, parsing::ReportStatisticsMode stats_mode);
friend bool v8::internal::parsing::ParseFunction(
ParseInfo*, Handle<SharedFunctionInfo> shared_info, Isolate*,
- parsing::ReportErrorsAndStatisticsMode stats_mode);
+ parsing::ReportStatisticsMode stats_mode);
bool AllowsLazyParsingWithoutUnresolvedVariables() const {
return !MaybeParsingArrowhead() &&
@@ -541,10 +541,14 @@ class V8_EXPORT_PRIVATE Parser : public NON_EXPORTED_BASE(ParserBase<Parser>) {
return property != nullptr && property->obj()->IsThisExpression();
}
- // Returns true if the expression is of type "obj.#foo".
+ // Returns true if the expression is of type "obj.#foo" or "obj?.#foo".
V8_INLINE static bool IsPrivateReference(Expression* expression) {
DCHECK_NOT_NULL(expression);
Property* property = expression->AsProperty();
+ if (expression->IsOptionalChain()) {
+ Expression* expr_inner = expression->AsOptionalChain()->expression();
+ property = expr_inner->AsProperty();
+ }
return property != nullptr && property->IsPrivateReference();
}
diff --git a/chromium/v8/src/parsing/parsing.cc b/chromium/v8/src/parsing/parsing.cc
index e126874d7dc..53f6cf045b7 100644
--- a/chromium/v8/src/parsing/parsing.cc
+++ b/chromium/v8/src/parsing/parsing.cc
@@ -7,6 +7,7 @@
#include <memory>
#include "src/ast/ast.h"
+#include "src/base/v8-fallthrough.h"
#include "src/execution/vm-state-inl.h"
#include "src/handles/maybe-handles.h"
#include "src/objects/objects-inl.h"
@@ -24,14 +25,13 @@ namespace {
void MaybeReportErrorsAndStatistics(ParseInfo* info, Handle<Script> script,
Isolate* isolate, Parser* parser,
- ReportErrorsAndStatisticsMode mode) {
- if (mode == ReportErrorsAndStatisticsMode::kYes) {
- if (info->literal() == nullptr) {
- info->pending_error_handler()->PrepareErrors(isolate,
- info->ast_value_factory());
- info->pending_error_handler()->ReportErrors(isolate, script);
- }
- parser->UpdateStatistics(isolate, script);
+ ReportStatisticsMode mode) {
+ switch (mode) {
+ case ReportStatisticsMode::kYes:
+ parser->UpdateStatistics(isolate, script);
+ break;
+ case ReportStatisticsMode::kNo:
+ break;
}
}
@@ -39,7 +39,7 @@ void MaybeReportErrorsAndStatistics(ParseInfo* info, Handle<Script> script,
bool ParseProgram(ParseInfo* info, Handle<Script> script,
MaybeHandle<ScopeInfo> maybe_outer_scope_info,
- Isolate* isolate, ReportErrorsAndStatisticsMode mode) {
+ Isolate* isolate, ReportStatisticsMode mode) {
DCHECK(info->flags().is_toplevel());
DCHECK_NULL(info->literal());
@@ -62,12 +62,12 @@ bool ParseProgram(ParseInfo* info, Handle<Script> script,
}
bool ParseProgram(ParseInfo* info, Handle<Script> script, Isolate* isolate,
- ReportErrorsAndStatisticsMode mode) {
+ ReportStatisticsMode mode) {
return ParseProgram(info, script, kNullMaybeHandle, isolate, mode);
}
bool ParseFunction(ParseInfo* info, Handle<SharedFunctionInfo> shared_info,
- Isolate* isolate, ReportErrorsAndStatisticsMode mode) {
+ Isolate* isolate, ReportStatisticsMode mode) {
DCHECK(!info->flags().is_toplevel());
DCHECK(!shared_info.is_null());
DCHECK_NULL(info->literal());
@@ -93,7 +93,7 @@ bool ParseFunction(ParseInfo* info, Handle<SharedFunctionInfo> shared_info,
}
bool ParseAny(ParseInfo* info, Handle<SharedFunctionInfo> shared_info,
- Isolate* isolate, ReportErrorsAndStatisticsMode mode) {
+ Isolate* isolate, ReportStatisticsMode mode) {
DCHECK(!shared_info.is_null());
if (info->flags().is_toplevel()) {
MaybeHandle<ScopeInfo> maybe_outer_scope_info;
diff --git a/chromium/v8/src/parsing/parsing.h b/chromium/v8/src/parsing/parsing.h
index f2350171391..f105b630d4f 100644
--- a/chromium/v8/src/parsing/parsing.h
+++ b/chromium/v8/src/parsing/parsing.h
@@ -15,36 +15,37 @@ class SharedFunctionInfo;
namespace parsing {
-enum class ReportErrorsAndStatisticsMode { kYes, kNo };
+enum class ReportStatisticsMode { kYes, kNo };
// Parses the top-level source code represented by the parse info and sets its
// function literal. Returns false (and deallocates any allocated AST nodes) if
// parsing failed.
-V8_EXPORT_PRIVATE bool ParseProgram(
- ParseInfo* info, Handle<Script> script, Isolate* isolate,
- ReportErrorsAndStatisticsMode mode = ReportErrorsAndStatisticsMode::kYes);
+V8_EXPORT_PRIVATE bool ParseProgram(ParseInfo* info, Handle<Script> script,
+ Isolate* isolate,
+ ReportStatisticsMode mode);
// Parses the top-level source code represented by the parse info and sets its
// function literal. Allows passing an |outer_scope| for programs that exist in
// another scope (e.g. eval). Returns false (and deallocates any allocated AST
// nodes) if parsing failed.
-V8_EXPORT_PRIVATE bool ParseProgram(
- ParseInfo* info, Handle<Script> script, MaybeHandle<ScopeInfo> outer_scope,
- Isolate* isolate,
- ReportErrorsAndStatisticsMode mode = ReportErrorsAndStatisticsMode::kYes);
+V8_EXPORT_PRIVATE bool ParseProgram(ParseInfo* info, Handle<Script> script,
+ MaybeHandle<ScopeInfo> outer_scope,
+ Isolate* isolate,
+ ReportStatisticsMode mode);
// Like ParseProgram but for an individual function which already has a
// allocated shared function info.
-V8_EXPORT_PRIVATE bool ParseFunction(
- ParseInfo* info, Handle<SharedFunctionInfo> shared_info, Isolate* isolate,
- ReportErrorsAndStatisticsMode mode = ReportErrorsAndStatisticsMode::kYes);
+V8_EXPORT_PRIVATE bool ParseFunction(ParseInfo* info,
+ Handle<SharedFunctionInfo> shared_info,
+ Isolate* isolate,
+ ReportStatisticsMode mode);
// If you don't know whether info->is_toplevel() is true or not, use this method
// to dispatch to either of the above functions. Prefer to use the above methods
// whenever possible.
-V8_EXPORT_PRIVATE bool ParseAny(
- ParseInfo* info, Handle<SharedFunctionInfo> shared_info, Isolate* isolate,
- ReportErrorsAndStatisticsMode mode = ReportErrorsAndStatisticsMode::kYes);
+V8_EXPORT_PRIVATE bool ParseAny(ParseInfo* info,
+ Handle<SharedFunctionInfo> shared_info,
+ Isolate* isolate, ReportStatisticsMode mode);
} // namespace parsing
} // namespace internal
diff --git a/chromium/v8/src/parsing/pending-compilation-error-handler.cc b/chromium/v8/src/parsing/pending-compilation-error-handler.cc
index f131b7ad8e6..5e0b8fec0e6 100644
--- a/chromium/v8/src/parsing/pending-compilation-error-handler.cc
+++ b/chromium/v8/src/parsing/pending-compilation-error-handler.cc
@@ -5,6 +5,7 @@
#include "src/parsing/pending-compilation-error-handler.h"
#include "src/ast/ast-value-factory.h"
+#include "src/base/export-template.h"
#include "src/base/logging.h"
#include "src/debug/debug.h"
#include "src/execution/isolate.h"
@@ -139,10 +140,13 @@ void PendingCompilationErrorHandler::PrepareErrors(
ast_value_factory->Internalize(isolate);
error_details_.Prepare(isolate);
}
-template void PendingCompilationErrorHandler::PrepareErrors(
- Isolate* isolate, AstValueFactory* ast_value_factory);
-template void PendingCompilationErrorHandler::PrepareErrors(
- OffThreadIsolate* isolate, AstValueFactory* ast_value_factory);
+template EXPORT_TEMPLATE_DEFINE(
+ V8_EXPORT_PRIVATE) void PendingCompilationErrorHandler::
+ PrepareErrors(Isolate* isolate, AstValueFactory* ast_value_factory);
+template EXPORT_TEMPLATE_DEFINE(
+ V8_EXPORT_PRIVATE) void PendingCompilationErrorHandler::
+ PrepareErrors(OffThreadIsolate* isolate,
+ AstValueFactory* ast_value_factory);
void PendingCompilationErrorHandler::ReportErrors(Isolate* isolate,
Handle<Script> script) const {
diff --git a/chromium/v8/src/parsing/pending-compilation-error-handler.h b/chromium/v8/src/parsing/pending-compilation-error-handler.h
index 4d15ac91cab..2b1e60c4e59 100644
--- a/chromium/v8/src/parsing/pending-compilation-error-handler.h
+++ b/chromium/v8/src/parsing/pending-compilation-error-handler.h
@@ -49,8 +49,10 @@ class PendingCompilationErrorHandler {
// Handle errors detected during parsing.
template <typename LocalIsolate>
+ EXPORT_TEMPLATE_DECLARE(V8_EXPORT_PRIVATE)
void PrepareErrors(LocalIsolate* isolate, AstValueFactory* ast_value_factory);
- void ReportErrors(Isolate* isolate, Handle<Script> script) const;
+ V8_EXPORT_PRIVATE void ReportErrors(Isolate* isolate,
+ Handle<Script> script) const;
// Handle warnings detected during compilation.
template <typename LocalIsolate>
@@ -139,6 +141,15 @@ class PendingCompilationErrorHandler {
DISALLOW_COPY_AND_ASSIGN(PendingCompilationErrorHandler);
};
+extern template void PendingCompilationErrorHandler::PrepareErrors(
+ Isolate* isolate, AstValueFactory* ast_value_factory);
+extern template void PendingCompilationErrorHandler::PrepareErrors(
+ OffThreadIsolate* isolate, AstValueFactory* ast_value_factory);
+extern template void PendingCompilationErrorHandler::PrepareWarnings(
+ Isolate* isolate);
+extern template void PendingCompilationErrorHandler::PrepareWarnings(
+ OffThreadIsolate* isolate);
+
} // namespace internal
} // namespace v8
#endif // V8_PARSING_PENDING_COMPILATION_ERROR_HANDLER_H_
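
Editor's note: the header now declares the PrepareErrors and PrepareWarnings instantiations with `extern template`, while the .cc file above provides the explicit instantiation definitions (tagged for export). A small self-contained sketch of that declaration/definition split, with the export macros omitted and placeholder isolate types standing in for V8's:

```cpp
#include <iostream>
#include <string>

// --- header part of the sketch ---
template <typename LocalIsolate>
struct HandlerSketch {
  void PrepareErrors(LocalIsolate* isolate);
};

struct MainIsolate { std::string name = "main"; };
struct OffThreadIsolate { std::string name = "off-thread"; };

// Tell other translation units not to instantiate these themselves; the
// definitions live in exactly one .cc file (below).
extern template struct HandlerSketch<MainIsolate>;
extern template struct HandlerSketch<OffThreadIsolate>;

// --- .cc part of the sketch ---
template <typename LocalIsolate>
void HandlerSketch<LocalIsolate>::PrepareErrors(LocalIsolate* isolate) {
  std::cout << "preparing errors on " << isolate->name << "\n";
}

// Explicit instantiation definitions; in the patch this is where
// EXPORT_TEMPLATE_DEFINE(V8_EXPORT_PRIVATE) is attached.
template struct HandlerSketch<MainIsolate>;
template struct HandlerSketch<OffThreadIsolate>;

int main() {
  MainIsolate iso;
  HandlerSketch<MainIsolate>{}.PrepareErrors(&iso);
}
```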
diff --git a/chromium/v8/src/parsing/preparse-data-impl.h b/chromium/v8/src/parsing/preparse-data-impl.h
index 707e76236d8..7a8b17bafbf 100644
--- a/chromium/v8/src/parsing/preparse-data-impl.h
+++ b/chromium/v8/src/parsing/preparse-data-impl.h
@@ -37,8 +37,6 @@ class BaseConsumedPreparseData : public ConsumedPreparseData {
public:
class ByteData : public PreparseByteDataConstants {
public:
- ByteData() {}
-
// Reading from the ByteData is only allowed when a ReadingScope is on the
// stack. This ensures that we have a DisallowHeapAllocation in place
// whenever ByteData holds a raw pointer into the heap.
diff --git a/chromium/v8/src/parsing/preparser.cc b/chromium/v8/src/parsing/preparser.cc
index f9af109d817..8b68f62c94e 100644
--- a/chromium/v8/src/parsing/preparser.cc
+++ b/chromium/v8/src/parsing/preparser.cc
@@ -325,10 +325,6 @@ PreParser::Expression PreParser::ParseFunctionLiteral(
// Parsing the body may change the language mode in our scope.
language_mode = function_scope->language_mode();
- if (is_sloppy(language_mode)) {
- function_scope->HoistSloppyBlockFunctions(nullptr);
- }
-
// Validate name and parameter names. We can do this only after parsing the
// function, since the function can declare itself strict.
CheckFunctionName(language_mode, function_name, function_name_validity,
diff --git a/chromium/v8/src/parsing/preparser.h b/chromium/v8/src/parsing/preparser.h
index 5280e3d2268..2b376d575a3 100644
--- a/chromium/v8/src/parsing/preparser.h
+++ b/chromium/v8/src/parsing/preparser.h
@@ -575,6 +575,10 @@ class PreParserFactory {
}
PreParserExpression NewOptionalChain(const PreParserExpression& expr) {
+ // Needed to track `delete a?.#b` early errors
+ if (expr.IsPrivateReference()) {
+ return PreParserExpression::PrivateReference();
+ }
return PreParserExpression::Default();
}
diff --git a/chromium/v8/src/parsing/scanner-inl.h b/chromium/v8/src/parsing/scanner-inl.h
index bd4d0284d86..b255dccc05e 100644
--- a/chromium/v8/src/parsing/scanner-inl.h
+++ b/chromium/v8/src/parsing/scanner-inl.h
@@ -305,7 +305,7 @@ V8_INLINE Token::Value Scanner::ScanIdentifierOrKeywordInner() {
// Special case for escapes at the start of an identifier.
escaped = true;
uc32 c = ScanIdentifierUnicodeEscape();
- DCHECK(!IsIdentifierStart(-1));
+ DCHECK(!IsIdentifierStart(Invalid()));
if (c == '\\' || !IsIdentifierStart(c)) {
return Token::ILLEGAL;
}
diff --git a/chromium/v8/src/parsing/scanner.cc b/chromium/v8/src/parsing/scanner.cc
index 52a1bf0724c..e27cb041020 100644
--- a/chromium/v8/src/parsing/scanner.cc
+++ b/chromium/v8/src/parsing/scanner.cc
@@ -107,6 +107,12 @@ void Scanner::Initialize() {
Scan();
}
+// static
+bool Scanner::IsInvalid(uc32 c) {
+ DCHECK(c == Invalid() || base::IsInRange(c, 0u, String::kMaxCodePoint));
+ return c == Scanner::Invalid();
+}
+
template <bool capture_raw, bool unicode>
uc32 Scanner::ScanHexNumber(int expected_length) {
DCHECK_LE(expected_length, 4); // prevent overflow
@@ -120,7 +126,7 @@ uc32 Scanner::ScanHexNumber(int expected_length) {
unicode
? MessageTemplate::kInvalidUnicodeEscapeSequence
: MessageTemplate::kInvalidHexEscapeSequence);
- return -1;
+ return Invalid();
}
x = x * 16 + d;
Advance<capture_raw>();
@@ -130,17 +136,17 @@ uc32 Scanner::ScanHexNumber(int expected_length) {
}
template <bool capture_raw>
-uc32 Scanner::ScanUnlimitedLengthHexNumber(int max_value, int beg_pos) {
+uc32 Scanner::ScanUnlimitedLengthHexNumber(uc32 max_value, int beg_pos) {
uc32 x = 0;
int d = HexValue(c0_);
- if (d < 0) return -1;
+ if (d < 0) return Invalid();
while (d >= 0) {
x = x * 16 + d;
if (x > max_value) {
ReportScannerError(Location(beg_pos, source_pos() + 1),
MessageTemplate::kUndefinedUnicodeCodePoint);
- return -1;
+ return Invalid();
}
Advance<capture_raw>();
d = HexValue(c0_);
@@ -386,7 +392,7 @@ bool Scanner::ScanEscape() {
case 't' : c = '\t'; break;
case 'u' : {
c = ScanUnicodeEscape<capture_raw>();
- if (c < 0) return false;
+ if (IsInvalid(c)) return false;
break;
}
case 'v':
@@ -394,7 +400,7 @@ bool Scanner::ScanEscape() {
break;
case 'x': {
c = ScanHexNumber<capture_raw>(2);
- if (c < 0) return false;
+ if (IsInvalid(c)) return false;
break;
}
case '0': // Fall through.
@@ -416,6 +422,7 @@ bool Scanner::ScanEscape() {
template <bool capture_raw>
uc32 Scanner::ScanOctalEscape(uc32 c, int length) {
+ DCHECK('0' <= c && c <= '7');
uc32 x = c - '0';
int i = 0;
for (; i < length; i++) {
@@ -553,7 +560,7 @@ Token::Value Scanner::ScanTemplateSpan() {
scanner_error_state.MoveErrorTo(next_);
octal_error_state.MoveErrorTo(next_);
}
- } else if (c < 0) {
+ } else if (c == kEndOfInput) {
// Unterminated template literal
break;
} else {
@@ -861,7 +868,7 @@ Token::Value Scanner::ScanNumber(bool seen_period) {
uc32 Scanner::ScanIdentifierUnicodeEscape() {
Advance();
- if (c0_ != 'u') return -1;
+ if (c0_ != 'u') return Invalid();
Advance();
return ScanUnicodeEscape<false>();
}
@@ -873,11 +880,12 @@ uc32 Scanner::ScanUnicodeEscape() {
if (c0_ == '{') {
int begin = source_pos() - 2;
Advance<capture_raw>();
- uc32 cp = ScanUnlimitedLengthHexNumber<capture_raw>(0x10FFFF, begin);
- if (cp < 0 || c0_ != '}') {
+ uc32 cp =
+ ScanUnlimitedLengthHexNumber<capture_raw>(String::kMaxCodePoint, begin);
+ if (cp == kInvalidSequence || c0_ != '}') {
ReportScannerError(source_pos(),
MessageTemplate::kInvalidUnicodeEscapeSequence);
- return -1;
+ return Invalid();
}
Advance<capture_raw>();
return cp;
@@ -895,7 +903,7 @@ Token::Value Scanner::ScanIdentifierOrKeywordInnerSlow(bool escaped,
// Only allow legal identifier part characters.
// TODO(verwaest): Make this true.
// DCHECK(!IsIdentifierPart('\'));
- DCHECK(!IsIdentifierPart(-1));
+ DCHECK(!IsIdentifierPart(Invalid()));
if (c == '\\' || !IsIdentifierPart(c)) {
return Token::ILLEGAL;
}
@@ -986,8 +994,9 @@ Maybe<int> Scanner::ScanRegExpFlags() {
// Scan regular expression flags.
JSRegExp::Flags flags;
while (IsIdentifierPart(c0_)) {
- JSRegExp::Flags flag = JSRegExp::FlagFromChar(c0_);
- if (flag == JSRegExp::kInvalid) return Nothing<int>();
+ base::Optional<JSRegExp::Flags> maybe_flag = JSRegExp::FlagFromChar(c0_);
+ if (!maybe_flag.has_value()) return Nothing<int>();
+ JSRegExp::Flags flag = *maybe_flag;
if (flags & flag) return Nothing<int>();
Advance();
flags |= flag;
diff --git a/chromium/v8/src/parsing/scanner.h b/chromium/v8/src/parsing/scanner.h
index 830067e1ad5..6ac7dde01b6 100644
--- a/chromium/v8/src/parsing/scanner.h
+++ b/chromium/v8/src/parsing/scanner.h
@@ -39,7 +39,7 @@ class Zone;
// or one part of a surrogate pair that make a single 21 bit code point.
class Utf16CharacterStream {
public:
- static const uc32 kEndOfInput = -1;
+ static constexpr uc32 kEndOfInput = static_cast<uc32>(-1);
virtual ~Utf16CharacterStream() = default;
@@ -267,8 +267,11 @@ class V8_EXPORT_PRIVATE Scanner {
};
// -1 is outside of the range of any real source code.
- static const int kNoOctalLocation = -1;
- static const uc32 kEndOfInput = Utf16CharacterStream::kEndOfInput;
+ static constexpr uc32 kEndOfInput = Utf16CharacterStream::kEndOfInput;
+ static constexpr uc32 kInvalidSequence = static_cast<uc32>(-1);
+
+ static constexpr uc32 Invalid() { return Scanner::kInvalidSequence; }
+ static bool IsInvalid(uc32 c);
explicit Scanner(Utf16CharacterStream* source, UnoptimizedCompileFlags flags);
@@ -541,7 +544,8 @@ class V8_EXPORT_PRIVATE Scanner {
}
void PushBack(uc32 ch) {
- DCHECK_LE(c0_, static_cast<uc32>(unibrow::Utf16::kMaxNonSurrogateCharCode));
+ DCHECK(IsInvalid(c0_) ||
+ base::IsInRange(c0_, 0u, unibrow::Utf16::kMaxNonSurrogateCharCode));
source_->Back();
c0_ = ch;
}
@@ -623,7 +627,7 @@ class V8_EXPORT_PRIVATE Scanner {
// number can be 000000001, so it's very long in characters but its value is
// small.
template <bool capture_raw>
- uc32 ScanUnlimitedLengthHexNumber(int max_value, int beg_pos);
+ uc32 ScanUnlimitedLengthHexNumber(uc32 max_value, int beg_pos);
// Scans a single JavaScript token.
V8_INLINE Token::Value ScanSingleToken();
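
Editor's note: several scanner hunks above stop comparing uc32 values with `< 0` and compare against an explicit Invalid() sentinel of `static_cast<uc32>(-1)` instead. That matters once the code-unit type is (or may become) unsigned, where `< 0` is always false. A small sketch of the difference, assuming a 32-bit unsigned code-point type:

```cpp
#include <cstdint>
#include <iostream>

using uc32 = uint32_t;  // assumption for the sketch: code points are unsigned

constexpr uc32 kInvalidSequence = static_cast<uc32>(-1);
constexpr uc32 Invalid() { return kInvalidSequence; }
constexpr bool IsInvalid(uc32 c) { return c == Invalid(); }

uc32 ScanHexDigit(char c) {
  if (c >= '0' && c <= '9') return static_cast<uc32>(c - '0');
  return Invalid();  // instead of returning -1 and testing `< 0`
}

int main() {
  uc32 c = ScanHexDigit('x');
  // With an unsigned uc32 this comparison is always false; that is the
  // bug the sentinel helpers avoid.
  std::cout << (c < 0) << "\n";       // 0
  std::cout << IsInvalid(c) << "\n";  // 1
}
```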
diff --git a/chromium/v8/src/parsing/token.h b/chromium/v8/src/parsing/token.h
index ef92238de2a..dabbff0e0e7 100644
--- a/chromium/v8/src/parsing/token.h
+++ b/chromium/v8/src/parsing/token.h
@@ -284,6 +284,10 @@ class V8_EXPORT_PRIVATE Token {
return base::IsInRange(token, INIT, ASSIGN_SUB);
}
+ static bool IsLogicalAssignmentOp(Value token) {
+ return base::IsInRange(token, ASSIGN_NULLISH, ASSIGN_AND);
+ }
+
static bool IsBinaryOp(Value op) { return base::IsInRange(op, COMMA, SUB); }
static bool IsCompareOp(Value op) { return base::IsInRange(op, EQ, IN); }
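
Editor's note: the new Token::IsLogicalAssignmentOp relies on the same trick as the surrounding predicates: the logical-assignment tokens are assumed to occupy a contiguous range in the token enum, so membership reduces to one range check. A sketch of that pattern with a stand-in enum (names are illustrative, not V8's token list):

```cpp
#include <iostream>

// Assumption for the sketch: ??=, ||=, &&= are declared contiguously, as
// IsInRange(token, ASSIGN_NULLISH, ASSIGN_AND) requires in the patch.
enum class Tok { kAssign, kAssignNullish, kAssignOr, kAssignAnd, kAdd };

constexpr bool IsInRange(Tok value, Tok lo, Tok hi) {
  // Single unsigned comparison instead of two signed ones.
  return static_cast<unsigned>(value) - static_cast<unsigned>(lo) <=
         static_cast<unsigned>(hi) - static_cast<unsigned>(lo);
}

constexpr bool IsLogicalAssignmentOp(Tok t) {
  return IsInRange(t, Tok::kAssignNullish, Tok::kAssignAnd);
}

int main() {
  std::cout << IsLogicalAssignmentOp(Tok::kAssignOr) << "\n";  // 1
  std::cout << IsLogicalAssignmentOp(Tok::kAssign) << "\n";    // 0
  static_assert(!IsLogicalAssignmentOp(Tok::kAdd), "not a logical assignment");
}
```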
diff --git a/chromium/v8/src/profiler/cpu-profiler.cc b/chromium/v8/src/profiler/cpu-profiler.cc
index 5f22a3d2fb4..71130f65ed6 100644
--- a/chromium/v8/src/profiler/cpu-profiler.cc
+++ b/chromium/v8/src/profiler/cpu-profiler.cc
@@ -208,7 +208,7 @@ SamplingEventsProcessor::ProcessOneSample() {
(record1.order == last_processed_code_event_id_)) {
TickSampleEventRecord record;
ticks_from_vm_buffer_.Dequeue(&record);
- generator_->RecordTickSample(record.sample);
+ generator_->SymbolizeTickSample(record.sample);
return OneSampleProcessed;
}
@@ -220,7 +220,7 @@ SamplingEventsProcessor::ProcessOneSample() {
if (record->order != last_processed_code_event_id_) {
return FoundSampleForNextCodeEvent;
}
- generator_->RecordTickSample(record->sample);
+ generator_->SymbolizeTickSample(record->sample);
ticks_buffer_.Remove();
return OneSampleProcessed;
}
diff --git a/chromium/v8/src/profiler/cpu-profiler.h b/chromium/v8/src/profiler/cpu-profiler.h
index e3ff5bb734d..65e6d13d44b 100644
--- a/chromium/v8/src/profiler/cpu-profiler.h
+++ b/chromium/v8/src/profiler/cpu-profiler.h
@@ -156,7 +156,7 @@ class ProfilerCodeObserver;
class V8_EXPORT_PRIVATE ProfilerEventsProcessor : public base::Thread,
public CodeEventObserver {
public:
- virtual ~ProfilerEventsProcessor();
+ ~ProfilerEventsProcessor() override;
void CodeEventHandler(const CodeEventsContainer& evt_rec) override;
diff --git a/chromium/v8/src/profiler/heap-snapshot-generator.cc b/chromium/v8/src/profiler/heap-snapshot-generator.cc
index 16e87e43c77..2fa4f2e5e84 100644
--- a/chromium/v8/src/profiler/heap-snapshot-generator.cc
+++ b/chromium/v8/src/profiler/heap-snapshot-generator.cc
@@ -11,6 +11,7 @@
#include "src/debug/debug.h"
#include "src/handles/global-handles.h"
#include "src/heap/combined-heap.h"
+#include "src/heap/safepoint.h"
#include "src/numbers/conversions.h"
#include "src/objects/allocation-site-inl.h"
#include "src/objects/api-callbacks.h"
@@ -1129,7 +1130,7 @@ void V8HeapExplorer::ExtractScriptReferences(HeapEntry* entry, Script script) {
SetInternalReference(entry, "source", script.source(), Script::kSourceOffset);
SetInternalReference(entry, "name", script.name(), Script::kNameOffset);
SetInternalReference(entry, "context_data", script.context_data(),
- Script::kContextOffset);
+ Script::kContextDataOffset);
TagObject(script.line_ends(), "(script line ends)");
SetInternalReference(entry, "line_ends", script.line_ends(),
Script::kLineEndsOffset);
@@ -2037,6 +2038,7 @@ bool HeapSnapshotGenerator::GenerateSnapshot() {
GarbageCollectionReason::kHeapProfiler);
NullContextForSnapshotScope null_context_scope(Isolate::FromHeap(heap_));
+ SafepointScope scope(heap_);
#ifdef VERIFY_HEAP
Heap* debug_heap = heap_;
diff --git a/chromium/v8/src/profiler/profile-generator.cc b/chromium/v8/src/profiler/profile-generator.cc
index 42ff71c2bb1..b38a67771ba 100644
--- a/chromium/v8/src/profiler/profile-generator.cc
+++ b/chromium/v8/src/profiler/profile-generator.cc
@@ -872,7 +872,7 @@ ProfileGenerator::ProfileGenerator(CpuProfilesCollection* profiles,
CodeMap* code_map)
: profiles_(profiles), code_map_(code_map) {}
-void ProfileGenerator::RecordTickSample(const TickSample& sample) {
+void ProfileGenerator::SymbolizeTickSample(const TickSample& sample) {
ProfileStackTrace stack_trace;
// Conservatively reserve space for stack frames + pc + function + vm-state.
// There could in fact be more of them because of inlined entries.
diff --git a/chromium/v8/src/profiler/profile-generator.h b/chromium/v8/src/profiler/profile-generator.h
index e71a0abaead..1f9d5370ae7 100644
--- a/chromium/v8/src/profiler/profile-generator.h
+++ b/chromium/v8/src/profiler/profile-generator.h
@@ -520,7 +520,10 @@ class V8_EXPORT_PRIVATE ProfileGenerator {
public:
explicit ProfileGenerator(CpuProfilesCollection* profiles, CodeMap* code_map);
- void RecordTickSample(const TickSample& sample);
+ // Use the CodeMap to turn the raw addresses recorded in the sample into
+ // code/function names. The symbolized stack is added to the relevant
+ // profiles in the CpuProfilesCollection.
+ void SymbolizeTickSample(const TickSample& sample);
void UpdateNativeContextAddress(Address from, Address to);
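
Editor's note: the rename from RecordTickSample to SymbolizeTickSample matches the comment added above: the generator maps raw PC addresses to code entries, and the symbolized stack is then attached to profiles. A toy sketch of that address-to-name lookup step, using an ordered map as a stand-in for V8's CodeMap:

```cpp
#include <cstdint>
#include <iostream>
#include <map>
#include <string>
#include <vector>

using Address = uintptr_t;

struct CodeEntry {
  std::string name;
  size_t size;
};

// Stand-in for CodeMap: find the entry whose [start, start + size) range
// contains the sampled address, or nullptr if nothing covers it.
const CodeEntry* FindEntry(const std::map<Address, CodeEntry>& code_map,
                           Address addr) {
  auto it = code_map.upper_bound(addr);
  if (it == code_map.begin()) return nullptr;
  --it;
  if (addr < it->first + it->second.size) return &it->second;
  return nullptr;
}

int main() {
  std::map<Address, CodeEntry> code_map = {
      {0x1000, {"foo", 0x100}},
      {0x2000, {"bar", 0x80}},
  };
  std::vector<Address> sampled_stack = {0x1010, 0x2040, 0x3000};
  for (Address addr : sampled_stack) {
    const CodeEntry* entry = FindEntry(code_map, addr);
    std::cout << std::hex << addr << " -> "
              << (entry ? entry->name : "(unresolved)") << "\n";
  }
}
```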
diff --git a/chromium/v8/src/profiler/tick-sample.cc b/chromium/v8/src/profiler/tick-sample.cc
index 00bff91cd0a..1de13445dea 100644
--- a/chromium/v8/src/profiler/tick-sample.cc
+++ b/chromium/v8/src/profiler/tick-sample.cc
@@ -10,7 +10,7 @@
#include "src/execution/frames-inl.h"
#include "src/execution/simulator.h"
#include "src/execution/vm-state-inl.h"
-#include "src/heap/heap-inl.h" // For MemoryAllocator::code_range.
+#include "src/heap/heap-inl.h" // For Heap::code_range.
#include "src/logging/counters.h"
#include "src/sanitizer/asan.h"
#include "src/sanitizer/msan.h"
@@ -337,7 +337,10 @@ bool TickSample::GetStackSample(Isolate* v8_isolate, RegisterState* regs,
continue;
}
}
- frames[i++] = reinterpret_cast<void*>(it.frame()->pc());
+ // For arm64, the PC for the frame sometimes doesn't come from the stack,
+ // but from the link register instead. For this reason, we skip
+ // authenticating it.
+ frames[i++] = reinterpret_cast<void*>(it.frame()->unauthenticated_pc());
}
sample_info->frames_count = i;
return true;
diff --git a/chromium/v8/src/profiler/tracing-cpu-profiler.cc b/chromium/v8/src/profiler/tracing-cpu-profiler.cc
index afed9ca73b1..d18ae09fb14 100644
--- a/chromium/v8/src/profiler/tracing-cpu-profiler.cc
+++ b/chromium/v8/src/profiler/tracing-cpu-profiler.cc
@@ -50,10 +50,7 @@ void TracingCpuProfilerImpl::OnTraceDisabled() {
void TracingCpuProfilerImpl::StartProfiling() {
base::MutexGuard lock(&mutex_);
if (!profiling_enabled_ || profiler_) return;
- bool enabled;
- TRACE_EVENT_CATEGORY_GROUP_ENABLED(
- TRACE_DISABLED_BY_DEFAULT("v8.cpu_profiler.hires"), &enabled);
- int sampling_interval_us = enabled ? 100 : 1000;
+ int sampling_interval_us = 100;
profiler_.reset(new CpuProfiler(isolate_, kDebugNaming));
profiler_->set_sampling_interval(
base::TimeDelta::FromMicroseconds(sampling_interval_us));
diff --git a/chromium/v8/src/regexp/arm/regexp-macro-assembler-arm.cc b/chromium/v8/src/regexp/arm/regexp-macro-assembler-arm.cc
index 10dad83c28c..aaee9b196c6 100644
--- a/chromium/v8/src/regexp/arm/regexp-macro-assembler-arm.cc
+++ b/chromium/v8/src/regexp/arm/regexp-macro-assembler-arm.cc
@@ -224,7 +224,7 @@ void RegExpMacroAssemblerARM::CheckGreedyLoop(Label* on_equal) {
}
void RegExpMacroAssemblerARM::CheckNotBackReferenceIgnoreCase(
- int start_reg, bool read_backward, Label* on_no_match) {
+ int start_reg, bool read_backward, bool unicode, Label* on_no_match) {
Label fallthrough;
__ ldr(r0, register_location(start_reg)); // Index of start of capture
__ ldr(r1, register_location(start_reg + 1)); // Index of end of capture
@@ -335,7 +335,10 @@ void RegExpMacroAssemblerARM::CheckNotBackReferenceIgnoreCase(
{
AllowExternalCallThatCantCauseGC scope(masm_);
ExternalReference function =
- ExternalReference::re_case_insensitive_compare_uc16(isolate());
+ unicode ? ExternalReference::re_case_insensitive_compare_unicode(
+ isolate())
+ : ExternalReference::re_case_insensitive_compare_non_unicode(
+ isolate());
__ CallCFunction(function, argument_count);
}
diff --git a/chromium/v8/src/regexp/arm/regexp-macro-assembler-arm.h b/chromium/v8/src/regexp/arm/regexp-macro-assembler-arm.h
index 549636a6744..910e5c46079 100644
--- a/chromium/v8/src/regexp/arm/regexp-macro-assembler-arm.h
+++ b/chromium/v8/src/regexp/arm/regexp-macro-assembler-arm.h
@@ -37,7 +37,7 @@ class V8_EXPORT_PRIVATE RegExpMacroAssemblerARM
virtual void CheckNotBackReference(int start_reg, bool read_backward,
Label* on_no_match);
virtual void CheckNotBackReferenceIgnoreCase(int start_reg,
- bool read_backward,
+ bool read_backward, bool unicode,
Label* on_no_match);
virtual void CheckNotCharacter(unsigned c, Label* on_not_equal);
virtual void CheckNotCharacterAfterAnd(unsigned c,
diff --git a/chromium/v8/src/regexp/arm64/regexp-macro-assembler-arm64.cc b/chromium/v8/src/regexp/arm64/regexp-macro-assembler-arm64.cc
index 055f5639f5b..b56a8ac709c 100644
--- a/chromium/v8/src/regexp/arm64/regexp-macro-assembler-arm64.cc
+++ b/chromium/v8/src/regexp/arm64/regexp-macro-assembler-arm64.cc
@@ -294,7 +294,7 @@ void RegExpMacroAssemblerARM64::CheckGreedyLoop(Label* on_equal) {
}
void RegExpMacroAssemblerARM64::CheckNotBackReferenceIgnoreCase(
- int start_reg, bool read_backward, Label* on_no_match) {
+ int start_reg, bool read_backward, bool unicode, Label* on_no_match) {
Label fallthrough;
Register capture_start_offset = w10;
@@ -425,7 +425,10 @@ void RegExpMacroAssemblerARM64::CheckNotBackReferenceIgnoreCase(
{
AllowExternalCallThatCantCauseGC scope(masm_);
ExternalReference function =
- ExternalReference::re_case_insensitive_compare_uc16(isolate());
+ unicode ? ExternalReference::re_case_insensitive_compare_unicode(
+ isolate())
+ : ExternalReference::re_case_insensitive_compare_non_unicode(
+ isolate());
__ CallCFunction(function, argument_count);
}
diff --git a/chromium/v8/src/regexp/arm64/regexp-macro-assembler-arm64.h b/chromium/v8/src/regexp/arm64/regexp-macro-assembler-arm64.h
index 2b5feb1dbdc..aeb49aa9fff 100644
--- a/chromium/v8/src/regexp/arm64/regexp-macro-assembler-arm64.h
+++ b/chromium/v8/src/regexp/arm64/regexp-macro-assembler-arm64.h
@@ -42,7 +42,7 @@ class V8_EXPORT_PRIVATE RegExpMacroAssemblerARM64
virtual void CheckNotBackReference(int start_reg, bool read_backward,
Label* on_no_match);
virtual void CheckNotBackReferenceIgnoreCase(int start_reg,
- bool read_backward,
+ bool read_backward, bool unicode,
Label* on_no_match);
virtual void CheckNotCharacter(unsigned c, Label* on_not_equal);
virtual void CheckNotCharacterAfterAnd(unsigned c,
diff --git a/chromium/v8/src/regexp/gen-regexp-special-case.cc b/chromium/v8/src/regexp/gen-regexp-special-case.cc
index 9606c5d70d9..9ed338fc1d8 100644
--- a/chromium/v8/src/regexp/gen-regexp-special-case.cc
+++ b/chromium/v8/src/regexp/gen-regexp-special-case.cc
@@ -55,8 +55,9 @@ void PrintSpecial(std::ofstream& out) {
CHECK(U_SUCCESS(status));
// Iterate through all chars in BMP except surrogates.
- for (UChar32 i = 0; i < kNonBmpStart; i++) {
- if (i >= kSurrogateStart && i <= kSurrogateEnd) {
+ for (UChar32 i = 0; i < static_cast<UChar32>(kNonBmpStart); i++) {
+ if (i >= static_cast<UChar32>(kSurrogateStart) &&
+ i <= static_cast<UChar32>(kSurrogateEnd)) {
continue; // Ignore surrogate range
}
current.set(i, i);
diff --git a/chromium/v8/src/regexp/ia32/regexp-macro-assembler-ia32.cc b/chromium/v8/src/regexp/ia32/regexp-macro-assembler-ia32.cc
index 501a0aff604..f439ae7de07 100644
--- a/chromium/v8/src/regexp/ia32/regexp-macro-assembler-ia32.cc
+++ b/chromium/v8/src/regexp/ia32/regexp-macro-assembler-ia32.cc
@@ -206,7 +206,7 @@ void RegExpMacroAssemblerIA32::CheckGreedyLoop(Label* on_equal) {
}
void RegExpMacroAssemblerIA32::CheckNotBackReferenceIgnoreCase(
- int start_reg, bool read_backward, Label* on_no_match) {
+ int start_reg, bool read_backward, bool unicode, Label* on_no_match) {
Label fallthrough;
__ mov(edx, register_location(start_reg)); // Index of start of capture
__ mov(ebx, register_location(start_reg + 1)); // Index of end of capture
@@ -336,7 +336,10 @@ void RegExpMacroAssemblerIA32::CheckNotBackReferenceIgnoreCase(
{
AllowExternalCallThatCantCauseGC scope(masm_);
ExternalReference compare =
- ExternalReference::re_case_insensitive_compare_uc16(isolate());
+ unicode ? ExternalReference::re_case_insensitive_compare_unicode(
+ isolate())
+ : ExternalReference::re_case_insensitive_compare_non_unicode(
+ isolate());
__ CallCFunction(compare, argument_count);
}
// Pop original values before reacting on result value.
diff --git a/chromium/v8/src/regexp/ia32/regexp-macro-assembler-ia32.h b/chromium/v8/src/regexp/ia32/regexp-macro-assembler-ia32.h
index 2339ca57e15..a30bff29a15 100644
--- a/chromium/v8/src/regexp/ia32/regexp-macro-assembler-ia32.h
+++ b/chromium/v8/src/regexp/ia32/regexp-macro-assembler-ia32.h
@@ -37,7 +37,7 @@ class V8_EXPORT_PRIVATE RegExpMacroAssemblerIA32
virtual void CheckNotBackReference(int start_reg, bool read_backward,
Label* on_no_match);
virtual void CheckNotBackReferenceIgnoreCase(int start_reg,
- bool read_backward,
+ bool read_backward, bool unicode,
Label* on_no_match);
virtual void CheckNotCharacter(uint32_t c, Label* on_not_equal);
virtual void CheckNotCharacterAfterAnd(uint32_t c,
diff --git a/chromium/v8/src/regexp/mips/regexp-macro-assembler-mips.cc b/chromium/v8/src/regexp/mips/regexp-macro-assembler-mips.cc
index 5f8eb4c6d33..a6289254457 100644
--- a/chromium/v8/src/regexp/mips/regexp-macro-assembler-mips.cc
+++ b/chromium/v8/src/regexp/mips/regexp-macro-assembler-mips.cc
@@ -226,7 +226,7 @@ void RegExpMacroAssemblerMIPS::CheckGreedyLoop(Label* on_equal) {
}
void RegExpMacroAssemblerMIPS::CheckNotBackReferenceIgnoreCase(
- int start_reg, bool read_backward, Label* on_no_match) {
+ int start_reg, bool read_backward, bool unicode, Label* on_no_match) {
Label fallthrough;
__ lw(a0, register_location(start_reg)); // Index of start of capture.
__ lw(a1, register_location(start_reg + 1)); // Index of end of capture.
@@ -340,7 +340,10 @@ void RegExpMacroAssemblerMIPS::CheckNotBackReferenceIgnoreCase(
{
AllowExternalCallThatCantCauseGC scope(masm_);
ExternalReference function =
- ExternalReference::re_case_insensitive_compare_uc16(masm_->isolate());
+ unicode ? ExternalReference::re_case_insensitive_compare_unicode(
+ isolate())
+ : ExternalReference::re_case_insensitive_compare_non_unicode(
+ isolate());
__ CallCFunction(function, argument_count);
}
diff --git a/chromium/v8/src/regexp/mips/regexp-macro-assembler-mips.h b/chromium/v8/src/regexp/mips/regexp-macro-assembler-mips.h
index cafa7851803..e2aea1b0910 100644
--- a/chromium/v8/src/regexp/mips/regexp-macro-assembler-mips.h
+++ b/chromium/v8/src/regexp/mips/regexp-macro-assembler-mips.h
@@ -37,7 +37,7 @@ class V8_EXPORT_PRIVATE RegExpMacroAssemblerMIPS
virtual void CheckNotBackReference(int start_reg, bool read_backward,
Label* on_no_match);
virtual void CheckNotBackReferenceIgnoreCase(int start_reg,
- bool read_backward,
+ bool read_backward, bool unicode,
Label* on_no_match);
virtual void CheckNotCharacter(uint32_t c, Label* on_not_equal);
virtual void CheckNotCharacterAfterAnd(uint32_t c,
diff --git a/chromium/v8/src/regexp/mips64/regexp-macro-assembler-mips64.cc b/chromium/v8/src/regexp/mips64/regexp-macro-assembler-mips64.cc
index c443c8da467..e79038b00b7 100644
--- a/chromium/v8/src/regexp/mips64/regexp-macro-assembler-mips64.cc
+++ b/chromium/v8/src/regexp/mips64/regexp-macro-assembler-mips64.cc
@@ -262,7 +262,7 @@ void RegExpMacroAssemblerMIPS::CheckGreedyLoop(Label* on_equal) {
}
void RegExpMacroAssemblerMIPS::CheckNotBackReferenceIgnoreCase(
- int start_reg, bool read_backward, Label* on_no_match) {
+ int start_reg, bool read_backward, bool unicode, Label* on_no_match) {
Label fallthrough;
__ Ld(a0, register_location(start_reg)); // Index of start of capture.
__ Ld(a1, register_location(start_reg + 1)); // Index of end of capture.
@@ -376,7 +376,10 @@ void RegExpMacroAssemblerMIPS::CheckNotBackReferenceIgnoreCase(
{
AllowExternalCallThatCantCauseGC scope(masm_);
ExternalReference function =
- ExternalReference::re_case_insensitive_compare_uc16(masm_->isolate());
+ unicode ? ExternalReference::re_case_insensitive_compare_unicode(
+ isolate())
+ : ExternalReference::re_case_insensitive_compare_non_unicode(
+ isolate());
__ CallCFunction(function, argument_count);
}
diff --git a/chromium/v8/src/regexp/mips64/regexp-macro-assembler-mips64.h b/chromium/v8/src/regexp/mips64/regexp-macro-assembler-mips64.h
index 161a01e2fca..aebfec10604 100644
--- a/chromium/v8/src/regexp/mips64/regexp-macro-assembler-mips64.h
+++ b/chromium/v8/src/regexp/mips64/regexp-macro-assembler-mips64.h
@@ -37,7 +37,7 @@ class V8_EXPORT_PRIVATE RegExpMacroAssemblerMIPS
virtual void CheckNotBackReference(int start_reg, bool read_backward,
Label* on_no_match);
virtual void CheckNotBackReferenceIgnoreCase(int start_reg,
- bool read_backward,
+ bool read_backward, bool unicode,
Label* on_no_match);
virtual void CheckNotCharacter(uint32_t c, Label* on_not_equal);
virtual void CheckNotCharacterAfterAnd(uint32_t c,
diff --git a/chromium/v8/src/regexp/ppc/regexp-macro-assembler-ppc.cc b/chromium/v8/src/regexp/ppc/regexp-macro-assembler-ppc.cc
index 5a6eb315103..9db26777d31 100644
--- a/chromium/v8/src/regexp/ppc/regexp-macro-assembler-ppc.cc
+++ b/chromium/v8/src/regexp/ppc/regexp-macro-assembler-ppc.cc
@@ -242,7 +242,7 @@ void RegExpMacroAssemblerPPC::CheckGreedyLoop(Label* on_equal) {
}
void RegExpMacroAssemblerPPC::CheckNotBackReferenceIgnoreCase(
- int start_reg, bool read_backward, Label* on_no_match) {
+ int start_reg, bool read_backward, bool unicode, Label* on_no_match) {
Label fallthrough;
__ LoadP(r3, register_location(start_reg), r0); // Index of start of capture
__ LoadP(r4, register_location(start_reg + 1), r0); // Index of end
@@ -356,7 +356,10 @@ void RegExpMacroAssemblerPPC::CheckNotBackReferenceIgnoreCase(
{
AllowExternalCallThatCantCauseGC scope(masm_);
ExternalReference function =
- ExternalReference::re_case_insensitive_compare_uc16(isolate());
+ unicode ? ExternalReference::re_case_insensitive_compare_unicode(
+ isolate())
+ : ExternalReference::re_case_insensitive_compare_non_unicode(
+ isolate());
__ CallCFunction(function, argument_count);
}
diff --git a/chromium/v8/src/regexp/ppc/regexp-macro-assembler-ppc.h b/chromium/v8/src/regexp/ppc/regexp-macro-assembler-ppc.h
index 598691d9883..f6b959837fc 100644
--- a/chromium/v8/src/regexp/ppc/regexp-macro-assembler-ppc.h
+++ b/chromium/v8/src/regexp/ppc/regexp-macro-assembler-ppc.h
@@ -36,7 +36,7 @@ class V8_EXPORT_PRIVATE RegExpMacroAssemblerPPC
virtual void CheckNotBackReference(int start_reg, bool read_backward,
Label* on_no_match);
virtual void CheckNotBackReferenceIgnoreCase(int start_reg,
- bool read_backward,
+ bool read_backward, bool unicode,
Label* on_no_match);
virtual void CheckNotCharacter(unsigned c, Label* on_not_equal);
virtual void CheckNotCharacterAfterAnd(unsigned c, unsigned mask,
diff --git a/chromium/v8/src/regexp/regexp-ast.h b/chromium/v8/src/regexp/regexp-ast.h
index a9106d3d304..643e1fc983f 100644
--- a/chromium/v8/src/regexp/regexp-ast.h
+++ b/chromium/v8/src/regexp/regexp-ast.h
@@ -76,9 +76,8 @@ class Interval {
int to_;
};
-
-// Represents code units in the range from from_ to to_, both ends are
-// inclusive.
+// Represents code points (with values up to 0x10FFFF) in the range from from_
+// to to_, both ends are inclusive.
class CharacterRange {
public:
CharacterRange() : from_(0), to_(0) {}
diff --git a/chromium/v8/src/regexp/regexp-bytecode-generator.cc b/chromium/v8/src/regexp/regexp-bytecode-generator.cc
index e82b67b530a..8abd15384e7 100644
--- a/chromium/v8/src/regexp/regexp-bytecode-generator.cc
+++ b/chromium/v8/src/regexp/regexp-bytecode-generator.cc
@@ -182,7 +182,7 @@ void RegExpBytecodeGenerator::LoadCurrentCharacterImpl(int cp_offset,
int eats_at_least) {
DCHECK_GE(eats_at_least, characters);
if (eats_at_least > characters && check_bounds) {
- DCHECK(is_uint24(cp_offset + eats_at_least));
+ DCHECK(is_int24(cp_offset + eats_at_least));
Emit(BC_CHECK_CURRENT_POSITION, cp_offset + eats_at_least);
EmitOrLink(on_failure);
check_bounds = false; // Load below doesn't need to check.
@@ -329,11 +329,13 @@ void RegExpBytecodeGenerator::CheckNotBackReference(int start_reg,
}
void RegExpBytecodeGenerator::CheckNotBackReferenceIgnoreCase(
- int start_reg, bool read_backward, Label* on_not_equal) {
+ int start_reg, bool read_backward, bool unicode, Label* on_not_equal) {
DCHECK_LE(0, start_reg);
DCHECK_GE(kMaxRegister, start_reg);
- Emit(read_backward ? BC_CHECK_NOT_BACK_REF_NO_CASE_BACKWARD
- : BC_CHECK_NOT_BACK_REF_NO_CASE,
+ Emit(read_backward ? (unicode ? BC_CHECK_NOT_BACK_REF_NO_CASE_UNICODE_BACKWARD
+ : BC_CHECK_NOT_BACK_REF_NO_CASE_BACKWARD)
+ : (unicode ? BC_CHECK_NOT_BACK_REF_NO_CASE_UNICODE
+ : BC_CHECK_NOT_BACK_REF_NO_CASE),
start_reg);
EmitOrLink(on_not_equal);
}
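
The nested ternary above is easiest to read as a 2x2 selection keyed on (read_backward, unicode). A minimal sketch of the same mapping, assuming only the BC_* constants generated from BYTECODE_ITERATOR are in scope; the helper name is illustrative and not part of the patch:

  // Sketch only: the bytecode chosen above, flattened into a lookup table.
  inline int SelectNoCaseBackRefBytecode(bool read_backward, bool unicode) {
    const int table[2][2] = {
        {BC_CHECK_NOT_BACK_REF_NO_CASE, BC_CHECK_NOT_BACK_REF_NO_CASE_UNICODE},
        {BC_CHECK_NOT_BACK_REF_NO_CASE_BACKWARD,
         BC_CHECK_NOT_BACK_REF_NO_CASE_UNICODE_BACKWARD}};
    return table[read_backward ? 1 : 0][unicode ? 1 : 0];
  }
  // Usage: Emit(SelectNoCaseBackRefBytecode(read_backward, unicode), start_reg);
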
diff --git a/chromium/v8/src/regexp/regexp-bytecode-generator.h b/chromium/v8/src/regexp/regexp-bytecode-generator.h
index fdb9b468619..9c4b6057c23 100644
--- a/chromium/v8/src/regexp/regexp-bytecode-generator.h
+++ b/chromium/v8/src/regexp/regexp-bytecode-generator.h
@@ -69,6 +69,7 @@ class V8_EXPORT_PRIVATE RegExpBytecodeGenerator : public RegExpMacroAssembler {
void CheckNotBackReference(int start_reg, bool read_backward,
Label* on_no_match) override;
void CheckNotBackReferenceIgnoreCase(int start_reg, bool read_backward,
+ bool unicode,
Label* on_no_match) override;
void IfRegisterLT(int register_index, int comparand, Label* if_lt) override;
void IfRegisterGE(int register_index, int comparand, Label* if_ge) override;
diff --git a/chromium/v8/src/regexp/regexp-bytecode-peephole.cc b/chromium/v8/src/regexp/regexp-bytecode-peephole.cc
index f0957f0779a..dcbafac334f 100644
--- a/chromium/v8/src/regexp/regexp-bytecode-peephole.cc
+++ b/chromium/v8/src/regexp/regexp-bytecode-peephole.cc
@@ -187,7 +187,8 @@ class RegExpBytecodePeephole {
BytecodeSequenceNode& CreateSequence(int bytecode);
// Checks for optimization candidates at pc and emits optimized bytecode to
// the internal buffer. Returns the length of replaced bytecodes in bytes.
- int TryOptimizeSequence(const byte* bytecode, int start_pc);
+ int TryOptimizeSequence(const byte* bytecode, int bytecode_length,
+ int start_pc);
// Emits optimized bytecode to the internal buffer. start_pc points to the
// start of the sequence in bytecode and last_node is the last
// BytecodeSequenceNode of the matching sequence found.
@@ -626,7 +627,7 @@ bool RegExpBytecodePeephole::OptimizeBytecode(const byte* bytecode,
bool did_optimize = false;
while (old_pc < length) {
- int replaced_len = TryOptimizeSequence(bytecode, old_pc);
+ int replaced_len = TryOptimizeSequence(bytecode, length, old_pc);
if (replaced_len > 0) {
old_pc += replaced_len;
did_optimize = true;
@@ -659,6 +660,7 @@ BytecodeSequenceNode& RegExpBytecodePeephole::CreateSequence(int bytecode) {
}
int RegExpBytecodePeephole::TryOptimizeSequence(const byte* bytecode,
+ int bytecode_length,
int start_pc) {
BytecodeSequenceNode* seq_node = sequences_;
BytecodeSequenceNode* valid_seq_end = nullptr;
@@ -667,13 +669,12 @@ int RegExpBytecodePeephole::TryOptimizeSequence(const byte* bytecode,
// Check for the longest valid sequence matching any of the pre-defined
// sequences in the Trie data structure.
- while ((seq_node = seq_node->Find(bytecode[current_pc]))) {
- if (!seq_node->CheckArguments(bytecode, start_pc)) {
- break;
- }
- if (seq_node->IsSequence()) {
- valid_seq_end = seq_node;
- }
+ while (current_pc < bytecode_length) {
+ seq_node = seq_node->Find(bytecode[current_pc]);
+ if (seq_node == nullptr) break;
+ if (!seq_node->CheckArguments(bytecode, start_pc)) break;
+
+ if (seq_node->IsSequence()) valid_seq_end = seq_node;
current_pc += RegExpBytecodeLength(bytecode[current_pc]);
}
diff --git a/chromium/v8/src/regexp/regexp-bytecodes.h b/chromium/v8/src/regexp/regexp-bytecodes.h
index 1664a476d29..e3248d7b837 100644
--- a/chromium/v8/src/regexp/regexp-bytecodes.h
+++ b/chromium/v8/src/regexp/regexp-bytecodes.h
@@ -5,6 +5,7 @@
#ifndef V8_REGEXP_REGEXP_BYTECODES_H_
#define V8_REGEXP_REGEXP_BYTECODES_H_
+#include "src/base/bounds.h"
#include "src/base/macros.h"
#include "src/common/globals.h"
@@ -27,6 +28,7 @@ STATIC_ASSERT(1 << BYTECODE_SHIFT > BYTECODE_MASK);
// TODO(pthier): Argument offsets of bytecodes should be easily accessible by
// name or at least by position.
+// TODO(jgruber): More precise types (e.g. int32/uint32 instead of value32).
#define BYTECODE_ITERATOR(V) \
V(BREAK, 0, 4) /* bc8 */ \
V(PUSH_CP, 1, 4) /* bc8 pad24 */ \
@@ -101,12 +103,12 @@ STATIC_ASSERT(1 << BYTECODE_SHIFT > BYTECODE_MASK);
V(CHECK_BIT_IN_TABLE, 34, 24) /* bc8 pad24 addr32 bits128 */ \
V(CHECK_LT, 35, 8) /* bc8 pad8 uc16 addr32 */ \
V(CHECK_GT, 36, 8) /* bc8 pad8 uc16 addr32 */ \
- V(CHECK_NOT_BACK_REF, 37, 8) /* bc8 reg_idx24 addr32 */ \
- V(CHECK_NOT_BACK_REF_NO_CASE, 38, 8) /* bc8 reg_idx24 addr32 */ \
- V(CHECK_NOT_BACK_REF_NO_CASE_UNICODE, 39, 8) /* UNUSED */ \
+ V(CHECK_NOT_BACK_REF, 37, 8) /* bc8 reg_idx24 addr32 */ \
+ V(CHECK_NOT_BACK_REF_NO_CASE, 38, 8) /* bc8 reg_idx24 addr32 */ \
+ V(CHECK_NOT_BACK_REF_NO_CASE_UNICODE, 39, 8) \
V(CHECK_NOT_BACK_REF_BACKWARD, 40, 8) /* bc8 reg_idx24 addr32 */ \
V(CHECK_NOT_BACK_REF_NO_CASE_BACKWARD, 41, 8) /* bc8 reg_idx24 addr32 */ \
- V(CHECK_NOT_BACK_REF_NO_CASE_UNICODE_BACKWARD, 42, 8) /* UNUSED */ \
+ V(CHECK_NOT_BACK_REF_NO_CASE_UNICODE_BACKWARD, 42, 8) \
V(CHECK_NOT_REGS_EQUAL, 43, 12) /* bc8 regidx24 reg_idx32 addr32 */ \
V(CHECK_REGISTER_LT, 44, 12) /* bc8 reg_idx24 value32 addr32 */ \
V(CHECK_REGISTER_GE, 45, 12) /* bc8 reg_idx24 value32 addr32 */ \
@@ -229,16 +231,18 @@ static constexpr int kRegExpBytecodeLengths[] = {
};
inline constexpr int RegExpBytecodeLength(int bytecode) {
+ CONSTEXPR_DCHECK(base::IsInRange(bytecode, 0, kRegExpBytecodeCount - 1));
return kRegExpBytecodeLengths[bytecode];
}
-static const char* const kRegExpBytecodeNames[] = {
+static constexpr const char* const kRegExpBytecodeNames[] = {
#define DECLARE_BYTECODE_NAME(name, ...) #name,
BYTECODE_ITERATOR(DECLARE_BYTECODE_NAME)
#undef DECLARE_BYTECODE_NAME
};
-inline const char* RegExpBytecodeName(int bytecode) {
+inline constexpr const char* RegExpBytecodeName(int bytecode) {
+ CONSTEXPR_DCHECK(base::IsInRange(bytecode, 0, kRegExpBytecodeCount - 1));
return kRegExpBytecodeNames[bytecode];
}
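
Because the new bounds checks are CONSTEXPR_DCHECKs, both helpers remain usable in constant expressions. A minimal sketch of what that allows, assuming the existing BC_BREAK constant and V8's STATIC_ASSERT macro are in scope; the BREAK length of 4 comes from the V(BREAK, 0, 4) entry in BYTECODE_ITERATOR above:

  // Sketch only: compile-time queries against the bytecode tables.
  STATIC_ASSERT(RegExpBytecodeLength(BC_BREAK) == 4);
  // RegExpBytecodeName(BC_BREAK) likewise yields "BREAK" at compile time.
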
diff --git a/chromium/v8/src/regexp/regexp-compiler-tonode.cc b/chromium/v8/src/regexp/regexp-compiler-tonode.cc
index 9496de83e10..5fd53390797 100644
--- a/chromium/v8/src/regexp/regexp-compiler-tonode.cc
+++ b/chromium/v8/src/regexp/regexp-compiler-tonode.cc
@@ -56,11 +56,11 @@ static bool CompareInverseRanges(ZoneList<CharacterRange>* ranges,
return false;
}
for (int i = 0; i < length; i += 2) {
- if (special_class[i] != (range.to() + 1)) {
+ if (static_cast<uc32>(special_class[i]) != (range.to() + 1)) {
return false;
}
range = ranges->at((i >> 1) + 1);
- if (special_class[i + 1] != range.from()) {
+ if (static_cast<uc32>(special_class[i + 1]) != range.from()) {
return false;
}
}
@@ -79,8 +79,8 @@ static bool CompareRanges(ZoneList<CharacterRange>* ranges,
}
for (int i = 0; i < length; i += 2) {
CharacterRange range = ranges->at(i >> 1);
- if (range.from() != special_class[i] ||
- range.to() != special_class[i + 1] - 1) {
+ if (range.from() != static_cast<uc32>(special_class[i]) ||
+ range.to() != static_cast<uc32>(special_class[i + 1] - 1)) {
return false;
}
}
@@ -1154,7 +1154,7 @@ void CharacterRange::AddCaseEquivalents(Isolate* isolate, Zone* zone,
CharacterRange range = ranges->at(i);
uc32 from = range.from();
if (from > String::kMaxUtf16CodeUnit) continue;
- uc32 to = Min(range.to(), String::kMaxUtf16CodeUnit);
+ uc32 to = Min(range.to(), String::kMaxUtf16CodeUnitU);
// Nothing to be done for surrogates.
if (from >= kLeadSurrogateStart && to <= kTrailSurrogateEnd) continue;
if (is_one_byte && !RangeContainsLatin1Equivalents(range)) {
@@ -1197,7 +1197,7 @@ void CharacterRange::AddCaseEquivalents(Isolate* isolate, Zone* zone,
CharacterRange range = ranges->at(i);
uc32 bottom = range.from();
if (bottom > String::kMaxUtf16CodeUnit) continue;
- uc32 top = Min(range.to(), String::kMaxUtf16CodeUnit);
+ uc32 top = Min(range.to(), String::kMaxUtf16CodeUnitU);
// Nothing to be done for surrogates.
if (bottom >= kLeadSurrogateStart && top <= kTrailSurrogateEnd) continue;
if (is_one_byte && !RangeContainsLatin1Equivalents(range)) {
@@ -1232,7 +1232,7 @@ void CharacterRange::AddCaseEquivalents(Isolate* isolate, Zone* zone,
// block we do this for all the blocks covered by the range (handling
// characters that is not in a block as a "singleton block").
unibrow::uchar equivalents[unibrow::Ecma262UnCanonicalize::kMaxWidth];
- int pos = bottom;
+ uc32 pos = bottom;
while (pos <= top) {
int length =
isolate->jsregexp_canonrange()->get(pos, '\0', equivalents);
@@ -1265,7 +1265,7 @@ bool CharacterRange::IsCanonical(ZoneList<CharacterRange>* ranges) {
DCHECK_NOT_NULL(ranges);
int n = ranges->length();
if (n <= 1) return true;
- int max = ranges->at(0).to();
+ uc32 max = ranges->at(0).to();
for (int i = 1; i < n; i++) {
CharacterRange next_range = ranges->at(i);
if (next_range.from() <= max + 1) return false;
@@ -1366,7 +1366,7 @@ void CharacterRange::Canonicalize(ZoneList<CharacterRange>* character_ranges) {
// Check whether ranges are already canonical (increasing, non-overlapping,
// non-adjacent).
int n = character_ranges->length();
- int max = character_ranges->at(0).to();
+ uc32 max = character_ranges->at(0).to();
int i = 1;
while (i < n) {
CharacterRange current = character_ranges->at(i);
diff --git a/chromium/v8/src/regexp/regexp-compiler.cc b/chromium/v8/src/regexp/regexp-compiler.cc
index a04180fd346..58d598ca768 100644
--- a/chromium/v8/src/regexp/regexp-compiler.cc
+++ b/chromium/v8/src/regexp/regexp-compiler.cc
@@ -174,6 +174,24 @@ using namespace regexp_compiler_constants; // NOLINT(build/namespaces)
// trace is not recorded in the node and so it cannot currently be reused in
// the event that code generation is requested for an identical trace.
+namespace {
+
+constexpr uc32 MaxCodeUnit(const bool one_byte) {
+ STATIC_ASSERT(String::kMaxOneByteCharCodeU <=
+ std::numeric_limits<uint16_t>::max());
+ STATIC_ASSERT(String::kMaxUtf16CodeUnitU <=
+ std::numeric_limits<uint16_t>::max());
+ return one_byte ? String::kMaxOneByteCharCodeU : String::kMaxUtf16CodeUnitU;
+}
+
+constexpr uint32_t CharMask(const bool one_byte) {
+ STATIC_ASSERT(base::bits::IsPowerOfTwo(String::kMaxOneByteCharCodeU + 1));
+ STATIC_ASSERT(base::bits::IsPowerOfTwo(String::kMaxUtf16CodeUnitU + 1));
+ return MaxCodeUnit(one_byte);
+}
+
+} // namespace
+
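
CharMask is only valid because both maxima have the form 2^n - 1, which the STATIC_ASSERTs above enforce. A self-contained sketch of the idea; the 0xFF and 0xFFFF literals are assumptions of this sketch standing in for String::kMaxOneByteCharCodeU and String::kMaxUtf16CodeUnitU, not definitions from the patch:

  // Sketch only: why a "max code unit" value can double as a bit mask.
  #include <cstdint>
  constexpr uint32_t kMaxOneByte = 0xFF;   // assumed kMaxOneByteCharCodeU
  constexpr uint32_t kMaxUtf16 = 0xFFFF;   // assumed kMaxUtf16CodeUnitU
  constexpr uint32_t CharMaskSketch(bool one_byte) {
    return one_byte ? kMaxOneByte : kMaxUtf16;  // both are 2^n - 1
  }
  static_assert((0x1234u & CharMaskSketch(true)) == 0x34u, "masks to Latin-1");
  static_assert((0x1234u & CharMaskSketch(false)) == 0x1234u, "uc16 identity");
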
void RegExpTree::AppendToText(RegExpText* text, Zone* zone) { UNREACHABLE(); }
void RegExpAtom::AppendToText(RegExpText* text, Zone* zone) {
@@ -386,9 +404,7 @@ void Trace::PerformDeferredActions(RegExpMacroAssembler* assembler,
int pushes = 0;
for (int reg = 0; reg <= max_register; reg++) {
- if (!affected_registers.Get(reg)) {
- continue;
- }
+ if (!affected_registers.Get(reg)) continue;
// The chronologically first deferred action in the trace
// is used to infer the action needed to restore a register
@@ -710,6 +726,20 @@ void ChoiceNode::GenerateGuard(RegExpMacroAssembler* macro_assembler,
}
}
+namespace {
+
+#ifdef DEBUG
+bool ContainsOnlyUtf16CodeUnits(unibrow::uchar* chars, int length) {
+ STATIC_ASSERT(sizeof(unibrow::uchar) == 4);
+ for (int i = 0; i < length; i++) {
+ if (chars[i] > String::kMaxUtf16CodeUnit) return false;
+ }
+ return true;
+}
+#endif // DEBUG
+
+} // namespace
+
// Returns the number of characters in the equivalence class, omitting those
// that cannot occur in the source string because it is Latin1.
static int GetCaseIndependentLetters(Isolate* isolate, uc16 character,
@@ -719,6 +749,7 @@ static int GetCaseIndependentLetters(Isolate* isolate, uc16 character,
#ifdef V8_INTL_SUPPORT
if (RegExpCaseFolding::IgnoreSet().contains(character)) {
letters[0] = character;
+ DCHECK(ContainsOnlyUtf16CodeUnits(letters, 1));
return 1;
}
bool in_special_add_set =
@@ -744,9 +775,10 @@ static int GetCaseIndependentLetters(Isolate* isolate, uc16 character,
if (in_special_add_set && RegExpCaseFolding::Canonicalize(cu) != canon) {
continue;
}
- letters[items++] = (unibrow::uchar)(cu);
+ letters[items++] = static_cast<unibrow::uchar>(cu);
}
}
+ DCHECK(ContainsOnlyUtf16CodeUnits(letters, items));
return items;
#else
int length =
@@ -768,6 +800,7 @@ static int GetCaseIndependentLetters(Isolate* isolate, uc16 character,
length = new_length;
}
+ DCHECK(ContainsOnlyUtf16CodeUnits(letters, length));
return length;
#endif // V8_INTL_SUPPORT
}
@@ -820,12 +853,7 @@ static inline bool EmitAtomNonLetter(Isolate* isolate, RegExpCompiler* compiler,
static bool ShortCutEmitCharacterPair(RegExpMacroAssembler* macro_assembler,
bool one_byte, uc16 c1, uc16 c2,
Label* on_failure) {
- uc16 char_mask;
- if (one_byte) {
- char_mask = String::kMaxOneByteCharCode;
- } else {
- char_mask = String::kMaxUtf16CodeUnit;
- }
+ const uint32_t char_mask = CharMask(one_byte);
uc16 exor = c1 ^ c2;
// Check whether exor has only one bit set.
if (((exor - 1) & exor) == 0) {
@@ -1126,7 +1154,7 @@ static void GenerateBranches(RegExpMacroAssembler* masm, ZoneList<int>* ranges,
return;
}
- if ((min_char >> kBits) != (first >> kBits)) {
+ if ((min_char >> kBits) != static_cast<uc32>(first >> kBits)) {
masm->CheckCharacterLT(first, odd_label);
GenerateBranches(masm, ranges, start_index + 1, end_index, first, max_char,
fall_through, odd_label, even_label);
@@ -1185,21 +1213,13 @@ static void EmitCharClass(RegExpMacroAssembler* macro_assembler,
ZoneList<CharacterRange>* ranges = cc->ranges(zone);
CharacterRange::Canonicalize(ranges);
- int max_char;
- if (one_byte) {
- max_char = String::kMaxOneByteCharCode;
- } else {
- max_char = String::kMaxUtf16CodeUnit;
- }
-
+ const uc32 max_char = MaxCodeUnit(one_byte);
int range_count = ranges->length();
int last_valid_range = range_count - 1;
while (last_valid_range >= 0) {
CharacterRange& range = ranges->at(last_valid_range);
- if (range.from() <= max_char) {
- break;
- }
+ if (range.from() <= max_char) break;
last_valid_range--;
}
@@ -1240,6 +1260,7 @@ static void EmitCharClass(RegExpMacroAssembler* macro_assembler,
// entry at zero which goes to the failure label, but if there
// was already one there we fall through for success on that entry.
// Subsequent entries have alternating meaning (success/failure).
+ // TODO(jgruber,v8:10568): Change `range_boundaries` to a ZoneList<uc32>.
ZoneList<int>* range_boundaries =
new (zone) ZoneList<int>(last_valid_range, zone);
@@ -1256,7 +1277,7 @@ static void EmitCharClass(RegExpMacroAssembler* macro_assembler,
range_boundaries->Add(range.to() + 1, zone);
}
int end_index = range_boundaries->length() - 1;
- if (range_boundaries->at(end_index) > max_char) {
+ if (static_cast<uc32>(range_boundaries->at(end_index)) > max_char) {
end_index--;
}
@@ -1370,12 +1391,7 @@ static inline uint32_t SmearBitsRight(uint32_t v) {
bool QuickCheckDetails::Rationalize(bool asc) {
bool found_useful_op = false;
- uint32_t char_mask;
- if (asc) {
- char_mask = String::kMaxOneByteCharCode;
- } else {
- char_mask = String::kMaxUtf16CodeUnit;
- }
+ const uint32_t char_mask = CharMask(asc);
mask_ = 0;
value_ = 0;
int char_shift = 0;
@@ -1495,12 +1511,7 @@ bool RegExpNode::EmitQuickCheck(RegExpCompiler* compiler,
if (details->characters() == 1) {
// If number of characters preloaded is 1 then we used a byte or 16 bit
// load so the value is already masked down.
- uint32_t char_mask;
- if (compiler->one_byte()) {
- char_mask = String::kMaxOneByteCharCode;
- } else {
- char_mask = String::kMaxUtf16CodeUnit;
- }
+ const uint32_t char_mask = CharMask(compiler->one_byte());
if ((mask & char_mask) == char_mask) need_mask = false;
mask &= char_mask;
} else {
@@ -1551,12 +1562,7 @@ void TextNode::GetQuickCheckDetails(QuickCheckDetails* details,
Isolate* isolate = compiler->macro_assembler()->isolate();
DCHECK(characters_filled_in < details->characters());
int characters = details->characters();
- int char_mask;
- if (compiler->one_byte()) {
- char_mask = String::kMaxOneByteCharCode;
- } else {
- char_mask = String::kMaxUtf16CodeUnit;
- }
+ const uint32_t char_mask = CharMask(compiler->one_byte());
for (int k = 0; k < elements()->length(); k++) {
TextElement elm = elements()->at(k);
if (elm.text_type() == TextElement::ATOM) {
@@ -1645,26 +1651,22 @@ void TextNode::GetQuickCheckDetails(QuickCheckDetails* details,
}
}
CharacterRange range = ranges->at(first_range);
- uc16 from = range.from();
- uc16 to = range.to();
- if (to > char_mask) {
- to = char_mask;
- }
- uint32_t differing_bits = (from ^ to);
+ const uc32 first_from = range.from();
+ const uc32 first_to = (range.to() > char_mask) ? char_mask : range.to();
+ const uint32_t differing_bits = (first_from ^ first_to);
// A mask and compare is only perfect if the differing bits form a
// number like 00011111 with one single block of trailing 1s.
if ((differing_bits & (differing_bits + 1)) == 0 &&
- from + differing_bits == to) {
+ first_from + differing_bits == first_to) {
pos->determines_perfectly = true;
}
uint32_t common_bits = ~SmearBitsRight(differing_bits);
- uint32_t bits = (from & common_bits);
+ uint32_t bits = (first_from & common_bits);
for (int i = first_range + 1; i < ranges->length(); i++) {
CharacterRange range = ranges->at(i);
- uc16 from = range.from();
- uc16 to = range.to();
+ const uc32 from = range.from();
if (from > char_mask) continue;
- if (to > char_mask) to = char_mask;
+ const uc32 to = (range.to() > char_mask) ? char_mask : range.to();
// Here we are combining more ranges into the mask and compare
// value. With each new range the mask becomes more sparse and
// so the chances of a false positive rise. A character class
@@ -1684,9 +1686,7 @@ void TextNode::GetQuickCheckDetails(QuickCheckDetails* details,
}
characters_filled_in++;
DCHECK(characters_filled_in <= details->characters());
- if (characters_filled_in == details->characters()) {
- return;
- }
+ if (characters_filled_in == details->characters()) return;
}
}
DCHECK(characters_filled_in != details->characters());
@@ -1748,7 +1748,7 @@ void QuickCheckDetails::Merge(QuickCheckDetails* other, int from_index) {
pos->mask &= other_pos->mask;
pos->value &= pos->mask;
other_pos->value &= pos->mask;
- uc16 differing_bits = (pos->value ^ other_pos->value);
+ uint32_t differing_bits = (pos->value ^ other_pos->value);
pos->mask &= ~differing_bits;
pos->value &= pos->mask;
}
@@ -1858,16 +1858,20 @@ RegExpNode* TextNode::FilterOneByte(int depth) {
if (range_count != 0 && ranges->at(0).from() == 0 &&
ranges->at(0).to() >= String::kMaxOneByteCharCode) {
// This will be handled in a later filter.
- if (IgnoreCase(cc->flags()) && RangesContainLatin1Equivalents(ranges))
+ if (IgnoreCase(cc->flags()) &&
+ RangesContainLatin1Equivalents(ranges)) {
continue;
+ }
return set_replacement(nullptr);
}
} else {
if (range_count == 0 ||
ranges->at(0).from() > String::kMaxOneByteCharCode) {
// This will be handled in a later filter.
- if (IgnoreCase(cc->flags()) && RangesContainLatin1Equivalents(ranges))
+ if (IgnoreCase(cc->flags()) &&
+ RangesContainLatin1Equivalents(ranges)) {
continue;
+ }
return set_replacement(nullptr);
}
}
@@ -2504,12 +2508,7 @@ RegExpNode* TextNode::GetSuccessorOfOmnivorousTextNode(
return ranges->length() == 0 ? on_success() : nullptr;
}
if (ranges->length() != 1) return nullptr;
- uint32_t max_char;
- if (compiler->one_byte()) {
- max_char = String::kMaxOneByteCharCode;
- } else {
- max_char = String::kMaxUtf16CodeUnit;
- }
+ const uc32 max_char = MaxCodeUnit(compiler->one_byte());
return ranges->at(0).IsEverything(max_char) ? on_success() : nullptr;
}
@@ -2719,12 +2718,9 @@ void BoyerMoorePositionInfo::SetAll() {
BoyerMooreLookahead::BoyerMooreLookahead(int length, RegExpCompiler* compiler,
Zone* zone)
- : length_(length), compiler_(compiler) {
- if (compiler->one_byte()) {
- max_char_ = String::kMaxOneByteCharCode;
- } else {
- max_char_ = String::kMaxUtf16CodeUnit;
- }
+ : length_(length),
+ compiler_(compiler),
+ max_char_(MaxCodeUnit(compiler->one_byte())) {
bitmaps_ = new (zone) ZoneList<BoyerMoorePositionInfo*>(length, zone);
for (int i = 0; i < length; i++) {
bitmaps_->Add(new (zone) BoyerMoorePositionInfo(), zone);
@@ -3421,8 +3417,9 @@ void BackReferenceNode::Emit(RegExpCompiler* compiler, Trace* trace) {
DCHECK_EQ(start_reg_ + 1, end_reg_);
if (IgnoreCase(flags_)) {
+ bool unicode = IsUnicode(flags_);
assembler->CheckNotBackReferenceIgnoreCase(start_reg_, read_backward(),
- trace->backtrack());
+ unicode, trace->backtrack());
} else {
assembler->CheckNotBackReference(start_reg_, read_backward(),
trace->backtrack());
@@ -3787,7 +3784,7 @@ void TextNode::FillInBMInfo(Isolate* isolate, int initial_offset, int budget,
} else {
for (int k = 0; k < ranges->length(); k++) {
CharacterRange& range = ranges->at(k);
- if (range.from() > max_char) continue;
+ if (static_cast<int>(range.from()) > max_char) continue;
int to = Min(max_char, static_cast<int>(range.to()));
bm->SetInterval(offset, Interval(range.from(), to));
}
diff --git a/chromium/v8/src/regexp/regexp-compiler.h b/chromium/v8/src/regexp/regexp-compiler.h
index a35ffcd01a2..4e7652883c4 100644
--- a/chromium/v8/src/regexp/regexp-compiler.h
+++ b/chromium/v8/src/regexp/regexp-compiler.h
@@ -96,8 +96,8 @@ class QuickCheckDetails {
void set_cannot_match() { cannot_match_ = true; }
struct Position {
Position() : mask(0), value(0), determines_perfectly(false) {}
- uc16 mask;
- uc16 value;
+ uc32 mask;
+ uc32 value;
bool determines_perfectly;
};
int characters() { return characters_; }
diff --git a/chromium/v8/src/regexp/regexp-dotprinter.cc b/chromium/v8/src/regexp/regexp-dotprinter.cc
index b6640626f2c..7cf1e82c4d0 100644
--- a/chromium/v8/src/regexp/regexp-dotprinter.cc
+++ b/chromium/v8/src/regexp/regexp-dotprinter.cc
@@ -143,7 +143,7 @@ void DotPrinterImpl::VisitText(TextNode* that) {
if (node->is_negated()) os_ << "^";
for (int j = 0; j < node->ranges(zone)->length(); j++) {
CharacterRange range = node->ranges(zone)->at(j);
- os_ << AsUC16(range.from()) << "-" << AsUC16(range.to());
+ os_ << AsUC32(range.from()) << "-" << AsUC32(range.to());
}
os_ << "]";
break;
diff --git a/chromium/v8/src/regexp/regexp-interpreter.cc b/chromium/v8/src/regexp/regexp-interpreter.cc
index 0c6d8d5b4be..49215a25446 100644
--- a/chromium/v8/src/regexp/regexp-interpreter.cc
+++ b/chromium/v8/src/regexp/regexp-interpreter.cc
@@ -35,18 +35,23 @@ namespace internal {
namespace {
bool BackRefMatchesNoCase(Isolate* isolate, int from, int current, int len,
- Vector<const uc16> subject) {
+ Vector<const uc16> subject, bool unicode) {
Address offset_a =
reinterpret_cast<Address>(const_cast<uc16*>(&subject.at(from)));
Address offset_b =
reinterpret_cast<Address>(const_cast<uc16*>(&subject.at(current)));
size_t length = len * kUC16Size;
- return RegExpMacroAssembler::CaseInsensitiveCompareUC16(offset_a, offset_b,
- length, isolate) == 1;
+
+ bool result = unicode
+ ? RegExpMacroAssembler::CaseInsensitiveCompareUnicode(
+ offset_a, offset_b, length, isolate)
+ : RegExpMacroAssembler::CaseInsensitiveCompareNonUnicode(
+ offset_a, offset_b, length, isolate);
+ return result == 1;
}
bool BackRefMatchesNoCase(Isolate* isolate, int from, int current, int len,
- Vector<const uint8_t> subject) {
+ Vector<const uint8_t> subject, bool unicode) {
// For Latin1 characters the unicode flag makes no difference.
for (int i = 0; i < len; i++) {
unsigned int old_char = subject[from++];
@@ -100,6 +105,18 @@ int32_t Load16AlignedSigned(const byte* pc) {
return *reinterpret_cast<const int16_t*>(pc);
}
+// Helpers to access the packed argument. Takes the 32 bits containing the
+// current bytecode, where the 8 LSB contain the bytecode and the rest contains
+// a packed 24-bit argument.
+// TODO(jgruber): Specify signed-ness in bytecode signature declarations, and
+// police restrictions during bytecode generation.
+int32_t LoadPacked24Signed(int32_t bytecode_and_packed_arg) {
+ return bytecode_and_packed_arg >> BYTECODE_SHIFT;
+}
+uint32_t LoadPacked24Unsigned(int32_t bytecode_and_packed_arg) {
+ return static_cast<uint32_t>(bytecode_and_packed_arg) >> BYTECODE_SHIFT;
+}
+
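
The layout is: opcode in the 8 least significant bits, argument in the upper 24 bits. A self-contained sketch of the two unpack flavours, assuming BYTECODE_SHIFT == 8 and the two's-complement arithmetic right shift that LoadPacked24Signed itself relies on:

  // Sketch only: splitting a 32-bit instruction word into opcode and argument.
  #include <cstdint>
  constexpr int kShift = 8;  // assumed value of BYTECODE_SHIFT
  constexpr int32_t kInsn =
      static_cast<int32_t>(0xFFFFFE25u);  // argument -2, opcode 0x25
  static_assert((kInsn & 0xFF) == 0x25, "8 LSB hold the bytecode");
  static_assert((kInsn >> kShift) == -2, "LoadPacked24Signed: arithmetic shift");
  static_assert((static_cast<uint32_t>(kInsn) >> kShift) == 0xFFFFFEu,
                "LoadPacked24Unsigned: logical shift");
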
// A simple abstraction over the backtracking stack used by the interpreter.
//
// Despite the name 'backtracking' stack, it's actually used as a generic stack
@@ -296,6 +313,12 @@ bool CheckBitInTable(const uint32_t current_char, const byte* const table) {
return (b & (1 << bit)) != 0;
}
+// Returns true iff 0 <= index < length.
+bool IndexIsInBounds(int index, int length) {
+ DCHECK_GE(length, 0);
+ return static_cast<uintptr_t>(index) < static_cast<uintptr_t>(length);
+}
+
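
IndexIsInBounds folds the index >= 0 and index < length checks into one unsigned comparison, the same trick the SKIP_UNTIL_* handlers below previously spelled out inline. A minimal illustration:

  // Sketch only: a negative index wraps to a huge unsigned value, so the
  // single comparison rejects it as well.
  #include <cstdint>
  static_assert(static_cast<uintptr_t>(-1) > static_cast<uintptr_t>(1000),
                "negative indices compare above any non-negative length");
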
// If computed gotos are supported by the compiler, we can get addresses to
// labels directly in C/C++. Every bytecode handler has its own label and we
// store the addresses in a dispatch table indexed by bytecode. To execute the
@@ -337,6 +360,14 @@ bool CheckBitInTable(const uint32_t current_char, const byte* const table) {
next_pc = code_base + offset; \
DECODE()
+// Current position mutations.
+#define SET_CURRENT_POSITION(value) \
+ do { \
+ current = (value); \
+ DCHECK(base::IsInRange(current, 0, subject.length())); \
+ } while (false)
+#define ADVANCE_CURRENT_POSITION(by) SET_CURRENT_POSITION(current + (by))
+
#ifdef DEBUG
#define BYTECODE(name) \
BC_LABEL(name) \
@@ -447,44 +478,44 @@ IrregexpInterpreter::Result RawMatch(
}
BYTECODE(PUSH_REGISTER) {
ADVANCE(PUSH_REGISTER);
- if (!backtrack_stack.push(registers[insn >> BYTECODE_SHIFT])) {
+ if (!backtrack_stack.push(registers[LoadPacked24Unsigned(insn)])) {
return MaybeThrowStackOverflow(isolate, call_origin);
}
DISPATCH();
}
BYTECODE(SET_REGISTER) {
ADVANCE(SET_REGISTER);
- registers[insn >> BYTECODE_SHIFT] = Load32Aligned(pc + 4);
+ registers[LoadPacked24Unsigned(insn)] = Load32Aligned(pc + 4);
DISPATCH();
}
BYTECODE(ADVANCE_REGISTER) {
ADVANCE(ADVANCE_REGISTER);
- registers[insn >> BYTECODE_SHIFT] += Load32Aligned(pc + 4);
+ registers[LoadPacked24Unsigned(insn)] += Load32Aligned(pc + 4);
DISPATCH();
}
BYTECODE(SET_REGISTER_TO_CP) {
ADVANCE(SET_REGISTER_TO_CP);
- registers[insn >> BYTECODE_SHIFT] = current + Load32Aligned(pc + 4);
+ registers[LoadPacked24Unsigned(insn)] = current + Load32Aligned(pc + 4);
DISPATCH();
}
BYTECODE(SET_CP_TO_REGISTER) {
ADVANCE(SET_CP_TO_REGISTER);
- current = registers[insn >> BYTECODE_SHIFT];
+ SET_CURRENT_POSITION(registers[LoadPacked24Unsigned(insn)]);
DISPATCH();
}
BYTECODE(SET_REGISTER_TO_SP) {
ADVANCE(SET_REGISTER_TO_SP);
- registers[insn >> BYTECODE_SHIFT] = backtrack_stack.sp();
+ registers[LoadPacked24Unsigned(insn)] = backtrack_stack.sp();
DISPATCH();
}
BYTECODE(SET_SP_TO_REGISTER) {
ADVANCE(SET_SP_TO_REGISTER);
- backtrack_stack.set_sp(registers[insn >> BYTECODE_SHIFT]);
+ backtrack_stack.set_sp(registers[LoadPacked24Unsigned(insn)]);
DISPATCH();
}
BYTECODE(POP_CP) {
ADVANCE(POP_CP);
- current = backtrack_stack.pop();
+ SET_CURRENT_POSITION(backtrack_stack.pop());
DISPATCH();
}
BYTECODE(POP_BT) {
@@ -504,7 +535,7 @@ IrregexpInterpreter::Result RawMatch(
}
BYTECODE(POP_REGISTER) {
ADVANCE(POP_REGISTER);
- registers[insn >> BYTECODE_SHIFT] = backtrack_stack.pop();
+ registers[LoadPacked24Unsigned(insn)] = backtrack_stack.pop();
DISPATCH();
}
BYTECODE(FAIL) {
@@ -520,7 +551,7 @@ IrregexpInterpreter::Result RawMatch(
}
BYTECODE(ADVANCE_CP) {
ADVANCE(ADVANCE_CP);
- current += insn >> BYTECODE_SHIFT;
+ ADVANCE_CURRENT_POSITION(LoadPacked24Signed(insn));
DISPATCH();
}
BYTECODE(GOTO) {
@@ -529,7 +560,7 @@ IrregexpInterpreter::Result RawMatch(
}
BYTECODE(ADVANCE_CP_AND_GOTO) {
SET_PC_FROM_OFFSET(Load32Aligned(pc + 4));
- current += insn >> BYTECODE_SHIFT;
+ ADVANCE_CURRENT_POSITION(LoadPacked24Signed(insn));
DISPATCH();
}
BYTECODE(CHECK_GREEDY) {
@@ -542,7 +573,7 @@ IrregexpInterpreter::Result RawMatch(
DISPATCH();
}
BYTECODE(LOAD_CURRENT_CHAR) {
- int pos = current + (insn >> BYTECODE_SHIFT);
+ int pos = current + LoadPacked24Signed(insn);
if (pos >= subject.length() || pos < 0) {
SET_PC_FROM_OFFSET(Load32Aligned(pc + 4));
} else {
@@ -553,12 +584,12 @@ IrregexpInterpreter::Result RawMatch(
}
BYTECODE(LOAD_CURRENT_CHAR_UNCHECKED) {
ADVANCE(LOAD_CURRENT_CHAR_UNCHECKED);
- int pos = current + (insn >> BYTECODE_SHIFT);
+ int pos = current + LoadPacked24Signed(insn);
current_char = subject[pos];
DISPATCH();
}
BYTECODE(LOAD_2_CURRENT_CHARS) {
- int pos = current + (insn >> BYTECODE_SHIFT);
+ int pos = current + LoadPacked24Signed(insn);
if (pos + 2 > subject.length() || pos < 0) {
SET_PC_FROM_OFFSET(Load32Aligned(pc + 4));
} else {
@@ -570,14 +601,14 @@ IrregexpInterpreter::Result RawMatch(
}
BYTECODE(LOAD_2_CURRENT_CHARS_UNCHECKED) {
ADVANCE(LOAD_2_CURRENT_CHARS_UNCHECKED);
- int pos = current + (insn >> BYTECODE_SHIFT);
+ int pos = current + LoadPacked24Signed(insn);
Char next = subject[pos + 1];
current_char = (subject[pos] | (next << (kBitsPerByte * sizeof(Char))));
DISPATCH();
}
BYTECODE(LOAD_4_CURRENT_CHARS) {
DCHECK_EQ(1, sizeof(Char));
- int pos = current + (insn >> BYTECODE_SHIFT);
+ int pos = current + LoadPacked24Signed(insn);
if (pos + 4 > subject.length() || pos < 0) {
SET_PC_FROM_OFFSET(Load32Aligned(pc + 4));
} else {
@@ -593,7 +624,7 @@ IrregexpInterpreter::Result RawMatch(
BYTECODE(LOAD_4_CURRENT_CHARS_UNCHECKED) {
ADVANCE(LOAD_4_CURRENT_CHARS_UNCHECKED);
DCHECK_EQ(1, sizeof(Char));
- int pos = current + (insn >> BYTECODE_SHIFT);
+ int pos = current + LoadPacked24Signed(insn);
Char next1 = subject[pos + 1];
Char next2 = subject[pos + 2];
Char next3 = subject[pos + 3];
@@ -611,7 +642,7 @@ IrregexpInterpreter::Result RawMatch(
DISPATCH();
}
BYTECODE(CHECK_CHAR) {
- uint32_t c = (insn >> BYTECODE_SHIFT);
+ uint32_t c = LoadPacked24Unsigned(insn);
if (c == current_char) {
SET_PC_FROM_OFFSET(Load32Aligned(pc + 4));
} else {
@@ -629,7 +660,7 @@ IrregexpInterpreter::Result RawMatch(
DISPATCH();
}
BYTECODE(CHECK_NOT_CHAR) {
- uint32_t c = (insn >> BYTECODE_SHIFT);
+ uint32_t c = LoadPacked24Unsigned(insn);
if (c != current_char) {
SET_PC_FROM_OFFSET(Load32Aligned(pc + 4));
} else {
@@ -647,7 +678,7 @@ IrregexpInterpreter::Result RawMatch(
DISPATCH();
}
BYTECODE(AND_CHECK_CHAR) {
- uint32_t c = (insn >> BYTECODE_SHIFT);
+ uint32_t c = LoadPacked24Unsigned(insn);
if (c == (current_char & Load32Aligned(pc + 4))) {
SET_PC_FROM_OFFSET(Load32Aligned(pc + 8));
} else {
@@ -665,7 +696,7 @@ IrregexpInterpreter::Result RawMatch(
DISPATCH();
}
BYTECODE(AND_CHECK_NOT_CHAR) {
- uint32_t c = (insn >> BYTECODE_SHIFT);
+ uint32_t c = LoadPacked24Unsigned(insn);
if (c != (current_char & Load32Aligned(pc + 4))) {
SET_PC_FROM_OFFSET(Load32Aligned(pc + 8));
} else {
@@ -674,7 +705,7 @@ IrregexpInterpreter::Result RawMatch(
DISPATCH();
}
BYTECODE(MINUS_AND_CHECK_NOT_CHAR) {
- uint32_t c = (insn >> BYTECODE_SHIFT);
+ uint32_t c = LoadPacked24Unsigned(insn);
uint32_t minus = Load16Aligned(pc + 4);
uint32_t mask = Load16Aligned(pc + 6);
if (c != ((current_char - minus) & mask)) {
@@ -713,7 +744,7 @@ IrregexpInterpreter::Result RawMatch(
DISPATCH();
}
BYTECODE(CHECK_LT) {
- uint32_t limit = (insn >> BYTECODE_SHIFT);
+ uint32_t limit = LoadPacked24Unsigned(insn);
if (current_char < limit) {
SET_PC_FROM_OFFSET(Load32Aligned(pc + 4));
} else {
@@ -722,7 +753,7 @@ IrregexpInterpreter::Result RawMatch(
DISPATCH();
}
BYTECODE(CHECK_GT) {
- uint32_t limit = (insn >> BYTECODE_SHIFT);
+ uint32_t limit = LoadPacked24Unsigned(insn);
if (current_char > limit) {
SET_PC_FROM_OFFSET(Load32Aligned(pc + 4));
} else {
@@ -731,7 +762,7 @@ IrregexpInterpreter::Result RawMatch(
DISPATCH();
}
BYTECODE(CHECK_REGISTER_LT) {
- if (registers[insn >> BYTECODE_SHIFT] < Load32Aligned(pc + 4)) {
+ if (registers[LoadPacked24Unsigned(insn)] < Load32Aligned(pc + 4)) {
SET_PC_FROM_OFFSET(Load32Aligned(pc + 8));
} else {
ADVANCE(CHECK_REGISTER_LT);
@@ -739,7 +770,7 @@ IrregexpInterpreter::Result RawMatch(
DISPATCH();
}
BYTECODE(CHECK_REGISTER_GE) {
- if (registers[insn >> BYTECODE_SHIFT] >= Load32Aligned(pc + 4)) {
+ if (registers[LoadPacked24Unsigned(insn)] >= Load32Aligned(pc + 4)) {
SET_PC_FROM_OFFSET(Load32Aligned(pc + 8));
} else {
ADVANCE(CHECK_REGISTER_GE);
@@ -747,7 +778,7 @@ IrregexpInterpreter::Result RawMatch(
DISPATCH();
}
BYTECODE(CHECK_REGISTER_EQ_POS) {
- if (registers[insn >> BYTECODE_SHIFT] == current) {
+ if (registers[LoadPacked24Unsigned(insn)] == current) {
SET_PC_FROM_OFFSET(Load32Aligned(pc + 4));
} else {
ADVANCE(CHECK_REGISTER_EQ_POS);
@@ -755,7 +786,7 @@ IrregexpInterpreter::Result RawMatch(
DISPATCH();
}
BYTECODE(CHECK_NOT_REGS_EQUAL) {
- if (registers[insn >> BYTECODE_SHIFT] ==
+ if (registers[LoadPacked24Unsigned(insn)] ==
registers[Load32Aligned(pc + 4)]) {
ADVANCE(CHECK_NOT_REGS_EQUAL);
} else {
@@ -764,69 +795,94 @@ IrregexpInterpreter::Result RawMatch(
DISPATCH();
}
BYTECODE(CHECK_NOT_BACK_REF) {
- int from = registers[insn >> BYTECODE_SHIFT];
- int len = registers[(insn >> BYTECODE_SHIFT) + 1] - from;
+ int from = registers[LoadPacked24Unsigned(insn)];
+ int len = registers[LoadPacked24Unsigned(insn) + 1] - from;
if (from >= 0 && len > 0) {
if (current + len > subject.length() ||
CompareChars(&subject[from], &subject[current], len) != 0) {
SET_PC_FROM_OFFSET(Load32Aligned(pc + 4));
DISPATCH();
}
- current += len;
+ ADVANCE_CURRENT_POSITION(len);
}
ADVANCE(CHECK_NOT_BACK_REF);
DISPATCH();
}
BYTECODE(CHECK_NOT_BACK_REF_BACKWARD) {
- int from = registers[insn >> BYTECODE_SHIFT];
- int len = registers[(insn >> BYTECODE_SHIFT) + 1] - from;
+ int from = registers[LoadPacked24Unsigned(insn)];
+ int len = registers[LoadPacked24Unsigned(insn) + 1] - from;
if (from >= 0 && len > 0) {
if (current - len < 0 ||
CompareChars(&subject[from], &subject[current - len], len) != 0) {
SET_PC_FROM_OFFSET(Load32Aligned(pc + 4));
DISPATCH();
}
- current -= len;
+ SET_CURRENT_POSITION(current - len);
}
ADVANCE(CHECK_NOT_BACK_REF_BACKWARD);
DISPATCH();
}
BYTECODE(CHECK_NOT_BACK_REF_NO_CASE_UNICODE) {
- UNREACHABLE(); // TODO(jgruber): Remove this unused bytecode.
+ int from = registers[LoadPacked24Unsigned(insn)];
+ int len = registers[LoadPacked24Unsigned(insn) + 1] - from;
+ if (from >= 0 && len > 0) {
+ if (current + len > subject.length() ||
+ !BackRefMatchesNoCase(isolate, from, current, len, subject, true)) {
+ SET_PC_FROM_OFFSET(Load32Aligned(pc + 4));
+ DISPATCH();
+ }
+ ADVANCE_CURRENT_POSITION(len);
+ }
+ ADVANCE(CHECK_NOT_BACK_REF_NO_CASE_UNICODE);
+ DISPATCH();
}
BYTECODE(CHECK_NOT_BACK_REF_NO_CASE) {
- int from = registers[insn >> BYTECODE_SHIFT];
- int len = registers[(insn >> BYTECODE_SHIFT) + 1] - from;
+ int from = registers[LoadPacked24Unsigned(insn)];
+ int len = registers[LoadPacked24Unsigned(insn) + 1] - from;
if (from >= 0 && len > 0) {
if (current + len > subject.length() ||
- !BackRefMatchesNoCase(isolate, from, current, len, subject)) {
+ !BackRefMatchesNoCase(isolate, from, current, len, subject,
+ false)) {
SET_PC_FROM_OFFSET(Load32Aligned(pc + 4));
DISPATCH();
}
- current += len;
+ ADVANCE_CURRENT_POSITION(len);
}
ADVANCE(CHECK_NOT_BACK_REF_NO_CASE);
DISPATCH();
}
BYTECODE(CHECK_NOT_BACK_REF_NO_CASE_UNICODE_BACKWARD) {
- UNREACHABLE(); // TODO(jgruber): Remove this unused bytecode.
+ int from = registers[LoadPacked24Unsigned(insn)];
+ int len = registers[LoadPacked24Unsigned(insn) + 1] - from;
+ if (from >= 0 && len > 0) {
+ if (current - len < 0 ||
+ !BackRefMatchesNoCase(isolate, from, current - len, len, subject,
+ true)) {
+ SET_PC_FROM_OFFSET(Load32Aligned(pc + 4));
+ DISPATCH();
+ }
+ SET_CURRENT_POSITION(current - len);
+ }
+ ADVANCE(CHECK_NOT_BACK_REF_NO_CASE_UNICODE_BACKWARD);
+ DISPATCH();
}
BYTECODE(CHECK_NOT_BACK_REF_NO_CASE_BACKWARD) {
- int from = registers[insn >> BYTECODE_SHIFT];
- int len = registers[(insn >> BYTECODE_SHIFT) + 1] - from;
+ int from = registers[LoadPacked24Unsigned(insn)];
+ int len = registers[LoadPacked24Unsigned(insn) + 1] - from;
if (from >= 0 && len > 0) {
if (current - len < 0 ||
- !BackRefMatchesNoCase(isolate, from, current - len, len, subject)) {
+ !BackRefMatchesNoCase(isolate, from, current - len, len, subject,
+ false)) {
SET_PC_FROM_OFFSET(Load32Aligned(pc + 4));
DISPATCH();
}
- current -= len;
+ SET_CURRENT_POSITION(current - len);
}
ADVANCE(CHECK_NOT_BACK_REF_NO_CASE_BACKWARD);
DISPATCH();
}
BYTECODE(CHECK_AT_START) {
- if (current + (insn >> BYTECODE_SHIFT) == 0) {
+ if (current + LoadPacked24Signed(insn) == 0) {
SET_PC_FROM_OFFSET(Load32Aligned(pc + 4));
} else {
ADVANCE(CHECK_AT_START);
@@ -834,7 +890,7 @@ IrregexpInterpreter::Result RawMatch(
DISPATCH();
}
BYTECODE(CHECK_NOT_AT_START) {
- if (current + (insn >> BYTECODE_SHIFT) == 0) {
+ if (current + LoadPacked24Signed(insn) == 0) {
ADVANCE(CHECK_NOT_AT_START);
} else {
SET_PC_FROM_OFFSET(Load32Aligned(pc + 4));
@@ -843,15 +899,15 @@ IrregexpInterpreter::Result RawMatch(
}
BYTECODE(SET_CURRENT_POSITION_FROM_END) {
ADVANCE(SET_CURRENT_POSITION_FROM_END);
- int by = static_cast<uint32_t>(insn) >> BYTECODE_SHIFT;
+ int by = LoadPacked24Unsigned(insn);
if (subject.length() - current > by) {
- current = subject.length() - by;
+ SET_CURRENT_POSITION(subject.length() - by);
current_char = subject[current - 1];
}
DISPATCH();
}
BYTECODE(CHECK_CURRENT_POSITION) {
- int pos = current + (insn >> BYTECODE_SHIFT);
+ int pos = current + LoadPacked24Signed(insn);
if (pos > subject.length() || pos < 0) {
SET_PC_FROM_OFFSET(Load32Aligned(pc + 4));
} else {
@@ -860,23 +916,22 @@ IrregexpInterpreter::Result RawMatch(
DISPATCH();
}
BYTECODE(SKIP_UNTIL_CHAR) {
- int load_offset = (insn >> BYTECODE_SHIFT);
+ int32_t load_offset = LoadPacked24Signed(insn);
int32_t advance = Load16AlignedSigned(pc + 4);
uint32_t c = Load16Aligned(pc + 6);
- while (static_cast<uintptr_t>(current + load_offset) <
- static_cast<uintptr_t>(subject.length())) {
+ while (IndexIsInBounds(current + load_offset, subject.length())) {
current_char = subject[current + load_offset];
if (c == current_char) {
SET_PC_FROM_OFFSET(Load32Aligned(pc + 8));
DISPATCH();
}
- current += advance;
+ ADVANCE_CURRENT_POSITION(advance);
}
SET_PC_FROM_OFFSET(Load32Aligned(pc + 12));
DISPATCH();
}
BYTECODE(SKIP_UNTIL_CHAR_AND) {
- int load_offset = (insn >> BYTECODE_SHIFT);
+ int32_t load_offset = LoadPacked24Signed(insn);
int32_t advance = Load16AlignedSigned(pc + 4);
uint16_t c = Load16Aligned(pc + 6);
uint32_t mask = Load32Aligned(pc + 8);
@@ -888,13 +943,13 @@ IrregexpInterpreter::Result RawMatch(
SET_PC_FROM_OFFSET(Load32Aligned(pc + 16));
DISPATCH();
}
- current += advance;
+ ADVANCE_CURRENT_POSITION(advance);
}
SET_PC_FROM_OFFSET(Load32Aligned(pc + 20));
DISPATCH();
}
BYTECODE(SKIP_UNTIL_CHAR_POS_CHECKED) {
- int load_offset = (insn >> BYTECODE_SHIFT);
+ int32_t load_offset = LoadPacked24Signed(insn);
int32_t advance = Load16AlignedSigned(pc + 4);
uint16_t c = Load16Aligned(pc + 6);
int32_t maximum_offset = Load32Aligned(pc + 8);
@@ -905,34 +960,32 @@ IrregexpInterpreter::Result RawMatch(
SET_PC_FROM_OFFSET(Load32Aligned(pc + 12));
DISPATCH();
}
- current += advance;
+ ADVANCE_CURRENT_POSITION(advance);
}
SET_PC_FROM_OFFSET(Load32Aligned(pc + 16));
DISPATCH();
}
BYTECODE(SKIP_UNTIL_BIT_IN_TABLE) {
- int load_offset = (insn >> BYTECODE_SHIFT);
+ int32_t load_offset = LoadPacked24Signed(insn);
int32_t advance = Load16AlignedSigned(pc + 4);
const byte* table = pc + 8;
- while (static_cast<uintptr_t>(current + load_offset) <
- static_cast<uintptr_t>(subject.length())) {
+ while (IndexIsInBounds(current + load_offset, subject.length())) {
current_char = subject[current + load_offset];
if (CheckBitInTable(current_char, table)) {
SET_PC_FROM_OFFSET(Load32Aligned(pc + 24));
DISPATCH();
}
- current += advance;
+ ADVANCE_CURRENT_POSITION(advance);
}
SET_PC_FROM_OFFSET(Load32Aligned(pc + 28));
DISPATCH();
}
BYTECODE(SKIP_UNTIL_GT_OR_NOT_BIT_IN_TABLE) {
- int load_offset = (insn >> BYTECODE_SHIFT);
+ int32_t load_offset = LoadPacked24Signed(insn);
int32_t advance = Load16AlignedSigned(pc + 4);
uint16_t limit = Load16Aligned(pc + 6);
const byte* table = pc + 8;
- while (static_cast<uintptr_t>(current + load_offset) <
- static_cast<uintptr_t>(subject.length())) {
+ while (IndexIsInBounds(current + load_offset, subject.length())) {
current_char = subject[current + load_offset];
if (current_char > limit) {
SET_PC_FROM_OFFSET(Load32Aligned(pc + 24));
@@ -942,18 +995,17 @@ IrregexpInterpreter::Result RawMatch(
SET_PC_FROM_OFFSET(Load32Aligned(pc + 24));
DISPATCH();
}
- current += advance;
+ ADVANCE_CURRENT_POSITION(advance);
}
SET_PC_FROM_OFFSET(Load32Aligned(pc + 28));
DISPATCH();
}
BYTECODE(SKIP_UNTIL_CHAR_OR_CHAR) {
- int load_offset = (insn >> BYTECODE_SHIFT);
+ int32_t load_offset = LoadPacked24Signed(insn);
int32_t advance = Load32Aligned(pc + 4);
uint16_t c = Load16Aligned(pc + 8);
uint16_t c2 = Load16Aligned(pc + 10);
- while (static_cast<uintptr_t>(current + load_offset) <
- static_cast<uintptr_t>(subject.length())) {
+ while (IndexIsInBounds(current + load_offset, subject.length())) {
current_char = subject[current + load_offset];
// The two if-statements below are split up intentionally, as combining
// them seems to result in register allocation behaving quite
@@ -966,7 +1018,7 @@ IrregexpInterpreter::Result RawMatch(
SET_PC_FROM_OFFSET(Load32Aligned(pc + 12));
DISPATCH();
}
- current += advance;
+ ADVANCE_CURRENT_POSITION(advance);
}
SET_PC_FROM_OFFSET(Load32Aligned(pc + 16));
DISPATCH();
@@ -986,6 +1038,8 @@ IrregexpInterpreter::Result RawMatch(
}
#undef BYTECODE
+#undef ADVANCE_CURRENT_POSITION
+#undef SET_CURRENT_POSITION
#undef DISPATCH
#undef DECODE
#undef SET_PC_FROM_OFFSET
diff --git a/chromium/v8/src/regexp/regexp-macro-assembler-tracer.cc b/chromium/v8/src/regexp/regexp-macro-assembler-tracer.cc
index 0a122017437..d1feec4c33d 100644
--- a/chromium/v8/src/regexp/regexp-macro-assembler-tracer.cc
+++ b/chromium/v8/src/regexp/regexp-macro-assembler-tracer.cc
@@ -352,11 +352,11 @@ void RegExpMacroAssemblerTracer::CheckNotBackReference(int start_reg,
}
void RegExpMacroAssemblerTracer::CheckNotBackReferenceIgnoreCase(
- int start_reg, bool read_backward, Label* on_no_match) {
- PrintF(" CheckNotBackReferenceIgnoreCase(register=%d, %s, label[%08x]);\n",
+ int start_reg, bool read_backward, bool unicode, Label* on_no_match) {
+ PrintF(" CheckNotBackReferenceIgnoreCase(register=%d, %s %s, label[%08x]);\n",
start_reg, read_backward ? "backward" : "forward",
- LabelToInt(on_no_match));
- assembler_->CheckNotBackReferenceIgnoreCase(start_reg, read_backward,
+ unicode ? "unicode" : "non-unicode", LabelToInt(on_no_match));
+ assembler_->CheckNotBackReferenceIgnoreCase(start_reg, read_backward, unicode,
on_no_match);
}
diff --git a/chromium/v8/src/regexp/regexp-macro-assembler-tracer.h b/chromium/v8/src/regexp/regexp-macro-assembler-tracer.h
index b6ad63071f4..2a44146e738 100644
--- a/chromium/v8/src/regexp/regexp-macro-assembler-tracer.h
+++ b/chromium/v8/src/regexp/regexp-macro-assembler-tracer.h
@@ -33,6 +33,7 @@ class RegExpMacroAssemblerTracer: public RegExpMacroAssembler {
void CheckNotBackReference(int start_reg, bool read_backward,
Label* on_no_match) override;
void CheckNotBackReferenceIgnoreCase(int start_reg, bool read_backward,
+ bool unicode,
Label* on_no_match) override;
void CheckNotCharacter(unsigned c, Label* on_not_equal) override;
void CheckNotCharacterAfterAnd(unsigned c, unsigned and_with,
diff --git a/chromium/v8/src/regexp/regexp-macro-assembler.cc b/chromium/v8/src/regexp/regexp-macro-assembler.cc
index 6cc9cae6e1d..cf4346309eb 100644
--- a/chromium/v8/src/regexp/regexp-macro-assembler.cc
+++ b/chromium/v8/src/regexp/regexp-macro-assembler.cc
@@ -9,6 +9,7 @@
#include "src/execution/pointer-authentication.h"
#include "src/execution/simulator.h"
#include "src/regexp/regexp-stack.h"
+#include "src/regexp/special-case.h"
#include "src/strings/unicode-inl.h"
#ifdef V8_INTL_SUPPORT
@@ -27,17 +28,46 @@ RegExpMacroAssembler::RegExpMacroAssembler(Isolate* isolate, Zone* zone)
RegExpMacroAssembler::~RegExpMacroAssembler() = default;
-int RegExpMacroAssembler::CaseInsensitiveCompareUC16(Address byte_offset1,
- Address byte_offset2,
- size_t byte_length,
- Isolate* isolate) {
+int RegExpMacroAssembler::CaseInsensitiveCompareNonUnicode(Address byte_offset1,
+ Address byte_offset2,
+ size_t byte_length,
+ Isolate* isolate) {
+#ifdef V8_INTL_SUPPORT
+ // This function is not allowed to cause a garbage collection.
+ // A GC might move the calling generated code and invalidate the
+ // return address on the stack.
+ DisallowHeapAllocation no_gc;
+ DCHECK_EQ(0, byte_length % 2);
+ size_t length = byte_length / 2;
+ uc16* substring1 = reinterpret_cast<uc16*>(byte_offset1);
+ uc16* substring2 = reinterpret_cast<uc16*>(byte_offset2);
+
+ for (size_t i = 0; i < length; i++) {
+ UChar32 c1 = RegExpCaseFolding::Canonicalize(substring1[i]);
+ UChar32 c2 = RegExpCaseFolding::Canonicalize(substring2[i]);
+ if (c1 != c2) {
+ return 0;
+ }
+ }
+ return 1;
+#else
+ return CaseInsensitiveCompareUnicode(byte_offset1, byte_offset2, byte_length,
+ isolate);
+#endif
+}
+
+int RegExpMacroAssembler::CaseInsensitiveCompareUnicode(Address byte_offset1,
+ Address byte_offset2,
+ size_t byte_length,
+ Isolate* isolate) {
// This function is not allowed to cause a garbage collection.
// A GC might move the calling generated code and invalidate the
// return address on the stack.
+ DisallowHeapAllocation no_gc;
DCHECK_EQ(0, byte_length % 2);
#ifdef V8_INTL_SUPPORT
- int32_t length = (int32_t)(byte_length >> 1);
+ int32_t length = static_cast<int32_t>(byte_length >> 1);
icu::UnicodeString uni_str_1(reinterpret_cast<const char16_t*>(byte_offset1),
length);
return uni_str_1.caseCompare(reinterpret_cast<const char16_t*>(byte_offset2),
@@ -68,7 +98,6 @@ int RegExpMacroAssembler::CaseInsensitiveCompareUC16(Address byte_offset1,
#endif // V8_INTL_SUPPORT
}
-
void RegExpMacroAssembler::CheckNotInSurrogatePair(int cp_offset,
Label* on_failure) {
Label ok;
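
The split matters because the two comparators implement different equivalences: the non-unicode path canonicalizes each UTF-16 code unit with the spec's Canonicalize rule (which rejects mappings that cross the ASCII boundary), while the unicode path uses ICU's full, locale-independent case folding. A self-contained sketch of the observable difference, written against plain ICU rather than V8's RegExpCaseFolding, and simplifying Canonicalize (the spec version also bails out on multi-code-unit uppercase results):

  // Sketch only: U+017F (LATIN SMALL LETTER LONG S) vs 's'.
  #include <unicode/uchar.h>

  bool EqualNonUnicodeIgnoreCase(UChar32 a, UChar32 b) {
    UChar32 ua = u_toupper(a), ub = u_toupper(b);
    if (a >= 128 && ua < 128) ua = a;  // spec: no non-ASCII -> ASCII mapping
    if (b >= 128 && ub < 128) ub = b;
    return ua == ub;
  }
  bool EqualUnicodeIgnoreCase(UChar32 a, UChar32 b) {
    return u_foldCase(a, U_FOLD_CASE_DEFAULT) ==
           u_foldCase(b, U_FOLD_CASE_DEFAULT);
  }
  // EqualNonUnicodeIgnoreCase(0x017F, 's') == false  -> /\u017F/i misses "s"
  // EqualUnicodeIgnoreCase(0x017F, 's') == true      -> /\u017F/iu matches "s"
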
diff --git a/chromium/v8/src/regexp/regexp-macro-assembler.h b/chromium/v8/src/regexp/regexp-macro-assembler.h
index 289c2a979e6..52465610cb6 100644
--- a/chromium/v8/src/regexp/regexp-macro-assembler.h
+++ b/chromium/v8/src/regexp/regexp-macro-assembler.h
@@ -88,7 +88,7 @@ class RegExpMacroAssembler {
virtual void CheckNotBackReference(int start_reg, bool read_backward,
Label* on_no_match) = 0;
virtual void CheckNotBackReferenceIgnoreCase(int start_reg,
- bool read_backward,
+ bool read_backward, bool unicode,
Label* on_no_match) = 0;
// Check the current character for a match with a literal character. If we
// fail to match then goto the on_failure label. End of input always
@@ -165,11 +165,16 @@ class RegExpMacroAssembler {
virtual void ClearRegisters(int reg_from, int reg_to) = 0;
virtual void WriteStackPointerToRegister(int reg) = 0;
- // Compares two-byte strings case insensitively.
+ // Compare two-byte strings case insensitively.
// Called from generated RegExp code.
- static int CaseInsensitiveCompareUC16(Address byte_offset1,
- Address byte_offset2,
- size_t byte_length, Isolate* isolate);
+ static int CaseInsensitiveCompareNonUnicode(Address byte_offset1,
+ Address byte_offset2,
+ size_t byte_length,
+ Isolate* isolate);
+ static int CaseInsensitiveCompareUnicode(Address byte_offset1,
+ Address byte_offset2,
+ size_t byte_length,
+ Isolate* isolate);
// Check that we are not in the middle of a surrogate pair.
void CheckNotInSurrogatePair(int cp_offset, Label* on_failure);
diff --git a/chromium/v8/src/regexp/regexp-parser.cc b/chromium/v8/src/regexp/regexp-parser.cc
index 3c1115414fb..7b87044ca65 100644
--- a/chromium/v8/src/regexp/regexp-parser.cc
+++ b/chromium/v8/src/regexp/regexp-parser.cc
@@ -1301,7 +1301,7 @@ bool LookupSpecialPropertyValueName(const char* name,
return true;
}
-// Explicitly whitelist supported binary properties. The spec forbids supporting
+// Explicitly allowlist supported binary properties. The spec forbids supporting
// properties outside of this set to ensure interoperability.
bool IsSupportedBinaryProperty(UProperty property) {
switch (property) {
@@ -1550,7 +1550,7 @@ bool RegExpParser::ParseUnlimitedLengthHexNumber(int max_value, uc32* value) {
}
while (d >= 0) {
x = x * 16 + d;
- if (x > max_value) {
+ if (x > static_cast<uc32>(max_value)) {
return false;
}
Advance();
@@ -1789,34 +1789,54 @@ RegExpTree* RegExpParser::ParseCharacterClass(const RegExpBuilder* builder) {
#undef CHECK_FAILED
-
-bool RegExpParser::ParseRegExp(Isolate* isolate, Zone* zone,
- FlatStringReader* input, JSRegExp::Flags flags,
- RegExpCompileData* result) {
+bool RegExpParser::Parse(RegExpCompileData* result,
+ const DisallowHeapAllocation&) {
DCHECK(result != nullptr);
- RegExpParser parser(input, flags, isolate, zone);
- RegExpTree* tree = parser.ParsePattern();
- if (parser.failed()) {
+ RegExpTree* tree = ParsePattern();
+ if (failed()) {
DCHECK(tree == nullptr);
- DCHECK(parser.error_ != RegExpError::kNone);
- result->error = parser.error_;
- result->error_pos = parser.error_pos_;
+ DCHECK(error_ != RegExpError::kNone);
+ result->error = error_;
+ result->error_pos = error_pos_;
} else {
DCHECK(tree != nullptr);
- DCHECK(parser.error_ == RegExpError::kNone);
+ DCHECK(error_ == RegExpError::kNone);
if (FLAG_trace_regexp_parser) {
StdoutStream os;
- tree->Print(os, zone);
+ tree->Print(os, zone());
os << "\n";
}
result->tree = tree;
- int capture_count = parser.captures_started();
- result->simple = tree->IsAtom() && parser.simple() && capture_count == 0;
- result->contains_anchor = parser.contains_anchor();
- result->capture_name_map = parser.CreateCaptureNameMap();
+ int capture_count = captures_started();
+ result->simple = tree->IsAtom() && simple() && capture_count == 0;
+ result->contains_anchor = contains_anchor();
result->capture_count = capture_count;
}
- return !parser.failed();
+ return !failed();
+}
+
+bool RegExpParser::ParseRegExp(Isolate* isolate, Zone* zone,
+ FlatStringReader* input, JSRegExp::Flags flags,
+ RegExpCompileData* result) {
+ RegExpParser parser(input, flags, isolate, zone);
+ bool success;
+ {
+ DisallowHeapAllocation no_gc;
+ success = parser.Parse(result, no_gc);
+ }
+ if (success) {
+ result->capture_name_map = parser.CreateCaptureNameMap();
+ }
+ return success;
+}
+
+bool RegExpParser::VerifyRegExpSyntax(Isolate* isolate, Zone* zone,
+ FlatStringReader* input,
+ JSRegExp::Flags flags,
+ RegExpCompileData* result,
+ const DisallowHeapAllocation& no_gc) {
+ RegExpParser parser(input, flags, isolate, zone);
+ return parser.Parse(result, no_gc);
}
RegExpBuilder::RegExpBuilder(Zone* zone, JSRegExp::Flags flags)
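
With parsing factored out of ParseRegExp, callers that only need syntax checking can skip the heap-allocating capture-name-map step entirely. A minimal usage sketch against the declarations added to regexp-parser.h above; the wrapper name is illustrative:

  // Sketch only: syntax-only validation inside a DisallowHeapAllocation scope.
  bool PatternSyntaxIsValid(Isolate* isolate, Zone* zone,
                            FlatStringReader* input, JSRegExp::Flags flags) {
    RegExpCompileData data;
    DisallowHeapAllocation no_gc;
    return RegExpParser::VerifyRegExpSyntax(isolate, zone, input, flags, &data,
                                            no_gc);
  }
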
diff --git a/chromium/v8/src/regexp/regexp-parser.h b/chromium/v8/src/regexp/regexp-parser.h
index aff1746bc53..bfb08208980 100644
--- a/chromium/v8/src/regexp/regexp-parser.h
+++ b/chromium/v8/src/regexp/regexp-parser.h
@@ -159,6 +159,13 @@ class V8_EXPORT_PRIVATE RegExpParser {
static bool ParseRegExp(Isolate* isolate, Zone* zone, FlatStringReader* input,
JSRegExp::Flags flags, RegExpCompileData* result);
+ static bool VerifyRegExpSyntax(Isolate* isolate, Zone* zone,
+ FlatStringReader* input, JSRegExp::Flags flags,
+ RegExpCompileData* result,
+ const DisallowHeapAllocation& no_gc);
+
+ private:
+ bool Parse(RegExpCompileData* result, const DisallowHeapAllocation&);
RegExpTree* ParsePattern();
RegExpTree* ParseDisjunction();
diff --git a/chromium/v8/src/regexp/s390/regexp-macro-assembler-s390.cc b/chromium/v8/src/regexp/s390/regexp-macro-assembler-s390.cc
index 2109b45314a..9ac4f755227 100644
--- a/chromium/v8/src/regexp/s390/regexp-macro-assembler-s390.cc
+++ b/chromium/v8/src/regexp/s390/regexp-macro-assembler-s390.cc
@@ -230,7 +230,7 @@ void RegExpMacroAssemblerS390::CheckGreedyLoop(Label* on_equal) {
}
void RegExpMacroAssemblerS390::CheckNotBackReferenceIgnoreCase(
- int start_reg, bool read_backward, Label* on_no_match) {
+ int start_reg, bool read_backward, bool unicode, Label* on_no_match) {
Label fallthrough;
__ LoadP(r2, register_location(start_reg)); // Index of start of
// capture
@@ -346,7 +346,10 @@ void RegExpMacroAssemblerS390::CheckNotBackReferenceIgnoreCase(
{
AllowExternalCallThatCantCauseGC scope(masm_);
ExternalReference function =
- ExternalReference::re_case_insensitive_compare_uc16(isolate());
+ unicode ? ExternalReference::re_case_insensitive_compare_unicode(
+ isolate())
+ : ExternalReference::re_case_insensitive_compare_non_unicode(
+ isolate());
__ CallCFunction(function, argument_count);
}
diff --git a/chromium/v8/src/regexp/s390/regexp-macro-assembler-s390.h b/chromium/v8/src/regexp/s390/regexp-macro-assembler-s390.h
index 9ced67fe274..e4f88f51b9a 100644
--- a/chromium/v8/src/regexp/s390/regexp-macro-assembler-s390.h
+++ b/chromium/v8/src/regexp/s390/regexp-macro-assembler-s390.h
@@ -36,7 +36,7 @@ class V8_EXPORT_PRIVATE RegExpMacroAssemblerS390
virtual void CheckNotBackReference(int start_reg, bool read_backward,
Label* on_no_match);
virtual void CheckNotBackReferenceIgnoreCase(int start_reg,
- bool read_backward,
+ bool read_backward, bool unicode,
Label* on_no_match);
virtual void CheckNotCharacter(unsigned c, Label* on_not_equal);
virtual void CheckNotCharacterAfterAnd(unsigned c, unsigned mask,
diff --git a/chromium/v8/src/regexp/x64/regexp-macro-assembler-x64.cc b/chromium/v8/src/regexp/x64/regexp-macro-assembler-x64.cc
index cf8eb6604c9..ef3e48428f0 100644
--- a/chromium/v8/src/regexp/x64/regexp-macro-assembler-x64.cc
+++ b/chromium/v8/src/regexp/x64/regexp-macro-assembler-x64.cc
@@ -215,7 +215,7 @@ void RegExpMacroAssemblerX64::CheckGreedyLoop(Label* on_equal) {
}
void RegExpMacroAssemblerX64::CheckNotBackReferenceIgnoreCase(
- int start_reg, bool read_backward, Label* on_no_match) {
+ int start_reg, bool read_backward, bool unicode, Label* on_no_match) {
Label fallthrough;
ReadPositionFromRegister(rdx, start_reg); // Offset of start of capture
ReadPositionFromRegister(rbx, start_reg + 1); // Offset of end of capture
@@ -354,7 +354,10 @@ void RegExpMacroAssemblerX64::CheckNotBackReferenceIgnoreCase(
// linter.
AllowExternalCallThatCantCauseGC scope(&masm_);
ExternalReference compare =
- ExternalReference::re_case_insensitive_compare_uc16(isolate());
+ unicode ? ExternalReference::re_case_insensitive_compare_unicode(
+ isolate())
+ : ExternalReference::re_case_insensitive_compare_non_unicode(
+ isolate());
__ CallCFunction(compare, num_arguments);
}
diff --git a/chromium/v8/src/regexp/x64/regexp-macro-assembler-x64.h b/chromium/v8/src/regexp/x64/regexp-macro-assembler-x64.h
index 551e9bc6ec7..ea4d45edba8 100644
--- a/chromium/v8/src/regexp/x64/regexp-macro-assembler-x64.h
+++ b/chromium/v8/src/regexp/x64/regexp-macro-assembler-x64.h
@@ -37,6 +37,7 @@ class V8_EXPORT_PRIVATE RegExpMacroAssemblerX64
void CheckNotBackReference(int start_reg, bool read_backward,
Label* on_no_match) override;
void CheckNotBackReferenceIgnoreCase(int start_reg, bool read_backward,
+ bool unicode,
Label* on_no_match) override;
void CheckNotCharacter(uint32_t c, Label* on_not_equal) override;
void CheckNotCharacterAfterAnd(uint32_t c, uint32_t mask,
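
Both back-reference implementations above now pick between a unicode and a non-unicode case-insensitive comparator at code-generation time. A standalone sketch of that flag-based selection; the comparator bodies are placeholders, not the ICU-backed helpers V8 actually calls:

#include <cctype>
#include <cstddef>
#include <cstdint>

using CompareFn = int (*)(const uint16_t*, const uint16_t*, size_t);

int CompareNonUnicode(const uint16_t* a, const uint16_t* b, size_t n) {
  for (size_t i = 0; i < n; i++) {
    // ASCII-only folding as a stand-in for the simple (non-unicode) path.
    if (std::tolower(a[i] & 0x7F) != std::tolower(b[i] & 0x7F)) return 0;
  }
  return 1;
}

int CompareUnicode(const uint16_t* a, const uint16_t* b, size_t n) {
  // A real implementation would apply full Unicode case folding here.
  return CompareNonUnicode(a, b, n);
}

CompareFn SelectComparator(bool unicode) {
  return unicode ? CompareUnicode : CompareNonUnicode;
}
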
diff --git a/chromium/v8/src/roots/roots-inl.h b/chromium/v8/src/roots/roots-inl.h
index c9dc033aa38..2bec843aa36 100644
--- a/chromium/v8/src/roots/roots-inl.h
+++ b/chromium/v8/src/roots/roots-inl.h
@@ -5,9 +5,8 @@
#ifndef V8_ROOTS_ROOTS_INL_H_
#define V8_ROOTS_ROOTS_INL_H_
-#include "src/roots/roots.h"
-
#include "src/execution/isolate.h"
+#include "src/execution/local-isolate-wrapper.h"
#include "src/execution/off-thread-isolate.h"
#include "src/handles/handles.h"
#include "src/heap/read-only-heap.h"
@@ -23,6 +22,7 @@
#include "src/objects/scope-info.h"
#include "src/objects/slots.h"
#include "src/objects/string.h"
+#include "src/roots/roots.h"
namespace v8 {
namespace internal {
@@ -72,7 +72,14 @@ ReadOnlyRoots::ReadOnlyRoots(Isolate* isolate)
ReadOnlyRoots::ReadOnlyRoots(OffThreadIsolate* isolate)
: ReadOnlyRoots(isolate->factory()->read_only_roots()) {}
-ReadOnlyRoots::ReadOnlyRoots(Address* ro_roots) : read_only_roots_(ro_roots) {}
+ReadOnlyRoots::ReadOnlyRoots(LocalHeapWrapper heap)
+ : ReadOnlyRoots(heap.is_off_thread() ? ReadOnlyRoots(heap.off_thread())
+ : ReadOnlyRoots(heap.main_thread())) {}
+
+ReadOnlyRoots::ReadOnlyRoots(LocalIsolateWrapper isolate)
+ : ReadOnlyRoots(isolate.is_off_thread()
+ ? ReadOnlyRoots(isolate.off_thread())
+ : ReadOnlyRoots(isolate.main_thread())) {}
// We use unchecked_cast below because we trust our read-only roots to
// have the right type, and to avoid the heavy #includes that would be
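
The new ReadOnlyRoots constructors dispatch once on whether the wrapper holds a main-thread or an off-thread isolate, so downstream code is identical for both. A minimal sketch of that wrapper-dispatch idea, with invented stand-in types:

#include <cassert>

struct MainIsolate { int roots = 1; };
struct OffThreadIsolate { int roots = 2; };

class IsolateWrapper {
 public:
  explicit IsolateWrapper(MainIsolate* i) : main_(i) {}
  explicit IsolateWrapper(OffThreadIsolate* i) : off_(i) {}
  bool is_off_thread() const { return off_ != nullptr; }
  MainIsolate* main_thread() const { assert(main_); return main_; }
  OffThreadIsolate* off_thread() const { assert(off_); return off_; }

 private:
  MainIsolate* main_ = nullptr;
  OffThreadIsolate* off_ = nullptr;
};

// Branch once, then the caller no longer cares which isolate it got.
int RootsOf(IsolateWrapper isolate) {
  return isolate.is_off_thread() ? isolate.off_thread()->roots
                                 : isolate.main_thread()->roots;
}
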
diff --git a/chromium/v8/src/roots/roots.h b/chromium/v8/src/roots/roots.h
index 0d6c0f30c6b..1f017260b31 100644
--- a/chromium/v8/src/roots/roots.h
+++ b/chromium/v8/src/roots/roots.h
@@ -8,6 +8,7 @@
#include "src/base/macros.h"
#include "src/builtins/accessors.h"
#include "src/common/globals.h"
+#include "src/execution/local-isolate-wrapper.h"
#include "src/handles/handles.h"
#include "src/init/heap-symbols.h"
#include "src/objects/objects-definitions.h"
@@ -107,7 +108,6 @@ class Symbol;
V(Map, next_call_side_effect_free_call_handler_info_map, \
NextCallSideEffectFreeCallHandlerInfoMap) \
V(Map, simple_number_dictionary_map, SimpleNumberDictionaryMap) \
- V(Map, sloppy_arguments_elements_map, SloppyArgumentsElementsMap) \
V(Map, small_ordered_hash_map_map, SmallOrderedHashMapMap) \
V(Map, small_ordered_hash_set_map, SmallOrderedHashSetMap) \
V(Map, small_ordered_name_dictionary_map, SmallOrderedNameDictionaryMap) \
@@ -155,6 +155,7 @@ class Symbol;
V(Map, optimized_out_map, OptimizedOutMap) \
V(Map, stale_register_map, StaleRegisterMap) \
V(Map, self_reference_marker_map, SelfReferenceMarkerMap) \
+ V(Map, basic_block_counters_marker_map, BasicBlockCountersMarkerMap) \
/* Canonical empty values */ \
V(EnumCache, empty_enum_cache, EmptyEnumCache) \
V(PropertyArray, empty_property_array, EmptyPropertyArray) \
@@ -165,7 +166,6 @@ class Symbol;
EmptyArrayBoilerplateDescription) \
V(ClosureFeedbackCellArray, empty_closure_feedback_cell_array, \
EmptyClosureFeedbackCellArray) \
- V(FixedArray, empty_sloppy_arguments_elements, EmptySloppyArgumentsElements) \
V(NumberDictionary, empty_slow_element_dictionary, \
EmptySlowElementDictionary) \
V(FixedArray, empty_ordered_hash_map, EmptyOrderedHashMap) \
@@ -184,6 +184,8 @@ class Symbol;
V(HeapNumber, minus_infinity_value, MinusInfinityValue) \
/* Marker for self-references during code-generation */ \
V(HeapObject, self_reference_marker, SelfReferenceMarker) \
+ /* Marker for basic-block usage counters array during code-generation */ \
+ V(Oddball, basic_block_counters_marker, BasicBlockCountersMarker) \
/* Canonical off-heap trampoline data */ \
V(ByteArray, off_heap_trampoline_relocation_info, \
OffHeapTrampolineRelocationInfo) \
@@ -302,6 +304,7 @@ class Symbol;
InterpreterEntryTrampolineForProfiling) \
V(Object, pending_optimize_for_test_bytecode, \
PendingOptimizeForTestBytecode) \
+ V(ArrayList, basic_block_profiling_data, BasicBlockProfilingData) \
V(WeakArrayList, shared_wasm_memories, SharedWasmMemories)
// Entries in this list are limited to Smis and are not visited during GC.
@@ -353,7 +356,7 @@ class Symbol;
PUBLIC_SYMBOL_ROOT_LIST(V) \
WELL_KNOWN_SYMBOL_ROOT_LIST(V) \
STRUCT_MAPS_LIST(V) \
- TORQUE_INTERNAL_MAP_ROOT_LIST(V) \
+ TORQUE_DEFINED_MAP_ROOT_LIST(V) \
ALLOCATION_SITE_MAPS_LIST(V) \
DATA_HANDLER_MAPS_LIST(V)
@@ -527,6 +530,8 @@ class ReadOnlyRoots {
V8_INLINE explicit ReadOnlyRoots(OffThreadHeap* heap);
V8_INLINE explicit ReadOnlyRoots(Isolate* isolate);
V8_INLINE explicit ReadOnlyRoots(OffThreadIsolate* isolate);
+ V8_INLINE explicit ReadOnlyRoots(LocalIsolateWrapper wrapper);
+ V8_INLINE explicit ReadOnlyRoots(LocalHeapWrapper wrapper);
#define ROOT_ACCESSOR(Type, name, CamelName) \
V8_INLINE class Type name() const; \
@@ -553,13 +558,15 @@ class ReadOnlyRoots {
#undef ROOT_TYPE_CHECK
#endif
- V8_INLINE explicit ReadOnlyRoots(Address* ro_roots);
+ V8_INLINE explicit ReadOnlyRoots(Address* ro_roots)
+ : read_only_roots_(ro_roots) {}
V8_INLINE Address* GetLocation(RootIndex root_index) const;
Address* read_only_roots_;
friend class ReadOnlyHeap;
+ friend class DeserializerAllocator;
};
} // namespace internal
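
The root lists edited above are X-macros: each V(Type, name, CamelName) entry is expanded several times to generate enums, accessors and verifiers from a single list, which is why adding or removing one root is a one-line change. A tiny standalone version of the same trick:

#include <cstdio>

#define DEMO_ROOT_LIST(V)  \
  V(int, answer, Answer)   \
  V(double, pi, Pi)

enum class RootIndex {
#define DECL_INDEX(Type, name, CamelName) k##CamelName,
  DEMO_ROOT_LIST(DECL_INDEX)
#undef DECL_INDEX
  kRootCount
};

int main() {
  std::printf("%d roots\n", static_cast<int>(RootIndex::kRootCount));
}
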
diff --git a/chromium/v8/src/runtime/runtime-debug.cc b/chromium/v8/src/runtime/runtime-debug.cc
index 3b8eefcee15..e60256b0d9c 100644
--- a/chromium/v8/src/runtime/runtime-debug.cc
+++ b/chromium/v8/src/runtime/runtime-debug.cc
@@ -241,7 +241,7 @@ MaybeHandle<JSArray> Runtime::GetInternalProperties(Isolate* isolate,
Handle<FixedArray> result = factory->NewFixedArray(2 * 3);
Handle<String> generator_status =
- factory->NewStringFromAsciiChecked("[[GeneratorStatus]]");
+ factory->NewStringFromAsciiChecked("[[GeneratorState]]");
result->set(0, *generator_status);
Handle<String> status_str = factory->NewStringFromAsciiChecked(status);
result->set(1, *status_str);
@@ -261,7 +261,7 @@ MaybeHandle<JSArray> Runtime::GetInternalProperties(Isolate* isolate,
const char* status = JSPromise::Status(promise->status());
Handle<FixedArray> result = factory->NewFixedArray(2 * 2);
Handle<String> promise_status =
- factory->NewStringFromAsciiChecked("[[PromiseStatus]]");
+ factory->NewStringFromAsciiChecked("[[PromiseState]]");
result->set(0, *promise_status);
Handle<String> status_str = factory->NewStringFromAsciiChecked(status);
result->set(1, *status_str);
@@ -271,7 +271,7 @@ MaybeHandle<JSArray> Runtime::GetInternalProperties(Isolate* isolate,
: promise->result(),
isolate);
Handle<String> promise_value =
- factory->NewStringFromAsciiChecked("[[PromiseValue]]");
+ factory->NewStringFromAsciiChecked("[[PromiseResult]]");
result->set(2, *promise_value);
result->set(3, *value_obj);
return factory->NewJSArrayWithElements(result);
@@ -495,7 +495,8 @@ int ScriptLinePosition(Handle<Script> script, int line) {
if (line < 0) return -1;
if (script->type() == Script::TYPE_WASM) {
- return GetWasmFunctionOffset(script->wasm_native_module()->module(), line);
+ // Wasm positions are relative to the start of the module.
+ return 0;
}
Script::InitLineEnds(script->GetIsolate(), script);
diff --git a/chromium/v8/src/runtime/runtime-internal.cc b/chromium/v8/src/runtime/runtime-internal.cc
index bdb2931e200..08086fadfe3 100644
--- a/chromium/v8/src/runtime/runtime-internal.cc
+++ b/chromium/v8/src/runtime/runtime-internal.cc
@@ -332,7 +332,8 @@ RUNTIME_FUNCTION(Runtime_BytecodeBudgetInterrupt) {
CONVERT_ARG_HANDLE_CHECKED(JSFunction, function, 0);
function->raw_feedback_cell().set_interrupt_budget(FLAG_interrupt_budget);
if (!function->has_feedback_vector()) {
- JSFunction::EnsureFeedbackVector(function);
+ IsCompiledScope is_compiled_scope(function->shared().is_compiled_scope());
+ JSFunction::EnsureFeedbackVector(function, &is_compiled_scope);
// Also initialize the invocation count here. This is only really needed for
// OSR. When we OSR functions with lazy feedback allocation we want to have
// a non zero invocation count so we can inline functions.
diff --git a/chromium/v8/src/runtime/runtime-object.cc b/chromium/v8/src/runtime/runtime-object.cc
index 2dfa9e53bec..a147991c322 100644
--- a/chromium/v8/src/runtime/runtime-object.cc
+++ b/chromium/v8/src/runtime/runtime-object.cc
@@ -1188,6 +1188,19 @@ RUNTIME_FUNCTION(Runtime_CreateDataProperty) {
return *value;
}
+RUNTIME_FUNCTION(Runtime_SetOwnPropertyIgnoreAttributes) {
+ HandleScope scope(isolate);
+ DCHECK_EQ(4, args.length());
+ CONVERT_ARG_HANDLE_CHECKED(JSObject, o, 0);
+ CONVERT_ARG_HANDLE_CHECKED(String, key, 1);
+ CONVERT_ARG_HANDLE_CHECKED(Object, value, 2);
+ CONVERT_ARG_HANDLE_CHECKED(Smi, attributes, 3);
+
+ RETURN_RESULT_OR_FAILURE(
+ isolate, JSObject::SetOwnPropertyIgnoreAttributes(
+ o, key, value, PropertyAttributes(attributes->value())));
+}
+
RUNTIME_FUNCTION(Runtime_GetOwnPropertyDescriptor) {
HandleScope scope(isolate);
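
The new Runtime_SetOwnPropertyIgnoreAttributes receives the property attributes packed into a Smi and unpacks them in the callee. A sketch of that packed-flags idea; the bit values below are illustrative rather than the exact V8 PropertyAttributes encoding:

#include <cstdio>

enum PropertyAttributes : int {
  NONE = 0,
  READ_ONLY = 1 << 0,
  DONT_ENUM = 1 << 1,
  DONT_DELETE = 1 << 2,
};

void DescribeAttributes(int packed) {
  std::printf("writable=%d enumerable=%d configurable=%d\n",
              !(packed & READ_ONLY), !(packed & DONT_ENUM),
              !(packed & DONT_DELETE));
}

int main() {
  DescribeAttributes(READ_ONLY | DONT_ENUM);  // writable=0 enumerable=0 configurable=1
}
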
diff --git a/chromium/v8/src/runtime/runtime-promise.cc b/chromium/v8/src/runtime/runtime-promise.cc
index 4d1c5ea9d2a..dcc2c69013e 100644
--- a/chromium/v8/src/runtime/runtime-promise.cc
+++ b/chromium/v8/src/runtime/runtime-promise.cc
@@ -217,10 +217,8 @@ RUNTIME_FUNCTION(Runtime_PromiseHookBefore) {
return ReadOnlyRoots(isolate).undefined_value();
Handle<JSPromise> promise = Handle<JSPromise>::cast(maybe_promise);
if (isolate->debug()->is_active()) isolate->PushPromise(promise);
- if (promise->IsJSPromise()) {
- isolate->RunPromiseHook(PromiseHookType::kBefore, promise,
- isolate->factory()->undefined_value());
- }
+ isolate->RunPromiseHook(PromiseHookType::kBefore, promise,
+ isolate->factory()->undefined_value());
return ReadOnlyRoots(isolate).undefined_value();
}
@@ -232,10 +230,8 @@ RUNTIME_FUNCTION(Runtime_PromiseHookAfter) {
return ReadOnlyRoots(isolate).undefined_value();
Handle<JSPromise> promise = Handle<JSPromise>::cast(maybe_promise);
if (isolate->debug()->is_active()) isolate->PopPromise();
- if (promise->IsJSPromise()) {
- isolate->RunPromiseHook(PromiseHookType::kAfter, promise,
- isolate->factory()->undefined_value());
- }
+ isolate->RunPromiseHook(PromiseHookType::kAfter, promise,
+ isolate->factory()->undefined_value());
return ReadOnlyRoots(isolate).undefined_value();
}
diff --git a/chromium/v8/src/runtime/runtime-scopes.cc b/chromium/v8/src/runtime/runtime-scopes.cc
index 4b1f6f2231f..b78ca1d5340 100644
--- a/chromium/v8/src/runtime/runtime-scopes.cc
+++ b/chromium/v8/src/runtime/runtime-scopes.cc
@@ -18,6 +18,8 @@
#include "src/objects/module-inl.h"
#include "src/objects/smi.h"
#include "src/runtime/runtime-utils.h"
+#include "torque-generated/exported-class-definitions-tq-inl.h"
+#include "torque-generated/exported-class-definitions-tq.h"
namespace v8 {
namespace internal {
@@ -408,20 +410,19 @@ Handle<JSObject> NewSloppyArguments(Isolate* isolate, Handle<JSFunction> callee,
if (argument_count > 0) {
if (parameter_count > 0) {
int mapped_count = Min(argument_count, parameter_count);
- Handle<FixedArray> parameter_map = isolate->factory()->NewFixedArray(
- mapped_count + 2, AllocationType::kYoung);
- parameter_map->set_map(
- ReadOnlyRoots(isolate).sloppy_arguments_elements_map());
- result->set_map(isolate->native_context()->fast_aliased_arguments_map());
- result->set_elements(*parameter_map);
// Store the context and the arguments array at the beginning of the
// parameter map.
Handle<Context> context(isolate->context(), isolate);
Handle<FixedArray> arguments = isolate->factory()->NewFixedArray(
argument_count, AllocationType::kYoung);
- parameter_map->set(0, *context);
- parameter_map->set(1, *arguments);
+
+ Handle<SloppyArgumentsElements> parameter_map =
+ isolate->factory()->NewSloppyArgumentsElements(
+ mapped_count, context, arguments, AllocationType::kYoung);
+
+ result->set_map(isolate->native_context()->fast_aliased_arguments_map());
+ result->set_elements(*parameter_map);
// Loop over the actual parameters backwards.
int index = argument_count - 1;
@@ -438,7 +439,8 @@ Handle<JSObject> NewSloppyArguments(Isolate* isolate, Handle<JSFunction> callee,
// arguments object.
for (int i = 0; i < mapped_count; i++) {
arguments->set(i, parameters[i]);
- parameter_map->set_the_hole(i + 2);
+ parameter_map->set_mapped_entries(
+ i, *isolate->factory()->the_hole_value());
}
// Walk all context slots to find context allocated parameters. Mark each
@@ -449,7 +451,7 @@ Handle<JSObject> NewSloppyArguments(Isolate* isolate, Handle<JSFunction> callee,
if (parameter >= mapped_count) continue;
arguments->set_the_hole(parameter);
Smi slot = Smi::FromInt(scope_info->ContextHeaderLength() + i);
- parameter_map->set(parameter + 2, slot);
+ parameter_map->set_mapped_entries(parameter, slot);
}
} else {
// If there is no aliasing, the arguments object elements are not
@@ -610,40 +612,35 @@ RUNTIME_FUNCTION(Runtime_NewFunctionContext) {
return *isolate->factory()->NewFunctionContext(outer, scope_info);
}
+// TODO(jgruber): Rename these functions to 'New...Context'.
RUNTIME_FUNCTION(Runtime_PushWithContext) {
HandleScope scope(isolate);
DCHECK_EQ(2, args.length());
CONVERT_ARG_HANDLE_CHECKED(JSReceiver, extension_object, 0);
CONVERT_ARG_HANDLE_CHECKED(ScopeInfo, scope_info, 1);
Handle<Context> current(isolate->context(), isolate);
- Handle<Context> context =
- isolate->factory()->NewWithContext(current, scope_info, extension_object);
- isolate->set_context(*context);
- return *context;
+ return *isolate->factory()->NewWithContext(current, scope_info,
+ extension_object);
}
+// TODO(jgruber): Rename these functions to 'New...Context'.
RUNTIME_FUNCTION(Runtime_PushCatchContext) {
HandleScope scope(isolate);
DCHECK_EQ(2, args.length());
CONVERT_ARG_HANDLE_CHECKED(Object, thrown_object, 0);
CONVERT_ARG_HANDLE_CHECKED(ScopeInfo, scope_info, 1);
Handle<Context> current(isolate->context(), isolate);
- Handle<Context> context =
- isolate->factory()->NewCatchContext(current, scope_info, thrown_object);
- isolate->set_context(*context);
- return *context;
+ return *isolate->factory()->NewCatchContext(current, scope_info,
+ thrown_object);
}
-
+// TODO(jgruber): Rename these functions to 'New...Context'.
RUNTIME_FUNCTION(Runtime_PushBlockContext) {
HandleScope scope(isolate);
DCHECK_EQ(1, args.length());
CONVERT_ARG_HANDLE_CHECKED(ScopeInfo, scope_info, 0);
Handle<Context> current(isolate->context(), isolate);
- Handle<Context> context =
- isolate->factory()->NewBlockContext(current, scope_info);
- isolate->set_context(*context);
- return *context;
+ return *isolate->factory()->NewBlockContext(current, scope_info);
}
diff --git a/chromium/v8/src/runtime/runtime-test.cc b/chromium/v8/src/runtime/runtime-test.cc
index db804490f4c..63a4ae35653 100644
--- a/chromium/v8/src/runtime/runtime-test.cc
+++ b/chromium/v8/src/runtime/runtime-test.cc
@@ -323,7 +323,7 @@ RUNTIME_FUNCTION(Runtime_OptimizeFunctionOnNextCall) {
function->set_code(*BUILTIN_CODE(isolate, InterpreterEntryTrampoline));
}
- JSFunction::EnsureFeedbackVector(function);
+ JSFunction::EnsureFeedbackVector(function, &is_compiled_scope);
function->MarkForOptimization(concurrency_mode);
return ReadOnlyRoots(isolate).undefined_value();
@@ -353,7 +353,7 @@ bool EnsureFeedbackVector(Handle<JSFunction> function) {
// Ensure function has a feedback vector to hold type feedback for
// optimization.
- JSFunction::EnsureFeedbackVector(function);
+ JSFunction::EnsureFeedbackVector(function, &is_compiled_scope);
return true;
}
@@ -369,8 +369,9 @@ RUNTIME_FUNCTION(Runtime_EnsureFeedbackVectorForFunction) {
RUNTIME_FUNCTION(Runtime_PrepareFunctionForOptimization) {
HandleScope scope(isolate);
- DCHECK(args.length() == 1 || args.length() == 2);
- if (!args[0].IsJSFunction()) return CrashUnlessFuzzing(isolate);
+ if ((args.length() != 1 && args.length() != 2) || !args[0].IsJSFunction()) {
+ return CrashUnlessFuzzing(isolate);
+ }
CONVERT_ARG_HANDLE_CHECKED(JSFunction, function, 0);
bool allow_heuristic_optimization = false;
@@ -457,7 +458,8 @@ RUNTIME_FUNCTION(Runtime_OptimizeOsr) {
function->ShortPrint(scope.file());
PrintF(scope.file(), " for non-concurrent optimization]\n");
}
- JSFunction::EnsureFeedbackVector(function);
+ IsCompiledScope is_compiled_scope(function->shared().is_compiled_scope());
+ JSFunction::EnsureFeedbackVector(function, &is_compiled_scope);
function->MarkForOptimization(ConcurrencyMode::kNotConcurrent);
// Make the profiler arm all back edges in unoptimized code.
@@ -752,38 +754,21 @@ RUNTIME_FUNCTION(Runtime_DebugPrint) {
bool weak = maybe_object.IsWeak();
#ifdef OBJECT_PRINT
- if (object.IsString() && !isolate->context().is_null()) {
- DCHECK(!weak);
- // If we have a string, assume it's a code "marker"
- // and print some interesting cpu debugging info.
- object.Print(os);
- JavaScriptFrameIterator it(isolate);
- JavaScriptFrame* frame = it.frame();
- os << "fp = " << reinterpret_cast<void*>(frame->fp())
- << ", sp = " << reinterpret_cast<void*>(frame->sp())
- << ", caller_sp = " << reinterpret_cast<void*>(frame->caller_sp())
- << ": ";
- } else {
- os << "DebugPrint: ";
- if (weak) {
- os << "[weak] ";
- }
- object.Print(os);
- }
+ os << "DebugPrint: ";
+ if (weak) os << "[weak] ";
+ object.Print(os);
if (object.IsHeapObject()) {
HeapObject::cast(object).map().Print(os);
}
#else
- if (weak) {
- os << "[weak] ";
- }
+ if (weak) os << "[weak] ";
// ShortPrint is available in release mode. Print is not.
os << Brief(object);
#endif
}
os << std::endl;
- return args[0]; // return TOS
+ return args[0];
}
RUNTIME_FUNCTION(Runtime_PrintWithNameForAssert) {
@@ -931,13 +916,12 @@ int StackSize(Isolate* isolate) {
return n;
}
-void PrintIndentation(Isolate* isolate) {
- const int nmax = 80;
- int n = StackSize(isolate);
- if (n <= nmax) {
- PrintF("%4d:%*s", n, n, "");
+void PrintIndentation(int stack_size) {
+ const int max_display = 80;
+ if (stack_size <= max_display) {
+ PrintF("%4d:%*s", stack_size, stack_size, "");
} else {
- PrintF("%4d:%*s", n, nmax, "...");
+ PrintF("%4d:%*s", stack_size, max_display, "...");
}
}
@@ -946,24 +930,126 @@ void PrintIndentation(Isolate* isolate) {
RUNTIME_FUNCTION(Runtime_TraceEnter) {
SealHandleScope shs(isolate);
DCHECK_EQ(0, args.length());
- PrintIndentation(isolate);
+ PrintIndentation(StackSize(isolate));
JavaScriptFrame::PrintTop(isolate, stdout, true, false);
PrintF(" {\n");
return ReadOnlyRoots(isolate).undefined_value();
}
-
RUNTIME_FUNCTION(Runtime_TraceExit) {
SealHandleScope shs(isolate);
DCHECK_EQ(1, args.length());
CONVERT_ARG_CHECKED(Object, obj, 0);
- PrintIndentation(isolate);
+ PrintIndentation(StackSize(isolate));
PrintF("} -> ");
obj.ShortPrint();
PrintF("\n");
return obj; // return TOS
}
+namespace {
+
+int WasmStackSize(Isolate* isolate) {
+ // TODO(wasm): Fix this for mixed JS/Wasm stacks with both --trace and
+ // --trace-wasm.
+ int n = 0;
+ for (StackTraceFrameIterator it(isolate); !it.done(); it.Advance()) {
+ if (it.is_wasm()) n++;
+ }
+ return n;
+}
+
+} // namespace
+
+RUNTIME_FUNCTION(Runtime_WasmTraceEnter) {
+ HandleScope shs(isolate);
+ DCHECK_EQ(0, args.length());
+ PrintIndentation(WasmStackSize(isolate));
+
+ // Find the caller wasm frame.
+ wasm::WasmCodeRefScope wasm_code_ref_scope;
+ StackTraceFrameIterator it(isolate);
+ DCHECK(!it.done());
+ DCHECK(it.is_wasm());
+ WasmFrame* frame = WasmFrame::cast(it.frame());
+
+ // Find the function name.
+ int func_index = frame->function_index();
+ const wasm::WasmModule* module = frame->wasm_instance().module();
+ wasm::ModuleWireBytes wire_bytes =
+ wasm::ModuleWireBytes(frame->native_module()->wire_bytes());
+ wasm::WireBytesRef name_ref =
+ module->lazily_generated_names.LookupFunctionName(
+ wire_bytes, func_index, VectorOf(module->export_table));
+ wasm::WasmName name = wire_bytes.GetNameOrNull(name_ref);
+
+ wasm::WasmCode* code = frame->wasm_code();
+ PrintF(code->is_liftoff() ? "~" : "*");
+
+ if (name.empty()) {
+ PrintF("wasm-function[%d] {\n", func_index);
+ } else {
+ PrintF("wasm-function[%d] \"%.*s\" {\n", func_index, name.length(),
+ name.begin());
+ }
+
+ return ReadOnlyRoots(isolate).undefined_value();
+}
+
+RUNTIME_FUNCTION(Runtime_WasmTraceExit) {
+ HandleScope shs(isolate);
+ DCHECK_EQ(1, args.length());
+ CONVERT_ARG_CHECKED(Smi, value_addr_smi, 0);
+
+ PrintIndentation(WasmStackSize(isolate));
+ PrintF("}");
+
+ // Find the caller wasm frame.
+ wasm::WasmCodeRefScope wasm_code_ref_scope;
+ StackTraceFrameIterator it(isolate);
+ DCHECK(!it.done());
+ DCHECK(it.is_wasm());
+ WasmFrame* frame = WasmFrame::cast(it.frame());
+ int func_index = frame->function_index();
+ const wasm::FunctionSig* sig =
+ frame->wasm_instance().module()->functions[func_index].sig;
+
+ size_t num_returns = sig->return_count();
+ if (num_returns == 1) {
+ wasm::ValueType return_type = sig->GetReturn(0);
+ switch (return_type.kind()) {
+ case wasm::ValueType::kI32: {
+ int32_t value = ReadUnalignedValue<int32_t>(value_addr_smi.ptr());
+ PrintF(" -> %d\n", value);
+ break;
+ }
+ case wasm::ValueType::kI64: {
+ int64_t value = ReadUnalignedValue<int64_t>(value_addr_smi.ptr());
+ PrintF(" -> %" PRId64 "\n", value);
+ break;
+ }
+ case wasm::ValueType::kF32: {
+ float_t value = ReadUnalignedValue<float_t>(value_addr_smi.ptr());
+ PrintF(" -> %f\n", value);
+ break;
+ }
+ case wasm::ValueType::kF64: {
+ double_t value = ReadUnalignedValue<double_t>(value_addr_smi.ptr());
+ PrintF(" -> %f\n", value);
+ break;
+ }
+ default:
+ PrintF(" -> Unsupported type\n");
+ break;
+ }
+ } else {
+ // TODO(wasm) Handle multiple return values.
+ PrintF("\n");
+ }
+
+ return ReadOnlyRoots(isolate).undefined_value();
+}
+
RUNTIME_FUNCTION(Runtime_HaveSameMap) {
SealHandleScope shs(isolate);
DCHECK_EQ(2, args.length());
@@ -1384,7 +1470,7 @@ RUNTIME_FUNCTION(Runtime_WasmTierDownModule) {
CONVERT_ARG_HANDLE_CHECKED(WasmInstanceObject, instance, 0);
auto* native_module = instance->module_object().native_module();
native_module->SetTieringState(wasm::kTieredDown);
- native_module->TriggerRecompilation();
+ native_module->RecompileForTiering();
CHECK(!native_module->compilation_state()->failed());
return ReadOnlyRoots(isolate).undefined_value();
}
@@ -1395,7 +1481,7 @@ RUNTIME_FUNCTION(Runtime_WasmTierUpModule) {
CONVERT_ARG_HANDLE_CHECKED(WasmInstanceObject, instance, 0);
auto* native_module = instance->module_object().native_module();
native_module->SetTieringState(wasm::kTieredUp);
- native_module->TriggerRecompilation();
+ native_module->RecompileForTiering();
CHECK(!native_module->compilation_state()->failed());
return ReadOnlyRoots(isolate).undefined_value();
}
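
Runtime_WasmTraceExit above reads the traced return value through an unaligned-safe load from a raw address and switches on the wasm value kind. A standalone sketch of that read-and-print step; the enum and helper names are illustrative:

#include <cstdint>
#include <cstdio>
#include <cstring>

enum class ValueKind { kI32, kI64, kF32, kF64 };

template <typename T>
T ReadUnaligned(const void* addr) {
  T value;
  std::memcpy(&value, addr, sizeof(T));  // safe for any alignment
  return value;
}

void PrintReturnValue(const void* addr, ValueKind kind) {
  switch (kind) {
    case ValueKind::kI32:
      std::printf(" -> %d\n", ReadUnaligned<int32_t>(addr));
      break;
    case ValueKind::kI64:
      std::printf(" -> %lld\n",
                  static_cast<long long>(ReadUnaligned<int64_t>(addr)));
      break;
    case ValueKind::kF32:
      std::printf(" -> %f\n", ReadUnaligned<float>(addr));
      break;
    case ValueKind::kF64:
      std::printf(" -> %f\n", ReadUnaligned<double>(addr));
      break;
  }
}
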
diff --git a/chromium/v8/src/runtime/runtime-wasm.cc b/chromium/v8/src/runtime/runtime-wasm.cc
index 96c88357003..2431cc12b23 100644
--- a/chromium/v8/src/runtime/runtime-wasm.cc
+++ b/chromium/v8/src/runtime/runtime-wasm.cc
@@ -209,15 +209,12 @@ RUNTIME_FUNCTION(Runtime_WasmCompileLazy) {
}
// Should be called from within a handle scope
-Handle<JSArrayBuffer> GetSharedArrayBuffer(Handle<WasmInstanceObject> instance,
- Isolate* isolate, uint32_t address) {
+Handle<JSArrayBuffer> GetArrayBuffer(Handle<WasmInstanceObject> instance,
+ Isolate* isolate, uint32_t address) {
DCHECK(instance->has_memory_object());
Handle<JSArrayBuffer> array_buffer(instance->memory_object().array_buffer(),
isolate);
- // Validation should have failed if the memory was not shared.
- DCHECK(array_buffer->is_shared());
-
// Should have trapped if address was OOB
DCHECK_LT(address, array_buffer->byte_length());
return array_buffer;
@@ -231,8 +228,12 @@ RUNTIME_FUNCTION(Runtime_WasmAtomicNotify) {
CONVERT_NUMBER_CHECKED(uint32_t, address, Uint32, args[1]);
CONVERT_NUMBER_CHECKED(uint32_t, count, Uint32, args[2]);
Handle<JSArrayBuffer> array_buffer =
- GetSharedArrayBuffer(instance, isolate, address);
- return FutexEmulation::Wake(array_buffer, address, count);
+ GetArrayBuffer(instance, isolate, address);
+ if (array_buffer->is_shared()) {
+ return FutexEmulation::Wake(array_buffer, address, count);
+ } else {
+ return Smi::FromInt(0);
+ }
}
RUNTIME_FUNCTION(Runtime_WasmI32AtomicWait) {
@@ -245,7 +246,12 @@ RUNTIME_FUNCTION(Runtime_WasmI32AtomicWait) {
CONVERT_ARG_HANDLE_CHECKED(BigInt, timeout_ns, 3);
Handle<JSArrayBuffer> array_buffer =
- GetSharedArrayBuffer(instance, isolate, address);
+ GetArrayBuffer(instance, isolate, address);
+
+ // Trap if memory is not shared
+ if (!array_buffer->is_shared()) {
+ return ThrowWasmError(isolate, MessageTemplate::kAtomicsWaitNotAllowed);
+ }
return FutexEmulation::WaitWasm32(isolate, array_buffer, address,
expected_value, timeout_ns->AsInt64());
}
@@ -260,7 +266,12 @@ RUNTIME_FUNCTION(Runtime_WasmI64AtomicWait) {
CONVERT_ARG_HANDLE_CHECKED(BigInt, timeout_ns, 3);
Handle<JSArrayBuffer> array_buffer =
- GetSharedArrayBuffer(instance, isolate, address);
+ GetArrayBuffer(instance, isolate, address);
+
+ // Trap if memory is not shared
+ if (!array_buffer->is_shared()) {
+ return ThrowWasmError(isolate, MessageTemplate::kAtomicsWaitNotAllowed);
+ }
return FutexEmulation::WaitWasm64(isolate, array_buffer, address,
expected_value->AsInt64(),
timeout_ns->AsInt64());
@@ -344,6 +355,9 @@ RUNTIME_FUNCTION(Runtime_WasmTableInit) {
CONVERT_ARG_HANDLE_CHECKED(WasmInstanceObject, instance, 0);
CONVERT_UINT32_ARG_CHECKED(table_index, 1);
CONVERT_UINT32_ARG_CHECKED(elem_segment_index, 2);
+ static_assert(
+ wasm::kV8MaxWasmTableSize < kSmiMaxValue,
+ "Make sure clamping to Smi range doesn't make an invalid call valid");
CONVERT_UINT32_ARG_CHECKED(dst, 3);
CONVERT_UINT32_ARG_CHECKED(src, 4);
CONVERT_UINT32_ARG_CHECKED(count, 5);
@@ -363,6 +377,9 @@ RUNTIME_FUNCTION(Runtime_WasmTableCopy) {
CONVERT_ARG_HANDLE_CHECKED(WasmInstanceObject, instance, 0);
CONVERT_UINT32_ARG_CHECKED(table_dst_index, 1);
CONVERT_UINT32_ARG_CHECKED(table_src_index, 2);
+ static_assert(
+ wasm::kV8MaxWasmTableSize < kSmiMaxValue,
+ "Make sure clamping to Smi range doesn't make an invalid call valid");
CONVERT_UINT32_ARG_CHECKED(dst, 3);
CONVERT_UINT32_ARG_CHECKED(src, 4);
CONVERT_UINT32_ARG_CHECKED(count, 5);
@@ -440,14 +457,13 @@ RUNTIME_FUNCTION(Runtime_WasmDebugBreak) {
// Enter the debugger.
DebugScope debug_scope(isolate->debug());
- const auto undefined = ReadOnlyRoots(isolate).undefined_value();
WasmFrame* frame = frame_finder.frame();
auto* debug_info = frame->native_module()->GetDebugInfo();
if (debug_info->IsStepping(frame)) {
- debug_info->ClearStepping();
+ debug_info->ClearStepping(isolate);
isolate->debug()->ClearStepping();
isolate->debug()->OnDebugBreak(isolate->factory()->empty_fixed_array());
- return undefined;
+ return ReadOnlyRoots(isolate).undefined_value();
}
// Check whether we hit a breakpoint.
@@ -455,7 +471,7 @@ RUNTIME_FUNCTION(Runtime_WasmDebugBreak) {
Handle<FixedArray> breakpoints;
if (WasmScript::CheckBreakPoints(isolate, script, position)
.ToHandle(&breakpoints)) {
- debug_info->ClearStepping();
+ debug_info->ClearStepping(isolate);
isolate->debug()->ClearStepping();
if (isolate->debug()->break_points_active()) {
// We hit one or several breakpoints. Notify the debug listeners.
@@ -474,7 +490,7 @@ RUNTIME_FUNCTION(Runtime_WasmDebugBreak) {
debug_info->RemoveBreakpoint(frame->function_index(), position, isolate);
}
- return undefined;
+ return ReadOnlyRoots(isolate).undefined_value();
}
} // namespace internal
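
The atomics runtime calls above now tolerate non-shared wasm memory: notify reports zero woken waiters, while wait raises an error, mirroring the JS Atomics semantics. A small standalone model of that behaviour (not the V8 futex emulation):

#include <cstdint>
#include <stdexcept>

struct ArrayBuffer {
  bool shared = false;
};

int AtomicNotify(const ArrayBuffer& buf, uint32_t /*address*/, uint32_t count) {
  if (!buf.shared) return 0;       // nothing can be waiting on non-shared memory
  return static_cast<int>(count);  // pretend we woke `count` waiters
}

void AtomicWait(const ArrayBuffer& buf, uint32_t /*address*/) {
  if (!buf.shared) {
    throw std::runtime_error("Atomics.wait is not allowed on non-shared memory");
  }
  // ... block on the futex emulation ...
}
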
diff --git a/chromium/v8/src/runtime/runtime.cc b/chromium/v8/src/runtime/runtime.cc
index bd6853de8e8..63be622b0df 100644
--- a/chromium/v8/src/runtime/runtime.cc
+++ b/chromium/v8/src/runtime/runtime.cc
@@ -192,10 +192,10 @@ bool Runtime::MayAllocate(FunctionId id) {
}
}
-bool Runtime::IsWhitelistedForFuzzing(FunctionId id) {
- CHECK(FLAG_allow_natives_for_fuzzing);
+bool Runtime::IsAllowListedForFuzzing(FunctionId id) {
+ CHECK(FLAG_fuzzing);
switch (id) {
- // Runtime functions whitelisted for all fuzzers. Only add functions that
+ // Runtime functions allowlisted for all fuzzers. Only add functions that
// help increase coverage.
case Runtime::kArrayBufferDetach:
case Runtime::kDeoptimizeFunction:
diff --git a/chromium/v8/src/runtime/runtime.h b/chromium/v8/src/runtime/runtime.h
index 8f8903d9656..75f9c39bc13 100644
--- a/chromium/v8/src/runtime/runtime.h
+++ b/chromium/v8/src/runtime/runtime.h
@@ -329,6 +329,7 @@ namespace internal {
F(SetDataProperties, 2, 1) \
F(SetKeyedProperty, 3, 1) \
F(SetNamedProperty, 3, 1) \
+ F(SetOwnPropertyIgnoreAttributes, 4, 1) \
F(StoreDataPropertyInLiteral, 3, 1) \
F(ShrinkPropertyDictionary, 1, 1) \
F(ToFastProperties, 1, 1) \
@@ -542,6 +543,8 @@ namespace internal {
F(WasmTierDownModule, 1, 1) \
F(WasmTierUpFunction, 2, 1) \
F(WasmTierUpModule, 1, 1) \
+ F(WasmTraceEnter, 0, 1) \
+ F(WasmTraceExit, 1, 1) \
F(WasmTraceMemory, 1, 1) \
I(DeoptimizeNow, 0, 1)
@@ -718,9 +721,9 @@ class Runtime : public AllStatic {
// allocation.
static bool MayAllocate(FunctionId id);
- // Check if a runtime function with the given {id} is whitelisted for
+ // Check if a runtime function with the given {id} is allowlisted for
// using it with fuzzers.
- static bool IsWhitelistedForFuzzing(FunctionId id);
+ static bool IsAllowListedForFuzzing(FunctionId id);
// Get the intrinsic function with the given name.
static const Function* FunctionForName(const unsigned char* name, int length);
diff --git a/chromium/v8/src/snapshot/code-serializer.cc b/chromium/v8/src/snapshot/code-serializer.cc
index f9093012b27..f4cf0b07072 100644
--- a/chromium/v8/src/snapshot/code-serializer.cc
+++ b/chromium/v8/src/snapshot/code-serializer.cc
@@ -4,9 +4,12 @@
#include "src/snapshot/code-serializer.h"
+#include "src/base/platform/platform.h"
#include "src/codegen/macro-assembler.h"
+#include "src/common/globals.h"
#include "src/debug/debug.h"
#include "src/heap/heap-inl.h"
+#include "src/heap/off-thread-factory-inl.h"
#include "src/logging/counters.h"
#include "src/logging/log.h"
#include "src/objects/objects-inl.h"
@@ -104,14 +107,14 @@ bool CodeSerializer::SerializeReadOnlyObject(HeapObject obj) {
// create a back reference that encodes the page number as the chunk_index and
// the offset within the page as the chunk_offset.
Address address = obj.address();
- Page* page = Page::FromAddress(address);
+ BasicMemoryChunk* chunk = BasicMemoryChunk::FromAddress(address);
uint32_t chunk_index = 0;
ReadOnlySpace* const read_only_space = isolate()->heap()->read_only_space();
- for (Page* p : *read_only_space) {
- if (p == page) break;
+ for (ReadOnlyPage* page : read_only_space->pages()) {
+ if (chunk == page) break;
++chunk_index;
}
- uint32_t chunk_offset = static_cast<uint32_t>(page->Offset(address));
+ uint32_t chunk_offset = static_cast<uint32_t>(chunk->Offset(address));
SerializerReference back_reference = SerializerReference::BackReference(
SnapshotSpace::kReadOnlyHeap, chunk_index, chunk_offset);
reference_map()->Add(reinterpret_cast<void*>(obj.ptr()), back_reference);
@@ -259,6 +262,39 @@ void CreateInterpreterDataForDeserializedCode(Isolate* isolate,
}
#endif // V8_TARGET_ARCH_ARM
+namespace {
+class StressOffThreadDeserializeThread final : public base::Thread {
+ public:
+ explicit StressOffThreadDeserializeThread(
+ OffThreadIsolate* off_thread_isolate, const SerializedCodeData* scd)
+ : Thread(
+ base::Thread::Options("StressOffThreadDeserializeThread", 2 * MB)),
+ off_thread_isolate_(off_thread_isolate),
+ scd_(scd) {}
+
+ MaybeHandle<SharedFunctionInfo> maybe_result() const {
+ return maybe_result_.ToHandle();
+ }
+
+ void Run() final {
+ off_thread_isolate_->PinToCurrentThread();
+
+ MaybeHandle<SharedFunctionInfo> off_thread_maybe_result =
+ ObjectDeserializer::DeserializeSharedFunctionInfoOffThread(
+ off_thread_isolate_, scd_,
+ off_thread_isolate_->factory()->empty_string());
+
+ maybe_result_ =
+ off_thread_isolate_->TransferHandle(off_thread_maybe_result);
+ }
+
+ private:
+ OffThreadIsolate* off_thread_isolate_;
+ const SerializedCodeData* scd_;
+ OffThreadTransferMaybeHandle<SharedFunctionInfo> maybe_result_;
+};
+} // namespace
+
MaybeHandle<SharedFunctionInfo> CodeSerializer::Deserialize(
Isolate* isolate, ScriptData* cached_data, Handle<String> source,
ScriptOriginOptions origin_options) {
@@ -281,8 +317,29 @@ MaybeHandle<SharedFunctionInfo> CodeSerializer::Deserialize(
}
// Deserialize.
- MaybeHandle<SharedFunctionInfo> maybe_result =
- ObjectDeserializer::DeserializeSharedFunctionInfo(isolate, &scd, source);
+ MaybeHandle<SharedFunctionInfo> maybe_result;
+ if (FLAG_stress_background_compile) {
+ Zone zone(isolate->allocator(), "Deserialize");
+ OffThreadIsolate off_thread_isolate(isolate, &zone);
+
+ StressOffThreadDeserializeThread thread(&off_thread_isolate, &scd);
+ CHECK(thread.Start());
+ thread.Join();
+
+ off_thread_isolate.FinishOffThread();
+ off_thread_isolate.Publish(isolate);
+
+ maybe_result = thread.maybe_result();
+
+ // Fix-up result script source.
+ Handle<SharedFunctionInfo> result;
+ if (maybe_result.ToHandle(&result)) {
+ Script::cast(result->script()).set_source(*source);
+ }
+ } else {
+ maybe_result = ObjectDeserializer::DeserializeSharedFunctionInfo(
+ isolate, &scd, source);
+ }
Handle<SharedFunctionInfo> result;
if (!maybe_result.ToHandle(&result)) {
@@ -356,7 +413,6 @@ MaybeHandle<SharedFunctionInfo> CodeSerializer::Deserialize(
return scope.CloseAndEscape(result);
}
-
SerializedCodeData::SerializedCodeData(const std::vector<byte>* payload,
const CodeSerializer* cs) {
DisallowHeapAllocation no_gc;
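
The stress path above runs deserialization on a separate thread against an off-thread isolate and transfers the resulting handle back before publishing to the main heap. A minimal sketch of that hand-off pattern using standard threads and invented stand-in types:

#include <optional>
#include <string>
#include <thread>

struct OffThreadArena {
  std::optional<std::string> result;
  void Publish() { /* merge arena contents into the main heap */ }
};

std::optional<std::string> DeserializeOffThread(OffThreadArena* arena,
                                                const std::string& payload) {
  std::thread worker([&] {
    // All allocation happens against the arena, not the main heap.
    arena->result = "deserialized:" + payload;
  });
  worker.join();
  arena->Publish();  // only after the worker has finished
  return arena->result;
}
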
diff --git a/chromium/v8/src/snapshot/context-deserializer.cc b/chromium/v8/src/snapshot/context-deserializer.cc
index 2a3d77646a9..b6b6da54f91 100644
--- a/chromium/v8/src/snapshot/context-deserializer.cc
+++ b/chromium/v8/src/snapshot/context-deserializer.cc
@@ -59,12 +59,12 @@ MaybeHandle<Object> ContextDeserializer::Deserialize(
// new code, which also has to be flushed from instruction cache.
CHECK_EQ(start_address, code_space->top());
- if (FLAG_rehash_snapshot && can_rehash()) Rehash();
LogNewMapEvents();
result = handle(root, isolate);
}
+ if (FLAG_rehash_snapshot && can_rehash()) Rehash();
SetupOffHeapArrayBufferBackingStores();
return result;
diff --git a/chromium/v8/src/snapshot/deserializer-allocator.cc b/chromium/v8/src/snapshot/deserializer-allocator.cc
index a3d3eca7114..c1eaaed2960 100644
--- a/chromium/v8/src/snapshot/deserializer-allocator.cc
+++ b/chromium/v8/src/snapshot/deserializer-allocator.cc
@@ -6,10 +6,17 @@
#include "src/heap/heap-inl.h" // crbug.com/v8/8499
#include "src/heap/memory-chunk.h"
+#include "src/roots/roots.h"
namespace v8 {
namespace internal {
+void DeserializerAllocator::Initialize(LocalHeapWrapper heap) {
+ heap_ = heap;
+ roots_ = heap.is_off_thread() ? ReadOnlyRoots(heap.off_thread())
+ : ReadOnlyRoots(heap.main_thread());
+}
+
// We know the space requirements before deserialization and can
// pre-allocate that reserved space. During deserialization, all we need
// to do is to bump up the pointer for each space in the reserved
@@ -24,12 +31,18 @@ namespace internal {
Address DeserializerAllocator::AllocateRaw(SnapshotSpace space, int size) {
const int space_number = static_cast<int>(space);
if (space == SnapshotSpace::kLargeObject) {
- AlwaysAllocateScope scope(heap_);
// Note that we currently do not support deserialization of large code
// objects.
- OldLargeObjectSpace* lo_space = heap_->lo_space();
- AllocationResult result = lo_space->AllocateRaw(size);
- HeapObject obj = result.ToObjectChecked();
+ HeapObject obj;
+ if (heap_.is_off_thread()) {
+ obj = heap_.off_thread()->lo_space_.AllocateRaw(size).ToObjectChecked();
+ } else {
+ Heap* heap = heap_.main_thread();
+ AlwaysAllocateScope scope(heap);
+ OldLargeObjectSpace* lo_space = heap->lo_space();
+ AllocationResult result = lo_space->AllocateRaw(size);
+ obj = result.ToObjectChecked();
+ }
deserialized_large_objects_.push_back(obj);
return obj.address();
} else if (space == SnapshotSpace::kMap) {
@@ -82,11 +95,10 @@ Address DeserializerAllocator::Allocate(SnapshotSpace space, int size) {
// If one of the following assertions fails, then we are deserializing an
// aligned object when the filler maps have not been deserialized yet.
// We require filler maps as padding to align the object.
- DCHECK(ReadOnlyRoots(heap_).free_space_map().IsMap());
- DCHECK(ReadOnlyRoots(heap_).one_pointer_filler_map().IsMap());
- DCHECK(ReadOnlyRoots(heap_).two_pointer_filler_map().IsMap());
- obj = Heap::AlignWithFiller(ReadOnlyRoots(heap_), obj, size, reserved,
- next_alignment_);
+ DCHECK(roots_.free_space_map().IsMap());
+ DCHECK(roots_.one_pointer_filler_map().IsMap());
+ DCHECK(roots_.two_pointer_filler_map().IsMap());
+ obj = Heap::AlignWithFiller(roots_, obj, size, reserved, next_alignment_);
address = obj.address();
next_alignment_ = kWordAligned;
return address;
@@ -109,6 +121,7 @@ void DeserializerAllocator::MoveToNextChunk(SnapshotSpace space) {
}
HeapObject DeserializerAllocator::GetMap(uint32_t index) {
+ DCHECK(!heap_.is_off_thread());
DCHECK_LT(index, next_map_index_);
return HeapObject::FromAddress(allocated_maps_[index]);
}
@@ -156,10 +169,16 @@ bool DeserializerAllocator::ReserveSpace() {
}
#endif // DEBUG
DCHECK(allocated_maps_.empty());
- // TODO(v8:7464): Allocate using the off-heap ReadOnlySpace here once
- // implemented.
- if (!heap_->ReserveSpace(reservations_, &allocated_maps_)) {
- return false;
+ if (heap_.is_off_thread()) {
+ if (!heap_.off_thread()->ReserveSpace(reservations_)) {
+ return false;
+ }
+ } else {
+ // TODO(v8:7464): Allocate using the off-heap ReadOnlySpace here once
+ // implemented.
+ if (!heap_.main_thread()->ReserveSpace(reservations_, &allocated_maps_)) {
+ return false;
+ }
}
for (int i = 0; i < kNumberOfPreallocatedSpaces; i++) {
high_water_[i] = reservations_[i][0].start;
@@ -181,7 +200,8 @@ bool DeserializerAllocator::ReservationsAreFullyUsed() const {
}
void DeserializerAllocator::RegisterDeserializedObjectsForBlackAllocation() {
- heap_->RegisterDeserializedObjectsForBlackAllocation(
+ DCHECK(!heap_.is_off_thread());
+ heap_.main_thread()->RegisterDeserializedObjectsForBlackAllocation(
reservations_, deserialized_large_objects_, allocated_maps_);
}
diff --git a/chromium/v8/src/snapshot/deserializer-allocator.h b/chromium/v8/src/snapshot/deserializer-allocator.h
index 979e6ed2a8b..9381e1302bd 100644
--- a/chromium/v8/src/snapshot/deserializer-allocator.h
+++ b/chromium/v8/src/snapshot/deserializer-allocator.h
@@ -6,8 +6,10 @@
#define V8_SNAPSHOT_DESERIALIZER_ALLOCATOR_H_
#include "src/common/globals.h"
+#include "src/execution/local-isolate-wrapper.h"
#include "src/heap/heap.h"
#include "src/objects/heap-object.h"
+#include "src/roots/roots.h"
#include "src/snapshot/references.h"
#include "src/snapshot/snapshot-data.h"
@@ -16,12 +18,13 @@ namespace internal {
class Deserializer;
class StartupDeserializer;
+class OffThreadHeap;
class DeserializerAllocator final {
public:
DeserializerAllocator() = default;
- void Initialize(Heap* heap) { heap_ = heap; }
+ void Initialize(LocalHeapWrapper heap);
// ------- Allocation Methods -------
// Methods related to memory allocation during deserialization.
@@ -99,7 +102,9 @@ class DeserializerAllocator final {
// back-references.
std::vector<HeapObject> deserialized_large_objects_;
- Heap* heap_;
+ // ReadOnlyRoots and heap are null until Initialize is called.
+ LocalHeapWrapper heap_ = LocalHeapWrapper(nullptr);
+ ReadOnlyRoots roots_ = ReadOnlyRoots(static_cast<Address*>(nullptr));
DISALLOW_COPY_AND_ASSIGN(DeserializerAllocator);
};
diff --git a/chromium/v8/src/snapshot/deserializer.cc b/chromium/v8/src/snapshot/deserializer.cc
index 33e4db43931..3c4f9503f2a 100644
--- a/chromium/v8/src/snapshot/deserializer.cc
+++ b/chromium/v8/src/snapshot/deserializer.cc
@@ -8,11 +8,13 @@
#include "src/codegen/assembler-inl.h"
#include "src/common/external-pointer.h"
#include "src/execution/isolate.h"
+#include "src/execution/local-isolate-wrapper-inl.h"
#include "src/heap/heap-inl.h"
#include "src/heap/heap-write-barrier-inl.h"
#include "src/heap/read-only-heap.h"
#include "src/interpreter/interpreter.h"
#include "src/logging/log.h"
+#include "src/logging/off-thread-logger.h"
#include "src/objects/api-callbacks.h"
#include "src/objects/cell-inl.h"
#include "src/objects/hash-table.h"
@@ -56,22 +58,25 @@ TSlot Deserializer::WriteExternalPointer(TSlot dest, Address value) {
return dest + (kExternalPointerSize / TSlot::kSlotDataSize);
}
-void Deserializer::Initialize(Isolate* isolate) {
- DCHECK_NULL(isolate_);
- DCHECK_NOT_NULL(isolate);
- isolate_ = isolate;
- allocator()->Initialize(isolate->heap());
+void Deserializer::Initialize(LocalIsolateWrapper local_isolate) {
+ DCHECK(local_isolate_.is_null());
+ DCHECK(!local_isolate.is_null());
+ local_isolate_ = local_isolate;
+ allocator()->Initialize(local_isolate->heap());
#ifdef DEBUG
- // The read-only deserializer is run by read-only heap set-up before the heap
- // is fully set up. External reference table relies on a few parts of this
- // set-up (like old-space), so it may be uninitialized at this point.
- if (isolate->isolate_data()->external_reference_table()->is_initialized()) {
- // Count the number of external references registered through the API.
- num_api_references_ = 0;
- if (isolate_->api_external_references() != nullptr) {
- while (isolate_->api_external_references()[num_api_references_] != 0) {
- num_api_references_++;
+ num_api_references_ = 0;
+ if (!local_isolate.is_off_thread()) {
+ Isolate* isolate = local_isolate.main_thread();
+ // The read-only deserializer is run by read-only heap set-up before the
+ // heap is fully set up. External reference table relies on a few parts of
+ // this set-up (like old-space), so it may be uninitialized at this point.
+ if (isolate->isolate_data()->external_reference_table()->is_initialized()) {
+ // Count the number of external references registered through the API.
+ if (isolate->api_external_references() != nullptr) {
+ while (isolate->api_external_references()[num_api_references_] != 0) {
+ num_api_references_++;
+ }
}
}
}
@@ -82,7 +87,7 @@ void Deserializer::Initialize(Isolate* isolate) {
void Deserializer::Rehash() {
DCHECK(can_rehash() || deserializing_user_code());
for (HeapObject item : to_rehash_) {
- item.RehashBasedOnMap(ReadOnlyRoots(isolate_));
+ item.RehashBasedOnMap(local_isolate());
}
}
@@ -142,34 +147,32 @@ void Deserializer::DeserializeDeferredObjects() {
}
}
}
-}
-void Deserializer::LogNewObjectEvents() {
- {
- // {new_maps_} and {new_code_objects_} are vectors containing raw
- // pointers, hence there should be no GC happening.
- DisallowHeapAllocation no_gc;
- // Issue code events for newly deserialized code objects.
- LOG_CODE_EVENT(isolate_, LogCodeObjects());
+  // When the deserialization of maps is deferred, the affected objects are
+  // created with filler maps, and their post-processing is postponed until
+  // the maps themselves have been deserialized.
+ for (const auto& pair : fillers_to_post_process_) {
+ DCHECK(!pair.first.IsFiller());
+ PostProcessNewObject(pair.first, pair.second);
}
- LOG_CODE_EVENT(isolate_, LogCompiledFunctions());
- LogNewMapEvents();
}
void Deserializer::LogNewMapEvents() {
DisallowHeapAllocation no_gc;
- for (Map map : new_maps()) {
+ DCHECK(is_main_thread());
+
+ for (Map map : new_maps_) {
DCHECK(FLAG_trace_maps);
- LOG(isolate_, MapCreate(map));
- LOG(isolate_, MapDetails(map));
+ LOG(isolate(), MapCreate(map));
+ LOG(isolate(), MapDetails(map));
}
}
void Deserializer::LogScriptEvents(Script script) {
DisallowHeapAllocation no_gc;
- LOG(isolate_,
+ LOG(local_isolate(),
ScriptEvent(Logger::ScriptEventType::kDeserialize, script.id()));
- LOG(isolate_, ScriptDetails(script));
+ LOG(local_isolate(), ScriptDetails(script));
}
StringTableInsertionKey::StringTableInsertionKey(String string)
@@ -213,7 +216,11 @@ HeapObject Deserializer::PostProcessNewObject(HeapObject obj,
DisallowHeapAllocation no_gc;
if ((FLAG_rehash_snapshot && can_rehash_) || deserializing_user_code()) {
- if (obj.IsString()) {
+ if (obj.IsFiller()) {
+ DCHECK_EQ(fillers_to_post_process_.find(obj),
+ fillers_to_post_process_.end());
+ fillers_to_post_process_.insert({obj, space});
+ } else if (obj.IsString()) {
// Uninitialize hash field as we need to recompute the hash.
String string = String::cast(obj);
string.set_hash_field(String::kEmptyHashField);
@@ -231,18 +238,22 @@ HeapObject Deserializer::PostProcessNewObject(HeapObject obj,
if (obj.IsString()) {
String string = String::cast(obj);
if (string.IsInternalizedString()) {
+ // Off-thread internalized strings are canonicalized during off-thread
+ // isolate publish, so we don't have to canonicalize them here.
+ if (local_isolate().is_off_thread()) return string;
+
// Canonicalize the internalized string. If it already exists in the
// string table, set it to forward to the existing one.
StringTableInsertionKey key(string);
- String canonical = ForwardStringIfExists(isolate_, &key);
+ String canonical = ForwardStringIfExists(isolate(), &key);
if (!canonical.is_null()) return canonical;
- new_internalized_strings_.push_back(handle(string, isolate_));
+ new_internalized_strings_.push_back(handle(string, isolate()));
return string;
}
} else if (obj.IsScript()) {
- new_scripts_.push_back(handle(Script::cast(obj), isolate_));
+ new_scripts_.push_back(handle(Script::cast(obj), local_isolate()));
} else if (obj.IsAllocationSite()) {
// We should link new allocation sites, but we can't do this immediately
// because |AllocationSite::HasWeakNext()| internally accesses
@@ -278,11 +289,11 @@ HeapObject Deserializer::PostProcessNewObject(HeapObject obj,
ExternalString string = ExternalString::cast(obj);
uint32_t index = string.resource_as_uint32();
Address address =
- static_cast<Address>(isolate_->api_external_references()[index]);
- string.set_address_as_resource(isolate_, address);
- isolate_->heap()->UpdateExternalString(string, 0,
- string.ExternalPayloadSize());
- isolate_->heap()->RegisterExternalString(String::cast(obj));
+ static_cast<Address>(isolate()->api_external_references()[index]);
+ string.set_address_as_resource(isolate(), address);
+ isolate()->heap()->UpdateExternalString(string, 0,
+ string.ExternalPayloadSize());
+ isolate()->heap()->RegisterExternalString(String::cast(obj));
} else if (obj.IsJSDataView()) {
JSDataView data_view = JSDataView::cast(obj);
JSArrayBuffer buffer = JSArrayBuffer::cast(data_view.buffer());
@@ -295,7 +306,7 @@ HeapObject Deserializer::PostProcessNewObject(HeapObject obj,
backing_store = backing_stores_[store_index]->buffer_start();
}
data_view.set_data_pointer(
- isolate_,
+ isolate(),
reinterpret_cast<uint8_t*>(backing_store) + data_view.byte_offset());
} else if (obj.IsJSTypedArray()) {
JSTypedArray typed_array = JSTypedArray::cast(obj);
@@ -319,7 +330,7 @@ HeapObject Deserializer::PostProcessNewObject(HeapObject obj,
JSArrayBuffer buffer = JSArrayBuffer::cast(obj);
// Postpone allocation of backing store to avoid triggering the GC.
if (buffer.backing_store() != nullptr) {
- new_off_heap_array_buffers_.push_back(handle(buffer, isolate_));
+ new_off_heap_array_buffers_.push_back(handle(buffer, local_isolate()));
}
} else if (obj.IsBytecodeArray()) {
// TODO(mythria): Remove these once we store the default values for these
@@ -352,12 +363,10 @@ HeapObject Deserializer::GetBackReferencedObject(SnapshotSpace space) {
case SnapshotSpace::kReadOnlyHeap: {
uint32_t chunk_index = source_.GetInt();
uint32_t chunk_offset = source_.GetInt();
- if (isolate()->heap()->deserialization_complete()) {
- PagedSpace* read_only_space = isolate()->heap()->read_only_space();
- Page* page = read_only_space->first_page();
- for (uint32_t i = 0; i < chunk_index; ++i) {
- page = page->next_page();
- }
+ if (is_off_thread() || isolate()->heap()->deserialization_complete()) {
+ ReadOnlySpace* read_only_space =
+ local_isolate()->heap()->read_only_space();
+ ReadOnlyPage* page = read_only_space->pages()[chunk_index];
Address address = page->OffsetToAddress(chunk_offset);
obj = HeapObject::FromAddress(address);
} else {
@@ -401,7 +410,7 @@ HeapObject Deserializer::ReadObject(SnapshotSpace space) {
Address address = allocator()->Allocate(space, size);
HeapObject obj = HeapObject::FromAddress(address);
- isolate_->heap()->OnAllocationEvent(obj, size);
+ local_isolate()->heap()->OnAllocationEvent(obj, size);
MaybeObjectSlot current(address);
MaybeObjectSlot limit(address + size);
@@ -449,7 +458,7 @@ void Deserializer::VisitCodeTarget(Code host, RelocInfo* rinfo) {
void Deserializer::VisitEmbeddedPointer(Code host, RelocInfo* rinfo) {
HeapObject object = ReadObject();
// Embedded object reference must be a strong one.
- rinfo->set_target_object(isolate_->heap(), object);
+ rinfo->set_target_object(isolate()->heap(), object);
}
void Deserializer::VisitRuntimeEntry(Code host, RelocInfo* rinfo) {
@@ -492,7 +501,7 @@ void Deserializer::VisitOffHeapTarget(Code host, RelocInfo* rinfo) {
int builtin_index = source_.GetInt();
DCHECK(Builtins::IsBuiltinId(builtin_index));
- CHECK_NOT_NULL(isolate_->embedded_blob());
+ CHECK_NOT_NULL(isolate()->embedded_blob());
EmbeddedData d = EmbeddedData::FromBlob();
Address address = d.InstructionStartOfBuiltin(builtin_index);
CHECK_NE(kNullAddress, address);
@@ -533,7 +542,6 @@ template <typename TSlot>
bool Deserializer::ReadData(TSlot current, TSlot limit,
SnapshotSpace source_space,
Address current_object_address) {
- Isolate* const isolate = isolate_;
// Write barrier support costs around 1% in startup time. In fact there
// are no new space objects in current boot snapshots, so it's not needed,
// but that may change.
@@ -548,9 +556,9 @@ bool Deserializer::ReadData(TSlot current, TSlot limit,
case bytecode + static_cast<int>(snapshot_space): \
STATIC_ASSERT((static_cast<int>(snapshot_space) & ~kSpaceMask) == 0);
-#define CASE_BODY(bytecode, space_number_if_any) \
- current = ReadDataCase<TSlot, bytecode, space_number_if_any>( \
- isolate, current, current_object_address, data, write_barrier_needed); \
+#define CASE_BODY(bytecode, space_number_if_any) \
+ current = ReadDataCase<TSlot, bytecode, space_number_if_any>( \
+ current, current_object_address, data, write_barrier_needed); \
break;
// This generates a case and a body for the new space (which has to do extra
@@ -689,11 +697,11 @@ bool Deserializer::ReadData(TSlot current, TSlot limit,
}
case kOffHeapBackingStore: {
- AlwaysAllocateScope scope(isolate->heap());
+ AlwaysAllocateScope scope(isolate()->heap());
int byte_length = source_.GetInt();
- std::unique_ptr<BackingStore> backing_store =
- BackingStore::Allocate(isolate, byte_length, SharedFlag::kNotShared,
- InitializedFlag::kUninitialized);
+ std::unique_ptr<BackingStore> backing_store = BackingStore::Allocate(
+ isolate(), byte_length, SharedFlag::kNotShared,
+ InitializedFlag::kUninitialized);
CHECK_NOT_NULL(backing_store);
source_.CopyRaw(backing_store->buffer_start(), byte_length);
backing_stores_.push_back(std::move(backing_store));
@@ -704,12 +712,14 @@ bool Deserializer::ReadData(TSlot current, TSlot limit,
case kApiReference: {
uint32_t reference_id = static_cast<uint32_t>(source_.GetInt());
Address address;
- if (isolate->api_external_references()) {
+ if (isolate()->api_external_references()) {
DCHECK_WITH_MSG(
reference_id < num_api_references_,
"too few external references provided through the API");
address = static_cast<Address>(
- isolate->api_external_references()[reference_id]);
+ local_isolate()
+ .main_thread()
+ ->api_external_references()[reference_id]);
} else {
address = reinterpret_cast<Address>(NoExternalReferencesCallback);
}
@@ -723,7 +733,8 @@ bool Deserializer::ReadData(TSlot current, TSlot limit,
}
case kClearedWeakReference:
- current = Write(current, HeapObjectReference::ClearedValue(isolate_));
+ current =
+ Write(current, HeapObjectReference::ClearedValue(local_isolate()));
break;
case kWeakPrefix:
@@ -750,7 +761,8 @@ bool Deserializer::ReadData(TSlot current, TSlot limit,
SIXTEEN_CASES(kRootArrayConstants + 16) {
int id = data & kRootArrayConstantsMask;
RootIndex root_index = static_cast<RootIndex>(id);
- MaybeObject object = MaybeObject::FromObject(isolate->root(root_index));
+ MaybeObject object =
+ MaybeObject(ReadOnlyRoots(local_isolate()).at(root_index));
DCHECK(!Heap::InYoungGeneration(object));
current = Write(current, object);
break;
@@ -819,14 +831,13 @@ bool Deserializer::ReadData(TSlot current, TSlot limit,
Address Deserializer::ReadExternalReferenceCase() {
uint32_t reference_id = static_cast<uint32_t>(source_.GetInt());
- return isolate_->external_reference_table()->address(reference_id);
+ return isolate()->external_reference_table()->address(reference_id);
}
template <typename TSlot, SerializerDeserializer::Bytecode bytecode,
SnapshotSpace space_number_if_any>
-TSlot Deserializer::ReadDataCase(Isolate* isolate, TSlot current,
- Address current_object_address, byte data,
- bool write_barrier_needed) {
+TSlot Deserializer::ReadDataCase(TSlot current, Address current_object_address,
+ byte data, bool write_barrier_needed) {
bool emit_write_barrier = false;
SnapshotSpace space = static_cast<SnapshotSpace>(
space_number_if_any == kAnyOldSpace
@@ -847,19 +858,20 @@ TSlot Deserializer::ReadDataCase(Isolate* isolate, TSlot current,
} else if (bytecode == kRootArray) {
int id = source_.GetInt();
RootIndex root_index = static_cast<RootIndex>(id);
- heap_object = HeapObject::cast(isolate->root(root_index));
+ heap_object = HeapObject::cast(local_isolate()->root(root_index));
emit_write_barrier = Heap::InYoungGeneration(heap_object);
hot_objects_.Add(heap_object);
} else if (bytecode == kReadOnlyObjectCache) {
int cache_index = source_.GetInt();
heap_object = HeapObject::cast(
- isolate->read_only_heap()->cached_read_only_object(cache_index));
+ local_isolate()->read_only_heap()->cached_read_only_object(
+ cache_index));
DCHECK(!Heap::InYoungGeneration(heap_object));
emit_write_barrier = false;
} else if (bytecode == kStartupObjectCache) {
int cache_index = source_.GetInt();
heap_object =
- HeapObject::cast(isolate->startup_object_cache()->at(cache_index));
+ HeapObject::cast(isolate()->startup_object_cache()->at(cache_index));
emit_write_barrier = Heap::InYoungGeneration(heap_object);
} else {
DCHECK_EQ(bytecode, kAttachedReference);
@@ -876,7 +888,7 @@ TSlot Deserializer::ReadDataCase(Isolate* isolate, TSlot current,
if (emit_write_barrier && write_barrier_needed) {
DCHECK_IMPLIES(FLAG_disable_write_barriers, !write_barrier_needed);
HeapObject host_object = HeapObject::FromAddress(current_object_address);
- SLOW_DCHECK(isolate->heap()->Contains(host_object));
+ SLOW_DCHECK(local_isolate()->heap()->Contains(host_object));
GenerationalBarrier(host_object, MaybeObjectSlot(current.address()),
heap_object_ref);
}
diff --git a/chromium/v8/src/snapshot/deserializer.h b/chromium/v8/src/snapshot/deserializer.h
index 3af3eca5910..344db431a18 100644
--- a/chromium/v8/src/snapshot/deserializer.h
+++ b/chromium/v8/src/snapshot/deserializer.h
@@ -8,6 +8,7 @@
#include <utility>
#include <vector>
+#include "src/execution/local-isolate-wrapper.h"
#include "src/objects/allocation-site.h"
#include "src/objects/api-callbacks.h"
#include "src/objects/backing-store.h"
@@ -47,7 +48,7 @@ class V8_EXPORT_PRIVATE Deserializer : public SerializerDeserializer {
// Create a deserializer from a snapshot byte source.
template <class Data>
Deserializer(Data* data, bool deserializing_user_code)
- : isolate_(nullptr),
+ : local_isolate_(nullptr),
source_(data->Payload()),
magic_number_(data->GetMagicNumber()),
deserializing_user_code_(deserializing_user_code),
@@ -58,7 +59,10 @@ class V8_EXPORT_PRIVATE Deserializer : public SerializerDeserializer {
backing_stores_.push_back({});
}
- void Initialize(Isolate* isolate);
+ void Initialize(Isolate* isolate) {
+ Initialize(LocalIsolateWrapper(isolate));
+ }
+ void Initialize(LocalIsolateWrapper isolate);
void DeserializeDeferredObjects();
// Create Log events for newly deserialized objects.
@@ -80,7 +84,11 @@ class V8_EXPORT_PRIVATE Deserializer : public SerializerDeserializer {
CHECK_EQ(new_off_heap_array_buffers().size(), 0);
}
- Isolate* isolate() const { return isolate_; }
+ LocalIsolateWrapper local_isolate() const { return local_isolate_; }
+ Isolate* isolate() const { return local_isolate().main_thread(); }
+ bool is_main_thread() const { return local_isolate().is_main_thread(); }
+ bool is_off_thread() const { return local_isolate().is_off_thread(); }
+
SnapshotByteSource* source() { return &source_; }
const std::vector<AllocationSite>& new_allocation_sites() const {
return new_allocation_sites_;
@@ -117,9 +125,6 @@ class V8_EXPORT_PRIVATE Deserializer : public SerializerDeserializer {
void Rehash();
- // Cached current isolate.
- Isolate* isolate_;
-
private:
void VisitRootPointers(Root root, const char* description,
FullObjectSlot start, FullObjectSlot end) override;
@@ -148,9 +153,8 @@ class V8_EXPORT_PRIVATE Deserializer : public SerializerDeserializer {
// Returns the new value of {current}.
template <typename TSlot, Bytecode bytecode,
SnapshotSpace space_number_if_any>
- inline TSlot ReadDataCase(Isolate* isolate, TSlot current,
- Address current_object_address, byte data,
- bool write_barrier_needed);
+ inline TSlot ReadDataCase(TSlot current, Address current_object_address,
+ byte data, bool write_barrier_needed);
// A helper function for ReadData for reading external references.
inline Address ReadExternalReferenceCase();
@@ -175,6 +179,9 @@ class V8_EXPORT_PRIVATE Deserializer : public SerializerDeserializer {
// Special handling for serialized code like hooking up internalized strings.
HeapObject PostProcessNewObject(HeapObject obj, SnapshotSpace space);
+ // Cached current isolate.
+ LocalIsolateWrapper local_isolate_;
+
// Objects from the attached object descriptions in the serialized user code.
std::vector<Handle<HeapObject>> attached_objects_;
@@ -197,6 +204,11 @@ class V8_EXPORT_PRIVATE Deserializer : public SerializerDeserializer {
// TODO(6593): generalize rehashing, and remove this flag.
bool can_rehash_;
std::vector<HeapObject> to_rehash_;
+ // Store the objects whose maps are deferred and thus initialized as filler
+ // maps during deserialization, so that they can be processed later when the
+ // maps become available.
+ std::unordered_map<HeapObject, SnapshotSpace, Object::Hasher>
+ fillers_to_post_process_;
#ifdef DEBUG
uint32_t num_api_references_;
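The deserializer.h change above swaps the cached Isolate* for a LocalIsolateWrapper so the same deserializer code can run on the main thread or off-thread. A minimal standalone sketch of that wrapper pattern, using made-up isolate types rather than V8's real LocalIsolateWrapper:

#include <cassert>
#include <cstdio>

// Illustrative stand-ins; not V8's Isolate/OffThreadIsolate classes.
struct MainIsolate { const char* name() const { return "main"; } };
struct OffThreadIsolate { const char* name() const { return "off-thread"; } };

// Holds exactly one of the two isolate kinds and exposes the accessors the
// diff relies on: is_main_thread(), main_thread(), off_thread().
class IsolateWrapper {
 public:
  explicit IsolateWrapper(MainIsolate* main) : main_(main), off_(nullptr) {}
  explicit IsolateWrapper(OffThreadIsolate* off) : main_(nullptr), off_(off) {}

  bool is_main_thread() const { return main_ != nullptr; }
  bool is_off_thread() const { return off_ != nullptr; }
  MainIsolate* main_thread() const { assert(is_main_thread()); return main_; }
  OffThreadIsolate* off_thread() const { assert(is_off_thread()); return off_; }

 private:
  MainIsolate* main_;
  OffThreadIsolate* off_;
};

void Deserialize(IsolateWrapper isolate) {
  // Thread-agnostic work happens unconditionally; main-thread-only steps are
  // guarded, mirroring the is_main_thread() checks added in this patch.
  if (isolate.is_main_thread()) {
    std::printf("post-processing on the %s isolate\n",
                isolate.main_thread()->name());
  } else {
    std::printf("deferring post-processing (%s isolate)\n",
                isolate.off_thread()->name());
  }
}

int main() {
  MainIsolate main_isolate;
  OffThreadIsolate off_thread_isolate;
  Deserialize(IsolateWrapper(&main_isolate));
  Deserialize(IsolateWrapper(&off_thread_isolate));
}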
diff --git a/chromium/v8/src/snapshot/object-deserializer.cc b/chromium/v8/src/snapshot/object-deserializer.cc
index 2de08846d4c..caae792c42e 100644
--- a/chromium/v8/src/snapshot/object-deserializer.cc
+++ b/chromium/v8/src/snapshot/object-deserializer.cc
@@ -6,6 +6,7 @@
#include "src/codegen/assembler-inl.h"
#include "src/execution/isolate.h"
+#include "src/execution/local-isolate-wrapper-inl.h"
#include "src/heap/heap-inl.h"
#include "src/objects/allocation-site-inl.h"
#include "src/objects/objects.h"
@@ -26,75 +27,92 @@ ObjectDeserializer::DeserializeSharedFunctionInfo(
d.AddAttachedObject(source);
Handle<HeapObject> result;
- return d.Deserialize(isolate).ToHandle(&result)
+ return d.Deserialize(LocalIsolateWrapper(isolate)).ToHandle(&result)
? Handle<SharedFunctionInfo>::cast(result)
: MaybeHandle<SharedFunctionInfo>();
}
-MaybeHandle<HeapObject> ObjectDeserializer::Deserialize(Isolate* isolate) {
- Initialize(isolate);
+MaybeHandle<SharedFunctionInfo>
+ObjectDeserializer::DeserializeSharedFunctionInfoOffThread(
+ OffThreadIsolate* isolate, const SerializedCodeData* data,
+ Handle<String> source) {
+ DCHECK(ReadOnlyHeap::Contains(*source) || Heap::InOffThreadSpace(*source));
+
+ ObjectDeserializer d(data);
+
+ d.AddAttachedObject(source);
+
+ Handle<HeapObject> result;
+ return d.Deserialize(LocalIsolateWrapper(isolate)).ToHandle(&result)
+ ? Handle<SharedFunctionInfo>::cast(result)
+ : MaybeHandle<SharedFunctionInfo>();
+}
+
+MaybeHandle<HeapObject> ObjectDeserializer::Deserialize(
+ LocalIsolateWrapper local_isolate) {
+ Initialize(local_isolate);
if (!allocator()->ReserveSpace()) return MaybeHandle<HeapObject>();
DCHECK(deserializing_user_code());
- HandleScope scope(isolate);
+ LocalHandleScopeWrapper scope(local_isolate);
Handle<HeapObject> result;
{
DisallowHeapAllocation no_gc;
Object root;
VisitRootPointer(Root::kStartupObjectCache, nullptr, FullObjectSlot(&root));
DeserializeDeferredObjects();
- FlushICache();
- LinkAllocationSites();
- LogNewMapEvents();
- result = handle(HeapObject::cast(root), isolate);
- Rehash();
- allocator()->RegisterDeserializedObjectsForBlackAllocation();
+ CHECK(new_code_objects().empty());
+ if (is_main_thread()) {
+ LinkAllocationSites();
+ LogNewMapEvents();
+ }
+ result = handle(HeapObject::cast(root), local_isolate);
+ if (is_main_thread()) {
+ allocator()->RegisterDeserializedObjectsForBlackAllocation();
+ }
}
+
+ Rehash();
CommitPostProcessedObjects();
return scope.CloseAndEscape(result);
}
-void ObjectDeserializer::FlushICache() {
- DCHECK(deserializing_user_code());
- for (Code code : new_code_objects()) {
- // Record all references to embedded objects in the new code object.
-#ifndef V8_DISABLE_WRITE_BARRIERS
- WriteBarrierForCode(code);
-#endif
- FlushInstructionCache(code.raw_instruction_start(),
- code.raw_instruction_size());
- }
-}
-
void ObjectDeserializer::CommitPostProcessedObjects() {
- CHECK_LE(new_internalized_strings().size(), kMaxInt);
- StringTable::EnsureCapacityForDeserialization(
- isolate(), static_cast<int>(new_internalized_strings().size()));
- for (Handle<String> string : new_internalized_strings()) {
- DisallowHeapAllocation no_gc;
- StringTableInsertionKey key(*string);
- StringTable::AddKeyNoResize(isolate(), &key);
+ if (is_main_thread()) {
+ CHECK_LE(new_internalized_strings().size(), kMaxInt);
+ StringTable::EnsureCapacityForDeserialization(
+ isolate(), static_cast<int>(new_internalized_strings().size()));
+ for (Handle<String> string : new_internalized_strings()) {
+ DisallowHeapAllocation no_gc;
+ StringTableInsertionKey key(*string);
+ StringTable::AddKeyNoResize(isolate(), &key);
+ }
+
+ for (Handle<JSArrayBuffer> buffer : new_off_heap_array_buffers()) {
+ uint32_t store_index = buffer->GetBackingStoreRefForDeserialization();
+ auto bs = backing_store(store_index);
+ SharedFlag shared =
+ bs && bs->is_shared() ? SharedFlag::kShared : SharedFlag::kNotShared;
+ buffer->Setup(shared, bs);
+ }
+ } else {
+ CHECK_EQ(new_internalized_strings().size(), 0);
+ CHECK_EQ(new_off_heap_array_buffers().size(), 0);
}
- Heap* heap = isolate()->heap();
- Factory* factory = isolate()->factory();
for (Handle<Script> script : new_scripts()) {
// Assign a new script id to avoid collision.
- script->set_id(isolate()->GetNextScriptId());
+ script->set_id(local_isolate()->GetNextScriptId());
LogScriptEvents(*script);
// Add script to list.
- Handle<WeakArrayList> list = factory->script_list();
- list = WeakArrayList::AddToEnd(isolate(), list,
- MaybeObjectHandle::Weak(script));
- heap->SetRootScriptList(*list);
- }
-
- for (Handle<JSArrayBuffer> buffer : new_off_heap_array_buffers()) {
- uint32_t store_index = buffer->GetBackingStoreRefForDeserialization();
- auto bs = backing_store(store_index);
- SharedFlag shared =
- bs && bs->is_shared() ? SharedFlag::kShared : SharedFlag::kNotShared;
- buffer->Setup(shared, bs);
+ if (is_main_thread()) {
+ Handle<WeakArrayList> list = isolate()->factory()->script_list();
+ list = WeakArrayList::AddToEnd(isolate(), list,
+ MaybeObjectHandle::Weak(script));
+ isolate()->heap()->SetRootScriptList(*list);
+ } else {
+ local_isolate().off_thread()->heap()->AddToScriptList(script);
+ }
}
}
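CommitPostProcessedObjects above keeps string-table and array-buffer fixups on the main thread and, for off-thread deserialization, parks new scripts in the off-thread heap's script list to be merged later. A simplified, standalone defer-and-merge sketch of that idea (all names here are illustrative):

#include <iostream>
#include <utility>
#include <vector>

struct Script { int id; };

// Stand-in for the canonical, main-thread-owned script list.
class ScriptList {
 public:
  void AddFromMainThread(const Script& s) { scripts_.push_back(s); }
  // Called on the main thread once the off-thread work has been published.
  void Merge(std::vector<Script>&& pending) {
    for (const Script& s : pending) scripts_.push_back(s);
    pending.clear();
  }
  size_t size() const { return scripts_.size(); }

 private:
  std::vector<Script> scripts_;
};

int main() {
  ScriptList canonical;
  canonical.AddFromMainThread({1});        // main-thread path: update directly
  std::vector<Script> off_thread_pending;  // off-thread path: defer
  off_thread_pending.push_back({2});
  canonical.Merge(std::move(off_thread_pending));  // later, on the main thread
  std::cout << canonical.size() << " scripts\n";
}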
diff --git a/chromium/v8/src/snapshot/object-deserializer.h b/chromium/v8/src/snapshot/object-deserializer.h
index ad7fecb0213..5003ffe1602 100644
--- a/chromium/v8/src/snapshot/object-deserializer.h
+++ b/chromium/v8/src/snapshot/object-deserializer.h
@@ -18,14 +18,16 @@ class ObjectDeserializer final : public Deserializer {
public:
static MaybeHandle<SharedFunctionInfo> DeserializeSharedFunctionInfo(
Isolate* isolate, const SerializedCodeData* data, Handle<String> source);
+ static MaybeHandle<SharedFunctionInfo> DeserializeSharedFunctionInfoOffThread(
+ OffThreadIsolate* isolate, const SerializedCodeData* data,
+ Handle<String> source);
private:
explicit ObjectDeserializer(const SerializedCodeData* data);
// Deserialize an object graph. Fail gracefully.
- MaybeHandle<HeapObject> Deserialize(Isolate* isolate);
+ MaybeHandle<HeapObject> Deserialize(LocalIsolateWrapper isolate);
- void FlushICache();
void LinkAllocationSites();
void CommitPostProcessedObjects();
};
diff --git a/chromium/v8/src/snapshot/read-only-deserializer.cc b/chromium/v8/src/snapshot/read-only-deserializer.cc
index c1c96666ca6..7c8c44d9ea7 100644
--- a/chromium/v8/src/snapshot/read-only-deserializer.cc
+++ b/chromium/v8/src/snapshot/read-only-deserializer.cc
@@ -39,7 +39,7 @@ void ReadOnlyDeserializer::DeserializeInto(Isolate* isolate) {
ReadOnlyRoots roots(isolate);
roots.Iterate(this);
- ro_heap->read_only_space()->RepairFreeListsAfterDeserialization();
+ ro_heap->read_only_space()->RepairFreeSpacesAfterDeserialization();
// Deserialize the Read-only Object Cache.
for (size_t i = 0;; ++i) {
@@ -55,7 +55,7 @@ void ReadOnlyDeserializer::DeserializeInto(Isolate* isolate) {
}
if (FLAG_rehash_snapshot && can_rehash()) {
- isolate_->heap()->InitializeHashSeed();
+ isolate->heap()->InitializeHashSeed();
Rehash();
}
}
diff --git a/chromium/v8/src/snapshot/serializer-allocator.cc b/chromium/v8/src/snapshot/serializer-allocator.cc
index a709715bdda..a1bd9f43eba 100644
--- a/chromium/v8/src/snapshot/serializer-allocator.cc
+++ b/chromium/v8/src/snapshot/serializer-allocator.cc
@@ -142,7 +142,8 @@ void SerializerAllocator::OutputStatistics() {
PrintF(" Spaces (bytes):\n");
for (int space = 0; space < kNumberOfSpaces; space++) {
- PrintF("%16s", Heap::GetSpaceName(static_cast<AllocationSpace>(space)));
+ PrintF("%16s",
+ BaseSpace::GetSpaceName(static_cast<AllocationSpace>(space)));
}
PrintF("\n");
diff --git a/chromium/v8/src/snapshot/serializer.cc b/chromium/v8/src/snapshot/serializer.cc
index d443ff67a1e..a5ab4be1c01 100644
--- a/chromium/v8/src/snapshot/serializer.cc
+++ b/chromium/v8/src/snapshot/serializer.cc
@@ -53,13 +53,14 @@ void Serializer::OutputStatistics(const char* name) {
#ifdef OBJECT_PRINT
PrintF(" Instance types (count and bytes):\n");
-#define PRINT_INSTANCE_TYPE(Name) \
- for (int space = 0; space < kNumberOfSpaces; ++space) { \
- if (instance_type_count_[space][Name]) { \
- PrintF("%10d %10zu %-10s %s\n", instance_type_count_[space][Name], \
- instance_type_size_[space][Name], \
- Heap::GetSpaceName(static_cast<AllocationSpace>(space)), #Name); \
- } \
+#define PRINT_INSTANCE_TYPE(Name) \
+ for (int space = 0; space < kNumberOfSpaces; ++space) { \
+ if (instance_type_count_[space][Name]) { \
+ PrintF("%10d %10zu %-10s %s\n", instance_type_count_[space][Name], \
+ instance_type_size_[space][Name], \
+ BaseSpace::GetSpaceName(static_cast<AllocationSpace>(space)), \
+ #Name); \
+ } \
}
INSTANCE_TYPE_LIST(PRINT_INSTANCE_TYPE)
#undef PRINT_INSTANCE_TYPE
@@ -540,7 +541,7 @@ void Serializer::ObjectSerializer::Serialize() {
if (object_.IsScript()) {
// Clear cached line ends.
- Object undefined = ReadOnlyRoots(serializer_->isolate()).undefined_value();
+ Oddball undefined = ReadOnlyRoots(serializer_->isolate()).undefined_value();
Script::cast(object_).set_line_ends(undefined);
}
diff --git a/chromium/v8/src/snapshot/snapshot-utils.cc b/chromium/v8/src/snapshot/snapshot-utils.cc
index 88e8e794c2f..eb2372372c9 100644
--- a/chromium/v8/src/snapshot/snapshot-utils.cc
+++ b/chromium/v8/src/snapshot/snapshot-utils.cc
@@ -17,7 +17,7 @@ uint32_t Checksum(Vector<const byte> payload) {
MSAN_MEMORY_IS_INITIALIZED(payload.begin(), payload.length());
#endif // MEMORY_SANITIZER
// Priming the adler32 call so it can see what CPU features are available.
- adler32(0, NULL, 0);
+ adler32(0, nullptr, 0);
return static_cast<uint32_t>(adler32(0, payload.begin(), payload.length()));
}
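The snapshot-utils.cc hunk only replaces NULL with nullptr, but the surrounding checksum code is a small, reusable pattern: prime zlib's adler32 once, then run it over the payload. A standalone zlib sketch of the same pattern (build with -lz):

#include <zlib.h>

#include <cstdio>

int main() {
  const unsigned char payload[] = "snapshot payload";
  // The priming call with a null buffer returns the initial checksum value and
  // lets zlib select its CPU-specific implementation up front.
  uLong adler = adler32(0L, nullptr, 0);
  adler = adler32(adler, payload, sizeof(payload) - 1);
  std::printf("adler32 = %08lx\n", adler);
}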
diff --git a/chromium/v8/src/snapshot/snapshot.cc b/chromium/v8/src/snapshot/snapshot.cc
index 6c129a846a0..fd0866619c1 100644
--- a/chromium/v8/src/snapshot/snapshot.cc
+++ b/chromium/v8/src/snapshot/snapshot.cc
@@ -128,6 +128,17 @@ bool Snapshot::HasContextSnapshot(Isolate* isolate, size_t index) {
return index < num_contexts;
}
+bool Snapshot::VersionIsValid(const v8::StartupData* data) {
+ char version[SnapshotImpl::kVersionStringLength];
+ memset(version, 0, SnapshotImpl::kVersionStringLength);
+ CHECK_LT(
+ SnapshotImpl::kVersionStringOffset + SnapshotImpl::kVersionStringLength,
+ static_cast<uint32_t>(data->raw_size));
+ Version::GetString(Vector<char>(version, SnapshotImpl::kVersionStringLength));
+ return strncmp(version, data->data + SnapshotImpl::kVersionStringOffset,
+ SnapshotImpl::kVersionStringLength) == 0;
+}
+
bool Snapshot::Initialize(Isolate* isolate) {
if (!isolate->snapshot_available()) return false;
RuntimeCallTimerScope rcs_timer(isolate,
@@ -600,13 +611,12 @@ Vector<const byte> SnapshotImpl::ExtractContextData(const v8::StartupData* data,
}
void SnapshotImpl::CheckVersion(const v8::StartupData* data) {
- char version[kVersionStringLength];
- memset(version, 0, kVersionStringLength);
- CHECK_LT(kVersionStringOffset + kVersionStringLength,
- static_cast<uint32_t>(data->raw_size));
- Version::GetString(Vector<char>(version, kVersionStringLength));
- if (strncmp(version, data->data + kVersionStringOffset,
- kVersionStringLength) != 0) {
+ if (!Snapshot::VersionIsValid(data)) {
+ char version[kVersionStringLength];
+ memset(version, 0, kVersionStringLength);
+ CHECK_LT(kVersionStringOffset + kVersionStringLength,
+ static_cast<uint32_t>(data->raw_size));
+ Version::GetString(Vector<char>(version, kVersionStringLength));
FATAL(
"Version mismatch between V8 binary and snapshot.\n"
"# V8 binary version: %.*s\n"
diff --git a/chromium/v8/src/snapshot/snapshot.h b/chromium/v8/src/snapshot/snapshot.h
index e0ea02681cc..016e51799b9 100644
--- a/chromium/v8/src/snapshot/snapshot.h
+++ b/chromium/v8/src/snapshot/snapshot.h
@@ -91,6 +91,7 @@ class Snapshot : public AllStatic {
static bool EmbedsScript(Isolate* isolate);
V8_EXPORT_PRIVATE static bool VerifyChecksum(const v8::StartupData* data);
static bool ExtractRehashability(const v8::StartupData* data);
+ static bool VersionIsValid(const v8::StartupData* data);
// To be implemented by the snapshot source.
static const v8::StartupData* DefaultSnapshotBlob();
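Snapshot::VersionIsValid factors the version-string comparison out of SnapshotImpl::CheckVersion so a blob can be probed without hitting the FATAL path. A simplified standalone version of that check; the offset, length, and version string below are invented for the example, not V8's snapshot layout:

#include <cstddef>
#include <cstdio>
#include <cstring>

constexpr std::size_t kVersionStringOffset = 0;
constexpr std::size_t kVersionStringLength = 64;

bool VersionIsValid(const char* blob, std::size_t blob_size,
                    const char* binary_version) {
  if (blob_size < kVersionStringOffset + kVersionStringLength) return false;
  char version[kVersionStringLength] = {};  // zero-filled, like the memset above
  std::strncpy(version, binary_version, kVersionStringLength - 1);
  return std::strncmp(version, blob + kVersionStringOffset,
                      kVersionStringLength) == 0;
}

int main() {
  char blob[kVersionStringLength] = "8.4.371";  // pretend snapshot header
  std::printf("%s\n",
              VersionIsValid(blob, sizeof(blob), "8.4.371") ? "match"
                                                            : "mismatch");
}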
diff --git a/chromium/v8/src/snapshot/startup-deserializer.cc b/chromium/v8/src/snapshot/startup-deserializer.cc
index 095009b4e84..6d19c3a399a 100644
--- a/chromium/v8/src/snapshot/startup-deserializer.cc
+++ b/chromium/v8/src/snapshot/startup-deserializer.cc
@@ -74,7 +74,7 @@ void StartupDeserializer::DeserializeInto(Isolate* isolate) {
}
void StartupDeserializer::LogNewMapEvents() {
- if (FLAG_trace_maps) LOG(isolate_, LogAllMaps());
+ if (FLAG_trace_maps) LOG(isolate(), LogAllMaps());
}
void StartupDeserializer::FlushICache() {
diff --git a/chromium/v8/src/strings/uri.cc b/chromium/v8/src/strings/uri.cc
index 466c3616a1a..905c86d3c25 100644
--- a/chromium/v8/src/strings/uri.cc
+++ b/chromium/v8/src/strings/uri.cc
@@ -10,6 +10,7 @@
#include "src/strings/char-predicates-inl.h"
#include "src/strings/string-search.h"
#include "src/strings/unicode-inl.h"
+#include "src/tracing/trace-event.h"
namespace v8 {
namespace internal {
@@ -174,6 +175,8 @@ bool IntoOneAndTwoByte(Handle<String> uri, bool is_uri,
MaybeHandle<String> Uri::Decode(Isolate* isolate, Handle<String> uri,
bool is_uri) {
+ TRACE_EVENT0("v8", "V8.DecodeUri");
+
uri = String::Flatten(isolate, uri);
std::vector<uint8_t> one_byte_buffer;
std::vector<uc16> two_byte_buffer;
@@ -278,6 +281,8 @@ void EncodePair(uc16 cc1, uc16 cc2, std::vector<uint8_t>* buffer) {
MaybeHandle<String> Uri::Encode(Isolate* isolate, Handle<String> uri,
bool is_uri) {
+ TRACE_EVENT0("v8", "V8.EncodeUri");
+
uri = String::Flatten(isolate, uri);
int uri_length = uri->length();
std::vector<uint8_t> buffer;
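The TRACE_EVENT0 calls added above give Uri::Decode and Uri::Encode scoped trace events. A minimal RAII stand-in, not the real tracing macro, showing the shape of such a scope:

#include <chrono>
#include <cstdio>

class ScopedTrace {
 public:
  explicit ScopedTrace(const char* name)
      : name_(name), start_(std::chrono::steady_clock::now()) {}
  ~ScopedTrace() {
    auto us = std::chrono::duration_cast<std::chrono::microseconds>(
                  std::chrono::steady_clock::now() - start_)
                  .count();
    // A real trace event records begin/end pairs for a tracing backend; this
    // sketch just reports the scope's duration.
    std::printf("%s took %lld us\n", name_, static_cast<long long>(us));
  }

 private:
  const char* name_;
  std::chrono::steady_clock::time_point start_;
};

void DecodeUri() {
  ScopedTrace trace("V8.DecodeUri");  // emitted when the scope ends
  // ... decoding work ...
}

int main() { DecodeUri(); }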
diff --git a/chromium/v8/src/torque/cfg.h b/chromium/v8/src/torque/cfg.h
index e45cb75073e..4cacb58edb3 100644
--- a/chromium/v8/src/torque/cfg.h
+++ b/chromium/v8/src/torque/cfg.h
@@ -63,7 +63,7 @@ class Block {
DCHECK_EQ(input_definitions_->Size(), input_definitions.Size());
bool changed = false;
- for (BottomOffset i = 0; i < input_definitions.AboveTop(); ++i) {
+ for (BottomOffset i = {0}; i < input_definitions.AboveTop(); ++i) {
auto& current = input_definitions_->Peek(i);
auto& input = input_definitions.Peek(i);
if (current == input) continue;
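The `BottomOffset i = 0` loops become `BottomOffset i = {0}` here and in csa-generator.cc because the utils.h hunk further down removes BottomOffset's constructors, leaving a plain aggregate. A standalone illustration of that initialization change:

#include <cstddef>
#include <cstdio>

// Simplified BottomOffset: an aggregate with no user-provided constructors,
// matching the shape left behind by the utils.h change below.
struct BottomOffset {
  std::size_t offset;
  BottomOffset& operator++() {
    ++offset;
    return *this;
  }
  bool operator<(const BottomOffset& other) const {
    return offset < other.offset;
  }
};

int main() {
  // BottomOffset i = 0;  // no longer compiles: no implicit conversion from 0
  for (BottomOffset i = {0}; i < BottomOffset{3}; ++i) {
    std::printf("offset %zu\n", i.offset);
  }
}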
diff --git a/chromium/v8/src/torque/constants.h b/chromium/v8/src/torque/constants.h
index 616e7a23acd..8b9300d6cf2 100644
--- a/chromium/v8/src/torque/constants.h
+++ b/chromium/v8/src/torque/constants.h
@@ -95,6 +95,7 @@ static const char* const ANNOTATION_IFNOT = "@ifnot";
static const char* const ANNOTATION_GENERATE_BODY_DESCRIPTOR =
"@generateBodyDescriptor";
static const char* const ANNOTATION_EXPORT_CPP_CLASS = "@export";
+static const char* const ANNOTATION_DO_NOT_GENERATE_CAST = "@doNotGenerateCast";
inline bool IsConstexprName(const std::string& name) {
return name.substr(0, std::strlen(CONSTEXPR_TYPE_PREFIX)) ==
@@ -133,6 +134,7 @@ enum class ClassFlag {
kUndefinedLayout = 1 << 11,
kGenerateBodyDescriptor = 1 << 12,
kExport = 1 << 13,
+ kDoNotGenerateCast = 1 << 14
};
using ClassFlags = base::Flags<ClassFlag>;
diff --git a/chromium/v8/src/torque/csa-generator.cc b/chromium/v8/src/torque/csa-generator.cc
index 45ed7f3af42..a25c5ce25fe 100644
--- a/chromium/v8/src/torque/csa-generator.cc
+++ b/chromium/v8/src/torque/csa-generator.cc
@@ -16,7 +16,7 @@ namespace torque {
base::Optional<Stack<std::string>> CSAGenerator::EmitGraph(
Stack<std::string> parameters) {
- for (BottomOffset i = 0; i < parameters.AboveTop(); ++i) {
+ for (BottomOffset i = {0}; i < parameters.AboveTop(); ++i) {
SetDefinitionVariable(DefinitionLocation::Parameter(i.offset),
parameters.Peek(i));
}
@@ -27,7 +27,7 @@ base::Optional<Stack<std::string>> CSAGenerator::EmitGraph(
out() << " compiler::CodeAssemblerParameterizedLabel<";
bool first = true;
DCHECK_EQ(block->InputTypes().Size(), block->InputDefinitions().Size());
- for (BottomOffset i = 0; i < block->InputTypes().AboveTop(); ++i) {
+ for (BottomOffset i = {0}; i < block->InputTypes().AboveTop(); ++i) {
if (block->InputDefinitions().Peek(i).IsPhiFromBlock(block)) {
if (!first) out() << ", ";
out() << block->InputTypes().Peek(i)->GetGeneratedTNodeTypeName();
@@ -70,7 +70,7 @@ Stack<std::string> CSAGenerator::EmitBlock(const Block* block) {
Stack<std::string> stack;
std::stringstream phi_names;
- for (BottomOffset i = 0; i < block->InputTypes().AboveTop(); ++i) {
+ for (BottomOffset i = {0}; i < block->InputTypes().AboveTop(); ++i) {
const auto& def = block->InputDefinitions().Peek(i);
stack.Push(DefinitionToVariable(def));
if (def.IsPhiFromBlock(block)) {
@@ -274,6 +274,31 @@ void CSAGenerator::EmitInstruction(const CallIntrinsicInstruction& instruction,
<< return_type->GetGeneratedTNodeTypeName() << ">";
}
}
+ } else if (instruction.intrinsic->ExternalName() == "%GetClassMapConstant") {
+ if (parameter_types.size() != 0) {
+ ReportError("%GetClassMapConstant must not take parameters");
+ }
+ if (instruction.specialization_types.size() != 1) {
+ ReportError(
+ "%GetClassMapConstant must take a single class as specialization "
+ "parameter");
+ }
+ const ClassType* class_type =
+ ClassType::DynamicCast(instruction.specialization_types[0]);
+ if (!class_type) {
+ ReportError("%GetClassMapConstant must take a class type parameter");
+ }
+ // If the class isn't actually used as the parameter to a TNode,
+ // then we can't rely on the class existing in C++ or being of the same
+ // type (e.g. it could be a template), so don't use the template CSA
+ // machinery for accessing the class' map.
+ std::string class_name =
+ class_type->name() != class_type->GetGeneratedTNodeTypeName()
+ ? std::string("void")
+ : class_type->name();
+
+ out() << std::string("CodeStubAssembler(state_).GetClassMapConstant<") +
+ class_name + ">";
} else if (instruction.intrinsic->ExternalName() == "%FromConstexpr") {
if (parameter_types.size() != 1 || !parameter_types[0]->IsConstexpr()) {
ReportError(
@@ -468,7 +493,7 @@ void CSAGenerator::EmitInstruction(
const auto& input_definitions =
(*instruction.return_continuation)->InputDefinitions();
- for (BottomOffset i = 0; i < input_definitions.AboveTop(); ++i) {
+ for (BottomOffset i = {0}; i < input_definitions.AboveTop(); ++i) {
if (input_definitions.Peek(i).IsPhiFromBlock(
*instruction.return_continuation)) {
out() << ", "
@@ -487,7 +512,7 @@ void CSAGenerator::EmitInstruction(
const auto& label_definitions =
instruction.label_blocks[l]->InputDefinitions();
- BottomOffset i = 0;
+ BottomOffset i = {0};
for (; i < stack->AboveTop(); ++i) {
if (label_definitions.Peek(i).IsPhiFromBlock(
instruction.label_blocks[l])) {
@@ -630,7 +655,7 @@ void CSAGenerator::PostCallableExceptionPreparation(
DCHECK_EQ(stack->Size() + 1, (*catch_block)->InputDefinitions().Size());
const auto& input_definitions = (*catch_block)->InputDefinitions();
- for (BottomOffset i = 0; i < input_definitions.AboveTop(); ++i) {
+ for (BottomOffset i = {0}; i < input_definitions.AboveTop(); ++i) {
if (input_definitions.Peek(i).IsPhiFromBlock(*catch_block)) {
if (i < stack->AboveTop()) {
out() << ", " << stack->Peek(i);
@@ -713,7 +738,7 @@ void CSAGenerator::EmitInstruction(const BranchInstruction& instruction,
const auto& true_definitions = instruction.if_true->InputDefinitions();
DCHECK_EQ(stack->Size(), true_definitions.Size());
bool first = true;
- for (BottomOffset i = 0; i < stack->AboveTop(); ++i) {
+ for (BottomOffset i = {0}; i < stack->AboveTop(); ++i) {
if (true_definitions.Peek(i).IsPhiFromBlock(instruction.if_true)) {
if (!first) out() << ", ";
out() << stack->Peek(i);
@@ -726,7 +751,7 @@ void CSAGenerator::EmitInstruction(const BranchInstruction& instruction,
const auto& false_definitions = instruction.if_false->InputDefinitions();
DCHECK_EQ(stack->Size(), false_definitions.Size());
first = true;
- for (BottomOffset i = 0; i < stack->AboveTop(); ++i) {
+ for (BottomOffset i = {0}; i < stack->AboveTop(); ++i) {
if (false_definitions.Peek(i).IsPhiFromBlock(instruction.if_false)) {
if (!first) out() << ", ";
out() << stack->Peek(i);
@@ -744,7 +769,7 @@ void CSAGenerator::EmitInstruction(
const auto& true_definitions = instruction.if_true->InputDefinitions();
DCHECK_EQ(stack->Size(), true_definitions.Size());
- for (BottomOffset i = 0; i < stack->AboveTop(); ++i) {
+ for (BottomOffset i = {0}; i < stack->AboveTop(); ++i) {
if (true_definitions.Peek(i).IsPhiFromBlock(instruction.if_true)) {
out() << ", " << stack->Peek(i);
}
@@ -756,7 +781,7 @@ void CSAGenerator::EmitInstruction(
const auto& false_definitions = instruction.if_false->InputDefinitions();
DCHECK_EQ(stack->Size(), false_definitions.Size());
- for (BottomOffset i = 0; i < stack->AboveTop(); ++i) {
+ for (BottomOffset i = {0}; i < stack->AboveTop(); ++i) {
if (false_definitions.Peek(i).IsPhiFromBlock(instruction.if_false)) {
out() << ", " << stack->Peek(i);
}
@@ -772,7 +797,7 @@ void CSAGenerator::EmitInstruction(const GotoInstruction& instruction,
const auto& destination_definitions =
instruction.destination->InputDefinitions();
DCHECK_EQ(stack->Size(), destination_definitions.Size());
- for (BottomOffset i = 0; i < stack->AboveTop(); ++i) {
+ for (BottomOffset i = {0}; i < stack->AboveTop(); ++i) {
if (destination_definitions.Peek(i).IsPhiFromBlock(
instruction.destination)) {
out() << ", " << stack->Peek(i);
diff --git a/chromium/v8/src/torque/declarable.cc b/chromium/v8/src/torque/declarable.cc
index 7c370c97b3c..fb5ed15f850 100644
--- a/chromium/v8/src/torque/declarable.cc
+++ b/chromium/v8/src/torque/declarable.cc
@@ -77,9 +77,26 @@ SpecializationRequester::SpecializationRequester(SourcePosition position,
this->scope = scope;
}
+std::vector<Declarable*> Scope::Lookup(const QualifiedName& name) {
+ if (name.namespace_qualification.size() >= 1 &&
+ name.namespace_qualification[0] == "") {
+ return GlobalContext::GetDefaultNamespace()->Lookup(
+ name.DropFirstNamespaceQualification());
+ }
+ std::vector<Declarable*> result;
+ if (ParentScope()) {
+ result = ParentScope()->Lookup(name);
+ }
+ for (Declarable* declarable : LookupShallow(name)) {
+ result.push_back(declarable);
+ }
+ return result;
+}
+
base::Optional<std::string> TypeConstraint::IsViolated(const Type* type) const {
if (upper_bound && !type->IsSubtypeOf(*upper_bound)) {
- return {ToString("expected ", *type, " to be a subtype of ", *upper_bound)};
+ return {
+ ToString("expected ", *type, " to be a subtype of ", **upper_bound)};
}
return base::nullopt;
}
diff --git a/chromium/v8/src/torque/declarable.h b/chromium/v8/src/torque/declarable.h
index b6fdef67b90..6fbf969e86f 100644
--- a/chromium/v8/src/torque/declarable.h
+++ b/chromium/v8/src/torque/declarable.h
@@ -36,6 +36,17 @@ struct QualifiedName {
explicit QualifiedName(std::string name)
: QualifiedName({}, std::move(name)) {}
+ bool HasNamespaceQualification() const {
+ return !namespace_qualification.empty();
+ }
+
+ QualifiedName DropFirstNamespaceQualification() const {
+ return QualifiedName{
+ std::vector<std::string>(namespace_qualification.begin() + 1,
+ namespace_qualification.end()),
+ name};
+ }
+
friend std::ostream& operator<<(std::ostream& os, const QualifiedName& name);
};
@@ -163,7 +174,7 @@ class Scope : public Declarable {
explicit Scope(Declarable::Kind kind) : Declarable(kind) {}
std::vector<Declarable*> LookupShallow(const QualifiedName& name) {
- if (name.namespace_qualification.empty()) return declarations_[name.name];
+ if (!name.HasNamespaceQualification()) return declarations_[name.name];
Scope* child = nullptr;
for (Declarable* declarable :
declarations_[name.namespace_qualification.front()]) {
@@ -176,22 +187,10 @@ class Scope : public Declarable {
}
}
if (child == nullptr) return {};
- return child->LookupShallow(
- QualifiedName({name.namespace_qualification.begin() + 1,
- name.namespace_qualification.end()},
- name.name));
+ return child->LookupShallow(name.DropFirstNamespaceQualification());
}
- std::vector<Declarable*> Lookup(const QualifiedName& name) {
- std::vector<Declarable*> result;
- if (ParentScope()) {
- result = ParentScope()->Lookup(name);
- }
- for (Declarable* declarable : LookupShallow(name)) {
- result.push_back(declarable);
- }
- return result;
- }
+ std::vector<Declarable*> Lookup(const QualifiedName& name);
template <class T>
T* AddDeclarable(const std::string& name, T* declarable) {
declarations_[name].push_back(declarable);
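Scope::Lookup, moved out of line in declarable.cc above, now treats a leading empty namespace component, i.e. a `::`-prefixed name, as "resolve from the default namespace" instead of walking parent scopes. A simplified standalone sketch of that rule, with strings standing in for Declarable* and without the child-namespace traversal:

#include <iostream>
#include <map>
#include <string>
#include <vector>

struct QualifiedName {
  std::vector<std::string> namespace_qualification;
  std::string name;
  QualifiedName DropFirstNamespaceQualification() const {
    return {{namespace_qualification.begin() + 1, namespace_qualification.end()},
            name};
  }
};

struct Scope {
  Scope* parent = nullptr;
  Scope* root = nullptr;
  std::map<std::string, std::string> declarations;  // name -> declaration

  std::vector<std::string> Lookup(const QualifiedName& qn) const {
    if (!qn.namespace_qualification.empty() &&
        qn.namespace_qualification[0].empty()) {
      // A leading "" (spelled `::Name`) short-circuits to the root scope.
      return root->Lookup(qn.DropFirstNamespaceQualification());
    }
    std::vector<std::string> result;
    if (parent) result = parent->Lookup(qn);
    auto it = declarations.find(qn.name);
    if (it != declarations.end()) result.push_back(it->second);
    return result;
  }
};

int main() {
  Scope root;
  root.root = &root;
  root.declarations["Bar"] = "Bar from the default namespace";
  Scope inner{&root, &root, {{"Bar", "Bar from the inner scope"}}};
  for (const auto& hit : inner.Lookup({{}, "Bar"})) std::cout << hit << "\n";
  std::cout << "--\n";
  for (const auto& hit : inner.Lookup({{""}, "Bar"})) std::cout << hit << "\n";
}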
diff --git a/chromium/v8/src/torque/implementation-visitor.cc b/chromium/v8/src/torque/implementation-visitor.cc
index bee31b4d32a..b0f7013d350 100644
--- a/chromium/v8/src/torque/implementation-visitor.cc
+++ b/chromium/v8/src/torque/implementation-visitor.cc
@@ -2,7 +2,10 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
+#include "src/torque/implementation-visitor.h"
+
#include <algorithm>
+#include <iomanip>
#include <string>
#include "src/base/optional.h"
@@ -10,7 +13,6 @@
#include "src/torque/csa-generator.h"
#include "src/torque/declaration-visitor.h"
#include "src/torque/global-context.h"
-#include "src/torque/implementation-visitor.h"
#include "src/torque/parameter-difference.h"
#include "src/torque/server-data.h"
#include "src/torque/type-inference.h"
@@ -729,8 +731,17 @@ VisitResult ImplementationVisitor::Visit(LogicalAndExpression* expr) {
VisitResult true_result;
{
StackScope true_block_scope(this);
+ VisitResult right_result = Visit(expr->right);
+ if (TryGetSourceForBitfieldExpression(expr->left) != nullptr &&
+ TryGetSourceForBitfieldExpression(expr->right) != nullptr &&
+ TryGetSourceForBitfieldExpression(expr->left)->value ==
+ TryGetSourceForBitfieldExpression(expr->right)->value) {
+ Lint(
+ "Please use & rather than && when checking multiple bitfield "
+ "values, to avoid complexity in generated code.");
+ }
true_result = true_block_scope.Yield(
- GenerateImplicitConvert(TypeOracle::GetBoolType(), Visit(expr->right)));
+ GenerateImplicitConvert(TypeOracle::GetBoolType(), right_result));
}
assembler().Goto(done_block);
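The new lint fires when both operands of && load bitfields out of the same struct. The point is codegen quality: with &&, each field is decoded and branched on separately, while a single & can test both fields with one mask and compare. A standalone illustration with an invented two-bit layout:

#include <cstdint>
#include <cstdio>

// Invented flag bits for the example; not a real V8 bitfield struct.
constexpr uint32_t kIsCallableBit = 1u << 0;
constexpr uint32_t kIsConstructorBit = 1u << 1;

bool BothSetBranchy(uint32_t flags) {
  // `&&` semantics: two separate tests and, in generated code, two branches.
  return (flags & kIsCallableBit) != 0 && (flags & kIsConstructorBit) != 0;
}

bool BothSetMasked(uint32_t flags) {
  // `&` on the decoded bits: one mask, one compare, no extra control flow.
  constexpr uint32_t kMask = kIsCallableBit | kIsConstructorBit;
  return (flags & kMask) == kMask;
}

int main() {
  std::printf("%d %d\n", BothSetBranchy(3), BothSetMasked(3));  // 1 1
  std::printf("%d %d\n", BothSetBranchy(1), BothSetMasked(1));  // 0 0
}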
@@ -775,16 +786,22 @@ VisitResult ImplementationVisitor::Visit(AssignmentExpression* expr) {
}
VisitResult ImplementationVisitor::Visit(NumberLiteralExpression* expr) {
- int32_t i = static_cast<int32_t>(expr->number);
const Type* result_type = TypeOracle::GetConstFloat64Type();
- if (i == expr->number) {
- if ((i >> 30) == (i >> 31)) {
- result_type = TypeOracle::GetConstInt31Type();
- } else {
- result_type = TypeOracle::GetConstInt32Type();
+ if (expr->number >= std::numeric_limits<int32_t>::min() &&
+ expr->number <= std::numeric_limits<int32_t>::max()) {
+ int32_t i = static_cast<int32_t>(expr->number);
+ if (i == expr->number) {
+ if ((i >> 30) == (i >> 31)) {
+ result_type = TypeOracle::GetConstInt31Type();
+ } else {
+ result_type = TypeOracle::GetConstInt32Type();
+ }
}
}
- return VisitResult{result_type, ToString(expr->number)};
+ std::stringstream str;
+ str << std::setprecision(std::numeric_limits<double>::digits10 + 1)
+ << expr->number;
+ return VisitResult{result_type, str.str()};
}
VisitResult ImplementationVisitor::Visit(AssumeTypeImpossibleExpression* expr) {
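The rewritten NumberLiteralExpression visitor only casts to int32_t after checking that the double is in int32 range (the unconditional cast was undefined for out-of-range literals), and it prints the literal with enough digits for the double to round-trip. A standalone sketch of both parts:

#include <cstdint>
#include <iomanip>
#include <iostream>
#include <limits>
#include <sstream>

// Mirrors the classification above: const int31 if the value fits in 31 bits,
// const int32 if it fits in 32, otherwise const float64.
const char* ClassifyLiteral(double number) {
  if (number >= std::numeric_limits<int32_t>::min() &&
      number <= std::numeric_limits<int32_t>::max()) {
    int32_t i = static_cast<int32_t>(number);  // safe: range checked above
    if (i == number) {
      return (i >> 30) == (i >> 31) ? "const int31" : "const int32";
    }
  }
  return "const float64";
}

int main() {
  std::cout << ClassifyLiteral(42) << "\n";          // const int31
  std::cout << ClassifyLiteral(2000000000) << "\n";  // const int32
  std::cout << ClassifyLiteral(1e300) << "\n";       // const float64
  // Print with enough significant digits that the double round-trips:
  std::stringstream str;
  str << std::setprecision(std::numeric_limits<double>::digits10 + 1) << 0.1;
  std::cout << str.str() << "\n";                    // 0.1
}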
@@ -825,6 +842,17 @@ VisitResult ImplementationVisitor::Visit(LocationExpression* expr) {
return scope.Yield(GenerateFetchFromLocation(GetLocationReference(expr)));
}
+VisitResult ImplementationVisitor::Visit(FieldAccessExpression* expr) {
+ StackScope scope(this);
+ LocationReference location = GetLocationReference(expr);
+ if (location.IsBitFieldAccess()) {
+ if (auto* identifier = IdentifierExpression::DynamicCast(expr->object)) {
+ bitfield_expressions_[expr] = identifier->name;
+ }
+ }
+ return scope.Yield(GenerateFetchFromLocation(location));
+}
+
const Type* ImplementationVisitor::Visit(GotoStatement* stmt) {
Binding<LocalLabel>* label = LookupLabel(stmt->label->value);
size_t parameter_count = label->parameter_types.size();
@@ -2005,25 +2033,29 @@ LocationReference ImplementationVisitor::GenerateFieldAccess(
ProjectStructField(reference.temporary(), fieldname),
reference.temporary_description());
}
- if (reference.ReferencedType()->IsBitFieldStructType()) {
- const BitFieldStructType* bitfield_struct =
- BitFieldStructType::cast(reference.ReferencedType());
- const BitField& field = bitfield_struct->LookupField(fieldname);
- return LocationReference::BitFieldAccess(reference, field);
- }
- if (const auto type_wrapped_in_smi = Type::MatchUnaryGeneric(
- reference.ReferencedType(), TypeOracle::GetSmiTaggedGeneric())) {
- const BitFieldStructType* bitfield_struct =
- BitFieldStructType::DynamicCast(*type_wrapped_in_smi);
- if (bitfield_struct == nullptr) {
- ReportError(
- "When a value of type SmiTagged<T> is used in a field access "
- "expression, T is expected to be a bitfield struct type. Instead, T "
- "is ",
- **type_wrapped_in_smi);
+ if (base::Optional<const Type*> referenced_type =
+ reference.ReferencedType()) {
+ if ((*referenced_type)->IsBitFieldStructType()) {
+ const BitFieldStructType* bitfield_struct =
+ BitFieldStructType::cast(*referenced_type);
+ const BitField& field = bitfield_struct->LookupField(fieldname);
+ return LocationReference::BitFieldAccess(reference, field);
+ }
+ if (const auto type_wrapped_in_smi = Type::MatchUnaryGeneric(
+ (*referenced_type), TypeOracle::GetSmiTaggedGeneric())) {
+ const BitFieldStructType* bitfield_struct =
+ BitFieldStructType::DynamicCast(*type_wrapped_in_smi);
+ if (bitfield_struct == nullptr) {
+ ReportError(
+ "When a value of type SmiTagged<T> is used in a field access "
+ "expression, T is expected to be a bitfield struct type. Instead, "
+ "T "
+ "is ",
+ **type_wrapped_in_smi);
+ }
+ const BitField& field = bitfield_struct->LookupField(fieldname);
+ return LocationReference::BitFieldAccess(reference, field);
}
- const BitField& field = bitfield_struct->LookupField(fieldname);
- return LocationReference::BitFieldAccess(reference, field);
}
if (reference.IsHeapReference()) {
VisitResult ref = reference.heap_reference();
@@ -2188,7 +2220,7 @@ VisitResult ImplementationVisitor::GenerateFetchFromLocation(
} else if (reference.IsVariableAccess()) {
return GenerateCopy(reference.variable());
} else if (reference.IsHeapReference()) {
- const Type* referenced_type = reference.ReferencedType();
+ const Type* referenced_type = *reference.ReferencedType();
if (referenced_type == TypeOracle::GetFloat64OrHoleType()) {
return GenerateCall(QualifiedName({TORQUE_INTERNAL_NAMESPACE_STRING},
"LoadFloat64OrHole"),
@@ -2205,9 +2237,9 @@ VisitResult ImplementationVisitor::GenerateFetchFromLocation(
return VisitResult(referenced_type, result_range);
} else {
GenerateCopy(reference.heap_reference());
- assembler().Emit(LoadReferenceInstruction{reference.ReferencedType()});
- DCHECK_EQ(1, LoweredSlotCount(reference.ReferencedType()));
- return VisitResult(reference.ReferencedType(), assembler().TopRange(1));
+ assembler().Emit(LoadReferenceInstruction{referenced_type});
+ DCHECK_EQ(1, LoweredSlotCount(referenced_type));
+ return VisitResult(referenced_type, assembler().TopRange(1));
}
} else if (reference.IsBitFieldAccess()) {
// First fetch the bitfield struct, then get the bits out of it.
@@ -2215,7 +2247,7 @@ VisitResult ImplementationVisitor::GenerateFetchFromLocation(
GenerateFetchFromLocation(reference.bit_field_struct_location());
assembler().Emit(LoadBitFieldInstruction{bit_field_struct.type(),
reference.bit_field()});
- return VisitResult(reference.ReferencedType(), assembler().TopRange(1));
+ return VisitResult(*reference.ReferencedType(), assembler().TopRange(1));
} else {
if (reference.IsHeapSlice()) {
ReportError(
@@ -2248,7 +2280,7 @@ void ImplementationVisitor::GenerateAssignToLocation(
} else if (reference.IsHeapSlice()) {
ReportError("assigning a value directly to an indexed field isn't allowed");
} else if (reference.IsHeapReference()) {
- const Type* referenced_type = reference.ReferencedType();
+ const Type* referenced_type = *reference.ReferencedType();
if (reference.IsConst()) {
Error("cannot assign to const value of type ", *referenced_type).Throw();
}
@@ -2290,7 +2322,7 @@ void ImplementationVisitor::GenerateAssignToLocation(
VisitResult bit_field_struct =
GenerateFetchFromLocation(reference.bit_field_struct_location());
VisitResult converted_value =
- GenerateImplicitConvert(reference.ReferencedType(), assignment_value);
+ GenerateImplicitConvert(*reference.ReferencedType(), assignment_value);
VisitResult updated_bit_field_struct =
GenerateSetBitField(bit_field_struct.type(), reference.bit_field(),
bit_field_struct, converted_value);
@@ -2371,6 +2403,29 @@ void ImplementationVisitor::AddCallParameter(
}
}
+namespace {
+std::pair<std::string, std::string> GetClassInstanceTypeRange(
+ const ClassType* class_type) {
+ std::pair<std::string, std::string> result;
+ if (class_type->InstanceTypeRange()) {
+ auto instance_type_range = *class_type->InstanceTypeRange();
+ std::string instance_type_string_first =
+ "static_cast<InstanceType>(" +
+ std::to_string(instance_type_range.first) + ")";
+ std::string instance_type_string_second =
+ "static_cast<InstanceType>(" +
+ std::to_string(instance_type_range.second) + ")";
+ result =
+ std::make_pair(instance_type_string_first, instance_type_string_second);
+ } else {
+ ReportError(
+ "%Min/MaxInstanceType must take a class type that is either a string "
+ "or has a generated instance type range");
+ }
+ return result;
+}
+} // namespace
+
VisitResult ImplementationVisitor::GenerateCall(
Callable* callable, base::Optional<LocationReference> this_reference,
Arguments arguments, const TypeVector& specialization_types,
@@ -2601,6 +2656,48 @@ VisitResult ImplementationVisitor::GenerateCall(
Error("size of ", *type, " is not known.");
}
return VisitResult(return_type, size_string);
+ } else if (intrinsic->ExternalName() == "%ClassHasMapConstant") {
+ const Type* type = specialization_types[0];
+ const ClassType* class_type = ClassType::DynamicCast(type);
+ if (!class_type) {
+ ReportError("%ClassHasMapConstant must take a class type parameter");
+ }
+ // If the class isn't actually used as the parameter to a TNode,
+ // then we can't rely on the class existing in C++ or being of the same
+ // type (e.g. it could be a template), so don't use the template CSA
+ // machinery for accessing the class' map.
+ if (class_type->name() != class_type->GetGeneratedTNodeTypeName()) {
+ return VisitResult(return_type, std::string("false"));
+ } else {
+ return VisitResult(
+ return_type,
+ std::string("CodeStubAssembler(state_).ClassHasMapConstant<") +
+ class_type->name() + ">()");
+ }
+ } else if (intrinsic->ExternalName() == "%MinInstanceType") {
+ if (specialization_types.size() != 1) {
+ ReportError("%MinInstanceType must take a single type parameter");
+ }
+ const Type* type = specialization_types[0];
+ const ClassType* class_type = ClassType::DynamicCast(type);
+ if (!class_type) {
+ ReportError("%MinInstanceType must take a class type parameter");
+ }
+ std::pair<std::string, std::string> instance_types =
+ GetClassInstanceTypeRange(class_type);
+ return VisitResult(return_type, instance_types.first);
+ } else if (intrinsic->ExternalName() == "%MaxInstanceType") {
+ if (specialization_types.size() != 1) {
+ ReportError("%MaxInstanceType must take a single type parameter");
+ }
+ const Type* type = specialization_types[0];
+ const ClassType* class_type = ClassType::DynamicCast(type);
+ if (!class_type) {
+ ReportError("%MaxInstanceType must take a class type parameter");
+ }
+ std::pair<std::string, std::string> instance_types =
+ GetClassInstanceTypeRange(class_type);
+ return VisitResult(return_type, instance_types.second);
} else if (intrinsic->ExternalName() == "%RawConstexprCast") {
if (intrinsic->signature().parameter_types.types.size() != 1 ||
constexpr_arguments.size() != 1) {
@@ -2674,6 +2771,16 @@ VisitResult ImplementationVisitor::Visit(CallExpression* expr,
LanguageServerData::AddDefinition(expr->callee->name->pos,
callable->IdentifierPosition());
}
+ if (expr->callee->name->value == "!" && arguments.parameters.size() == 1) {
+ PropagateBitfieldMark(expr->arguments[0], expr);
+ }
+ if (expr->callee->name->value == "==" && arguments.parameters.size() == 2) {
+ if (arguments.parameters[0].type()->IsConstexpr()) {
+ PropagateBitfieldMark(expr->arguments[1], expr);
+ } else if (arguments.parameters[1].type()->IsConstexpr()) {
+ PropagateBitfieldMark(expr->arguments[0], expr);
+ }
+ }
return scope.Yield(
GenerateCall(name, arguments, specialization_types, is_tailcall));
}
@@ -2691,7 +2798,7 @@ VisitResult ImplementationVisitor::Visit(CallMethodExpression* expr) {
target = LocationReference::Temporary(result, "this parameter");
}
const AggregateType* target_type =
- AggregateType::DynamicCast(target.ReferencedType());
+ AggregateType::DynamicCast(*target.ReferencedType());
if (!target_type) {
ReportError("target of method call not a struct or class type");
}
@@ -3333,6 +3440,9 @@ void ImplementationVisitor::GenerateBitFields(
<< " = 1 << " << field.offset << ", \\\n";
}
header << " }; \\\n";
+ header << " using Flags = base::Flags<Flag>; \\\n";
+ header << " static constexpr int kFlagCount = "
+ << type->fields().size() << "; \\\n";
}
header << "\n";
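GenerateBitFields now also emits a Flags alias and a kFlagCount constant for each Torque bitfield struct. Roughly what a user of the expanded macro ends up with, sketched standalone with invented field names and a plain unsigned alias in place of base::Flags<Flag>:

#include <cstdio>

struct DebuggerHints {
  enum Flag : unsigned {
    kSideEffectState = 1 << 0,
    kDebugIsBlackboxed = 1 << 1,
    kComputedDebugIsBlackboxed = 1 << 2,
  };
  using Flags = unsigned;                 // stands in for base::Flags<Flag>
  static constexpr int kFlagCount = 3;    // the newly emitted constant
};

int main() {
  DebuggerHints::Flags flags =
      DebuggerHints::kDebugIsBlackboxed | DebuggerHints::kComputedDebugIsBlackboxed;
  std::printf("flag count: %d, blackboxed: %d\n", DebuggerHints::kFlagCount,
              (flags & DebuggerHints::kDebugIsBlackboxed) != 0);
}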
@@ -4035,7 +4145,7 @@ void ImplementationVisitor::GenerateClassDefinitions(
structs_used_in_classes.insert(*field_as_struct);
}
}
- if (type->ShouldExport()) {
+ if (type->ShouldExport() && !type->IsAbstract()) {
factory_header << type->HandlifiedCppTypeName() << " New"
<< type->name() << "(";
factory_impl << type->HandlifiedCppTypeName() << " Factory::New"
diff --git a/chromium/v8/src/torque/implementation-visitor.h b/chromium/v8/src/torque/implementation-visitor.h
index c980f3d59b1..14506d0bf5c 100644
--- a/chromium/v8/src/torque/implementation-visitor.h
+++ b/chromium/v8/src/torque/implementation-visitor.h
@@ -137,7 +137,7 @@ class LocationReference {
return *bit_field_;
}
- const Type* ReferencedType() const {
+ base::Optional<const Type*> ReferencedType() const {
if (IsHeapReference()) {
return *TypeOracle::MatchReferenceGeneric(heap_reference().type());
}
@@ -148,7 +148,10 @@ class LocationReference {
if (IsBitFieldAccess()) {
return bit_field_->name_and_type.type;
}
- return GetVisitResult().type();
+ if (IsVariableAccess() || IsHeapSlice() || IsTemporary()) {
+ return GetVisitResult().type();
+ }
+ return base::nullopt;
}
const VisitResult& GetVisitResult() const {
@@ -498,6 +501,7 @@ class ImplementationVisitor {
VisitResult GetBuiltinCode(Builtin* builtin);
VisitResult Visit(LocationExpression* expr);
+ VisitResult Visit(FieldAccessExpression* expr);
void VisitAllDeclarables();
void Visit(Declarable* delarable);
@@ -783,9 +787,32 @@ class ImplementationVisitor {
ReplaceFileContentsIfDifferent(file, content);
}
+ const Identifier* TryGetSourceForBitfieldExpression(
+ const Expression* expr) const {
+ auto it = bitfield_expressions_.find(expr);
+ if (it == bitfield_expressions_.end()) return nullptr;
+ return it->second;
+ }
+
+ void PropagateBitfieldMark(const Expression* original,
+ const Expression* derived) {
+ if (const Identifier* source =
+ TryGetSourceForBitfieldExpression(original)) {
+ bitfield_expressions_[derived] = source;
+ }
+ }
+
base::Optional<CfgAssembler> assembler_;
NullOStream null_stream_;
bool is_dry_run_;
+
+ // Just for allowing us to emit warnings. After visiting an Expression, if
+ // that Expression is a bitfield load, plus an optional inversion or an
+ // equality check with a constant, then that Expression will be present in
+ // this map. The Identifier associated is the bitfield struct that contains
+ // the value to load.
+ std::unordered_map<const Expression*, const Identifier*>
+ bitfield_expressions_;
};
void ReportAllUnusedMacros();
diff --git a/chromium/v8/src/torque/instance-type-generator.cc b/chromium/v8/src/torque/instance-type-generator.cc
index 6e708f7fea6..2b53383f61a 100644
--- a/chromium/v8/src/torque/instance-type-generator.cc
+++ b/chromium/v8/src/torque/instance-type-generator.cc
@@ -436,11 +436,11 @@ void ImplementationVisitor::GenerateInstanceTypes(
header << only_declared_range_instance_types.str();
header << "\n";
- std::stringstream torque_internal_class_list;
- std::stringstream torque_internal_varsize_instance_type_list;
- std::stringstream torque_internal_fixed_instance_type_list;
- std::stringstream torque_internal_map_csa_list;
- std::stringstream torque_internal_map_root_list;
+ std::stringstream torque_defined_class_list;
+ std::stringstream torque_defined_varsize_instance_type_list;
+ std::stringstream torque_defined_fixed_instance_type_list;
+ std::stringstream torque_defined_map_csa_list;
+ std::stringstream torque_defined_map_root_list;
for (const ClassType* type : TypeOracle::GetClasses()) {
std::string upper_case_name = type->name();
@@ -449,41 +449,40 @@ void ImplementationVisitor::GenerateInstanceTypes(
CapifyStringWithUnderscores(type->name()) + "_TYPE";
if (type->IsExtern()) continue;
- torque_internal_class_list << " V(" << upper_case_name << ") \\\n";
+ torque_defined_class_list << " V(" << upper_case_name << ") \\\n";
if (type->IsAbstract()) continue;
- torque_internal_map_csa_list << " V(" << upper_case_name << "Map, "
- << lower_case_name << "_map, "
+ torque_defined_map_csa_list << " V(_, " << upper_case_name << "Map, "
+ << lower_case_name << "_map, "
+ << upper_case_name << ") \\\n";
+ torque_defined_map_root_list << " V(Map, " << lower_case_name << "_map, "
<< upper_case_name << "Map) \\\n";
- torque_internal_map_root_list << " V(Map, " << lower_case_name
- << "_map, " << upper_case_name
- << "Map) \\\n";
- std::stringstream& list =
- type->HasStaticSize() ? torque_internal_fixed_instance_type_list
- : torque_internal_varsize_instance_type_list;
+ std::stringstream& list = type->HasStaticSize()
+ ? torque_defined_fixed_instance_type_list
+ : torque_defined_varsize_instance_type_list;
list << " V(" << instance_type_name << ", " << upper_case_name << ", "
<< lower_case_name << ") \\\n";
}
- header << "// Non-extern Torque classes.\n";
- header << "#define TORQUE_INTERNAL_CLASS_LIST(V) \\\n";
- header << torque_internal_class_list.str();
+ header << "// Fully Torque-defined classes (both internal and exported).\n";
+ header << "#define TORQUE_DEFINED_CLASS_LIST(V) \\\n";
+ header << torque_defined_class_list.str();
header << "\n";
- header << "#define TORQUE_INTERNAL_VARSIZE_INSTANCE_TYPE_LIST(V) \\\n";
- header << torque_internal_varsize_instance_type_list.str();
+ header << "#define TORQUE_DEFINED_VARSIZE_INSTANCE_TYPE_LIST(V) \\\n";
+ header << torque_defined_varsize_instance_type_list.str();
header << "\n";
- header << "#define TORQUE_INTERNAL_FIXED_INSTANCE_TYPE_LIST(V) \\\n";
- header << torque_internal_fixed_instance_type_list.str();
+ header << "#define TORQUE_DEFINED_FIXED_INSTANCE_TYPE_LIST(V) \\\n";
+ header << torque_defined_fixed_instance_type_list.str();
header << "\n";
- header << "#define TORQUE_INTERNAL_INSTANCE_TYPE_LIST(V) \\\n";
- header << " TORQUE_INTERNAL_VARSIZE_INSTANCE_TYPE_LIST(V) \\\n";
- header << " TORQUE_INTERNAL_FIXED_INSTANCE_TYPE_LIST(V) \\\n";
+ header << "#define TORQUE_DEFINED_INSTANCE_TYPE_LIST(V) \\\n";
+ header << " TORQUE_DEFINED_VARSIZE_INSTANCE_TYPE_LIST(V) \\\n";
+ header << " TORQUE_DEFINED_FIXED_INSTANCE_TYPE_LIST(V) \\\n";
header << "\n";
- header << "#define TORQUE_INTERNAL_MAP_CSA_LIST(V) \\\n";
- header << torque_internal_map_csa_list.str();
+ header << "#define TORQUE_DEFINED_MAP_CSA_LIST_GENERATOR(V, _) \\\n";
+ header << torque_defined_map_csa_list.str();
header << "\n";
- header << "#define TORQUE_INTERNAL_MAP_ROOT_LIST(V) \\\n";
- header << torque_internal_map_root_list.str();
+ header << "#define TORQUE_DEFINED_MAP_ROOT_LIST(V) \\\n";
+ header << torque_defined_map_root_list.str();
header << "\n";
}
std::string output_header_path = output_directory + "/" + file_name;
diff --git a/chromium/v8/src/torque/instructions.h b/chromium/v8/src/torque/instructions.h
index 4609e8c223e..528d5c742ee 100644
--- a/chromium/v8/src/torque/instructions.h
+++ b/chromium/v8/src/torque/instructions.h
@@ -166,11 +166,11 @@ inline std::ostream& operator<<(std::ostream& stream,
<< loc.GetParameterIndex() << ")";
case DefinitionLocation::Kind::kPhi:
return stream << "DefinitionLocation::Phi(" << std::hex
- << (uint64_t)loc.GetPhiBlock() << std::dec << ", "
+ << loc.GetPhiBlock() << std::dec << ", "
<< loc.GetPhiIndex() << ")";
case DefinitionLocation::Kind::kInstruction:
return stream << "DefinitionLocation::Instruction(" << std::hex
- << (uint64_t)loc.GetInstruction() << std::dec << ", "
+ << loc.GetInstruction() << std::dec << ", "
<< loc.GetInstructionIndex() << ")";
}
}
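Dropping the (uint64_t) casts works because streaming a pointer already prints an implementation-defined hex address; an integral cast is unnecessary here, and would need uintptr_t rather than a fixed 64-bit type to be fully portable. A small comparison:

#include <cstdint>
#include <iostream>

int main() {
  int object = 0;
  const void* p = &object;
  // Streaming a pointer prints an implementation-defined hex address; the
  // std::hex manipulator is not needed (it only affects integer output).
  std::cout << "as pointer: " << p << "\n";
  // If an integer really is wanted, uintptr_t is the portable choice, and only
  // then does std::hex matter.
  std::cout << "as integer: " << std::hex << reinterpret_cast<std::uintptr_t>(p)
            << std::dec << "\n";
}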
diff --git a/chromium/v8/src/torque/ls/message.h b/chromium/v8/src/torque/ls/message.h
index 0d84d2ffafd..513a1e0d43a 100644
--- a/chromium/v8/src/torque/ls/message.h
+++ b/chromium/v8/src/torque/ls/message.h
@@ -82,8 +82,8 @@ class Message : public BaseJsonAccessor {
JSON_STRING_ACCESSORS(jsonrpc)
protected:
- const JsonObject& object() const { return value_.ToObject(); }
- JsonObject& object() { return value_.ToObject(); }
+ const JsonObject& object() const override { return value_.ToObject(); }
+ JsonObject& object() override { return value_.ToObject(); }
private:
JsonValue value_;
@@ -96,8 +96,8 @@ class NestedJsonAccessor : public BaseJsonAccessor {
public:
explicit NestedJsonAccessor(JsonObject& object) : object_(object) {}
- const JsonObject& object() const { return object_; }
- JsonObject& object() { return object_; }
+ const JsonObject& object() const override { return object_; }
+ JsonObject& object() override { return object_; }
private:
JsonObject& object_;
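Adding override to these accessors makes the compiler verify that they really override the virtuals declared in BaseJsonAccessor; a signature drift then fails to compile instead of silently hiding the base function. A minimal illustration with invented member types:

struct BaseJsonAccessor {
  virtual ~BaseJsonAccessor() = default;
  virtual const int& object() const = 0;
};

struct Message : BaseJsonAccessor {
  // `override` guarantees this matches a base-class virtual exactly.
  const int& object() const override { return value_; }
  // int& object() override { return value_; }  // would not compile: no such virtual
  int value_ = 0;
};

int main() {
  Message m;
  return m.object();
}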
diff --git a/chromium/v8/src/torque/torque-compiler.cc b/chromium/v8/src/torque/torque-compiler.cc
index fd717b2649b..20bc2973547 100644
--- a/chromium/v8/src/torque/torque-compiler.cc
+++ b/chromium/v8/src/torque/torque-compiler.cc
@@ -55,6 +55,7 @@ void CompileCurrentAst(TorqueCompilerOptions options) {
}
TargetArchitecture::Scope target_architecture(options.force_32bit_output);
TypeOracle::Scope type_oracle;
+ CurrentScope::Scope current_namespace(GlobalContext::GetDefaultNamespace());
// Two-step process of predeclaration + resolution allows to resolve type
// declarations independent of the order they are given.
diff --git a/chromium/v8/src/torque/torque-parser.cc b/chromium/v8/src/torque/torque-parser.cc
index fa496ae6a09..a79f271eec8 100644
--- a/chromium/v8/src/torque/torque-parser.cc
+++ b/chromium/v8/src/torque/torque-parser.cc
@@ -855,7 +855,7 @@ base::Optional<ParseResult> MakeClassDeclaration(
{ANNOTATION_GENERATE_PRINT, ANNOTATION_NO_VERIFIER, ANNOTATION_ABSTRACT,
ANNOTATION_HAS_SAME_INSTANCE_TYPE_AS_PARENT,
ANNOTATION_GENERATE_CPP_CLASS, ANNOTATION_GENERATE_BODY_DESCRIPTOR,
- ANNOTATION_EXPORT_CPP_CLASS,
+ ANNOTATION_EXPORT_CPP_CLASS, ANNOTATION_DO_NOT_GENERATE_CAST,
ANNOTATION_HIGHEST_INSTANCE_TYPE_WITHIN_PARENT,
ANNOTATION_LOWEST_INSTANCE_TYPE_WITHIN_PARENT},
{ANNOTATION_RESERVE_BITS_IN_INSTANCE_TYPE,
@@ -874,6 +874,9 @@ base::Optional<ParseResult> MakeClassDeclaration(
if (annotations.Contains(ANNOTATION_GENERATE_CPP_CLASS)) {
flags |= ClassFlag::kGenerateCppClassDefinitions;
}
+ if (annotations.Contains(ANNOTATION_DO_NOT_GENERATE_CAST)) {
+ flags |= ClassFlag::kDoNotGenerateCast;
+ }
if (annotations.Contains(ANNOTATION_GENERATE_BODY_DESCRIPTOR)) {
flags |= ClassFlag::kGenerateBodyDescriptor;
}
@@ -896,6 +899,7 @@ base::Optional<ParseResult> MakeClassDeclaration(
flags |= ClassFlag::kIsShape;
flags |= ClassFlag::kTransient;
flags |= ClassFlag::kHasSameInstanceTypeAsParent;
+ flags |= ClassFlag::kDoNotGenerateCast;
} else {
DCHECK_EQ(kind, "class");
}
@@ -934,9 +938,60 @@ base::Optional<ParseResult> MakeClassDeclaration(
return true;
});
- Declaration* result = MakeNode<ClassDeclaration>(
+ std::vector<Declaration*> result;
+
+ result.push_back(MakeNode<ClassDeclaration>(
name, flags, std::move(extends), std::move(generates), std::move(methods),
- fields, MakeInstanceTypeConstraints(annotations));
+ fields, MakeInstanceTypeConstraints(annotations)));
+
+ if ((flags & ClassFlag::kDoNotGenerateCast) == 0 &&
+ (flags & ClassFlag::kIsShape) == 0) {
+ ParameterList parameters;
+ parameters.names.push_back(MakeNode<Identifier>("obj"));
+ parameters.types.push_back(
+ MakeNode<BasicTypeExpression>(std::vector<std::string>{}, "HeapObject",
+ std::vector<TypeExpression*>{}));
+ LabelAndTypesVector labels;
+ labels.push_back(LabelAndTypes{MakeNode<Identifier>("CastError"),
+ std::vector<TypeExpression*>{}});
+
+ TypeExpression* class_type =
+ MakeNode<BasicTypeExpression>(std::vector<std::string>{}, name->value,
+ std::vector<TypeExpression*>{});
+
+ std::vector<std::string> namespace_qualification{
+ TORQUE_INTERNAL_NAMESPACE_STRING};
+
+ IdentifierExpression* internal_downcast_target =
+ MakeNode<IdentifierExpression>(
+ namespace_qualification,
+ MakeNode<Identifier>("DownCastForTorqueClass"),
+ std::vector<TypeExpression*>{class_type});
+ IdentifierExpression* internal_downcast_otherwise =
+ MakeNode<IdentifierExpression>(std::vector<std::string>{},
+ MakeNode<Identifier>("CastError"));
+
+ Expression* argument = MakeNode<IdentifierExpression>(
+ std::vector<std::string>{}, MakeNode<Identifier>("obj"));
+
+ auto value = MakeCall(internal_downcast_target, base::nullopt,
+ std::vector<Expression*>{argument},
+ std::vector<Statement*>{MakeNode<ExpressionStatement>(
+ internal_downcast_otherwise)});
+
+ auto cast_body = MakeNode<ReturnStatement>(value);
+
+ std::vector<TypeExpression*> generic_parameters;
+ generic_parameters.push_back(
+ MakeNode<BasicTypeExpression>(std::vector<std::string>{}, name->value,
+ std::vector<TypeExpression*>{}));
+
+ Declaration* specialization = MakeNode<SpecializationDeclaration>(
+ false, MakeNode<Identifier>("Cast"), generic_parameters,
+ std::move(parameters), class_type, std::move(labels), cast_body);
+ result.push_back(specialization);
+ }
+
return ParseResult{result};
}
@@ -1602,6 +1657,17 @@ base::Optional<ParseResult> MakeRightShiftIdentifier(
return ParseResult{MakeNode<Identifier>(str)};
}
+base::Optional<ParseResult> MakeNamespaceQualification(
+ ParseResultIterator* child_results) {
+ bool global_namespace = child_results->NextAs<bool>();
+ auto namespace_qualification =
+ child_results->NextAs<std::vector<std::string>>();
+ if (global_namespace) {
+ namespace_qualification.insert(namespace_qualification.begin(), "");
+ }
+ return ParseResult(std::move(namespace_qualification));
+}
+
base::Optional<ParseResult> MakeIdentifierExpression(
ParseResultIterator* child_results) {
auto namespace_qualification =
@@ -1982,14 +2048,19 @@ struct TorqueGrammar : Grammar {
// Result: std::vector<Annotation>
Symbol* annotations = List<Annotation>(&annotation);
+ // Result: std::vector<std::string>
+ Symbol namespaceQualification = {
+ Rule({CheckIf(Token("::")),
+ List<std::string>(Sequence({&identifier, Token("::")}))},
+ MakeNamespaceQualification)};
+
// Result: TypeList
Symbol* typeList = List<TypeExpression*>(&type, Token(","));
// Result: TypeExpression*
Symbol simpleType = {
Rule({Token("("), &type, Token(")")}),
- Rule({List<std::string>(Sequence({&identifier, Token("::")})),
- CheckIf(Token("constexpr")), &identifier,
+ Rule({&namespaceQualification, CheckIf(Token("constexpr")), &identifier,
TryOrDefault<std::vector<TypeExpression*>>(
&genericSpecializationTypeList)},
MakeBasicTypeExpression),
@@ -2123,7 +2194,7 @@ struct TorqueGrammar : Grammar {
// Result: Expression*
Symbol identifierExpression = {
- Rule({List<std::string>(Sequence({&identifier, Token("::")})), &name,
+ Rule({&namespaceQualification, &name,
TryOrDefault<TypeList>(&genericSpecializationTypeList)},
MakeIdentifierExpression),
};
@@ -2369,7 +2440,7 @@ struct TorqueGrammar : Grammar {
Optional<std::string>(
Sequence({Token("generates"), &externalString})),
&optionalClassBody},
- AsSingletonVector<Declaration*, MakeClassDeclaration>()),
+ MakeClassDeclaration),
Rule({annotations, Token("struct"), &name,
TryOrDefault<GenericParameters>(&genericParameters), Token("{"),
List<Declaration*>(&method),
diff --git a/chromium/v8/src/torque/type-visitor.cc b/chromium/v8/src/torque/type-visitor.cc
index 5b61baf3a83..453d71e4639 100644
--- a/chromium/v8/src/torque/type-visitor.cc
+++ b/chromium/v8/src/torque/type-visitor.cc
@@ -311,6 +311,16 @@ const ClassType* TypeVisitor::ComputeType(
flags = flags | ClassFlag::kGeneratePrint | ClassFlag::kGenerateVerify |
ClassFlag::kGenerateBodyDescriptor;
}
+ if (!(flags & ClassFlag::kExtern) &&
+ (flags & ClassFlag::kHasSameInstanceTypeAsParent)) {
+ Error("non-extern Torque-defined classes must have unique instance types");
+ }
+ if ((flags & ClassFlag::kHasSameInstanceTypeAsParent) &&
+ !(flags & ClassFlag::kDoNotGenerateCast || flags & ClassFlag::kIsShape)) {
+ Error(
+ "classes that inherit their instance type must be annotated with "
+ "@doNotGenerateCast");
+ }
return TypeOracle::GetClassType(super_type, decl->name->value, flags,
generates, decl, alias);
diff --git a/chromium/v8/src/torque/types.h b/chromium/v8/src/torque/types.h
index c01d55ccff0..7d84ae6a045 100644
--- a/chromium/v8/src/torque/types.h
+++ b/chromium/v8/src/torque/types.h
@@ -104,6 +104,7 @@ struct RuntimeType {
class V8_EXPORT_PRIVATE Type : public TypeBase {
public:
+ Type& operator=(const Type& other) = delete;
virtual bool IsSubtypeOf(const Type* supertype) const;
// Default rendering for error messages etc.
@@ -164,7 +165,6 @@ class V8_EXPORT_PRIVATE Type : public TypeBase {
Type(TypeBase::Kind kind, const Type* parent,
MaybeSpecializationKey specialized_from = base::nullopt);
Type(const Type& other) V8_NOEXCEPT;
- Type& operator=(const Type& other) = delete;
void set_parent(const Type* t) { parent_ = t; }
int Depth() const;
virtual std::string ToExplicitString() const = 0;
@@ -660,6 +660,9 @@ class ClassType final : public AggregateType {
if (IsAbstract()) return false;
return flags_ & ClassFlag::kGenerateBodyDescriptor || !IsExtern();
}
+ bool DoNotGenerateCast() const {
+ return flags_ & ClassFlag::kDoNotGenerateCast;
+ }
bool IsTransient() const override { return flags_ & ClassFlag::kTransient; }
bool IsAbstract() const { return flags_ & ClassFlag::kAbstract; }
bool HasSameInstanceTypeAsParent() const {
diff --git a/chromium/v8/src/torque/utils.h b/chromium/v8/src/torque/utils.h
index 689d242ab0d..52efe6564e7 100644
--- a/chromium/v8/src/torque/utils.h
+++ b/chromium/v8/src/torque/utils.h
@@ -47,6 +47,7 @@ std::string ToString(Args&&... args) {
class V8_EXPORT_PRIVATE MessageBuilder {
public:
+ MessageBuilder() = delete;
MessageBuilder(const std::string& message, TorqueMessage::Kind kind);
MessageBuilder& Position(SourcePosition position) {
@@ -62,7 +63,6 @@ class V8_EXPORT_PRIVATE MessageBuilder {
}
private:
- MessageBuilder() = delete;
void Report() const;
TorqueMessage message_;
@@ -172,9 +172,7 @@ void PrintCommaSeparatedList(std::ostream& os, const T& list) {
struct BottomOffset {
size_t offset;
- BottomOffset(std::nullptr_t zero = 0) // NOLINT(runtime/explicit)
- : offset(0) {}
- explicit BottomOffset(std::size_t offset) : offset(offset) {}
+
BottomOffset& operator=(std::size_t offset) {
this->offset = offset;
return *this;
@@ -370,10 +368,10 @@ class IfDefScope {
public:
IfDefScope(std::ostream& os, std::string d);
~IfDefScope();
-
- private:
IfDefScope(const IfDefScope&) = delete;
IfDefScope& operator=(const IfDefScope&) = delete;
+
+ private:
std::ostream& os_;
std::string d_;
};
@@ -383,10 +381,10 @@ class NamespaceScope {
NamespaceScope(std::ostream& os,
std::initializer_list<std::string> namespaces);
~NamespaceScope();
-
- private:
NamespaceScope(const NamespaceScope&) = delete;
NamespaceScope& operator=(const NamespaceScope&) = delete;
+
+ private:
std::ostream& os_;
std::vector<std::string> d_;
};
@@ -395,10 +393,10 @@ class IncludeGuardScope {
public:
IncludeGuardScope(std::ostream& os, std::string file_name);
~IncludeGuardScope();
-
- private:
IncludeGuardScope(const IncludeGuardScope&) = delete;
IncludeGuardScope& operator=(const IncludeGuardScope&) = delete;
+
+ private:
std::ostream& os_;
std::string d_;
};
@@ -407,10 +405,10 @@ class IncludeObjectMacrosScope {
public:
explicit IncludeObjectMacrosScope(std::ostream& os);
~IncludeObjectMacrosScope();
-
- private:
IncludeObjectMacrosScope(const IncludeObjectMacrosScope&) = delete;
IncludeObjectMacrosScope& operator=(const IncludeObjectMacrosScope&) = delete;
+
+ private:
std::ostream& os_;
};
diff --git a/chromium/v8/src/tracing/trace-categories.h b/chromium/v8/src/tracing/trace-categories.h
index 91bf3da96e7..2f9d672801f 100644
--- a/chromium/v8/src/tracing/trace-categories.h
+++ b/chromium/v8/src/tracing/trace-categories.h
@@ -32,6 +32,7 @@ PERFETTO_DEFINE_CATEGORIES(
perfetto::Category("v8.console"),
perfetto::Category("v8.execute"),
perfetto::Category("v8.runtime"),
+ perfetto::Category("v8.wasm"),
perfetto::Category::Group("devtools.timeline,v8"),
perfetto::Category::Group("devtools.timeline,"
TRACE_DISABLED_BY_DEFAULT("v8.gc")),
@@ -39,7 +40,6 @@ PERFETTO_DEFINE_CATEGORIES(
perfetto::Category(TRACE_DISABLED_BY_DEFAULT("v8")),
perfetto::Category(TRACE_DISABLED_BY_DEFAULT("v8.compile")),
perfetto::Category(TRACE_DISABLED_BY_DEFAULT("v8.cpu_profiler")),
- perfetto::Category(TRACE_DISABLED_BY_DEFAULT("v8.cpu_profiler.hires")),
perfetto::Category(TRACE_DISABLED_BY_DEFAULT("v8.gc")),
perfetto::Category(TRACE_DISABLED_BY_DEFAULT("v8.gc_stats")),
perfetto::Category(TRACE_DISABLED_BY_DEFAULT("v8.ic_stats")),
@@ -47,10 +47,11 @@ PERFETTO_DEFINE_CATEGORIES(
perfetto::Category(TRACE_DISABLED_BY_DEFAULT("v8.runtime_stats")),
perfetto::Category(TRACE_DISABLED_BY_DEFAULT("v8.runtime_stats_sampling")),
perfetto::Category(TRACE_DISABLED_BY_DEFAULT("v8.turbofan")),
- perfetto::Category(TRACE_DISABLED_BY_DEFAULT("v8.wasm")),
+ perfetto::Category(TRACE_DISABLED_BY_DEFAULT("v8.wasm.detailed")),
+ perfetto::Category(TRACE_DISABLED_BY_DEFAULT("v8.zone_stats")),
perfetto::Category::Group("v8,devtools.timeline"),
perfetto::Category::Group(TRACE_DISABLED_BY_DEFAULT("v8.turbofan") ","
- TRACE_DISABLED_BY_DEFAULT("v8.wasm")));
+ TRACE_DISABLED_BY_DEFAULT("v8.wasm.detailed")));
// clang-format on
#endif // defined(V8_USE_PERFETTO)
diff --git a/chromium/v8/src/tracing/tracing-category-observer.cc b/chromium/v8/src/tracing/tracing-category-observer.cc
index a44074d52d1..3debacb548d 100644
--- a/chromium/v8/src/tracing/tracing-category-observer.cc
+++ b/chromium/v8/src/tracing/tracing-category-observer.cc
@@ -57,6 +57,13 @@ void TracingCategoryObserver::OnTraceEnabled() {
i::TracingFlags::ic_stats.fetch_or(ENABLED_BY_TRACING,
std::memory_order_relaxed);
}
+
+ TRACE_EVENT_CATEGORY_GROUP_ENABLED(TRACE_DISABLED_BY_DEFAULT("v8.zone_stats"),
+ &enabled);
+ if (enabled) {
+ i::TracingFlags::zone_stats.fetch_or(ENABLED_BY_TRACING,
+ std::memory_order_relaxed);
+ }
}
void TracingCategoryObserver::OnTraceDisabled() {
diff --git a/chromium/v8/src/trap-handler/handler-inside-posix.cc b/chromium/v8/src/trap-handler/handler-inside-posix.cc
index 6228e87e84f..b943d3222aa 100644
--- a/chromium/v8/src/trap-handler/handler-inside-posix.cc
+++ b/chromium/v8/src/trap-handler/handler-inside-posix.cc
@@ -60,15 +60,15 @@ class SigUnmaskStack {
pthread_sigmask(SIG_UNBLOCK, &sigs, &old_mask_);
}
- ~SigUnmaskStack() { pthread_sigmask(SIG_SETMASK, &old_mask_, nullptr); }
-
- private:
- sigset_t old_mask_;
-
// We'd normally use DISALLOW_COPY_AND_ASSIGN, but we're avoiding a dependency
// on base/macros.h
SigUnmaskStack(const SigUnmaskStack&) = delete;
void operator=(const SigUnmaskStack&) = delete;
+
+ ~SigUnmaskStack() { pthread_sigmask(SIG_SETMASK, &old_mask_, nullptr); }
+
+ private:
+ sigset_t old_mask_;
};
bool TryHandleSignal(int signum, siginfo_t* info, void* context) {
diff --git a/chromium/v8/src/utils/ostreams.cc b/chromium/v8/src/utils/ostreams.cc
index c43f01be563..b58f51159b8 100644
--- a/chromium/v8/src/utils/ostreams.cc
+++ b/chromium/v8/src/utils/ostreams.cc
@@ -6,6 +6,7 @@
#include <cinttypes>
+#include "src/base/lazy-instance.h"
#include "src/objects/objects.h"
#include "src/objects/string.h"
@@ -114,6 +115,9 @@ std::streamsize AndroidLogStream::xsputn(const char* s, std::streamsize n) {
}
#endif
+DEFINE_LAZY_LEAKY_OBJECT_GETTER(base::RecursiveMutex,
+ StdoutStream::GetStdoutMutex)
+
namespace {
// Locale-independent predicates.
diff --git a/chromium/v8/src/utils/ostreams.h b/chromium/v8/src/utils/ostreams.h
index 118dfc282af..899c85fd94c 100644
--- a/chromium/v8/src/utils/ostreams.h
+++ b/chromium/v8/src/utils/ostreams.h
@@ -13,6 +13,7 @@
#include "include/v8config.h"
#include "src/base/macros.h"
+#include "src/base/platform/mutex.h"
#include "src/common/globals.h"
namespace v8 {
@@ -80,12 +81,20 @@ class StdoutStream : public std::ostream {
StdoutStream() : std::ostream(&stream_) {}
private:
+ static V8_EXPORT_PRIVATE base::RecursiveMutex* GetStdoutMutex();
+
AndroidLogStream stream_;
+ base::RecursiveMutexGuard mutex_guard_{GetStdoutMutex()};
};
#else
class StdoutStream : public OFStream {
public:
StdoutStream() : OFStream(stdout) {}
+
+ private:
+ static V8_EXPORT_PRIVATE base::RecursiveMutex* GetStdoutMutex();
+
+ base::RecursiveMutexGuard mutex_guard_{GetStdoutMutex()};
};
#endif
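The net effect of the ostreams changes above is that every StdoutStream holds a process-wide recursive lock for its lifetime, so concurrent writers no longer interleave their output. A rough standard-library analogue of the pattern (not the V8 classes, sketch only):

#include <iostream>
#include <mutex>

class LockedStdout {
 public:
  LockedStdout() : guard_(Mutex()) {}

  template <typename T>
  LockedStdout& operator<<(const T& value) {
    std::cout << value;
    return *this;
  }

 private:
  // Lazily created and intentionally leaked, mirroring the lazy leaky getter.
  static std::recursive_mutex& Mutex() {
    static std::recursive_mutex* mutex = new std::recursive_mutex();
    return *mutex;
  }
  std::lock_guard<std::recursive_mutex> guard_;
};

// Usage: LockedStdout() << "one atomic line\n";
// The lock is held until the temporary is destroyed at the end of the
// full expression.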
diff --git a/chromium/v8/src/utils/pointer-with-payload.h b/chromium/v8/src/utils/pointer-with-payload.h
index 3dbd6acac05..6200f410775 100644
--- a/chromium/v8/src/utils/pointer-with-payload.h
+++ b/chromium/v8/src/utils/pointer-with-payload.h
@@ -20,6 +20,12 @@ struct PointerWithPayloadTraits {
alignof(PointerType) >= 8 ? 3 : alignof(PointerType) >= 4 ? 2 : 1;
};
+// Assume void* has the same payload bits as void**, on the assumption that it
+// is used for classes that contain at least one pointer.
+template <>
+struct PointerWithPayloadTraits<void> : public PointerWithPayloadTraits<void*> {
+};
+
// PointerWithPayload combines a PointerType* and a small PayloadType into
// one. The bits of the storage type get packed into the lower bits of the
// pointer that are free due to alignment. The user needs to specify how many
@@ -42,7 +48,8 @@ class PointerWithPayload {
"Ptr does not have sufficient alignment for the selected amount of "
"storage bits.");
- static constexpr uintptr_t kPayloadMask = (uintptr_t{1} << kAvailBits) - 1;
+ static constexpr uintptr_t kPayloadMask =
+ (uintptr_t{1} << NumPayloadBits) - 1;
static constexpr uintptr_t kPointerMask = ~kPayloadMask;
public:
@@ -68,6 +75,13 @@ class PointerWithPayload {
return reinterpret_cast<PointerType*>(pointer_ & kPointerMask);
}
+ // An optimized version of GetPointer for when we know the payload value.
+ V8_INLINE PointerType* GetPointerWithKnownPayload(PayloadType payload) const {
+ DCHECK_EQ(GetPayload(), payload);
+ return reinterpret_cast<PointerType*>(pointer_ -
+ static_cast<uintptr_t>(payload));
+ }
+
V8_INLINE PointerType* operator->() const { return GetPointer(); }
V8_INLINE void update(PointerType* new_pointer, PayloadType new_payload) {
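For context on the mask fix and the new accessor above, a stripped-down model of the pointer/payload packing (a sketch only; the real class derives the available bits from the traits shown earlier):

#include <cassert>
#include <cstdint>

template <typename T, int kPayloadBits>
class TaggedPtr {
  static constexpr uintptr_t kPayloadMask = (uintptr_t{1} << kPayloadBits) - 1;
  static constexpr uintptr_t kPointerMask = ~kPayloadMask;
  uintptr_t bits_ = 0;

 public:
  TaggedPtr(T* ptr, int payload)
      : bits_(reinterpret_cast<uintptr_t>(ptr) |
              static_cast<uintptr_t>(payload)) {
    // The pointer must be aligned enough to leave the low bits free.
    assert((reinterpret_cast<uintptr_t>(ptr) & kPayloadMask) == 0);
    assert((static_cast<uintptr_t>(payload) & ~kPayloadMask) == 0);
  }
  T* pointer() const { return reinterpret_cast<T*>(bits_ & kPointerMask); }
  int payload() const { return static_cast<int>(bits_ & kPayloadMask); }
  // Mirrors GetPointerWithKnownPayload: subtracting the known payload avoids
  // loading the mask constant.
  T* pointer_with_known_payload(int payload) const {
    assert(this->payload() == payload);
    return reinterpret_cast<T*>(bits_ - static_cast<uintptr_t>(payload));
  }
};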
diff --git a/chromium/v8/src/utils/vector.h b/chromium/v8/src/utils/vector.h
index 38202d804fd..50622174844 100644
--- a/chromium/v8/src/utils/vector.h
+++ b/chromium/v8/src/utils/vector.h
@@ -28,9 +28,7 @@ class Vector {
constexpr Vector() : start_(nullptr), length_(0) {}
constexpr Vector(T* data, size_t length) : start_(data), length_(length) {
-#if V8_HAS_CXX14_CONSTEXPR
- DCHECK(length == 0 || data != nullptr);
-#endif
+ CONSTEXPR_DCHECK(length == 0 || data != nullptr);
}
static Vector<T> New(size_t length) {
@@ -115,9 +113,7 @@ class Vector {
}
// Implicit conversion from Vector<T> to Vector<const T>.
- inline operator Vector<const T>() const {
- return Vector<const T>::cast(*this);
- }
+ operator Vector<const T>() const { return {start_, length_}; }
template <typename S>
static Vector<T> cast(Vector<S> input) {
@@ -163,6 +159,7 @@ class OwnedVector {
: data_(std::move(data)), length_(length) {
DCHECK_IMPLIES(length_ > 0, data_ != nullptr);
}
+
// Implicit conversion from {OwnedVector<U>} to {OwnedVector<T>}, instantiable
// if {std::unique_ptr<U>} can be converted to {std::unique_ptr<T>}.
// Can be used to convert {OwnedVector<T>} to {OwnedVector<const T>}.
@@ -207,11 +204,20 @@ class OwnedVector {
}
// Allocates a new vector of the specified size via the default allocator.
+ // Elements in the new vector are value-initialized.
static OwnedVector<T> New(size_t size) {
if (size == 0) return {};
return OwnedVector<T>(std::make_unique<T[]>(size), size);
}
+ // Allocates a new vector of the specified size via the default allocator.
+ // Elements in the new vector are default-initialized.
+ static OwnedVector<T> NewForOverwrite(size_t size) {
+ if (size == 0) return {};
+ // TODO(v8): Use {std::make_unique_for_overwrite} once we allow C++20.
+ return OwnedVector<T>(std::unique_ptr<T[]>(new T[size]), size);
+ }
+
// Allocates a new vector containing the specified collection of values.
// {Iterator} is the common type of {std::begin} and {std::end} called on a
// {const U&}. This function is only instantiable if that type exists.
@@ -222,7 +228,8 @@ class OwnedVector {
Iterator begin = std::begin(collection);
Iterator end = std::end(collection);
using non_const_t = typename std::remove_const<T>::type;
- auto vec = OwnedVector<non_const_t>::New(std::distance(begin, end));
+ auto vec =
+ OwnedVector<non_const_t>::NewForOverwrite(std::distance(begin, end));
std::copy(begin, end, vec.start());
return vec;
}
@@ -289,6 +296,14 @@ inline constexpr auto VectorOf(Container&& c)
return VectorOf(c.data(), c.size());
}
+// Construct a Vector from an initializer list. The vector can obviously only be
+// used as long as the initializer list is live. Valid uses include direct use
+// in parameter lists: F(VectorOf({1, 2, 3}));
+template <typename T>
+inline constexpr Vector<const T> VectorOf(std::initializer_list<T> list) {
+ return VectorOf(list.begin(), list.size());
+}
+
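Typical call sites for the two additions above might look as follows (a sketch, not part of the patch; it assumes Vector's usual begin()/end()/size() accessors):

#include <algorithm>
#include "src/utils/vector.h"  // the header patched above

namespace v8 {
namespace internal {

void ExampleUse() {
  // View over an initializer list; only valid while the list is alive, e.g.
  // directly inside a call such as F(VectorOf({1, 2, 3})).
  Vector<const int> view = VectorOf({1, 2, 3});

  // Skip value-initialization because every element is overwritten right away.
  OwnedVector<int> copy = OwnedVector<int>::NewForOverwrite(view.size());
  std::copy(view.begin(), view.end(), copy.start());
}

}  // namespace internal
}  // namespace v8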
template <typename T, size_t kSize>
class EmbeddedVector : public Vector<T> {
public:
diff --git a/chromium/v8/src/wasm/baseline/arm/liftoff-assembler-arm.h b/chromium/v8/src/wasm/baseline/arm/liftoff-assembler-arm.h
index eb91b79ea55..4a9cffb9728 100644
--- a/chromium/v8/src/wasm/baseline/arm/liftoff-assembler-arm.h
+++ b/chromium/v8/src/wasm/baseline/arm/liftoff-assembler-arm.h
@@ -332,6 +332,71 @@ inline void Load(LiftoffAssembler* assm, LiftoffRegister dst, MemOperand src,
}
}
+constexpr int MaskFromNeonDataType(NeonDataType dt) {
+ switch (dt) {
+ case NeonS8:
+ case NeonU8:
+ return 7;
+ case NeonS16:
+ case NeonU16:
+ return 15;
+ case NeonS32:
+ case NeonU32:
+ return 31;
+ case NeonS64:
+ case NeonU64:
+ return 63;
+ }
+}
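The mask above encodes the Wasm SIMD rule that shift counts are taken modulo the lane width. A scalar model of what EmitSimdShift computes per lane (a sketch, not part of the patch):

#include <cstdint>

// e.g. an i32x4.shl by 35 behaves like a shift by 35 & 31 == 3 in every lane.
uint32_t WasmLaneShl(uint32_t lane, uint32_t shift_count) {
  return lane << (shift_count & 31);
}
// Right shifts are emitted as vshl with a negated count (see the vneg below),
// because NEON has no register-controlled right-shift instruction.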
+
+enum ShiftDirection { kLeft, kRight };
+
+template <ShiftDirection dir = kLeft, NeonDataType dt, NeonSize sz>
+inline void EmitSimdShift(LiftoffAssembler* assm, LiftoffRegister dst,
+ LiftoffRegister lhs, LiftoffRegister rhs) {
+ constexpr int mask = MaskFromNeonDataType(dt);
+ UseScratchRegisterScope temps(assm);
+ QwNeonRegister tmp = temps.AcquireQ();
+ Register shift = temps.Acquire();
+ assm->and_(shift, rhs.gp(), Operand(mask));
+ assm->vdup(sz, tmp, shift);
+ if (dir == kRight) {
+ assm->vneg(sz, tmp, tmp);
+ }
+ assm->vshl(dt, liftoff::GetSimd128Register(dst),
+ liftoff::GetSimd128Register(lhs), tmp);
+}
+
+template <ShiftDirection dir, NeonDataType dt>
+inline void EmitSimdShiftImmediate(LiftoffAssembler* assm, LiftoffRegister dst,
+ LiftoffRegister lhs, int32_t rhs) {
+ // vshr by 0 is not allowed, so check for it; if the shift is 0, only emit a
+ // move, and only when dst != lhs.
+ int32_t shift = rhs & MaskFromNeonDataType(dt);
+ if (shift) {
+ if (dir == kLeft) {
+ assm->vshl(dt, liftoff::GetSimd128Register(dst),
+ liftoff::GetSimd128Register(lhs), shift);
+ } else {
+ assm->vshr(dt, liftoff::GetSimd128Register(dst),
+ liftoff::GetSimd128Register(lhs), shift);
+ }
+ } else if (dst != lhs) {
+ assm->vmov(liftoff::GetSimd128Register(dst),
+ liftoff::GetSimd128Register(lhs));
+ }
+}
+
+inline void EmitAnyTrue(LiftoffAssembler* assm, LiftoffRegister dst,
+ LiftoffRegister src) {
+ UseScratchRegisterScope temps(assm);
+ DwVfpRegister scratch = temps.AcquireD();
+ assm->vpmax(NeonU32, scratch, src.low_fp(), src.high_fp());
+ assm->vpmax(NeonU32, scratch, scratch, scratch);
+ assm->ExtractLane(dst.gp(), scratch, NeonS32, 0);
+ assm->cmp(dst.gp(), Operand(0));
+ assm->mov(dst.gp(), Operand(1), LeaveCC, ne);
+}
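A scalar model of the reductions used here and in the alltrue helpers further down (sketch only):

#include <cstdint>

// v32x4.any_true: 1 if any lane is non-zero (the vpmax chain above).
int AnyTrue(const uint32_t lanes[4]) {
  uint32_t max = 0;
  for (int i = 0; i < 4; ++i) max = lanes[i] > max ? lanes[i] : max;
  return max != 0;
}

// v32x4.all_true: 1 if no lane is zero (the vpmin chains below).
int AllTrue(const uint32_t lanes[4]) {
  uint32_t min = lanes[0];
  for (int i = 1; i < 4; ++i) min = lanes[i] < min ? lanes[i] : min;
  return min != 0;
}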
+
} // namespace liftoff
int LiftoffAssembler::PrepareStackFrame() {
@@ -437,7 +502,7 @@ void LiftoffAssembler::LoadConstant(LiftoffRegister reg, WasmValue value,
vmov(liftoff::GetFloatRegister(reg.fp()), value.to_f32_boxed());
break;
case ValueType::kF64: {
- Register extra_scratch = GetUnusedRegister(kGpReg).gp();
+ Register extra_scratch = GetUnusedRegister(kGpReg, {}).gp();
vmov(reg.fp(), Double(value.to_f64_boxed().get_bits()), extra_scratch);
break;
}
@@ -1171,7 +1236,7 @@ void LiftoffAssembler::StoreCallerFrameSlot(LiftoffRegister src,
void LiftoffAssembler::MoveStackValue(uint32_t dst_offset, uint32_t src_offset,
ValueType type) {
DCHECK_NE(dst_offset, src_offset);
- LiftoffRegister reg = GetUnusedRegister(reg_class_for(type));
+ LiftoffRegister reg = GetUnusedRegister(reg_class_for(type), {});
Fill(reg, src_offset, type);
Spill(dst_offset, reg, type);
}
@@ -1216,7 +1281,7 @@ void LiftoffAssembler::Spill(int offset, WasmValue value) {
// The scratch register will be required by str if multiple instructions
// are required to encode the offset, and so we cannot use it in that case.
if (!ImmediateFitsAddrMode2Instruction(dst.offset())) {
- src = GetUnusedRegister(kGpReg).gp();
+ src = GetUnusedRegister(kGpReg, {}).gp();
} else {
src = temps.Acquire();
}
@@ -1758,7 +1823,7 @@ void LiftoffAssembler::emit_f32_copysign(DoubleRegister dst, DoubleRegister lhs,
DoubleRegister rhs) {
constexpr uint32_t kF32SignBit = uint32_t{1} << 31;
UseScratchRegisterScope temps(this);
- Register scratch = GetUnusedRegister(kGpReg).gp();
+ Register scratch = GetUnusedRegister(kGpReg, {}).gp();
Register scratch2 = temps.Acquire();
VmovLow(scratch, lhs);
// Clear sign bit in {scratch}.
@@ -1777,7 +1842,7 @@ void LiftoffAssembler::emit_f64_copysign(DoubleRegister dst, DoubleRegister lhs,
// On arm, we cannot hold the whole f64 value in a gp register, so we just
// operate on the upper half (UH).
UseScratchRegisterScope temps(this);
- Register scratch = GetUnusedRegister(kGpReg).gp();
+ Register scratch = GetUnusedRegister(kGpReg, {}).gp();
Register scratch2 = temps.Acquire();
VmovHigh(scratch, lhs);
// Clear sign bit in {scratch}.
@@ -1862,6 +1927,38 @@ bool LiftoffAssembler::emit_type_conversion(WasmOpcode opcode,
b(trap, ge);
return true;
}
+ case kExprI32SConvertSatF32: {
+ UseScratchRegisterScope temps(this);
+ SwVfpRegister scratch_f = temps.AcquireS();
+ vcvt_s32_f32(
+ scratch_f,
+ liftoff::GetFloatRegister(src.fp())); // f32 -> i32 round to zero.
+ vmov(dst.gp(), scratch_f);
+ return true;
+ }
+ case kExprI32UConvertSatF32: {
+ UseScratchRegisterScope temps(this);
+ SwVfpRegister scratch_f = temps.AcquireS();
+ vcvt_u32_f32(
+ scratch_f,
+ liftoff::GetFloatRegister(src.fp())); // f32 -> u32 round to zero.
+ vmov(dst.gp(), scratch_f);
+ return true;
+ }
+ case kExprI32SConvertSatF64: {
+ UseScratchRegisterScope temps(this);
+ SwVfpRegister scratch_f = temps.AcquireS();
+ vcvt_s32_f64(scratch_f, src.fp()); // f64 -> i32 round to zero.
+ vmov(dst.gp(), scratch_f);
+ return true;
+ }
+ case kExprI32UConvertSatF64: {
+ UseScratchRegisterScope temps(this);
+ SwVfpRegister scratch_f = temps.AcquireS();
+ vcvt_u32_f64(scratch_f, src.fp()); // f64 -> u32 round to zero.
+ vmov(dst.gp(), scratch_f);
+ return true;
+ }
case kExprI32ReinterpretF32:
vmov(dst.gp(), liftoff::GetFloatRegister(src.fp()));
return true;
@@ -1914,10 +2011,14 @@ bool LiftoffAssembler::emit_type_conversion(WasmOpcode opcode,
case kExprF64UConvertI64:
case kExprI64SConvertF32:
case kExprI64UConvertF32:
+ case kExprI64SConvertSatF32:
+ case kExprI64UConvertSatF32:
case kExprF32SConvertI64:
case kExprF32UConvertI64:
case kExprI64SConvertF64:
case kExprI64UConvertF64:
+ case kExprI64SConvertSatF64:
+ case kExprI64UConvertSatF64:
// These cases can be handled by the C fallback function.
return false;
default:
@@ -2052,6 +2153,79 @@ void LiftoffAssembler::emit_f64_set_cond(Condition cond, Register dst,
}
}
+void LiftoffAssembler::LoadTransform(LiftoffRegister dst, Register src_addr,
+ Register offset_reg, uint32_t offset_imm,
+ LoadType type,
+ LoadTransformationKind transform,
+ uint32_t* protected_load_pc) {
+ UseScratchRegisterScope temps(this);
+ Register actual_src_addr = liftoff::CalculateActualAddress(
+ this, &temps, src_addr, offset_reg, offset_imm);
+ *protected_load_pc = pc_offset();
+ MachineType memtype = type.mem_type();
+
+ if (transform == LoadTransformationKind::kExtend) {
+ if (memtype == MachineType::Int8()) {
+ vld1(Neon8, NeonListOperand(dst.low_fp()),
+ NeonMemOperand(actual_src_addr));
+ vmovl(NeonS8, liftoff::GetSimd128Register(dst), dst.low_fp());
+ } else if (memtype == MachineType::Uint8()) {
+ vld1(Neon8, NeonListOperand(dst.low_fp()),
+ NeonMemOperand(actual_src_addr));
+ vmovl(NeonU8, liftoff::GetSimd128Register(dst), dst.low_fp());
+ } else if (memtype == MachineType::Int16()) {
+ vld1(Neon16, NeonListOperand(dst.low_fp()),
+ NeonMemOperand(actual_src_addr));
+ vmovl(NeonS16, liftoff::GetSimd128Register(dst), dst.low_fp());
+ } else if (memtype == MachineType::Uint16()) {
+ vld1(Neon16, NeonListOperand(dst.low_fp()),
+ NeonMemOperand(actual_src_addr));
+ vmovl(NeonU16, liftoff::GetSimd128Register(dst), dst.low_fp());
+ } else if (memtype == MachineType::Int32()) {
+ vld1(Neon32, NeonListOperand(dst.low_fp()),
+ NeonMemOperand(actual_src_addr));
+ vmovl(NeonS32, liftoff::GetSimd128Register(dst), dst.low_fp());
+ } else if (memtype == MachineType::Uint32()) {
+ vld1(Neon32, NeonListOperand(dst.low_fp()),
+ NeonMemOperand(actual_src_addr));
+ vmovl(NeonU32, liftoff::GetSimd128Register(dst), dst.low_fp());
+ }
+ } else {
+ DCHECK_EQ(LoadTransformationKind::kSplat, transform);
+ if (memtype == MachineType::Int8()) {
+ vld1r(Neon8, NeonListOperand(liftoff::GetSimd128Register(dst)),
+ NeonMemOperand(actual_src_addr));
+ } else if (memtype == MachineType::Int16()) {
+ vld1r(Neon16, NeonListOperand(liftoff::GetSimd128Register(dst)),
+ NeonMemOperand(actual_src_addr));
+ } else if (memtype == MachineType::Int32()) {
+ vld1r(Neon32, NeonListOperand(liftoff::GetSimd128Register(dst)),
+ NeonMemOperand(actual_src_addr));
+ } else if (memtype == MachineType::Int64()) {
+ vld1(Neon32, NeonListOperand(dst.low_fp()),
+ NeonMemOperand(actual_src_addr));
+ TurboAssembler::Move(dst.high_fp(), dst.low_fp());
+ }
+ }
+}
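A scalar model of the two transformation kinds handled above (sketch only; the 8-bit signed extend and splat are shown, the other widths are analogous):

#include <cstdint>
#include <cstring>

// kExtend for Int8 (i16x8.load8x8_s): read 8 bytes, sign-extend each to 16 bits.
void Load8x8S(const uint8_t* mem, int16_t out[8]) {
  for (int i = 0; i < 8; ++i) {
    int8_t byte;
    std::memcpy(&byte, mem + i, 1);
    out[i] = byte;  // sign extension happens in the int8_t -> int16_t conversion
  }
}

// kSplat for Int8 (v8x16.load_splat): read one byte, replicate it into all lanes.
void Load8Splat(const uint8_t* mem, uint8_t out[16]) {
  for (int i = 0; i < 16; ++i) out[i] = mem[0];
}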
+
+void LiftoffAssembler::emit_s8x16_swizzle(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ UseScratchRegisterScope temps(this);
+
+ NeonListOperand table(liftoff::GetSimd128Register(lhs));
+ if (dst == lhs) {
+ // dst will be overwritten, so keep the table somewhere else.
+ QwNeonRegister tbl = temps.AcquireQ();
+ TurboAssembler::Move(tbl, liftoff::GetSimd128Register(lhs));
+ table = NeonListOperand(tbl);
+ }
+
+ vtbl(dst.low_fp(), table, rhs.low_fp());
+ vtbl(dst.high_fp(), table, rhs.high_fp());
+}
+
void LiftoffAssembler::emit_f64x2_splat(LiftoffRegister dst,
LiftoffRegister src) {
TurboAssembler::Move(dst.low_fp(), src.fp());
@@ -2273,12 +2447,37 @@ void LiftoffAssembler::emit_i64x2_neg(LiftoffRegister dst,
void LiftoffAssembler::emit_i64x2_shl(LiftoffRegister dst, LiftoffRegister lhs,
LiftoffRegister rhs) {
- bailout(kSimd, "i64x2_shl");
+ liftoff::EmitSimdShift<liftoff::kLeft, NeonS64, Neon32>(this, dst, lhs, rhs);
}
void LiftoffAssembler::emit_i64x2_shli(LiftoffRegister dst, LiftoffRegister lhs,
int32_t rhs) {
- bailout(kSimd, "i64x2_shli");
+ vshl(NeonS64, liftoff::GetSimd128Register(dst),
+ liftoff::GetSimd128Register(lhs), rhs & 63);
+}
+
+void LiftoffAssembler::emit_i64x2_shr_s(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ liftoff::EmitSimdShift<liftoff::kRight, NeonS64, Neon32>(this, dst, lhs, rhs);
+}
+
+void LiftoffAssembler::emit_i64x2_shri_s(LiftoffRegister dst,
+ LiftoffRegister lhs, int32_t rhs) {
+ liftoff::EmitSimdShiftImmediate<liftoff::kRight, NeonS64>(this, dst, lhs,
+ rhs);
+}
+
+void LiftoffAssembler::emit_i64x2_shr_u(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ liftoff::EmitSimdShift<liftoff::kRight, NeonU64, Neon32>(this, dst, lhs, rhs);
+}
+
+void LiftoffAssembler::emit_i64x2_shri_u(LiftoffRegister dst,
+ LiftoffRegister lhs, int32_t rhs) {
+ liftoff::EmitSimdShiftImmediate<liftoff::kRight, NeonU64>(this, dst, lhs,
+ rhs);
}
void LiftoffAssembler::emit_i64x2_add(LiftoffRegister dst, LiftoffRegister lhs,
@@ -2306,15 +2505,18 @@ void LiftoffAssembler::emit_i64x2_mul(LiftoffRegister dst, LiftoffRegister lhs,
QwNeonRegister tmp1 = left;
QwNeonRegister tmp2 = right;
- if (cache_state()->is_used(lhs) && cache_state()->is_used(rhs)) {
+ LiftoffRegList used_plus_dst =
+ cache_state()->used_registers | LiftoffRegList::ForRegs(dst);
+
+ if (used_plus_dst.has(lhs) && used_plus_dst.has(rhs)) {
tmp1 = temps.AcquireQ();
// We only have 1 scratch Q register, so acquire another ourselves.
LiftoffRegList pinned = LiftoffRegList::ForRegs(dst);
LiftoffRegister unused_pair = GetUnusedRegister(kFpRegPair, pinned);
tmp2 = liftoff::GetSimd128Register(unused_pair);
- } else if (cache_state()->is_used(lhs)) {
+ } else if (used_plus_dst.has(lhs)) {
tmp1 = temps.AcquireQ();
- } else if (cache_state()->is_used(rhs)) {
+ } else if (used_plus_dst.has(rhs)) {
tmp2 = temps.AcquireQ();
}
@@ -2363,14 +2565,79 @@ void LiftoffAssembler::emit_i32x4_neg(LiftoffRegister dst,
liftoff::GetSimd128Register(src));
}
+void LiftoffAssembler::emit_v32x4_anytrue(LiftoffRegister dst,
+ LiftoffRegister src) {
+ liftoff::EmitAnyTrue(this, dst, src);
+}
+
+void LiftoffAssembler::emit_v32x4_alltrue(LiftoffRegister dst,
+ LiftoffRegister src) {
+ UseScratchRegisterScope temps(this);
+ DwVfpRegister scratch = temps.AcquireD();
+ vpmin(NeonU32, scratch, src.low_fp(), src.high_fp());
+ vpmin(NeonU32, scratch, scratch, scratch);
+ ExtractLane(dst.gp(), scratch, NeonS32, 0);
+ cmp(dst.gp(), Operand(0));
+ mov(dst.gp(), Operand(1), LeaveCC, ne);
+}
+
+void LiftoffAssembler::emit_i32x4_bitmask(LiftoffRegister dst,
+ LiftoffRegister src) {
+ UseScratchRegisterScope temps(this);
+ Simd128Register tmp = liftoff::GetSimd128Register(src);
+ Simd128Register mask = temps.AcquireQ();
+
+ if (cache_state()->is_used(src)) {
+ // We only have 1 scratch Q register, so try and reuse src.
+ LiftoffRegList pinned = LiftoffRegList::ForRegs(src);
+ LiftoffRegister unused_pair = GetUnusedRegister(kFpRegPair, pinned);
+ mask = liftoff::GetSimd128Register(unused_pair);
+ }
+
+ vshr(NeonS32, tmp, liftoff::GetSimd128Register(src), 31);
+ // The mask sets bit i in lane i. ANDing with tmp keeps that bit only for
+ // lanes whose sign bit is set (negative lanes); other lanes become 0.
+ vmov(mask.low(), Double((uint64_t)0x0000'0002'0000'0001));
+ vmov(mask.high(), Double((uint64_t)0x0000'0008'0000'0004));
+ vand(tmp, mask, tmp);
+ vpadd(Neon32, tmp.low(), tmp.low(), tmp.high());
+ vpadd(Neon32, tmp.low(), tmp.low(), kDoubleRegZero);
+ VmovLow(dst.gp(), tmp.low());
+}
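A scalar model of the bitmask computation above (sketch only):

#include <cstdint>

// i32x4.bitmask: bit i of the result is the sign bit of lane i.
uint32_t I32x4BitMask(const int32_t lanes[4]) {
  uint32_t result = 0;
  for (int i = 0; i < 4; ++i) {
    // vshr by 31 yields all-ones for negative lanes; the vand keeps bit i.
    uint32_t all_ones_if_negative = lanes[i] < 0 ? ~uint32_t{0} : 0;
    result += all_ones_if_negative & (uint32_t{1} << i);  // the vpadd steps
  }
  return result;
}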
+
void LiftoffAssembler::emit_i32x4_shl(LiftoffRegister dst, LiftoffRegister lhs,
LiftoffRegister rhs) {
- bailout(kSimd, "i32x4_shl");
+ liftoff::EmitSimdShift<liftoff::kLeft, NeonS32, Neon32>(this, dst, lhs, rhs);
}
void LiftoffAssembler::emit_i32x4_shli(LiftoffRegister dst, LiftoffRegister lhs,
int32_t rhs) {
- bailout(kSimd, "i32x4_shli");
+ vshl(NeonS32, liftoff::GetSimd128Register(dst),
+ liftoff::GetSimd128Register(lhs), rhs & 31);
+}
+
+void LiftoffAssembler::emit_i32x4_shr_s(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ liftoff::EmitSimdShift<liftoff::kRight, NeonS32, Neon32>(this, dst, lhs, rhs);
+}
+
+void LiftoffAssembler::emit_i32x4_shri_s(LiftoffRegister dst,
+ LiftoffRegister lhs, int32_t rhs) {
+ liftoff::EmitSimdShiftImmediate<liftoff::kRight, NeonS32>(this, dst, lhs,
+ rhs);
+}
+
+void LiftoffAssembler::emit_i32x4_shr_u(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ liftoff::EmitSimdShift<liftoff::kRight, NeonU32, Neon32>(this, dst, lhs, rhs);
+}
+
+void LiftoffAssembler::emit_i32x4_shri_u(LiftoffRegister dst,
+ LiftoffRegister lhs, int32_t rhs) {
+ liftoff::EmitSimdShiftImmediate<liftoff::kRight, NeonU32>(this, dst, lhs,
+ rhs);
}
void LiftoffAssembler::emit_i32x4_add(LiftoffRegister dst, LiftoffRegister lhs,
@@ -2430,14 +2697,81 @@ void LiftoffAssembler::emit_i16x8_neg(LiftoffRegister dst,
liftoff::GetSimd128Register(src));
}
+void LiftoffAssembler::emit_v16x8_anytrue(LiftoffRegister dst,
+ LiftoffRegister src) {
+ liftoff::EmitAnyTrue(this, dst, src);
+}
+
+void LiftoffAssembler::emit_v16x8_alltrue(LiftoffRegister dst,
+ LiftoffRegister src) {
+ UseScratchRegisterScope temps(this);
+ DwVfpRegister scratch = temps.AcquireD();
+ vpmin(NeonU16, scratch, src.low_fp(), src.high_fp());
+ vpmin(NeonU16, scratch, scratch, scratch);
+ vpmin(NeonU16, scratch, scratch, scratch);
+ ExtractLane(dst.gp(), scratch, NeonS16, 0);
+ cmp(dst.gp(), Operand(0));
+ mov(dst.gp(), Operand(1), LeaveCC, ne);
+}
+
+void LiftoffAssembler::emit_i16x8_bitmask(LiftoffRegister dst,
+ LiftoffRegister src) {
+ UseScratchRegisterScope temps(this);
+ Simd128Register tmp = liftoff::GetSimd128Register(src);
+ Simd128Register mask = temps.AcquireQ();
+
+ if (cache_state()->is_used(src)) {
+ // We only have 1 scratch Q register, so try and reuse src.
+ LiftoffRegList pinned = LiftoffRegList::ForRegs(src);
+ LiftoffRegister unused_pair = GetUnusedRegister(kFpRegPair, pinned);
+ mask = liftoff::GetSimd128Register(unused_pair);
+ }
+
+ vshr(NeonS16, tmp, liftoff::GetSimd128Register(src), 15);
+ // The mask sets bit i in lane i. ANDing with tmp keeps that bit only for
+ // lanes whose sign bit is set (negative lanes); other lanes become 0.
+ vmov(mask.low(), Double((uint64_t)0x0008'0004'0002'0001));
+ vmov(mask.high(), Double((uint64_t)0x0080'0040'0020'0010));
+ vand(tmp, mask, tmp);
+ vpadd(Neon16, tmp.low(), tmp.low(), tmp.high());
+ vpadd(Neon16, tmp.low(), tmp.low(), tmp.low());
+ vpadd(Neon16, tmp.low(), tmp.low(), tmp.low());
+ vmov(NeonU16, dst.gp(), tmp.low(), 0);
+}
+
void LiftoffAssembler::emit_i16x8_shl(LiftoffRegister dst, LiftoffRegister lhs,
LiftoffRegister rhs) {
- bailout(kSimd, "i16x8_shl");
+ liftoff::EmitSimdShift<liftoff::kLeft, NeonS16, Neon16>(this, dst, lhs, rhs);
}
void LiftoffAssembler::emit_i16x8_shli(LiftoffRegister dst, LiftoffRegister lhs,
int32_t rhs) {
- bailout(kSimd, "i16x8_shli");
+ vshl(NeonS16, liftoff::GetSimd128Register(dst),
+ liftoff::GetSimd128Register(lhs), rhs & 15);
+}
+
+void LiftoffAssembler::emit_i16x8_shr_s(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ liftoff::EmitSimdShift<liftoff::kRight, NeonS16, Neon16>(this, dst, lhs, rhs);
+}
+
+void LiftoffAssembler::emit_i16x8_shri_s(LiftoffRegister dst,
+ LiftoffRegister lhs, int32_t rhs) {
+ liftoff::EmitSimdShiftImmediate<liftoff::kRight, NeonS16>(this, dst, lhs,
+ rhs);
+}
+
+void LiftoffAssembler::emit_i16x8_shr_u(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ liftoff::EmitSimdShift<liftoff::kRight, NeonU16, Neon16>(this, dst, lhs, rhs);
+}
+
+void LiftoffAssembler::emit_i16x8_shri_u(LiftoffRegister dst,
+ LiftoffRegister lhs, int32_t rhs) {
+ liftoff::EmitSimdShiftImmediate<liftoff::kRight, NeonU16>(this, dst, lhs,
+ rhs);
}
void LiftoffAssembler::emit_i16x8_add(LiftoffRegister dst, LiftoffRegister lhs,
@@ -2537,6 +2871,60 @@ void LiftoffAssembler::emit_i16x8_replace_lane(LiftoffRegister dst,
imm_lane_idx);
}
+void LiftoffAssembler::emit_s8x16_shuffle(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ LiftoffRegister rhs,
+ const uint8_t shuffle[16]) {
+ Simd128Register dest = liftoff::GetSimd128Register(dst);
+ Simd128Register src1 = liftoff::GetSimd128Register(lhs);
+ Simd128Register src2 = liftoff::GetSimd128Register(rhs);
+ UseScratchRegisterScope temps(this);
+ Simd128Register scratch = temps.AcquireQ();
+ if ((src1 != src2) && src1.code() + 1 != src2.code()) {
+ // vtbl requires the operands to be consecutive or the same. If they are
+ // neither, move src1 and src2 to q14 and q15, which are unused because they
+ // are not allocatable in Liftoff. If the operands are the same, a smaller
+ // list operand (table_size = 2) is built below instead.
+ static_assert(!(kLiftoffAssemblerFpCacheRegs &
+ (d28.bit() | d29.bit() | d30.bit() | d31.bit())),
+ "This only works if q14-q15 (d28-d31) are not used.");
+ vmov(q14, src1);
+ src1 = q14;
+ vmov(q15, src2);
+ src2 = q15;
+ }
+
+ int table_size = src1 == src2 ? 2 : 4;
+ uint32_t mask = table_size == 2 ? 0x0F0F0F0F : 0x1F1F1F1F;
+
+ int scratch_s_base = scratch.code() * 4;
+ for (int j = 0; j < 4; j++) {
+ uint32_t imm = 0;
+ for (int i = 3; i >= 0; i--) {
+ imm = (imm << 8) | shuffle[j * 4 + i];
+ }
+ uint32_t four_lanes = imm;
+ // Ensure indices are in [0,15] if table_size is 2, or [0,31] if 4.
+ four_lanes &= mask;
+ vmov(SwVfpRegister::from_code(scratch_s_base + j),
+ Float32::FromBits(four_lanes));
+ }
+
+ DwVfpRegister table_base = src1.low();
+ NeonListOperand table(table_base, table_size);
+
+ if (dest != src1 && dest != src2) {
+ vtbl(dest.low(), table, scratch.low());
+ vtbl(dest.high(), table, scratch.high());
+ } else {
+ vtbl(scratch.low(), table, scratch.low());
+ vtbl(scratch.high(), table, scratch.high());
+ vmov(dest, scratch);
+ }
+}
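For reference, a scalar model of the s8x16.shuffle semantics that the code above implements with vtbl (sketch only):

#include <cstdint>

// Each output byte i selects byte shuffle[i] from the 32-byte concatenation of
// the two inputs; indices are masked as in the code above (0x1F, or 0x0F when
// both inputs are the same register).
void S8x16Shuffle(const uint8_t a[16], const uint8_t b[16],
                  const uint8_t shuffle[16], uint8_t out[16]) {
  uint8_t table[32];
  for (int i = 0; i < 16; ++i) {
    table[i] = a[i];
    table[16 + i] = b[i];
  }
  for (int i = 0; i < 16; ++i) out[i] = table[shuffle[i] & 0x1F];
}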
+
void LiftoffAssembler::emit_i8x16_splat(LiftoffRegister dst,
LiftoffRegister src) {
vdup(Neon8, liftoff::GetSimd128Register(dst), src.gp());
@@ -2569,14 +2957,82 @@ void LiftoffAssembler::emit_i8x16_neg(LiftoffRegister dst,
liftoff::GetSimd128Register(src));
}
+void LiftoffAssembler::emit_v8x16_anytrue(LiftoffRegister dst,
+ LiftoffRegister src) {
+ liftoff::EmitAnyTrue(this, dst, src);
+}
+
+void LiftoffAssembler::emit_v8x16_alltrue(LiftoffRegister dst,
+ LiftoffRegister src) {
+ UseScratchRegisterScope temps(this);
+ DwVfpRegister scratch = temps.AcquireD();
+ vpmin(NeonU8, scratch, src.low_fp(), src.high_fp());
+ vpmin(NeonU8, scratch, scratch, scratch);
+ vpmin(NeonU8, scratch, scratch, scratch);
+ vpmin(NeonU8, scratch, scratch, scratch);
+ ExtractLane(dst.gp(), scratch, NeonS8, 0);
+ cmp(dst.gp(), Operand(0));
+ mov(dst.gp(), Operand(1), LeaveCC, ne);
+}
+
+void LiftoffAssembler::emit_i8x16_bitmask(LiftoffRegister dst,
+ LiftoffRegister src) {
+ UseScratchRegisterScope temps(this);
+ Simd128Register tmp = liftoff::GetSimd128Register(src);
+ Simd128Register mask = temps.AcquireQ();
+
+ if (cache_state()->is_used(src)) {
+ // We only have 1 scratch Q register, so try and reuse src.
+ LiftoffRegList pinned = LiftoffRegList::ForRegs(src);
+ LiftoffRegister unused_pair = GetUnusedRegister(kFpRegPair, pinned);
+ mask = liftoff::GetSimd128Register(unused_pair);
+ }
+
+ vshr(NeonS8, tmp, liftoff::GetSimd128Register(src), 7);
+ // The mask sets bit (i mod 8) in lane i. ANDing with tmp keeps that bit
+ // only for lanes whose sign bit is set; non-negative lanes become 0.
+ vmov(mask.low(), Double((uint64_t)0x8040'2010'0804'0201));
+ vmov(mask.high(), Double((uint64_t)0x8040'2010'0804'0201));
+ vand(tmp, mask, tmp);
+ vext(mask, tmp, tmp, 8);
+ vzip(Neon8, mask, tmp);
+ vpadd(Neon16, tmp.low(), tmp.low(), tmp.high());
+ vpadd(Neon16, tmp.low(), tmp.low(), tmp.low());
+ vpadd(Neon16, tmp.low(), tmp.low(), tmp.low());
+ vmov(NeonU16, dst.gp(), tmp.low(), 0);
+}
+
void LiftoffAssembler::emit_i8x16_shl(LiftoffRegister dst, LiftoffRegister lhs,
LiftoffRegister rhs) {
- bailout(kSimd, "i8x16_shl");
+ liftoff::EmitSimdShift<liftoff::kLeft, NeonS8, Neon8>(this, dst, lhs, rhs);
}
void LiftoffAssembler::emit_i8x16_shli(LiftoffRegister dst, LiftoffRegister lhs,
int32_t rhs) {
- bailout(kSimd, "i8x16_shli");
+ vshl(NeonS8, liftoff::GetSimd128Register(dst),
+ liftoff::GetSimd128Register(lhs), rhs & 7);
+}
+
+void LiftoffAssembler::emit_i8x16_shr_s(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ liftoff::EmitSimdShift<liftoff::kRight, NeonS8, Neon8>(this, dst, lhs, rhs);
+}
+
+void LiftoffAssembler::emit_i8x16_shri_s(LiftoffRegister dst,
+ LiftoffRegister lhs, int32_t rhs) {
+ liftoff::EmitSimdShiftImmediate<liftoff::kRight, NeonS8>(this, dst, lhs, rhs);
+}
+
+void LiftoffAssembler::emit_i8x16_shr_u(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ liftoff::EmitSimdShift<liftoff::kRight, NeonU8, Neon8>(this, dst, lhs, rhs);
+}
+
+void LiftoffAssembler::emit_i8x16_shri_u(LiftoffRegister dst,
+ LiftoffRegister lhs, int32_t rhs) {
+ liftoff::EmitSimdShiftImmediate<liftoff::kRight, NeonU8>(this, dst, lhs, rhs);
}
void LiftoffAssembler::emit_i8x16_add(LiftoffRegister dst, LiftoffRegister lhs,
@@ -2842,6 +3298,30 @@ void LiftoffAssembler::emit_s128_select(LiftoffRegister dst,
liftoff::GetSimd128Register(src2));
}
+void LiftoffAssembler::emit_i32x4_sconvert_f32x4(LiftoffRegister dst,
+ LiftoffRegister src) {
+ vcvt_s32_f32(liftoff::GetSimd128Register(dst),
+ liftoff::GetSimd128Register(src));
+}
+
+void LiftoffAssembler::emit_i32x4_uconvert_f32x4(LiftoffRegister dst,
+ LiftoffRegister src) {
+ vcvt_u32_f32(liftoff::GetSimd128Register(dst),
+ liftoff::GetSimd128Register(src));
+}
+
+void LiftoffAssembler::emit_f32x4_sconvert_i32x4(LiftoffRegister dst,
+ LiftoffRegister src) {
+ vcvt_f32_s32(liftoff::GetSimd128Register(dst),
+ liftoff::GetSimd128Register(src));
+}
+
+void LiftoffAssembler::emit_f32x4_uconvert_i32x4(LiftoffRegister dst,
+ LiftoffRegister src) {
+ vcvt_f32_u32(liftoff::GetSimd128Register(dst),
+ liftoff::GetSimd128Register(src));
+}
+
void LiftoffAssembler::emit_i8x16_sconvert_i16x8(LiftoffRegister dst,
LiftoffRegister lhs,
LiftoffRegister rhs) {
diff --git a/chromium/v8/src/wasm/baseline/arm64/liftoff-assembler-arm64.h b/chromium/v8/src/wasm/baseline/arm64/liftoff-assembler-arm64.h
index 9c142e4ad0f..03643c6edd7 100644
--- a/chromium/v8/src/wasm/baseline/arm64/liftoff-assembler-arm64.h
+++ b/chromium/v8/src/wasm/baseline/arm64/liftoff-assembler-arm64.h
@@ -104,6 +104,76 @@ inline MemOperand GetMemOp(LiftoffAssembler* assm,
return MemOperand(addr.X(), offset_imm);
}
+enum class ShiftDirection : bool { kLeft, kRight };
+
+enum class ShiftSign : bool { kSigned, kUnsigned };
+
+template <ShiftDirection dir, ShiftSign sign = ShiftSign::kSigned>
+inline void EmitSimdShift(LiftoffAssembler* assm, VRegister dst, VRegister lhs,
+ Register rhs, VectorFormat format) {
+ DCHECK_IMPLIES(dir == ShiftDirection::kLeft, sign == ShiftSign::kSigned);
+ DCHECK(dst.IsSameFormat(lhs));
+ DCHECK_EQ(dst.LaneCount(), LaneCountFromFormat(format));
+
+ UseScratchRegisterScope temps(assm);
+ VRegister tmp = temps.AcquireV(format);
+ Register shift = dst.Is2D() ? temps.AcquireX() : temps.AcquireW();
+ int mask = LaneSizeInBitsFromFormat(format) - 1;
+ assm->And(shift, rhs, mask);
+ assm->Dup(tmp, shift);
+
+ if (dir == ShiftDirection::kRight) {
+ assm->Neg(tmp, tmp);
+ }
+
+ if (sign == ShiftSign::kSigned) {
+ assm->Sshl(dst, lhs, tmp);
+ } else {
+ assm->Ushl(dst, lhs, tmp);
+ }
+}
+
+template <VectorFormat format, ShiftSign sign>
+inline void EmitSimdShiftRightImmediate(LiftoffAssembler* assm, VRegister dst,
+ VRegister lhs, int32_t rhs) {
+ // Sshr and Ushr do not allow a shift amount of 0, so check for that here.
+ int mask = LaneSizeInBitsFromFormat(format) - 1;
+ int32_t shift = rhs & mask;
+ if (!shift) {
+ if (dst != lhs) {
+ assm->Mov(dst, lhs);
+ }
+ return;
+ }
+
+ if (sign == ShiftSign::kSigned) {
+ assm->Sshr(dst, lhs, rhs & mask);
+ } else {
+ assm->Ushr(dst, lhs, rhs & mask);
+ }
+}
+
+inline void EmitAnyTrue(LiftoffAssembler* assm, LiftoffRegister dst,
+ LiftoffRegister src) {
+ // AnyTrue does not depend on the number of lanes, so we can use V4S for all.
+ UseScratchRegisterScope scope(assm);
+ VRegister temp = scope.AcquireV(kFormatS);
+ assm->Umaxv(temp, src.fp().V4S());
+ assm->Umov(dst.gp().W(), temp, 0);
+ assm->Cmp(dst.gp().W(), 0);
+ assm->Cset(dst.gp().W(), ne);
+}
+
+inline void EmitAllTrue(LiftoffAssembler* assm, LiftoffRegister dst,
+ LiftoffRegister src, VectorFormat format) {
+ UseScratchRegisterScope scope(assm);
+ VRegister temp = scope.AcquireV(ScalarFormatFromFormat(format));
+ assm->Uminv(temp, VRegister::Create(src.fp().code(), format));
+ assm->Umov(dst.gp().W(), temp, 0);
+ assm->Cmp(dst.gp().W(), 0);
+ assm->Cset(dst.gp().W(), ne);
+}
+
} // namespace liftoff
int LiftoffAssembler::PrepareStackFrame() {
@@ -299,8 +369,6 @@ void LiftoffAssembler::Load(LiftoffRegister dst, Register src_addr,
case LoadType::kS128Load:
Ldr(dst.fp().Q(), src_op);
break;
- default:
- UNREACHABLE();
}
}
@@ -337,65 +405,280 @@ void LiftoffAssembler::Store(Register dst_addr, Register offset_reg,
case StoreType::kS128Store:
Str(src.fp().Q(), dst_op);
break;
+ }
+}
+
+namespace liftoff {
+#define __ lasm->
+
+inline Register CalculateActualAddress(LiftoffAssembler* lasm,
+ Register addr_reg, Register offset_reg,
+ int32_t offset_imm,
+ Register result_reg) {
+ DCHECK_NE(offset_reg, no_reg);
+ DCHECK_NE(addr_reg, no_reg);
+ __ Add(result_reg, addr_reg, Operand(offset_reg));
+ if (offset_imm != 0) {
+ __ Add(result_reg, result_reg, Operand(offset_imm));
+ }
+ return result_reg;
+}
+
+enum class Binop { kAdd, kSub, kAnd, kOr, kXor, kExchange };
+
+inline void AtomicBinop(LiftoffAssembler* lasm, Register dst_addr,
+ Register offset_reg, uint32_t offset_imm,
+ LiftoffRegister value, LiftoffRegister result,
+ StoreType type, Binop op) {
+ LiftoffRegList pinned =
+ LiftoffRegList::ForRegs(dst_addr, offset_reg, value, result);
+ Register store_result = pinned.set(__ GetUnusedRegister(kGpReg, pinned)).gp();
+
+ // Make sure that {result} is unique.
+ Register result_reg = result.gp();
+ if (result_reg == value.gp() || result_reg == dst_addr ||
+ result_reg == offset_reg) {
+ result_reg = __ GetUnusedRegister(kGpReg, pinned).gp();
+ }
+
+ UseScratchRegisterScope temps(lasm);
+ Register actual_addr = liftoff::CalculateActualAddress(
+ lasm, dst_addr, offset_reg, offset_imm, temps.AcquireX());
+
+ // Allocate an additional {temp} register to hold the result that should be
+ // stored to memory. Note that {temp} and {store_result} are not allowed to be
+ // the same register.
+ Register temp = temps.AcquireX();
+
+ Label retry;
+ __ Bind(&retry);
+ switch (type.value()) {
+ case StoreType::kI64Store8:
+ case StoreType::kI32Store8:
+ __ ldaxrb(result_reg.W(), actual_addr);
+ break;
+ case StoreType::kI64Store16:
+ case StoreType::kI32Store16:
+ __ ldaxrh(result_reg.W(), actual_addr);
+ break;
+ case StoreType::kI64Store32:
+ case StoreType::kI32Store:
+ __ ldaxr(result_reg.W(), actual_addr);
+ break;
+ case StoreType::kI64Store:
+ __ ldaxr(result_reg.X(), actual_addr);
+ break;
default:
UNREACHABLE();
}
+
+ switch (op) {
+ case Binop::kAdd:
+ __ add(temp, result_reg, value.gp());
+ break;
+ case Binop::kSub:
+ __ sub(temp, result_reg, value.gp());
+ break;
+ case Binop::kAnd:
+ __ and_(temp, result_reg, value.gp());
+ break;
+ case Binop::kOr:
+ __ orr(temp, result_reg, value.gp());
+ break;
+ case Binop::kXor:
+ __ eor(temp, result_reg, value.gp());
+ break;
+ case Binop::kExchange:
+ __ mov(temp, value.gp());
+ break;
+ }
+
+ switch (type.value()) {
+ case StoreType::kI64Store8:
+ case StoreType::kI32Store8:
+ __ stlxrb(store_result.W(), temp.W(), actual_addr);
+ break;
+ case StoreType::kI64Store16:
+ case StoreType::kI32Store16:
+ __ stlxrh(store_result.W(), temp.W(), actual_addr);
+ break;
+ case StoreType::kI64Store32:
+ case StoreType::kI32Store:
+ __ stlxr(store_result.W(), temp.W(), actual_addr);
+ break;
+ case StoreType::kI64Store:
+ __ stlxr(store_result.W(), temp.X(), actual_addr);
+ break;
+ default:
+ UNREACHABLE();
+ }
+
+ __ Cbnz(store_result.W(), &retry);
+
+ if (result_reg != result.gp()) {
+ __ mov(result.gp(), result_reg);
+ }
}
+#undef __
+} // namespace liftoff
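Functionally, the ldaxr/stlxr retry loops above are the load-linked/store-conditional form of an atomic read-modify-write. A portable C++ analogue of the kAdd case, for intuition only (not the V8 implementation):

#include <atomic>
#include <cstdint>

// Returns the old value, like the Wasm atomic.rmw instructions implemented above.
uint32_t AtomicAdd32(std::atomic<uint32_t>* addr, uint32_t value) {
  uint32_t old = addr->load(std::memory_order_acquire);
  // Retry until the store succeeds, mirroring Cbnz(store_result, &retry).
  while (!addr->compare_exchange_weak(old, old + value,
                                      std::memory_order_acq_rel,
                                      std::memory_order_acquire)) {
    // compare_exchange_weak reloads the current value into |old| on failure.
  }
  return old;
}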
+
void LiftoffAssembler::AtomicLoad(LiftoffRegister dst, Register src_addr,
Register offset_reg, uint32_t offset_imm,
LoadType type, LiftoffRegList pinned) {
- bailout(kAtomics, "AtomicLoad");
+ UseScratchRegisterScope temps(this);
+ Register src_reg = liftoff::CalculateActualAddress(
+ this, src_addr, offset_reg, offset_imm, temps.AcquireX());
+ switch (type.value()) {
+ case LoadType::kI32Load8U:
+ case LoadType::kI64Load8U:
+ Ldarb(dst.gp().W(), src_reg);
+ return;
+ case LoadType::kI32Load16U:
+ case LoadType::kI64Load16U:
+ Ldarh(dst.gp().W(), src_reg);
+ return;
+ case LoadType::kI32Load:
+ case LoadType::kI64Load32U:
+ Ldar(dst.gp().W(), src_reg);
+ return;
+ case LoadType::kI64Load:
+ Ldar(dst.gp().X(), src_reg);
+ return;
+ default:
+ UNREACHABLE();
+ }
}
void LiftoffAssembler::AtomicStore(Register dst_addr, Register offset_reg,
uint32_t offset_imm, LiftoffRegister src,
StoreType type, LiftoffRegList pinned) {
- bailout(kAtomics, "AtomicStore");
+ UseScratchRegisterScope temps(this);
+ Register dst_reg = liftoff::CalculateActualAddress(
+ this, dst_addr, offset_reg, offset_imm, temps.AcquireX());
+ switch (type.value()) {
+ case StoreType::kI64Store8:
+ case StoreType::kI32Store8:
+ Stlrb(src.gp().W(), dst_reg);
+ return;
+ case StoreType::kI64Store16:
+ case StoreType::kI32Store16:
+ Stlrh(src.gp().W(), dst_reg);
+ return;
+ case StoreType::kI64Store32:
+ case StoreType::kI32Store:
+ Stlr(src.gp().W(), dst_reg);
+ return;
+ case StoreType::kI64Store:
+ Stlr(src.gp().X(), dst_reg);
+ return;
+ default:
+ UNREACHABLE();
+ }
}
void LiftoffAssembler::AtomicAdd(Register dst_addr, Register offset_reg,
uint32_t offset_imm, LiftoffRegister value,
LiftoffRegister result, StoreType type) {
- bailout(kAtomics, "AtomicAdd");
+ liftoff::AtomicBinop(this, dst_addr, offset_reg, offset_imm, value, result,
+ type, liftoff::Binop::kAdd);
}
void LiftoffAssembler::AtomicSub(Register dst_addr, Register offset_reg,
uint32_t offset_imm, LiftoffRegister value,
LiftoffRegister result, StoreType type) {
- bailout(kAtomics, "AtomicSub");
+ liftoff::AtomicBinop(this, dst_addr, offset_reg, offset_imm, value, result,
+ type, liftoff::Binop::kSub);
}
void LiftoffAssembler::AtomicAnd(Register dst_addr, Register offset_reg,
uint32_t offset_imm, LiftoffRegister value,
LiftoffRegister result, StoreType type) {
- bailout(kAtomics, "AtomicAnd");
+ liftoff::AtomicBinop(this, dst_addr, offset_reg, offset_imm, value, result,
+ type, liftoff::Binop::kAnd);
}
void LiftoffAssembler::AtomicOr(Register dst_addr, Register offset_reg,
uint32_t offset_imm, LiftoffRegister value,
LiftoffRegister result, StoreType type) {
- bailout(kAtomics, "AtomicOr");
+ liftoff::AtomicBinop(this, dst_addr, offset_reg, offset_imm, value, result,
+ type, liftoff::Binop::kOr);
}
void LiftoffAssembler::AtomicXor(Register dst_addr, Register offset_reg,
uint32_t offset_imm, LiftoffRegister value,
LiftoffRegister result, StoreType type) {
- bailout(kAtomics, "AtomicXor");
+ liftoff::AtomicBinop(this, dst_addr, offset_reg, offset_imm, value, result,
+ type, liftoff::Binop::kXor);
}
void LiftoffAssembler::AtomicExchange(Register dst_addr, Register offset_reg,
uint32_t offset_imm,
LiftoffRegister value,
LiftoffRegister result, StoreType type) {
- bailout(kAtomics, "AtomicExchange");
+ liftoff::AtomicBinop(this, dst_addr, offset_reg, offset_imm, value, result,
+ type, liftoff::Binop::kExchange);
}
void LiftoffAssembler::AtomicCompareExchange(
Register dst_addr, Register offset_reg, uint32_t offset_imm,
LiftoffRegister expected, LiftoffRegister new_value, LiftoffRegister result,
StoreType type) {
- bailout(kAtomics, "AtomicCompareExchange");
+ LiftoffRegList pinned =
+ LiftoffRegList::ForRegs(dst_addr, offset_reg, expected, new_value);
+
+ Register result_reg = result.gp();
+ if (pinned.has(result)) {
+ result_reg = GetUnusedRegister(kGpReg, pinned).gp();
+ }
+
+ UseScratchRegisterScope temps(this);
+ Register store_result = temps.AcquireW();
+
+ Register actual_addr = liftoff::CalculateActualAddress(
+ this, dst_addr, offset_reg, offset_imm, temps.AcquireX());
+
+ Label retry;
+ Label done;
+ Bind(&retry);
+ switch (type.value()) {
+ case StoreType::kI64Store8:
+ case StoreType::kI32Store8:
+ ldaxrb(result_reg.W(), actual_addr);
+ Cmp(result.gp().W(), Operand(expected.gp().W(), UXTB));
+ B(ne, &done);
+ stlxrb(store_result.W(), new_value.gp().W(), actual_addr);
+ break;
+ case StoreType::kI64Store16:
+ case StoreType::kI32Store16:
+ ldaxrh(result_reg.W(), actual_addr);
+ Cmp(result.gp().W(), Operand(expected.gp().W(), UXTH));
+ B(ne, &done);
+ stlxrh(store_result.W(), new_value.gp().W(), actual_addr);
+ break;
+ case StoreType::kI64Store32:
+ case StoreType::kI32Store:
+ ldaxr(result_reg.W(), actual_addr);
+ Cmp(result.gp().W(), Operand(expected.gp().W(), UXTW));
+ B(ne, &done);
+ stlxr(store_result.W(), new_value.gp().W(), actual_addr);
+ break;
+ case StoreType::kI64Store:
+ ldaxr(result_reg.X(), actual_addr);
+ Cmp(result.gp().X(), Operand(expected.gp().X(), UXTX));
+ B(ne, &done);
+ stlxr(store_result.W(), new_value.gp().X(), actual_addr);
+ break;
+ default:
+ UNREACHABLE();
+ }
+
+ Cbnz(store_result.W(), &retry);
+ Bind(&done);
+
+ if (result_reg != result.gp()) {
+ mov(result.gp(), result_reg);
+ }
}
void LiftoffAssembler::AtomicFence() { Dmb(InnerShareable, BarrierAll); }
@@ -439,7 +722,7 @@ void LiftoffAssembler::Move(DoubleRegister dst, DoubleRegister src,
Fmov(dst.D(), src.D());
} else {
DCHECK_EQ(kWasmS128, type);
- Fmov(dst.Q(), src.Q());
+ Mov(dst.Q(), src.Q());
}
}
@@ -921,6 +1204,30 @@ bool LiftoffAssembler::emit_type_conversion(WasmOpcode opcode,
B(trap, ne);
return true;
}
+ case kExprI32SConvertSatF32:
+ Fcvtzs(dst.gp().W(), src.fp().S());
+ return true;
+ case kExprI32UConvertSatF32:
+ Fcvtzu(dst.gp().W(), src.fp().S());
+ return true;
+ case kExprI32SConvertSatF64:
+ Fcvtzs(dst.gp().W(), src.fp().D());
+ return true;
+ case kExprI32UConvertSatF64:
+ Fcvtzu(dst.gp().W(), src.fp().D());
+ return true;
+ case kExprI64SConvertSatF32:
+ Fcvtzs(dst.gp().X(), src.fp().S());
+ return true;
+ case kExprI64UConvertSatF32:
+ Fcvtzu(dst.gp().X(), src.fp().S());
+ return true;
+ case kExprI64SConvertSatF64:
+ Fcvtzs(dst.gp().X(), src.fp().D());
+ return true;
+ case kExprI64UConvertSatF64:
+ Fcvtzu(dst.gp().X(), src.fp().D());
+ return true;
case kExprI32ReinterpretF32:
Fmov(dst.gp().W(), src.fp().S());
return true;
@@ -1102,6 +1409,70 @@ void LiftoffAssembler::emit_f64_set_cond(Condition cond, Register dst,
}
}
+void LiftoffAssembler::LoadTransform(LiftoffRegister dst, Register src_addr,
+ Register offset_reg, uint32_t offset_imm,
+ LoadType type,
+ LoadTransformationKind transform,
+ uint32_t* protected_load_pc) {
+ UseScratchRegisterScope temps(this);
+ MemOperand src_op =
+ liftoff::GetMemOp(this, &temps, src_addr, offset_reg, offset_imm);
+ *protected_load_pc = pc_offset();
+ MachineType memtype = type.mem_type();
+
+ if (transform == LoadTransformationKind::kExtend) {
+ if (memtype == MachineType::Int8()) {
+ Ldr(dst.fp().D(), src_op);
+ Sxtl(dst.fp().V8H(), dst.fp().V8B());
+ } else if (memtype == MachineType::Uint8()) {
+ Ldr(dst.fp().D(), src_op);
+ Uxtl(dst.fp().V8H(), dst.fp().V8B());
+ } else if (memtype == MachineType::Int16()) {
+ Ldr(dst.fp().D(), src_op);
+ Sxtl(dst.fp().V4S(), dst.fp().V4H());
+ } else if (memtype == MachineType::Uint16()) {
+ Ldr(dst.fp().D(), src_op);
+ Uxtl(dst.fp().V4S(), dst.fp().V4H());
+ } else if (memtype == MachineType::Int32()) {
+ Ldr(dst.fp().D(), src_op);
+ Sxtl(dst.fp().V2D(), dst.fp().V2S());
+ } else if (memtype == MachineType::Uint32()) {
+ Ldr(dst.fp().D(), src_op);
+ Uxtl(dst.fp().V2D(), dst.fp().V2S());
+ }
+ } else {
+ // ld1r only allows no offset or post-index, so emit an add.
+ DCHECK_EQ(LoadTransformationKind::kSplat, transform);
+ if (src_op.IsRegisterOffset()) {
+ // We have 2 temporary gp registers, so it is fine to acquire one more here;
+ // it does not matter if we end up acquiring the same one again.
+ Register tmp = temps.AcquireX();
+ Add(tmp, src_op.base(), src_op.regoffset().X());
+ src_op = MemOperand(tmp.X(), 0);
+ } else if (src_op.IsImmediateOffset() && src_op.offset() != 0) {
+ Register tmp = temps.AcquireX();
+ Add(tmp, src_op.base(), src_op.offset());
+ src_op = MemOperand(tmp.X(), 0);
+ }
+
+ if (memtype == MachineType::Int8()) {
+ ld1r(dst.fp().V16B(), src_op);
+ } else if (memtype == MachineType::Int16()) {
+ ld1r(dst.fp().V8H(), src_op);
+ } else if (memtype == MachineType::Int32()) {
+ ld1r(dst.fp().V4S(), src_op);
+ } else if (memtype == MachineType::Int64()) {
+ ld1r(dst.fp().V2D(), src_op);
+ }
+ }
+}
+
+void LiftoffAssembler::emit_s8x16_swizzle(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ Tbl(dst.fp().V16B(), lhs.fp().V16B(), rhs.fp().V16B());
+}
+
void LiftoffAssembler::emit_f64x2_splat(LiftoffRegister dst,
LiftoffRegister src) {
Dup(dst.fp().V2D(), src.fp().D(), 0);
@@ -1262,12 +1633,42 @@ void LiftoffAssembler::emit_i64x2_neg(LiftoffRegister dst,
void LiftoffAssembler::emit_i64x2_shl(LiftoffRegister dst, LiftoffRegister lhs,
LiftoffRegister rhs) {
- bailout(kSimd, "i64x2_shl");
+ liftoff::EmitSimdShift<liftoff::ShiftDirection::kLeft>(
+ this, dst.fp().V2D(), lhs.fp().V2D(), rhs.gp(), kFormat2D);
}
void LiftoffAssembler::emit_i64x2_shli(LiftoffRegister dst, LiftoffRegister lhs,
int32_t rhs) {
- bailout(kSimd, "i64x2_shli");
+ Shl(dst.fp().V2D(), lhs.fp().V2D(), rhs & 63);
+}
+
+void LiftoffAssembler::emit_i64x2_shr_s(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ liftoff::EmitSimdShift<liftoff::ShiftDirection::kRight,
+ liftoff::ShiftSign::kSigned>(
+ this, dst.fp().V2D(), lhs.fp().V2D(), rhs.gp(), kFormat2D);
+}
+
+void LiftoffAssembler::emit_i64x2_shri_s(LiftoffRegister dst,
+ LiftoffRegister lhs, int32_t rhs) {
+ liftoff::EmitSimdShiftRightImmediate<kFormat2D, liftoff::ShiftSign::kSigned>(
+ this, dst.fp().V2D(), lhs.fp().V2D(), rhs);
+}
+
+void LiftoffAssembler::emit_i64x2_shr_u(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ liftoff::EmitSimdShift<liftoff::ShiftDirection::kRight,
+ liftoff::ShiftSign::kUnsigned>(
+ this, dst.fp().V2D(), lhs.fp().V2D(), rhs.gp(), kFormat2D);
+}
+
+void LiftoffAssembler::emit_i64x2_shri_u(LiftoffRegister dst,
+ LiftoffRegister lhs, int32_t rhs) {
+ liftoff::EmitSimdShiftRightImmediate<kFormat2D,
+ liftoff::ShiftSign::kUnsigned>(
+ this, dst.fp().V2D(), lhs.fp().V2D(), rhs);
}
void LiftoffAssembler::emit_i64x2_add(LiftoffRegister dst, LiftoffRegister lhs,
@@ -1327,14 +1728,69 @@ void LiftoffAssembler::emit_i32x4_neg(LiftoffRegister dst,
Neg(dst.fp().V4S(), src.fp().V4S());
}
+void LiftoffAssembler::emit_v32x4_anytrue(LiftoffRegister dst,
+ LiftoffRegister src) {
+ liftoff::EmitAnyTrue(this, dst, src);
+}
+
+void LiftoffAssembler::emit_v32x4_alltrue(LiftoffRegister dst,
+ LiftoffRegister src) {
+ liftoff::EmitAllTrue(this, dst, src, kFormat4S);
+}
+
+void LiftoffAssembler::emit_i32x4_bitmask(LiftoffRegister dst,
+ LiftoffRegister src) {
+ UseScratchRegisterScope temps(this);
+ VRegister tmp = temps.AcquireQ();
+ VRegister mask = temps.AcquireQ();
+
+ Sshr(tmp.V4S(), src.fp().V4S(), 31);
+ // The mask sets bit i in lane i. ANDing with tmp keeps that bit only for
+ // lanes whose sign bit is set (negative lanes); other lanes become 0.
+ Movi(mask.V2D(), 0x0000'0008'0000'0004, 0x0000'0002'0000'0001);
+ And(tmp.V16B(), mask.V16B(), tmp.V16B());
+ Addv(tmp.S(), tmp.V4S());
+ Mov(dst.gp().W(), tmp.V4S(), 0);
+}
+
void LiftoffAssembler::emit_i32x4_shl(LiftoffRegister dst, LiftoffRegister lhs,
LiftoffRegister rhs) {
- bailout(kSimd, "i32x4_shl");
+ liftoff::EmitSimdShift<liftoff::ShiftDirection::kLeft>(
+ this, dst.fp().V4S(), lhs.fp().V4S(), rhs.gp(), kFormat4S);
}
void LiftoffAssembler::emit_i32x4_shli(LiftoffRegister dst, LiftoffRegister lhs,
int32_t rhs) {
- bailout(kSimd, "i32x4_shli");
+ Shl(dst.fp().V4S(), lhs.fp().V4S(), rhs & 31);
+}
+
+void LiftoffAssembler::emit_i32x4_shr_s(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ liftoff::EmitSimdShift<liftoff::ShiftDirection::kRight,
+ liftoff::ShiftSign::kSigned>(
+ this, dst.fp().V4S(), lhs.fp().V4S(), rhs.gp(), kFormat4S);
+}
+
+void LiftoffAssembler::emit_i32x4_shri_s(LiftoffRegister dst,
+ LiftoffRegister lhs, int32_t rhs) {
+ liftoff::EmitSimdShiftRightImmediate<kFormat4S, liftoff::ShiftSign::kSigned>(
+ this, dst.fp().V4S(), lhs.fp().V4S(), rhs);
+}
+
+void LiftoffAssembler::emit_i32x4_shr_u(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ liftoff::EmitSimdShift<liftoff::ShiftDirection::kRight,
+ liftoff::ShiftSign::kUnsigned>(
+ this, dst.fp().V4S(), lhs.fp().V4S(), rhs.gp(), kFormat4S);
+}
+
+void LiftoffAssembler::emit_i32x4_shri_u(LiftoffRegister dst,
+ LiftoffRegister lhs, int32_t rhs) {
+ liftoff::EmitSimdShiftRightImmediate<kFormat4S,
+ liftoff::ShiftSign::kUnsigned>(
+ this, dst.fp().V4S(), lhs.fp().V4S(), rhs);
}
void LiftoffAssembler::emit_i32x4_add(LiftoffRegister dst, LiftoffRegister lhs,
@@ -1408,14 +1864,69 @@ void LiftoffAssembler::emit_i16x8_neg(LiftoffRegister dst,
Neg(dst.fp().V8H(), src.fp().V8H());
}
+void LiftoffAssembler::emit_v16x8_anytrue(LiftoffRegister dst,
+ LiftoffRegister src) {
+ liftoff::EmitAnyTrue(this, dst, src);
+}
+
+void LiftoffAssembler::emit_v16x8_alltrue(LiftoffRegister dst,
+ LiftoffRegister src) {
+ liftoff::EmitAllTrue(this, dst, src, kFormat8H);
+}
+
+void LiftoffAssembler::emit_i16x8_bitmask(LiftoffRegister dst,
+ LiftoffRegister src) {
+ UseScratchRegisterScope temps(this);
+ VRegister tmp = temps.AcquireQ();
+ VRegister mask = temps.AcquireQ();
+
+ Sshr(tmp.V8H(), src.fp().V8H(), 15);
+ // Set the i-th bit in lane i. When ANDed with tmp, lanes that are negative
+ // keep their i-th bit set; non-negative lanes become 0.
+ Movi(mask.V2D(), 0x0080'0040'0020'0010, 0x0008'0004'0002'0001);
+ And(tmp.V16B(), mask.V16B(), tmp.V16B());
+ Addv(tmp.H(), tmp.V8H());
+ Mov(dst.gp().W(), tmp.V8H(), 0);
+}
+
void LiftoffAssembler::emit_i16x8_shl(LiftoffRegister dst, LiftoffRegister lhs,
LiftoffRegister rhs) {
- bailout(kSimd, "i16x8_shl");
+ liftoff::EmitSimdShift<liftoff::ShiftDirection::kLeft>(
+ this, dst.fp().V8H(), lhs.fp().V8H(), rhs.gp(), kFormat8H);
}
void LiftoffAssembler::emit_i16x8_shli(LiftoffRegister dst, LiftoffRegister lhs,
int32_t rhs) {
- bailout(kSimd, "i16x8_shli");
+ Shl(dst.fp().V8H(), lhs.fp().V8H(), rhs & 15);
+}
+
+void LiftoffAssembler::emit_i16x8_shr_s(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ liftoff::EmitSimdShift<liftoff::ShiftDirection::kRight,
+ liftoff::ShiftSign::kSigned>(
+ this, dst.fp().V8H(), lhs.fp().V8H(), rhs.gp(), kFormat8H);
+}
+
+void LiftoffAssembler::emit_i16x8_shri_s(LiftoffRegister dst,
+ LiftoffRegister lhs, int32_t rhs) {
+ liftoff::EmitSimdShiftRightImmediate<kFormat8H, liftoff::ShiftSign::kSigned>(
+ this, dst.fp().V8H(), lhs.fp().V8H(), rhs);
+}
+
+void LiftoffAssembler::emit_i16x8_shr_u(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ liftoff::EmitSimdShift<liftoff::ShiftDirection::kRight,
+ liftoff::ShiftSign::kUnsigned>(
+ this, dst.fp().V8H(), lhs.fp().V8H(), rhs.gp(), kFormat8H);
+}
+
+void LiftoffAssembler::emit_i16x8_shri_u(LiftoffRegister dst,
+ LiftoffRegister lhs, int32_t rhs) {
+ liftoff::EmitSimdShiftRightImmediate<kFormat8H,
+ liftoff::ShiftSign::kUnsigned>(
+ this, dst.fp().V8H(), lhs.fp().V8H(), rhs);
}
void LiftoffAssembler::emit_i16x8_add(LiftoffRegister dst, LiftoffRegister lhs,
@@ -1481,6 +1992,45 @@ void LiftoffAssembler::emit_i16x8_max_u(LiftoffRegister dst,
Umax(dst.fp().V8H(), lhs.fp().V8H(), rhs.fp().V8H());
}
+void LiftoffAssembler::emit_s8x16_shuffle(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ LiftoffRegister rhs,
+ const uint8_t shuffle[16]) {
+ VRegister src1 = lhs.fp();
+ VRegister src2 = rhs.fp();
+ VRegister temp = dst.fp();
+ if (dst == lhs || dst == rhs) {
+ // dst overlaps with lhs or rhs, so we need a temporary.
+ temp = GetUnusedRegister(kFpReg, LiftoffRegList::ForRegs(lhs, rhs)).fp();
+ }
+
+ UseScratchRegisterScope scope(this);
+
+ if (src1 != src2 && !AreConsecutive(src1, src2)) {
+ // Tbl needs consecutive registers, which our scratch registers are.
+ src1 = scope.AcquireV(kFormat16B);
+ src2 = scope.AcquireV(kFormat16B);
+ DCHECK(AreConsecutive(src1, src2));
+ Mov(src1.Q(), lhs.fp().Q());
+ Mov(src2.Q(), rhs.fp().Q());
+ }
+
+ uint8_t mask = lhs == rhs ? 0x0F : 0x1F;
+ int64_t imms[2] = {0, 0};
+ for (int i = 7; i >= 0; i--) {
+ imms[0] = (imms[0] << 8) | (shuffle[i] & mask);
+ imms[1] = (imms[1] << 8) | (shuffle[i + 8] & mask);
+ }
+
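+ // Load the 16 (masked) lane indices as the index vector for Tbl.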
+ Movi(temp.V16B(), imms[1], imms[0]);
+
+ if (src1 == src2) {
+ Tbl(dst.fp().V16B(), src1.V16B(), temp.V16B());
+ } else {
+ Tbl(dst.fp().V16B(), src1.V16B(), src2.V16B(), temp.V16B());
+ }
+}
+
void LiftoffAssembler::emit_i8x16_splat(LiftoffRegister dst,
LiftoffRegister src) {
Dup(dst.fp().V16B(), src.gp().W());
@@ -1513,14 +2063,71 @@ void LiftoffAssembler::emit_i8x16_neg(LiftoffRegister dst,
Neg(dst.fp().V16B(), src.fp().V16B());
}
+void LiftoffAssembler::emit_v8x16_anytrue(LiftoffRegister dst,
+ LiftoffRegister src) {
+ liftoff::EmitAnyTrue(this, dst, src);
+}
+
+void LiftoffAssembler::emit_v8x16_alltrue(LiftoffRegister dst,
+ LiftoffRegister src) {
+ liftoff::EmitAllTrue(this, dst, src, kFormat16B);
+}
+
+void LiftoffAssembler::emit_i8x16_bitmask(LiftoffRegister dst,
+ LiftoffRegister src) {
+ UseScratchRegisterScope temps(this);
+ VRegister tmp = temps.AcquireQ();
+ VRegister mask = temps.AcquireQ();
+
+ // Set the i-th bit in lane i. When ANDed with tmp, lanes that are negative
+ // keep their i-th bit set; non-negative lanes become 0.
+ Sshr(tmp.V16B(), src.fp().V16B(), 7);
+ Movi(mask.V2D(), 0x8040'2010'0804'0201);
+ And(tmp.V16B(), mask.V16B(), tmp.V16B());
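+ // Pair byte i with byte i+8 into one 16-bit lane (Ext rotates the upper
+ // half to the front, Zip1 interleaves), so the following Addv sums up to
+ // the full 16-bit bitmask.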
+ Ext(mask.V16B(), tmp.V16B(), tmp.V16B(), 8);
+ Zip1(tmp.V16B(), tmp.V16B(), mask.V16B());
+ Addv(tmp.H(), tmp.V8H());
+ Mov(dst.gp().W(), tmp.V8H(), 0);
+}
+
void LiftoffAssembler::emit_i8x16_shl(LiftoffRegister dst, LiftoffRegister lhs,
LiftoffRegister rhs) {
- bailout(kSimd, "i8x16_shl");
+ liftoff::EmitSimdShift<liftoff::ShiftDirection::kLeft>(
+ this, dst.fp().V16B(), lhs.fp().V16B(), rhs.gp(), kFormat16B);
}
void LiftoffAssembler::emit_i8x16_shli(LiftoffRegister dst, LiftoffRegister lhs,
int32_t rhs) {
- bailout(kSimd, "i8x16_shli");
+ Shl(dst.fp().V16B(), lhs.fp().V16B(), rhs & 7);
+}
+
+void LiftoffAssembler::emit_i8x16_shr_s(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ liftoff::EmitSimdShift<liftoff::ShiftDirection::kRight,
+ liftoff::ShiftSign::kSigned>(
+ this, dst.fp().V16B(), lhs.fp().V16B(), rhs.gp(), kFormat16B);
+}
+
+void LiftoffAssembler::emit_i8x16_shri_s(LiftoffRegister dst,
+ LiftoffRegister lhs, int32_t rhs) {
+ liftoff::EmitSimdShiftRightImmediate<kFormat16B, liftoff::ShiftSign::kSigned>(
+ this, dst.fp().V16B(), lhs.fp().V16B(), rhs);
+}
+
+void LiftoffAssembler::emit_i8x16_shr_u(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ liftoff::EmitSimdShift<liftoff::ShiftDirection::kRight,
+ liftoff::ShiftSign::kUnsigned>(
+ this, dst.fp().V16B(), lhs.fp().V16B(), rhs.gp(), kFormat16B);
+}
+
+void LiftoffAssembler::emit_i8x16_shri_u(LiftoffRegister dst,
+ LiftoffRegister lhs, int32_t rhs) {
+ liftoff::EmitSimdShiftRightImmediate<kFormat16B,
+ liftoff::ShiftSign::kUnsigned>(
+ this, dst.fp().V16B(), lhs.fp().V16B(), rhs);
}
void LiftoffAssembler::emit_i8x16_add(LiftoffRegister dst, LiftoffRegister lhs,
@@ -1750,6 +2357,26 @@ void LiftoffAssembler::emit_s128_select(LiftoffRegister dst,
Bsl(dst.fp().V16B(), src1.fp().V16B(), src2.fp().V16B());
}
+void LiftoffAssembler::emit_i32x4_sconvert_f32x4(LiftoffRegister dst,
+ LiftoffRegister src) {
+ Fcvtzs(dst.fp().V4S(), src.fp().V4S());
+}
+
+void LiftoffAssembler::emit_i32x4_uconvert_f32x4(LiftoffRegister dst,
+ LiftoffRegister src) {
+ Fcvtzu(dst.fp().V4S(), src.fp().V4S());
+}
+
+void LiftoffAssembler::emit_f32x4_sconvert_i32x4(LiftoffRegister dst,
+ LiftoffRegister src) {
+ Scvtf(dst.fp().V4S(), src.fp().V4S());
+}
+
+void LiftoffAssembler::emit_f32x4_uconvert_i32x4(LiftoffRegister dst,
+ LiftoffRegister src) {
+ Ucvtf(dst.fp().V4S(), src.fp().V4S());
+}
+
void LiftoffAssembler::emit_i8x16_sconvert_i16x8(LiftoffRegister dst,
LiftoffRegister lhs,
LiftoffRegister rhs) {
diff --git a/chromium/v8/src/wasm/baseline/ia32/liftoff-assembler-ia32.h b/chromium/v8/src/wasm/baseline/ia32/liftoff-assembler-ia32.h
index 7a1d629bf2d..468450aef66 100644
--- a/chromium/v8/src/wasm/baseline/ia32/liftoff-assembler-ia32.h
+++ b/chromium/v8/src/wasm/baseline/ia32/liftoff-assembler-ia32.h
@@ -130,7 +130,7 @@ inline Register GetTmpByteRegister(LiftoffAssembler* assm, Register candidate) {
if (candidate.is_byte_register()) return candidate;
// {GetUnusedRegister()} may insert move instructions to spill registers to
// the stack. This is OK because {mov} does not change the status flags.
- return assm->GetUnusedRegister(liftoff::kByteRegs).gp();
+ return assm->GetUnusedRegister(liftoff::kByteRegs, {}).gp();
}
inline void MoveStackValue(LiftoffAssembler* assm, const Operand& src,
@@ -336,8 +336,6 @@ void LiftoffAssembler::Load(LiftoffRegister dst, Register src_addr,
case LoadType::kS128Load:
movdqu(dst.fp(), src_op);
break;
- default:
- UNREACHABLE();
}
}
@@ -405,8 +403,6 @@ void LiftoffAssembler::Store(Register dst_addr, Register offset_reg,
case StoreType::kS128Store:
Movdqu(dst_op, src.fp());
break;
- default:
- UNREACHABLE();
}
}
@@ -494,7 +490,56 @@ void LiftoffAssembler::AtomicStore(Register dst_addr, Register offset_reg,
void LiftoffAssembler::AtomicAdd(Register dst_addr, Register offset_reg,
uint32_t offset_imm, LiftoffRegister value,
LiftoffRegister result, StoreType type) {
- bailout(kAtomics, "AtomicAdd");
+ if (type.value() == StoreType::kI64Store) {
+ bailout(kAtomics, "AtomicAdd");
+ return;
+ }
+
+ DCHECK_EQ(value, result);
+ DCHECK(!cache_state()->is_used(result));
+ bool is_64_bit_op = type.value_type() == kWasmI64;
+
+ Register value_reg = is_64_bit_op ? value.low_gp() : value.gp();
+ Register result_reg = is_64_bit_op ? result.low_gp() : result.gp();
+
+ bool is_byte_store = type.size() == 1;
+ LiftoffRegList pinned =
+ LiftoffRegList::ForRegs(dst_addr, value_reg, offset_reg);
+
+ // Ensure that {value_reg} is a byte-addressable register for byte stores.
+ if (is_byte_store && !liftoff::kByteRegs.has(value_reg)) {
+ Register safe_value_reg =
+ GetUnusedRegister(liftoff::kByteRegs, pinned).gp();
+ mov(safe_value_reg, value_reg);
+ value_reg = safe_value_reg;
+ }
+
+ Operand dst_op = Operand(dst_addr, offset_reg, times_1, offset_imm);
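+ // With the {lock} prefix, {xadd} atomically adds {value_reg} to the memory
+ // operand and leaves the old memory value in {value_reg}.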
+ lock();
+ switch (type.value()) {
+ case StoreType::kI64Store8:
+ case StoreType::kI32Store8:
+ xadd_b(dst_op, value_reg);
+ movzx_b(result_reg, value_reg);
+ break;
+ case StoreType::kI64Store16:
+ case StoreType::kI32Store16:
+ xadd_w(dst_op, value_reg);
+ movzx_w(result_reg, value_reg);
+ break;
+ case StoreType::kI64Store32:
+ case StoreType::kI32Store:
+ xadd(dst_op, value_reg);
+ if (value_reg != result_reg) {
+ mov(result_reg, value_reg);
+ }
+ break;
+ default:
+ UNREACHABLE();
+ }
+ if (is_64_bit_op) {
+ xor_(result.high_gp(), result.high_gp());
+ }
}
void LiftoffAssembler::AtomicSub(Register dst_addr, Register offset_reg,
@@ -1349,7 +1394,7 @@ inline void EmitFloatMinOrMax(LiftoffAssembler* assm, DoubleRegister dst,
// We need one tmp register to extract the sign bit. Get it right at the
// beginning, such that the spilling code is not accidentally jumped over.
- Register tmp = assm->GetUnusedRegister(kGpReg).gp();
+ Register tmp = assm->GetUnusedRegister(kGpReg, {}).gp();
#define dop(name, ...) \
do { \
@@ -1412,9 +1457,9 @@ void LiftoffAssembler::emit_f32_max(DoubleRegister dst, DoubleRegister lhs,
void LiftoffAssembler::emit_f32_copysign(DoubleRegister dst, DoubleRegister lhs,
DoubleRegister rhs) {
static constexpr int kF32SignBit = 1 << 31;
- Register scratch = GetUnusedRegister(kGpReg).gp();
- Register scratch2 =
- GetUnusedRegister(kGpReg, LiftoffRegList::ForRegs(scratch)).gp();
+ LiftoffRegList pinned;
+ Register scratch = pinned.set(GetUnusedRegister(kGpReg, pinned)).gp();
+ Register scratch2 = GetUnusedRegister(kGpReg, pinned).gp();
Movd(scratch, lhs); // move {lhs} into {scratch}.
and_(scratch, Immediate(~kF32SignBit)); // clear sign bit in {scratch}.
Movd(scratch2, rhs); // move {rhs} into {scratch2}.
@@ -1541,9 +1586,9 @@ void LiftoffAssembler::emit_f64_copysign(DoubleRegister dst, DoubleRegister lhs,
static constexpr int kF32SignBit = 1 << 31;
// On ia32, we cannot hold the whole f64 value in a gp register, so we just
// operate on the upper half (UH).
- Register scratch = GetUnusedRegister(kGpReg).gp();
- Register scratch2 =
- GetUnusedRegister(kGpReg, LiftoffRegList::ForRegs(scratch)).gp();
+ LiftoffRegList pinned;
+ Register scratch = pinned.set(GetUnusedRegister(kGpReg, pinned)).gp();
+ Register scratch2 = GetUnusedRegister(kGpReg, pinned).gp();
Pextrd(scratch, lhs, 1); // move UH of {lhs} into {scratch}.
and_(scratch, Immediate(~kF32SignBit)); // clear sign bit in {scratch}.
@@ -1612,6 +1657,7 @@ void LiftoffAssembler::emit_f64_sqrt(DoubleRegister dst, DoubleRegister src) {
}
namespace liftoff {
+#define __ assm->
// Used for float to int conversions. If the value in {converted_back} equals
// {src} afterwards, the conversion succeeded.
template <typename dst_type, typename src_type>
@@ -1621,21 +1667,21 @@ inline void ConvertFloatToIntAndBack(LiftoffAssembler* assm, Register dst,
LiftoffRegList pinned) {
if (std::is_same<double, src_type>::value) { // f64
if (std::is_signed<dst_type>::value) { // f64 -> i32
- assm->cvttsd2si(dst, src);
- assm->Cvtsi2sd(converted_back, dst);
+ __ cvttsd2si(dst, src);
+ __ Cvtsi2sd(converted_back, dst);
} else { // f64 -> u32
- assm->Cvttsd2ui(dst, src, liftoff::kScratchDoubleReg);
- assm->Cvtui2sd(converted_back, dst,
- assm->GetUnusedRegister(kGpReg, pinned).gp());
+ __ Cvttsd2ui(dst, src, liftoff::kScratchDoubleReg);
+ __ Cvtui2sd(converted_back, dst,
+ __ GetUnusedRegister(kGpReg, pinned).gp());
}
} else { // f32
if (std::is_signed<dst_type>::value) { // f32 -> i32
- assm->cvttss2si(dst, src);
- assm->Cvtsi2ss(converted_back, dst);
+ __ cvttss2si(dst, src);
+ __ Cvtsi2ss(converted_back, dst);
} else { // f32 -> u32
- assm->Cvttss2ui(dst, src, liftoff::kScratchDoubleReg);
- assm->Cvtui2ss(converted_back, dst,
- assm->GetUnusedRegister(kGpReg, pinned).gp());
+ __ Cvttss2ui(dst, src, liftoff::kScratchDoubleReg);
+ __ Cvtui2ss(converted_back, dst,
+ __ GetUnusedRegister(kGpReg, pinned).gp());
}
}
}
@@ -1644,36 +1690,101 @@ template <typename dst_type, typename src_type>
inline bool EmitTruncateFloatToInt(LiftoffAssembler* assm, Register dst,
DoubleRegister src, Label* trap) {
if (!CpuFeatures::IsSupported(SSE4_1)) {
- assm->bailout(kMissingCPUFeature, "no SSE4.1");
+ __ bailout(kMissingCPUFeature, "no SSE4.1");
return true;
}
CpuFeatureScope feature(assm, SSE4_1);
LiftoffRegList pinned = LiftoffRegList::ForRegs(src, dst);
DoubleRegister rounded =
- pinned.set(assm->GetUnusedRegister(kFpReg, pinned)).fp();
+ pinned.set(__ GetUnusedRegister(kFpReg, pinned)).fp();
DoubleRegister converted_back =
- pinned.set(assm->GetUnusedRegister(kFpReg, pinned)).fp();
+ pinned.set(__ GetUnusedRegister(kFpReg, pinned)).fp();
if (std::is_same<double, src_type>::value) { // f64
- assm->roundsd(rounded, src, kRoundToZero);
+ __ roundsd(rounded, src, kRoundToZero);
} else { // f32
- assm->roundss(rounded, src, kRoundToZero);
+ __ roundss(rounded, src, kRoundToZero);
}
ConvertFloatToIntAndBack<dst_type, src_type>(assm, dst, rounded,
converted_back, pinned);
if (std::is_same<double, src_type>::value) { // f64
- assm->ucomisd(converted_back, rounded);
+ __ ucomisd(converted_back, rounded);
} else { // f32
- assm->ucomiss(converted_back, rounded);
+ __ ucomiss(converted_back, rounded);
}
// Jump to trap if the comparison was unordered (one of the operands was
// NaN) or if the values are not equal.
- assm->j(parity_even, trap);
- assm->j(not_equal, trap);
+ __ j(parity_even, trap);
+ __ j(not_equal, trap);
return true;
}
+
+template <typename dst_type, typename src_type>
+inline bool EmitSatTruncateFloatToInt(LiftoffAssembler* assm, Register dst,
+ DoubleRegister src) {
+ if (!CpuFeatures::IsSupported(SSE4_1)) {
+ __ bailout(kMissingCPUFeature, "no SSE4.1");
+ return true;
+ }
+ CpuFeatureScope feature(assm, SSE4_1);
+
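+ // Saturating variant of {EmitTruncateFloatToInt} above: NaN inputs produce
+ // 0 and out-of-range inputs clamp to the min/max of {dst_type} instead of
+ // trapping.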
+ Label done;
+ Label not_nan;
+ Label src_positive;
+
+ LiftoffRegList pinned = LiftoffRegList::ForRegs(src, dst);
+ DoubleRegister rounded =
+ pinned.set(__ GetUnusedRegister(kFpReg, pinned)).fp();
+ DoubleRegister converted_back =
+ pinned.set(__ GetUnusedRegister(kFpReg, pinned)).fp();
+ DoubleRegister zero_reg =
+ pinned.set(__ GetUnusedRegister(kFpReg, pinned)).fp();
+
+ if (std::is_same<double, src_type>::value) { // f64
+ __ roundsd(rounded, src, kRoundToZero);
+ } else { // f32
+ __ roundss(rounded, src, kRoundToZero);
+ }
+
+ ConvertFloatToIntAndBack<dst_type, src_type>(assm, dst, rounded,
+ converted_back, pinned);
+ if (std::is_same<double, src_type>::value) { // f64
+ __ ucomisd(converted_back, rounded);
+ } else { // f32
+ __ ucomiss(converted_back, rounded);
+ }
+
+ // Return 0 if the comparison was unordered (one of the operands was NaN).
+ __ j(parity_odd, &not_nan);
+ __ xor_(dst, dst);
+ __ jmp(&done);
+
+ __ bind(&not_nan);
+ // If the conversion round-tripped exactly, the result in {dst} is valid.
+ __ j(equal, &done);
+
+ __ Xorpd(zero_reg, zero_reg);
+
+ // Otherwise the value is out of range; check whether {src} is positive to
+ // pick the saturation value.
+ if (std::is_same<double, src_type>::value) { // f64
+ __ ucomisd(src, zero_reg);
+ } else { // f32
+ __ ucomiss(src, zero_reg);
+ }
+ __ j(above, &src_positive);
+ __ mov(dst, Immediate(std::numeric_limits<dst_type>::min()));
+ __ jmp(&done);
+
+ __ bind(&src_positive);
+
+ __ mov(dst, Immediate(std::numeric_limits<dst_type>::max()));
+
+ __ bind(&done);
+ return true;
+}
+#undef __
} // namespace liftoff
bool LiftoffAssembler::emit_type_conversion(WasmOpcode opcode,
@@ -1695,6 +1806,18 @@ bool LiftoffAssembler::emit_type_conversion(WasmOpcode opcode,
case kExprI32UConvertF64:
return liftoff::EmitTruncateFloatToInt<uint32_t, double>(this, dst.gp(),
src.fp(), trap);
+ case kExprI32SConvertSatF32:
+ return liftoff::EmitSatTruncateFloatToInt<int32_t, float>(this, dst.gp(),
+ src.fp());
+ case kExprI32UConvertSatF32:
+ return liftoff::EmitSatTruncateFloatToInt<uint32_t, float>(this, dst.gp(),
+ src.fp());
+ case kExprI32SConvertSatF64:
+ return liftoff::EmitSatTruncateFloatToInt<int32_t, double>(this, dst.gp(),
+ src.fp());
+ case kExprI32UConvertSatF64:
+ return liftoff::EmitSatTruncateFloatToInt<uint32_t, double>(
+ this, dst.gp(), src.fp());
case kExprI32ReinterpretF32:
Movd(dst.gp(), src.fp());
return true;
@@ -2017,8 +2140,164 @@ void EmitSimdShiftOpImm(LiftoffAssembler* assm, LiftoffRegister dst,
(assm->*sse_op)(dst.fp(), shift);
}
}
+
+enum class ShiftSignedness { kSigned, kUnsigned };
+
+template <bool is_signed>
+void EmitI8x16Shr(LiftoffAssembler* assm, LiftoffRegister dst,
+ LiftoffRegister lhs, LiftoffRegister rhs) {
+ // The same algorithm is used for both signed and unsigned shifts; the only
+ // difference is the actual shift and the pack at the end. It mirrors the
+ // algorithm used in code-generator-ia32.cc.
+ Register tmp =
+ assm->GetUnusedRegister(kGpReg, LiftoffRegList::ForRegs(rhs)).gp();
+ XMMRegister tmp_simd =
+ assm->GetUnusedRegister(kFpReg, LiftoffRegList::ForRegs(dst, lhs)).fp();
+
+ // Unpack the bytes into words, do logical shifts, and repack.
+ assm->Punpckhbw(liftoff::kScratchDoubleReg, lhs.fp());
+ assm->Punpcklbw(dst.fp(), lhs.fp());
+ assm->mov(tmp, rhs.gp());
+ // Take shift value modulo 8.
+ assm->and_(tmp, 7);
+ assm->add(tmp, Immediate(8));
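+ // After the unpack the source bytes sit in the high byte of each 16-bit
+ // lane, so shifting by (shift + 8) applies the shift and discards the
+ // garbage low byte in one go.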
+ assm->Movd(tmp_simd, tmp);
+ if (is_signed) {
+ assm->Psraw(liftoff::kScratchDoubleReg, liftoff::kScratchDoubleReg,
+ tmp_simd);
+ assm->Psraw(dst.fp(), dst.fp(), tmp_simd);
+ assm->Packsswb(dst.fp(), liftoff::kScratchDoubleReg);
+ } else {
+ assm->Psrlw(liftoff::kScratchDoubleReg, liftoff::kScratchDoubleReg,
+ tmp_simd);
+ assm->Psrlw(dst.fp(), dst.fp(), tmp_simd);
+ assm->Packuswb(dst.fp(), liftoff::kScratchDoubleReg);
+ }
+}
+
+inline void EmitAnyTrue(LiftoffAssembler* assm, LiftoffRegister dst,
+ LiftoffRegister src) {
+ Register tmp =
+ assm->GetUnusedRegister(kGpReg, LiftoffRegList::ForRegs(dst)).gp();
+ assm->xor_(tmp, tmp);
+ assm->mov(dst.gp(), Immediate(1));
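+ // Ptest sets ZF iff {src} is all zeros; in that case the cmov below
+ // replaces the preloaded 1 with 0.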
+ assm->Ptest(src.fp(), src.fp());
+ assm->cmov(zero, dst.gp(), tmp);
+}
+
+template <void (TurboAssembler::*pcmp)(XMMRegister, XMMRegister)>
+inline void EmitAllTrue(LiftoffAssembler* assm, LiftoffRegister dst,
+ LiftoffRegister src) {
+ Register tmp =
+ assm->GetUnusedRegister(kGpReg, LiftoffRegList::ForRegs(dst)).gp();
+ XMMRegister tmp_simd = liftoff::kScratchDoubleReg;
+ assm->mov(tmp, Immediate(1));
+ assm->xor_(dst.gp(), dst.gp());
+ assm->Pxor(tmp_simd, tmp_simd);
+ (assm->*pcmp)(tmp_simd, src.fp());
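+ // {tmp_simd} now has all-ones exactly in the lanes of {src} that were zero,
+ // so ZF ends up set iff every lane of {src} was non-zero.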
+ assm->Ptest(tmp_simd, tmp_simd);
+ assm->cmov(zero, dst.gp(), tmp);
+}
+
} // namespace liftoff
+void LiftoffAssembler::LoadTransform(LiftoffRegister dst, Register src_addr,
+ Register offset_reg, uint32_t offset_imm,
+ LoadType type,
+ LoadTransformationKind transform,
+ uint32_t* protected_load_pc) {
+ DCHECK_LE(offset_imm, std::numeric_limits<int32_t>::max());
+ Operand src_op{src_addr, offset_reg, times_1,
+ static_cast<int32_t>(offset_imm)};
+ *protected_load_pc = pc_offset();
+
+ MachineType memtype = type.mem_type();
+ if (transform == LoadTransformationKind::kExtend) {
+ if (memtype == MachineType::Int8()) {
+ Pmovsxbw(dst.fp(), src_op);
+ } else if (memtype == MachineType::Uint8()) {
+ Pmovzxbw(dst.fp(), src_op);
+ } else if (memtype == MachineType::Int16()) {
+ Pmovsxwd(dst.fp(), src_op);
+ } else if (memtype == MachineType::Uint16()) {
+ Pmovzxwd(dst.fp(), src_op);
+ } else if (memtype == MachineType::Int32()) {
+ Pmovsxdq(dst.fp(), src_op);
+ } else if (memtype == MachineType::Uint32()) {
+ Pmovzxdq(dst.fp(), src_op);
+ }
+ } else {
+ DCHECK_EQ(LoadTransformationKind::kSplat, transform);
+ if (memtype == MachineType::Int8()) {
+ Pinsrb(dst.fp(), src_op, 0);
+ Pxor(liftoff::kScratchDoubleReg, liftoff::kScratchDoubleReg);
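+ // Pshufb with an all-zero index vector broadcasts byte 0 into every byte.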
+ Pshufb(dst.fp(), liftoff::kScratchDoubleReg);
+ } else if (memtype == MachineType::Int16()) {
+ Pinsrw(dst.fp(), src_op, 0);
+ Pshuflw(dst.fp(), dst.fp(), uint8_t{0});
+ Punpcklqdq(dst.fp(), dst.fp());
+ } else if (memtype == MachineType::Int32()) {
+ Vbroadcastss(dst.fp(), src_op);
+ } else if (memtype == MachineType::Int64()) {
+ Movddup(dst.fp(), src_op);
+ }
+ }
+}
+
+void LiftoffAssembler::emit_s8x16_shuffle(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ LiftoffRegister rhs,
+ const uint8_t shuffle[16]) {
+ LiftoffRegister tmp = GetUnusedRegister(kGpReg, {});
+ // Prepare a 16-byte-aligned stack buffer for the shuffle control mask.
+ mov(tmp.gp(), esp);
+ and_(esp, -16);
+ movups(liftoff::kScratchDoubleReg, lhs.fp());
+
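+ // Build two pshufb index vectors on the stack: the first selects the lanes
+ // taken from {lhs} (indices >= 16 become 0x80 and are zeroed), the second
+ // selects the lanes taken from {rhs}; the two results are OR-ed together.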
+ for (int i = 3; i >= 0; i--) {
+ uint32_t mask = 0;
+ for (int j = 3; j >= 0; j--) {
+ uint8_t lane = shuffle[i * 4 + j];
+ mask <<= 8;
+ mask |= lane < kSimd128Size ? lane : 0x80;
+ }
+ push(Immediate(mask));
+ }
+ Pshufb(liftoff::kScratchDoubleReg, Operand(esp, 0));
+
+ for (int i = 3; i >= 0; i--) {
+ uint32_t mask = 0;
+ for (int j = 3; j >= 0; j--) {
+ uint8_t lane = shuffle[i * 4 + j];
+ mask <<= 8;
+ mask |= lane >= kSimd128Size ? (lane & 0x0F) : 0x80;
+ }
+ push(Immediate(mask));
+ }
+ if (dst.fp() != rhs.fp()) {
+ movups(dst.fp(), rhs.fp());
+ }
+ Pshufb(dst.fp(), Operand(esp, 0));
+ Por(dst.fp(), liftoff::kScratchDoubleReg);
+ mov(esp, tmp.gp());
+}
+
+void LiftoffAssembler::emit_s8x16_swizzle(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ XMMRegister mask =
+ GetUnusedRegister(kFpReg, LiftoffRegList::ForRegs(lhs, rhs)).fp();
+ // Out-of-range indices should return 0. Adding 112 (0x70) with unsigned
+ // saturation makes any index > 15 end up with its top bit set (>= 0x80), so
+ // pshufb zeroes that lane.
+ TurboAssembler::Move(mask, uint32_t{0x70707070});
+ Pshufd(mask, mask, uint8_t{0x0});
+ Paddusb(mask, rhs.fp());
+ if (lhs != dst) {
+ Movaps(dst.fp(), lhs.fp());
+ }
+ Pshufb(dst.fp(), mask);
+}
+
void LiftoffAssembler::emit_i8x16_splat(LiftoffRegister dst,
LiftoffRegister src) {
Movd(dst.fp(), src.gp());
@@ -2350,6 +2629,21 @@ void LiftoffAssembler::emit_i8x16_neg(LiftoffRegister dst,
}
}
+void LiftoffAssembler::emit_v8x16_anytrue(LiftoffRegister dst,
+ LiftoffRegister src) {
+ liftoff::EmitAnyTrue(this, dst, src);
+}
+
+void LiftoffAssembler::emit_v8x16_alltrue(LiftoffRegister dst,
+ LiftoffRegister src) {
+ liftoff::EmitAllTrue<&TurboAssembler::Pcmpeqb>(this, dst, src);
+}
+
+void LiftoffAssembler::emit_i8x16_bitmask(LiftoffRegister dst,
+ LiftoffRegister src) {
+ Pmovmskb(dst.gp(), src.fp());
+}
+
void LiftoffAssembler::emit_i8x16_shl(LiftoffRegister dst, LiftoffRegister lhs,
LiftoffRegister rhs) {
static constexpr RegClass tmp_rc = reg_class_for(ValueType::kI32);
@@ -2381,7 +2675,7 @@ void LiftoffAssembler::emit_i8x16_shl(LiftoffRegister dst, LiftoffRegister lhs,
void LiftoffAssembler::emit_i8x16_shli(LiftoffRegister dst, LiftoffRegister lhs,
int32_t rhs) {
static constexpr RegClass tmp_rc = reg_class_for(ValueType::kI32);
- LiftoffRegister tmp = GetUnusedRegister(tmp_rc);
+ LiftoffRegister tmp = GetUnusedRegister(tmp_rc, {});
byte shift = static_cast<byte>(rhs & 0x7);
if (CpuFeatures::IsSupported(AVX)) {
CpuFeatureScope scope(this, AVX);
@@ -2399,6 +2693,43 @@ void LiftoffAssembler::emit_i8x16_shli(LiftoffRegister dst, LiftoffRegister lhs,
Pand(dst.fp(), liftoff::kScratchDoubleReg);
}
+void LiftoffAssembler::emit_i8x16_shr_s(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ liftoff::EmitI8x16Shr</*is_signed=*/true>(this, dst, lhs, rhs);
+}
+
+void LiftoffAssembler::emit_i8x16_shri_s(LiftoffRegister dst,
+ LiftoffRegister lhs, int32_t rhs) {
+ Punpckhbw(liftoff::kScratchDoubleReg, lhs.fp());
+ Punpcklbw(dst.fp(), lhs.fp());
+ uint8_t shift = (rhs & 7) + 8;
+ Psraw(liftoff::kScratchDoubleReg, shift);
+ Psraw(dst.fp(), shift);
+ Packsswb(dst.fp(), liftoff::kScratchDoubleReg);
+}
+
+void LiftoffAssembler::emit_i8x16_shr_u(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ liftoff::EmitI8x16Shr</*is_signed=*/false>(this, dst, lhs, rhs);
+}
+
+void LiftoffAssembler::emit_i8x16_shri_u(LiftoffRegister dst,
+ LiftoffRegister lhs, int32_t rhs) {
+ Register tmp = GetUnusedRegister(kGpReg, {}).gp();
+ // Perform 16-bit shift, then mask away high bits.
+ uint8_t shift = rhs & 7;
+ Psrlw(dst.fp(), lhs.fp(), byte{shift});
+
+ uint8_t bmask = 0xff >> shift;
+ uint32_t mask = bmask << 24 | bmask << 16 | bmask << 8 | bmask;
+ mov(tmp, mask);
+ Movd(liftoff::kScratchDoubleReg, tmp);
+ Pshufd(liftoff::kScratchDoubleReg, liftoff::kScratchDoubleReg, 0);
+ Pand(dst.fp(), liftoff::kScratchDoubleReg);
+}
+
void LiftoffAssembler::emit_i8x16_add(LiftoffRegister dst, LiftoffRegister lhs,
LiftoffRegister rhs) {
liftoff::EmitSimdCommutativeBinOp<&Assembler::vpaddb, &Assembler::paddb>(
@@ -2541,6 +2872,24 @@ void LiftoffAssembler::emit_i16x8_neg(LiftoffRegister dst,
}
}
+void LiftoffAssembler::emit_v16x8_anytrue(LiftoffRegister dst,
+ LiftoffRegister src) {
+ liftoff::EmitAnyTrue(this, dst, src);
+}
+
+void LiftoffAssembler::emit_v16x8_alltrue(LiftoffRegister dst,
+ LiftoffRegister src) {
+ liftoff::EmitAllTrue<&TurboAssembler::Pcmpeqw>(this, dst, src);
+}
+
+void LiftoffAssembler::emit_i16x8_bitmask(LiftoffRegister dst,
+ LiftoffRegister src) {
+ XMMRegister tmp = liftoff::kScratchDoubleReg;
+ Packsswb(tmp, src.fp());
+ Pmovmskb(dst.gp(), tmp);
+ shr(dst.gp(), 8);
+}
+
void LiftoffAssembler::emit_i16x8_shl(LiftoffRegister dst, LiftoffRegister lhs,
LiftoffRegister rhs) {
liftoff::EmitSimdShiftOp<&Assembler::vpsllw, &Assembler::psllw, 4>(this, dst,
@@ -2553,6 +2902,32 @@ void LiftoffAssembler::emit_i16x8_shli(LiftoffRegister dst, LiftoffRegister lhs,
this, dst, lhs, rhs);
}
+void LiftoffAssembler::emit_i16x8_shr_s(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ liftoff::EmitSimdShiftOp<&Assembler::vpsraw, &Assembler::psraw, 4>(this, dst,
+ lhs, rhs);
+}
+
+void LiftoffAssembler::emit_i16x8_shri_s(LiftoffRegister dst,
+ LiftoffRegister lhs, int32_t rhs) {
+ liftoff::EmitSimdShiftOpImm<&Assembler::vpsraw, &Assembler::psraw, 4>(
+ this, dst, lhs, rhs);
+}
+
+void LiftoffAssembler::emit_i16x8_shr_u(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ liftoff::EmitSimdShiftOp<&Assembler::vpsrlw, &Assembler::psrlw, 4>(this, dst,
+ lhs, rhs);
+}
+
+void LiftoffAssembler::emit_i16x8_shri_u(LiftoffRegister dst,
+ LiftoffRegister lhs, int32_t rhs) {
+ liftoff::EmitSimdShiftOpImm<&Assembler::vpsrlw, &Assembler::psrlw, 4>(
+ this, dst, lhs, rhs);
+}
+
void LiftoffAssembler::emit_i16x8_add(LiftoffRegister dst, LiftoffRegister lhs,
LiftoffRegister rhs) {
liftoff::EmitSimdCommutativeBinOp<&Assembler::vpaddw, &Assembler::paddw>(
@@ -2639,6 +3014,21 @@ void LiftoffAssembler::emit_i32x4_neg(LiftoffRegister dst,
}
}
+void LiftoffAssembler::emit_v32x4_anytrue(LiftoffRegister dst,
+ LiftoffRegister src) {
+ liftoff::EmitAnyTrue(this, dst, src);
+}
+
+void LiftoffAssembler::emit_v32x4_alltrue(LiftoffRegister dst,
+ LiftoffRegister src) {
+ liftoff::EmitAllTrue<&TurboAssembler::Pcmpeqd>(this, dst, src);
+}
+
+void LiftoffAssembler::emit_i32x4_bitmask(LiftoffRegister dst,
+ LiftoffRegister src) {
+ Movmskps(dst.gp(), src.fp());
+}
+
void LiftoffAssembler::emit_i32x4_shl(LiftoffRegister dst, LiftoffRegister lhs,
LiftoffRegister rhs) {
liftoff::EmitSimdShiftOp<&Assembler::vpslld, &Assembler::pslld, 5>(this, dst,
@@ -2651,6 +3041,32 @@ void LiftoffAssembler::emit_i32x4_shli(LiftoffRegister dst, LiftoffRegister lhs,
this, dst, lhs, rhs);
}
+void LiftoffAssembler::emit_i32x4_shr_s(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ liftoff::EmitSimdShiftOp<&Assembler::vpsrad, &Assembler::psrad, 5>(this, dst,
+ lhs, rhs);
+}
+
+void LiftoffAssembler::emit_i32x4_shri_s(LiftoffRegister dst,
+ LiftoffRegister lhs, int32_t rhs) {
+ liftoff::EmitSimdShiftOpImm<&Assembler::vpsrad, &Assembler::psrad, 5>(
+ this, dst, lhs, rhs);
+}
+
+void LiftoffAssembler::emit_i32x4_shr_u(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ liftoff::EmitSimdShiftOp<&Assembler::vpsrld, &Assembler::psrld, 5>(this, dst,
+ lhs, rhs);
+}
+
+void LiftoffAssembler::emit_i32x4_shri_u(LiftoffRegister dst,
+ LiftoffRegister lhs, int32_t rhs) {
+ liftoff::EmitSimdShiftOpImm<&Assembler::vpsrld, &Assembler::psrld, 5>(
+ this, dst, lhs, rhs);
+}
+
void LiftoffAssembler::emit_i32x4_add(LiftoffRegister dst, LiftoffRegister lhs,
LiftoffRegister rhs) {
liftoff::EmitSimdCommutativeBinOp<&Assembler::vpaddd, &Assembler::paddd>(
@@ -2723,6 +3139,56 @@ void LiftoffAssembler::emit_i64x2_shli(LiftoffRegister dst, LiftoffRegister lhs,
this, dst, lhs, rhs);
}
+void LiftoffAssembler::emit_i64x2_shr_s(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ XMMRegister shift = liftoff::kScratchDoubleReg;
+ XMMRegister tmp =
+ GetUnusedRegister(RegClass::kFpReg, LiftoffRegList::ForRegs(dst, lhs))
+ .fp();
+
+ // Take shift value modulo 64.
+ and_(rhs.gp(), Immediate(63));
+ Movd(shift, rhs.gp());
+
+ // Set up a mask with only the sign bit set in each 64-bit lane.
+ Pcmpeqb(tmp, tmp);
+ Psllq(tmp, tmp, 63);
+
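+ // The arithmetic shift is emulated as ((x >> n) ^ m) - m, where m is the
+ // sign bit shifted right by n; this sign-extends lanes whose top bit was
+ // set.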
+ Psrlq(tmp, tmp, shift);
+ Psrlq(dst.fp(), lhs.fp(), shift);
+ Pxor(dst.fp(), tmp);
+ Psubq(dst.fp(), tmp);
+}
+
+void LiftoffAssembler::emit_i64x2_shri_s(LiftoffRegister dst,
+ LiftoffRegister lhs, int32_t rhs) {
+ XMMRegister tmp = liftoff::kScratchDoubleReg;
+ int32_t shift = rhs & 63;
+
+ // Set up a mask with only the sign bit set in each 64-bit lane.
+ Pcmpeqb(tmp, tmp);
+ Psllq(tmp, tmp, 63);
+
+ Psrlq(tmp, tmp, shift);
+ Psrlq(dst.fp(), lhs.fp(), shift);
+ Pxor(dst.fp(), tmp);
+ Psubq(dst.fp(), tmp);
+}
+
+void LiftoffAssembler::emit_i64x2_shr_u(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ liftoff::EmitSimdShiftOp<&Assembler::vpsrlq, &Assembler::psrlq, 6>(this, dst,
+ lhs, rhs);
+}
+
+void LiftoffAssembler::emit_i64x2_shri_u(LiftoffRegister dst,
+ LiftoffRegister lhs, int32_t rhs) {
+ liftoff::EmitSimdShiftOpImm<&Assembler::vpsrlq, &Assembler::psrlq, 6>(
+ this, dst, lhs, rhs);
+}
+
void LiftoffAssembler::emit_i64x2_add(LiftoffRegister dst, LiftoffRegister lhs,
LiftoffRegister rhs) {
liftoff::EmitSimdCommutativeBinOp<&Assembler::vpaddq, &Assembler::paddq>(
@@ -2990,6 +3456,97 @@ void LiftoffAssembler::emit_f64x2_max(LiftoffRegister dst, LiftoffRegister lhs,
Andnpd(dst.fp(), liftoff::kScratchDoubleReg);
}
+void LiftoffAssembler::emit_i32x4_sconvert_f32x4(LiftoffRegister dst,
+ LiftoffRegister src) {
+ // NAN->0
+ if (CpuFeatures::IsSupported(AVX)) {
+ CpuFeatureScope scope(this, AVX);
+ vcmpeqps(liftoff::kScratchDoubleReg, src.fp(), src.fp());
+ vpand(dst.fp(), src.fp(), liftoff::kScratchDoubleReg);
+ } else {
+ movaps(liftoff::kScratchDoubleReg, src.fp());
+ cmpeqps(liftoff::kScratchDoubleReg, liftoff::kScratchDoubleReg);
+ if (dst.fp() != src.fp()) movaps(dst.fp(), src.fp());
+ pand(dst.fp(), liftoff::kScratchDoubleReg);
+ }
+ // Set top bit if >= 0 (but not -0.0!).
+ Pxor(liftoff::kScratchDoubleReg, dst.fp());
+ // Convert to int.
+ Cvttps2dq(dst.fp(), dst.fp());
+ // Set top bit where a lane that was >= 0 has become < 0 (positive overflow).
+ Pand(liftoff::kScratchDoubleReg, dst.fp());
+ Psrad(liftoff::kScratchDoubleReg, liftoff::kScratchDoubleReg, byte{31});
+ // Set positive overflow lanes to 0x7FFFFFFF.
+ Pxor(dst.fp(), liftoff::kScratchDoubleReg);
+}
+
+void LiftoffAssembler::emit_i32x4_uconvert_f32x4(LiftoffRegister dst,
+ LiftoffRegister src) {
+ static constexpr RegClass tmp_rc = reg_class_for(ValueType::kS128);
+ DoubleRegister tmp =
+ GetUnusedRegister(tmp_rc, LiftoffRegList::ForRegs(dst, src)).fp();
+ // NAN->0, negative->0.
+ Pxor(liftoff::kScratchDoubleReg, liftoff::kScratchDoubleReg);
+ if (CpuFeatures::IsSupported(AVX)) {
+ CpuFeatureScope scope(this, AVX);
+ vmaxps(dst.fp(), src.fp(), liftoff::kScratchDoubleReg);
+ } else {
+ if (dst.fp() != src.fp()) movaps(dst.fp(), src.fp());
+ maxps(dst.fp(), liftoff::kScratchDoubleReg);
+ }
+ // scratch: float representation of max_signed.
+ Pcmpeqd(liftoff::kScratchDoubleReg, liftoff::kScratchDoubleReg);
+ Psrld(liftoff::kScratchDoubleReg, liftoff::kScratchDoubleReg,
+ uint8_t{1}); // 0x7fffffff
+ Cvtdq2ps(liftoff::kScratchDoubleReg,
+ liftoff::kScratchDoubleReg); // 0x4f000000
+ // tmp: convert (src-max_signed).
+ // Set positive overflow lanes to 0x7FFFFFFF.
+ // Set negative lanes to 0.
+ if (CpuFeatures::IsSupported(AVX)) {
+ CpuFeatureScope scope(this, AVX);
+ vsubps(tmp, dst.fp(), liftoff::kScratchDoubleReg);
+ } else {
+ movaps(tmp, dst.fp());
+ subps(tmp, liftoff::kScratchDoubleReg);
+ }
+ Cmpleps(liftoff::kScratchDoubleReg, liftoff::kScratchDoubleReg, tmp);
+ Cvttps2dq(tmp, tmp);
+ Pxor(tmp, liftoff::kScratchDoubleReg);
+ Pxor(liftoff::kScratchDoubleReg, liftoff::kScratchDoubleReg);
+ Pmaxsd(tmp, liftoff::kScratchDoubleReg);
+ // Convert to int. Overflow lanes above max_signed will be 0x80000000.
+ Cvttps2dq(dst.fp(), dst.fp());
+ // Add (src-max_signed) for overflow lanes.
+ Paddd(dst.fp(), dst.fp(), tmp);
+}
+
+void LiftoffAssembler::emit_f32x4_sconvert_i32x4(LiftoffRegister dst,
+ LiftoffRegister src) {
+ Cvtdq2ps(dst.fp(), src.fp());
+}
+
+void LiftoffAssembler::emit_f32x4_uconvert_i32x4(LiftoffRegister dst,
+ LiftoffRegister src) {
+ Pxor(liftoff::kScratchDoubleReg, liftoff::kScratchDoubleReg); // Zeros.
+ Pblendw(liftoff::kScratchDoubleReg, src.fp(),
+ uint8_t{0x55}); // Get lo 16 bits.
+ if (CpuFeatures::IsSupported(AVX)) {
+ CpuFeatureScope scope(this, AVX);
+ vpsubd(dst.fp(), src.fp(), liftoff::kScratchDoubleReg); // Get hi 16 bits.
+ } else {
+ if (dst.fp() != src.fp()) movaps(dst.fp(), src.fp());
+ psubd(dst.fp(), liftoff::kScratchDoubleReg);
+ }
+ Cvtdq2ps(liftoff::kScratchDoubleReg,
+ liftoff::kScratchDoubleReg); // Convert lo exactly.
+ Psrld(dst.fp(), dst.fp(), byte{1}); // Divide by 2 to get in unsigned range.
+ Cvtdq2ps(dst.fp(), dst.fp()); // Convert hi, exactly.
+ Addps(dst.fp(), dst.fp(), dst.fp()); // Double hi, exactly.
+ Addps(dst.fp(), dst.fp(),
+ liftoff::kScratchDoubleReg); // Add hi and lo, may round.
+}
+
void LiftoffAssembler::emit_i8x16_sconvert_i16x8(LiftoffRegister dst,
LiftoffRegister lhs,
LiftoffRegister rhs) {
@@ -3270,7 +3827,7 @@ void LiftoffAssembler::StackCheck(Label* ool_code, Register limit_address) {
}
void LiftoffAssembler::CallTrapCallbackForTesting() {
- PrepareCallCFunction(0, GetUnusedRegister(kGpReg).gp());
+ PrepareCallCFunction(0, GetUnusedRegister(kGpReg, {}).gp());
CallCFunction(ExternalReference::wasm_call_trap_callback_for_testing(), 0);
}
diff --git a/chromium/v8/src/wasm/baseline/liftoff-assembler.cc b/chromium/v8/src/wasm/baseline/liftoff-assembler.cc
index 923d375064c..a8b40a7b462 100644
--- a/chromium/v8/src/wasm/baseline/liftoff-assembler.cc
+++ b/chromium/v8/src/wasm/baseline/liftoff-assembler.cc
@@ -82,35 +82,35 @@ class StackTransferRecipe {
DCHECK(load_dst_regs_.is_empty());
}
- void TransferStackSlot(const VarState& dst, const VarState& src) {
+ V8_INLINE void TransferStackSlot(const VarState& dst, const VarState& src) {
DCHECK_EQ(dst.type(), src.type());
- switch (dst.loc()) {
+ if (dst.is_reg()) {
+ LoadIntoRegister(dst.reg(), src, src.offset());
+ return;
+ }
+ if (dst.is_const()) {
+ DCHECK_EQ(dst.i32_const(), src.i32_const());
+ return;
+ }
+ DCHECK(dst.is_stack());
+ switch (src.loc()) {
case VarState::kStack:
- switch (src.loc()) {
- case VarState::kStack:
- if (src.offset() == dst.offset()) break;
- asm_->MoveStackValue(dst.offset(), src.offset(), src.type());
- break;
- case VarState::kRegister:
- asm_->Spill(dst.offset(), src.reg(), src.type());
- break;
- case VarState::kIntConst:
- asm_->Spill(dst.offset(), src.constant());
- break;
+ if (src.offset() != dst.offset()) {
+ asm_->MoveStackValue(dst.offset(), src.offset(), src.type());
}
break;
case VarState::kRegister:
- LoadIntoRegister(dst.reg(), src, src.offset());
+ asm_->Spill(dst.offset(), src.reg(), src.type());
break;
case VarState::kIntConst:
- DCHECK_EQ(dst, src);
+ asm_->Spill(dst.offset(), src.constant());
break;
}
}
- void LoadIntoRegister(LiftoffRegister dst,
- const LiftoffAssembler::VarState& src,
- uint32_t src_offset) {
+ V8_INLINE void LoadIntoRegister(LiftoffRegister dst,
+ const LiftoffAssembler::VarState& src,
+ uint32_t src_offset) {
switch (src.loc()) {
case VarState::kStack:
LoadStackSlot(dst, src_offset, src.type());
@@ -466,7 +466,7 @@ void LiftoffAssembler::CacheState::InitMerge(const CacheState& source,
// they do not move). Try to keep register in registers, but avoid duplicates.
InitMergeRegion(this, source_begin, target_begin, num_locals, kKeepStackSlots,
kConstantsNotAllowed, kNoReuseRegisters, used_regs);
- // Sanity check: All the {used_regs} are really in use now.
+ // Consistency check: All the {used_regs} are really in use now.
DCHECK_EQ(used_regs, used_registers & used_regs);
// Last, initialize the section in between. Here, constants are allowed, but
@@ -510,24 +510,15 @@ LiftoffAssembler::~LiftoffAssembler() {
LiftoffRegister LiftoffAssembler::LoadToRegister(VarState slot,
LiftoffRegList pinned) {
- switch (slot.loc()) {
- case VarState::kStack: {
- LiftoffRegister reg =
- GetUnusedRegister(reg_class_for(slot.type()), pinned);
- Fill(reg, slot.offset(), slot.type());
- return reg;
- }
- case VarState::kRegister:
- return slot.reg();
- case VarState::kIntConst: {
- RegClass rc =
- kNeedI64RegPair && slot.type() == kWasmI64 ? kGpRegPair : kGpReg;
- LiftoffRegister reg = GetUnusedRegister(rc, pinned);
- LoadConstant(reg, slot.constant());
- return reg;
- }
+ if (slot.is_reg()) return slot.reg();
+ LiftoffRegister reg = GetUnusedRegister(reg_class_for(slot.type()), pinned);
+ if (slot.is_const()) {
+ LoadConstant(reg, slot.constant());
+ } else {
+ DCHECK(slot.is_stack());
+ Fill(reg, slot.offset(), slot.type());
}
- UNREACHABLE();
+ return reg;
}
LiftoffRegister LiftoffAssembler::LoadI64HalfIntoRegister(VarState slot,
@@ -535,7 +526,7 @@ LiftoffRegister LiftoffAssembler::LoadI64HalfIntoRegister(VarState slot,
if (slot.is_reg()) {
return half == kLowWord ? slot.reg().low() : slot.reg().high();
}
- LiftoffRegister dst = GetUnusedRegister(kGpReg);
+ LiftoffRegister dst = GetUnusedRegister(kGpReg, {});
if (slot.is_stack()) {
FillI64Half(dst.gp(), slot.offset(), half);
return dst;
@@ -548,33 +539,39 @@ LiftoffRegister LiftoffAssembler::LoadI64HalfIntoRegister(VarState slot,
return dst;
}
-LiftoffRegister LiftoffAssembler::PopToRegister(LiftoffRegList pinned) {
- DCHECK(!cache_state_.stack_state.empty());
- VarState slot = cache_state_.stack_state.back();
- if (slot.is_reg()) cache_state_.dec_used(slot.reg());
- cache_state_.stack_state.pop_back();
- return LoadToRegister(slot, pinned);
-}
-
LiftoffRegister LiftoffAssembler::PeekToRegister(int index,
LiftoffRegList pinned) {
DCHECK_LT(index, cache_state_.stack_state.size());
VarState& slot = cache_state_.stack_state.end()[-1 - index];
- if (slot.is_reg()) cache_state_.dec_used(slot.reg());
- LiftoffRegister reg = LoadToRegister(slot, pinned);
- if (!slot.is_reg()) {
- slot.MakeRegister(reg);
+ if (slot.is_reg()) {
+ cache_state_.dec_used(slot.reg());
+ return slot.reg();
}
+ LiftoffRegister reg = LoadToRegister(slot, pinned);
+ slot.MakeRegister(reg);
return reg;
}
void LiftoffAssembler::PrepareLoopArgs(int num) {
for (int i = 0; i < num; ++i) {
VarState& slot = cache_state_.stack_state.end()[-1 - i];
- if (!slot.is_const()) continue;
- RegClass rc =
- kNeedI64RegPair && slot.type() == kWasmI64 ? kGpRegPair : kGpReg;
- LiftoffRegister reg = GetUnusedRegister(rc);
+ if (slot.is_stack()) continue;
+ RegClass rc = reg_class_for(slot.type());
+ if (slot.is_reg()) {
+ if (cache_state_.get_use_count(slot.reg()) > 1) {
+ // If the register is used more than once, we cannot use it for the
+ // merge. Move it to an unused register instead.
+ LiftoffRegList pinned;
+ pinned.set(slot.reg());
+ LiftoffRegister dst_reg = GetUnusedRegister(rc, pinned);
+ Move(dst_reg, slot.reg(), slot.type());
+ cache_state_.dec_used(slot.reg());
+ cache_state_.inc_used(dst_reg);
+ slot.MakeRegister(dst_reg);
+ }
+ continue;
+ }
+ LiftoffRegister reg = GetUnusedRegister(rc, {});
LoadConstant(reg, slot.constant());
slot.MakeRegister(reg);
cache_state_.inc_used(reg);
@@ -724,6 +721,8 @@ void LiftoffAssembler::PrepareBuiltinCall(
PrepareStackTransfers(sig, call_descriptor, params.begin(), &stack_slots,
&stack_transfers, &param_regs);
// Create all the slots.
+ // Builtin stack parameters are pushed in reverse order.
+ stack_slots.Reverse();
stack_slots.Construct();
// Execute the stack transfers before filling the instance register.
stack_transfers.Execute();
@@ -742,13 +741,14 @@ void LiftoffAssembler::PrepareCall(const FunctionSig* sig,
constexpr size_t kInputShift = 1;
// Spill all cache slots which are not being used as parameters.
- // Don't update any register use counters, they will be reset later anyway.
- for (uint32_t idx = 0, end = cache_state_.stack_height() - num_params;
- idx < end; ++idx) {
- VarState& slot = cache_state_.stack_state[idx];
- if (!slot.is_reg()) continue;
- Spill(slot.offset(), slot.reg(), slot.type());
- slot.MakeStack();
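+ // Walk the stack from just below the parameters downwards, stopping as
+ // soon as no more registers are in use.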
+ for (VarState* it = cache_state_.stack_state.end() - 1 - num_params;
+ it >= cache_state_.stack_state.begin() &&
+ !cache_state_.used_registers.is_empty();
+ --it) {
+ if (!it->is_reg()) continue;
+ Spill(it->offset(), it->reg(), it->type());
+ cache_state_.dec_used(it->reg());
+ it->MakeStack();
}
LiftoffStackSlots stack_slots(this);
@@ -859,6 +859,10 @@ void LiftoffAssembler::Move(LiftoffRegister dst, LiftoffRegister src,
// Use the {StackTransferRecipe} to move pairs, as the registers in the
// pairs might overlap.
StackTransferRecipe(this).MoveRegister(dst, src, type);
+ } else if (kNeedS128RegPair && dst.is_fp_pair()) {
+ // Calling low_fp is fine; Move will check the type, map this FP register to
+ // its SIMD register, and use a SIMD move.
+ Move(dst.low_fp(), src.low_fp(), type);
} else if (dst.is_gp()) {
Move(dst.gp(), src.gp(), type);
} else {
diff --git a/chromium/v8/src/wasm/baseline/liftoff-assembler.h b/chromium/v8/src/wasm/baseline/liftoff-assembler.h
index 3377990496f..aad75b18597 100644
--- a/chromium/v8/src/wasm/baseline/liftoff-assembler.h
+++ b/chromium/v8/src/wasm/baseline/liftoff-assembler.h
@@ -56,20 +56,6 @@ class LiftoffAssembler : public TurboAssembler {
DCHECK(type_ == kWasmI32 || type_ == kWasmI64);
}
- bool operator==(const VarState& other) const {
- if (loc_ != other.loc_) return false;
- if (type_ != other.type_) return false;
- switch (loc_) {
- case kStack:
- return true;
- case kRegister:
- return reg_ == other.reg_;
- case kIntConst:
- return i32_const_ == other.i32_const_;
- }
- UNREACHABLE();
- }
-
bool is_stack() const { return loc_ == kStack; }
bool is_gp_reg() const { return loc_ == kRegister && reg_.is_gp(); }
bool is_fp_reg() const { return loc_ == kRegister && reg_.is_fp(); }
@@ -140,6 +126,8 @@ class LiftoffAssembler : public TurboAssembler {
CacheState() = default;
CacheState(CacheState&&) V8_NOEXCEPT = default;
CacheState& operator=(CacheState&&) V8_NOEXCEPT = default;
+ // Disallow copy construction.
+ CacheState(const CacheState&) = delete;
base::SmallVector<VarState, 8> stack_state;
LiftoffRegList used_registers;
@@ -277,14 +265,23 @@ class LiftoffAssembler : public TurboAssembler {
private:
// Make the copy assignment operator private (to be used from {Split()}).
CacheState& operator=(const CacheState&) V8_NOEXCEPT = default;
- // Disallow copy construction.
- CacheState(const CacheState&) = delete;
};
explicit LiftoffAssembler(std::unique_ptr<AssemblerBuffer>);
~LiftoffAssembler() override;
- LiftoffRegister PopToRegister(LiftoffRegList pinned = {});
+ LiftoffRegister LoadToRegister(VarState slot, LiftoffRegList pinned);
+
+ LiftoffRegister PopToRegister(LiftoffRegList pinned = {}) {
+ DCHECK(!cache_state_.stack_state.empty());
+ VarState slot = cache_state_.stack_state.back();
+ cache_state_.stack_state.pop_back();
+ if (slot.is_reg()) {
+ cache_state_.dec_used(slot.reg());
+ return slot.reg();
+ }
+ return LoadToRegister(slot, pinned);
+ }
// Returns the register which holds the value of stack slot {index}. If the
// value is not stored in a register yet, a register is allocated for it. The
@@ -340,7 +337,7 @@ class LiftoffAssembler : public TurboAssembler {
// possible.
LiftoffRegister GetUnusedRegister(
RegClass rc, std::initializer_list<LiftoffRegister> try_first,
- LiftoffRegList pinned = {}) {
+ LiftoffRegList pinned) {
for (LiftoffRegister reg : try_first) {
DCHECK_EQ(reg.reg_class(), rc);
if (cache_state_.is_free(reg)) return reg;
@@ -349,7 +346,7 @@ class LiftoffAssembler : public TurboAssembler {
}
// Get an unused register for class {rc}, potentially spilling to free one.
- LiftoffRegister GetUnusedRegister(RegClass rc, LiftoffRegList pinned = {}) {
+ LiftoffRegister GetUnusedRegister(RegClass rc, LiftoffRegList pinned) {
if (kNeedI64RegPair && rc == kGpRegPair) {
LiftoffRegList candidates = kGpCacheRegList;
Register low = pinned.set(GetUnusedRegister(candidates, pinned)).gp();
@@ -733,6 +730,15 @@ class LiftoffAssembler : public TurboAssembler {
inline void emit_f64_set_cond(Condition condition, Register dst,
DoubleRegister lhs, DoubleRegister rhs);
+ inline void LoadTransform(LiftoffRegister dst, Register src_addr,
+ Register offset_reg, uint32_t offset_imm,
+ LoadType type, LoadTransformationKind transform,
+ uint32_t* protected_load_pc);
+ inline void emit_s8x16_shuffle(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs,
+ const uint8_t shuffle[16]);
+ inline void emit_s8x16_swizzle(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs);
inline void emit_i8x16_splat(LiftoffRegister dst, LiftoffRegister src);
inline void emit_i16x8_splat(LiftoffRegister dst, LiftoffRegister src);
inline void emit_i32x4_splat(LiftoffRegister dst, LiftoffRegister src);
@@ -801,10 +807,21 @@ class LiftoffAssembler : public TurboAssembler {
inline void emit_s128_select(LiftoffRegister dst, LiftoffRegister src1,
LiftoffRegister src2, LiftoffRegister mask);
inline void emit_i8x16_neg(LiftoffRegister dst, LiftoffRegister src);
+ inline void emit_v8x16_anytrue(LiftoffRegister dst, LiftoffRegister src);
+ inline void emit_v8x16_alltrue(LiftoffRegister dst, LiftoffRegister src);
+ inline void emit_i8x16_bitmask(LiftoffRegister dst, LiftoffRegister src);
inline void emit_i8x16_shl(LiftoffRegister dst, LiftoffRegister lhs,
LiftoffRegister rhs);
inline void emit_i8x16_shli(LiftoffRegister dst, LiftoffRegister lhs,
int32_t rhs);
+ inline void emit_i8x16_shr_s(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs);
+ inline void emit_i8x16_shri_s(LiftoffRegister dst, LiftoffRegister lhs,
+ int32_t rhs);
+ inline void emit_i8x16_shr_u(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs);
+ inline void emit_i8x16_shri_u(LiftoffRegister dst, LiftoffRegister lhs,
+ int32_t rhs);
inline void emit_i8x16_add(LiftoffRegister dst, LiftoffRegister lhs,
LiftoffRegister rhs);
inline void emit_i8x16_add_saturate_s(LiftoffRegister dst,
@@ -832,10 +849,21 @@ class LiftoffAssembler : public TurboAssembler {
inline void emit_i8x16_max_u(LiftoffRegister dst, LiftoffRegister lhs,
LiftoffRegister rhs);
inline void emit_i16x8_neg(LiftoffRegister dst, LiftoffRegister src);
+ inline void emit_v16x8_anytrue(LiftoffRegister dst, LiftoffRegister src);
+ inline void emit_v16x8_alltrue(LiftoffRegister dst, LiftoffRegister src);
+ inline void emit_i16x8_bitmask(LiftoffRegister dst, LiftoffRegister src);
inline void emit_i16x8_shl(LiftoffRegister dst, LiftoffRegister lhs,
LiftoffRegister rhs);
inline void emit_i16x8_shli(LiftoffRegister dst, LiftoffRegister lhs,
int32_t rhs);
+ inline void emit_i16x8_shr_s(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs);
+ inline void emit_i16x8_shri_s(LiftoffRegister dst, LiftoffRegister lhs,
+ int32_t rhs);
+ inline void emit_i16x8_shr_u(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs);
+ inline void emit_i16x8_shri_u(LiftoffRegister dst, LiftoffRegister lhs,
+ int32_t rhs);
inline void emit_i16x8_add(LiftoffRegister dst, LiftoffRegister lhs,
LiftoffRegister rhs);
inline void emit_i16x8_add_saturate_s(LiftoffRegister dst,
@@ -863,10 +891,21 @@ class LiftoffAssembler : public TurboAssembler {
inline void emit_i16x8_max_u(LiftoffRegister dst, LiftoffRegister lhs,
LiftoffRegister rhs);
inline void emit_i32x4_neg(LiftoffRegister dst, LiftoffRegister src);
+ inline void emit_v32x4_anytrue(LiftoffRegister dst, LiftoffRegister src);
+ inline void emit_v32x4_alltrue(LiftoffRegister dst, LiftoffRegister src);
+ inline void emit_i32x4_bitmask(LiftoffRegister dst, LiftoffRegister src);
inline void emit_i32x4_shl(LiftoffRegister dst, LiftoffRegister lhs,
LiftoffRegister rhs);
inline void emit_i32x4_shli(LiftoffRegister dst, LiftoffRegister lhs,
int32_t rhs);
+ inline void emit_i32x4_shr_s(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs);
+ inline void emit_i32x4_shri_s(LiftoffRegister dst, LiftoffRegister lhs,
+ int32_t rhs);
+ inline void emit_i32x4_shr_u(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs);
+ inline void emit_i32x4_shri_u(LiftoffRegister dst, LiftoffRegister lhs,
+ int32_t rhs);
inline void emit_i32x4_add(LiftoffRegister dst, LiftoffRegister lhs,
LiftoffRegister rhs);
inline void emit_i32x4_sub(LiftoffRegister dst, LiftoffRegister lhs,
@@ -886,6 +925,14 @@ class LiftoffAssembler : public TurboAssembler {
LiftoffRegister rhs);
inline void emit_i64x2_shli(LiftoffRegister dst, LiftoffRegister lhs,
int32_t rhs);
+ inline void emit_i64x2_shr_s(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs);
+ inline void emit_i64x2_shri_s(LiftoffRegister dst, LiftoffRegister lhs,
+ int32_t rhs);
+ inline void emit_i64x2_shr_u(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs);
+ inline void emit_i64x2_shri_u(LiftoffRegister dst, LiftoffRegister lhs,
+ int32_t rhs);
inline void emit_i64x2_add(LiftoffRegister dst, LiftoffRegister lhs,
LiftoffRegister rhs);
inline void emit_i64x2_sub(LiftoffRegister dst, LiftoffRegister lhs,
@@ -922,6 +969,14 @@ class LiftoffAssembler : public TurboAssembler {
LiftoffRegister rhs);
inline void emit_f64x2_max(LiftoffRegister dst, LiftoffRegister lhs,
LiftoffRegister rhs);
+ inline void emit_i32x4_sconvert_f32x4(LiftoffRegister dst,
+ LiftoffRegister src);
+ inline void emit_i32x4_uconvert_f32x4(LiftoffRegister dst,
+ LiftoffRegister src);
+ inline void emit_f32x4_sconvert_i32x4(LiftoffRegister dst,
+ LiftoffRegister src);
+ inline void emit_f32x4_uconvert_i32x4(LiftoffRegister dst,
+ LiftoffRegister src);
inline void emit_i8x16_sconvert_i16x8(LiftoffRegister dst,
LiftoffRegister lhs,
LiftoffRegister rhs);
@@ -1074,7 +1129,6 @@ class LiftoffAssembler : public TurboAssembler {
}
private:
- LiftoffRegister LoadToRegister(VarState slot, LiftoffRegList pinned);
LiftoffRegister LoadI64HalfIntoRegister(VarState slot, RegPairHalf half);
uint32_t num_locals_ = 0;
@@ -1090,8 +1144,8 @@ class LiftoffAssembler : public TurboAssembler {
LiftoffBailoutReason bailout_reason_ = kSuccess;
const char* bailout_detail_ = nullptr;
- LiftoffRegister SpillOneRegister(LiftoffRegList candidates,
- LiftoffRegList pinned);
+ V8_NOINLINE LiftoffRegister SpillOneRegister(LiftoffRegList candidates,
+ LiftoffRegList pinned);
// Spill one or two fp registers to get a pair of adjacent fp registers.
LiftoffRegister SpillAdjacentFpRegisters(LiftoffRegList pinned);
};
@@ -1212,19 +1266,19 @@ class LiftoffStackSlots {
}
void Add(const LiftoffAssembler::VarState& src) { slots_.emplace_back(src); }
+ void Reverse() { std::reverse(slots_.begin(), slots_.end()); }
+
inline void Construct();
private:
struct Slot {
- // Allow move construction.
- Slot(Slot&&) V8_NOEXCEPT = default;
Slot(const LiftoffAssembler::VarState& src, uint32_t src_offset,
RegPairHalf half)
: src_(src), src_offset_(src_offset), half_(half) {}
explicit Slot(const LiftoffAssembler::VarState& src)
: src_(src), half_(kLowWord) {}
- const LiftoffAssembler::VarState src_;
+ LiftoffAssembler::VarState src_;
uint32_t src_offset_ = 0;
RegPairHalf half_;
};
diff --git a/chromium/v8/src/wasm/baseline/liftoff-compiler.cc b/chromium/v8/src/wasm/baseline/liftoff-compiler.cc
index 4d0d9dbceca..d2beb398c15 100644
--- a/chromium/v8/src/wasm/baseline/liftoff-compiler.cc
+++ b/chromium/v8/src/wasm/baseline/liftoff-compiler.cc
@@ -8,6 +8,7 @@
#include "src/codegen/assembler-inl.h"
// TODO(clemensb): Remove dependences on compiler stuff.
#include "src/codegen/interface-descriptors.h"
+#include "src/codegen/machine-type.h"
#include "src/codegen/macro-assembler-inl.h"
#include "src/compiler/linkage.h"
#include "src/compiler/wasm-compiler.h"
@@ -26,7 +27,7 @@
#include "src/wasm/wasm-engine.h"
#include "src/wasm/wasm-linkage.h"
#include "src/wasm/wasm-objects.h"
-#include "src/wasm/wasm-opcodes.h"
+#include "src/wasm/wasm-opcodes-inl.h"
namespace v8 {
namespace internal {
@@ -280,13 +281,15 @@ class LiftoffCompiler {
// For debugging, we need to spill registers before a trap, to be able to
// inspect them.
- struct SpilledRegistersBeforeTrap {
+ struct SpilledRegistersBeforeTrap : public ZoneObject {
struct Entry {
int offset;
LiftoffRegister reg;
ValueType type;
};
- std::vector<Entry> entries;
+ ZoneVector<Entry> entries;
+
+ explicit SpilledRegistersBeforeTrap(Zone* zone) : entries(zone) {}
};
struct OutOfLineCode {
@@ -298,13 +301,13 @@ class LiftoffCompiler {
uint32_t pc; // for trap handler.
// These two pointers will only be used for debug code:
DebugSideTableBuilder::EntryBuilder* debug_sidetable_entry_builder;
- std::unique_ptr<SpilledRegistersBeforeTrap> spilled_registers;
+ SpilledRegistersBeforeTrap* spilled_registers;
// Named constructors:
static OutOfLineCode Trap(
WasmCode::RuntimeStubId s, WasmCodePosition pos, uint32_t pc,
DebugSideTableBuilder::EntryBuilder* debug_sidetable_entry_builder,
- std::unique_ptr<SpilledRegistersBeforeTrap> spilled_registers) {
+ SpilledRegistersBeforeTrap* spilled_registers) {
DCHECK_LT(0, pos);
return {{},
{},
@@ -313,13 +316,13 @@ class LiftoffCompiler {
{},
pc,
debug_sidetable_entry_builder,
- std::move(spilled_registers)};
+ spilled_registers};
}
static OutOfLineCode StackCheck(
WasmCodePosition pos, LiftoffRegList regs,
DebugSideTableBuilder::EntryBuilder* debug_sidetable_entry_builder) {
return {{}, {}, WasmCode::kWasmStackGuard, pos,
- regs, 0, debug_sidetable_entry_builder, {}};
+ regs, 0, debug_sidetable_entry_builder, nullptr};
}
};
@@ -335,6 +338,9 @@ class LiftoffCompiler {
env_(env),
debug_sidetable_builder_(debug_sidetable_builder),
for_debugging_(for_debugging),
+ out_of_line_code_(compilation_zone),
+ source_position_table_builder_(compilation_zone),
+ protected_instructions_(compilation_zone),
compilation_zone_(compilation_zone),
safepoint_table_builder_(compilation_zone_),
next_breakpoint_ptr_(breakpoints.begin()),
@@ -391,12 +397,10 @@ class LiftoffCompiler {
switch (type.kind()) {
case ValueType::kS128:
return kSimd;
- case ValueType::kAnyRef:
- case ValueType::kFuncRef:
- case ValueType::kNullRef:
- return kAnyRef;
- case ValueType::kExnRef:
- return kExceptionHandling;
+ case ValueType::kOptRef:
+ case ValueType::kRef:
+ // TODO(7748): Refine this.
+ return kRefTypes;
case ValueType::kBottom:
return kMultiValue;
default:
@@ -418,7 +422,7 @@ class LiftoffCompiler {
}
LiftoffBailoutReason bailout_reason = BailoutReasonForType(type);
EmbeddedVector<char, 128> buffer;
- SNPrintF(buffer, "%s %s", type.type_name(), context);
+ SNPrintF(buffer, "%s %s", type.type_name().c_str(), context);
unsupported(decoder, bailout_reason, buffer.begin());
return false;
}
@@ -495,7 +499,7 @@ class LiftoffCompiler {
position, __ cache_state()->used_registers,
RegisterDebugSideTableEntry(DebugSideTableBuilder::kAssumeSpilling)));
OutOfLineCode& ool = out_of_line_code_.back();
- Register limit_address = __ GetUnusedRegister(kGpReg).gp();
+ Register limit_address = __ GetUnusedRegister(kGpReg, {}).gp();
LOAD_INSTANCE_FIELD(limit_address, StackLimitAddress, kSystemPointerSize);
__ StackCheck(ool.label.get(), limit_address);
__ bind(ool.continuation.get());
@@ -519,6 +523,15 @@ class LiftoffCompiler {
return false;
}
+ void TraceFunctionEntry(FullDecoder* decoder) {
+ DEBUG_CODE_COMMENT("trace function entry");
+ __ SpillAllRegisters();
+ source_position_table_builder_.AddPosition(
+ __ pc_offset(), SourcePosition(decoder->position()), false);
+ __ CallRuntimeStub(WasmCode::kWasmTraceEnter);
+ safepoint_table_builder_.DefineSafepoint(&asm_, Safepoint::kNoLazyDeopt);
+ }
+
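// The sequence above is the standard Liftoff runtime-stub call shape: spill
// the register cache so the runtime sees a consistent frame, record a source
// position for the call, invoke the stub, and define a safepoint so the stack
// can be walked during GC. TraceFunctionExit further down mirrors it on
// return.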
void StartFunctionBody(FullDecoder* decoder, Control* block) {
for (uint32_t i = 0; i < __ num_locals(); ++i) {
if (!CheckSupportedType(decoder, kSupportedTypes, __ local_type(i),
@@ -593,6 +606,8 @@ class LiftoffCompiler {
// is never a position of any instruction in the function.
StackCheck(0);
+ if (FLAG_trace_wasm) TraceFunctionEntry(decoder);
+
// If we are generating debug code, do check the "hook on function call"
// flag. If set, trigger a break.
if (V8_UNLIKELY(for_debugging_)) {
@@ -604,7 +619,7 @@ class LiftoffCompiler {
*next_breakpoint_ptr_ == decoder->position());
if (!has_breakpoint) {
DEBUG_CODE_COMMENT("check hook on function call");
- Register flag = __ GetUnusedRegister(kGpReg).gp();
+ Register flag = __ GetUnusedRegister(kGpReg, {}).gp();
LOAD_INSTANCE_FIELD(flag, HookOnFunctionCallAddress,
kSystemPointerSize);
Label no_break;
@@ -693,9 +708,10 @@ class LiftoffCompiler {
asm_.AbortCompilation();
}
- void NextInstruction(FullDecoder* decoder, WasmOpcode opcode) {
+ V8_NOINLINE void EmitDebuggingInfo(FullDecoder* decoder, WasmOpcode opcode) {
+ DCHECK(V8_UNLIKELY(for_debugging_));
bool breakpoint = false;
- if (V8_UNLIKELY(next_breakpoint_ptr_)) {
+ if (next_breakpoint_ptr_) {
if (*next_breakpoint_ptr_ == 0) {
// A single breakpoint at offset 0 indicates stepping.
DCHECK_EQ(next_breakpoint_ptr_ + 1, next_breakpoint_end_);
@@ -720,6 +736,12 @@ class LiftoffCompiler {
}
// Potentially generate the source position to OSR to this instruction.
MaybeGenerateExtraSourcePos(decoder, !breakpoint);
+ }
+
+ void NextInstruction(FullDecoder* decoder, WasmOpcode opcode) {
+ // Add a single check, so that the fast path can be inlined while
+ // {EmitDebuggingInfo} stays outlined.
+ if (V8_UNLIKELY(for_debugging_)) EmitDebuggingInfo(decoder, opcode);
TraceCacheState(decoder);
#ifdef DEBUG
SLOW_DCHECK(__ ValidateCacheState());
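// The NextInstruction / EmitDebuggingInfo split above is the usual
// "inline the hot check, outline the cold body" idiom. A generic sketch,
// assuming nothing beyond the V8_UNLIKELY / V8_NOINLINE macros:
V8_NOINLINE void HandleDebugEventSlow();  // breakpoints, tracing, ...

inline void HandleEvent(bool for_debugging) {
  // One predictable branch on the hot path; the cold body stays out of line
  // so this caller remains small enough to inline everywhere.
  if (V8_UNLIKELY(for_debugging)) HandleDebugEventSlow();
}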
@@ -923,10 +945,10 @@ class LiftoffCompiler {
constexpr RegClass result_rc = reg_class_for(result_type);
LiftoffRegister src = __ PopToRegister();
LiftoffRegister dst = src_rc == result_rc
- ? __ GetUnusedRegister(result_rc, {src})
- : __ GetUnusedRegister(result_rc);
+ ? __ GetUnusedRegister(result_rc, {src}, {})
+ : __ GetUnusedRegister(result_rc, {});
CallEmitFn(fn, dst, src);
- __ PushRegister(ValueType(result_type), dst);
+ __ PushRegister(ValueType::Primitive(result_type), dst);
}
template <ValueType::Kind type>
@@ -936,9 +958,9 @@ class LiftoffCompiler {
auto emit_with_c_fallback = [=](LiftoffRegister dst, LiftoffRegister src) {
if ((asm_.*emit_fn)(dst.fp(), src.fp())) return;
ExternalReference ext_ref = fallback_fn();
- ValueType sig_reps[] = {ValueType(type)};
+ ValueType sig_reps[] = {ValueType::Primitive(type)};
FunctionSig sig(0, 1, sig_reps);
- GenerateCCall(&dst, &sig, ValueType(type), &src, ext_ref);
+ GenerateCCall(&dst, &sig, ValueType::Primitive(type), &src, ext_ref);
};
EmitUnOp<type, type>(emit_with_c_fallback);
}
@@ -951,8 +973,9 @@ class LiftoffCompiler {
static constexpr RegClass src_rc = reg_class_for(src_type);
static constexpr RegClass dst_rc = reg_class_for(dst_type);
LiftoffRegister src = __ PopToRegister();
- LiftoffRegister dst = src_rc == dst_rc ? __ GetUnusedRegister(dst_rc, {src})
- : __ GetUnusedRegister(dst_rc);
+ LiftoffRegister dst = src_rc == dst_rc
+ ? __ GetUnusedRegister(dst_rc, {src}, {})
+ : __ GetUnusedRegister(dst_rc, {});
DCHECK_EQ(!!can_trap, trap_position > 0);
Label* trap = can_trap ? AddOutOfLineTrap(
trap_position,
@@ -963,20 +986,22 @@ class LiftoffCompiler {
ExternalReference ext_ref = fallback_fn();
if (can_trap) {
// External references for potentially trapping conversions return int.
- ValueType sig_reps[] = {kWasmI32, ValueType(src_type)};
+ ValueType sig_reps[] = {kWasmI32, ValueType::Primitive(src_type)};
FunctionSig sig(1, 1, sig_reps);
LiftoffRegister ret_reg =
__ GetUnusedRegister(kGpReg, LiftoffRegList::ForRegs(dst));
LiftoffRegister dst_regs[] = {ret_reg, dst};
- GenerateCCall(dst_regs, &sig, ValueType(dst_type), &src, ext_ref);
+ GenerateCCall(dst_regs, &sig, ValueType::Primitive(dst_type), &src,
+ ext_ref);
__ emit_cond_jump(kEqual, trap, kWasmI32, ret_reg.gp());
} else {
- ValueType sig_reps[] = {ValueType(src_type)};
+ ValueType sig_reps[] = {ValueType::Primitive(src_type)};
FunctionSig sig(0, 1, sig_reps);
- GenerateCCall(&dst, &sig, ValueType(dst_type), &src, ext_ref);
+ GenerateCCall(&dst, &sig, ValueType::Primitive(dst_type), &src,
+ ext_ref);
}
}
- __ PushRegister(ValueType(dst_type), dst);
+ __ PushRegister(ValueType::Primitive(dst_type), dst);
}
void UnOp(FullDecoder* decoder, WasmOpcode opcode, const Value& value,
@@ -1088,14 +1113,22 @@ class LiftoffCompiler {
__ emit_type_conversion(kExprI64UConvertI32, dst, c_call_dst,
nullptr);
});
- case kExprI32SConvertSatF32:
- case kExprI32UConvertSatF32:
- case kExprI32SConvertSatF64:
- case kExprI32UConvertSatF64:
- case kExprI64SConvertSatF32:
- case kExprI64UConvertSatF32:
- case kExprI64SConvertSatF64:
- case kExprI64UConvertSatF64:
+ CASE_TYPE_CONVERSION(I32SConvertSatF32, I32, F32, nullptr, kNoTrap)
+ CASE_TYPE_CONVERSION(I32UConvertSatF32, I32, F32, nullptr, kNoTrap)
+ CASE_TYPE_CONVERSION(I32SConvertSatF64, I32, F64, nullptr, kNoTrap)
+ CASE_TYPE_CONVERSION(I32UConvertSatF64, I32, F64, nullptr, kNoTrap)
+ CASE_TYPE_CONVERSION(I64SConvertSatF32, I64, F32,
+ &ExternalReference::wasm_float32_to_int64_sat,
+ kNoTrap)
+ CASE_TYPE_CONVERSION(I64UConvertSatF32, I64, F32,
+ &ExternalReference::wasm_float32_to_uint64_sat,
+ kNoTrap)
+ CASE_TYPE_CONVERSION(I64SConvertSatF64, I64, F64,
+ &ExternalReference::wasm_float64_to_int64_sat,
+ kNoTrap)
+ CASE_TYPE_CONVERSION(I64UConvertSatF64, I64, F64,
+ &ExternalReference::wasm_float64_to_uint64_sat,
+ kNoTrap)
return unsupported(decoder, kNonTrappingFloatToInt,
WasmOpcodes::OpcodeName(opcode));
default:
@@ -1122,11 +1155,11 @@ class LiftoffCompiler {
LiftoffRegister lhs = __ PopToRegister();
LiftoffRegister dst = src_rc == result_rc
- ? __ GetUnusedRegister(result_rc, {lhs})
- : __ GetUnusedRegister(result_rc);
+ ? __ GetUnusedRegister(result_rc, {lhs}, {})
+ : __ GetUnusedRegister(result_rc, {});
CallEmitFn(fnImm, dst, lhs, imm);
- __ PushRegister(ValueType(result_type), dst);
+ __ PushRegister(ValueType::Primitive(result_type), dst);
} else {
// The RHS was not an immediate.
EmitBinOp<src_type, result_type>(fn);
@@ -1141,13 +1174,13 @@ class LiftoffCompiler {
LiftoffRegister rhs = __ PopToRegister();
LiftoffRegister lhs = __ PopToRegister(LiftoffRegList::ForRegs(rhs));
LiftoffRegister dst = src_rc == result_rc
- ? __ GetUnusedRegister(result_rc, {lhs, rhs})
- : __ GetUnusedRegister(result_rc);
+ ? __ GetUnusedRegister(result_rc, {lhs, rhs}, {})
+ : __ GetUnusedRegister(result_rc, {});
if (swap_lhs_rhs) std::swap(lhs, rhs);
CallEmitFn(fn, dst, lhs, rhs);
- __ PushRegister(ValueType(result_type), dst);
+ __ PushRegister(ValueType::Primitive(result_type), dst);
}
void EmitDivOrRem64CCall(LiftoffRegister dst, LiftoffRegister lhs,
@@ -1483,34 +1516,34 @@ class LiftoffCompiler {
if (value_i32 == value) {
__ PushConstant(kWasmI64, value_i32);
} else {
- LiftoffRegister reg = __ GetUnusedRegister(reg_class_for(kWasmI64));
+ LiftoffRegister reg = __ GetUnusedRegister(reg_class_for(kWasmI64), {});
__ LoadConstant(reg, WasmValue(value));
__ PushRegister(kWasmI64, reg);
}
}
void F32Const(FullDecoder* decoder, Value* result, float value) {
- LiftoffRegister reg = __ GetUnusedRegister(kFpReg);
+ LiftoffRegister reg = __ GetUnusedRegister(kFpReg, {});
__ LoadConstant(reg, WasmValue(value));
__ PushRegister(kWasmF32, reg);
}
void F64Const(FullDecoder* decoder, Value* result, double value) {
- LiftoffRegister reg = __ GetUnusedRegister(kFpReg);
+ LiftoffRegister reg = __ GetUnusedRegister(kFpReg, {});
__ LoadConstant(reg, WasmValue(value));
__ PushRegister(kWasmF64, reg);
}
void RefNull(FullDecoder* decoder, Value* result) {
- unsupported(decoder, kAnyRef, "ref_null");
+ unsupported(decoder, kRefTypes, "ref_null");
}
void RefFunc(FullDecoder* decoder, uint32_t function_index, Value* result) {
- unsupported(decoder, kAnyRef, "func");
+ unsupported(decoder, kRefTypes, "func");
}
void RefAsNonNull(FullDecoder* decoder, const Value& arg, Value* result) {
- unsupported(decoder, kAnyRef, "ref.as_non_null");
+ unsupported(decoder, kRefTypes, "ref.as_non_null");
}
void Drop(FullDecoder* decoder, const Value& value) {
@@ -1520,7 +1553,44 @@ class LiftoffCompiler {
__ cache_state()->stack_state.pop_back();
}
+ void TraceFunctionExit(FullDecoder* decoder) {
+ DEBUG_CODE_COMMENT("trace function exit");
+ // Before making the runtime call, spill all cache registers.
+ __ SpillAllRegisters();
+ LiftoffRegList pinned;
+ // Get a register to hold the stack slot for the return value.
+ LiftoffRegister info = pinned.set(__ GetUnusedRegister(kGpReg, pinned));
+ __ AllocateStackSlot(info.gp(), sizeof(int64_t));
+
+ // Store the return value if there is exactly one. Multiple return values
+ // are not handled yet.
+ size_t num_returns = decoder->sig_->return_count();
+ if (num_returns == 1) {
+ ValueType return_type = decoder->sig_->GetReturn(0);
+ LiftoffRegister return_reg =
+ __ LoadToRegister(__ cache_state()->stack_state.back(), pinned);
+ __ Store(info.gp(), no_reg, 0, return_reg,
+ StoreType::ForValueType(return_type), pinned);
+ }
+ // Put the parameter in its place.
+ WasmTraceExitDescriptor descriptor;
+ DCHECK_EQ(0, descriptor.GetStackParameterCount());
+ DCHECK_EQ(1, descriptor.GetRegisterParameterCount());
+ Register param_reg = descriptor.GetRegisterParameter(0);
+ if (info.gp() != param_reg) {
+ __ Move(param_reg, info.gp(), LiftoffAssembler::kWasmIntPtr);
+ }
+
+ source_position_table_builder_.AddPosition(
+ __ pc_offset(), SourcePosition(decoder->position()), false);
+ __ CallRuntimeStub(WasmCode::kWasmTraceExit);
+ safepoint_table_builder_.DefineSafepoint(&asm_, Safepoint::kNoLazyDeopt);
+
+ __ DeallocateStackSlot(sizeof(int64_t));
+ }
+
void ReturnImpl(FullDecoder* decoder) {
+ if (FLAG_trace_wasm) TraceFunctionExit(decoder);
size_t num_returns = decoder->sig_->return_count();
if (num_returns > 0) __ MoveToReturnLocations(decoder->sig_, descriptor_);
DEBUG_CODE_COMMENT("leave frame");
@@ -1546,7 +1616,7 @@ class LiftoffCompiler {
break;
case kStack: {
auto rc = reg_class_for(imm.type);
- LiftoffRegister reg = __ GetUnusedRegister(rc);
+ LiftoffRegister reg = __ GetUnusedRegister(rc, {});
__ Fill(reg, slot.offset(), imm.type);
__ PushRegister(slot.type(), reg);
break;
@@ -1570,7 +1640,7 @@ class LiftoffCompiler {
}
DCHECK_EQ(type, __ local_type(local_index));
RegClass rc = reg_class_for(type);
- LiftoffRegister dst_reg = __ GetUnusedRegister(rc);
+ LiftoffRegister dst_reg = __ GetUnusedRegister(rc, {});
__ Fill(dst_reg, src_slot.offset(), type);
*dst_slot = LiftoffAssembler::VarState(type, dst_reg, dst_slot->offset());
__ cache_state()->inc_used(dst_reg);
@@ -1607,9 +1677,19 @@ class LiftoffCompiler {
LocalSet(imm.index, true);
}
+ void AllocateLocals(FullDecoder* decoder, Vector<Value> local_values) {
+ // TODO(7748): Introduce typed functions bailout reason
+ unsupported(decoder, kGC, "let");
+ }
+
+ void DeallocateLocals(FullDecoder* decoder, uint32_t count) {
+ // TODO(7748): Introduce typed functions bailout reason
+ unsupported(decoder, kGC, "let");
+ }
+
Register GetGlobalBaseAndOffset(const WasmGlobal* global,
LiftoffRegList* pinned, uint32_t* offset) {
- Register addr = pinned->set(__ GetUnusedRegister(kGpReg)).gp();
+ Register addr = pinned->set(__ GetUnusedRegister(kGpReg, {})).gp();
if (global->mutability && global->imported) {
LOAD_INSTANCE_FIELD(addr, ImportedMutableGlobals, kSystemPointerSize);
__ Load(LiftoffRegister(addr), addr, no_reg,
@@ -1652,12 +1732,12 @@ class LiftoffCompiler {
void TableGet(FullDecoder* decoder, const Value& index, Value* result,
const TableIndexImmediate<validate>& imm) {
- unsupported(decoder, kAnyRef, "table_get");
+ unsupported(decoder, kRefTypes, "table_get");
}
void TableSet(FullDecoder* decoder, const Value& index, const Value& value,
const TableIndexImmediate<validate>& imm) {
- unsupported(decoder, kAnyRef, "table_set");
+ unsupported(decoder, kRefTypes, "table_set");
}
void Unreachable(FullDecoder* decoder) {
@@ -1675,8 +1755,8 @@ class LiftoffCompiler {
DCHECK_EQ(type, __ cache_state()->stack_state.end()[-2].type());
LiftoffRegister false_value = pinned.set(__ PopToRegister(pinned));
LiftoffRegister true_value = __ PopToRegister(pinned);
- LiftoffRegister dst =
- __ GetUnusedRegister(true_value.reg_class(), {true_value, false_value});
+ LiftoffRegister dst = __ GetUnusedRegister(true_value.reg_class(),
+ {true_value, false_value}, {});
__ PushRegister(type, dst);
// Now emit the actual code to move either {true_value} or {false_value}
@@ -1819,11 +1899,12 @@ class LiftoffCompiler {
__ cache_state()->Steal(c->else_state->state);
}
- std::unique_ptr<SpilledRegistersBeforeTrap> GetSpilledRegistersBeforeTrap() {
- if (V8_LIKELY(!for_debugging_)) return nullptr;
+ SpilledRegistersBeforeTrap* GetSpilledRegistersBeforeTrap() {
+ DCHECK(for_debugging_);
// If we are generating debugging code, we really need to spill all
// registers to make them inspectable when stopping at the trap.
- auto spilled = std::make_unique<SpilledRegistersBeforeTrap>();
+ auto* spilled =
+ new (compilation_zone_) SpilledRegistersBeforeTrap(compilation_zone_);
for (uint32_t i = 0, e = __ cache_state()->stack_height(); i < e; ++i) {
auto& slot = __ cache_state()->stack_state[i];
if (!slot.is_reg()) continue;
@@ -1840,7 +1921,8 @@ class LiftoffCompiler {
out_of_line_code_.push_back(OutOfLineCode::Trap(
stub, position, pc,
RegisterDebugSideTableEntry(DebugSideTableBuilder::kAssumeSpilling),
- GetSpilledRegistersBeforeTrap()));
+ V8_UNLIKELY(for_debugging_) ? GetSpilledRegistersBeforeTrap()
+ : nullptr));
return out_of_line_code_.back().label.get();
}
@@ -1852,7 +1934,7 @@ class LiftoffCompiler {
uint32_t offset, Register index, LiftoffRegList pinned,
ForceCheck force_check) {
const bool statically_oob =
- !base::IsInBounds(offset, access_size, env_->max_memory_size);
+ !base::IsInBounds<uint64_t>(offset, access_size, env_->max_memory_size);
if (!force_check && !statically_oob &&
(!FLAG_wasm_bounds_checks || env_->use_trap_handler)) {
@@ -1868,10 +1950,7 @@ class LiftoffCompiler {
if (statically_oob) {
__ emit_jump(trap_label);
- Control* current_block = decoder->control_at(0);
- if (current_block->reachable()) {
- current_block->reachability = kSpecOnlyReachable;
- }
+ decoder->SetSucceedingCodeDynamicallyUnreachable();
return true;
}
@@ -2033,11 +2112,54 @@ class LiftoffCompiler {
offset, decoder->position());
}
}
+
void LoadTransform(FullDecoder* decoder, LoadType type,
LoadTransformationKind transform,
const MemoryAccessImmediate<validate>& imm,
const Value& index_val, Value* result) {
- unsupported(decoder, kSimd, "simd");
+ // LoadTransform requires SIMD support, so check for it here. If
+ // unsupported, bail out and let TurboFan lower the code.
+ if (!CheckSupportedType(decoder, kSupportedTypes, kWasmS128,
+ "LoadTransform")) {
+ return;
+ }
+
+ LiftoffRegList pinned;
+ Register index = pinned.set(__ PopToRegister()).gp();
+ // For load splats, LoadType gives the size of the load; for load extends,
+ // LoadType gives the lane size, and the access always loads 8 bytes.
+ uint32_t access_size =
+ transform == LoadTransformationKind::kExtend ? 8 : type.size();
+ if (BoundsCheckMem(decoder, access_size, imm.offset, index, pinned,
+ kDontForceCheck)) {
+ return;
+ }
+
+ uint32_t offset = imm.offset;
+ index = AddMemoryMasking(index, &offset, &pinned);
+ DEBUG_CODE_COMMENT("LoadTransform from memory");
+ Register addr = __ GetUnusedRegister(kGpReg, pinned).gp();
+ LOAD_INSTANCE_FIELD(addr, MemoryStart, kSystemPointerSize);
+ LiftoffRegister value = __ GetUnusedRegister(reg_class_for(kS128), {});
+ uint32_t protected_load_pc = 0;
+ __ LoadTransform(value, addr, index, offset, type, transform,
+ &protected_load_pc);
+
+ if (env_->use_trap_handler) {
+ AddOutOfLineTrap(decoder->position(),
+ WasmCode::kThrowWasmTrapMemOutOfBounds,
+ protected_load_pc);
+ }
+ __ PushRegister(ValueType::Primitive(kS128), value);
+
+ if (FLAG_trace_wasm_memory) {
+ // Load extends are again special: they always read a full 64-bit value.
+ MachineRepresentation mem_rep =
+ transform == LoadTransformationKind::kExtend
+ ? MachineRepresentation::kWord64
+ : type.mem_type().representation();
+ TraceMemoryOperation(false, mem_rep, index, offset, decoder->position());
+ }
}
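// The access-size rule used above, as a standalone sketch (illustrative
// helper, not V8 API): load splats read exactly type.size() bytes, while
// every load extend reads a full 64-bit value and widens the lanes later.
uint32_t LoadTransformAccessSize(LoadTransformationKind transform,
                                 LoadType type) {
  return transform == LoadTransformationKind::kExtend ? 8 : type.size();
}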
void StoreMem(FullDecoder* decoder, StoreType type,
@@ -2075,7 +2197,7 @@ class LiftoffCompiler {
}
void CurrentMemoryPages(FullDecoder* decoder, Value* result) {
- Register mem_size = __ GetUnusedRegister(kGpReg).gp();
+ Register mem_size = __ GetUnusedRegister(kGpReg, {}).gp();
LOAD_INSTANCE_FIELD(mem_size, MemorySize, kSystemPointerSize);
__ emit_ptrsize_shri(mem_size, mem_size, kWasmPageSizeLog2);
__ PushRegister(kWasmI32, LiftoffRegister(mem_size));
@@ -2184,7 +2306,7 @@ class LiftoffCompiler {
const CallIndirectImmediate<validate>& imm,
const Value args[], Value returns[]) {
if (imm.table_index != 0) {
- return unsupported(decoder, kAnyRef, "table index != 0");
+ return unsupported(decoder, kRefTypes, "table index != 0");
}
for (ValueType ret : imm.sig->returns()) {
if (!CheckSupportedType(decoder, kSupportedTypes, ret, "return")) {
@@ -2326,7 +2448,7 @@ class LiftoffCompiler {
}
void BrOnNull(FullDecoder* decoder, const Value& ref_object, uint32_t depth) {
- unsupported(decoder, kAnyRef, "br_on_null");
+ unsupported(decoder, kRefTypes, "br_on_null");
}
template <ValueType::Kind src_type, ValueType::Kind result_type,
@@ -2344,9 +2466,9 @@ class LiftoffCompiler {
src_rc == result_rc
? __ GetUnusedRegister(result_rc, {src3},
LiftoffRegList::ForRegs(src1, src2))
- : __ GetUnusedRegister(result_rc);
+ : __ GetUnusedRegister(result_rc, {});
CallEmitFn(fn, dst, src1, src2, src3);
- __ PushRegister(ValueType(result_type), dst);
+ __ PushRegister(ValueType::Primitive(result_type), dst);
}
template <typename EmitFn, typename EmitFnImm>
@@ -2360,14 +2482,14 @@ class LiftoffCompiler {
int32_t imm = rhs_slot.i32_const();
LiftoffRegister operand = __ PopToRegister();
- LiftoffRegister dst = __ GetUnusedRegister(result_rc, {operand});
+ LiftoffRegister dst = __ GetUnusedRegister(result_rc, {operand}, {});
CallEmitFn(fnImm, dst, operand, imm);
__ PushRegister(kWasmS128, dst);
} else {
LiftoffRegister count = __ PopToRegister();
LiftoffRegister operand = __ PopToRegister();
- LiftoffRegister dst = __ GetUnusedRegister(result_rc, {operand});
+ LiftoffRegister dst = __ GetUnusedRegister(result_rc, {operand}, {});
CallEmitFn(fn, dst, operand, count);
__ PushRegister(kWasmS128, dst);
@@ -2380,6 +2502,8 @@ class LiftoffCompiler {
return unsupported(decoder, kSimd, "simd");
}
switch (opcode) {
+ case wasm::kExprS8x16Swizzle:
+ return EmitBinOp<kS128, kS128>(&LiftoffAssembler::emit_s8x16_swizzle);
case wasm::kExprI8x16Splat:
return EmitUnOp<kI32, kS128>(&LiftoffAssembler::emit_i8x16_splat);
case wasm::kExprI16x8Splat:
@@ -2500,9 +2624,21 @@ class LiftoffCompiler {
return EmitTerOp<kS128, kS128>(&LiftoffAssembler::emit_s128_select);
case wasm::kExprI8x16Neg:
return EmitUnOp<kS128, kS128>(&LiftoffAssembler::emit_i8x16_neg);
+ case wasm::kExprV8x16AnyTrue:
+ return EmitUnOp<kS128, kI32>(&LiftoffAssembler::emit_v8x16_anytrue);
+ case wasm::kExprV8x16AllTrue:
+ return EmitUnOp<kS128, kI32>(&LiftoffAssembler::emit_v8x16_alltrue);
+ case wasm::kExprI8x16BitMask:
+ return EmitUnOp<kS128, kI32>(&LiftoffAssembler::emit_i8x16_bitmask);
case wasm::kExprI8x16Shl:
return EmitSimdShiftOp(&LiftoffAssembler::emit_i8x16_shl,
&LiftoffAssembler::emit_i8x16_shli);
+ case wasm::kExprI8x16ShrS:
+ return EmitSimdShiftOp(&LiftoffAssembler::emit_i8x16_shr_s,
+ &LiftoffAssembler::emit_i8x16_shri_s);
+ case wasm::kExprI8x16ShrU:
+ return EmitSimdShiftOp(&LiftoffAssembler::emit_i8x16_shr_u,
+ &LiftoffAssembler::emit_i8x16_shri_u);
case wasm::kExprI8x16Add:
return EmitBinOp<kS128, kS128>(&LiftoffAssembler::emit_i8x16_add);
case wasm::kExprI8x16AddSaturateS:
@@ -2531,9 +2667,21 @@ class LiftoffCompiler {
return EmitBinOp<kS128, kS128>(&LiftoffAssembler::emit_i8x16_max_u);
case wasm::kExprI16x8Neg:
return EmitUnOp<kS128, kS128>(&LiftoffAssembler::emit_i16x8_neg);
+ case wasm::kExprV16x8AnyTrue:
+ return EmitUnOp<kS128, kI32>(&LiftoffAssembler::emit_v16x8_anytrue);
+ case wasm::kExprV16x8AllTrue:
+ return EmitUnOp<kS128, kI32>(&LiftoffAssembler::emit_v16x8_alltrue);
+ case wasm::kExprI16x8BitMask:
+ return EmitUnOp<kS128, kI32>(&LiftoffAssembler::emit_i16x8_bitmask);
case wasm::kExprI16x8Shl:
return EmitSimdShiftOp(&LiftoffAssembler::emit_i16x8_shl,
&LiftoffAssembler::emit_i16x8_shli);
+ case wasm::kExprI16x8ShrS:
+ return EmitSimdShiftOp(&LiftoffAssembler::emit_i16x8_shr_s,
+ &LiftoffAssembler::emit_i16x8_shri_s);
+ case wasm::kExprI16x8ShrU:
+ return EmitSimdShiftOp(&LiftoffAssembler::emit_i16x8_shr_u,
+ &LiftoffAssembler::emit_i16x8_shri_u);
case wasm::kExprI16x8Add:
return EmitBinOp<kS128, kS128>(&LiftoffAssembler::emit_i16x8_add);
case wasm::kExprI16x8AddSaturateS:
@@ -2562,9 +2710,21 @@ class LiftoffCompiler {
return EmitBinOp<kS128, kS128>(&LiftoffAssembler::emit_i16x8_max_u);
case wasm::kExprI32x4Neg:
return EmitUnOp<kS128, kS128>(&LiftoffAssembler::emit_i32x4_neg);
+ case wasm::kExprV32x4AnyTrue:
+ return EmitUnOp<kS128, kI32>(&LiftoffAssembler::emit_v32x4_anytrue);
+ case wasm::kExprV32x4AllTrue:
+ return EmitUnOp<kS128, kI32>(&LiftoffAssembler::emit_v32x4_alltrue);
+ case wasm::kExprI32x4BitMask:
+ return EmitUnOp<kS128, kI32>(&LiftoffAssembler::emit_i32x4_bitmask);
case wasm::kExprI32x4Shl:
return EmitSimdShiftOp(&LiftoffAssembler::emit_i32x4_shl,
&LiftoffAssembler::emit_i32x4_shli);
+ case wasm::kExprI32x4ShrS:
+ return EmitSimdShiftOp(&LiftoffAssembler::emit_i32x4_shr_s,
+ &LiftoffAssembler::emit_i32x4_shri_s);
+ case wasm::kExprI32x4ShrU:
+ return EmitSimdShiftOp(&LiftoffAssembler::emit_i32x4_shr_u,
+ &LiftoffAssembler::emit_i32x4_shri_u);
case wasm::kExprI32x4Add:
return EmitBinOp<kS128, kS128>(&LiftoffAssembler::emit_i32x4_add);
case wasm::kExprI32x4Sub:
@@ -2584,6 +2744,12 @@ class LiftoffCompiler {
case wasm::kExprI64x2Shl:
return EmitSimdShiftOp(&LiftoffAssembler::emit_i64x2_shl,
&LiftoffAssembler::emit_i64x2_shli);
+ case wasm::kExprI64x2ShrS:
+ return EmitSimdShiftOp(&LiftoffAssembler::emit_i64x2_shr_s,
+ &LiftoffAssembler::emit_i64x2_shri_s);
+ case wasm::kExprI64x2ShrU:
+ return EmitSimdShiftOp(&LiftoffAssembler::emit_i64x2_shr_u,
+ &LiftoffAssembler::emit_i64x2_shri_u);
case wasm::kExprI64x2Add:
return EmitBinOp<kS128, kS128>(&LiftoffAssembler::emit_i64x2_add);
case wasm::kExprI64x2Sub:
@@ -2626,6 +2792,18 @@ class LiftoffCompiler {
return EmitBinOp<kS128, kS128>(&LiftoffAssembler::emit_f64x2_min);
case wasm::kExprF64x2Max:
return EmitBinOp<kS128, kS128>(&LiftoffAssembler::emit_f64x2_max);
+ case wasm::kExprI32x4SConvertF32x4:
+ return EmitUnOp<kS128, kS128>(
+ &LiftoffAssembler::emit_i32x4_sconvert_f32x4);
+ case wasm::kExprI32x4UConvertF32x4:
+ return EmitUnOp<kS128, kS128>(
+ &LiftoffAssembler::emit_i32x4_uconvert_f32x4);
+ case wasm::kExprF32x4SConvertI32x4:
+ return EmitUnOp<kS128, kS128>(
+ &LiftoffAssembler::emit_f32x4_sconvert_i32x4);
+ case wasm::kExprF32x4UConvertI32x4:
+ return EmitUnOp<kS128, kS128>(
+ &LiftoffAssembler::emit_f32x4_uconvert_i32x4);
case wasm::kExprI8x16SConvertI16x8:
return EmitBinOp<kS128, kS128>(
&LiftoffAssembler::emit_i8x16_sconvert_i16x8);
@@ -2689,10 +2867,10 @@ class LiftoffCompiler {
static constexpr RegClass result_rc = reg_class_for(result_type);
LiftoffRegister lhs = __ PopToRegister();
LiftoffRegister dst = src_rc == result_rc
- ? __ GetUnusedRegister(result_rc, {lhs})
- : __ GetUnusedRegister(result_rc);
+ ? __ GetUnusedRegister(result_rc, {lhs}, {})
+ : __ GetUnusedRegister(result_rc, {});
fn(dst, lhs, imm.lane);
- __ PushRegister(ValueType(result_type), dst);
+ __ PushRegister(ValueType::Primitive(result_type), dst);
}
template <ValueType::Kind src2_type, typename EmitFn>
@@ -2716,7 +2894,7 @@ class LiftoffCompiler {
(src2_rc == result_rc || pin_src2)
? __ GetUnusedRegister(result_rc, {src1},
LiftoffRegList::ForRegs(src2))
- : __ GetUnusedRegister(result_rc, {src1});
+ : __ GetUnusedRegister(result_rc, {src1}, {});
fn(dst, src1, src2, imm.lane);
__ PushRegister(kWasmS128, dst);
}
@@ -2770,8 +2948,15 @@ class LiftoffCompiler {
const Simd8x16ShuffleImmediate<validate>& imm,
const Value& input0, const Value& input1,
Value* result) {
- unsupported(decoder, kSimd, "simd");
+ static constexpr RegClass result_rc = reg_class_for(ValueType::kS128);
+ LiftoffRegister rhs = __ PopToRegister();
+ LiftoffRegister lhs = __ PopToRegister(LiftoffRegList::ForRegs(rhs));
+ LiftoffRegister dst = __ GetUnusedRegister(result_rc, {lhs, rhs}, {});
+
+ __ LiftoffAssembler::emit_s8x16_shuffle(dst, lhs, rhs, imm.shuffle);
+ __ PushRegister(kWasmS128, dst);
}
+
void Throw(FullDecoder* decoder, const ExceptionIndexImmediate<validate>&,
const Vector<Value>& args) {
unsupported(decoder, kExceptionHandling, "throw");
@@ -3369,17 +3554,17 @@ class LiftoffCompiler {
void TableGrow(FullDecoder* decoder, const TableIndexImmediate<validate>& imm,
const Value& value, const Value& delta, Value* result) {
- unsupported(decoder, kAnyRef, "table.grow");
+ unsupported(decoder, kRefTypes, "table.grow");
}
void TableSize(FullDecoder* decoder, const TableIndexImmediate<validate>& imm,
Value* result) {
- unsupported(decoder, kAnyRef, "table.size");
+ unsupported(decoder, kRefTypes, "table.size");
}
void TableFill(FullDecoder* decoder, const TableIndexImmediate<validate>& imm,
const Value& start, const Value& value, const Value& count) {
- unsupported(decoder, kAnyRef, "table.fill");
+ unsupported(decoder, kRefTypes, "table.fill");
}
void StructNew(FullDecoder* decoder,
@@ -3389,7 +3574,8 @@ class LiftoffCompiler {
unsupported(decoder, kGC, "struct.new");
}
void StructGet(FullDecoder* decoder, const Value& struct_obj,
- const FieldIndexImmediate<validate>& field, Value* result) {
+ const FieldIndexImmediate<validate>& field, bool is_signed,
+ Value* result) {
// TODO(7748): Implement.
unsupported(decoder, kGC, "struct.get");
}
@@ -3408,7 +3594,7 @@ class LiftoffCompiler {
}
void ArrayGet(FullDecoder* decoder, const Value& array_obj,
const ArrayIndexImmediate<validate>& imm, const Value& index,
- Value* result) {
+ bool is_signed, Value* result) {
// TODO(7748): Implement.
unsupported(decoder, kGC, "array.get");
}
@@ -3423,6 +3609,12 @@ class LiftoffCompiler {
unsupported(decoder, kGC, "array.len");
}
+ void RttCanon(FullDecoder* decoder, const TypeIndexImmediate<validate>& imm,
+ Value* result) {
+ // TODO(7748): Implement.
+ unsupported(decoder, kGC, "rtt.canon");
+ }
+
void PassThrough(FullDecoder* decoder, const Value& from, Value* to) {
// TODO(7748): Implement.
unsupported(decoder, kGC, "");
@@ -3484,9 +3676,9 @@ class LiftoffCompiler {
DebugSideTableBuilder* const debug_sidetable_builder_;
const ForDebugging for_debugging_;
LiftoffBailoutReason bailout_reason_ = kSuccess;
- std::vector<OutOfLineCode> out_of_line_code_;
+ ZoneVector<OutOfLineCode> out_of_line_code_;
SourcePositionTableBuilder source_position_table_builder_;
- std::vector<trap_handler::ProtectedInstructionData> protected_instructions_;
+ ZoneVector<trap_handler::ProtectedInstructionData> protected_instructions_;
// Zone used to store information during compilation. The result will be
// stored independently, such that this zone can die together with the
// LiftoffCompiler after compilation.
@@ -3536,9 +3728,9 @@ WasmCompilationResult ExecuteLiftoffCompilation(
std::unique_ptr<DebugSideTable>* debug_sidetable,
Vector<int> extra_source_pos) {
int func_body_size = static_cast<int>(func_body.end - func_body.start);
- TRACE_EVENT2(TRACE_DISABLED_BY_DEFAULT("v8.wasm"),
- "ExecuteLiftoffCompilation", "func_index", func_index,
- "body_size", func_body_size);
+ TRACE_EVENT2(TRACE_DISABLED_BY_DEFAULT("v8.wasm.detailed"),
+ "wasm.CompileBaseline", "func_index", func_index, "body_size",
+ func_body_size);
Zone zone(allocator, "LiftoffCompilationZone");
auto call_descriptor = compiler::GetWasmCallDescriptor(&zone, func_body.sig);
diff --git a/chromium/v8/src/wasm/baseline/liftoff-compiler.h b/chromium/v8/src/wasm/baseline/liftoff-compiler.h
index 434172c4cf1..bb2ddaf050c 100644
--- a/chromium/v8/src/wasm/baseline/liftoff-compiler.h
+++ b/chromium/v8/src/wasm/baseline/liftoff-compiler.h
@@ -38,7 +38,7 @@ enum LiftoffBailoutReason : int8_t {
kComplexOperation = 4,
// Unimplemented proposals:
kSimd = 5,
- kAnyRef = 6,
+ kRefTypes = 6,
kExceptionHandling = 7,
kMultiValue = 8,
kTailCall = 9,
diff --git a/chromium/v8/src/wasm/baseline/mips/liftoff-assembler-mips.h b/chromium/v8/src/wasm/baseline/mips/liftoff-assembler-mips.h
index f24c95008c9..0560a66dfe7 100644
--- a/chromium/v8/src/wasm/baseline/mips/liftoff-assembler-mips.h
+++ b/chromium/v8/src/wasm/baseline/mips/liftoff-assembler-mips.h
@@ -603,7 +603,7 @@ void LiftoffAssembler::StoreCallerFrameSlot(LiftoffRegister src,
void LiftoffAssembler::MoveStackValue(uint32_t dst_offset, uint32_t src_offset,
ValueType type) {
DCHECK_NE(dst_offset, src_offset);
- LiftoffRegister reg = GetUnusedRegister(reg_class_for(type));
+ LiftoffRegister reg = GetUnusedRegister(reg_class_for(type), {});
Fill(reg, src_offset, type);
Spill(dst_offset, reg, type);
}
@@ -646,13 +646,13 @@ void LiftoffAssembler::Spill(int offset, WasmValue value) {
MemOperand dst = liftoff::GetStackSlot(offset);
switch (value.type().kind()) {
case ValueType::kI32: {
- LiftoffRegister tmp = GetUnusedRegister(kGpReg);
+ LiftoffRegister tmp = GetUnusedRegister(kGpReg, {});
TurboAssembler::li(tmp.gp(), Operand(value.to_i32()));
sw(tmp.gp(), dst);
break;
}
case ValueType::kI64: {
- LiftoffRegister tmp = GetUnusedRegister(kGpRegPair);
+ LiftoffRegister tmp = GetUnusedRegister(kGpRegPair, {});
int32_t low_word = value.to_i64();
int32_t high_word = value.to_i64() >> 32;
@@ -1269,6 +1269,30 @@ bool LiftoffAssembler::emit_type_conversion(WasmOpcode opcode,
bailout(kUnsupportedArchitecture, "kExprI32UConvertF64");
return true;
}
+ case kExprI32SConvertSatF32:
+ bailout(kNonTrappingFloatToInt, "kExprI32SConvertSatF32");
+ return true;
+ case kExprI32UConvertSatF32:
+ bailout(kNonTrappingFloatToInt, "kExprI32UConvertSatF32");
+ return true;
+ case kExprI32SConvertSatF64:
+ bailout(kNonTrappingFloatToInt, "kExprI32SConvertSatF64");
+ return true;
+ case kExprI32UConvertSatF64:
+ bailout(kNonTrappingFloatToInt, "kExprI32UConvertSatF64");
+ return true;
+ case kExprI64SConvertSatF32:
+ bailout(kNonTrappingFloatToInt, "kExprI64SConvertSatF32");
+ return true;
+ case kExprI64UConvertSatF32:
+ bailout(kNonTrappingFloatToInt, "kExprI64UConvertSatF32");
+ return true;
+ case kExprI64SConvertSatF64:
+ bailout(kNonTrappingFloatToInt, "kExprI64SConvertSatF64");
+ return true;
+ case kExprI64UConvertSatF64:
+ bailout(kNonTrappingFloatToInt, "kExprI64UConvertSatF64");
+ return true;
case kExprI32ReinterpretF32:
mfc1(dst.gp(), src.fp());
return true;
@@ -1542,6 +1566,27 @@ void LiftoffAssembler::emit_f64_set_cond(Condition cond, Register dst,
bind(&cont);
}
+void LiftoffAssembler::LoadTransform(LiftoffRegister dst, Register src_addr,
+ Register offset_reg, uint32_t offset_imm,
+ LoadType type,
+ LoadTransformationKind transform,
+ uint32_t* protected_load_pc) {
+ bailout(kSimd, "load extend and load splat unimplemented");
+}
+
+void LiftoffAssembler::emit_s8x16_shuffle(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ LiftoffRegister rhs,
+ const uint8_t shuffle[16]) {
+ bailout(kSimd, "emit_s8x16_shuffle");
+}
+
+void LiftoffAssembler::emit_s8x16_swizzle(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kSimd, "emit_s8x16_swizzle");
+}
+
void LiftoffAssembler::emit_i8x16_splat(LiftoffRegister dst,
LiftoffRegister src) {
bailout(kSimd, "emit_i8x16_splat");
@@ -1739,6 +1784,21 @@ void LiftoffAssembler::emit_i8x16_neg(LiftoffRegister dst,
bailout(kSimd, "emit_i8x16_neg");
}
+void LiftoffAssembler::emit_v8x16_anytrue(LiftoffRegister dst,
+ LiftoffRegister src) {
+ bailout(kSimd, "emit_v8x16_anytrue");
+}
+
+void LiftoffAssembler::emit_v8x16_alltrue(LiftoffRegister dst,
+ LiftoffRegister src) {
+ bailout(kSimd, "emit_v8x16_alltrue");
+}
+
+void LiftoffAssembler::emit_i8x16_bitmask(LiftoffRegister dst,
+ LiftoffRegister src) {
+ bailout(kSimd, "emit_i8x16_bitmask");
+}
+
void LiftoffAssembler::emit_i8x16_shl(LiftoffRegister dst, LiftoffRegister lhs,
LiftoffRegister rhs) {
bailout(kSimd, "emit_i8x16_shl");
@@ -1749,6 +1809,28 @@ void LiftoffAssembler::emit_i8x16_shli(LiftoffRegister dst, LiftoffRegister lhs,
bailout(kSimd, "emit_i8x16_shli");
}
+void LiftoffAssembler::emit_i8x16_shr_s(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kSimd, "emit_i8x16_shr_s");
+}
+
+void LiftoffAssembler::emit_i8x16_shri_s(LiftoffRegister dst,
+ LiftoffRegister lhs, int32_t rhs) {
+ bailout(kSimd, "emit_i8x16_shri_s");
+}
+
+void LiftoffAssembler::emit_i8x16_shr_u(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kSimd, "emit_i8x16_shr_u");
+}
+
+void LiftoffAssembler::emit_i8x16_shri_u(LiftoffRegister dst,
+ LiftoffRegister lhs, int32_t rhs) {
+ bailout(kSimd, "emit_i8x16_shri_u");
+}
+
void LiftoffAssembler::emit_i8x16_add(LiftoffRegister dst, LiftoffRegister lhs,
LiftoffRegister rhs) {
bailout(kSimd, "emit_i8x16_add");
@@ -1817,6 +1899,21 @@ void LiftoffAssembler::emit_i16x8_neg(LiftoffRegister dst,
bailout(kSimd, "emit_i16x8_neg");
}
+void LiftoffAssembler::emit_v16x8_anytrue(LiftoffRegister dst,
+ LiftoffRegister src) {
+ bailout(kSimd, "emit_v16x8_anytrue");
+}
+
+void LiftoffAssembler::emit_v16x8_alltrue(LiftoffRegister dst,
+ LiftoffRegister src) {
+ bailout(kSimd, "emit_v16x8_alltrue");
+}
+
+void LiftoffAssembler::emit_i16x8_bitmask(LiftoffRegister dst,
+ LiftoffRegister src) {
+ bailout(kSimd, "emit_i16x8_bitmask");
+}
+
void LiftoffAssembler::emit_i16x8_shl(LiftoffRegister dst, LiftoffRegister lhs,
LiftoffRegister rhs) {
bailout(kSimd, "emit_i16x8_shl");
@@ -1827,6 +1924,28 @@ void LiftoffAssembler::emit_i16x8_shli(LiftoffRegister dst, LiftoffRegister lhs,
bailout(kSimd, "emit_i16x8_shli");
}
+void LiftoffAssembler::emit_i16x8_shr_s(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kSimd, "emit_i16x8_shr_s");
+}
+
+void LiftoffAssembler::emit_i16x8_shri_s(LiftoffRegister dst,
+ LiftoffRegister lhs, int32_t rhs) {
+ bailout(kSimd, "emit_i16x8_shri_s");
+}
+
+void LiftoffAssembler::emit_i16x8_shr_u(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kSimd, "emit_i16x8_shr_u");
+}
+
+void LiftoffAssembler::emit_i16x8_shri_u(LiftoffRegister dst,
+ LiftoffRegister lhs, int32_t rhs) {
+ bailout(kSimd, "emit_i16x8_shri_u");
+}
+
void LiftoffAssembler::emit_i16x8_add(LiftoffRegister dst, LiftoffRegister lhs,
LiftoffRegister rhs) {
bailout(kSimd, "emit_i16x8_add");
@@ -1895,6 +2014,21 @@ void LiftoffAssembler::emit_i32x4_neg(LiftoffRegister dst,
bailout(kSimd, "emit_i32x4_neg");
}
+void LiftoffAssembler::emit_v32x4_anytrue(LiftoffRegister dst,
+ LiftoffRegister src) {
+ bailout(kSimd, "emit_v32x4_anytrue");
+}
+
+void LiftoffAssembler::emit_v32x4_alltrue(LiftoffRegister dst,
+ LiftoffRegister src) {
+ bailout(kSimd, "emit_v32x4_alltrue");
+}
+
+void LiftoffAssembler::emit_i32x4_bitmask(LiftoffRegister dst,
+ LiftoffRegister src) {
+ bailout(kSimd, "emit_i32x4_bitmask");
+}
+
void LiftoffAssembler::emit_i32x4_shl(LiftoffRegister dst, LiftoffRegister lhs,
LiftoffRegister rhs) {
bailout(kSimd, "emit_i32x4_shl");
@@ -1905,6 +2039,28 @@ void LiftoffAssembler::emit_i32x4_shli(LiftoffRegister dst, LiftoffRegister lhs,
bailout(kSimd, "emit_i32x4_shli");
}
+void LiftoffAssembler::emit_i32x4_shr_s(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kSimd, "emit_i32x4_shr_s");
+}
+
+void LiftoffAssembler::emit_i32x4_shri_s(LiftoffRegister dst,
+ LiftoffRegister lhs, int32_t rhs) {
+ bailout(kSimd, "emit_i32x4_shri_s");
+}
+
+void LiftoffAssembler::emit_i32x4_shr_u(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kSimd, "emit_i32x4_shr_u");
+}
+
+void LiftoffAssembler::emit_i32x4_shri_u(LiftoffRegister dst,
+ LiftoffRegister lhs, int32_t rhs) {
+ bailout(kSimd, "emit_i32x4_shri_u");
+}
+
void LiftoffAssembler::emit_i32x4_add(LiftoffRegister dst, LiftoffRegister lhs,
LiftoffRegister rhs) {
bailout(kSimd, "emit_i32x4_add");
@@ -1959,6 +2115,28 @@ void LiftoffAssembler::emit_i64x2_shli(LiftoffRegister dst, LiftoffRegister lhs,
bailout(kSimd, "emit_i64x2_shli");
}
+void LiftoffAssembler::emit_i64x2_shr_s(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kSimd, "emit_i64x2_shr_s");
+}
+
+void LiftoffAssembler::emit_i64x2_shri_s(LiftoffRegister dst,
+ LiftoffRegister lhs, int32_t rhs) {
+ bailout(kSimd, "emit_i64x2_shri_s");
+}
+
+void LiftoffAssembler::emit_i64x2_shr_u(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kSimd, "emit_i64x2_shr_u");
+}
+
+void LiftoffAssembler::emit_i64x2_shri_u(LiftoffRegister dst,
+ LiftoffRegister lhs, int32_t rhs) {
+ bailout(kSimd, "emit_i64x2_shri_u");
+}
+
void LiftoffAssembler::emit_i64x2_add(LiftoffRegister dst, LiftoffRegister lhs,
LiftoffRegister rhs) {
bailout(kSimd, "emit_i64x2_add");
@@ -2064,6 +2242,26 @@ void LiftoffAssembler::emit_f64x2_max(LiftoffRegister dst, LiftoffRegister lhs,
bailout(kSimd, "emit_f64x2_max");
}
+void LiftoffAssembler::emit_i32x4_sconvert_f32x4(LiftoffRegister dst,
+ LiftoffRegister src) {
+ bailout(kSimd, "emit_i32x4_sconvert_f32x4");
+}
+
+void LiftoffAssembler::emit_i32x4_uconvert_f32x4(LiftoffRegister dst,
+ LiftoffRegister src) {
+ bailout(kSimd, "emit_i32x4_uconvert_f32x4");
+}
+
+void LiftoffAssembler::emit_f32x4_sconvert_i32x4(LiftoffRegister dst,
+ LiftoffRegister src) {
+ bailout(kSimd, "emit_f32x4_sconvert_i32x4");
+}
+
+void LiftoffAssembler::emit_f32x4_uconvert_i32x4(LiftoffRegister dst,
+ LiftoffRegister src) {
+ bailout(kSimd, "emit_f32x4_uconvert_i32x4");
+}
+
void LiftoffAssembler::emit_i8x16_sconvert_i16x8(LiftoffRegister dst,
LiftoffRegister lhs,
LiftoffRegister rhs) {
@@ -2251,7 +2449,7 @@ void LiftoffAssembler::StackCheck(Label* ool_code, Register limit_address) {
}
void LiftoffAssembler::CallTrapCallbackForTesting() {
- PrepareCallCFunction(0, GetUnusedRegister(kGpReg).gp());
+ PrepareCallCFunction(0, GetUnusedRegister(kGpReg, {}).gp());
CallCFunction(ExternalReference::wasm_call_trap_callback_for_testing(), 0);
}
diff --git a/chromium/v8/src/wasm/baseline/mips64/liftoff-assembler-mips64.h b/chromium/v8/src/wasm/baseline/mips64/liftoff-assembler-mips64.h
index 292f8032b8f..70946d3f6b5 100644
--- a/chromium/v8/src/wasm/baseline/mips64/liftoff-assembler-mips64.h
+++ b/chromium/v8/src/wasm/baseline/mips64/liftoff-assembler-mips64.h
@@ -532,7 +532,7 @@ void LiftoffAssembler::StoreCallerFrameSlot(LiftoffRegister src,
void LiftoffAssembler::MoveStackValue(uint32_t dst_offset, uint32_t src_offset,
ValueType type) {
DCHECK_NE(dst_offset, src_offset);
- LiftoffRegister reg = GetUnusedRegister(reg_class_for(type));
+ LiftoffRegister reg = GetUnusedRegister(reg_class_for(type), {});
Fill(reg, src_offset, type);
Spill(dst_offset, reg, type);
}
@@ -582,13 +582,13 @@ void LiftoffAssembler::Spill(int offset, WasmValue value) {
MemOperand dst = liftoff::GetStackSlot(offset);
switch (value.type().kind()) {
case ValueType::kI32: {
- LiftoffRegister tmp = GetUnusedRegister(kGpReg);
+ LiftoffRegister tmp = GetUnusedRegister(kGpReg, {});
TurboAssembler::li(tmp.gp(), Operand(value.to_i32()));
sw(tmp.gp(), dst);
break;
}
case ValueType::kI64: {
- LiftoffRegister tmp = GetUnusedRegister(kGpReg);
+ LiftoffRegister tmp = GetUnusedRegister(kGpReg, {});
TurboAssembler::li(tmp.gp(), value.to_i64());
sd(tmp.gp(), dst);
break;
@@ -1177,6 +1177,30 @@ bool LiftoffAssembler::emit_type_conversion(WasmOpcode opcode,
case kExprF64ReinterpretI64:
dmtc1(src.gp(), dst.fp());
return true;
+ case kExprI32SConvertSatF32:
+ bailout(kNonTrappingFloatToInt, "kExprI32SConvertSatF32");
+ return true;
+ case kExprI32UConvertSatF32:
+ bailout(kNonTrappingFloatToInt, "kExprI32UConvertSatF32");
+ return true;
+ case kExprI32SConvertSatF64:
+ bailout(kNonTrappingFloatToInt, "kExprI32SConvertSatF64");
+ return true;
+ case kExprI32UConvertSatF64:
+ bailout(kNonTrappingFloatToInt, "kExprI32UConvertSatF64");
+ return true;
+ case kExprI64SConvertSatF32:
+ bailout(kNonTrappingFloatToInt, "kExprI64SConvertSatF32");
+ return true;
+ case kExprI64UConvertSatF32:
+ bailout(kNonTrappingFloatToInt, "kExprI64UConvertSatF32");
+ return true;
+ case kExprI64SConvertSatF64:
+ bailout(kNonTrappingFloatToInt, "kExprI64SConvertSatF64");
+ return true;
+ case kExprI64UConvertSatF64:
+ bailout(kNonTrappingFloatToInt, "kExprI64UConvertSatF64");
+ return true;
default:
return false;
}
@@ -1297,6 +1321,26 @@ inline FPUCondition ConditionToConditionCmpFPU(Condition condition,
UNREACHABLE();
}
+inline void EmitAnyTrue(LiftoffAssembler* assm, LiftoffRegister dst,
+ LiftoffRegister src) {
+ Label all_false;
+ assm->BranchMSA(&all_false, MSA_BRANCH_V, all_zero, src.fp().toW(),
+ USE_DELAY_SLOT);
+ assm->li(dst.gp(), 0l);
+ assm->li(dst.gp(), 1);
+ assm->bind(&all_false);
+}
+
+inline void EmitAllTrue(LiftoffAssembler* assm, LiftoffRegister dst,
+ LiftoffRegister src, MSABranchDF msa_branch_df) {
+ Label all_true;
+ assm->BranchMSA(&all_true, msa_branch_df, all_not_zero, src.fp().toW(),
+ USE_DELAY_SLOT);
+ assm->li(dst.gp(), 1);
+ assm->li(dst.gp(), 0l);
+ assm->bind(&all_true);
+}
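// Note on the two helpers above: on MIPS the instruction immediately after a
// branch sits in the branch delay slot and executes regardless of the branch
// outcome. With USE_DELAY_SLOT the first li() fills that slot, so dst is
// unconditionally preloaded with the "branch taken" result and only
// overwritten by the second li() on the fall-through path.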
+
} // namespace liftoff
void LiftoffAssembler::emit_f32_set_cond(Condition cond, Register dst,
@@ -1357,6 +1401,112 @@ void LiftoffAssembler::emit_f64_set_cond(Condition cond, Register dst,
bind(&cont);
}
+void LiftoffAssembler::LoadTransform(LiftoffRegister dst, Register src_addr,
+ Register offset_reg, uint32_t offset_imm,
+ LoadType type,
+ LoadTransformationKind transform,
+ uint32_t* protected_load_pc) {
+ UseScratchRegisterScope temps(this);
+ Register scratch = temps.Acquire();
+ Daddu(scratch, src_addr, offset_reg);
+ MemOperand src_op = MemOperand(scratch, offset_imm);
+ MSARegister dst_msa = dst.fp().toW();
+ *protected_load_pc = pc_offset();
+ MachineType memtype = type.mem_type();
+
+ if (transform == LoadTransformationKind::kExtend) {
+ Ld(scratch, src_op);
+ if (memtype == MachineType::Int8()) {
+ fill_d(dst_msa, scratch);
+ clti_s_b(kSimd128ScratchReg, dst_msa, 0);
+ ilvr_b(dst_msa, kSimd128ScratchReg, dst_msa);
+ } else if (memtype == MachineType::Uint8()) {
+ xor_v(kSimd128RegZero, kSimd128RegZero, kSimd128RegZero);
+ fill_d(dst_msa, scratch);
+ ilvr_b(dst_msa, kSimd128RegZero, dst_msa);
+ } else if (memtype == MachineType::Int16()) {
+ fill_d(dst_msa, scratch);
+ clti_s_h(kSimd128ScratchReg, dst_msa, 0);
+ ilvr_h(dst_msa, kSimd128ScratchReg, dst_msa);
+ } else if (memtype == MachineType::Uint16()) {
+ xor_v(kSimd128RegZero, kSimd128RegZero, kSimd128RegZero);
+ fill_d(dst_msa, scratch);
+ ilvr_h(dst_msa, kSimd128RegZero, dst_msa);
+ } else if (memtype == MachineType::Int32()) {
+ fill_d(dst_msa, scratch);
+ clti_s_w(kSimd128ScratchReg, dst_msa, 0);
+ ilvr_w(dst_msa, kSimd128ScratchReg, dst_msa);
+ } else if (memtype == MachineType::Uint32()) {
+ xor_v(kSimd128RegZero, kSimd128RegZero, kSimd128RegZero);
+ fill_d(dst_msa, scratch);
+ ilvr_w(dst_msa, kSimd128RegZero, dst_msa);
+ }
+ } else {
+ DCHECK_EQ(LoadTransformationKind::kSplat, transform);
+ if (memtype == MachineType::Int8()) {
+ Lb(scratch, src_op);
+ fill_b(dst_msa, scratch);
+ } else if (memtype == MachineType::Int16()) {
+ Lh(scratch, src_op);
+ fill_h(dst_msa, scratch);
+ } else if (memtype == MachineType::Int32()) {
+ Lw(scratch, src_op);
+ fill_w(dst_msa, scratch);
+ } else if (memtype == MachineType::Int64()) {
+ Ld(scratch, src_op);
+ fill_d(dst_msa, scratch);
+ }
+ }
+}
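// Sketch of the extend path above: the 64-bit source is loaded into a GPR,
// splatted across the vector with fill_d, and then widened by interleaving
// the low half (ilvr_*) with either a per-lane sign mask (clti_s_* against 0)
// for signed extends or the zeroed kSimd128RegZero for unsigned extends.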
+
+void LiftoffAssembler::emit_s8x16_shuffle(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ LiftoffRegister rhs,
+ const uint8_t shuffle[16]) {
+ MSARegister dst_msa = dst.fp().toW();
+ MSARegister lhs_msa = lhs.fp().toW();
+ MSARegister rhs_msa = rhs.fp().toW();
+
+ uint64_t control_hi = 0;
+ uint64_t control_low = 0;
+ for (int i = 7; i >= 0; i--) {
+ control_hi <<= 8;
+ control_hi |= shuffle[i + 8];
+ control_low <<= 8;
+ control_low |= shuffle[i];
+ }
+
+ if (dst_msa == lhs_msa) {
+ move_v(kSimd128ScratchReg, lhs_msa);
+ lhs_msa = kSimd128ScratchReg;
+ } else if (dst_msa == rhs_msa) {
+ move_v(kSimd128ScratchReg, rhs_msa);
+ rhs_msa = kSimd128ScratchReg;
+ }
+
+ li(kScratchReg, control_low);
+ insert_d(dst_msa, 0, kScratchReg);
+ li(kScratchReg, control_hi);
+ insert_d(dst_msa, 1, kScratchReg);
+ vshf_b(dst_msa, rhs_msa, lhs_msa);
+}
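// The packing loop above in isolation (hedged sketch, not V8 API): byte i of
// each 64-bit control word ends up holding shuffle[i] resp. shuffle[i + 8],
// which insert_d then places into the corresponding doubleword of dst before
// vshf.b consumes dst as the per-lane selector.
uint64_t PackShuffleHalf(const uint8_t* shuffle8) {
  uint64_t control = 0;
  for (int i = 7; i >= 0; i--) {
    control = (control << 8) | shuffle8[i];
  }
  return control;  // identity bytes {0..7} pack to 0x0706050403020100
}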
+
+void LiftoffAssembler::emit_s8x16_swizzle(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ MSARegister dst_msa = dst.fp().toW();
+ MSARegister lhs_msa = lhs.fp().toW();
+ MSARegister rhs_msa = rhs.fp().toW();
+
+ if (dst == lhs) {
+ move_v(kSimd128ScratchReg, lhs_msa);
+ lhs_msa = kSimd128ScratchReg;
+ }
+ xor_v(kSimd128RegZero, kSimd128RegZero, kSimd128RegZero);
+ move_v(dst_msa, rhs_msa);
+ vshf_b(dst_msa, kSimd128RegZero, lhs_msa);
+}
+
void LiftoffAssembler::emit_i8x16_splat(LiftoffRegister dst,
LiftoffRegister src) {
fill_b(dst.fp().toW(), src.gp());
@@ -1567,6 +1717,32 @@ void LiftoffAssembler::emit_i8x16_neg(LiftoffRegister dst,
subv_b(dst.fp().toW(), kSimd128RegZero, src.fp().toW());
}
+void LiftoffAssembler::emit_v8x16_anytrue(LiftoffRegister dst,
+ LiftoffRegister src) {
+ liftoff::EmitAnyTrue(this, dst, src);
+}
+
+void LiftoffAssembler::emit_v8x16_alltrue(LiftoffRegister dst,
+ LiftoffRegister src) {
+ liftoff::EmitAllTrue(this, dst, src, MSA_BRANCH_B);
+}
+
+void LiftoffAssembler::emit_i8x16_bitmask(LiftoffRegister dst,
+ LiftoffRegister src) {
+ MSARegister scratch0 = kSimd128RegZero;
+ MSARegister scratch1 = kSimd128ScratchReg;
+ srli_b(scratch0, src.fp().toW(), 7);
+ srli_h(scratch1, scratch0, 7);
+ or_v(scratch0, scratch0, scratch1);
+ srli_w(scratch1, scratch0, 14);
+ or_v(scratch0, scratch0, scratch1);
+ srli_d(scratch1, scratch0, 28);
+ or_v(scratch0, scratch0, scratch1);
+ shf_w(scratch1, scratch0, 0x0E);
+ ilvev_b(scratch0, scratch1, scratch0);
+ copy_u_h(dst.gp(), scratch0, 0);
+}
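// How the reduction above works (sketch): srli_b moves each byte's sign bit
// into bit 0 of its lane; each following shift/or pair folds neighbouring
// lanes together, doubling the collected bits per step (2, then 4, then 8
// bits in the low byte of each 64-bit half); shf_w and ilvev_b place the two
// 8-bit halves side by side, and copy_u_h extracts the final 16-bit mask.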
+
void LiftoffAssembler::emit_i8x16_shl(LiftoffRegister dst, LiftoffRegister lhs,
LiftoffRegister rhs) {
fill_b(kSimd128ScratchReg, rhs.gp());
@@ -1578,6 +1754,30 @@ void LiftoffAssembler::emit_i8x16_shli(LiftoffRegister dst, LiftoffRegister lhs,
slli_b(dst.fp().toW(), lhs.fp().toW(), rhs & 7);
}
+void LiftoffAssembler::emit_i8x16_shr_s(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ fill_b(kSimd128ScratchReg, rhs.gp());
+ sra_b(dst.fp().toW(), lhs.fp().toW(), kSimd128ScratchReg);
+}
+
+void LiftoffAssembler::emit_i8x16_shri_s(LiftoffRegister dst,
+ LiftoffRegister lhs, int32_t rhs) {
+ srai_b(dst.fp().toW(), lhs.fp().toW(), rhs & 7);
+}
+
+void LiftoffAssembler::emit_i8x16_shr_u(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ fill_b(kSimd128ScratchReg, rhs.gp());
+ srl_b(dst.fp().toW(), lhs.fp().toW(), kSimd128ScratchReg);
+}
+
+void LiftoffAssembler::emit_i8x16_shri_u(LiftoffRegister dst,
+ LiftoffRegister lhs, int32_t rhs) {
+ srli_b(dst.fp().toW(), lhs.fp().toW(), rhs & 7);
+}
+
void LiftoffAssembler::emit_i8x16_add(LiftoffRegister dst, LiftoffRegister lhs,
LiftoffRegister rhs) {
addv_b(dst.fp().toW(), lhs.fp().toW(), rhs.fp().toW());
@@ -1647,6 +1847,31 @@ void LiftoffAssembler::emit_i16x8_neg(LiftoffRegister dst,
subv_h(dst.fp().toW(), kSimd128RegZero, src.fp().toW());
}
+void LiftoffAssembler::emit_v16x8_anytrue(LiftoffRegister dst,
+ LiftoffRegister src) {
+ liftoff::EmitAnyTrue(this, dst, src);
+}
+
+void LiftoffAssembler::emit_v16x8_alltrue(LiftoffRegister dst,
+ LiftoffRegister src) {
+ liftoff::EmitAllTrue(this, dst, src, MSA_BRANCH_H);
+}
+
+void LiftoffAssembler::emit_i16x8_bitmask(LiftoffRegister dst,
+ LiftoffRegister src) {
+ MSARegister scratch0 = kSimd128RegZero;
+ MSARegister scratch1 = kSimd128ScratchReg;
+ srli_h(scratch0, src.fp().toW(), 15);
+ srli_w(scratch1, scratch0, 15);
+ or_v(scratch0, scratch0, scratch1);
+ srli_d(scratch1, scratch0, 30);
+ or_v(scratch0, scratch0, scratch1);
+ shf_w(scratch1, scratch0, 0x0E);
+ slli_d(scratch1, scratch1, 4);
+ or_v(scratch0, scratch0, scratch1);
+ copy_u_b(dst.gp(), scratch0, 0);
+}
+
void LiftoffAssembler::emit_i16x8_shl(LiftoffRegister dst, LiftoffRegister lhs,
LiftoffRegister rhs) {
fill_h(kSimd128ScratchReg, rhs.gp());
@@ -1658,6 +1883,30 @@ void LiftoffAssembler::emit_i16x8_shli(LiftoffRegister dst, LiftoffRegister lhs,
slli_h(dst.fp().toW(), lhs.fp().toW(), rhs & 15);
}
+void LiftoffAssembler::emit_i16x8_shr_s(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ fill_h(kSimd128ScratchReg, rhs.gp());
+ sra_h(dst.fp().toW(), lhs.fp().toW(), kSimd128ScratchReg);
+}
+
+void LiftoffAssembler::emit_i16x8_shri_s(LiftoffRegister dst,
+ LiftoffRegister lhs, int32_t rhs) {
+ srai_h(dst.fp().toW(), lhs.fp().toW(), rhs & 15);
+}
+
+void LiftoffAssembler::emit_i16x8_shr_u(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ fill_h(kSimd128ScratchReg, rhs.gp());
+ srl_h(dst.fp().toW(), lhs.fp().toW(), kSimd128ScratchReg);
+}
+
+void LiftoffAssembler::emit_i16x8_shri_u(LiftoffRegister dst,
+ LiftoffRegister lhs, int32_t rhs) {
+ srli_h(dst.fp().toW(), lhs.fp().toW(), rhs & 15);
+}
+
void LiftoffAssembler::emit_i16x8_add(LiftoffRegister dst, LiftoffRegister lhs,
LiftoffRegister rhs) {
addv_h(dst.fp().toW(), lhs.fp().toW(), rhs.fp().toW());
@@ -1727,6 +1976,29 @@ void LiftoffAssembler::emit_i32x4_neg(LiftoffRegister dst,
subv_w(dst.fp().toW(), kSimd128RegZero, src.fp().toW());
}
+void LiftoffAssembler::emit_v32x4_anytrue(LiftoffRegister dst,
+ LiftoffRegister src) {
+ liftoff::EmitAnyTrue(this, dst, src);
+}
+
+void LiftoffAssembler::emit_v32x4_alltrue(LiftoffRegister dst,
+ LiftoffRegister src) {
+ liftoff::EmitAllTrue(this, dst, src, MSA_BRANCH_W);
+}
+
+void LiftoffAssembler::emit_i32x4_bitmask(LiftoffRegister dst,
+ LiftoffRegister src) {
+ MSARegister scratch0 = kSimd128RegZero;
+ MSARegister scratch1 = kSimd128ScratchReg;
+ srli_w(scratch0, src.fp().toW(), 31);
+ srli_d(scratch1, scratch0, 31);
+ or_v(scratch0, scratch0, scratch1);
+ shf_w(scratch1, scratch0, 0x0E);
+ slli_d(scratch1, scratch1, 2);
+ or_v(scratch0, scratch0, scratch1);
+ copy_u_b(dst.gp(), scratch0, 0);
+}
+
void LiftoffAssembler::emit_i32x4_shl(LiftoffRegister dst, LiftoffRegister lhs,
LiftoffRegister rhs) {
fill_w(kSimd128ScratchReg, rhs.gp());
@@ -1738,6 +2010,30 @@ void LiftoffAssembler::emit_i32x4_shli(LiftoffRegister dst, LiftoffRegister lhs,
slli_w(dst.fp().toW(), lhs.fp().toW(), rhs & 31);
}
+void LiftoffAssembler::emit_i32x4_shr_s(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ fill_w(kSimd128ScratchReg, rhs.gp());
+ sra_w(dst.fp().toW(), lhs.fp().toW(), kSimd128ScratchReg);
+}
+
+void LiftoffAssembler::emit_i32x4_shri_s(LiftoffRegister dst,
+ LiftoffRegister lhs, int32_t rhs) {
+ srai_w(dst.fp().toW(), lhs.fp().toW(), rhs & 31);
+}
+
+void LiftoffAssembler::emit_i32x4_shr_u(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ fill_w(kSimd128ScratchReg, rhs.gp());
+ srl_w(dst.fp().toW(), lhs.fp().toW(), kSimd128ScratchReg);
+}
+
+void LiftoffAssembler::emit_i32x4_shri_u(LiftoffRegister dst,
+ LiftoffRegister lhs, int32_t rhs) {
+ srli_w(dst.fp().toW(), lhs.fp().toW(), rhs & 31);
+}
+
void LiftoffAssembler::emit_i32x4_add(LiftoffRegister dst, LiftoffRegister lhs,
LiftoffRegister rhs) {
addv_w(dst.fp().toW(), lhs.fp().toW(), rhs.fp().toW());
@@ -1794,6 +2090,30 @@ void LiftoffAssembler::emit_i64x2_shli(LiftoffRegister dst, LiftoffRegister lhs,
slli_d(dst.fp().toW(), lhs.fp().toW(), rhs & 63);
}
+void LiftoffAssembler::emit_i64x2_shr_s(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ fill_d(kSimd128ScratchReg, rhs.gp());
+ sra_d(dst.fp().toW(), lhs.fp().toW(), kSimd128ScratchReg);
+}
+
+void LiftoffAssembler::emit_i64x2_shri_s(LiftoffRegister dst,
+ LiftoffRegister lhs, int32_t rhs) {
+ srai_d(dst.fp().toW(), lhs.fp().toW(), rhs & 63);
+}
+
+void LiftoffAssembler::emit_i64x2_shr_u(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ fill_d(kSimd128ScratchReg, rhs.gp());
+ srl_d(dst.fp().toW(), lhs.fp().toW(), kSimd128ScratchReg);
+}
+
+void LiftoffAssembler::emit_i64x2_shri_u(LiftoffRegister dst,
+ LiftoffRegister lhs, int32_t rhs) {
+ srli_d(dst.fp().toW(), lhs.fp().toW(), rhs & 63);
+}
+
void LiftoffAssembler::emit_i64x2_add(LiftoffRegister dst, LiftoffRegister lhs,
LiftoffRegister rhs) {
addv_d(dst.fp().toW(), lhs.fp().toW(), rhs.fp().toW());
@@ -1959,6 +2279,26 @@ void LiftoffAssembler::emit_f64x2_max(LiftoffRegister dst, LiftoffRegister lhs,
bsel_v(dst_msa, scratch0, scratch1);
}
+void LiftoffAssembler::emit_i32x4_sconvert_f32x4(LiftoffRegister dst,
+ LiftoffRegister src) {
+ ftrunc_s_w(dst.fp().toW(), src.fp().toW());
+}
+
+void LiftoffAssembler::emit_i32x4_uconvert_f32x4(LiftoffRegister dst,
+ LiftoffRegister src) {
+ ftrunc_u_w(dst.fp().toW(), src.fp().toW());
+}
+
+void LiftoffAssembler::emit_f32x4_sconvert_i32x4(LiftoffRegister dst,
+ LiftoffRegister src) {
+ ffint_s_w(dst.fp().toW(), src.fp().toW());
+}
+
+void LiftoffAssembler::emit_f32x4_uconvert_i32x4(LiftoffRegister dst,
+ LiftoffRegister src) {
+ ffint_u_w(dst.fp().toW(), src.fp().toW());
+}
+
void LiftoffAssembler::emit_i8x16_sconvert_i16x8(LiftoffRegister dst,
LiftoffRegister lhs,
LiftoffRegister rhs) {
@@ -2197,7 +2537,7 @@ void LiftoffAssembler::StackCheck(Label* ool_code, Register limit_address) {
}
void LiftoffAssembler::CallTrapCallbackForTesting() {
- PrepareCallCFunction(0, GetUnusedRegister(kGpReg).gp());
+ PrepareCallCFunction(0, GetUnusedRegister(kGpReg, {}).gp());
CallCFunction(ExternalReference::wasm_call_trap_callback_for_testing(), 0);
}
diff --git a/chromium/v8/src/wasm/baseline/ppc/liftoff-assembler-ppc.h b/chromium/v8/src/wasm/baseline/ppc/liftoff-assembler-ppc.h
index e02ab95ae4b..920dda4fe6c 100644
--- a/chromium/v8/src/wasm/baseline/ppc/liftoff-assembler-ppc.h
+++ b/chromium/v8/src/wasm/baseline/ppc/liftoff-assembler-ppc.h
@@ -539,6 +539,20 @@ void LiftoffAssembler::emit_f64_set_cond(Condition cond, Register dst,
bailout(kUnsupportedArchitecture, "emit_f64_set_cond");
}
+void LiftoffAssembler::LoadTransform(LiftoffRegister dst, Register src_addr,
+ Register offset_reg, uint32_t offset_imm,
+ LoadType type,
+ LoadTransformationKind transform,
+ uint32_t* protected_load_pc) {
+ bailout(kSimd, "Load transform unimplemented");
+}
+
+void LiftoffAssembler::emit_s8x16_swizzle(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kUnsupportedArchitecture, "emit_s8x16_swizzle");
+}
+
void LiftoffAssembler::emit_f64x2_splat(LiftoffRegister dst,
LiftoffRegister src) {
bailout(kUnsupportedArchitecture, "emit_f64x2splat");
@@ -698,6 +712,28 @@ void LiftoffAssembler::emit_i64x2_shli(LiftoffRegister dst, LiftoffRegister lhs,
bailout(kSimd, "i64x2_shli");
}
+void LiftoffAssembler::emit_i64x2_shr_s(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kSimd, "i64x2_shr_s");
+}
+
+void LiftoffAssembler::emit_i64x2_shri_s(LiftoffRegister dst,
+ LiftoffRegister lhs, int32_t rhs) {
+ bailout(kSimd, "i64x2_shri_s");
+}
+
+void LiftoffAssembler::emit_i64x2_shr_u(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kSimd, "i64x2_shr_u");
+}
+
+void LiftoffAssembler::emit_i64x2_shri_u(LiftoffRegister dst,
+ LiftoffRegister lhs, int32_t rhs) {
+ bailout(kSimd, "i64x2_shri_u");
+}
+
void LiftoffAssembler::emit_i64x2_add(LiftoffRegister dst, LiftoffRegister lhs,
LiftoffRegister rhs) {
bailout(kUnsupportedArchitecture, "emit_i64x2add");
@@ -736,6 +772,21 @@ void LiftoffAssembler::emit_i32x4_neg(LiftoffRegister dst,
bailout(kUnsupportedArchitecture, "emit_i32x4neg");
}
+void LiftoffAssembler::emit_v32x4_anytrue(LiftoffRegister dst,
+ LiftoffRegister src) {
+ bailout(kSimd, "v32x4_anytrue");
+}
+
+void LiftoffAssembler::emit_v32x4_alltrue(LiftoffRegister dst,
+ LiftoffRegister src) {
+ bailout(kSimd, "v32x4_alltrue");
+}
+
+void LiftoffAssembler::emit_i32x4_bitmask(LiftoffRegister dst,
+ LiftoffRegister src) {
+ bailout(kSimd, "i32x4_bitmask");
+}
+
void LiftoffAssembler::emit_i32x4_shl(LiftoffRegister dst, LiftoffRegister lhs,
LiftoffRegister rhs) {
bailout(kSimd, "i32x4_shl");
@@ -746,6 +797,28 @@ void LiftoffAssembler::emit_i32x4_shli(LiftoffRegister dst, LiftoffRegister lhs,
bailout(kSimd, "i32x4_shli");
}
+void LiftoffAssembler::emit_i32x4_shr_s(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kSimd, "i32x4_shr_s");
+}
+
+void LiftoffAssembler::emit_i32x4_shri_s(LiftoffRegister dst,
+ LiftoffRegister lhs, int32_t rhs) {
+ bailout(kSimd, "i32x4_shri_s");
+}
+
+void LiftoffAssembler::emit_i32x4_shr_u(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kSimd, "i32x4_shr_u");
+}
+
+void LiftoffAssembler::emit_i32x4_shri_u(LiftoffRegister dst,
+ LiftoffRegister lhs, int32_t rhs) {
+ bailout(kSimd, "i32x4_shri_u");
+}
+
void LiftoffAssembler::emit_i32x4_add(LiftoffRegister dst, LiftoffRegister lhs,
LiftoffRegister rhs) {
bailout(kUnsupportedArchitecture, "emit_i32x4add");
@@ -795,6 +868,21 @@ void LiftoffAssembler::emit_i16x8_neg(LiftoffRegister dst,
bailout(kUnsupportedArchitecture, "emit_i16x8neg");
}
+void LiftoffAssembler::emit_v16x8_anytrue(LiftoffRegister dst,
+ LiftoffRegister src) {
+ bailout(kSimd, "v16x8_anytrue");
+}
+
+void LiftoffAssembler::emit_v16x8_alltrue(LiftoffRegister dst,
+ LiftoffRegister src) {
+ bailout(kSimd, "v16x8_alltrue");
+}
+
+void LiftoffAssembler::emit_i16x8_bitmask(LiftoffRegister dst,
+ LiftoffRegister src) {
+ bailout(kSimd, "i16x8_bitmask");
+}
+
void LiftoffAssembler::emit_i16x8_shl(LiftoffRegister dst, LiftoffRegister lhs,
LiftoffRegister rhs) {
bailout(kSimd, "i16x8_shl");
@@ -805,6 +893,28 @@ void LiftoffAssembler::emit_i16x8_shli(LiftoffRegister dst, LiftoffRegister lhs,
bailout(kSimd, "i16x8_shli");
}
+void LiftoffAssembler::emit_i16x8_shr_s(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kSimd, "i16x8_shr_s");
+}
+
+void LiftoffAssembler::emit_i16x8_shri_s(LiftoffRegister dst,
+ LiftoffRegister lhs, int32_t rhs) {
+ bailout(kSimd, "i16x8_shri_s");
+}
+
+void LiftoffAssembler::emit_i16x8_shr_u(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kSimd, "i16x8_shr_u");
+}
+
+void LiftoffAssembler::emit_i16x8_shri_u(LiftoffRegister dst,
+ LiftoffRegister lhs, int32_t rhs) {
+ bailout(kSimd, "i16x8_shri_u");
+}
+
void LiftoffAssembler::emit_i16x8_add(LiftoffRegister dst, LiftoffRegister lhs,
LiftoffRegister rhs) {
bailout(kUnsupportedArchitecture, "emit_i16x8add");
@@ -887,6 +997,13 @@ void LiftoffAssembler::emit_i16x8_extract_lane_s(LiftoffRegister dst,
bailout(kUnsupportedArchitecture, "emit_i16x8extractlane_s");
}
+void LiftoffAssembler::emit_s8x16_shuffle(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ LiftoffRegister rhs,
+ const uint8_t shuffle[16]) {
+ bailout(kSimd, "s8x16_shuffle");
+}
+
void LiftoffAssembler::emit_i8x16_splat(LiftoffRegister dst,
LiftoffRegister src) {
bailout(kUnsupportedArchitecture, "emit_i8x16splat");
@@ -910,6 +1027,21 @@ void LiftoffAssembler::emit_i8x16_neg(LiftoffRegister dst,
bailout(kUnsupportedArchitecture, "emit_i8x16neg");
}
+void LiftoffAssembler::emit_v8x16_anytrue(LiftoffRegister dst,
+ LiftoffRegister src) {
+ bailout(kSimd, "v8x16_anytrue");
+}
+
+void LiftoffAssembler::emit_v8x16_alltrue(LiftoffRegister dst,
+ LiftoffRegister src) {
+ bailout(kSimd, "v8x16_alltrue");
+}
+
+void LiftoffAssembler::emit_i8x16_bitmask(LiftoffRegister dst,
+ LiftoffRegister src) {
+ bailout(kSimd, "i8x16_bitmask");
+}
+
void LiftoffAssembler::emit_i8x16_shl(LiftoffRegister dst, LiftoffRegister lhs,
LiftoffRegister rhs) {
bailout(kSimd, "i8x16_shl");
@@ -920,6 +1052,28 @@ void LiftoffAssembler::emit_i8x16_shli(LiftoffRegister dst, LiftoffRegister lhs,
bailout(kSimd, "i8x16_shli");
}
+void LiftoffAssembler::emit_i8x16_shr_s(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kSimd, "i8x16_shr_s");
+}
+
+void LiftoffAssembler::emit_i8x16_shri_s(LiftoffRegister dst,
+ LiftoffRegister lhs, int32_t rhs) {
+ bailout(kSimd, "i8x16_shri_s");
+}
+
+void LiftoffAssembler::emit_i8x16_shr_u(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kSimd, "i8x16_shr_u");
+}
+
+void LiftoffAssembler::emit_i8x16_shri_u(LiftoffRegister dst,
+ LiftoffRegister lhs, int32_t rhs) {
+ bailout(kSimd, "i8x16_shri_u");
+}
+
void LiftoffAssembler::emit_i8x16_extract_lane_s(LiftoffRegister dst,
LiftoffRegister lhs,
uint8_t imm_lane_idx) {
@@ -1117,6 +1271,26 @@ void LiftoffAssembler::emit_s128_select(LiftoffRegister dst,
bailout(kUnsupportedArchitecture, "emit_s128select");
}
+void LiftoffAssembler::emit_i32x4_sconvert_f32x4(LiftoffRegister dst,
+ LiftoffRegister src) {
+ bailout(kSimd, "i32x4_sconvert_f32x4");
+}
+
+void LiftoffAssembler::emit_i32x4_uconvert_f32x4(LiftoffRegister dst,
+ LiftoffRegister src) {
+ bailout(kSimd, "i32x4_uconvert_f32x4");
+}
+
+void LiftoffAssembler::emit_f32x4_sconvert_i32x4(LiftoffRegister dst,
+ LiftoffRegister src) {
+ bailout(kSimd, "f32x4_sconvert_i32x4");
+}
+
+void LiftoffAssembler::emit_f32x4_uconvert_i32x4(LiftoffRegister dst,
+ LiftoffRegister src) {
+ bailout(kSimd, "f32x4_uconvert_i32x4");
+}
+
void LiftoffAssembler::emit_i8x16_sconvert_i16x8(LiftoffRegister dst,
LiftoffRegister lhs,
LiftoffRegister rhs) {
diff --git a/chromium/v8/src/wasm/baseline/s390/liftoff-assembler-s390.h b/chromium/v8/src/wasm/baseline/s390/liftoff-assembler-s390.h
index 704fcb81d74..803358c97e7 100644
--- a/chromium/v8/src/wasm/baseline/s390/liftoff-assembler-s390.h
+++ b/chromium/v8/src/wasm/baseline/s390/liftoff-assembler-s390.h
@@ -543,6 +543,20 @@ void LiftoffAssembler::emit_f64_set_cond(Condition cond, Register dst,
bailout(kUnsupportedArchitecture, "emit_f64_set_cond");
}
+void LiftoffAssembler::LoadTransform(LiftoffRegister dst, Register src_addr,
+ Register offset_reg, uint32_t offset_imm,
+ LoadType type,
+ LoadTransformationKind transform,
+ uint32_t* protected_load_pc) {
+ bailout(kSimd, "Load transform unimplemented");
+}
+
+void LiftoffAssembler::emit_s8x16_swizzle(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kUnsupportedArchitecture, "emit_s8x16_swizzle");
+}
+
void LiftoffAssembler::emit_f64x2_splat(LiftoffRegister dst,
LiftoffRegister src) {
bailout(kUnsupportedArchitecture, "emit_f64x2splat");
@@ -702,6 +716,28 @@ void LiftoffAssembler::emit_i64x2_shli(LiftoffRegister dst, LiftoffRegister lhs,
bailout(kSimd, "i64x2_shli");
}
+void LiftoffAssembler::emit_i64x2_shr_s(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kSimd, "i64x2_shr_s");
+}
+
+void LiftoffAssembler::emit_i64x2_shri_s(LiftoffRegister dst,
+ LiftoffRegister lhs, int32_t rhs) {
+ bailout(kSimd, "i64x2_shri_s");
+}
+
+void LiftoffAssembler::emit_i64x2_shr_u(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kSimd, "i64x2_shr_u");
+}
+
+void LiftoffAssembler::emit_i64x2_shri_u(LiftoffRegister dst,
+ LiftoffRegister lhs, int32_t rhs) {
+ bailout(kSimd, "i64x2_shri_u");
+}
+
void LiftoffAssembler::emit_i64x2_add(LiftoffRegister dst, LiftoffRegister lhs,
LiftoffRegister rhs) {
bailout(kUnsupportedArchitecture, "emit_i64x2add");
@@ -740,6 +776,21 @@ void LiftoffAssembler::emit_i32x4_neg(LiftoffRegister dst,
bailout(kUnsupportedArchitecture, "emit_i32x4neg");
}
+void LiftoffAssembler::emit_v32x4_anytrue(LiftoffRegister dst,
+ LiftoffRegister src) {
+ bailout(kSimd, "v32x4_anytrue");
+}
+
+void LiftoffAssembler::emit_v32x4_alltrue(LiftoffRegister dst,
+ LiftoffRegister src) {
+ bailout(kSimd, "v32x4_alltrue");
+}
+
+void LiftoffAssembler::emit_i32x4_bitmask(LiftoffRegister dst,
+ LiftoffRegister src) {
+ bailout(kSimd, "i32x4_bitmask");
+}
+
void LiftoffAssembler::emit_i32x4_shl(LiftoffRegister dst, LiftoffRegister lhs,
LiftoffRegister rhs) {
bailout(kSimd, "i32x4_shl");
@@ -750,6 +801,28 @@ void LiftoffAssembler::emit_i32x4_shli(LiftoffRegister dst, LiftoffRegister lhs,
bailout(kSimd, "i32x4_shli");
}
+void LiftoffAssembler::emit_i32x4_shr_s(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kSimd, "i32x4_shr_s");
+}
+
+void LiftoffAssembler::emit_i32x4_shri_s(LiftoffRegister dst,
+ LiftoffRegister lhs, int32_t rhs) {
+ bailout(kSimd, "i32x4_shri_s");
+}
+
+void LiftoffAssembler::emit_i32x4_shr_u(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kSimd, "i32x4_shr_u");
+}
+
+void LiftoffAssembler::emit_i32x4_shri_u(LiftoffRegister dst,
+ LiftoffRegister lhs, int32_t rhs) {
+ bailout(kSimd, "i32x4_shri_u");
+}
+
void LiftoffAssembler::emit_i32x4_add(LiftoffRegister dst, LiftoffRegister lhs,
LiftoffRegister rhs) {
bailout(kUnsupportedArchitecture, "emit_i32x4add");
@@ -799,6 +872,21 @@ void LiftoffAssembler::emit_i16x8_neg(LiftoffRegister dst,
bailout(kUnsupportedArchitecture, "emit_i16x8neg");
}
+void LiftoffAssembler::emit_v16x8_anytrue(LiftoffRegister dst,
+ LiftoffRegister src) {
+ bailout(kSimd, "v16x8_anytrue");
+}
+
+void LiftoffAssembler::emit_v16x8_alltrue(LiftoffRegister dst,
+ LiftoffRegister src) {
+ bailout(kSimd, "v16x8_alltrue");
+}
+
+void LiftoffAssembler::emit_i16x8_bitmask(LiftoffRegister dst,
+ LiftoffRegister src) {
+ bailout(kSimd, "i16x8_bitmask");
+}
+
void LiftoffAssembler::emit_i16x8_shl(LiftoffRegister dst, LiftoffRegister lhs,
LiftoffRegister rhs) {
bailout(kSimd, "i16x8_shl");
@@ -809,6 +897,28 @@ void LiftoffAssembler::emit_i16x8_shli(LiftoffRegister dst, LiftoffRegister lhs,
bailout(kSimd, "i16x8_shli");
}
+void LiftoffAssembler::emit_i16x8_shr_s(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kSimd, "i16x8_shr_s");
+}
+
+void LiftoffAssembler::emit_i16x8_shri_s(LiftoffRegister dst,
+ LiftoffRegister lhs, int32_t rhs) {
+ bailout(kSimd, "i16x8_shri_s");
+}
+
+void LiftoffAssembler::emit_i16x8_shr_u(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kSimd, "i16x8_shr_u");
+}
+
+void LiftoffAssembler::emit_i16x8_shri_u(LiftoffRegister dst,
+ LiftoffRegister lhs, int32_t rhs) {
+ bailout(kSimd, "i16x8_shri_u");
+}
+
void LiftoffAssembler::emit_i16x8_add(LiftoffRegister dst, LiftoffRegister lhs,
LiftoffRegister rhs) {
bailout(kUnsupportedArchitecture, "emit_i16x8add");
@@ -891,6 +1001,13 @@ void LiftoffAssembler::emit_i16x8_extract_lane_s(LiftoffRegister dst,
bailout(kUnsupportedArchitecture, "emit_i16x8extractlane_s");
}
+void LiftoffAssembler::emit_s8x16_shuffle(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ LiftoffRegister rhs,
+ const uint8_t shuffle[16]) {
+ bailout(kSimd, "s8x16_shuffle");
+}
+
void LiftoffAssembler::emit_i8x16_splat(LiftoffRegister dst,
LiftoffRegister src) {
bailout(kUnsupportedArchitecture, "emit_i8x16splat");
@@ -920,6 +1037,21 @@ void LiftoffAssembler::emit_i8x16_neg(LiftoffRegister dst,
bailout(kUnsupportedArchitecture, "emit_i8x16neg");
}
+void LiftoffAssembler::emit_v8x16_anytrue(LiftoffRegister dst,
+ LiftoffRegister src) {
+ bailout(kSimd, "v8x16_anytrue");
+}
+
+void LiftoffAssembler::emit_v8x16_alltrue(LiftoffRegister dst,
+ LiftoffRegister src) {
+ bailout(kSimd, "v8x16_alltrue");
+}
+
+void LiftoffAssembler::emit_i8x16_bitmask(LiftoffRegister dst,
+ LiftoffRegister src) {
+ bailout(kSimd, "i8x16_bitmask");
+}
+
void LiftoffAssembler::emit_i8x16_shl(LiftoffRegister dst, LiftoffRegister lhs,
LiftoffRegister rhs) {
bailout(kSimd, "i8x16_shl");
@@ -930,6 +1062,28 @@ void LiftoffAssembler::emit_i8x16_shli(LiftoffRegister dst, LiftoffRegister lhs,
bailout(kSimd, "i8x16_shli");
}
+void LiftoffAssembler::emit_i8x16_shr_s(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kSimd, "i8x16_shr_s");
+}
+
+void LiftoffAssembler::emit_i8x16_shri_s(LiftoffRegister dst,
+ LiftoffRegister lhs, int32_t rhs) {
+ bailout(kSimd, "i8x16_shri_s");
+}
+
+void LiftoffAssembler::emit_i8x16_shr_u(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kSimd, "i8x16_shr_u");
+}
+
+void LiftoffAssembler::emit_i8x16_shri_u(LiftoffRegister dst,
+ LiftoffRegister lhs, int32_t rhs) {
+ bailout(kSimd, "i8x16_shri_u");
+}
+
void LiftoffAssembler::emit_i8x16_add(LiftoffRegister dst, LiftoffRegister lhs,
LiftoffRegister rhs) {
bailout(kUnsupportedArchitecture, "emit_i8x16add");
@@ -1149,6 +1303,26 @@ void LiftoffAssembler::emit_s128_select(LiftoffRegister dst,
bailout(kUnsupportedArchitecture, "emit_s128select");
}
+void LiftoffAssembler::emit_i32x4_sconvert_f32x4(LiftoffRegister dst,
+ LiftoffRegister src) {
+ bailout(kSimd, "i32x4_sconvert_f32x4");
+}
+
+void LiftoffAssembler::emit_i32x4_uconvert_f32x4(LiftoffRegister dst,
+ LiftoffRegister src) {
+ bailout(kSimd, "i32x4_uconvert_f32x4");
+}
+
+void LiftoffAssembler::emit_f32x4_sconvert_i32x4(LiftoffRegister dst,
+ LiftoffRegister src) {
+ bailout(kSimd, "f32x4_sconvert_i32x4");
+}
+
+void LiftoffAssembler::emit_f32x4_uconvert_i32x4(LiftoffRegister dst,
+ LiftoffRegister src) {
+ bailout(kSimd, "f32x4_uconvert_i32x4");
+}
+
void LiftoffAssembler::emit_i8x16_sconvert_i16x8(LiftoffRegister dst,
LiftoffRegister lhs,
LiftoffRegister rhs) {
diff --git a/chromium/v8/src/wasm/baseline/x64/liftoff-assembler-x64.h b/chromium/v8/src/wasm/baseline/x64/liftoff-assembler-x64.h
index 7638c4f9cc0..83571a18f4c 100644
--- a/chromium/v8/src/wasm/baseline/x64/liftoff-assembler-x64.h
+++ b/chromium/v8/src/wasm/baseline/x64/liftoff-assembler-x64.h
@@ -8,7 +8,6 @@
#include "src/wasm/baseline/liftoff-assembler.h"
#include "src/codegen/assembler.h"
-#include "src/wasm/value-type.h"
namespace v8 {
namespace internal {
@@ -306,8 +305,6 @@ void LiftoffAssembler::Load(LiftoffRegister dst, Register src_addr,
case LoadType::kS128Load:
Movdqu(dst.fp(), src_op);
break;
- default:
- UNREACHABLE();
}
}
@@ -345,8 +342,6 @@ void LiftoffAssembler::Store(Register dst_addr, Register offset_reg,
case StoreType::kS128Store:
Movdqu(dst_op, src.fp());
break;
- default:
- UNREACHABLE();
}
}
@@ -1060,10 +1055,10 @@ inline void EmitShiftOperation(LiftoffAssembler* assm, Register dst,
void (Assembler::*emit_shift)(Register)) {
// If dst is rcx, compute into the scratch register first, then move to rcx.
if (dst == rcx) {
- assm->Move(kScratchRegister, src, ValueType(type));
- if (amount != rcx) assm->Move(rcx, amount, ValueType(type));
+ assm->Move(kScratchRegister, src, ValueType::Primitive(type));
+ if (amount != rcx) assm->Move(rcx, amount, ValueType::Primitive(type));
(assm->*emit_shift)(kScratchRegister);
- assm->Move(rcx, kScratchRegister, ValueType(type));
+ assm->Move(rcx, kScratchRegister, ValueType::Primitive(type));
return;
}
@@ -1075,11 +1070,11 @@ inline void EmitShiftOperation(LiftoffAssembler* assm, Register dst,
src == rcx || assm->cache_state()->is_used(LiftoffRegister(rcx));
if (use_scratch) assm->movq(kScratchRegister, rcx);
if (src == rcx) src = kScratchRegister;
- assm->Move(rcx, amount, ValueType(type));
+ assm->Move(rcx, amount, ValueType::Primitive(type));
}
// Do the actual shift.
- if (dst != src) assm->Move(dst, src, ValueType(type));
+ if (dst != src) assm->Move(dst, src, ValueType::Primitive(type));
(assm->*emit_shift)(dst);
// Restore rcx if needed.
@@ -1620,6 +1615,7 @@ void LiftoffAssembler::emit_f64_sqrt(DoubleRegister dst, DoubleRegister src) {
}
namespace liftoff {
+#define __ assm->
// Used for float to int conversions. If the value in {converted_back} equals
// {src} afterwards, the conversion succeeded.
template <typename dst_type, typename src_type>
@@ -1628,29 +1624,29 @@ inline void ConvertFloatToIntAndBack(LiftoffAssembler* assm, Register dst,
DoubleRegister converted_back) {
if (std::is_same<double, src_type>::value) { // f64
if (std::is_same<int32_t, dst_type>::value) { // f64 -> i32
- assm->Cvttsd2si(dst, src);
- assm->Cvtlsi2sd(converted_back, dst);
+ __ Cvttsd2si(dst, src);
+ __ Cvtlsi2sd(converted_back, dst);
} else if (std::is_same<uint32_t, dst_type>::value) { // f64 -> u32
- assm->Cvttsd2siq(dst, src);
- assm->movl(dst, dst);
- assm->Cvtqsi2sd(converted_back, dst);
+ __ Cvttsd2siq(dst, src);
+ __ movl(dst, dst);
+ __ Cvtqsi2sd(converted_back, dst);
} else if (std::is_same<int64_t, dst_type>::value) { // f64 -> i64
- assm->Cvttsd2siq(dst, src);
- assm->Cvtqsi2sd(converted_back, dst);
+ __ Cvttsd2siq(dst, src);
+ __ Cvtqsi2sd(converted_back, dst);
} else {
UNREACHABLE();
}
} else { // f32
if (std::is_same<int32_t, dst_type>::value) { // f32 -> i32
- assm->Cvttss2si(dst, src);
- assm->Cvtlsi2ss(converted_back, dst);
+ __ Cvttss2si(dst, src);
+ __ Cvtlsi2ss(converted_back, dst);
} else if (std::is_same<uint32_t, dst_type>::value) { // f32 -> u32
- assm->Cvttss2siq(dst, src);
- assm->movl(dst, dst);
- assm->Cvtqsi2ss(converted_back, dst);
+ __ Cvttss2siq(dst, src);
+ __ movl(dst, dst);
+ __ Cvtqsi2ss(converted_back, dst);
} else if (std::is_same<int64_t, dst_type>::value) { // f32 -> i64
- assm->Cvttss2siq(dst, src);
- assm->Cvtqsi2ss(converted_back, dst);
+ __ Cvttss2siq(dst, src);
+ __ Cvtqsi2ss(converted_back, dst);
} else {
UNREACHABLE();
}
@@ -1661,7 +1657,7 @@ template <typename dst_type, typename src_type>
inline bool EmitTruncateFloatToInt(LiftoffAssembler* assm, Register dst,
DoubleRegister src, Label* trap) {
if (!CpuFeatures::IsSupported(SSE4_1)) {
- assm->bailout(kMissingCPUFeature, "no SSE4.1");
+ __ bailout(kMissingCPUFeature, "no SSE4.1");
return true;
}
CpuFeatureScope feature(assm, SSE4_1);
@@ -1670,24 +1666,143 @@ inline bool EmitTruncateFloatToInt(LiftoffAssembler* assm, Register dst,
DoubleRegister converted_back = kScratchDoubleReg2;
if (std::is_same<double, src_type>::value) { // f64
- assm->Roundsd(rounded, src, kRoundToZero);
+ __ Roundsd(rounded, src, kRoundToZero);
} else { // f32
- assm->Roundss(rounded, src, kRoundToZero);
+ __ Roundss(rounded, src, kRoundToZero);
}
ConvertFloatToIntAndBack<dst_type, src_type>(assm, dst, rounded,
converted_back);
if (std::is_same<double, src_type>::value) { // f64
- assm->Ucomisd(converted_back, rounded);
+ __ Ucomisd(converted_back, rounded);
} else { // f32
- assm->Ucomiss(converted_back, rounded);
+ __ Ucomiss(converted_back, rounded);
}
   // Jump to trap if PF is 1 (one of the operands was NaN) or they are not
   // equal.
- assm->j(parity_even, trap);
- assm->j(not_equal, trap);
+ __ j(parity_even, trap);
+ __ j(not_equal, trap);
+ return true;
+}
+
+template <typename dst_type, typename src_type>
+inline bool EmitSatTruncateFloatToInt(LiftoffAssembler* assm, Register dst,
+ DoubleRegister src) {
+ if (!CpuFeatures::IsSupported(SSE4_1)) {
+ __ bailout(kMissingCPUFeature, "no SSE4.1");
+ return true;
+ }
+ CpuFeatureScope feature(assm, SSE4_1);
+
+ Label done;
+ Label not_nan;
+ Label src_positive;
+
+ DoubleRegister rounded = kScratchDoubleReg;
+ DoubleRegister converted_back = kScratchDoubleReg2;
+ DoubleRegister zero_reg = kScratchDoubleReg;
+
+ if (std::is_same<double, src_type>::value) { // f64
+ __ Roundsd(rounded, src, kRoundToZero);
+ } else { // f32
+ __ Roundss(rounded, src, kRoundToZero);
+ }
+
+ ConvertFloatToIntAndBack<dst_type, src_type>(assm, dst, rounded,
+ converted_back);
+ if (std::is_same<double, src_type>::value) { // f64
+ __ Ucomisd(converted_back, rounded);
+ } else { // f32
+ __ Ucomiss(converted_back, rounded);
+ }
+
+  // Return 0 if PF is 1 (one of the operands was NaN).
+ __ j(parity_odd, &not_nan);
+ __ xorl(dst, dst);
+ __ jmp(&done);
+
+ __ bind(&not_nan);
+ // If rounding is as expected, return result
+ __ j(equal, &done);
+
+ __ xorpd(zero_reg, zero_reg);
+
+ // if out-of-bounds, check if src is positive
+ if (std::is_same<double, src_type>::value) { // f64
+ __ Ucomisd(src, zero_reg);
+ } else { // f32
+ __ Ucomiss(src, zero_reg);
+ }
+ __ j(above, &src_positive);
+ if (std::is_same<int32_t, dst_type>::value ||
+ std::is_same<uint32_t, dst_type>::value) { // i32
+ __ movl(
+ dst,
+ Immediate(static_cast<int32_t>(std::numeric_limits<dst_type>::min())));
+ } else if (std::is_same<int64_t, dst_type>::value) { // i64s
+ __ movq(dst, Immediate64(std::numeric_limits<dst_type>::min()));
+ } else {
+ UNREACHABLE();
+ }
+ __ jmp(&done);
+
+ __ bind(&src_positive);
+ if (std::is_same<int32_t, dst_type>::value ||
+ std::is_same<uint32_t, dst_type>::value) { // i32
+ __ movl(
+ dst,
+ Immediate(static_cast<int32_t>(std::numeric_limits<dst_type>::max())));
+ } else if (std::is_same<int64_t, dst_type>::value) { // i64s
+ __ movq(dst, Immediate64(std::numeric_limits<dst_type>::max()));
+ } else {
+ UNREACHABLE();
+ }
+
+ __ bind(&done);
+ return true;
+}
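// A minimal scalar sketch (illustrative, not V8 code) of the semantics the
// saturating-truncation helper above emits: NaN produces 0, and out-of-range
// inputs clamp to the destination type's minimum or maximum instead of
// trapping. The name TruncSat is an assumption made for this sketch.
#include <cmath>
#include <cstdint>
#include <limits>

template <typename Int, typename Float>
Int TruncSat(Float x) {
  if (std::isnan(x)) return 0;  // NaN inputs become 0.
  constexpr Int kMin = std::numeric_limits<Int>::min();
  constexpr Int kMax = std::numeric_limits<Int>::max();
  if (x <= static_cast<Float>(kMin)) return kMin;  // Saturate below the range.
  if (x >= static_cast<Float>(kMax)) return kMax;  // Saturate above the range.
  return static_cast<Int>(std::trunc(x));          // In range: truncate.
}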
+
+template <typename src_type>
+inline bool EmitSatTruncateFloatToUInt64(LiftoffAssembler* assm, Register dst,
+ DoubleRegister src) {
+ if (!CpuFeatures::IsSupported(SSE4_1)) {
+ __ bailout(kMissingCPUFeature, "no SSE4.1");
+ return true;
+ }
+ CpuFeatureScope feature(assm, SSE4_1);
+
+ Label done;
+ Label neg_or_nan;
+ Label overflow;
+
+ DoubleRegister zero_reg = kScratchDoubleReg;
+
+ __ xorpd(zero_reg, zero_reg);
+ if (std::is_same<double, src_type>::value) { // f64
+ __ Ucomisd(src, zero_reg);
+ } else { // f32
+ __ Ucomiss(src, zero_reg);
+ }
+ // Check if NaN
+ __ j(parity_even, &neg_or_nan);
+ __ j(below, &neg_or_nan);
+ if (std::is_same<double, src_type>::value) { // f64
+ __ Cvttsd2uiq(dst, src, &overflow);
+ } else { // f32
+ __ Cvttss2uiq(dst, src, &overflow);
+ }
+ __ jmp(&done);
+
+ __ bind(&neg_or_nan);
+ __ movq(dst, zero_reg);
+ __ jmp(&done);
+
+ __ bind(&overflow);
+ __ movq(dst, Immediate64(std::numeric_limits<uint64_t>::max()));
+ __ bind(&done);
return true;
}
+#undef __
} // namespace liftoff
bool LiftoffAssembler::emit_type_conversion(WasmOpcode opcode,
@@ -1709,6 +1824,18 @@ bool LiftoffAssembler::emit_type_conversion(WasmOpcode opcode,
case kExprI32UConvertF64:
return liftoff::EmitTruncateFloatToInt<uint32_t, double>(this, dst.gp(),
src.fp(), trap);
+ case kExprI32SConvertSatF32:
+ return liftoff::EmitSatTruncateFloatToInt<int32_t, float>(this, dst.gp(),
+ src.fp());
+ case kExprI32UConvertSatF32:
+ return liftoff::EmitSatTruncateFloatToInt<uint32_t, float>(this, dst.gp(),
+ src.fp());
+ case kExprI32SConvertSatF64:
+ return liftoff::EmitSatTruncateFloatToInt<int32_t, double>(this, dst.gp(),
+ src.fp());
+ case kExprI32UConvertSatF64:
+ return liftoff::EmitSatTruncateFloatToInt<uint32_t, double>(
+ this, dst.gp(), src.fp());
case kExprI32ReinterpretF32:
Movd(dst.gp(), src.fp());
return true;
@@ -1731,6 +1858,20 @@ bool LiftoffAssembler::emit_type_conversion(WasmOpcode opcode,
Cvttsd2uiq(dst.gp(), src.fp(), trap);
return true;
}
+ case kExprI64SConvertSatF32:
+ return liftoff::EmitSatTruncateFloatToInt<int64_t, float>(this, dst.gp(),
+ src.fp());
+ case kExprI64UConvertSatF32: {
+ return liftoff::EmitSatTruncateFloatToUInt64<float>(this, dst.gp(),
+ src.fp());
+ }
+ case kExprI64SConvertSatF64:
+ return liftoff::EmitSatTruncateFloatToInt<int64_t, double>(this, dst.gp(),
+ src.fp());
+ case kExprI64UConvertSatF64: {
+ return liftoff::EmitSatTruncateFloatToUInt64<double>(this, dst.gp(),
+ src.fp());
+ }
case kExprI64UConvertI32:
AssertZeroExtended(src.gp());
if (dst.gp() != src.gp()) movl(dst.gp(), src.gp());
@@ -1975,8 +2116,185 @@ void EmitSimdShiftOpImm(LiftoffAssembler* assm, LiftoffRegister dst,
(assm->*sse_op)(dst.fp(), shift);
}
}
+
+template <bool is_signed>
+void EmitI8x16Shr(LiftoffAssembler* assm, LiftoffRegister dst,
+ LiftoffRegister lhs, LiftoffRegister rhs) {
+ // Same algorithm as the one in code-generator-x64.cc.
+ assm->Punpckhbw(kScratchDoubleReg, lhs.fp());
+ assm->Punpcklbw(dst.fp(), lhs.fp());
+ // Prepare shift value
+ assm->movq(kScratchRegister, rhs.gp());
+ // Take shift value modulo 8.
+ assm->andq(kScratchRegister, Immediate(7));
+ assm->addq(kScratchRegister, Immediate(8));
+ assm->Movq(liftoff::kScratchDoubleReg2, kScratchRegister);
+ if (is_signed) {
+ assm->Psraw(kScratchDoubleReg, liftoff::kScratchDoubleReg2);
+ assm->Psraw(dst.fp(), liftoff::kScratchDoubleReg2);
+ assm->Packsswb(dst.fp(), kScratchDoubleReg);
+ } else {
+ assm->Psrlw(kScratchDoubleReg, liftoff::kScratchDoubleReg2);
+ assm->Psrlw(dst.fp(), liftoff::kScratchDoubleReg2);
+ assm->Packuswb(dst.fp(), kScratchDoubleReg);
+ }
+}
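// x86 has no 8-bit vector shifts, so EmitI8x16Shr above widens each byte into
// a 16-bit lane (punpck places the byte in the upper half), shifts by
// (count & 7) + 8 so the byte lands back in the low half, and re-packs. A
// scalar sketch of the per-lane result; the function names are illustrative.
#include <cstdint>

int8_t I8x16LaneShrS(int8_t lane, int32_t count) {
  return static_cast<int8_t>(lane >> (count & 7));   // Arithmetic shift.
}

uint8_t I8x16LaneShrU(uint8_t lane, int32_t count) {
  return static_cast<uint8_t>(lane >> (count & 7));  // Logical shift.
}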
+
+// Can be used by both the immediate and register version of the shifts. psraq
+// is only available in AVX512, so we can't use it yet.
+template <typename ShiftOperand>
+void EmitI64x2ShrS(LiftoffAssembler* assm, LiftoffRegister dst,
+ LiftoffRegister lhs, ShiftOperand rhs,
+ bool shift_is_rcx = false) {
+ bool restore_rcx = false;
+ Register backup = kScratchRegister2;
+ if (!shift_is_rcx) {
+ if (assm->cache_state()->is_used(LiftoffRegister(rcx))) {
+ restore_rcx = true;
+ assm->movq(backup, rcx);
+ }
+ assm->movl(rcx, rhs);
+ }
+
+ Register tmp = kScratchRegister;
+
+ assm->Pextrq(tmp, lhs.fp(), int8_t{0x0});
+ assm->sarq_cl(tmp);
+ assm->Pinsrq(dst.fp(), tmp, int8_t{0x0});
+
+ assm->Pextrq(tmp, lhs.fp(), int8_t{0x1});
+ assm->sarq_cl(tmp);
+ assm->Pinsrq(dst.fp(), tmp, int8_t{0x1});
+
+ // restore rcx.
+ if (restore_rcx) {
+ assm->movq(rcx, backup);
+ }
+}
+
+inline void EmitAnyTrue(LiftoffAssembler* assm, LiftoffRegister dst,
+ LiftoffRegister src) {
+ assm->xorq(dst.gp(), dst.gp());
+ assm->Ptest(src.fp(), src.fp());
+ assm->setcc(not_equal, dst.gp());
+}
+
+template <void (TurboAssembler::*pcmp)(XMMRegister, XMMRegister)>
+inline void EmitAllTrue(LiftoffAssembler* assm, LiftoffRegister dst,
+ LiftoffRegister src) {
+ XMMRegister tmp = kScratchDoubleReg;
+ assm->xorq(dst.gp(), dst.gp());
+ assm->Pxor(tmp, tmp);
+ (assm->*pcmp)(tmp, src.fp());
+ assm->Ptest(tmp, tmp);
+ assm->setcc(equal, dst.gp());
+}
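// EmitAnyTrue reports whether any bit of the 128-bit value is set (one ptest
// covers every lane width), while EmitAllTrue zero-compares per lane with the
// given pcmpeq* and reports whether no lane was all zero. A scalar sketch of
// the two predicates; the array view and names are illustrative.
#include <array>
#include <cstddef>
#include <cstdint>

template <typename Lane, std::size_t N>
int32_t AnyTrue(const std::array<Lane, N>& lanes) {
  for (Lane lane : lanes) {
    if (lane != 0) return 1;  // Any non-zero bit anywhere -> true.
  }
  return 0;
}

template <typename Lane, std::size_t N>
int32_t AllTrue(const std::array<Lane, N>& lanes) {
  for (Lane lane : lanes) {
    if (lane == 0) return 0;  // A single all-zero lane falsifies the result.
  }
  return 1;
}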
+
} // namespace liftoff
+void LiftoffAssembler::LoadTransform(LiftoffRegister dst, Register src_addr,
+ Register offset_reg, uint32_t offset_imm,
+ LoadType type,
+ LoadTransformationKind transform,
+ uint32_t* protected_load_pc) {
+ if (emit_debug_code() && offset_reg != no_reg) {
+ AssertZeroExtended(offset_reg);
+ }
+ Operand src_op = liftoff::GetMemOp(this, src_addr, offset_reg, offset_imm);
+ *protected_load_pc = pc_offset();
+ MachineType memtype = type.mem_type();
+ if (transform == LoadTransformationKind::kExtend) {
+ if (memtype == MachineType::Int8()) {
+ Pmovsxbw(dst.fp(), src_op);
+ } else if (memtype == MachineType::Uint8()) {
+ Pmovzxbw(dst.fp(), src_op);
+ } else if (memtype == MachineType::Int16()) {
+ Pmovsxwd(dst.fp(), src_op);
+ } else if (memtype == MachineType::Uint16()) {
+ Pmovzxwd(dst.fp(), src_op);
+ } else if (memtype == MachineType::Int32()) {
+ Pmovsxdq(dst.fp(), src_op);
+ } else if (memtype == MachineType::Uint32()) {
+ Pmovzxdq(dst.fp(), src_op);
+ }
+ } else {
+ DCHECK_EQ(LoadTransformationKind::kSplat, transform);
+ if (memtype == MachineType::Int8()) {
+ Pinsrb(dst.fp(), src_op, 0);
+ Pxor(kScratchDoubleReg, kScratchDoubleReg);
+ Pshufb(dst.fp(), kScratchDoubleReg);
+ } else if (memtype == MachineType::Int16()) {
+ Pinsrw(dst.fp(), src_op, 0);
+ Pshuflw(dst.fp(), dst.fp(), uint8_t{0});
+ Punpcklqdq(dst.fp(), dst.fp());
+ } else if (memtype == MachineType::Int32()) {
+ if (CpuFeatures::IsSupported(AVX)) {
+ CpuFeatureScope avx_scope(this, AVX);
+ vbroadcastss(dst.fp(), src_op);
+ } else {
+ Movss(dst.fp(), src_op);
+ Shufps(dst.fp(), dst.fp(), byte{0});
+ }
+ } else if (memtype == MachineType::Int64()) {
+ Movddup(dst.fp(), src_op);
+ }
+ }
+}
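// The x64 LoadTransform above implements two transforms: kExtend loads 64
// bits and widens each narrow lane to twice its width (pmovsx*/pmovzx*), and
// kSplat loads one scalar and broadcasts it to every lane. A scalar sketch of
// both, shown for a single lane width; the names here are illustrative.
#include <array>
#include <cstdint>
#include <cstring>

// kExtend for MachineType::Int8(): read 8 bytes, sign-extend each to 16 bits.
std::array<int16_t, 8> LoadExtend8x8S(const uint8_t* mem) {
  int8_t narrow[8];
  std::memcpy(narrow, mem, sizeof(narrow));
  std::array<int16_t, 8> wide{};
  for (int i = 0; i < 8; ++i) wide[i] = narrow[i];  // Sign extension.
  return wide;
}

// kSplat: read one scalar and repeat it in every lane of the 128-bit result.
template <typename Lane>
std::array<Lane, 16 / sizeof(Lane)> LoadSplat(const uint8_t* mem) {
  Lane value;
  std::memcpy(&value, mem, sizeof(value));
  std::array<Lane, 16 / sizeof(Lane)> out;
  out.fill(value);
  return out;
}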
+
+void LiftoffAssembler::emit_s8x16_shuffle(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ LiftoffRegister rhs,
+ const uint8_t shuffle[16]) {
+ LiftoffRegister tmp_simd =
+ GetUnusedRegister(kFpReg, LiftoffRegList::ForRegs(dst, lhs, rhs));
+ Movups(kScratchDoubleReg, lhs.fp());
+
+ uint64_t mask1[2] = {};
+ for (int i = 15; i >= 0; i--) {
+ uint8_t lane = shuffle[i];
+ int j = i >> 3;
+ mask1[j] <<= 8;
+ mask1[j] |= lane < kSimd128Size ? lane : 0x80;
+ }
+ TurboAssembler::Move(tmp_simd.fp(), mask1[0]);
+ movq(kScratchRegister, mask1[1]);
+ Pinsrq(tmp_simd.fp(), kScratchRegister, int8_t{1});
+ Pshufb(kScratchDoubleReg, tmp_simd.fp());
+
+ uint64_t mask2[2] = {};
+ for (int i = 15; i >= 0; i--) {
+ uint8_t lane = shuffle[i];
+ int j = i >> 3;
+ mask2[j] <<= 8;
+ mask2[j] |= lane >= kSimd128Size ? (lane & 0x0F) : 0x80;
+ }
+ TurboAssembler::Move(tmp_simd.fp(), mask2[0]);
+ movq(kScratchRegister, mask2[1]);
+ Pinsrq(tmp_simd.fp(), kScratchRegister, int8_t{1});
+
+ if (dst.fp() != rhs.fp()) {
+ Movups(dst.fp(), rhs.fp());
+ }
+ Pshufb(dst.fp(), tmp_simd.fp());
+ Por(dst.fp(), kScratchDoubleReg);
+}
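// pshufb can only select bytes from one source register, so the shuffle above
// is split into two masks: mask1 keeps pattern entries below kSimd128Size (16)
// and maps the rest to 0x80 (pshufb then writes zero), mask2 keeps entries
// 16..31 rebased with & 0x0F; the two shuffled halves are combined with por.
// A sketch of the per-byte values those packed uint64 pairs encode; the
// struct and function names are illustrative.
#include <array>
#include <cstdint>

struct ShuffleMasks {
  std::array<uint8_t, 16> from_lhs;  // Selects pattern entries < 16.
  std::array<uint8_t, 16> from_rhs;  // Selects pattern entries >= 16.
};

ShuffleMasks BuildShuffleMasks(const uint8_t shuffle[16]) {
  ShuffleMasks masks;
  for (int i = 0; i < 16; ++i) {
    uint8_t lane = shuffle[i];
    // 0x80 has the top bit set, which makes pshufb zero that byte.
    masks.from_lhs[i] = lane < 16 ? lane : 0x80;
    masks.from_rhs[i] = lane >= 16 ? (lane & 0x0F) : 0x80;
  }
  return masks;
}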
+
+void LiftoffAssembler::emit_s8x16_swizzle(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ XMMRegister mask =
+ GetUnusedRegister(kFpReg, LiftoffRegList::ForRegs(lhs, rhs)).fp();
+  // Out-of-range indices should return 0. Adding 112 (0x70) with unsigned
+  // saturation maps any index > 15 to a byte with the top bit set (>= 128),
+  // so pshufb will zero that lane.
+ TurboAssembler::Move(mask, uint32_t{0x70707070});
+ Pshufd(mask, mask, uint8_t{0x0});
+ Paddusb(mask, rhs.fp());
+ if (lhs != dst) {
+ Movaps(dst.fp(), lhs.fp());
+ }
+ Pshufb(dst.fp(), mask);
+}
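// Wasm swizzle must yield 0 for any selector >= 16, while pshufb zeroes a
// byte whenever the selector's top bit is set. The saturating paddusb of 0x70
// above leaves valid selectors 0..15 at 0x70..0x7F (top bit clear, low nibble
// intact) and pushes every larger selector to at least 0x80. A scalar sketch
// of that adjustment; the function name is illustrative.
#include <cstdint>

uint8_t AdjustSwizzleSelector(uint8_t selector) {
  unsigned sum = selector + 0x70u;
  // paddusb: unsigned add saturating at 0xFF.
  uint8_t adjusted = sum > 0xFF ? 0xFF : static_cast<uint8_t>(sum);
  // 0..15 -> 0x70..0x7F (pshufb reads only the low 4 bits); 16.. -> >= 0x80,
  // so pshufb writes zero for that lane.
  return adjusted;
}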
+
void LiftoffAssembler::emit_i8x16_splat(LiftoffRegister dst,
LiftoffRegister src) {
Movd(dst.fp(), src.gp());
@@ -2302,6 +2620,21 @@ void LiftoffAssembler::emit_i8x16_neg(LiftoffRegister dst,
}
}
+void LiftoffAssembler::emit_v8x16_anytrue(LiftoffRegister dst,
+ LiftoffRegister src) {
+ liftoff::EmitAnyTrue(this, dst, src);
+}
+
+void LiftoffAssembler::emit_v8x16_alltrue(LiftoffRegister dst,
+ LiftoffRegister src) {
+ liftoff::EmitAllTrue<&TurboAssembler::Pcmpeqb>(this, dst, src);
+}
+
+void LiftoffAssembler::emit_i8x16_bitmask(LiftoffRegister dst,
+ LiftoffRegister src) {
+ Pmovmskb(dst.gp(), src.fp());
+}
+
void LiftoffAssembler::emit_i8x16_shl(LiftoffRegister dst, LiftoffRegister lhs,
LiftoffRegister rhs) {
static constexpr RegClass tmp_simd_rc = reg_class_for(ValueType::kS128);
@@ -2347,6 +2680,48 @@ void LiftoffAssembler::emit_i8x16_shli(LiftoffRegister dst, LiftoffRegister lhs,
Pand(dst.fp(), kScratchDoubleReg);
}
+void LiftoffAssembler::emit_i8x16_shr_s(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ liftoff::EmitI8x16Shr</*is_signed=*/true>(this, dst, lhs, rhs);
+}
+
+void LiftoffAssembler::emit_i8x16_shri_s(LiftoffRegister dst,
+ LiftoffRegister lhs, int32_t rhs) {
+ Punpckhbw(kScratchDoubleReg, lhs.fp());
+ Punpcklbw(dst.fp(), lhs.fp());
+ uint8_t shift = (rhs & 7) + 8;
+ Psraw(kScratchDoubleReg, shift);
+ Psraw(dst.fp(), shift);
+ Packsswb(dst.fp(), kScratchDoubleReg);
+}
+
+void LiftoffAssembler::emit_i8x16_shr_u(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ liftoff::EmitI8x16Shr</*is_signed=*/false>(this, dst, lhs, rhs);
+}
+
+void LiftoffAssembler::emit_i8x16_shri_u(LiftoffRegister dst,
+ LiftoffRegister lhs, int32_t rhs) {
+ // Perform 16-bit shift, then mask away high bits.
+  uint8_t shift = rhs & 7;
+  if (CpuFeatures::IsSupported(AVX)) {
+    CpuFeatureScope scope(this, AVX);
+    vpsrlw(dst.fp(), lhs.fp(), byte{shift});
+  } else {
+    if (dst != lhs) Movaps(dst.fp(), lhs.fp());
+    psrlw(dst.fp(), byte{shift});
+  }
+
+ uint8_t bmask = 0xff >> shift;
+ uint32_t mask = bmask << 24 | bmask << 16 | bmask << 8 | bmask;
+ movl(kScratchRegister, Immediate(mask));
+ Movd(kScratchDoubleReg, kScratchRegister);
+ Pshufd(kScratchDoubleReg, kScratchDoubleReg, byte{0});
+ Pand(dst.fp(), kScratchDoubleReg);
+}
+
void LiftoffAssembler::emit_i8x16_add(LiftoffRegister dst, LiftoffRegister lhs,
LiftoffRegister rhs) {
liftoff::EmitSimdCommutativeBinOp<&Assembler::vpaddb, &Assembler::paddb>(
@@ -2489,6 +2864,24 @@ void LiftoffAssembler::emit_i16x8_neg(LiftoffRegister dst,
}
}
+void LiftoffAssembler::emit_v16x8_anytrue(LiftoffRegister dst,
+ LiftoffRegister src) {
+ liftoff::EmitAnyTrue(this, dst, src);
+}
+
+void LiftoffAssembler::emit_v16x8_alltrue(LiftoffRegister dst,
+ LiftoffRegister src) {
+ liftoff::EmitAllTrue<&TurboAssembler::Pcmpeqw>(this, dst, src);
+}
+
+void LiftoffAssembler::emit_i16x8_bitmask(LiftoffRegister dst,
+ LiftoffRegister src) {
+ XMMRegister tmp = kScratchDoubleReg;
+ Packsswb(tmp, src.fp());
+ Pmovmskb(dst.gp(), tmp);
+ shrq(dst.gp(), Immediate(8));
+}
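// There is no 16-bit movemask instruction, so emit_i16x8_bitmask above packs
// the eight words to bytes with signed saturation (which keeps each lane's
// sign), takes pmovmskb, and shifts right by 8 so only the bits that came
// from src remain. The operation itself just gathers per-lane sign bits; a
// scalar sketch with an illustrative name:
#include <array>
#include <cstdint>

uint32_t I16x8BitMask(const std::array<int16_t, 8>& lanes) {
  uint32_t mask = 0;
  for (int i = 0; i < 8; ++i) {
    if (lanes[i] < 0) mask |= 1u << i;  // Bit i = sign bit of lane i.
  }
  return mask;
}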
+
void LiftoffAssembler::emit_i16x8_shl(LiftoffRegister dst, LiftoffRegister lhs,
LiftoffRegister rhs) {
liftoff::EmitSimdShiftOp<&Assembler::vpsllw, &Assembler::psllw, 4>(this, dst,
@@ -2501,6 +2894,32 @@ void LiftoffAssembler::emit_i16x8_shli(LiftoffRegister dst, LiftoffRegister lhs,
this, dst, lhs, rhs);
}
+void LiftoffAssembler::emit_i16x8_shr_s(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ liftoff::EmitSimdShiftOp<&Assembler::vpsraw, &Assembler::psraw, 4>(this, dst,
+ lhs, rhs);
+}
+
+void LiftoffAssembler::emit_i16x8_shri_s(LiftoffRegister dst,
+ LiftoffRegister lhs, int32_t rhs) {
+ liftoff::EmitSimdShiftOpImm<&Assembler::vpsraw, &Assembler::psraw, 4>(
+ this, dst, lhs, rhs);
+}
+
+void LiftoffAssembler::emit_i16x8_shr_u(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ liftoff::EmitSimdShiftOp<&Assembler::vpsrlw, &Assembler::psrlw, 4>(this, dst,
+ lhs, rhs);
+}
+
+void LiftoffAssembler::emit_i16x8_shri_u(LiftoffRegister dst,
+ LiftoffRegister lhs, int32_t rhs) {
+ liftoff::EmitSimdShiftOpImm<&Assembler::vpsrlw, &Assembler::psrlw, 4>(
+ this, dst, lhs, rhs);
+}
+
void LiftoffAssembler::emit_i16x8_add(LiftoffRegister dst, LiftoffRegister lhs,
LiftoffRegister rhs) {
liftoff::EmitSimdCommutativeBinOp<&Assembler::vpaddw, &Assembler::paddw>(
@@ -2587,6 +3006,21 @@ void LiftoffAssembler::emit_i32x4_neg(LiftoffRegister dst,
}
}
+void LiftoffAssembler::emit_v32x4_anytrue(LiftoffRegister dst,
+ LiftoffRegister src) {
+ liftoff::EmitAnyTrue(this, dst, src);
+}
+
+void LiftoffAssembler::emit_v32x4_alltrue(LiftoffRegister dst,
+ LiftoffRegister src) {
+ liftoff::EmitAllTrue<&TurboAssembler::Pcmpeqd>(this, dst, src);
+}
+
+void LiftoffAssembler::emit_i32x4_bitmask(LiftoffRegister dst,
+ LiftoffRegister src) {
+ Movmskps(dst.gp(), src.fp());
+}
+
void LiftoffAssembler::emit_i32x4_shl(LiftoffRegister dst, LiftoffRegister lhs,
LiftoffRegister rhs) {
liftoff::EmitSimdShiftOp<&Assembler::vpslld, &Assembler::pslld, 5>(this, dst,
@@ -2599,6 +3033,32 @@ void LiftoffAssembler::emit_i32x4_shli(LiftoffRegister dst, LiftoffRegister lhs,
this, dst, lhs, rhs);
}
+void LiftoffAssembler::emit_i32x4_shr_s(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ liftoff::EmitSimdShiftOp<&Assembler::vpsrad, &Assembler::psrad, 5>(this, dst,
+ lhs, rhs);
+}
+
+void LiftoffAssembler::emit_i32x4_shri_s(LiftoffRegister dst,
+ LiftoffRegister lhs, int32_t rhs) {
+ liftoff::EmitSimdShiftOpImm<&Assembler::vpsrad, &Assembler::psrad, 5>(
+ this, dst, lhs, rhs);
+}
+
+void LiftoffAssembler::emit_i32x4_shr_u(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ liftoff::EmitSimdShiftOp<&Assembler::vpsrld, &Assembler::psrld, 5>(this, dst,
+ lhs, rhs);
+}
+
+void LiftoffAssembler::emit_i32x4_shri_u(LiftoffRegister dst,
+ LiftoffRegister lhs, int32_t rhs) {
+ liftoff::EmitSimdShiftOpImm<&Assembler::vpsrld, &Assembler::psrld, 5>(
+ this, dst, lhs, rhs);
+}
+
void LiftoffAssembler::emit_i32x4_add(LiftoffRegister dst, LiftoffRegister lhs,
LiftoffRegister rhs) {
liftoff::EmitSimdCommutativeBinOp<&Assembler::vpaddd, &Assembler::paddd>(
@@ -2670,6 +3130,31 @@ void LiftoffAssembler::emit_i64x2_shli(LiftoffRegister dst, LiftoffRegister lhs,
this, dst, lhs, rhs);
}
+void LiftoffAssembler::emit_i64x2_shr_s(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ liftoff::EmitI64x2ShrS(this, dst, lhs, rhs.gp(),
+ /*shift_is_rcx=*/rhs.gp() == rcx);
+}
+
+void LiftoffAssembler::emit_i64x2_shri_s(LiftoffRegister dst,
+ LiftoffRegister lhs, int32_t rhs) {
+ liftoff::EmitI64x2ShrS(this, dst, lhs, Immediate(rhs));
+}
+
+void LiftoffAssembler::emit_i64x2_shr_u(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ liftoff::EmitSimdShiftOp<&Assembler::vpsrlq, &Assembler::psrlq, 6>(this, dst,
+ lhs, rhs);
+}
+
+void LiftoffAssembler::emit_i64x2_shri_u(LiftoffRegister dst,
+ LiftoffRegister lhs, int32_t rhs) {
+ liftoff::EmitSimdShiftOpImm<&Assembler::vpsrlq, &Assembler::psrlq, 6>(
+ this, dst, lhs, rhs);
+}
+
void LiftoffAssembler::emit_i64x2_add(LiftoffRegister dst, LiftoffRegister lhs,
LiftoffRegister rhs) {
liftoff::EmitSimdCommutativeBinOp<&Assembler::vpaddq, &Assembler::paddq>(
@@ -2937,6 +3422,89 @@ void LiftoffAssembler::emit_f64x2_max(LiftoffRegister dst, LiftoffRegister lhs,
Andnpd(dst.fp(), kScratchDoubleReg);
}
+void LiftoffAssembler::emit_i32x4_sconvert_f32x4(LiftoffRegister dst,
+ LiftoffRegister src) {
+ // NAN->0
+ if (CpuFeatures::IsSupported(AVX)) {
+ CpuFeatureScope scope(this, AVX);
+ vcmpeqps(kScratchDoubleReg, src.fp(), src.fp());
+ vpand(dst.fp(), src.fp(), kScratchDoubleReg);
+ } else {
+ movaps(kScratchDoubleReg, src.fp());
+ cmpeqps(kScratchDoubleReg, kScratchDoubleReg);
+ if (dst.fp() != src.fp()) movaps(dst.fp(), src.fp());
+ pand(dst.fp(), kScratchDoubleReg);
+ }
+ // Set top bit if >= 0 (but not -0.0!).
+ Pxor(kScratchDoubleReg, dst.fp());
+ // Convert to int.
+ Cvttps2dq(dst.fp(), dst.fp());
+ // Set top bit if >=0 is now < 0.
+ Pand(kScratchDoubleReg, dst.fp());
+ Psrad(kScratchDoubleReg, byte{31});
+ // Set positive overflow lanes to 0x7FFFFFFF.
+ Pxor(dst.fp(), kScratchDoubleReg);
+}
+
+void LiftoffAssembler::emit_i32x4_uconvert_f32x4(LiftoffRegister dst,
+ LiftoffRegister src) {
+ // NAN->0, negative->0.
+ Pxor(kScratchDoubleReg, kScratchDoubleReg);
+ if (CpuFeatures::IsSupported(AVX)) {
+ CpuFeatureScope scope(this, AVX);
+ vmaxps(dst.fp(), src.fp(), kScratchDoubleReg);
+ } else {
+ if (dst.fp() != src.fp()) movaps(dst.fp(), src.fp());
+ maxps(dst.fp(), kScratchDoubleReg);
+ }
+ // scratch: float representation of max_signed.
+ Pcmpeqd(kScratchDoubleReg, kScratchDoubleReg);
+ Psrld(kScratchDoubleReg, uint8_t{1}); // 0x7fffffff
+ Cvtdq2ps(kScratchDoubleReg, kScratchDoubleReg); // 0x4f000000
+ // scratch2: convert (src-max_signed).
+ // Set positive overflow lanes to 0x7FFFFFFF.
+ // Set negative lanes to 0.
+ if (CpuFeatures::IsSupported(AVX)) {
+ CpuFeatureScope scope(this, AVX);
+ vsubps(liftoff::kScratchDoubleReg2, dst.fp(), kScratchDoubleReg);
+ } else {
+ movaps(liftoff::kScratchDoubleReg2, dst.fp());
+ subps(liftoff::kScratchDoubleReg2, kScratchDoubleReg);
+ }
+ Cmpleps(kScratchDoubleReg, liftoff::kScratchDoubleReg2);
+ Cvttps2dq(liftoff::kScratchDoubleReg2, liftoff::kScratchDoubleReg2);
+ Pxor(liftoff::kScratchDoubleReg2, kScratchDoubleReg);
+ Pxor(kScratchDoubleReg, kScratchDoubleReg);
+ Pmaxsd(liftoff::kScratchDoubleReg2, kScratchDoubleReg);
+ // Convert to int. Overflow lanes above max_signed will be 0x80000000.
+ Cvttps2dq(dst.fp(), dst.fp());
+ // Add (src-max_signed) for overflow lanes.
+ Paddd(dst.fp(), liftoff::kScratchDoubleReg2);
+}
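// cvttps2dq only covers the signed int32 range, so the sequence above clamps
// NaN/negative lanes to zero with maxps, converts lanes below 2^31 directly,
// and handles larger lanes by converting (x - 2^31) separately and adding the
// halves back together, saturating lanes >= 2^32 to 0xFFFFFFFF. A scalar
// sketch of the per-lane result; the function name is illustrative.
#include <cmath>
#include <cstdint>

uint32_t TruncSatF32ToU32(float x) {
  if (std::isnan(x) || x <= 0.0f) return 0;    // NaN and negative lanes -> 0.
  if (x >= 4294967296.0f) return 0xFFFFFFFFu;  // >= 2^32 saturates.
  if (x < 2147483648.0f) return static_cast<uint32_t>(x);  // Signed range.
  // High range: convert x - 2^31, then add 2^31 back in integer space.
  return 0x80000000u + static_cast<uint32_t>(x - 2147483648.0f);
}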
+
+void LiftoffAssembler::emit_f32x4_sconvert_i32x4(LiftoffRegister dst,
+ LiftoffRegister src) {
+ Cvtdq2ps(dst.fp(), src.fp());
+}
+
+void LiftoffAssembler::emit_f32x4_uconvert_i32x4(LiftoffRegister dst,
+ LiftoffRegister src) {
+ Pxor(kScratchDoubleReg, kScratchDoubleReg); // Zeros.
+ Pblendw(kScratchDoubleReg, src.fp(), uint8_t{0x55}); // Get lo 16 bits.
+ if (CpuFeatures::IsSupported(AVX)) {
+ CpuFeatureScope scope(this, AVX);
+ vpsubd(dst.fp(), src.fp(), kScratchDoubleReg); // Get hi 16 bits.
+ } else {
+ if (dst.fp() != src.fp()) movaps(dst.fp(), src.fp());
+ psubd(dst.fp(), kScratchDoubleReg);
+ }
+ Cvtdq2ps(kScratchDoubleReg, kScratchDoubleReg); // Convert lo exactly.
+ Psrld(dst.fp(), byte{1}); // Divide by 2 to get in unsigned range.
+ Cvtdq2ps(dst.fp(), dst.fp()); // Convert hi, exactly.
+ Addps(dst.fp(), dst.fp()); // Double hi, exactly.
+ Addps(dst.fp(), kScratchDoubleReg); // Add hi and lo, may round.
+}
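// A uint32 can carry 32 significant bits but a float mantissa only has 24,
// and cvtdq2ps is a signed conversion, so the code above splits each lane
// into its low and high 16 bits, converts both halves exactly, and performs a
// single rounding addition at the end. A scalar sketch; the name is
// illustrative.
#include <cstdint>

float F32ConvertU32(uint32_t x) {
  uint32_t lo = x & 0xFFFFu;  // Low 16 bits (the Pblendw lanes above).
  uint32_t hi = x - lo;       // High 16 bits, still scaled by 2^16.
  // The emitted code halves 'hi' so the signed conversion never sees a
  // negative value, converts, then doubles; both steps are exact in a float.
  float hi_f = static_cast<float>(hi >> 1);
  hi_f += hi_f;
  return hi_f + static_cast<float>(lo);  // Only this final add may round.
}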
+
void LiftoffAssembler::emit_i8x16_sconvert_i16x8(LiftoffRegister dst,
LiftoffRegister lhs,
LiftoffRegister rhs) {
diff --git a/chromium/v8/src/wasm/c-api.cc b/chromium/v8/src/wasm/c-api.cc
index cd5d04bd2d8..aedf5726194 100644
--- a/chromium/v8/src/wasm/c-api.cc
+++ b/chromium/v8/src/wasm/c-api.cc
@@ -71,10 +71,19 @@ ValKind V8ValueTypeToWasm(i::wasm::ValueType v8_valtype) {
return F32;
case i::wasm::ValueType::kF64:
return F64;
- case i::wasm::ValueType::kFuncRef:
- return FUNCREF;
- case i::wasm::ValueType::kAnyRef:
- return ANYREF;
+ case i::wasm::ValueType::kRef:
+ case i::wasm::ValueType::kOptRef:
+ switch (v8_valtype.heap_type()) {
+ case i::wasm::kHeapFunc:
+ return FUNCREF;
+ case i::wasm::kHeapExtern:
+ // TODO(7748): Rename this to EXTERNREF if/when third-party API
+ // changes.
+ return ANYREF;
+ default:
+ // TODO(wasm+): support new value types
+ UNREACHABLE();
+ }
default:
// TODO(wasm+): support new value types
UNREACHABLE();
@@ -94,7 +103,7 @@ i::wasm::ValueType WasmValKindToV8(ValKind kind) {
case FUNCREF:
return i::wasm::kWasmFuncRef;
case ANYREF:
- return i::wasm::kWasmAnyRef;
+ return i::wasm::kWasmExternRef;
default:
// TODO(wasm+): support new value types
UNREACHABLE();
@@ -201,8 +210,6 @@ auto seal(const typename implement<C>::type* x) -> const C* {
// Configuration
struct ConfigImpl {
- ConfigImpl() {}
- ~ConfigImpl() {}
};
template <>
@@ -249,7 +256,7 @@ void Engine::operator delete(void* p) { ::operator delete(p); }
auto Engine::make(own<Config>&& config) -> own<Engine> {
i::FLAG_expose_gc = true;
- i::FLAG_experimental_wasm_anyref = true;
+ i::FLAG_experimental_wasm_reftypes = true;
i::FLAG_experimental_wasm_bigint = true;
i::FLAG_experimental_wasm_mv = true;
auto engine = new (std::nothrow) EngineImpl;
@@ -372,10 +379,10 @@ ValTypeImpl* valtype_i32 = new ValTypeImpl(I32);
ValTypeImpl* valtype_i64 = new ValTypeImpl(I64);
ValTypeImpl* valtype_f32 = new ValTypeImpl(F32);
ValTypeImpl* valtype_f64 = new ValTypeImpl(F64);
-ValTypeImpl* valtype_anyref = new ValTypeImpl(ANYREF);
+ValTypeImpl* valtype_externref = new ValTypeImpl(ANYREF);
ValTypeImpl* valtype_funcref = new ValTypeImpl(FUNCREF);
-ValType::~ValType() {}
+ValType::~ValType() = default;
void ValType::operator delete(void*) {}
@@ -395,7 +402,7 @@ own<ValType> ValType::make(ValKind k) {
valtype = valtype_f64;
break;
case ANYREF:
- valtype = valtype_anyref;
+ valtype = valtype_externref;
break;
case FUNCREF:
valtype = valtype_funcref;
@@ -417,7 +424,7 @@ struct ExternTypeImpl {
ExternKind kind;
explicit ExternTypeImpl(ExternKind kind) : kind(kind) {}
- virtual ~ExternTypeImpl() {}
+ virtual ~ExternTypeImpl() = default;
};
template <>
@@ -455,8 +462,6 @@ struct FuncTypeImpl : ExternTypeImpl {
: ExternTypeImpl(EXTERN_FUNC),
params(std::move(params)),
results(std::move(results)) {}
-
- ~FuncTypeImpl() {}
};
template <>
@@ -464,7 +469,7 @@ struct implement<FuncType> {
using type = FuncTypeImpl;
};
-FuncType::~FuncType() {}
+FuncType::~FuncType() = default;
auto FuncType::make(ownvec<ValType>&& params, ownvec<ValType>&& results)
-> own<FuncType> {
@@ -510,7 +515,7 @@ struct GlobalTypeImpl : ExternTypeImpl {
content(std::move(content)),
mutability(mutability) {}
- ~GlobalTypeImpl() {}
+ ~GlobalTypeImpl() override = default;
};
template <>
@@ -518,7 +523,7 @@ struct implement<GlobalType> {
using type = GlobalTypeImpl;
};
-GlobalType::~GlobalType() {}
+GlobalType::~GlobalType() = default;
auto GlobalType::make(own<ValType>&& content, Mutability mutability)
-> own<GlobalType> {
@@ -563,7 +568,7 @@ struct TableTypeImpl : ExternTypeImpl {
element(std::move(element)),
limits(limits) {}
- ~TableTypeImpl() {}
+ ~TableTypeImpl() override = default;
};
template <>
@@ -571,7 +576,7 @@ struct implement<TableType> {
using type = TableTypeImpl;
};
-TableType::~TableType() {}
+TableType::~TableType() = default;
auto TableType::make(own<ValType>&& element, Limits limits) -> own<TableType> {
return element ? own<TableType>(seal<TableType>(
@@ -609,7 +614,7 @@ struct MemoryTypeImpl : ExternTypeImpl {
explicit MemoryTypeImpl(Limits limits)
: ExternTypeImpl(EXTERN_MEMORY), limits(limits) {}
- ~MemoryTypeImpl() {}
+ ~MemoryTypeImpl() override = default;
};
template <>
@@ -617,7 +622,7 @@ struct implement<MemoryType> {
using type = MemoryTypeImpl;
};
-MemoryType::~MemoryType() {}
+MemoryType::~MemoryType() = default;
auto MemoryType::make(Limits limits) -> own<MemoryType> {
return own<MemoryType>(
@@ -655,8 +660,6 @@ struct ImportTypeImpl {
: module(std::move(module)),
name(std::move(name)),
type(std::move(type)) {}
-
- ~ImportTypeImpl() {}
};
template <>
@@ -697,8 +700,6 @@ struct ExportTypeImpl {
ExportTypeImpl(Name& name, // NOLINT(runtime/references)
own<ExternType>& type) // NOLINT(runtime/references)
: name(std::move(name)), type(std::move(type)) {}
-
- ~ExportTypeImpl() {}
};
template <>
@@ -767,7 +768,7 @@ class RefImpl {
}
private:
- RefImpl() {}
+ RefImpl() = default;
i::Address* location() const {
return reinterpret_cast<i::Address*>(val_.address());
@@ -813,8 +814,6 @@ struct FrameImpl {
func_offset(func_offset),
module_offset(module_offset) {}
- ~FrameImpl() {}
-
own<Instance> instance;
uint32_t func_index;
size_t func_offset;
@@ -854,7 +853,7 @@ struct implement<Trap> {
using type = RefImpl<Trap, i::JSReceiver>;
};
-Trap::~Trap() {}
+Trap::~Trap() = default;
auto Trap::copy() const -> own<Trap> { return impl(this)->copy(); }
@@ -941,7 +940,7 @@ struct implement<Foreign> {
using type = RefImpl<Foreign, i::JSReceiver>;
};
-Foreign::~Foreign() {}
+Foreign::~Foreign() = default;
auto Foreign::copy() const -> own<Foreign> { return impl(this)->copy(); }
@@ -962,7 +961,7 @@ struct implement<Module> {
using type = RefImpl<Module, i::WasmModuleObject>;
};
-Module::~Module() {}
+Module::~Module() = default;
auto Module::copy() const -> own<Module> { return impl(this)->copy(); }
@@ -1106,7 +1105,7 @@ struct implement<Extern> {
using type = RefImpl<Extern, i::JSReceiver>;
};
-Extern::~Extern() {}
+Extern::~Extern() = default;
auto Extern::copy() const -> own<Extern> { return impl(this)->copy(); }
@@ -1177,7 +1176,7 @@ struct implement<Func> {
using type = RefImpl<Func, i::JSFunction>;
};
-Func::~Func() {}
+Func::~Func() = default;
auto Func::copy() const -> own<Func> { return impl(this)->copy(); }
@@ -1384,7 +1383,7 @@ void PrepareFunctionData(i::Isolate* isolate,
if (!function_data->c_wrapper_code().IsSmi()) return;
// Compile wrapper code.
i::Handle<i::Code> wrapper_code =
- i::compiler::CompileCWasmEntry(isolate, sig).ToHandleChecked();
+ i::compiler::CompileCWasmEntry(isolate, sig);
function_data->set_c_wrapper_code(*wrapper_code);
// Compute packed args size.
function_data->set_packed_args_size(
@@ -1414,16 +1413,13 @@ void PushArgs(const i::wasm::FunctionSig* sig, const Val args[],
case i::wasm::ValueType::kF64:
packer->Push(args[i].f64());
break;
- case i::wasm::ValueType::kAnyRef:
- case i::wasm::ValueType::kFuncRef:
- case i::wasm::ValueType::kNullRef:
+ case i::wasm::ValueType::kRef:
+ case i::wasm::ValueType::kOptRef:
+ // TODO(7748): Make sure this works for all types.
packer->Push(WasmRefToV8(store->i_isolate(), args[i].ref())->ptr());
break;
- case i::wasm::ValueType::kExnRef:
- // TODO(jkummerow): Implement these.
- UNIMPLEMENTED();
- break;
default:
+ // TODO(7748): Implement these.
UNIMPLEMENTED();
}
}
@@ -1447,20 +1443,23 @@ void PopArgs(const i::wasm::FunctionSig* sig, Val results[],
case i::wasm::ValueType::kF64:
results[i] = Val(packer->Pop<double>());
break;
- case i::wasm::ValueType::kAnyRef:
- case i::wasm::ValueType::kFuncRef:
- case i::wasm::ValueType::kNullRef: {
- i::Address raw = packer->Pop<i::Address>();
- i::Handle<i::Object> obj(i::Object(raw), store->i_isolate());
- DCHECK_IMPLIES(type == i::wasm::kWasmNullRef, obj->IsNull());
- results[i] = Val(V8RefValueToWasm(store, obj));
- break;
- }
- case i::wasm::ValueType::kExnRef:
- // TODO(jkummerow): Implement these.
- UNIMPLEMENTED();
+ case i::wasm::ValueType::kRef:
+ case i::wasm::ValueType::kOptRef:
+ switch (type.heap_type()) {
+ case i::wasm::kHeapExtern:
+ case i::wasm::kHeapFunc: {
+ i::Address raw = packer->Pop<i::Address>();
+ i::Handle<i::Object> obj(i::Object(raw), store->i_isolate());
+ results[i] = Val(V8RefValueToWasm(store, obj));
+ break;
+ }
+ default:
+ // TODO(jkummerow): Implement these.
+ UNIMPLEMENTED();
+ }
break;
default:
+ // TODO(7748): Implement these.
UNIMPLEMENTED();
}
}
@@ -1662,7 +1661,7 @@ struct implement<Global> {
using type = RefImpl<Global, i::WasmGlobalObject>;
};
-Global::~Global() {}
+Global::~Global() = default;
auto Global::copy() const -> own<Global> { return impl(this)->copy(); }
@@ -1707,14 +1706,21 @@ auto Global::get() const -> Val {
return Val(v8_global->GetF32());
case i::wasm::ValueType::kF64:
return Val(v8_global->GetF64());
- case i::wasm::ValueType::kAnyRef:
- case i::wasm::ValueType::kFuncRef: {
- StoreImpl* store = impl(this)->store();
- i::HandleScope scope(store->i_isolate());
- return Val(V8RefValueToWasm(store, v8_global->GetRef()));
- }
+ case i::wasm::ValueType::kRef:
+ case i::wasm::ValueType::kOptRef:
+ switch (v8_global->type().heap_type()) {
+ case i::wasm::kHeapExtern:
+ case i::wasm::kHeapFunc: {
+ StoreImpl* store = impl(this)->store();
+ i::HandleScope scope(store->i_isolate());
+ return Val(V8RefValueToWasm(store, v8_global->GetRef()));
+ }
+ default:
+ // TODO(wasm+): Support new value types.
+ UNREACHABLE();
+ }
default:
- // TODO(wasm+): support new value types
+ // TODO(7748): Implement these.
UNREACHABLE();
}
}
@@ -1731,7 +1737,7 @@ void Global::set(const Val& val) {
case F64:
return v8_global->SetF64(val.f64());
case ANYREF:
- return v8_global->SetAnyRef(
+ return v8_global->SetExternRef(
WasmRefToV8(impl(this)->store()->i_isolate(), val.ref()));
case FUNCREF: {
i::Isolate* isolate = impl(this)->store()->i_isolate();
@@ -1754,7 +1760,7 @@ struct implement<Table> {
using type = RefImpl<Table, i::WasmTableObject>;
};
-Table::~Table() {}
+Table::~Table() = default;
auto Table::copy() const -> own<Table> { return impl(this)->copy(); }
@@ -1772,8 +1778,8 @@ auto Table::make(Store* store_abs, const TableType* type, const Ref* ref)
break;
case ANYREF:
// See Engine::make().
- DCHECK(i::wasm::WasmFeatures::FromFlags().has_anyref());
- i_type = i::wasm::kWasmAnyRef;
+ DCHECK(i::wasm::WasmFeatures::FromFlags().has_reftypes());
+ i_type = i::wasm::kWasmExternRef;
break;
default:
UNREACHABLE();
@@ -1815,11 +1821,11 @@ auto Table::type() const -> own<TableType> {
uint32_t max;
if (!table->maximum_length().ToUint32(&max)) max = 0xFFFFFFFFu;
ValKind kind;
- switch (table->type().kind()) {
- case i::wasm::ValueType::kFuncRef:
+ switch (table->type().heap_type()) {
+ case i::wasm::kHeapFunc:
kind = FUNCREF;
break;
- case i::wasm::ValueType::kAnyRef:
+ case i::wasm::kHeapExtern:
kind = ANYREF;
break;
default:
@@ -1873,7 +1879,7 @@ struct implement<Memory> {
using type = RefImpl<Memory, i::WasmMemoryObject>;
};
-Memory::~Memory() {}
+Memory::~Memory() = default;
auto Memory::copy() const -> own<Memory> { return impl(this)->copy(); }
@@ -1941,7 +1947,7 @@ struct implement<Instance> {
using type = RefImpl<Instance, i::WasmInstanceObject>;
};
-Instance::~Instance() {}
+Instance::~Instance() = default;
auto Instance::copy() const -> own<Instance> { return impl(this)->copy(); }
diff --git a/chromium/v8/src/wasm/c-api.h b/chromium/v8/src/wasm/c-api.h
index 43a0fb73b2d..426806f1d20 100644
--- a/chromium/v8/src/wasm/c-api.h
+++ b/chromium/v8/src/wasm/c-api.h
@@ -43,7 +43,7 @@ class StoreImpl {
private:
friend own<Store> Store::make(Engine*);
- StoreImpl() {}
+ StoreImpl() = default;
v8::Isolate::CreateParams create_params_;
v8::Isolate* isolate_ = nullptr;
diff --git a/chromium/v8/src/wasm/decoder.h b/chromium/v8/src/wasm/decoder.h
index 695960086e1..8e6afe67f0a 100644
--- a/chromium/v8/src/wasm/decoder.h
+++ b/chromium/v8/src/wasm/decoder.h
@@ -137,7 +137,6 @@ class Decoder {
if (length == nullptr) {
length = &unused_length;
}
- DCHECK(WasmOpcodes::IsPrefixOpcode(static_cast<WasmOpcode>(*pc)));
uint32_t index;
if (*pc == WasmOpcode::kSimdPrefix) {
// SIMD opcodes can be multiple bytes (when LEB128 encoded).
diff --git a/chromium/v8/src/wasm/function-body-decoder-impl.h b/chromium/v8/src/wasm/function-body-decoder-impl.h
index 48b804a3a92..d038a7c8d52 100644
--- a/chromium/v8/src/wasm/function-body-decoder-impl.h
+++ b/chromium/v8/src/wasm/function-body-decoder-impl.h
@@ -18,6 +18,7 @@
#include "src/wasm/wasm-limits.h"
#include "src/wasm/wasm-module.h"
#include "src/wasm/wasm-opcodes.h"
+#include "src/wasm/wasm-subtyping.h"
namespace v8 {
namespace internal {
@@ -42,7 +43,7 @@ struct WasmException;
}())
#define CHECK_PROTOTYPE_OPCODE_GEN(feat, opt_break) \
- DCHECK(!this->module_ || this->module_->origin == kWasmOrigin); \
+ DCHECK(this->module_->origin == kWasmOrigin); \
if (!this->enabled_.has_##feat()) { \
this->error("Invalid opcode (enable with --experimental-wasm-" #feat ")"); \
opt_break \
@@ -128,6 +129,138 @@ struct WasmException;
V(I64AtomicStore16U, Uint16) \
V(I64AtomicStore32U, Uint32)
+namespace value_type_reader {
+
+// Read a value type starting at address 'pc' in 'decoder'.
+// No bytes are consumed. The number of bytes read is written into 'length'.
+// Returns the decoded value type, or kWasmBottom if decoding failed.
+// Registers an error if the type opcode is invalid iff validate is set.
+template <Decoder::ValidateFlag validate>
+ValueType read_value_type(Decoder* decoder, const byte* pc,
+ uint32_t* const length, const WasmFeatures& enabled) {
+ *length = 1;
+ byte val = decoder->read_u8<validate>(pc, "value type opcode");
+ if (decoder->failed()) {
+ return kWasmBottom;
+ }
+
+ ValueTypeCode code = static_cast<ValueTypeCode>(val);
+
+#define REF_TYPE_CASE(heap_type, nullable, feature) \
+ case kLocal##heap_type##Ref: { \
+ ValueType result = ValueType::Ref(kHeap##heap_type, nullable); \
+ if (enabled.has_##feature()) { \
+ return result; \
+ } \
+ decoder->errorf( \
+ pc, "invalid value type '%s', enable with --experimental-wasm-%s", \
+ result.type_name().c_str(), #feature); \
+ return kWasmBottom; \
+ }
+
+ switch (code) {
+ REF_TYPE_CASE(Func, kNullable, reftypes)
+ REF_TYPE_CASE(Extern, kNullable, reftypes)
+ REF_TYPE_CASE(Eq, kNullable, gc)
+ REF_TYPE_CASE(Exn, kNullable, eh)
+ case kLocalI32:
+ return kWasmI32;
+ case kLocalI64:
+ return kWasmI64;
+ case kLocalF32:
+ return kWasmF32;
+ case kLocalF64:
+ return kWasmF64;
+ case kLocalRef:
+ case kLocalOptRef: {
+ // Set length for the macro-defined cases:
+ *length += 1;
+ Nullability nullability = code == kLocalOptRef ? kNullable : kNonNullable;
+ uint8_t heap_index = decoder->read_u8<validate>(pc + 1, "heap type");
+ switch (static_cast<ValueTypeCode>(heap_index)) {
+ REF_TYPE_CASE(Func, nullability, typed_funcref)
+ REF_TYPE_CASE(Extern, nullability, typed_funcref)
+ REF_TYPE_CASE(Eq, nullability, gc)
+ REF_TYPE_CASE(Exn, nullability, eh)
+ default:
+ uint32_t type_index =
+ decoder->read_u32v<validate>(pc + 1, length, "type index");
+ *length += 1;
+ if (!enabled.has_gc()) {
+ decoder->error(
+ pc,
+ "invalid value type '(ref [null] (type $t))', enable with "
+ "--experimental-wasm-typed-gc");
+ return kWasmBottom;
+ }
+
+ if (!VALIDATE(type_index < kV8MaxWasmTypes)) {
+ decoder->errorf(pc + 1,
+ "Type index %u is greater than the maximum "
+ "number %zu of type definitions supported by V8",
+ type_index, kV8MaxWasmTypes);
+ return kWasmBottom;
+ }
+ return ValueType::Ref(static_cast<HeapType>(type_index), nullability);
+ }
+ decoder->errorf(
+ pc,
+ "invalid value type '(ref%s $t)', enable with --experimental-wasm-gc",
+ nullability ? " null" : "");
+ return kWasmBottom;
+ }
+#undef REF_TYPE_CASE
+ case kLocalRtt:
+ if (enabled.has_gc()) {
+ uint32_t depth_length;
+ uint32_t depth =
+ decoder->read_u32v<validate>(pc + 1, &depth_length, "depth");
+ // TODO(7748): Introduce a proper limit.
+ const uint32_t kMaxRttSubtypingDepth = 7;
+ if (!VALIDATE(depth <= kMaxRttSubtypingDepth)) {
+ decoder->errorf(pc,
+ "subtyping depth %u is greater than the maximum "
+ "depth %u supported by V8",
+ depth, kMaxRttSubtypingDepth);
+ return kWasmBottom;
+ }
+ uint32_t type_index = decoder->read_u32v<validate>(
+ pc + 1 + depth_length, length, "type index");
+ if (!VALIDATE(type_index < kV8MaxWasmTypes)) {
+ decoder->errorf(pc,
+ "Type index %u is greater than the maximum "
+ "number %zu of type definitions supported by V8",
+ type_index, kV8MaxWasmTypes);
+ return kWasmBottom;
+ }
+ *length += 1 + depth_length;
+ return ValueType::Rtt(static_cast<HeapType>(type_index),
+ static_cast<uint8_t>(depth));
+ }
+ decoder->error(
+ pc, "invalid value type 'rtt', enable with --experimental-wasm-gc");
+ return kWasmBottom;
+ case kLocalS128:
+ if (enabled.has_simd()) {
+ return kWasmS128;
+ }
+ decoder->error(
+ pc,
+ "invalid value type 'Simd128', enable with --experimental-wasm-simd");
+ return kWasmBottom;
+ case kLocalVoid:
+ case kLocalI8:
+ case kLocalI16:
+ // Although these types are included in ValueType, they are technically
+ // not value types and are only used in specific contexts. The caller of
+ // this function is responsible to check for them separately.
+ break;
+ }
+ // Malformed modules specifying invalid types can get here.
+ return kWasmBottom;
+}
+} // namespace value_type_reader
+
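// The reader above maps a leading byte to a ValueType, consuming extra bytes
// for the (ref $t) / (ref null $t) and rtt forms and reporting the total in
// 'length'. A minimal sketch of just the single-byte cases, using the wasm
// binary type codes; the enum and function are illustrative, not V8's types.
#include <cstdint>
#include <optional>

enum class SimpleValueType { kI32, kI64, kF32, kF64, kFuncRef, kExternRef };

std::optional<SimpleValueType> ReadSimpleValueType(uint8_t code) {
  switch (code) {
    case 0x7F: return SimpleValueType::kI32;
    case 0x7E: return SimpleValueType::kI64;
    case 0x7D: return SimpleValueType::kF32;
    case 0x7C: return SimpleValueType::kF64;
    case 0x70: return SimpleValueType::kFuncRef;
    case 0x6F: return SimpleValueType::kExternRef;
    default:   return std::nullopt;  // Multi-byte or invalid: see full reader.
  }
}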
// Helpers for decoding different kinds of immediates which follow bytecodes.
template <Decoder::ValidateFlag validate>
struct LocalIndexImmediate {
@@ -174,7 +307,9 @@ struct ImmF32Immediate {
float value;
uint32_t length = 4;
inline ImmF32Immediate(Decoder* decoder, const byte* pc) {
- // Avoid bit_cast because it might not preserve the signalling bit of a NaN.
+ // We can't use bit_cast here because calling any helper function that
+ // returns a float would potentially flip NaN bits per C++ semantics, so we
+ // have to inline the memcpy call directly.
uint32_t tmp = decoder->read_u32<validate>(pc + 1, "immf32");
memcpy(&value, &tmp, sizeof(value));
}
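The memcpy above is the point of the comment: loading the immediate into an integer temporary and copying its bytes keeps the exact bit pattern, whereas routing the value through a float-returning helper could quiet a signalling NaN. A simplified, self-contained sketch of the same pattern; the struct name is invented, and unlike the real decoder it does no bounds checking or validation.

#include <cstdint>
#include <cstring>

struct F32ImmediateSketch {
  float value = 0;
  void DecodeFrom(const uint8_t* raw4) {
    uint32_t tmp;
    std::memcpy(&tmp, raw4, sizeof(tmp));      // raw 4-byte load
    std::memcpy(&value, &tmp, sizeof(value));  // bit-identical reinterpret
  }
};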
@@ -192,6 +327,17 @@ struct ImmF64Immediate {
};
template <Decoder::ValidateFlag validate>
+struct RefNullImmediate {
+ ValueType type;
+ uint32_t length = 1;
+ inline RefNullImmediate(const WasmFeatures& enabled, Decoder* decoder,
+ const byte* pc) {
+ type = value_type_reader::read_value_type<validate>(decoder, pc + 1,
+ &length, enabled);
+ }
+};
+
+template <Decoder::ValidateFlag validate>
struct GlobalIndexImmediate {
uint32_t index;
ValueType type = kWasmStmt;
@@ -203,135 +349,6 @@ struct GlobalIndexImmediate {
}
};
-namespace value_type_reader {
-
-// Read a value type starting at address 'pc' in 'decoder'.
-// No bytes are consumed. The result is written into the 'result' parameter.
-// Returns the amount of bytes read, or 0 if decoding failed.
-// Registers an error if the type opcode is invalid iff validate is set.
-template <Decoder::ValidateFlag validate>
-uint32_t read_value_type(Decoder* decoder, const byte* pc, ValueType* result,
- const WasmFeatures& enabled) {
- byte val = decoder->read_u8<validate>(pc, "value type opcode");
- if (decoder->failed()) return 0;
-
- ValueTypeCode code = static_cast<ValueTypeCode>(val);
- switch (code) {
- case kLocalI32:
- *result = kWasmI32;
- return 1;
- case kLocalI64:
- *result = kWasmI64;
- return 1;
- case kLocalF32:
- *result = kWasmF32;
- return 1;
- case kLocalF64:
- *result = kWasmF64;
- return 1;
- case kLocalAnyRef:
- if (enabled.has_anyref()) {
- *result = kWasmAnyRef;
- return 1;
- }
- decoder->error(pc,
- "invalid value type 'anyref', enable with "
- "--experimental-wasm-anyref");
- return 0;
- case kLocalFuncRef:
- if (enabled.has_anyref()) {
- *result = kWasmFuncRef;
- return 1;
- }
- decoder->error(pc,
- "invalid value type 'funcref', enable with "
- "--experimental-wasm-anyref");
- return 0;
- case kLocalNullRef:
- if (enabled.has_anyref()) {
- *result = kWasmNullRef;
- return 1;
- }
- decoder->error(pc,
- "invalid value type 'nullref', enable with "
- "--experimental-wasm-anyref");
- return 0;
- case kLocalExnRef:
- if (enabled.has_eh()) {
- *result = kWasmExnRef;
- return 1;
- }
- decoder->error(pc,
- "invalid value type 'exception ref', enable with "
- "--experimental-wasm-eh");
- return 0;
- case kLocalRef:
- if (enabled.has_gc()) {
- uint32_t length;
- uint32_t type_index =
- decoder->read_u32v<validate>(pc + 1, &length, "type index");
- *result = ValueType(ValueType::kRef, type_index);
- return length + 1;
- }
- decoder->error(pc,
- "invalid value type 'ref', enable with "
- "--experimental-wasm-gc");
- return 0;
- case kLocalOptRef:
- if (enabled.has_gc()) {
- uint32_t length;
- uint32_t type_index =
- decoder->read_u32v<validate>(pc + 1, &length, "type index");
- *result = ValueType(ValueType::kOptRef, type_index);
- return length + 1;
- }
- decoder->error(pc,
- "invalid value type 'optref', enable with "
- "--experimental-wasm-gc");
- return 0;
- case kLocalEqRef:
- if (enabled.has_gc()) {
- *result = kWasmEqRef;
- return 1;
- }
- decoder->error(pc,
- "invalid value type 'eqref', enable with "
- "--experimental-wasm-simd");
- return 0;
- case kLocalI31Ref:
- if (enabled.has_gc()) {
- // TODO(7748): Implement
- decoder->error(pc, "'i31ref' is unimplemented");
- }
- decoder->error(pc,
- "invalid value type 'i31ref', enable with "
- "--experimental-wasm-simd");
- return 0;
- case kLocalRttRef:
- if (enabled.has_gc()) {
- // TODO(7748): Implement
- decoder->error(pc, "'rttref' is unimplemented");
- }
- decoder->error(pc,
- "invalid value type 'rttref', enable with "
- "--experimental-wasm-simd");
- return 0;
- case kLocalS128:
- if (enabled.has_simd()) {
- *result = kWasmS128;
- return 1;
- }
- decoder->error(pc,
- "invalid value type 'Simd128', enable with "
- "--experimental-wasm-simd");
- return 0;
- default:
- *result = kWasmBottom;
- return 0;
- }
-}
-} // namespace value_type_reader
-
template <Decoder::ValidateFlag validate>
struct SelectTypeImmediate {
uint32_t length;
@@ -346,10 +363,11 @@ struct SelectTypeImmediate {
pc + 1, "Invalid number of types. Select accepts exactly one type");
return;
}
- uint32_t type_length = value_type_reader::read_value_type<validate>(
- decoder, pc + length + 1, &type, enabled);
+ uint32_t type_length;
+ type = value_type_reader::read_value_type<validate>(
+ decoder, pc + length + 1, &type_length, enabled);
length += type_length;
- if (type_length == 0) {
+ if (type == kWasmBottom) {
decoder->error(pc + 1, "invalid select type");
}
}
@@ -368,9 +386,9 @@ struct BlockTypeImmediate {
// 1st case: void block. Struct fields stay at default values.
return;
}
- length = value_type_reader::read_value_type<validate>(decoder, pc + 1,
- &type, enabled);
- if (length > 0) {
+ type = value_type_reader::read_value_type<validate>(decoder, pc + 1,
+ &length, enabled);
+ if (type != kWasmBottom) {
// 2nd case: block with val type immediate.
return;
}
@@ -497,6 +515,17 @@ struct ArrayIndexImmediate {
}
};
+// TODO(jkummerow): Make this a superclass of StructIndexImmediate and
+// ArrayIndexImmediate? Maybe even FunctionIndexImmediate too?
+template <Decoder::ValidateFlag validate>
+struct TypeIndexImmediate {
+ uint32_t index = 0;
+ uint32_t length = 0;
+ inline TypeIndexImmediate(Decoder* decoder, const byte* pc) {
+ index = decoder->read_u32v<validate>(pc, &length, "type index");
+ }
+};
+
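One possible shape of the refactoring the TODO above suggests, sketched with an invented name (nothing below exists in the tree): a shared base immediate that reads the single LEB128 index, from which StructIndexImmediate, ArrayIndexImmediate, and TypeIndexImmediate would differ only in their validation and trace label.

// Hypothetical sketch only; relies on the Decoder/byte declarations already
// in scope in this header.
template <Decoder::ValidateFlag validate>
struct IndexImmediateSketch {
  uint32_t index = 0;
  uint32_t length = 0;
  inline IndexImmediateSketch(Decoder* decoder, const byte* pc,
                              const char* trace_name) {
    index = decoder->read_u32v<validate>(pc, &length, trace_name);
  }
};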
template <Decoder::ValidateFlag validate>
struct CallIndirectImmediate {
uint32_t table_index;
@@ -509,7 +538,7 @@ struct CallIndirectImmediate {
sig_index = decoder->read_u32v<validate>(pc + 1, &len, "signature index");
TableIndexImmediate<validate> table(decoder, pc + len);
if (!VALIDATE((table.index == 0 && table.length == 1) ||
- enabled.has_anyref())) {
+ enabled.has_reftypes())) {
decoder->errorf(pc + 1 + len, "expected table index 0, found %u",
table.index);
}
@@ -733,7 +762,7 @@ struct Merge {
// Reachability::kReachable.
bool reached;
- Merge(bool reached = false) : reached(reached) {}
+ explicit Merge(bool reached = false) : reached(reached) {}
Value& operator[](uint32_t i) {
DCHECK_GT(arity, i);
@@ -746,6 +775,7 @@ enum ControlKind : uint8_t {
kControlIfElse,
kControlBlock,
kControlLoop,
+ kControlLet,
kControlTry,
kControlTryCatch
};
@@ -763,6 +793,7 @@ enum Reachability : uint8_t {
template <typename Value>
struct ControlBase {
ControlKind kind = kControlBlock;
+ uint32_t locals_count = 0;
uint32_t stack_depth = 0; // stack height at the beginning of the construct.
const uint8_t* pc = nullptr;
Reachability reachability = kReachable;
@@ -773,13 +804,16 @@ struct ControlBase {
MOVE_ONLY_NO_DEFAULT_CONSTRUCTOR(ControlBase);
- ControlBase(ControlKind kind, uint32_t stack_depth, const uint8_t* pc,
- Reachability reachability)
+ ControlBase(ControlKind kind, uint32_t locals_count, uint32_t stack_depth,
+ const uint8_t* pc, Reachability reachability)
: kind(kind),
+ locals_count(locals_count),
stack_depth(stack_depth),
pc(pc),
reachability(reachability),
- start_merge(reachability == kReachable) {}
+ start_merge(reachability == kReachable) {
+ DCHECK(kind == kControlLet || locals_count == 0);
+ }
// Check whether the current block is reachable.
bool reachable() const { return reachability == kReachable; }
@@ -799,6 +833,7 @@ struct ControlBase {
bool is_onearmed_if() const { return kind == kControlIf; }
bool is_if_else() const { return kind == kControlIfElse; }
bool is_block() const { return kind == kControlBlock; }
+ bool is_let() const { return kind == kControlLet; }
bool is_loop() const { return kind == kControlLoop; }
bool is_incomplete_try() const { return kind == kControlTry; }
bool is_try_catch() const { return kind == kControlTryCatch; }
@@ -809,122 +844,120 @@ struct ControlBase {
}
};
-enum class LoadTransformationKind : uint8_t {
- kSplat,
- kExtend,
-};
-
// This is the list of callback functions that an interface for the
// WasmFullDecoder should implement.
// F(Name, args...)
-#define INTERFACE_FUNCTIONS(F) \
- /* General: */ \
- F(StartFunction) \
- F(StartFunctionBody, Control* block) \
- F(FinishFunction) \
- F(OnFirstError) \
- F(NextInstruction, WasmOpcode) \
- /* Control: */ \
- F(Block, Control* block) \
- F(Loop, Control* block) \
- F(Try, Control* block) \
- F(Catch, Control* block, Value* exception) \
- F(If, const Value& cond, Control* if_block) \
- F(FallThruTo, Control* c) \
- F(PopControl, Control* block) \
- F(EndControl, Control* block) \
- /* Instructions: */ \
- F(UnOp, WasmOpcode opcode, const Value& value, Value* result) \
- F(BinOp, WasmOpcode opcode, const Value& lhs, const Value& rhs, \
- Value* result) \
- F(I32Const, Value* result, int32_t value) \
- F(I64Const, Value* result, int64_t value) \
- F(F32Const, Value* result, float value) \
- F(F64Const, Value* result, double value) \
- F(RefNull, Value* result) \
- F(RefFunc, uint32_t function_index, Value* result) \
- F(RefAsNonNull, const Value& arg, Value* result) \
- F(Drop, const Value& value) \
- F(DoReturn, Vector<Value> values) \
- F(LocalGet, Value* result, const LocalIndexImmediate<validate>& imm) \
- F(LocalSet, const Value& value, const LocalIndexImmediate<validate>& imm) \
- F(LocalTee, const Value& value, Value* result, \
- const LocalIndexImmediate<validate>& imm) \
- F(GlobalGet, Value* result, const GlobalIndexImmediate<validate>& imm) \
- F(GlobalSet, const Value& value, const GlobalIndexImmediate<validate>& imm) \
- F(TableGet, const Value& index, Value* result, \
- const TableIndexImmediate<validate>& imm) \
- F(TableSet, const Value& index, const Value& value, \
- const TableIndexImmediate<validate>& imm) \
- F(Unreachable) \
- F(Select, const Value& cond, const Value& fval, const Value& tval, \
- Value* result) \
- F(Br, Control* target) \
- F(BrIf, const Value& cond, uint32_t depth) \
- F(BrTable, const BranchTableImmediate<validate>& imm, const Value& key) \
- F(Else, Control* if_block) \
- F(LoadMem, LoadType type, const MemoryAccessImmediate<validate>& imm, \
- const Value& index, Value* result) \
- F(LoadTransform, LoadType type, LoadTransformationKind transform, \
- MemoryAccessImmediate<validate>& imm, const Value& index, Value* result) \
- F(StoreMem, StoreType type, const MemoryAccessImmediate<validate>& imm, \
- const Value& index, const Value& value) \
- F(CurrentMemoryPages, Value* result) \
- F(MemoryGrow, const Value& value, Value* result) \
- F(CallDirect, const CallFunctionImmediate<validate>& imm, \
- const Value args[], Value returns[]) \
- F(CallIndirect, const Value& index, \
- const CallIndirectImmediate<validate>& imm, const Value args[], \
- Value returns[]) \
- F(ReturnCall, const CallFunctionImmediate<validate>& imm, \
- const Value args[]) \
- F(ReturnCallIndirect, const Value& index, \
- const CallIndirectImmediate<validate>& imm, const Value args[]) \
- F(BrOnNull, const Value& ref_object, uint32_t depth) \
- F(SimdOp, WasmOpcode opcode, Vector<Value> args, Value* result) \
- F(SimdLaneOp, WasmOpcode opcode, const SimdLaneImmediate<validate>& imm, \
- const Vector<Value> inputs, Value* result) \
- F(Simd8x16ShuffleOp, const Simd8x16ShuffleImmediate<validate>& imm, \
- const Value& input0, const Value& input1, Value* result) \
- F(Throw, const ExceptionIndexImmediate<validate>& imm, \
- const Vector<Value>& args) \
- F(Rethrow, const Value& exception) \
- F(BrOnException, const Value& exception, \
- const ExceptionIndexImmediate<validate>& imm, uint32_t depth, \
- Vector<Value> values) \
- F(AtomicOp, WasmOpcode opcode, Vector<Value> args, \
- const MemoryAccessImmediate<validate>& imm, Value* result) \
- F(AtomicFence) \
- F(MemoryInit, const MemoryInitImmediate<validate>& imm, const Value& dst, \
- const Value& src, const Value& size) \
- F(DataDrop, const DataDropImmediate<validate>& imm) \
- F(MemoryCopy, const MemoryCopyImmediate<validate>& imm, const Value& dst, \
- const Value& src, const Value& size) \
- F(MemoryFill, const MemoryIndexImmediate<validate>& imm, const Value& dst, \
- const Value& value, const Value& size) \
- F(TableInit, const TableInitImmediate<validate>& imm, Vector<Value> args) \
- F(ElemDrop, const ElemDropImmediate<validate>& imm) \
- F(TableCopy, const TableCopyImmediate<validate>& imm, Vector<Value> args) \
- F(TableGrow, const TableIndexImmediate<validate>& imm, const Value& value, \
- const Value& delta, Value* result) \
- F(TableSize, const TableIndexImmediate<validate>& imm, Value* result) \
- F(TableFill, const TableIndexImmediate<validate>& imm, const Value& start, \
- const Value& value, const Value& count) \
- F(StructNew, const StructIndexImmediate<validate>& imm, const Value args[], \
- Value* result) \
- F(StructGet, const Value& struct_object, \
- const FieldIndexImmediate<validate>& field, Value* result) \
- F(StructSet, const Value& struct_object, \
- const FieldIndexImmediate<validate>& field, const Value& field_value) \
- F(ArrayNew, const ArrayIndexImmediate<validate>& imm, const Value& length, \
- const Value& initial_value, Value* result) \
- F(ArrayGet, const Value& array_obj, \
- const ArrayIndexImmediate<validate>& imm, const Value& index, \
- Value* result) \
- F(ArraySet, const Value& array_obj, \
- const ArrayIndexImmediate<validate>& imm, const Value& index, \
- const Value& value) \
- F(ArrayLen, const Value& array_obj, Value* result) \
+#define INTERFACE_FUNCTIONS(F) \
+ /* General: */ \
+ F(StartFunction) \
+ F(StartFunctionBody, Control* block) \
+ F(FinishFunction) \
+ F(OnFirstError) \
+ F(NextInstruction, WasmOpcode) \
+ /* Control: */ \
+ F(Block, Control* block) \
+ F(Loop, Control* block) \
+ F(Try, Control* block) \
+ F(Catch, Control* block, Value* exception) \
+ F(If, const Value& cond, Control* if_block) \
+ F(FallThruTo, Control* c) \
+ F(PopControl, Control* block) \
+ F(EndControl, Control* block) \
+ /* Instructions: */ \
+ F(UnOp, WasmOpcode opcode, const Value& value, Value* result) \
+ F(BinOp, WasmOpcode opcode, const Value& lhs, const Value& rhs, \
+ Value* result) \
+ F(I32Const, Value* result, int32_t value) \
+ F(I64Const, Value* result, int64_t value) \
+ F(F32Const, Value* result, float value) \
+ F(F64Const, Value* result, double value) \
+ F(RefNull, Value* result) \
+ F(RefFunc, uint32_t function_index, Value* result) \
+ F(RefAsNonNull, const Value& arg, Value* result) \
+ F(Drop, const Value& value) \
+ F(DoReturn, Vector<Value> values) \
+ F(LocalGet, Value* result, const LocalIndexImmediate<validate>& imm) \
+ F(LocalSet, const Value& value, const LocalIndexImmediate<validate>& imm) \
+ F(LocalTee, const Value& value, Value* result, \
+ const LocalIndexImmediate<validate>& imm) \
+ F(AllocateLocals, Vector<Value> local_values) \
+ F(DeallocateLocals, uint32_t count) \
+ F(GlobalGet, Value* result, const GlobalIndexImmediate<validate>& imm) \
+ F(GlobalSet, const Value& value, const GlobalIndexImmediate<validate>& imm) \
+ F(TableGet, const Value& index, Value* result, \
+ const TableIndexImmediate<validate>& imm) \
+ F(TableSet, const Value& index, const Value& value, \
+ const TableIndexImmediate<validate>& imm) \
+ F(Unreachable) \
+ F(Select, const Value& cond, const Value& fval, const Value& tval, \
+ Value* result) \
+ F(Br, Control* target) \
+ F(BrIf, const Value& cond, uint32_t depth) \
+ F(BrTable, const BranchTableImmediate<validate>& imm, const Value& key) \
+ F(Else, Control* if_block) \
+ F(LoadMem, LoadType type, const MemoryAccessImmediate<validate>& imm, \
+ const Value& index, Value* result) \
+ F(LoadTransform, LoadType type, LoadTransformationKind transform, \
+ MemoryAccessImmediate<validate>& imm, const Value& index, Value* result) \
+ F(StoreMem, StoreType type, const MemoryAccessImmediate<validate>& imm, \
+ const Value& index, const Value& value) \
+ F(CurrentMemoryPages, Value* result) \
+ F(MemoryGrow, const Value& value, Value* result) \
+ F(CallDirect, const CallFunctionImmediate<validate>& imm, \
+ const Value args[], Value returns[]) \
+ F(CallIndirect, const Value& index, \
+ const CallIndirectImmediate<validate>& imm, const Value args[], \
+ Value returns[]) \
+ F(ReturnCall, const CallFunctionImmediate<validate>& imm, \
+ const Value args[]) \
+ F(ReturnCallIndirect, const Value& index, \
+ const CallIndirectImmediate<validate>& imm, const Value args[]) \
+ F(BrOnNull, const Value& ref_object, uint32_t depth) \
+ F(SimdOp, WasmOpcode opcode, Vector<Value> args, Value* result) \
+ F(SimdLaneOp, WasmOpcode opcode, const SimdLaneImmediate<validate>& imm, \
+ const Vector<Value> inputs, Value* result) \
+ F(Simd8x16ShuffleOp, const Simd8x16ShuffleImmediate<validate>& imm, \
+ const Value& input0, const Value& input1, Value* result) \
+ F(Throw, const ExceptionIndexImmediate<validate>& imm, \
+ const Vector<Value>& args) \
+ F(Rethrow, const Value& exception) \
+ F(BrOnException, const Value& exception, \
+ const ExceptionIndexImmediate<validate>& imm, uint32_t depth, \
+ Vector<Value> values) \
+ F(AtomicOp, WasmOpcode opcode, Vector<Value> args, \
+ const MemoryAccessImmediate<validate>& imm, Value* result) \
+ F(AtomicFence) \
+ F(MemoryInit, const MemoryInitImmediate<validate>& imm, const Value& dst, \
+ const Value& src, const Value& size) \
+ F(DataDrop, const DataDropImmediate<validate>& imm) \
+ F(MemoryCopy, const MemoryCopyImmediate<validate>& imm, const Value& dst, \
+ const Value& src, const Value& size) \
+ F(MemoryFill, const MemoryIndexImmediate<validate>& imm, const Value& dst, \
+ const Value& value, const Value& size) \
+ F(TableInit, const TableInitImmediate<validate>& imm, Vector<Value> args) \
+ F(ElemDrop, const ElemDropImmediate<validate>& imm) \
+ F(TableCopy, const TableCopyImmediate<validate>& imm, Vector<Value> args) \
+ F(TableGrow, const TableIndexImmediate<validate>& imm, const Value& value, \
+ const Value& delta, Value* result) \
+ F(TableSize, const TableIndexImmediate<validate>& imm, Value* result) \
+ F(TableFill, const TableIndexImmediate<validate>& imm, const Value& start, \
+ const Value& value, const Value& count) \
+ F(StructNew, const StructIndexImmediate<validate>& imm, const Value args[], \
+ Value* result) \
+ F(StructGet, const Value& struct_object, \
+ const FieldIndexImmediate<validate>& field, bool is_signed, Value* result) \
+ F(StructSet, const Value& struct_object, \
+ const FieldIndexImmediate<validate>& field, const Value& field_value) \
+ F(ArrayNew, const ArrayIndexImmediate<validate>& imm, const Value& length, \
+ const Value& initial_value, Value* result) \
+ F(ArrayGet, const Value& array_obj, \
+ const ArrayIndexImmediate<validate>& imm, const Value& index, \
+ bool is_signed, Value* result) \
+ F(ArraySet, const Value& array_obj, \
+ const ArrayIndexImmediate<validate>& imm, const Value& index, \
+ const Value& value) \
+ F(ArrayLen, const Value& array_obj, Value* result) \
+ F(RttCanon, const TypeIndexImmediate<validate>& imm, Value* result) \
F(PassThrough, const Value& from, Value* to)
// Generic Wasm bytecode decoder with utilities for decoding immediates,
@@ -954,44 +987,81 @@ class WasmDecoder : public Decoder {
: static_cast<uint32_t>(local_types_->size());
}
- static bool DecodeLocals(const WasmFeatures& enabled, Decoder* decoder,
- const FunctionSig* sig,
- ZoneVector<ValueType>* type_list) {
- DCHECK_NOT_NULL(type_list);
- DCHECK_EQ(0, type_list->size());
- // Initialize from signature.
- if (sig != nullptr) {
- type_list->assign(sig->parameters().begin(), sig->parameters().end());
+ void InitializeLocalsFromSig() {
+ if (sig_ != nullptr) {
+ local_types_->assign(sig_->parameters().begin(),
+ sig_->parameters().end());
}
+ }
+
+ // Decodes local definitions in the current decoder.
+ // Returns true iff locals are found.
+  // Writes the total length of the decoded locals into 'total_length'.
+  // If insert_position is present, the decoded locals will be inserted into
+  // the 'local_types_' of this decoder. Otherwise, this function is used just
+  // to check validity and determine the encoding length of the locals in
+  // bytes. The decoder's pc is not advanced. If no locals are found (i.e., no
+  // compressed uint32 is found at pc), this returns 'false' without
+  // registering an error.
+ bool DecodeLocals(const byte* pc, uint32_t* total_length,
+ const base::Optional<uint32_t> insert_position) {
+ DCHECK_NOT_NULL(local_types_);
+
+ uint32_t length;
+ *total_length = 0;
+
+    // The value in the 'else' case is unused; we set it only for convenience.
+ ZoneVector<ValueType>::iterator insert_iterator =
+ insert_position.has_value()
+ ? local_types_->begin() + insert_position.value()
+ : local_types_->begin();
+
// Decode local declarations, if any.
- uint32_t entries = decoder->consume_u32v("local decls count");
- if (decoder->failed()) return false;
+ uint32_t entries = read_u32v<kValidate>(pc, &length, "local decls count");
+ if (failed()) {
+ error(pc + *total_length, "invalid local decls count");
+ return false;
+ }
+ *total_length += length;
TRACE("local decls count: %u\n", entries);
- while (entries-- > 0 && decoder->more()) {
- uint32_t count = decoder->consume_u32v("local count");
- if (decoder->failed()) return false;
- DCHECK_LE(type_list->size(), kV8MaxWasmFunctionLocals);
- if (count > kV8MaxWasmFunctionLocals - type_list->size()) {
- decoder->error(decoder->pc() - 1, "local count too large");
+ while (entries-- > 0) {
+ if (!more()) {
+ error(end(), "expected more local decls but reached end of input");
+ return false;
+ }
+ uint32_t count =
+ read_u32v<kValidate>(pc + *total_length, &length, "local count");
+ if (failed()) {
+ error(pc + *total_length, "invalid local count");
return false;
}
- ValueType type;
- uint32_t type_length = value_type_reader::read_value_type<validate>(
- decoder, decoder->pc(), &type, enabled);
- if (type_length == 0) {
- decoder->error(decoder->pc(), "invalid local type");
+ DCHECK_LE(local_types_->size(), kV8MaxWasmFunctionLocals);
+ if (count > kV8MaxWasmFunctionLocals - local_types_->size()) {
+ error(pc + *total_length, "local count too large");
return false;
}
- type_list->insert(type_list->end(), count, type);
- decoder->consume_bytes(type_length);
+ *total_length += length;
+
+ ValueType type = value_type_reader::read_value_type<kValidate>(
+ this, pc + *total_length, &length, enabled_);
+ if (type == kWasmBottom) {
+ error(pc + *total_length, "invalid local type");
+ return false;
+ }
+ *total_length += length;
+ if (insert_position.has_value()) {
+ // Move the insertion iterator to the end of the newly inserted locals.
+ insert_iterator =
+ local_types_->insert(insert_iterator, count, type) + count;
+ }
}
- DCHECK(decoder->ok());
+ DCHECK(ok());
return true;
}
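The wire format DecodeLocals walks is: a u32v entry count, then for each entry a u32v repeat count followed by one value type. A self-contained sketch of that loop, with value types simplified to a single opaque byte, kMaxLocals as an invented stand-in for kV8MaxWasmFunctionLocals, and no error messages.

#include <cstddef>
#include <cstdint>
#include <vector>

// Sketch of the locals format handled above:
//   local-decls := entry-count:u32v ( repeat:u32v valtype )^entry-count
// Returns false on truncated or oversized input.
bool DecodeLocalDeclsSketch(const uint8_t* pc, const uint8_t* end,
                            std::vector<uint8_t>* types) {
  constexpr size_t kMaxLocals = 50000;  // invented stand-in limit
  auto read_u32v = [end](const uint8_t*& p, uint32_t* out) {
    uint32_t value = 0, shift = 0;
    while (p < end && shift < 35) {
      uint8_t b = *p++;
      value |= static_cast<uint32_t>(b & 0x7f) << shift;
      if ((b & 0x80) == 0) {
        *out = value;
        return true;
      }
      shift += 7;
    }
    return false;
  };
  uint32_t entries;
  if (!read_u32v(pc, &entries)) return false;
  while (entries-- > 0) {
    uint32_t repeat;
    if (!read_u32v(pc, &repeat)) return false;
    if (pc >= end) return false;  // missing the value type byte
    uint8_t type = *pc++;         // opaque single-byte type code
    if (repeat > kMaxLocals - types->size()) return false;
    types->insert(types->end(), repeat, type);
  }
  return true;
}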
- static BitVector* AnalyzeLoopAssignment(Decoder* decoder, const byte* pc,
+ static BitVector* AnalyzeLoopAssignment(WasmDecoder* decoder, const byte* pc,
uint32_t locals_count, Zone* zone) {
if (pc >= decoder->end()) return nullptr;
if (*pc != kExprLoop) return nullptr;
@@ -1055,11 +1125,17 @@ class WasmDecoder : public Decoder {
return true;
}
- inline bool Complete(const byte* pc, ExceptionIndexImmediate<validate>& imm) {
- if (!VALIDATE(module_ != nullptr &&
- imm.index < module_->exceptions.size())) {
+ inline bool Validate(const byte* pc, RefNullImmediate<validate>& imm) {
+ if (!VALIDATE(imm.type.is_nullable())) {
+ errorf(pc + 1, "ref.null does not exist for %s",
+ imm.type.type_name().c_str());
return false;
}
+ return true;
+ }
+
+ inline bool Complete(const byte* pc, ExceptionIndexImmediate<validate>& imm) {
+ if (!VALIDATE(imm.index < module_->exceptions.size())) return false;
imm.exception = &module_->exceptions[imm.index];
return true;
}
@@ -1073,7 +1149,7 @@ class WasmDecoder : public Decoder {
}
inline bool Validate(const byte* pc, GlobalIndexImmediate<validate>& imm) {
- if (!VALIDATE(module_ != nullptr && imm.index < module_->globals.size())) {
+ if (!VALIDATE(imm.index < module_->globals.size())) {
errorf(pc + 1, "invalid global index: %u", imm.index);
return false;
}
@@ -1083,9 +1159,7 @@ class WasmDecoder : public Decoder {
}
inline bool Complete(const byte* pc, StructIndexImmediate<validate>& imm) {
- if (!VALIDATE(module_ != nullptr && module_->has_struct(imm.index))) {
- return false;
- }
+ if (!VALIDATE(module_->has_struct(imm.index))) return false;
imm.struct_type = module_->struct_type(imm.index);
return true;
}
@@ -1104,9 +1178,7 @@ class WasmDecoder : public Decoder {
}
inline bool Complete(const byte* pc, ArrayIndexImmediate<validate>& imm) {
- if (!VALIDATE(module_ != nullptr && module_->has_array(imm.index))) {
- return false;
- }
+ if (!VALIDATE(module_->has_array(imm.index))) return false;
imm.array_type = module_->array_type(imm.index);
return true;
}
@@ -1117,6 +1189,15 @@ class WasmDecoder : public Decoder {
return false;
}
+ inline bool Validate(const byte* pc, TypeIndexImmediate<validate>& imm) {
+ if (!VALIDATE(module_ != nullptr && (module_->has_struct(imm.index) ||
+ module_->has_array(imm.index)))) {
+ errorf(pc, "invalid type index: %u", imm.index);
+ return false;
+ }
+ return true;
+ }
+
inline bool CanReturnCall(const FunctionSig* target_sig) {
if (target_sig == nullptr) return false;
size_t num_returns = sig_->return_count();
@@ -1128,11 +1209,11 @@ class WasmDecoder : public Decoder {
}
inline bool Complete(const byte* pc, CallFunctionImmediate<validate>& imm) {
- if (!VALIDATE(module_ != nullptr &&
- imm.index < module_->functions.size())) {
- return false;
- }
+ if (!VALIDATE(imm.index < module_->functions.size())) return false;
imm.sig = module_->functions[imm.index].sig;
+ if (imm.sig->return_count() > 1) {
+ this->detected_->Add(kFeature_mv);
+ }
return true;
}
@@ -1145,22 +1226,20 @@ class WasmDecoder : public Decoder {
}
inline bool Complete(const byte* pc, CallIndirectImmediate<validate>& imm) {
- if (!VALIDATE(module_ != nullptr &&
- module_->has_signature(imm.sig_index))) {
- return false;
- }
+ if (!VALIDATE(module_->has_signature(imm.sig_index))) return false;
imm.sig = module_->signature(imm.sig_index);
+ if (imm.sig->return_count() > 1) {
+ this->detected_->Add(kFeature_mv);
+ }
return true;
}
inline bool Validate(const byte* pc, CallIndirectImmediate<validate>& imm) {
- if (!VALIDATE(module_ != nullptr &&
- imm.table_index < module_->tables.size())) {
+ if (!VALIDATE(imm.table_index < module_->tables.size())) {
error("function table has to exist to execute call_indirect");
return false;
}
- if (!VALIDATE(module_ != nullptr &&
- module_->tables[imm.table_index].type == kWasmFuncRef)) {
+ if (!VALIDATE(module_->tables[imm.table_index].type == kWasmFuncRef)) {
error("table of call_indirect must be of type funcref");
return false;
}
@@ -1235,7 +1314,7 @@ class WasmDecoder : public Decoder {
max_lane = std::max(max_lane, imm.shuffle[i]);
}
// Shuffle indices must be in [0..31] for a 16 lane shuffle.
- if (!VALIDATE(max_lane <= 2 * kSimd128Size)) {
+ if (!VALIDATE(max_lane < 2 * kSimd128Size)) {
error(pc_ + 2, "invalid shuffle mask");
return false;
}
@@ -1244,24 +1323,24 @@ class WasmDecoder : public Decoder {
inline bool Complete(BlockTypeImmediate<validate>& imm) {
if (imm.type != kWasmBottom) return true;
- if (!VALIDATE(module_ && module_->has_signature(imm.sig_index))) {
- return false;
- }
+ if (!VALIDATE(module_->has_signature(imm.sig_index))) return false;
imm.sig = module_->signature(imm.sig_index);
+ if (imm.sig->return_count() > 1) {
+ this->detected_->Add(kFeature_mv);
+ }
return true;
}
inline bool Validate(BlockTypeImmediate<validate>& imm) {
if (!Complete(imm)) {
errorf(pc_, "block type index %u out of bounds (%zu types)",
- imm.sig_index, module_ ? module_->types.size() : 0);
+ imm.sig_index, module_->types.size());
return false;
}
return true;
}
inline bool Validate(const byte* pc, FunctionIndexImmediate<validate>& imm) {
- if (!module_) return true;
if (!VALIDATE(imm.index < module_->functions.size())) {
errorf(pc, "invalid function index: %u", imm.index);
return false;
@@ -1274,7 +1353,7 @@ class WasmDecoder : public Decoder {
}
inline bool Validate(const byte* pc, MemoryIndexImmediate<validate>& imm) {
- if (!VALIDATE(module_ != nullptr && module_->has_memory)) {
+ if (!VALIDATE(module_->has_memory)) {
errorf(pc + 1, "memory instruction with no memory");
return false;
}
@@ -1282,9 +1361,8 @@ class WasmDecoder : public Decoder {
}
inline bool Validate(MemoryInitImmediate<validate>& imm) {
- if (!VALIDATE(module_ != nullptr &&
- imm.data_segment_index <
- module_->num_declared_data_segments)) {
+ if (!VALIDATE(imm.data_segment_index <
+ module_->num_declared_data_segments)) {
errorf(pc_ + 2, "invalid data segment index: %u", imm.data_segment_index);
return false;
}
@@ -1294,8 +1372,7 @@ class WasmDecoder : public Decoder {
}
inline bool Validate(DataDropImmediate<validate>& imm) {
- if (!VALIDATE(module_ != nullptr &&
- imm.index < module_->num_declared_data_segments)) {
+ if (!VALIDATE(imm.index < module_->num_declared_data_segments)) {
errorf(pc_ + 2, "invalid data segment index: %u", imm.index);
return false;
}
@@ -1309,7 +1386,7 @@ class WasmDecoder : public Decoder {
}
inline bool Validate(const byte* pc, TableIndexImmediate<validate>& imm) {
- if (!VALIDATE(module_ != nullptr && imm.index < module_->tables.size())) {
+ if (!VALIDATE(imm.index < module_->tables.size())) {
errorf(pc, "invalid table index: %u", imm.index);
return false;
}
@@ -1317,8 +1394,7 @@ class WasmDecoder : public Decoder {
}
inline bool Validate(TableInitImmediate<validate>& imm) {
- if (!VALIDATE(module_ != nullptr &&
- imm.elem_segment_index < module_->elem_segments.size())) {
+ if (!VALIDATE(imm.elem_segment_index < module_->elem_segments.size())) {
errorf(pc_ + 2, "invalid element segment index: %u",
imm.elem_segment_index);
return false;
@@ -1327,18 +1403,17 @@ class WasmDecoder : public Decoder {
return false;
}
ValueType elem_type = module_->elem_segments[imm.elem_segment_index].type;
- if (!VALIDATE(
- elem_type.IsSubTypeOf(module_->tables[imm.table.index].type))) {
+ if (!VALIDATE(IsSubtypeOf(elem_type, module_->tables[imm.table.index].type,
+ module_))) {
errorf(pc_ + 2, "table %u is not a super-type of %s", imm.table.index,
- elem_type.type_name());
+ elem_type.type_name().c_str());
return false;
}
return true;
}
inline bool Validate(ElemDropImmediate<validate>& imm) {
- if (!VALIDATE(module_ != nullptr &&
- imm.index < module_->elem_segments.size())) {
+ if (!VALIDATE(imm.index < module_->elem_segments.size())) {
errorf(pc_ + 2, "invalid element segment index: %u", imm.index);
return false;
}
@@ -1349,16 +1424,16 @@ class WasmDecoder : public Decoder {
if (!Validate(pc_ + 1, imm.table_src)) return false;
if (!Validate(pc_ + 2, imm.table_dst)) return false;
ValueType src_type = module_->tables[imm.table_src.index].type;
- if (!VALIDATE(
- src_type.IsSubTypeOf(module_->tables[imm.table_dst.index].type))) {
+ if (!VALIDATE(IsSubtypeOf(
+ src_type, module_->tables[imm.table_dst.index].type, module_))) {
errorf(pc_ + 2, "table %u is not a super-type of %s", imm.table_dst.index,
- src_type.type_name());
+ src_type.type_name().c_str());
return false;
}
return true;
}
- static uint32_t OpcodeLength(Decoder* decoder, const byte* pc) {
+ static uint32_t OpcodeLength(WasmDecoder* decoder, const byte* pc) {
WasmOpcode opcode = static_cast<WasmOpcode>(*pc);
switch (opcode) {
#define DECLARE_OPCODE_CASE(name, opcode, sig) case kExpr##name:
@@ -1403,6 +1478,15 @@ class WasmDecoder : public Decoder {
return 1 + imm.length;
}
+ case kExprLet: {
+ BlockTypeImmediate<validate> imm(WasmFeatures::All(), decoder, pc);
+ uint32_t locals_length;
+ bool locals_result =
+ decoder->DecodeLocals(decoder->pc() + 1 + imm.length,
+ &locals_length, base::Optional<uint32_t>());
+ return 1 + imm.length + (locals_result ? locals_length : 0);
+ }
+
case kExprThrow: {
ExceptionIndexImmediate<validate> imm(decoder, pc);
return 1 + imm.length;
@@ -1442,6 +1526,10 @@ class WasmDecoder : public Decoder {
return 1 + imm.length;
}
case kExprRefNull: {
+ RefNullImmediate<validate> imm(WasmFeatures::All(), decoder, pc);
+ return 1 + imm.length;
+ }
+ case kExprRefIsNull: {
return 1;
}
case kExprRefFunc: {
@@ -1594,13 +1682,27 @@ class WasmDecoder : public Decoder {
BranchDepthImmediate<validate> imm(decoder, pc + 2);
return 2 + imm.length;
}
- case kExprRttGet:
+ case kExprRttCanon: {
+ // TODO(7748): Introduce "HeapTypeImmediate" and use it here.
+ TypeIndexImmediate<validate> heaptype(decoder, pc + 2);
+ return 2 + heaptype.length;
+ }
case kExprRttSub: {
- // TODO(7748): Impelement.
- UNIMPLEMENTED();
+ // TODO(7748): Implement.
+ decoder->error(pc, "rtt.sub not implemented yet");
+ return 2;
}
+ case kExprI31New:
+ case kExprI31GetS:
+ case kExprI31GetU:
+ case kExprRefTest:
+ case kExprRefCast:
+ return 2;
+
default:
+ // This is unreachable except for malformed modules.
+ decoder->error(pc, "invalid gc opcode");
return 2;
}
}
@@ -1609,7 +1711,8 @@ class WasmDecoder : public Decoder {
}
}
- std::pair<uint32_t, uint32_t> StackEffect(const byte* pc) {
+ // TODO(clemensb): This is only used by the interpreter; move there.
+ V8_EXPORT_PRIVATE std::pair<uint32_t, uint32_t> StackEffect(const byte* pc) {
WasmOpcode opcode = static_cast<WasmOpcode>(*pc);
// Handle "simple" opcodes with a fixed signature first.
const FunctionSig* sig = WasmOpcodes::Signature(opcode);
@@ -1631,6 +1734,7 @@ class WasmDecoder : public Decoder {
case kExprMemoryGrow:
case kExprRefAsNonNull:
case kExprBrOnNull:
+ case kExprRefIsNull:
return {1, 1};
case kExprLocalSet:
case kExprGlobalSet:
@@ -1682,6 +1786,9 @@ class WasmDecoder : public Decoder {
case kExprReturnCallIndirect:
case kExprUnreachable:
return {0, 0};
+ case kExprLet:
+ // TODO(7748): Implement
+ return {0, 0};
case kNumericPrefix:
case kAtomicPrefix:
case kSimdPrefix: {
@@ -1712,12 +1819,14 @@ class WasmDecoder : public Decoder {
};
#define CALL_INTERFACE(name, ...) interface_.name(this, ##__VA_ARGS__)
-#define CALL_INTERFACE_IF_REACHABLE(name, ...) \
- do { \
- DCHECK(!control_.empty()); \
- if (VALIDATE(this->ok()) && control_.back().reachable()) { \
- interface_.name(this, ##__VA_ARGS__); \
- } \
+#define CALL_INTERFACE_IF_REACHABLE(name, ...) \
+ do { \
+ DCHECK(!control_.empty()); \
+ DCHECK_EQ(current_code_reachable_, \
+ this->ok() && control_.back().reachable()); \
+ if (current_code_reachable_) { \
+ interface_.name(this, ##__VA_ARGS__); \
+ } \
} while (false)
#define CALL_INTERFACE_IF_PARENT_REACHABLE(name, ...) \
do { \
@@ -1765,8 +1874,12 @@ class WasmFullDecoder : public WasmDecoder<validate> {
}
DCHECK_EQ(0, this->local_types_->size());
- WasmDecoder<validate>::DecodeLocals(this->enabled_, this, this->sig_,
- this->local_types_);
+ this->InitializeLocalsFromSig();
+ uint32_t locals_length;
+ this->DecodeLocals(this->pc(), &locals_length,
+ static_cast<uint32_t>(this->local_types_->size()));
+ this->consume_bytes(locals_length);
+
CALL_INTERFACE(StartFunction);
DecodeFunctionBody();
if (!this->failed()) CALL_INTERFACE(FinishFunction);
@@ -1839,6 +1952,14 @@ class WasmFullDecoder : public WasmDecoder<validate> {
return &*(stack_.end() - depth);
}
+ void SetSucceedingCodeDynamicallyUnreachable() {
+ Control* current = &control_.back();
+ if (current->reachable()) {
+ current->reachability = kSpecOnlyReachable;
+ current_code_reachable_ = false;
+ }
+ }
+
private:
Zone* zone_;
@@ -1847,6 +1968,9 @@ class WasmFullDecoder : public WasmDecoder<validate> {
ZoneVector<ValueType> local_type_vec_; // types of local variables.
ZoneVector<Value> stack_; // stack of values.
ZoneVector<Control> control_; // stack of blocks, loops, and ifs.
+ // Controls whether code should be generated for the current block (basically
+ // a cache for {ok() && control_.back().reachable()}).
+ bool current_code_reachable_ = true;
static Value UnreachableValue(const uint8_t* pc) {
return Value{pc, kWasmBottom};
@@ -1895,832 +2019,905 @@ class WasmFullDecoder : public WasmDecoder<validate> {
int len_ = 0;
};
- // Decodes the body of a function.
- void DecodeFunctionBody() {
- TRACE("wasm-decode %p...%p (module+%u, %d bytes)\n", this->start(),
- this->end(), this->pc_offset(),
- static_cast<int>(this->end() - this->start()));
-
- // Set up initial function block.
- {
- auto* c = PushControl(kControlBlock);
- InitMerge(&c->start_merge, 0, [](uint32_t) -> Value { UNREACHABLE(); });
- InitMerge(&c->end_merge,
- static_cast<uint32_t>(this->sig_->return_count()),
- [&](uint32_t i) {
- return Value{this->pc_, this->sig_->GetReturn(i)};
- });
- CALL_INTERFACE(StartFunctionBody, c);
- }
-
- while (this->pc_ < this->end_) { // decoding loop.
- uint32_t len = 1;
- WasmOpcode opcode = static_cast<WasmOpcode>(*this->pc_);
-
- CALL_INTERFACE_IF_REACHABLE(NextInstruction, opcode);
+ // Helper to avoid calling member methods (which are more expensive to call
+ // indirectly).
+ template <WasmOpcode opcode>
+ static int DecodeOp(WasmFullDecoder* decoder) {
+ return decoder->DecodeOp<opcode>();
+ }
+ template <WasmOpcode opcode>
+ int DecodeOp() {
#if DEBUG
- TraceLine trace_msg;
+ TraceLine trace_msg;
#define TRACE_PART(...) trace_msg.Append(__VA_ARGS__)
- if (!WasmOpcodes::IsPrefixOpcode(opcode)) {
- TRACE_PART(TRACE_INST_FORMAT, startrel(this->pc_),
- WasmOpcodes::OpcodeName(opcode));
- }
+ if (!WasmOpcodes::IsPrefixOpcode(opcode)) {
+ TRACE_PART(TRACE_INST_FORMAT, startrel(this->pc_),
+ WasmOpcodes::OpcodeName(opcode));
+ }
#else
#define TRACE_PART(...)
#endif
- switch (opcode) {
+ int len = 1;
+
+ // TODO(clemensb): Break this up into individual functions.
+ switch (opcode) {
#define BUILD_SIMPLE_OPCODE(op, _, sig) \
case kExpr##op: \
BuildSimpleOperator_##sig(opcode); \
break;
- FOREACH_SIMPLE_OPCODE(BUILD_SIMPLE_OPCODE)
+ FOREACH_SIMPLE_OPCODE(BUILD_SIMPLE_OPCODE)
#undef BUILD_SIMPLE_OPCODE
- case kExprNop:
- break;
- case kExprBlock: {
- BlockTypeImmediate<validate> imm(this->enabled_, this, this->pc_);
- if (!this->Validate(imm)) break;
- auto args = PopArgs(imm.sig);
- auto* block = PushControl(kControlBlock);
- SetBlockType(block, imm, args.begin());
- CALL_INTERFACE_IF_REACHABLE(Block, block);
- PushMergeValues(block, &block->start_merge);
- len = 1 + imm.length;
- break;
- }
- case kExprRethrow: {
- CHECK_PROTOTYPE_OPCODE(eh);
- auto exception = Pop(0, kWasmExnRef);
- CALL_INTERFACE_IF_REACHABLE(Rethrow, exception);
- EndControl();
+ case kExprNop:
+ break;
+ case kExprBlock: {
+ BlockTypeImmediate<validate> imm(this->enabled_, this, this->pc_);
+ if (!this->Validate(imm)) break;
+ ArgVector args = PopArgs(imm.sig);
+ Control* block = PushControl(kControlBlock);
+ SetBlockType(block, imm, args.begin());
+ CALL_INTERFACE_IF_REACHABLE(Block, block);
+ PushMergeValues(block, &block->start_merge);
+ len = 1 + imm.length;
+ break;
+ }
+ case kExprRethrow: {
+ CHECK_PROTOTYPE_OPCODE(eh);
+ Value exception = Pop(0, kWasmExnRef);
+ CALL_INTERFACE_IF_REACHABLE(Rethrow, exception);
+ EndControl();
+ break;
+ }
+ case kExprThrow: {
+ CHECK_PROTOTYPE_OPCODE(eh);
+ ExceptionIndexImmediate<validate> imm(this, this->pc_);
+ len = 1 + imm.length;
+ if (!this->Validate(this->pc_, imm)) break;
+ ArgVector args = PopArgs(imm.exception->ToFunctionSig());
+ CALL_INTERFACE_IF_REACHABLE(Throw, imm, VectorOf(args));
+ EndControl();
+ break;
+ }
+ case kExprTry: {
+ CHECK_PROTOTYPE_OPCODE(eh);
+ BlockTypeImmediate<validate> imm(this->enabled_, this, this->pc_);
+ if (!this->Validate(imm)) break;
+ ArgVector args = PopArgs(imm.sig);
+ Control* try_block = PushControl(kControlTry);
+ SetBlockType(try_block, imm, args.begin());
+ len = 1 + imm.length;
+ CALL_INTERFACE_IF_REACHABLE(Try, try_block);
+ PushMergeValues(try_block, &try_block->start_merge);
+ break;
+ }
+ case kExprCatch: {
+ CHECK_PROTOTYPE_OPCODE(eh);
+ if (!VALIDATE(!control_.empty())) {
+ this->error("catch does not match any try");
break;
}
- case kExprThrow: {
- CHECK_PROTOTYPE_OPCODE(eh);
- ExceptionIndexImmediate<validate> imm(this, this->pc_);
- len = 1 + imm.length;
- if (!this->Validate(this->pc_, imm)) break;
- auto args = PopArgs(imm.exception->ToFunctionSig());
- CALL_INTERFACE_IF_REACHABLE(Throw, imm, VectorOf(args));
- EndControl();
+ Control* c = &control_.back();
+ if (!VALIDATE(c->is_try())) {
+ this->error("catch does not match any try");
break;
}
- case kExprTry: {
- CHECK_PROTOTYPE_OPCODE(eh);
- BlockTypeImmediate<validate> imm(this->enabled_, this, this->pc_);
- if (!this->Validate(imm)) break;
- auto args = PopArgs(imm.sig);
- auto* try_block = PushControl(kControlTry);
- SetBlockType(try_block, imm, args.begin());
- len = 1 + imm.length;
- CALL_INTERFACE_IF_REACHABLE(Try, try_block);
- PushMergeValues(try_block, &try_block->start_merge);
+ if (!VALIDATE(c->is_incomplete_try())) {
+ this->error("catch already present for try");
break;
}
- case kExprCatch: {
- CHECK_PROTOTYPE_OPCODE(eh);
- if (!VALIDATE(!control_.empty())) {
- this->error("catch does not match any try");
- break;
- }
- Control* c = &control_.back();
- if (!VALIDATE(c->is_try())) {
- this->error("catch does not match any try");
- break;
- }
- if (!VALIDATE(c->is_incomplete_try())) {
- this->error("catch already present for try");
- break;
+ c->kind = kControlTryCatch;
+ FallThruTo(c);
+ stack_.erase(stack_.begin() + c->stack_depth, stack_.end());
+ c->reachability = control_at(1)->innerReachability();
+ current_code_reachable_ = this->ok() && c->reachable();
+ Value* exception = Push(kWasmExnRef);
+ CALL_INTERFACE_IF_PARENT_REACHABLE(Catch, c, exception);
+ break;
+ }
+ case kExprBrOnExn: {
+ CHECK_PROTOTYPE_OPCODE(eh);
+ BranchOnExceptionImmediate<validate> imm(this, this->pc_);
+ if (!this->Validate(this->pc_, imm.depth, control_.size())) break;
+ if (!this->Validate(this->pc_ + imm.depth.length, imm.index)) break;
+ Control* c = control_at(imm.depth.depth);
+ Value exception = Pop(0, kWasmExnRef);
+ const WasmExceptionSig* sig = imm.index.exception->sig;
+ size_t value_count = sig->parameter_count();
+ // TODO(wasm): This operand stack mutation is an ugly hack to make
+ // both type checking here as well as environment merging in the
+ // graph builder interface work out of the box. We should introduce
+ // special handling for both and do minimal/no stack mutation here.
+ for (size_t i = 0; i < value_count; ++i) Push(sig->GetParam(i));
+ Vector<Value> values(stack_.data() + c->stack_depth, value_count);
+ TypeCheckBranchResult check_result = TypeCheckBranch(c, true);
+ if (this->failed()) break;
+ if (V8_LIKELY(check_result == kReachableBranch)) {
+ CALL_INTERFACE(BrOnException, exception, imm.index, imm.depth.depth,
+ values);
+ c->br_merge()->reached = true;
+ } else if (check_result == kInvalidStack) {
+ break;
+ }
+ len = 1 + imm.length;
+ for (size_t i = 0; i < value_count; ++i) Pop();
+ Value* pexception = Push(kWasmExnRef);
+ *pexception = exception;
+ break;
+ }
+ case kExprBrOnNull: {
+ CHECK_PROTOTYPE_OPCODE(typed_funcref);
+ BranchDepthImmediate<validate> imm(this, this->pc_);
+ if (!this->Validate(this->pc_, imm, control_.size())) break;
+ len = 1 + imm.length;
+ Value ref_object = Pop();
+ if (this->failed()) break;
+ Control* c = control_at(imm.depth);
+ TypeCheckBranchResult check_result = TypeCheckBranch(c, true);
+ if (V8_LIKELY(check_result == kReachableBranch)) {
+ switch (ref_object.type.kind()) {
+ case ValueType::kRef: {
+ Value* result = Push(ref_object.type);
+ CALL_INTERFACE(PassThrough, ref_object, result);
+ break;
+ }
+ case ValueType::kOptRef: {
+ // We need to Push the result value after calling BrOnNull on
+ // the interface. Therefore we must sync the ref_object and
+ // result nodes afterwards (in PassThrough).
+ CALL_INTERFACE(BrOnNull, ref_object, imm.depth);
+ Value* result = Push(
+ ValueType::Ref(ref_object.type.heap_type(), kNonNullable));
+ CALL_INTERFACE(PassThrough, ref_object, result);
+ c->br_merge()->reached = true;
+ break;
+ }
+ default:
+ this->error(this->pc_,
+                        "invalid argument type to ref.as_non_null");
+ break;
}
- c->kind = kControlTryCatch;
- FallThruTo(c);
- stack_.erase(stack_.begin() + c->stack_depth, stack_.end());
- c->reachability = control_at(1)->innerReachability();
- auto* exception = Push(kWasmExnRef);
- CALL_INTERFACE_IF_PARENT_REACHABLE(Catch, c, exception);
- break;
}
- case kExprBrOnExn: {
- CHECK_PROTOTYPE_OPCODE(eh);
- BranchOnExceptionImmediate<validate> imm(this, this->pc_);
- if (!this->Validate(this->pc_, imm.depth, control_.size())) break;
- if (!this->Validate(this->pc_ + imm.depth.length, imm.index)) break;
- Control* c = control_at(imm.depth.depth);
- auto exception = Pop(0, kWasmExnRef);
- const WasmExceptionSig* sig = imm.index.exception->sig;
- size_t value_count = sig->parameter_count();
- // TODO(wasm): This operand stack mutation is an ugly hack to make
- // both type checking here as well as environment merging in the
- // graph builder interface work out of the box. We should introduce
- // special handling for both and do minimal/no stack mutation here.
- for (size_t i = 0; i < value_count; ++i) Push(sig->GetParam(i));
- Vector<Value> values(stack_.data() + c->stack_depth, value_count);
- TypeCheckBranchResult check_result = TypeCheckBranch(c, true);
- if (V8_LIKELY(check_result == kReachableBranch)) {
- CALL_INTERFACE(BrOnException, exception, imm.index, imm.depth.depth,
- values);
- c->br_merge()->reached = true;
- } else if (check_result == kInvalidStack) {
- break;
- }
- len = 1 + imm.length;
- for (size_t i = 0; i < value_count; ++i) Pop();
- auto* pexception = Push(kWasmExnRef);
- *pexception = exception;
+ break;
+ }
+ case kExprLet: {
+ CHECK_PROTOTYPE_OPCODE(typed_funcref);
+ BlockTypeImmediate<validate> imm(this->enabled_, this, this->pc_);
+ if (!this->Validate(imm)) break;
+ uint32_t current_local_count =
+ static_cast<uint32_t>(local_type_vec_.size());
+ // Temporarily add the let-defined values
+ // to the beginning of the function locals.
+ uint32_t locals_length;
+ if (!this->DecodeLocals(this->pc() + 1 + imm.length, &locals_length,
+ 0)) {
+ break;
+ }
+ len = 1 + imm.length + locals_length;
+ uint32_t locals_count =
+ static_cast<uint32_t>(local_type_vec_.size() - current_local_count);
+ ArgVector let_local_values =
+ PopArgs(static_cast<uint32_t>(imm.in_arity()),
+ VectorOf(local_type_vec_.data(), locals_count));
+ ArgVector args = PopArgs(imm.sig);
+ Control* let_block = PushControl(kControlLet, locals_count);
+ SetBlockType(let_block, imm, args.begin());
+ CALL_INTERFACE_IF_REACHABLE(Block, let_block);
+ PushMergeValues(let_block, &let_block->start_merge);
+ CALL_INTERFACE_IF_REACHABLE(AllocateLocals, VectorOf(let_local_values));
+ break;
+ }
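The let case above prepends its locals to local_types_, so inside the block the let-bound values occupy indices 0..n-1 and all pre-existing locals shift up; the matching end (see the is_let() handling in kExprEnd below) erases exactly that prefix again. A standalone sketch of this scoping discipline on a plain vector, with invented names.

#include <cstddef>
#include <cstdint>
#include <vector>

struct LetScopeSketch {
  std::vector<uint32_t> local_types;  // opaque local type codes

  void EnterLet(const std::vector<uint32_t>& let_types) {
    // New locals go in front: they get indices 0..n-1 inside the block.
    local_types.insert(local_types.begin(), let_types.begin(),
                       let_types.end());
  }
  void ExitLet(std::size_t let_count) {
    // Leaving the block removes exactly that prefix again.
    local_types.erase(
        local_types.begin(),
        local_types.begin() + static_cast<std::ptrdiff_t>(let_count));
  }
};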
+ case kExprLoop: {
+ BlockTypeImmediate<validate> imm(this->enabled_, this, this->pc_);
+ if (!this->Validate(imm)) break;
+ ArgVector args = PopArgs(imm.sig);
+ Control* block = PushControl(kControlLoop);
+ SetBlockType(&control_.back(), imm, args.begin());
+ len = 1 + imm.length;
+ CALL_INTERFACE_IF_REACHABLE(Loop, block);
+ PushMergeValues(block, &block->start_merge);
+ break;
+ }
+ case kExprIf: {
+ BlockTypeImmediate<validate> imm(this->enabled_, this, this->pc_);
+ if (!this->Validate(imm)) break;
+ Value cond = Pop(0, kWasmI32);
+ ArgVector args = PopArgs(imm.sig);
+ if (!VALIDATE(this->ok())) break;
+ Control* if_block = PushControl(kControlIf);
+ SetBlockType(if_block, imm, args.begin());
+ CALL_INTERFACE_IF_REACHABLE(If, cond, if_block);
+ len = 1 + imm.length;
+ PushMergeValues(if_block, &if_block->start_merge);
+ break;
+ }
+ case kExprElse: {
+ if (!VALIDATE(!control_.empty())) {
+ this->error("else does not match any if");
break;
}
- case kExprBrOnNull: {
- CHECK_PROTOTYPE_OPCODE(gc);
- BranchDepthImmediate<validate> imm(this, this->pc_);
- if (!this->Validate(this->pc_, imm, control_.size())) break;
- len = 1 + imm.length;
- Value ref_object = Pop();
- if (this->failed()) break;
- Control* c = control_at(imm.depth);
- TypeCheckBranchResult check_result = TypeCheckBranch(c, true);
- if (V8_LIKELY(check_result == kReachableBranch)) {
- switch (ref_object.type.kind()) {
- case ValueType::kRef: {
- auto* result = Push(
- ValueType(ValueType::kRef, ref_object.type.ref_index()));
- CALL_INTERFACE(PassThrough, ref_object, result);
- break;
- }
- case ValueType::kOptRef: {
- // We need to Push the result value after calling BrOnNull on
- // the interface. Therefore we must sync the ref_object and
- // result nodes afterwards (in PassThrough).
- CALL_INTERFACE(BrOnNull, ref_object, imm.depth);
- auto* result = Push(
- ValueType(ValueType::kRef, ref_object.type.ref_index()));
- CALL_INTERFACE(PassThrough, ref_object, result);
- c->br_merge()->reached = true;
- break;
- }
- case ValueType::kNullRef:
- if (imm.depth == control_.size() - 1) {
- DoReturn();
- } else {
- CALL_INTERFACE(Br, c);
- c->br_merge()->reached = true;
- }
- EndControl();
- break;
- default:
- this->error(this->pc_,
- "invalid agrument type to ref.as_non_null");
- break;
- }
- }
+ Control* c = &control_.back();
+ if (!VALIDATE(c->is_if())) {
+ this->error(this->pc_, "else does not match an if");
break;
}
- case kExprLoop: {
- BlockTypeImmediate<validate> imm(this->enabled_, this, this->pc_);
- if (!this->Validate(imm)) break;
- auto args = PopArgs(imm.sig);
- auto* block = PushControl(kControlLoop);
- SetBlockType(&control_.back(), imm, args.begin());
- len = 1 + imm.length;
- CALL_INTERFACE_IF_REACHABLE(Loop, block);
- PushMergeValues(block, &block->start_merge);
+ if (c->is_if_else()) {
+ this->error(this->pc_, "else already present for if");
break;
}
- case kExprIf: {
- BlockTypeImmediate<validate> imm(this->enabled_, this, this->pc_);
- if (!this->Validate(imm)) break;
- auto cond = Pop(0, kWasmI32);
- auto args = PopArgs(imm.sig);
- if (!VALIDATE(this->ok())) break;
- auto* if_block = PushControl(kControlIf);
- SetBlockType(if_block, imm, args.begin());
- CALL_INTERFACE_IF_REACHABLE(If, cond, if_block);
- len = 1 + imm.length;
- PushMergeValues(if_block, &if_block->start_merge);
+ if (!TypeCheckFallThru()) break;
+ c->kind = kControlIfElse;
+ CALL_INTERFACE_IF_PARENT_REACHABLE(Else, c);
+ if (c->reachable()) c->end_merge.reached = true;
+ PushMergeValues(c, &c->start_merge);
+ c->reachability = control_at(1)->innerReachability();
+ current_code_reachable_ = this->ok() && c->reachable();
+ break;
+ }
+ case kExprEnd: {
+ if (!VALIDATE(!control_.empty())) {
+ this->error("end does not match any if, try, or block");
break;
}
- case kExprElse: {
- if (!VALIDATE(!control_.empty())) {
- this->error("else does not match any if");
- break;
- }
- Control* c = &control_.back();
- if (!VALIDATE(c->is_if())) {
- this->error(this->pc_, "else does not match an if");
- break;
- }
- if (c->is_if_else()) {
- this->error(this->pc_, "else already present for if");
- break;
- }
- if (!TypeCheckFallThru()) break;
- c->kind = kControlIfElse;
- CALL_INTERFACE_IF_PARENT_REACHABLE(Else, c);
- if (c->reachable()) c->end_merge.reached = true;
- PushMergeValues(c, &c->start_merge);
- c->reachability = control_at(1)->innerReachability();
+ Control* c = &control_.back();
+ if (!VALIDATE(!c->is_incomplete_try())) {
+ this->error(this->pc_, "missing catch or catch-all in try");
break;
}
- case kExprEnd: {
- if (!VALIDATE(!control_.empty())) {
- this->error("end does not match any if, try, or block");
- break;
- }
- Control* c = &control_.back();
- if (!VALIDATE(!c->is_incomplete_try())) {
- this->error(this->pc_, "missing catch or catch-all in try");
+ if (c->is_onearmed_if()) {
+ if (!VALIDATE(c->end_merge.arity == c->start_merge.arity)) {
+ this->error(c->pc,
+ "start-arity and end-arity of one-armed if must match");
break;
}
- if (c->is_onearmed_if()) {
- if (!VALIDATE(c->end_merge.arity == c->start_merge.arity)) {
- this->error(
- c->pc,
- "start-arity and end-arity of one-armed if must match");
- break;
- }
- if (!TypeCheckOneArmedIf(c)) break;
- }
- if (!TypeCheckFallThru()) break;
-
- if (control_.size() == 1) {
- // If at the last (implicit) control, check we are at end.
- if (!VALIDATE(this->pc_ + 1 == this->end_)) {
- this->error(this->pc_ + 1, "trailing code after function end");
- break;
- }
- // The result of the block is the return value.
- TRACE_PART("\n" TRACE_INST_FORMAT, startrel(this->pc_),
- "(implicit) return");
- DoReturn();
- control_.clear();
- break;
- }
- PopControl(c);
- break;
+ if (!TypeCheckOneArmedIf(c)) break;
+ }
+ if (c->is_let()) {
+ this->local_types_->erase(
+ this->local_types_->begin(),
+ this->local_types_->begin() + c->locals_count);
+ CALL_INTERFACE_IF_REACHABLE(DeallocateLocals, c->locals_count);
}
- case kExprSelect: {
- auto cond = Pop(2, kWasmI32);
- auto fval = Pop();
- auto tval = Pop(0, fval.type);
- ValueType type = tval.type == kWasmBottom ? fval.type : tval.type;
- if (type.IsSubTypeOf(kWasmAnyRef)) {
- this->error(
- "select without type is only valid for value type inputs");
+ if (!TypeCheckFallThru()) break;
+
+ if (control_.size() == 1) {
+ // If at the last (implicit) control, check we are at end.
+ if (!VALIDATE(this->pc_ + 1 == this->end_)) {
+ this->error(this->pc_ + 1, "trailing code after function end");
break;
}
- auto* result = Push(type);
- CALL_INTERFACE_IF_REACHABLE(Select, cond, fval, tval, result);
- break;
- }
- case kExprSelectWithType: {
- CHECK_PROTOTYPE_OPCODE(anyref);
- SelectTypeImmediate<validate> imm(this->enabled_, this, this->pc_);
- if (this->failed()) break;
- auto cond = Pop(2, kWasmI32);
- auto fval = Pop(1, imm.type);
- auto tval = Pop(0, imm.type);
- auto* result = Push(imm.type);
- CALL_INTERFACE_IF_REACHABLE(Select, cond, fval, tval, result);
- len = 1 + imm.length;
+ // The result of the block is the return value.
+ TRACE_PART("\n" TRACE_INST_FORMAT, startrel(this->pc_),
+ "(implicit) return");
+ DoReturn();
+ control_.clear();
break;
}
- case kExprBr: {
- BranchDepthImmediate<validate> imm(this, this->pc_);
- if (!this->Validate(this->pc_, imm, control_.size())) break;
- Control* c = control_at(imm.depth);
- TypeCheckBranchResult check_result = TypeCheckBranch(c, false);
- if (V8_LIKELY(check_result == kReachableBranch)) {
- if (imm.depth == control_.size() - 1) {
- DoReturn();
- } else {
- CALL_INTERFACE(Br, c);
- c->br_merge()->reached = true;
- }
- } else if (check_result == kInvalidStack) {
- break;
- }
- len = 1 + imm.length;
- EndControl();
+ PopControl(c);
+ break;
+ }
+ case kExprSelect: {
+ Value cond = Pop(2, kWasmI32);
+ Value fval = Pop();
+ Value tval = Pop(0, fval.type);
+ ValueType type = tval.type == kWasmBottom ? fval.type : tval.type;
+ if (type.is_reference_type()) {
+ this->error(
+ "select without type is only valid for value type inputs");
break;
}
- case kExprBrIf: {
- BranchDepthImmediate<validate> imm(this, this->pc_);
- auto cond = Pop(0, kWasmI32);
- if (this->failed()) break;
- if (!this->Validate(this->pc_, imm, control_.size())) break;
- Control* c = control_at(imm.depth);
- TypeCheckBranchResult check_result = TypeCheckBranch(c, true);
- if (V8_LIKELY(check_result == kReachableBranch)) {
- CALL_INTERFACE(BrIf, cond, imm.depth);
+ Value* result = Push(type);
+ CALL_INTERFACE_IF_REACHABLE(Select, cond, fval, tval, result);
+ break;
+ }
+ case kExprSelectWithType: {
+ CHECK_PROTOTYPE_OPCODE(reftypes);
+ SelectTypeImmediate<validate> imm(this->enabled_, this, this->pc_);
+ if (this->failed()) break;
+ Value cond = Pop(2, kWasmI32);
+ Value fval = Pop(1, imm.type);
+ Value tval = Pop(0, imm.type);
+ Value* result = Push(imm.type);
+ CALL_INTERFACE_IF_REACHABLE(Select, cond, fval, tval, result);
+ len = 1 + imm.length;
+ break;
+ }
+ case kExprBr: {
+ BranchDepthImmediate<validate> imm(this, this->pc_);
+ if (!this->Validate(this->pc_, imm, control_.size())) break;
+ Control* c = control_at(imm.depth);
+ TypeCheckBranchResult check_result = TypeCheckBranch(c, false);
+ if (V8_LIKELY(check_result == kReachableBranch)) {
+ if (imm.depth == control_.size() - 1) {
+ DoReturn();
+ } else {
+ CALL_INTERFACE(Br, c);
c->br_merge()->reached = true;
- } else if (check_result == kInvalidStack) {
- break;
}
- len = 1 + imm.length;
+ } else if (check_result == kInvalidStack) {
break;
}
- case kExprBrTable: {
- BranchTableImmediate<validate> imm(this, this->pc_);
- BranchTableIterator<validate> iterator(this, imm);
- auto key = Pop(0, kWasmI32);
- if (this->failed()) break;
- if (!this->Validate(this->pc_, imm, control_.size())) break;
-
- // Cache the branch targets during the iteration, so that we can set
- // all branch targets as reachable after the {CALL_INTERFACE} call.
- std::vector<bool> br_targets(control_.size());
-
- // The result types of the br_table instruction. We have to check the
- // stack against these types. Only needed during validation.
- std::vector<ValueType> result_types;
-
- while (iterator.has_next()) {
- const uint32_t index = iterator.cur_index();
- const byte* pos = iterator.pc();
- uint32_t target = iterator.next();
- if (!VALIDATE(ValidateBrTableTarget(target, pos, index))) break;
- // Avoid redundant branch target checks.
- if (br_targets[target]) continue;
- br_targets[target] = true;
-
- if (validate) {
- if (index == 0) {
- // With the first branch target, initialize the result types.
- result_types = InitializeBrTableResultTypes(target);
- } else if (!UpdateBrTableResultTypes(&result_types, target, pos,
- index)) {
- break;
- }
+ len = 1 + imm.length;
+ EndControl();
+ break;
+ }
+ case kExprBrIf: {
+ BranchDepthImmediate<validate> imm(this, this->pc_);
+ Value cond = Pop(0, kWasmI32);
+ if (this->failed()) break;
+ if (!this->Validate(this->pc_, imm, control_.size())) break;
+ Control* c = control_at(imm.depth);
+ TypeCheckBranchResult check_result = TypeCheckBranch(c, true);
+ if (V8_LIKELY(check_result == kReachableBranch)) {
+ CALL_INTERFACE(BrIf, cond, imm.depth);
+ c->br_merge()->reached = true;
+ } else if (check_result == kInvalidStack) {
+ break;
+ }
+ len = 1 + imm.length;
+ break;
+ }
+ case kExprBrTable: {
+ BranchTableImmediate<validate> imm(this, this->pc_);
+ BranchTableIterator<validate> iterator(this, imm);
+ Value key = Pop(0, kWasmI32);
+ if (this->failed()) break;
+ if (!this->Validate(this->pc_, imm, control_.size())) break;
+
+ // Cache the branch targets during the iteration, so that we can set
+ // all branch targets as reachable after the {CALL_INTERFACE} call.
+ std::vector<bool> br_targets(control_.size());
+
+ // The result types of the br_table instruction. We have to check the
+ // stack against these types. Only needed during validation.
+ std::vector<ValueType> result_types;
+
+ while (iterator.has_next()) {
+ const uint32_t index = iterator.cur_index();
+ const byte* pos = iterator.pc();
+ uint32_t target = iterator.next();
+ if (!VALIDATE(ValidateBrTableTarget(target, pos, index))) break;
+ // Avoid redundant branch target checks.
+ if (br_targets[target]) continue;
+ br_targets[target] = true;
+
+ if (validate) {
+ if (index == 0) {
+ // With the first branch target, initialize the result types.
+ result_types = InitializeBrTableResultTypes(target);
+ } else if (!UpdateBrTableResultTypes(&result_types, target, pos,
+ index)) {
+ break;
}
}
+ }
- if (!VALIDATE(TypeCheckBrTable(result_types))) break;
+ if (!VALIDATE(TypeCheckBrTable(result_types))) break;
- DCHECK(this->ok());
+ DCHECK(this->ok());
- if (control_.back().reachable()) {
- CALL_INTERFACE(BrTable, imm, key);
+ if (current_code_reachable_) {
+ CALL_INTERFACE(BrTable, imm, key);
- for (int i = 0, e = control_depth(); i < e; ++i) {
- if (!br_targets[i]) continue;
- control_at(i)->br_merge()->reached = true;
- }
+ for (int i = 0, e = control_depth(); i < e; ++i) {
+ if (!br_targets[i]) continue;
+ control_at(i)->br_merge()->reached = true;
}
-
- len = 1 + iterator.length();
- EndControl();
- break;
}
- case kExprReturn: {
- if (V8_LIKELY(control_.back().reachable())) {
- if (!VALIDATE(TypeCheckReturn())) break;
- DoReturn();
- } else {
- // We pop all return values from the stack to check their type.
- // Since we deal with unreachable code, we do not have to keep the
- // values.
- int num_returns = static_cast<int>(this->sig_->return_count());
- for (int i = num_returns - 1; i >= 0; --i) {
- Pop(i, this->sig_->GetReturn(i));
- }
- }
- EndControl();
- break;
- }
- case kExprUnreachable: {
- CALL_INTERFACE_IF_REACHABLE(Unreachable);
- EndControl();
- break;
- }
- case kExprI32Const: {
- ImmI32Immediate<validate> imm(this, this->pc_);
- auto* value = Push(kWasmI32);
- CALL_INTERFACE_IF_REACHABLE(I32Const, value, imm.value);
- len = 1 + imm.length;
- break;
- }
- case kExprI64Const: {
- ImmI64Immediate<validate> imm(this, this->pc_);
- auto* value = Push(kWasmI64);
- CALL_INTERFACE_IF_REACHABLE(I64Const, value, imm.value);
- len = 1 + imm.length;
- break;
- }
- case kExprF32Const: {
- ImmF32Immediate<validate> imm(this, this->pc_);
- auto* value = Push(kWasmF32);
- CALL_INTERFACE_IF_REACHABLE(F32Const, value, imm.value);
- len = 1 + imm.length;
- break;
- }
- case kExprF64Const: {
- ImmF64Immediate<validate> imm(this, this->pc_);
- auto* value = Push(kWasmF64);
- CALL_INTERFACE_IF_REACHABLE(F64Const, value, imm.value);
- len = 1 + imm.length;
- break;
- }
- case kExprRefNull: {
- CHECK_PROTOTYPE_OPCODE(anyref);
- auto* value = Push(kWasmNullRef);
- CALL_INTERFACE_IF_REACHABLE(RefNull, value);
- len = 1;
- break;
- }
- case kExprRefFunc: {
- CHECK_PROTOTYPE_OPCODE(anyref);
- FunctionIndexImmediate<validate> imm(this, this->pc_);
- if (!this->Validate(this->pc_, imm)) break;
- auto* value = Push(kWasmFuncRef);
- CALL_INTERFACE_IF_REACHABLE(RefFunc, imm.index, value);
- len = 1 + imm.length;
- break;
- }
- case kExprRefAsNonNull: {
- CHECK_PROTOTYPE_OPCODE(gc);
- auto value = Pop();
- switch (value.type.kind()) {
- case ValueType::kRef: {
- auto* result =
- Push(ValueType(ValueType::kRef, value.type.ref_index()));
- CALL_INTERFACE_IF_REACHABLE(PassThrough, value, result);
- break;
- }
- case ValueType::kOptRef: {
- auto* result =
- Push(ValueType(ValueType::kRef, value.type.ref_index()));
- CALL_INTERFACE_IF_REACHABLE(RefAsNonNull, value, result);
- break;
- }
- case ValueType::kNullRef:
- // TODO(7748): Fix this once the standard clears up (see
- // https://github.com/WebAssembly/function-references/issues/21).
- CALL_INTERFACE_IF_REACHABLE(Unreachable);
- EndControl();
- break;
- default:
- this->error(this->pc_ + 1,
- "invalid agrument type to ref.as_non_null");
- break;
+ len = 1 + iterator.length();
+ EndControl();
+ break;
+ }
+ case kExprReturn: {
+ if (V8_LIKELY(current_code_reachable_)) {
+ if (!VALIDATE(TypeCheckReturn())) break;
+ DoReturn();
+ } else {
+ // We pop all return values from the stack to check their type.
+ // Since we deal with unreachable code, we do not have to keep the
+ // values.
+ int num_returns = static_cast<int>(this->sig_->return_count());
+ for (int i = num_returns - 1; i >= 0; --i) {
+ Pop(i, this->sig_->GetReturn(i));
}
- break;
- }
- case kExprLocalGet: {
- LocalIndexImmediate<validate> imm(this, this->pc_);
- if (!this->Validate(this->pc_, imm)) break;
- auto* value = Push(imm.type);
- CALL_INTERFACE_IF_REACHABLE(LocalGet, value, imm);
- len = 1 + imm.length;
- break;
}
- case kExprLocalSet: {
- LocalIndexImmediate<validate> imm(this, this->pc_);
- if (!this->Validate(this->pc_, imm)) break;
- auto value = Pop(0, local_type_vec_[imm.index]);
- CALL_INTERFACE_IF_REACHABLE(LocalSet, value, imm);
- len = 1 + imm.length;
- break;
- }
- case kExprLocalTee: {
- LocalIndexImmediate<validate> imm(this, this->pc_);
- if (!this->Validate(this->pc_, imm)) break;
- auto value = Pop(0, local_type_vec_[imm.index]);
- auto* result = Push(value.type);
- CALL_INTERFACE_IF_REACHABLE(LocalTee, value, result, imm);
- len = 1 + imm.length;
- break;
- }
- case kExprDrop: {
- auto value = Pop();
- CALL_INTERFACE_IF_REACHABLE(Drop, value);
+
+ EndControl();
+ break;
+ }
+ case kExprUnreachable: {
+ CALL_INTERFACE_IF_REACHABLE(Unreachable);
+ EndControl();
+ break;
+ }
+ case kExprI32Const: {
+ ImmI32Immediate<validate> imm(this, this->pc_);
+ Value* value = Push(kWasmI32);
+ CALL_INTERFACE_IF_REACHABLE(I32Const, value, imm.value);
+ len = 1 + imm.length;
+ break;
+ }
+ case kExprI64Const: {
+ ImmI64Immediate<validate> imm(this, this->pc_);
+ Value* value = Push(kWasmI64);
+ CALL_INTERFACE_IF_REACHABLE(I64Const, value, imm.value);
+ len = 1 + imm.length;
+ break;
+ }
+ case kExprF32Const: {
+ ImmF32Immediate<validate> imm(this, this->pc_);
+ Value* value = Push(kWasmF32);
+ CALL_INTERFACE_IF_REACHABLE(F32Const, value, imm.value);
+ len = 1 + imm.length;
+ break;
+ }
+ case kExprF64Const: {
+ ImmF64Immediate<validate> imm(this, this->pc_);
+ Value* value = Push(kWasmF64);
+ CALL_INTERFACE_IF_REACHABLE(F64Const, value, imm.value);
+ len = 1 + imm.length;
+ break;
+ }
+ case kExprRefNull: {
+ CHECK_PROTOTYPE_OPCODE(reftypes);
+ RefNullImmediate<validate> imm(this->enabled_, this, this->pc_);
+ if (!this->Validate(this->pc_, imm)) break;
+ Value* value = Push(imm.type);
+ CALL_INTERFACE_IF_REACHABLE(RefNull, value);
+ len = 1 + imm.length;
+ break;
+ }
+ case kExprRefIsNull: {
+ CHECK_PROTOTYPE_OPCODE(reftypes);
+ Value value = Pop();
+ Value* result = Push(kWasmI32);
+ len = 1;
+ if (value.type.is_nullable()) {
+ CALL_INTERFACE_IF_REACHABLE(UnOp, opcode, value, result);
break;
}
- case kExprGlobalGet: {
- GlobalIndexImmediate<validate> imm(this, this->pc_);
- len = 1 + imm.length;
- if (!this->Validate(this->pc_, imm)) break;
- auto* result = Push(imm.type);
- CALL_INTERFACE_IF_REACHABLE(GlobalGet, result, imm);
+ if (value.type.is_reference_type()) {
+ // Due to the check above, we know that the value is not null.
+ CALL_INTERFACE_IF_REACHABLE(I32Const, result, 0);
break;
}
- case kExprGlobalSet: {
- GlobalIndexImmediate<validate> imm(this, this->pc_);
- len = 1 + imm.length;
- if (!this->Validate(this->pc_, imm)) break;
- if (!VALIDATE(imm.global->mutability)) {
- this->errorf(this->pc_, "immutable global #%u cannot be assigned",
- imm.index);
+ this->errorf(this->pc_,
+ "invalid argument type to ref.is_null. Expected "
+ "reference type, got %s",
+ value.type.type_name().c_str());
+ break;
+ }
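// Illustrative sketch, not part of the patch above: ref.is_null on a
// non-nullable reference can never observe a null, so the case above folds it
// to a constant 0 instead of emitting a runtime check. The enum and the
// ClassifyRefIsNull helper below are hypothetical names used only to restate
// that three-way decision in isolation.
enum class RefIsNullLowering { kRuntimeCheck, kConstantZero, kTypeError };

struct InputType {
  bool is_reference;
  bool is_nullable;
};

constexpr RefIsNullLowering ClassifyRefIsNull(InputType t) {
  if (t.is_reference && t.is_nullable) return RefIsNullLowering::kRuntimeCheck;
  if (t.is_reference) return RefIsNullLowering::kConstantZero;  // never null
  return RefIsNullLowering::kTypeError;  // needs a reference-typed input
}

static_assert(ClassifyRefIsNull({true, false}) ==
                  RefIsNullLowering::kConstantZero,
              "non-nullable references fold to a constant 0");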
+ case kExprRefFunc: {
+ CHECK_PROTOTYPE_OPCODE(reftypes);
+ FunctionIndexImmediate<validate> imm(this, this->pc_);
+ if (!this->Validate(this->pc_, imm)) break;
+ Value* value = Push(ValueType::Ref(kHeapFunc, kNonNullable));
+ CALL_INTERFACE_IF_REACHABLE(RefFunc, imm.index, value);
+ len = 1 + imm.length;
+ break;
+ }
+ case kExprRefAsNonNull: {
+ CHECK_PROTOTYPE_OPCODE(typed_funcref);
+ Value value = Pop();
+ switch (value.type.kind()) {
+ case ValueType::kRef: {
+ Value* result = Push(value.type);
+ CALL_INTERFACE_IF_REACHABLE(PassThrough, value, result);
break;
}
- auto value = Pop(0, imm.type);
- CALL_INTERFACE_IF_REACHABLE(GlobalSet, value, imm);
- break;
- }
- case kExprTableGet: {
- CHECK_PROTOTYPE_OPCODE(anyref);
- TableIndexImmediate<validate> imm(this, this->pc_);
- len = 1 + imm.length;
- if (!this->Validate(this->pc_, imm)) break;
- DCHECK_NOT_NULL(this->module_);
- auto index = Pop(0, kWasmI32);
- auto* result = Push(this->module_->tables[imm.index].type);
- CALL_INTERFACE_IF_REACHABLE(TableGet, index, result, imm);
- break;
- }
- case kExprTableSet: {
- CHECK_PROTOTYPE_OPCODE(anyref);
- TableIndexImmediate<validate> imm(this, this->pc_);
- len = 1 + imm.length;
- if (!this->Validate(this->pc_, imm)) break;
- auto value = Pop(1, this->module_->tables[imm.index].type);
- auto index = Pop(0, kWasmI32);
- CALL_INTERFACE_IF_REACHABLE(TableSet, index, value, imm);
- break;
- }
-
- case kExprI32LoadMem8S:
- len = 1 + DecodeLoadMem(LoadType::kI32Load8S);
- break;
- case kExprI32LoadMem8U:
- len = 1 + DecodeLoadMem(LoadType::kI32Load8U);
- break;
- case kExprI32LoadMem16S:
- len = 1 + DecodeLoadMem(LoadType::kI32Load16S);
- break;
- case kExprI32LoadMem16U:
- len = 1 + DecodeLoadMem(LoadType::kI32Load16U);
- break;
- case kExprI32LoadMem:
- len = 1 + DecodeLoadMem(LoadType::kI32Load);
- break;
- case kExprI64LoadMem8S:
- len = 1 + DecodeLoadMem(LoadType::kI64Load8S);
- break;
- case kExprI64LoadMem8U:
- len = 1 + DecodeLoadMem(LoadType::kI64Load8U);
- break;
- case kExprI64LoadMem16S:
- len = 1 + DecodeLoadMem(LoadType::kI64Load16S);
- break;
- case kExprI64LoadMem16U:
- len = 1 + DecodeLoadMem(LoadType::kI64Load16U);
- break;
- case kExprI64LoadMem32S:
- len = 1 + DecodeLoadMem(LoadType::kI64Load32S);
- break;
- case kExprI64LoadMem32U:
- len = 1 + DecodeLoadMem(LoadType::kI64Load32U);
- break;
- case kExprI64LoadMem:
- len = 1 + DecodeLoadMem(LoadType::kI64Load);
- break;
- case kExprF32LoadMem:
- len = 1 + DecodeLoadMem(LoadType::kF32Load);
- break;
- case kExprF64LoadMem:
- len = 1 + DecodeLoadMem(LoadType::kF64Load);
- break;
- case kExprI32StoreMem8:
- len = 1 + DecodeStoreMem(StoreType::kI32Store8);
- break;
- case kExprI32StoreMem16:
- len = 1 + DecodeStoreMem(StoreType::kI32Store16);
- break;
- case kExprI32StoreMem:
- len = 1 + DecodeStoreMem(StoreType::kI32Store);
- break;
- case kExprI64StoreMem8:
- len = 1 + DecodeStoreMem(StoreType::kI64Store8);
- break;
- case kExprI64StoreMem16:
- len = 1 + DecodeStoreMem(StoreType::kI64Store16);
- break;
- case kExprI64StoreMem32:
- len = 1 + DecodeStoreMem(StoreType::kI64Store32);
- break;
- case kExprI64StoreMem:
- len = 1 + DecodeStoreMem(StoreType::kI64Store);
- break;
- case kExprF32StoreMem:
- len = 1 + DecodeStoreMem(StoreType::kF32Store);
- break;
- case kExprF64StoreMem:
- len = 1 + DecodeStoreMem(StoreType::kF64Store);
- break;
- case kExprMemoryGrow: {
- if (!CheckHasMemory()) break;
- MemoryIndexImmediate<validate> imm(this, this->pc_);
- len = 1 + imm.length;
- DCHECK_NOT_NULL(this->module_);
- if (!VALIDATE(this->module_->origin == kWasmOrigin)) {
- this->error("grow_memory is not supported for asmjs modules");
+ case ValueType::kOptRef: {
+ Value* result =
+ Push(ValueType::Ref(value.type.heap_type(), kNonNullable));
+ CALL_INTERFACE_IF_REACHABLE(RefAsNonNull, value, result);
break;
}
- auto value = Pop(0, kWasmI32);
- auto* result = Push(kWasmI32);
- CALL_INTERFACE_IF_REACHABLE(MemoryGrow, value, result);
- break;
+ default:
+          default:
+            this->error(this->pc_ + 1,
+                        "invalid argument type to ref.as_non_null");
+ break;
}
- case kExprMemorySize: {
- if (!CheckHasMemory()) break;
- MemoryIndexImmediate<validate> imm(this, this->pc_);
- auto* result = Push(kWasmI32);
- len = 1 + imm.length;
- CALL_INTERFACE_IF_REACHABLE(CurrentMemoryPages, result);
+ break;
+ }
+ case kExprLocalGet: {
+ LocalIndexImmediate<validate> imm(this, this->pc_);
+ if (!this->Validate(this->pc_, imm)) break;
+ Value* value = Push(imm.type);
+ CALL_INTERFACE_IF_REACHABLE(LocalGet, value, imm);
+ len = 1 + imm.length;
+ break;
+ }
+ case kExprLocalSet: {
+ LocalIndexImmediate<validate> imm(this, this->pc_);
+ if (!this->Validate(this->pc_, imm)) break;
+ Value value = Pop(0, local_type_vec_[imm.index]);
+ CALL_INTERFACE_IF_REACHABLE(LocalSet, value, imm);
+ len = 1 + imm.length;
+ break;
+ }
+ case kExprLocalTee: {
+ LocalIndexImmediate<validate> imm(this, this->pc_);
+ if (!this->Validate(this->pc_, imm)) break;
+ Value value = Pop(0, local_type_vec_[imm.index]);
+ Value* result = Push(value.type);
+ CALL_INTERFACE_IF_REACHABLE(LocalTee, value, result, imm);
+ len = 1 + imm.length;
+ break;
+ }
+ case kExprDrop: {
+ Value value = Pop();
+ CALL_INTERFACE_IF_REACHABLE(Drop, value);
+ break;
+ }
+ case kExprGlobalGet: {
+ GlobalIndexImmediate<validate> imm(this, this->pc_);
+ len = 1 + imm.length;
+ if (!this->Validate(this->pc_, imm)) break;
+ Value* result = Push(imm.type);
+ CALL_INTERFACE_IF_REACHABLE(GlobalGet, result, imm);
+ break;
+ }
+ case kExprGlobalSet: {
+ GlobalIndexImmediate<validate> imm(this, this->pc_);
+ len = 1 + imm.length;
+ if (!this->Validate(this->pc_, imm)) break;
+ if (!VALIDATE(imm.global->mutability)) {
+ this->errorf(this->pc_, "immutable global #%u cannot be assigned",
+ imm.index);
break;
}
- case kExprCallFunction: {
- CallFunctionImmediate<validate> imm(this, this->pc_);
- len = 1 + imm.length;
- if (!this->Validate(this->pc_, imm)) break;
- auto args = PopArgs(imm.sig);
- auto* returns = PushReturns(imm.sig);
- CALL_INTERFACE_IF_REACHABLE(CallDirect, imm, args.begin(), returns);
+ Value value = Pop(0, imm.type);
+ CALL_INTERFACE_IF_REACHABLE(GlobalSet, value, imm);
+ break;
+ }
+ case kExprTableGet: {
+ CHECK_PROTOTYPE_OPCODE(reftypes);
+ TableIndexImmediate<validate> imm(this, this->pc_);
+ len = 1 + imm.length;
+ if (!this->Validate(this->pc_, imm)) break;
+ Value index = Pop(0, kWasmI32);
+ Value* result = Push(this->module_->tables[imm.index].type);
+ CALL_INTERFACE_IF_REACHABLE(TableGet, index, result, imm);
+ break;
+ }
+ case kExprTableSet: {
+ CHECK_PROTOTYPE_OPCODE(reftypes);
+ TableIndexImmediate<validate> imm(this, this->pc_);
+ len = 1 + imm.length;
+ if (!this->Validate(this->pc_, imm)) break;
+ Value value = Pop(1, this->module_->tables[imm.index].type);
+ Value index = Pop(0, kWasmI32);
+ CALL_INTERFACE_IF_REACHABLE(TableSet, index, value, imm);
+ break;
+ }
+
+ case kExprI32LoadMem8S:
+ len = 1 + DecodeLoadMem(LoadType::kI32Load8S);
+ break;
+ case kExprI32LoadMem8U:
+ len = 1 + DecodeLoadMem(LoadType::kI32Load8U);
+ break;
+ case kExprI32LoadMem16S:
+ len = 1 + DecodeLoadMem(LoadType::kI32Load16S);
+ break;
+ case kExprI32LoadMem16U:
+ len = 1 + DecodeLoadMem(LoadType::kI32Load16U);
+ break;
+ case kExprI32LoadMem:
+ len = 1 + DecodeLoadMem(LoadType::kI32Load);
+ break;
+ case kExprI64LoadMem8S:
+ len = 1 + DecodeLoadMem(LoadType::kI64Load8S);
+ break;
+ case kExprI64LoadMem8U:
+ len = 1 + DecodeLoadMem(LoadType::kI64Load8U);
+ break;
+ case kExprI64LoadMem16S:
+ len = 1 + DecodeLoadMem(LoadType::kI64Load16S);
+ break;
+ case kExprI64LoadMem16U:
+ len = 1 + DecodeLoadMem(LoadType::kI64Load16U);
+ break;
+ case kExprI64LoadMem32S:
+ len = 1 + DecodeLoadMem(LoadType::kI64Load32S);
+ break;
+ case kExprI64LoadMem32U:
+ len = 1 + DecodeLoadMem(LoadType::kI64Load32U);
+ break;
+ case kExprI64LoadMem:
+ len = 1 + DecodeLoadMem(LoadType::kI64Load);
+ break;
+ case kExprF32LoadMem:
+ len = 1 + DecodeLoadMem(LoadType::kF32Load);
+ break;
+ case kExprF64LoadMem:
+ len = 1 + DecodeLoadMem(LoadType::kF64Load);
+ break;
+ case kExprI32StoreMem8:
+ len = 1 + DecodeStoreMem(StoreType::kI32Store8);
+ break;
+ case kExprI32StoreMem16:
+ len = 1 + DecodeStoreMem(StoreType::kI32Store16);
+ break;
+ case kExprI32StoreMem:
+ len = 1 + DecodeStoreMem(StoreType::kI32Store);
+ break;
+ case kExprI64StoreMem8:
+ len = 1 + DecodeStoreMem(StoreType::kI64Store8);
+ break;
+ case kExprI64StoreMem16:
+ len = 1 + DecodeStoreMem(StoreType::kI64Store16);
+ break;
+ case kExprI64StoreMem32:
+ len = 1 + DecodeStoreMem(StoreType::kI64Store32);
+ break;
+ case kExprI64StoreMem:
+ len = 1 + DecodeStoreMem(StoreType::kI64Store);
+ break;
+ case kExprF32StoreMem:
+ len = 1 + DecodeStoreMem(StoreType::kF32Store);
+ break;
+ case kExprF64StoreMem:
+ len = 1 + DecodeStoreMem(StoreType::kF64Store);
+ break;
+ case kExprMemoryGrow: {
+ if (!CheckHasMemory()) break;
+ MemoryIndexImmediate<validate> imm(this, this->pc_);
+ len = 1 + imm.length;
+ if (!VALIDATE(this->module_->origin == kWasmOrigin)) {
+ this->error("grow_memory is not supported for asmjs modules");
break;
}
- case kExprCallIndirect: {
- CallIndirectImmediate<validate> imm(this->enabled_, this, this->pc_);
- len = 1 + imm.length;
- if (!this->Validate(this->pc_, imm)) break;
- auto index = Pop(0, kWasmI32);
- auto args = PopArgs(imm.sig);
- auto* returns = PushReturns(imm.sig);
- CALL_INTERFACE_IF_REACHABLE(CallIndirect, index, imm, args.begin(),
- returns);
+ Value value = Pop(0, kWasmI32);
+ Value* result = Push(kWasmI32);
+ CALL_INTERFACE_IF_REACHABLE(MemoryGrow, value, result);
+ break;
+ }
+ case kExprMemorySize: {
+ if (!CheckHasMemory()) break;
+ MemoryIndexImmediate<validate> imm(this, this->pc_);
+ Value* result = Push(kWasmI32);
+ len = 1 + imm.length;
+ CALL_INTERFACE_IF_REACHABLE(CurrentMemoryPages, result);
+ break;
+ }
+ case kExprCallFunction: {
+ CallFunctionImmediate<validate> imm(this, this->pc_);
+ len = 1 + imm.length;
+ if (!this->Validate(this->pc_, imm)) break;
+ ArgVector args = PopArgs(imm.sig);
+ Value* returns = PushReturns(imm.sig);
+ CALL_INTERFACE_IF_REACHABLE(CallDirect, imm, args.begin(), returns);
+ break;
+ }
+ case kExprCallIndirect: {
+ CallIndirectImmediate<validate> imm(this->enabled_, this, this->pc_);
+ len = 1 + imm.length;
+ if (!this->Validate(this->pc_, imm)) break;
+ Value index = Pop(0, kWasmI32);
+ ArgVector args = PopArgs(imm.sig);
+ Value* returns = PushReturns(imm.sig);
+ CALL_INTERFACE_IF_REACHABLE(CallIndirect, index, imm, args.begin(),
+ returns);
+ break;
+ }
+ case kExprReturnCall: {
+ CHECK_PROTOTYPE_OPCODE(return_call);
+
+ CallFunctionImmediate<validate> imm(this, this->pc_);
+ len = 1 + imm.length;
+ if (!this->Validate(this->pc_, imm)) break;
+ if (!this->CanReturnCall(imm.sig)) {
+ OPCODE_ERROR(opcode, "tail call return types mismatch");
break;
}
- case kExprReturnCall: {
- CHECK_PROTOTYPE_OPCODE(return_call);
- CallFunctionImmediate<validate> imm(this, this->pc_);
- len = 1 + imm.length;
- if (!this->Validate(this->pc_, imm)) break;
- if (!this->CanReturnCall(imm.sig)) {
- OPCODE_ERROR(opcode, "tail call return types mismatch");
- break;
- }
+ ArgVector args = PopArgs(imm.sig);
- auto args = PopArgs(imm.sig);
-
- CALL_INTERFACE_IF_REACHABLE(ReturnCall, imm, args.begin());
- EndControl();
- break;
- }
- case kExprReturnCallIndirect: {
- CHECK_PROTOTYPE_OPCODE(return_call);
- CallIndirectImmediate<validate> imm(this->enabled_, this, this->pc_);
- len = 1 + imm.length;
- if (!this->Validate(this->pc_, imm)) break;
- if (!this->CanReturnCall(imm.sig)) {
- OPCODE_ERROR(opcode, "tail call return types mismatch");
- break;
- }
- auto index = Pop(0, kWasmI32);
- auto args = PopArgs(imm.sig);
- CALL_INTERFACE_IF_REACHABLE(ReturnCallIndirect, index, imm,
- args.begin());
- EndControl();
- break;
- }
- case kNumericPrefix: {
- ++len;
- byte numeric_index =
- this->template read_u8<validate>(this->pc_ + 1, "numeric index");
- opcode = static_cast<WasmOpcode>(opcode << 8 | numeric_index);
- if (opcode == kExprTableGrow || opcode == kExprTableSize ||
- opcode == kExprTableFill) {
- CHECK_PROTOTYPE_OPCODE(anyref);
- } else if (opcode >= kExprMemoryInit) {
- CHECK_PROTOTYPE_OPCODE(bulk_memory);
- }
- TRACE_PART(TRACE_INST_FORMAT, startrel(this->pc_),
- WasmOpcodes::OpcodeName(opcode));
- len += DecodeNumericOpcode(opcode);
- break;
- }
- case kSimdPrefix: {
- CHECK_PROTOTYPE_OPCODE(simd);
- uint32_t length = 0;
- opcode =
- this->template read_prefixed_opcode<validate>(this->pc_, &length);
- if (!VALIDATE(this->ok())) break;
- len += length;
-
- TRACE_PART(TRACE_INST_FORMAT, startrel(this->pc_),
- WasmOpcodes::OpcodeName(opcode));
- len += DecodeSimdOpcode(opcode, length);
- break;
- }
- case kAtomicPrefix: {
- CHECK_PROTOTYPE_OPCODE(threads);
- len++;
- byte atomic_index =
- this->template read_u8<validate>(this->pc_ + 1, "atomic index");
- opcode = static_cast<WasmOpcode>(opcode << 8 | atomic_index);
- TRACE_PART(TRACE_INST_FORMAT, startrel(this->pc_),
- WasmOpcodes::OpcodeName(opcode));
- len += DecodeAtomicOpcode(opcode);
+ CALL_INTERFACE_IF_REACHABLE(ReturnCall, imm, args.begin());
+ EndControl();
+ break;
+ }
+ case kExprReturnCallIndirect: {
+ CHECK_PROTOTYPE_OPCODE(return_call);
+ CallIndirectImmediate<validate> imm(this->enabled_, this, this->pc_);
+ len = 1 + imm.length;
+ if (!this->Validate(this->pc_, imm)) break;
+ if (!this->CanReturnCall(imm.sig)) {
+ OPCODE_ERROR(opcode, "tail call return types mismatch");
break;
}
- case kGCPrefix: {
- CHECK_PROTOTYPE_OPCODE(gc);
- byte gc_index =
- this->template read_u8<validate>(this->pc_ + 1, "gc index");
- opcode = static_cast<WasmOpcode>(opcode << 8 | gc_index);
- TRACE_PART(TRACE_INST_FORMAT, startrel(this->pc_),
- WasmOpcodes::OpcodeName(opcode));
- len = DecodeGCOpcode(opcode);
- break;
+ Value index = Pop(0, kWasmI32);
+ ArgVector args = PopArgs(imm.sig);
+ CALL_INTERFACE_IF_REACHABLE(ReturnCallIndirect, index, imm,
+ args.begin());
+ EndControl();
+ break;
+ }
+ case kNumericPrefix: {
+ ++len;
+ byte numeric_index =
+ this->template read_u8<validate>(this->pc_ + 1, "numeric index");
+ WasmOpcode full_opcode =
+ static_cast<WasmOpcode>(opcode << 8 | numeric_index);
+ if (full_opcode == kExprTableGrow || full_opcode == kExprTableSize ||
+ full_opcode == kExprTableFill) {
+ CHECK_PROTOTYPE_OPCODE(reftypes);
+ } else if (full_opcode >= kExprMemoryInit) {
+ CHECK_PROTOTYPE_OPCODE(bulk_memory);
}
+ TRACE_PART(TRACE_INST_FORMAT, startrel(this->pc_),
+ WasmOpcodes::OpcodeName(full_opcode));
+ len += DecodeNumericOpcode(full_opcode);
+ break;
+ }
+ case kSimdPrefix: {
+ CHECK_PROTOTYPE_OPCODE(simd);
+ uint32_t length = 0;
+ WasmOpcode full_opcode =
+ this->template read_prefixed_opcode<validate>(this->pc_, &length);
+ if (!VALIDATE(this->ok())) break;
+ len += length;
+
+ TRACE_PART(TRACE_INST_FORMAT, startrel(this->pc_),
+ WasmOpcodes::OpcodeName(full_opcode));
+ len += DecodeSimdOpcode(full_opcode, length);
+ break;
+ }
+ case kAtomicPrefix: {
+ CHECK_PROTOTYPE_OPCODE(threads);
+ len++;
+ byte atomic_index =
+ this->template read_u8<validate>(this->pc_ + 1, "atomic index");
+ WasmOpcode full_opcode =
+ static_cast<WasmOpcode>(opcode << 8 | atomic_index);
+ TRACE_PART(TRACE_INST_FORMAT, startrel(this->pc_),
+ WasmOpcodes::OpcodeName(full_opcode));
+ len += DecodeAtomicOpcode(full_opcode);
+ break;
+ }
+ case kGCPrefix: {
+ CHECK_PROTOTYPE_OPCODE(gc);
+ byte gc_index =
+ this->template read_u8<validate>(this->pc_ + 1, "gc index");
+ WasmOpcode full_opcode =
+ static_cast<WasmOpcode>(opcode << 8 | gc_index);
+ TRACE_PART(TRACE_INST_FORMAT, startrel(this->pc_),
+ WasmOpcodes::OpcodeName(full_opcode));
+ len = DecodeGCOpcode(full_opcode);
+ break;
+ }
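// Illustrative sketch, not part of the patch above: the numeric, atomic and
// GC opcode groups are encoded as a one-byte prefix followed by a one-byte
// sub-opcode (the SIMD group uses a LEB-encoded sub-opcode instead), and the
// cases above combine the two bytes into a single value. A standalone
// restatement of that composition, with an arbitrary example value:
#include <cstdint>

constexpr uint32_t FullOpcode(uint8_t prefix, uint8_t sub_opcode) {
  return (static_cast<uint32_t>(prefix) << 8) | sub_opcode;
}
static_assert(FullOpcode(0xFB, 0x01) == 0xFB01,
              "prefix byte lands in the high bits");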
// Note that prototype opcodes are not handled in the fastpath
// above this switch, to avoid checking a feature flag.
#define SIMPLE_PROTOTYPE_CASE(name, opc, sig) \
case kExpr##name: /* fallthrough */
- FOREACH_SIMPLE_PROTOTYPE_OPCODE(SIMPLE_PROTOTYPE_CASE)
+ FOREACH_SIMPLE_PROTOTYPE_OPCODE(SIMPLE_PROTOTYPE_CASE)
#undef SIMPLE_PROTOTYPE_CASE
- BuildSimplePrototypeOperator(opcode);
- break;
- default: {
- // Deal with special asmjs opcodes.
- if (this->module_ != nullptr && is_asmjs_module(this->module_)) {
- const FunctionSig* sig = WasmOpcodes::AsmjsSignature(opcode);
- if (sig) {
- BuildSimpleOperator(opcode, sig);
- }
- } else {
- this->error("Invalid opcode");
- return;
+ BuildSimplePrototypeOperator(opcode);
+ break;
+ default: {
+ // Deal with special asmjs opcodes.
+ if (is_asmjs_module(this->module_)) {
+ const FunctionSig* sig = WasmOpcodes::AsmjsSignature(opcode);
+ if (sig) {
+ BuildSimpleOperator(opcode, sig);
}
+ } else {
+ this->error("Invalid opcode");
+ return 0;
}
}
+ }
#if DEBUG
- if (FLAG_trace_wasm_decoder) {
- TRACE_PART(" ");
- for (Control& c : control_) {
- switch (c.kind) {
- case kControlIf:
- TRACE_PART("I");
- break;
- case kControlBlock:
- TRACE_PART("B");
- break;
- case kControlLoop:
- TRACE_PART("L");
- break;
- case kControlTry:
- TRACE_PART("T");
- break;
- default:
- break;
- }
- if (c.start_merge.arity) TRACE_PART("%u-", c.start_merge.arity);
- TRACE_PART("%u", c.end_merge.arity);
- if (!c.reachable()) TRACE_PART("%c", c.unreachable() ? '*' : '#');
+ if (FLAG_trace_wasm_decoder) {
+ TRACE_PART(" ");
+ for (Control& c : control_) {
+ switch (c.kind) {
+ case kControlIf:
+ TRACE_PART("I");
+ break;
+ case kControlBlock:
+ TRACE_PART("B");
+ break;
+ case kControlLoop:
+ TRACE_PART("L");
+ break;
+ case kControlTry:
+ TRACE_PART("T");
+ break;
+ case kControlIfElse:
+ case kControlTryCatch:
+ case kControlLet: // TODO(7748): Implement
+ break;
}
- TRACE_PART(" | ");
- for (size_t i = 0; i < stack_.size(); ++i) {
- auto& val = stack_[i];
- WasmOpcode opcode = static_cast<WasmOpcode>(*val.pc);
- if (WasmOpcodes::IsPrefixOpcode(opcode)) {
- opcode = this->template read_prefixed_opcode<Decoder::kNoValidate>(
- val.pc);
+ if (c.start_merge.arity) TRACE_PART("%u-", c.start_merge.arity);
+ TRACE_PART("%u", c.end_merge.arity);
+ if (!c.reachable()) TRACE_PART("%c", c.unreachable() ? '*' : '#');
+ }
+ TRACE_PART(" | ");
+ for (size_t i = 0; i < stack_.size(); ++i) {
+ Value& val = stack_[i];
+ WasmOpcode val_opcode = static_cast<WasmOpcode>(*val.pc);
+ if (WasmOpcodes::IsPrefixOpcode(val_opcode)) {
+ val_opcode =
+ this->template read_prefixed_opcode<Decoder::kNoValidate>(val.pc);
+ }
+ TRACE_PART(" %c@%d:%s", val.type.short_name(),
+ static_cast<int>(val.pc - this->start_),
+ WasmOpcodes::OpcodeName(val_opcode));
+ // If the decoder failed, don't try to decode the immediates, as this
+ // can trigger a DCHECK failure.
+ if (this->failed()) continue;
+ switch (val_opcode) {
+ case kExprI32Const: {
+ ImmI32Immediate<Decoder::kNoValidate> imm(this, val.pc);
+ TRACE_PART("[%d]", imm.value);
+ break;
}
- TRACE_PART(" %c@%d:%s", val.type.short_name(),
- static_cast<int>(val.pc - this->start_),
- WasmOpcodes::OpcodeName(opcode));
- // If the decoder failed, don't try to decode the immediates, as this
- // can trigger a DCHECK failure.
- if (this->failed()) continue;
- switch (opcode) {
- case kExprI32Const: {
- ImmI32Immediate<Decoder::kNoValidate> imm(this, val.pc);
- TRACE_PART("[%d]", imm.value);
- break;
- }
- case kExprLocalGet:
- case kExprLocalSet:
- case kExprLocalTee: {
- LocalIndexImmediate<Decoder::kNoValidate> imm(this, val.pc);
- TRACE_PART("[%u]", imm.index);
- break;
- }
- case kExprGlobalGet:
- case kExprGlobalSet: {
- GlobalIndexImmediate<Decoder::kNoValidate> imm(this, val.pc);
- TRACE_PART("[%u]", imm.index);
- break;
- }
- default:
- break;
+ case kExprLocalGet:
+ case kExprLocalSet:
+ case kExprLocalTee: {
+ LocalIndexImmediate<Decoder::kNoValidate> imm(this, val.pc);
+ TRACE_PART("[%u]", imm.index);
+ break;
+ }
+ case kExprGlobalGet:
+ case kExprGlobalSet: {
+ GlobalIndexImmediate<Decoder::kNoValidate> imm(this, val.pc);
+ TRACE_PART("[%u]", imm.index);
+ break;
}
+ default:
+ break;
}
}
+ }
#endif
+ return len;
+ }
+
+ using OpcodeHandler = int (*)(WasmFullDecoder*);
+
+ template <size_t idx>
+ struct GetOpcodeHandlerTableEntry
+ : public std::integral_constant<
+ OpcodeHandler,
+ &WasmFullDecoder::DecodeOp<static_cast<WasmOpcode>(idx)>> {};
+
+ OpcodeHandler GetOpcodeHandler(uint8_t opcode) {
+ static constexpr std::array<OpcodeHandler, 256> kOpcodeHandlers =
+ base::make_array<256, GetOpcodeHandlerTableEntry>();
+ return kOpcodeHandlers[opcode];
+ }
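// Illustrative sketch, not part of the patch above: GetOpcodeHandler builds a
// 256-entry table of per-opcode handler functions at compile time and
// dispatches on the first byte of each instruction. A minimal standalone
// version of the same pattern; the names Ctx, HandleOp, MakeTable and
// Dispatch are hypothetical stand-ins for the decoder types.
#include <array>
#include <cstdint>
#include <utility>

struct Ctx {};  // stands in for the decoder state

template <uint8_t kOpcode>
int HandleOp(Ctx*) {
  // A real handler would decode immediates and feed the graph builder here;
  // the return value is the number of bytes consumed.
  return 1;
}

template <size_t... Is>
constexpr std::array<int (*)(Ctx*), 256> MakeTable(std::index_sequence<Is...>) {
  return {{&HandleOp<static_cast<uint8_t>(Is)>...}};
}

int Dispatch(Ctx* ctx, uint8_t first_byte) {
  static constexpr std::array<int (*)(Ctx*), 256> kTable =
      MakeTable(std::make_index_sequence<256>{});
  return kTable[first_byte](ctx);  // one indirect call per instruction
}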
+
+ void DecodeFunctionBody() {
+ TRACE("wasm-decode %p...%p (module+%u, %d bytes)\n", this->start(),
+ this->end(), this->pc_offset(),
+ static_cast<int>(this->end() - this->start()));
+
+ // Set up initial function block.
+ {
+ Control* c = PushControl(kControlBlock);
+ InitMerge(&c->start_merge, 0, [](uint32_t) -> Value { UNREACHABLE(); });
+ InitMerge(&c->end_merge,
+ static_cast<uint32_t>(this->sig_->return_count()),
+ [&](uint32_t i) {
+ return Value{this->pc_, this->sig_->GetReturn(i)};
+ });
+ CALL_INTERFACE(StartFunctionBody, c);
+ }
+
+ // Decode the function body.
+ while (this->pc_ < this->end_) {
+ uint8_t first_byte = *this->pc_;
+ CALL_INTERFACE_IF_REACHABLE(NextInstruction,
+ static_cast<WasmOpcode>(first_byte));
+ OpcodeHandler handler = GetOpcodeHandler(first_byte);
+ int len = (*handler)(this);
this->pc_ += len;
- } // end decode loop
+ }
+
if (!VALIDATE(this->pc_ == this->end_) && this->ok()) {
this->error("Beyond end of code");
}
@@ -2728,13 +2925,14 @@ class WasmFullDecoder : public WasmDecoder<validate> {
void EndControl() {
DCHECK(!control_.empty());
- auto* current = &control_.back();
+ Control* current = &control_.back();
stack_.erase(stack_.begin() + current->stack_depth, stack_.end());
CALL_INTERFACE_IF_REACHABLE(EndControl, current);
current->reachability = kUnreachable;
+ current_code_reachable_ = false;
}
- template<typename func>
+ template <typename func>
void InitMerge(Merge<Value>* merge, uint32_t arity, func get_val) {
merge->arity = arity;
if (arity == 1) {
@@ -2771,7 +2969,16 @@ class WasmFullDecoder : public WasmDecoder<validate> {
int count = static_cast<int>(type->field_count());
ArgVector args(count);
for (int i = count - 1; i >= 0; i--) {
- args[i] = Pop(i, type->field(i));
+ args[i] = Pop(i, type->field(i).Unpacked());
+ }
+ return args;
+ }
+
+ V8_INLINE ArgVector PopArgs(uint32_t base_index,
+ Vector<ValueType> arg_types) {
+ ArgVector args(arg_types.size());
+ for (int i = static_cast<int>(arg_types.size()) - 1; i >= 0; i--) {
+ args[i] = Pop(base_index + i, arg_types[i]);
}
return args;
}
@@ -2781,10 +2988,12 @@ class WasmFullDecoder : public WasmDecoder<validate> {
return sig->return_count() == 0 ? kWasmStmt : sig->GetReturn();
}
- Control* PushControl(ControlKind kind) {
+ Control* PushControl(ControlKind kind, uint32_t locals_count = 0) {
Reachability reachability =
control_.empty() ? kReachable : control_.back().innerReachability();
- control_.emplace_back(kind, stack_size(), this->pc_, reachability);
+ control_.emplace_back(kind, locals_count, stack_size(), this->pc_,
+ reachability);
+ current_code_reachable_ = this->ok() && reachability == kReachable;
return &control_.back();
}
@@ -2800,17 +3009,16 @@ class WasmFullDecoder : public WasmDecoder<validate> {
control_.pop_back();
// If the parent block was reachable before, but the popped control does not
// return to here, this block becomes "spec only reachable".
- if (!parent_reached && control_.back().reachable()) {
- control_.back().reachability = kSpecOnlyReachable;
- }
+ if (!parent_reached) SetSucceedingCodeDynamicallyUnreachable();
+ current_code_reachable_ = control_.back().reachable();
}
int DecodeLoadMem(LoadType type, int prefix_len = 0) {
if (!CheckHasMemory()) return 0;
MemoryAccessImmediate<validate> imm(this, this->pc_ + prefix_len,
type.size_log_2());
- auto index = Pop(0, kWasmI32);
- auto* result = Push(type.value_type());
+ Value index = Pop(0, kWasmI32);
+ Value* result = Push(type.value_type());
CALL_INTERFACE_IF_REACHABLE(LoadMem, type, imm, index, result);
return imm.length;
}
@@ -2823,8 +3031,8 @@ class WasmFullDecoder : public WasmDecoder<validate> {
transform == LoadTransformationKind::kExtend ? 3 : type.size_log_2();
MemoryAccessImmediate<validate> imm(this, this->pc_ + opcode_length,
max_alignment);
- auto index = Pop(0, kWasmI32);
- auto* result = Push(kWasmS128);
+ Value index = Pop(0, kWasmI32);
+ Value* result = Push(kWasmS128);
CALL_INTERFACE_IF_REACHABLE(LoadTransform, type, transform, imm, index,
result);
return imm.length;
@@ -2834,8 +3042,8 @@ class WasmFullDecoder : public WasmDecoder<validate> {
if (!CheckHasMemory()) return 0;
MemoryAccessImmediate<validate> imm(this, this->pc_ + prefix_len,
store.size_log_2());
- auto value = Pop(1, store.value_type());
- auto index = Pop(0, kWasmI32);
+ Value value = Pop(1, store.value_type());
+ Value index = Pop(0, kWasmI32);
CALL_INTERFACE_IF_REACHABLE(StoreMem, store, imm, index, value);
return imm.length;
}
@@ -2850,7 +3058,7 @@ class WasmFullDecoder : public WasmDecoder<validate> {
}
std::vector<ValueType> InitializeBrTableResultTypes(uint32_t target) {
- auto* merge = control_at(target)->br_merge();
+ Merge<Value>* merge = control_at(target)->br_merge();
int br_arity = merge->arity;
std::vector<ValueType> result(br_arity);
for (int i = 0; i < br_arity; ++i) {
@@ -2861,7 +3069,7 @@ class WasmFullDecoder : public WasmDecoder<validate> {
bool UpdateBrTableResultTypes(std::vector<ValueType>* result_types,
uint32_t target, const byte* pos, int index) {
- auto* merge = control_at(target)->br_merge();
+ Merge<Value>* merge = control_at(target)->br_merge();
int br_arity = merge->arity;
// First we check if the arities match.
if (br_arity != static_cast<int>(result_types->size())) {
@@ -2873,18 +3081,27 @@ class WasmFullDecoder : public WasmDecoder<validate> {
}
for (int i = 0; i < br_arity; ++i) {
- if (this->enabled_.has_anyref()) {
+ if (this->enabled_.has_reftypes()) {
// The expected type is the biggest common sub type of all targets.
+ ValueType type = (*result_types)[i];
(*result_types)[i] =
- ValueType::CommonSubType((*result_types)[i], (*merge)[i].type);
+ CommonSubtype((*result_types)[i], (*merge)[i].type, this->module_);
+ if ((*result_types)[i] == kWasmBottom) {
+ this->errorf(pos,
+ "inconsistent type in br_table target %u (previous "
+ "was %s, this one is %s)",
+ index, type.type_name().c_str(),
+ (*merge)[i].type.type_name().c_str());
+ return false;
+ }
} else {
// All target must have the same signature.
if ((*result_types)[i] != (*merge)[i].type) {
this->errorf(pos,
"inconsistent type in br_table target %u (previous "
"was %s, this one is %s)",
- index, (*result_types)[i].type_name(),
- (*merge)[i].type.type_name());
+ index, (*result_types)[i].type_name().c_str(),
+ (*merge)[i].type.type_name().c_str());
return false;
}
}
@@ -2909,10 +3126,11 @@ class WasmFullDecoder : public WasmDecoder<validate> {
// Type-check the topmost br_arity values on the stack.
for (int i = 0; i < br_arity; ++i) {
Value& val = stack_values[i];
- if (!val.type.IsSubTypeOf(result_types[i])) {
+ if (!IsSubtypeOf(val.type, result_types[i], this->module_)) {
this->errorf(this->pc_,
"type error in merge[%u] (expected %s, got %s)", i,
- result_types[i].type_name(), val.type.type_name());
+ result_types[i].type_name().c_str(),
+ val.type.type_name().c_str());
return false;
}
}
@@ -2928,7 +3146,7 @@ class WasmFullDecoder : public WasmDecoder<validate> {
SimdLaneImmediate<validate> imm(this, this->pc_, opcode_length);
if (this->Validate(this->pc_, opcode, imm)) {
Value inputs[] = {Pop(0, kWasmS128)};
- auto* result = Push(type);
+ Value* result = Push(type);
CALL_INTERFACE_IF_REACHABLE(SimdLaneOp, opcode, imm, ArrayVector(inputs),
result);
}
@@ -2943,7 +3161,7 @@ class WasmFullDecoder : public WasmDecoder<validate> {
UnreachableValue(this->pc_)};
inputs[1] = Pop(1, type);
inputs[0] = Pop(0, kWasmS128);
- auto* result = Push(kWasmS128);
+ Value* result = Push(kWasmS128);
CALL_INTERFACE_IF_REACHABLE(SimdLaneOp, opcode, imm, ArrayVector(inputs),
result);
}
@@ -2953,9 +3171,9 @@ class WasmFullDecoder : public WasmDecoder<validate> {
uint32_t Simd8x16ShuffleOp(uint32_t opcode_length) {
Simd8x16ShuffleImmediate<validate> imm(this, this->pc_, opcode_length);
if (this->Validate(this->pc_, imm)) {
- auto input1 = Pop(1, kWasmS128);
- auto input0 = Pop(0, kWasmS128);
- auto* result = Push(kWasmS128);
+ Value input1 = Pop(1, kWasmS128);
+ Value input0 = Pop(0, kWasmS128);
+ Value* result = Push(kWasmS128);
CALL_INTERFACE_IF_REACHABLE(Simd8x16ShuffleOp, imm, input0, input1,
result);
}
@@ -3075,8 +3293,8 @@ class WasmFullDecoder : public WasmDecoder<validate> {
this->error("invalid simd opcode");
break;
}
- auto args = PopArgs(sig);
- auto* results =
+ ArgVector args = PopArgs(sig);
+ Value* results =
sig->return_count() == 0 ? nullptr : Push(GetReturnType(sig));
CALL_INTERFACE_IF_REACHABLE(SimdOp, opcode, VectorOf(args), results);
}
@@ -3091,29 +3309,66 @@ class WasmFullDecoder : public WasmDecoder<validate> {
StructIndexImmediate<validate> imm(this, this->pc_ + len);
len += imm.length;
if (!this->Validate(this->pc_, imm)) break;
- auto args = PopArgs(imm.struct_type);
- auto* value = Push(ValueType(ValueType::kRef, imm.index));
+ ArgVector args = PopArgs(imm.struct_type);
+ Value* value = Push(
+ ValueType::Ref(static_cast<HeapType>(imm.index), kNonNullable));
CALL_INTERFACE_IF_REACHABLE(StructNew, imm, args.begin(), value);
break;
}
case kExprStructGet: {
FieldIndexImmediate<validate> field(this, this->pc_ + len);
if (!this->Validate(this->pc_ + len, field)) break;
+ ValueType field_type =
+ field.struct_index.struct_type->field(field.index);
+ if (field_type.is_packed()) {
+ this->error(this->pc_,
+ "struct.get used with a field of packed type. "
+ "Use struct.get_s or struct.get_u instead.");
+ break;
+ }
len += field.length;
- auto struct_obj =
- Pop(0, ValueType(ValueType::kOptRef, field.struct_index.index));
- auto* value = Push(field.struct_index.struct_type->field(field.index));
- CALL_INTERFACE_IF_REACHABLE(StructGet, struct_obj, field, value);
+ Value struct_obj = Pop(
+ 0, ValueType::Ref(static_cast<HeapType>(field.struct_index.index),
+ kNullable));
+ Value* value = Push(field_type);
+ CALL_INTERFACE_IF_REACHABLE(StructGet, struct_obj, field, true, value);
+ break;
+ }
+ case kExprStructGetU:
+ case kExprStructGetS: {
+ FieldIndexImmediate<validate> field(this, this->pc_ + len);
+ if (!this->Validate(this->pc_ + len, field)) break;
+ len += field.length;
+ ValueType field_type =
+ field.struct_index.struct_type->field(field.index);
+ if (!field_type.is_packed()) {
+ this->errorf(this->pc_,
+ "%s is only valid for packed struct fields. "
+ "Use struct.get instead.",
+ WasmOpcodes::OpcodeName(opcode));
+ break;
+ }
+ Value struct_obj = Pop(
+ 0, ValueType::Ref(static_cast<HeapType>(field.struct_index.index),
+ kNullable));
+ Value* value = Push(field_type.Unpacked());
+ CALL_INTERFACE_IF_REACHABLE(StructGet, struct_obj, field,
+ opcode == kExprStructGetS, value);
break;
}
case kExprStructSet: {
FieldIndexImmediate<validate> field(this, this->pc_ + len);
if (!this->Validate(this->pc_ + len, field)) break;
len += field.length;
- auto field_value = Pop(
- 0, ValueType(field.struct_index.struct_type->field(field.index)));
- auto struct_obj =
- Pop(0, ValueType(ValueType::kOptRef, field.struct_index.index));
+ const StructType* struct_type = field.struct_index.struct_type;
+ if (!struct_type->mutability(field.index)) {
+ this->error(this->pc_, "setting immutable struct field");
+ break;
+ }
+ Value field_value = Pop(1, struct_type->field(field.index).Unpacked());
+ Value struct_obj = Pop(
+ 0, ValueType::Ref(static_cast<HeapType>(field.struct_index.index),
+ kNullable));
CALL_INTERFACE_IF_REACHABLE(StructSet, struct_obj, field, field_value);
break;
}
@@ -3121,31 +3376,66 @@ class WasmFullDecoder : public WasmDecoder<validate> {
ArrayIndexImmediate<validate> imm(this, this->pc_ + len);
len += imm.length;
if (!this->Validate(this->pc_, imm)) break;
- auto length = Pop(0, kWasmI32);
- auto initial_value = Pop(0, imm.array_type->element_type());
- auto* value = Push(ValueType(ValueType::kRef, imm.index));
+ Value length = Pop(1, kWasmI32);
+ Value initial_value = Pop(0, imm.array_type->element_type().Unpacked());
+ Value* value = Push(
+ ValueType::Ref(static_cast<HeapType>(imm.index), kNonNullable));
CALL_INTERFACE_IF_REACHABLE(ArrayNew, imm, length, initial_value,
value);
break;
}
+ case kExprArrayGetS:
+ case kExprArrayGetU: {
+ ArrayIndexImmediate<validate> imm(this, this->pc_ + len);
+ len += imm.length;
+ if (!this->Validate(this->pc_ + len, imm)) break;
+ if (!imm.array_type->element_type().is_packed()) {
+          if (!imm.array_type->element_type().is_packed()) {
+            this->errorf(this->pc_,
+                         "%s is only valid for packed arrays. "
+                         "Use array.get instead.",
+ break;
+ }
+ Value index = Pop(1, kWasmI32);
+ Value array_obj =
+ Pop(0, ValueType::Ref(static_cast<HeapType>(imm.index), kNullable));
+ Value* value = Push(imm.array_type->element_type().Unpacked());
+ // TODO(7748): Optimize this when array_obj is non-nullable ref.
+ CALL_INTERFACE_IF_REACHABLE(ArrayGet, array_obj, imm, index,
+ opcode == kExprArrayGetS, value);
+ break;
+ }
case kExprArrayGet: {
ArrayIndexImmediate<validate> imm(this, this->pc_ + len);
len += imm.length;
if (!this->Validate(this->pc_ + len, imm)) break;
- auto index = Pop(0, kWasmI32);
- auto array_obj = Pop(0, ValueType(ValueType::kOptRef, imm.index));
- auto* value = Push(imm.array_type->element_type());
+ if (imm.array_type->element_type().is_packed()) {
+ this->error(this->pc_,
+ "array.get used with a field of packed type. "
+ "Use array.get_s or array.get_u instead.");
+ break;
+ }
+ Value index = Pop(1, kWasmI32);
+ Value array_obj =
+ Pop(0, ValueType::Ref(static_cast<HeapType>(imm.index), kNullable));
+ Value* value = Push(imm.array_type->element_type());
// TODO(7748): Optimize this when array_obj is non-nullable ref.
- CALL_INTERFACE_IF_REACHABLE(ArrayGet, array_obj, imm, index, value);
+ CALL_INTERFACE_IF_REACHABLE(ArrayGet, array_obj, imm, index, true,
+ value);
break;
}
case kExprArraySet: {
ArrayIndexImmediate<validate> imm(this, this->pc_ + len);
len += imm.length;
if (!this->Validate(this->pc_ + len, imm)) break;
- auto value = Pop(0, imm.array_type->element_type());
- auto index = Pop(0, kWasmI32);
- auto array_obj = Pop(0, ValueType(ValueType::kOptRef, imm.index));
+ if (!imm.array_type->mutability()) {
+ this->error(this->pc_, "setting element of immutable array");
+ break;
+ }
+ Value value = Pop(2, imm.array_type->element_type().Unpacked());
+ Value index = Pop(1, kWasmI32);
+ Value array_obj =
+ Pop(0, ValueType::Ref(static_cast<HeapType>(imm.index), kNullable));
// TODO(7748): Optimize this when array_obj is non-nullable ref.
CALL_INTERFACE_IF_REACHABLE(ArraySet, array_obj, imm, index, value);
break;
@@ -3154,11 +3444,22 @@ class WasmFullDecoder : public WasmDecoder<validate> {
ArrayIndexImmediate<validate> imm(this, this->pc_ + len);
len += imm.length;
if (!this->Validate(this->pc_ + len, imm)) break;
- auto array_obj = Pop(0, ValueType(ValueType::kOptRef, imm.index));
- auto* value = Push(kWasmI32);
+ Value array_obj =
+ Pop(0, ValueType::Ref(static_cast<HeapType>(imm.index), kNullable));
+ Value* value = Push(kWasmI32);
CALL_INTERFACE_IF_REACHABLE(ArrayLen, array_obj, value);
break;
}
+ case kExprRttCanon: {
+ // TODO(7748): Introduce HeapTypeImmediate and use that here.
+ TypeIndexImmediate<validate> imm(this, this->pc_ + len);
+ len += imm.length;
+ if (!this->Validate(this->pc_ + len, imm)) break;
+ Value* value =
+ Push(ValueType::Rtt(static_cast<HeapType>(imm.index), 1));
+ CALL_INTERFACE_IF_REACHABLE(RttCanon, imm, value);
+ break;
+ }
default:
this->error("invalid gc opcode");
return 0;
@@ -3209,8 +3510,8 @@ class WasmFullDecoder : public WasmDecoder<validate> {
MemoryAccessImmediate<validate> imm(
this, this->pc_ + 1, ElementSizeLog2Of(memtype.representation()));
len += imm.length;
- auto args = PopArgs(sig);
- auto result = ret_type == kWasmStmt ? nullptr : Push(GetReturnType(sig));
+ ArgVector args = PopArgs(sig);
+ Value* result = ret_type == kWasmStmt ? nullptr : Push(GetReturnType(sig));
CALL_INTERFACE_IF_REACHABLE(AtomicOp, opcode, VectorOf(args), imm, result);
return len;
}
@@ -3234,9 +3535,9 @@ class WasmFullDecoder : public WasmDecoder<validate> {
MemoryInitImmediate<validate> imm(this, this->pc_);
if (!this->Validate(imm)) break;
len += imm.length;
- auto size = Pop(2, sig->GetParam(2));
- auto src = Pop(1, sig->GetParam(1));
- auto dst = Pop(0, sig->GetParam(0));
+ Value size = Pop(2, sig->GetParam(2));
+ Value src = Pop(1, sig->GetParam(1));
+ Value dst = Pop(0, sig->GetParam(0));
CALL_INTERFACE_IF_REACHABLE(MemoryInit, imm, dst, src, size);
break;
}
@@ -3251,9 +3552,9 @@ class WasmFullDecoder : public WasmDecoder<validate> {
MemoryCopyImmediate<validate> imm(this, this->pc_);
if (!this->Validate(imm)) break;
len += imm.length;
- auto size = Pop(2, sig->GetParam(2));
- auto src = Pop(1, sig->GetParam(1));
- auto dst = Pop(0, sig->GetParam(0));
+ Value size = Pop(2, sig->GetParam(2));
+ Value src = Pop(1, sig->GetParam(1));
+ Value dst = Pop(0, sig->GetParam(0));
CALL_INTERFACE_IF_REACHABLE(MemoryCopy, imm, dst, src, size);
break;
}
@@ -3261,9 +3562,9 @@ class WasmFullDecoder : public WasmDecoder<validate> {
MemoryIndexImmediate<validate> imm(this, this->pc_ + 1);
if (!this->Validate(this->pc_ + 1, imm)) break;
len += imm.length;
- auto size = Pop(2, sig->GetParam(2));
- auto value = Pop(1, sig->GetParam(1));
- auto dst = Pop(0, sig->GetParam(0));
+ Value size = Pop(2, sig->GetParam(2));
+ Value value = Pop(1, sig->GetParam(1));
+ Value dst = Pop(0, sig->GetParam(0));
CALL_INTERFACE_IF_REACHABLE(MemoryFill, imm, dst, value, size);
break;
}
@@ -3271,7 +3572,7 @@ class WasmFullDecoder : public WasmDecoder<validate> {
TableInitImmediate<validate> imm(this, this->pc_);
if (!this->Validate(imm)) break;
len += imm.length;
- auto args = PopArgs(sig);
+ ArgVector args = PopArgs(sig);
CALL_INTERFACE_IF_REACHABLE(TableInit, imm, VectorOf(args));
break;
}
@@ -3286,7 +3587,7 @@ class WasmFullDecoder : public WasmDecoder<validate> {
TableCopyImmediate<validate> imm(this, this->pc_);
if (!this->Validate(imm)) break;
len += imm.length;
- auto args = PopArgs(sig);
+ ArgVector args = PopArgs(sig);
CALL_INTERFACE_IF_REACHABLE(TableCopy, imm, VectorOf(args));
break;
}
@@ -3294,9 +3595,9 @@ class WasmFullDecoder : public WasmDecoder<validate> {
TableIndexImmediate<validate> imm(this, this->pc_ + 1);
if (!this->Validate(this->pc_, imm)) break;
len += imm.length;
- auto delta = Pop(1, sig->GetParam(1));
- auto value = Pop(0, this->module_->tables[imm.index].type);
- auto* result = Push(kWasmI32);
+ Value delta = Pop(1, sig->GetParam(1));
+ Value value = Pop(0, this->module_->tables[imm.index].type);
+ Value* result = Push(kWasmI32);
CALL_INTERFACE_IF_REACHABLE(TableGrow, imm, value, delta, result);
break;
}
@@ -3304,7 +3605,7 @@ class WasmFullDecoder : public WasmDecoder<validate> {
TableIndexImmediate<validate> imm(this, this->pc_ + 1);
if (!this->Validate(this->pc_, imm)) break;
len += imm.length;
- auto* result = Push(kWasmI32);
+ Value* result = Push(kWasmI32);
CALL_INTERFACE_IF_REACHABLE(TableSize, imm, result);
break;
}
@@ -3312,9 +3613,9 @@ class WasmFullDecoder : public WasmDecoder<validate> {
TableIndexImmediate<validate> imm(this, this->pc_ + 1);
if (!this->Validate(this->pc_, imm)) break;
len += imm.length;
- auto count = Pop(2, sig->GetParam(2));
- auto value = Pop(1, this->module_->tables[imm.index].type);
- auto start = Pop(0, sig->GetParam(0));
+ Value count = Pop(2, sig->GetParam(2));
+ Value value = Pop(1, this->module_->tables[imm.index].type);
+ Value start = Pop(0, sig->GetParam(0));
CALL_INTERFACE_IF_REACHABLE(TableFill, imm, start, value, count);
break;
}
@@ -3330,6 +3631,9 @@ class WasmFullDecoder : public WasmDecoder<validate> {
void DoReturn() {
size_t return_count = this->sig_->return_count();
+ if (return_count > 1) {
+ this->detected_->Add(kFeature_mv);
+ }
DCHECK_GE(stack_.size(), return_count);
Vector<Value> return_values =
return_count == 0
@@ -3370,12 +3674,13 @@ class WasmFullDecoder : public WasmDecoder<validate> {
}
V8_INLINE Value Pop(int index, ValueType expected) {
- auto val = Pop();
- if (!VALIDATE(val.type.IsSubTypeOf(expected) || val.type == kWasmBottom ||
- expected == kWasmBottom)) {
+ Value val = Pop();
+ if (!VALIDATE(IsSubtypeOf(val.type, expected, this->module_) ||
+ val.type == kWasmBottom || expected == kWasmBottom)) {
this->errorf(val.pc, "%s[%d] expected type %s, found %s of type %s",
- SafeOpcodeNameAt(this->pc_), index, expected.type_name(),
- SafeOpcodeNameAt(val.pc), val.type.type_name());
+ SafeOpcodeNameAt(this->pc_), index,
+ expected.type_name().c_str(), SafeOpcodeNameAt(val.pc),
+ val.type.type_name().c_str());
}
return val;
}
@@ -3391,7 +3696,7 @@ class WasmFullDecoder : public WasmDecoder<validate> {
}
return UnreachableValue(this->pc_);
}
- auto val = stack_.back();
+ Value val = stack_.back();
stack_.pop_back();
return val;
}
@@ -3435,9 +3740,10 @@ class WasmFullDecoder : public WasmDecoder<validate> {
for (uint32_t i = 0; i < merge->arity; ++i) {
Value& val = stack_values[i];
Value& old = (*merge)[i];
- if (!val.type.IsSubTypeOf(old.type)) {
+ if (!IsSubtypeOf(val.type, old.type, this->module_)) {
this->errorf(this->pc_, "type error in merge[%u] (expected %s, got %s)",
- i, old.type.type_name(), val.type.type_name());
+ i, old.type.type_name().c_str(),
+ val.type.type_name().c_str());
return false;
}
}
@@ -3452,9 +3758,10 @@ class WasmFullDecoder : public WasmDecoder<validate> {
for (uint32_t i = 0; i < c->start_merge.arity; ++i) {
Value& start = c->start_merge[i];
Value& end = c->end_merge[i];
- if (!start.type.IsSubTypeOf(end.type)) {
+ if (!IsSubtypeOf(start.type, end.type, this->module_)) {
this->errorf(this->pc_, "type error in merge[%u] (expected %s, got %s)",
- i, end.type.type_name(), start.type.type_name());
+ i, end.type.type_name().c_str(),
+ start.type.type_name().c_str());
return false;
}
}
@@ -3463,7 +3770,7 @@ class WasmFullDecoder : public WasmDecoder<validate> {
}
bool TypeCheckFallThru() {
- static_assert(validate, "Call this function only whithin VALIDATE");
+ static_assert(validate, "Call this function only within VALIDATE");
Control& c = control_.back();
if (V8_LIKELY(c.reachable())) {
uint32_t expected = c.end_merge.arity;
@@ -3554,12 +3861,12 @@ class WasmFullDecoder : public WasmDecoder<validate> {
// This line requires num_returns > 0.
Value* stack_values = &*(stack_.end() - num_returns);
for (int i = 0; i < num_returns; ++i) {
- auto& val = stack_values[i];
+ Value& val = stack_values[i];
ValueType expected_type = this->sig_->GetReturn(i);
- if (!val.type.IsSubTypeOf(expected_type)) {
- this->errorf(this->pc_,
- "type error in return[%u] (expected %s, got %s)", i,
- expected_type.type_name(), val.type.type_name());
+ if (!IsSubtypeOf(val.type, expected_type, this->module_)) {
+ this->errorf(
+ this->pc_, "type error in return[%u] (expected %s, got %s)", i,
+ expected_type.type_name().c_str(), val.type.type_name().c_str());
return false;
}
}
@@ -3568,14 +3875,13 @@ class WasmFullDecoder : public WasmDecoder<validate> {
void onFirstError() override {
this->end_ = this->pc_; // Terminate decoding loop.
+ this->current_code_reachable_ = false;
TRACE(" !%s\n", this->error_.message().c_str());
CALL_INTERFACE(OnFirstError);
}
void BuildSimplePrototypeOperator(WasmOpcode opcode) {
- if (opcode == kExprRefIsNull) {
- RET_ON_PROTOTYPE_OPCODE(anyref);
- } else if (opcode == kExprRefEq) {
+ if (opcode == kExprRefEq) {
RET_ON_PROTOTYPE_OPCODE(gc);
}
const FunctionSig* sig = WasmOpcodes::Signature(opcode);
@@ -3583,39 +3889,28 @@ class WasmFullDecoder : public WasmDecoder<validate> {
}
void BuildSimpleOperator(WasmOpcode opcode, const FunctionSig* sig) {
- switch (sig->parameter_count()) {
- case 1: {
- auto val = Pop(0, sig->GetParam(0));
- auto* ret =
- sig->return_count() == 0 ? nullptr : Push(sig->GetReturn(0));
- CALL_INTERFACE_IF_REACHABLE(UnOp, opcode, val, ret);
- break;
- }
- case 2: {
- auto rval = Pop(1, sig->GetParam(1));
- auto lval = Pop(0, sig->GetParam(0));
- auto* ret =
- sig->return_count() == 0 ? nullptr : Push(sig->GetReturn(0));
- CALL_INTERFACE_IF_REACHABLE(BinOp, opcode, lval, rval, ret);
- break;
- }
- default:
- UNREACHABLE();
+ DCHECK_GE(1, sig->return_count());
+ ValueType ret = sig->return_count() == 0 ? kWasmStmt : sig->GetReturn(0);
+ if (sig->parameter_count() == 1) {
+ BuildSimpleOperator(opcode, ret, sig->GetParam(0));
+ } else {
+ DCHECK_EQ(2, sig->parameter_count());
+ BuildSimpleOperator(opcode, ret, sig->GetParam(0), sig->GetParam(1));
}
}
void BuildSimpleOperator(WasmOpcode opcode, ValueType return_type,
ValueType arg_type) {
- auto val = Pop(0, arg_type);
- auto* ret = return_type == kWasmStmt ? nullptr : Push(return_type);
+ Value val = Pop(0, arg_type);
+ Value* ret = return_type == kWasmStmt ? nullptr : Push(return_type);
CALL_INTERFACE_IF_REACHABLE(UnOp, opcode, val, ret);
}
void BuildSimpleOperator(WasmOpcode opcode, ValueType return_type,
ValueType lhs_type, ValueType rhs_type) {
- auto rval = Pop(1, rhs_type);
- auto lval = Pop(0, lhs_type);
- auto* ret = return_type == kWasmStmt ? nullptr : Push(return_type);
+ Value rval = Pop(1, rhs_type);
+ Value lval = Pop(0, lhs_type);
+ Value* ret = return_type == kWasmStmt ? nullptr : Push(return_type);
CALL_INTERFACE_IF_REACHABLE(BinOp, opcode, lval, rval, ret);
}
diff --git a/chromium/v8/src/wasm/function-body-decoder.cc b/chromium/v8/src/wasm/function-body-decoder.cc
index 8b2b027b13a..a69d4166959 100644
--- a/chromium/v8/src/wasm/function-body-decoder.cc
+++ b/chromium/v8/src/wasm/function-body-decoder.cc
@@ -13,7 +13,7 @@
#include "src/wasm/wasm-limits.h"
#include "src/wasm/wasm-linkage.h"
#include "src/wasm/wasm-module.h"
-#include "src/wasm/wasm-opcodes.h"
+#include "src/wasm/wasm-opcodes-inl.h"
namespace v8 {
namespace internal {
@@ -21,14 +21,24 @@ namespace wasm {
bool DecodeLocalDecls(const WasmFeatures& enabled, BodyLocalDecls* decls,
const byte* start, const byte* end) {
- Decoder decoder(start, end);
- if (WasmDecoder<Decoder::kValidate>::DecodeLocals(enabled, &decoder, nullptr,
- &decls->type_list)) {
+ WasmFeatures no_features = WasmFeatures::None();
+ WasmDecoder<Decoder::kValidate> decoder(nullptr, enabled, &no_features,
+ nullptr, start, end, 0);
+  // The decoded local types need to be inserted into &decls->type_list,
+  // so we pass a pointer to that list via local_types_, which will be
+  // updated in DecodeLocals.
+ decoder.local_types_ = &decls->type_list;
+ uint32_t length;
+ if (decoder.DecodeLocals(
+ decoder.pc(), &length,
+ static_cast<uint32_t>(decoder.local_types_->size()))) {
DCHECK(decoder.ok());
- decls->encoded_size = decoder.pc_offset();
+ decls->encoded_size = length;
return true;
+ } else {
+ decls->encoded_size = 0;
+ return false;
}
- return false;
}
BytecodeIterator::BytecodeIterator(const byte* start, const byte* end,
@@ -54,7 +64,9 @@ DecodeResult VerifyWasmCode(AccountingAllocator* allocator,
}
unsigned OpcodeLength(const byte* pc, const byte* end) {
- Decoder decoder(pc, end);
+ WasmFeatures no_features = WasmFeatures::None();
+ WasmDecoder<Decoder::kNoValidate> decoder(nullptr, no_features, &no_features,
+ nullptr, pc, end, 0);
return WasmDecoder<Decoder::kNoValidate>::OpcodeLength(&decoder, pc);
}
@@ -164,8 +176,10 @@ bool PrintRawWasmCode(AccountingAllocator* allocator, const FunctionBody& body,
unsigned offset = 1;
WasmOpcode opcode = i.current();
- if (WasmOpcodes::IsPrefixOpcode(opcode)) {
- os << PrefixName(opcode) << ", ";
+ WasmOpcode prefix = kExprUnreachable;
+ bool has_prefix = WasmOpcodes::IsPrefixOpcode(opcode);
+ if (has_prefix) {
+ prefix = i.current();
opcode = i.prefixed_opcode();
offset = 2;
}
@@ -181,6 +195,10 @@ bool PrintRawWasmCode(AccountingAllocator* allocator, const FunctionBody& body,
" ";
os.write(padding, num_whitespaces);
+ if (has_prefix) {
+ os << PrefixName(prefix) << ", ";
+ }
+
os << RawOpcodeName(opcode) << ",";
if (opcode == kExprLoop || opcode == kExprIf || opcode == kExprBlock ||
@@ -283,7 +301,9 @@ bool PrintRawWasmCode(AccountingAllocator* allocator, const FunctionBody& body,
BitVector* AnalyzeLoopAssignmentForTesting(Zone* zone, size_t num_locals,
const byte* start, const byte* end) {
- Decoder decoder(start, end);
+ WasmFeatures no_features = WasmFeatures::None();
+ WasmDecoder<Decoder::kValidate> decoder(nullptr, no_features, &no_features,
+ nullptr, start, end, 0);
return WasmDecoder<Decoder::kValidate>::AnalyzeLoopAssignment(
&decoder, start, static_cast<uint32_t>(num_locals), zone);
}
diff --git a/chromium/v8/src/wasm/function-body-decoder.h b/chromium/v8/src/wasm/function-body-decoder.h
index 4fab50817ca..2e14d844fa6 100644
--- a/chromium/v8/src/wasm/function-body-decoder.h
+++ b/chromium/v8/src/wasm/function-body-decoder.h
@@ -34,6 +34,8 @@ struct FunctionBody {
: sig(sig), offset(offset), start(start), end(end) {}
};
+enum class LoadTransformationKind : uint8_t { kSplat, kExtend };
+
V8_EXPORT_PRIVATE DecodeResult VerifyWasmCode(AccountingAllocator* allocator,
const WasmFeatures& enabled,
const WasmModule* module,
@@ -80,9 +82,10 @@ V8_EXPORT_PRIVATE unsigned OpcodeLength(const byte* pc, const byte* end);
// Be cautious with control opcodes: This function only covers their immediate,
// local stack effect (e.g. BrIf pops 1, Br pops 0). Those opcodes can have
// non-local stack effects, though, which are not covered here.
-std::pair<uint32_t, uint32_t> StackEffect(const WasmModule* module,
- const FunctionSig* sig,
- const byte* pc, const byte* end);
+// TODO(clemensb): This is only used by the interpreter; move there.
+V8_EXPORT_PRIVATE std::pair<uint32_t, uint32_t> StackEffect(
+ const WasmModule* module, const FunctionSig* sig, const byte* pc,
+ const byte* end);
// A simple forward iterator for bytecodes.
class V8_EXPORT_PRIVATE BytecodeIterator : public NON_EXPORTED_BASE(Decoder) {
diff --git a/chromium/v8/src/wasm/function-compiler.cc b/chromium/v8/src/wasm/function-compiler.cc
index 6b25520d84f..e268667d287 100644
--- a/chromium/v8/src/wasm/function-compiler.cc
+++ b/chromium/v8/src/wasm/function-compiler.cc
@@ -48,7 +48,7 @@ class WasmInstructionBufferImpl {
DCHECK_LT(size(), new_size);
holder_->old_buffer_ = std::move(holder_->buffer_);
- holder_->buffer_ = OwnedVector<uint8_t>::New(new_size);
+ holder_->buffer_ = OwnedVector<uint8_t>::NewForOverwrite(new_size);
return std::make_unique<View>(holder_->buffer_.as_vector(), holder_);
}
@@ -58,7 +58,7 @@ class WasmInstructionBufferImpl {
};
explicit WasmInstructionBufferImpl(size_t size)
- : buffer_(OwnedVector<uint8_t>::New(size)) {}
+ : buffer_(OwnedVector<uint8_t>::NewForOverwrite(size)) {}
std::unique_ptr<AssemblerBuffer> CreateView() {
DCHECK_NOT_NULL(buffer_);
@@ -278,7 +278,8 @@ JSToWasmWrapperCompilationUnit::JSToWasmWrapperCompilationUnit(
JSToWasmWrapperCompilationUnit::~JSToWasmWrapperCompilationUnit() = default;
void JSToWasmWrapperCompilationUnit::Execute() {
- TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.wasm"), "CompileJSToWasmWrapper");
+ TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.wasm.detailed"),
+ "wasm.CompileJSToWasmWrapper");
CompilationJob::Status status = job_->ExecuteJob(nullptr);
CHECK_EQ(status, CompilationJob::SUCCEEDED);
}
diff --git a/chromium/v8/src/wasm/function-compiler.h b/chromium/v8/src/wasm/function-compiler.h
index c66c748064a..27fd54eb3b2 100644
--- a/chromium/v8/src/wasm/function-compiler.h
+++ b/chromium/v8/src/wasm/function-compiler.h
@@ -31,6 +31,7 @@ struct WasmFunction;
class WasmInstructionBuffer final {
public:
+ WasmInstructionBuffer() = delete;
~WasmInstructionBuffer();
std::unique_ptr<AssemblerBuffer> CreateView();
std::unique_ptr<uint8_t[]> ReleaseBuffer();
@@ -44,7 +45,6 @@ class WasmInstructionBuffer final {
void operator delete(void* ptr) { ::operator delete(ptr); }
private:
- WasmInstructionBuffer() = delete;
DISALLOW_COPY_AND_ASSIGN(WasmInstructionBuffer);
};
diff --git a/chromium/v8/src/wasm/graph-builder-interface.cc b/chromium/v8/src/wasm/graph-builder-interface.cc
index 5d23dbf1836..dc8cbf20f05 100644
--- a/chromium/v8/src/wasm/graph-builder-interface.cc
+++ b/chromium/v8/src/wasm/graph-builder-interface.cc
@@ -16,7 +16,7 @@
#include "src/wasm/wasm-limits.h"
#include "src/wasm/wasm-linkage.h"
#include "src/wasm/wasm-module.h"
-#include "src/wasm/wasm-opcodes.h"
+#include "src/wasm/wasm-opcodes-inl.h"
namespace v8 {
namespace internal {
@@ -28,18 +28,33 @@ namespace {
// as well as the current effect and control dependency in the TF graph.
// It maintains a control state that tracks whether the environment
// is reachable, has reached a control end, or has been merged.
-struct SsaEnv {
+struct SsaEnv : public ZoneObject {
enum State { kControlEnd, kUnreachable, kReached, kMerged };
State state;
TFNode* control;
TFNode* effect;
compiler::WasmInstanceCacheNodes instance_cache;
- TFNode** locals;
+ ZoneVector<TFNode*> locals;
+
+ SsaEnv(Zone* zone, State state, TFNode* control, TFNode* effect,
+ uint32_t locals_size)
+ : state(state), control(control), effect(effect), locals(zone) {
+ if (locals_size > 0) locals.resize(locals_size);
+ }
+
+ SsaEnv(const SsaEnv& other) V8_NOEXCEPT = default;
+ SsaEnv(SsaEnv&& other) V8_NOEXCEPT : state(other.state),
+ control(other.control),
+ effect(other.effect),
+ instance_cache(other.instance_cache),
+ locals(std::move(other.locals)) {
+ other.Kill(kUnreachable);
+ }
void Kill(State new_state = kControlEnd) {
state = new_state;
- locals = nullptr;
+ locals.clear();
control = nullptr;
effect = nullptr;
instance_cache = {};
@@ -98,22 +113,14 @@ class WasmGraphBuildingInterface {
: builder_(builder) {}
void StartFunction(FullDecoder* decoder) {
- SsaEnv* ssa_env =
- reinterpret_cast<SsaEnv*>(decoder->zone()->New(sizeof(SsaEnv)));
- uint32_t num_locals = decoder->num_locals();
- uint32_t env_count = num_locals;
- size_t size = sizeof(TFNode*) * env_count;
- ssa_env->state = SsaEnv::kReached;
- ssa_env->locals =
- size > 0 ? reinterpret_cast<TFNode**>(decoder->zone()->New(size))
- : nullptr;
-
// The first '+ 1' is needed by TF Start node, the second '+ 1' is for the
// instance parameter.
TFNode* start = builder_->Start(
static_cast<int>(decoder->sig_->parameter_count() + 1 + 1));
- ssa_env->effect = start;
- ssa_env->control = start;
+ uint32_t num_locals = decoder->num_locals();
+ SsaEnv* ssa_env = new (decoder->zone())
+ SsaEnv(decoder->zone(), SsaEnv::kReached, start, start, num_locals);
+
// Initialize effect and control before initializing the locals default
// values (which might require instance loads) or loading the context.
builder_->SetEffectControl(start);
@@ -135,6 +142,8 @@ class WasmGraphBuildingInterface {
}
SetEnv(ssa_env);
LoadContextIntoSsa(ssa_env);
+
+ if (FLAG_trace_wasm) BUILD(TraceFunctionEntry, decoder->position());
}
// Reload the instance cache entries into the Ssa Environment.
@@ -174,7 +183,7 @@ class WasmGraphBuildingInterface {
void Try(FullDecoder* decoder, Control* block) {
SsaEnv* outer_env = ssa_env_;
- SsaEnv* catch_env = Split(decoder, outer_env);
+ SsaEnv* catch_env = Split(decoder->zone(), outer_env);
// Mark the catch environment as unreachable, since it is only accessible
// through catch unwinding (i.e. landing pads).
catch_env->state = SsaEnv::kUnreachable;
@@ -192,7 +201,7 @@ class WasmGraphBuildingInterface {
TFNode* if_false = nullptr;
BUILD(BranchNoHint, cond.node, &if_true, &if_false);
SsaEnv* end_env = ssa_env_;
- SsaEnv* false_env = Split(decoder, ssa_env_);
+ SsaEnv* false_env = Split(decoder->zone(), ssa_env_);
false_env->control = if_false;
SsaEnv* true_env = Steal(decoder->zone(), ssa_env_);
true_env->control = if_true;
@@ -232,7 +241,8 @@ class WasmGraphBuildingInterface {
void BinOp(FullDecoder* decoder, WasmOpcode opcode, const Value& lhs,
const Value& rhs, Value* result) {
- auto node = BUILD(Binop, opcode, lhs.node, rhs.node, decoder->position());
+ TFNode* node =
+ BUILD(Binop, opcode, lhs.node, rhs.node, decoder->position());
if (result) result->node = node;
}
@@ -269,28 +279,41 @@ class WasmGraphBuildingInterface {
void DoReturn(FullDecoder* decoder, Vector<Value> values) {
base::SmallVector<TFNode*, 8> nodes(values.size());
GetNodes(nodes.begin(), values);
+ if (FLAG_trace_wasm) {
+ BUILD(TraceFunctionExit, VectorOf(nodes), decoder->position());
+ }
BUILD(Return, VectorOf(nodes));
}
void LocalGet(FullDecoder* decoder, Value* result,
const LocalIndexImmediate<validate>& imm) {
- if (!ssa_env_->locals) return; // unreachable
result->node = ssa_env_->locals[imm.index];
}
void LocalSet(FullDecoder* decoder, const Value& value,
const LocalIndexImmediate<validate>& imm) {
- if (!ssa_env_->locals) return; // unreachable
ssa_env_->locals[imm.index] = value.node;
}
void LocalTee(FullDecoder* decoder, const Value& value, Value* result,
const LocalIndexImmediate<validate>& imm) {
result->node = value.node;
- if (!ssa_env_->locals) return; // unreachable
ssa_env_->locals[imm.index] = value.node;
}
+ void AllocateLocals(FullDecoder* decoder, Vector<Value> local_values) {
+ ZoneVector<TFNode*>* locals = &ssa_env_->locals;
+ locals->insert(locals->begin(), local_values.size(), nullptr);
+ for (uint32_t i = 0; i < local_values.size(); i++) {
+ (*locals)[i] = local_values[i].node;
+ }
+ }
+
+ void DeallocateLocals(FullDecoder* decoder, uint32_t count) {
+ ZoneVector<TFNode*>* locals = &ssa_env_->locals;
+ locals->erase(locals->begin(), locals->begin() + count);
+ }
+
void GlobalGet(FullDecoder* decoder, Value* result,
const GlobalIndexImmediate<validate>& imm) {
result->node = BUILD(GlobalGet, imm.index);
@@ -345,7 +368,7 @@ class WasmGraphBuildingInterface {
void BrIf(FullDecoder* decoder, const Value& cond, uint32_t depth) {
SsaEnv* fenv = ssa_env_;
- SsaEnv* tenv = Split(decoder, fenv);
+ SsaEnv* tenv = Split(decoder->zone(), fenv);
fenv->SetNotMerged();
BUILD(BranchNoHint, cond.node, &tenv->control, &fenv->control);
builder_->SetControl(fenv->control);
@@ -373,7 +396,7 @@ class WasmGraphBuildingInterface {
while (iterator.has_next()) {
uint32_t i = iterator.cur_index();
uint32_t target = iterator.next();
- SetEnv(Split(decoder, copy));
+ SetEnv(Split(decoder->zone(), copy));
builder_->SetControl(i == imm.table_count ? BUILD(IfDefault, sw)
: BUILD(IfValue, i, sw));
BrOrRet(decoder, target);
@@ -452,7 +475,7 @@ class WasmGraphBuildingInterface {
void BrOnNull(FullDecoder* decoder, const Value& ref_object, uint32_t depth) {
SsaEnv* non_null_env = ssa_env_;
- SsaEnv* null_env = Split(decoder, non_null_env);
+ SsaEnv* null_env = Split(decoder->zone(), non_null_env);
non_null_env->SetNotMerged();
BUILD(BrOnNull, ref_object.node, &null_env->control,
&non_null_env->control);
@@ -514,7 +537,7 @@ class WasmGraphBuildingInterface {
TFNode* exception_tag = BUILD(LoadExceptionTagFromTable, imm.index);
TFNode* compare = BUILD(ExceptionTagEqual, caught_tag, exception_tag);
BUILD(BranchNoHint, compare, &if_match, &if_no_match);
- SsaEnv* if_no_match_env = Split(decoder, ssa_env_);
+ SsaEnv* if_no_match_env = Split(decoder->zone(), ssa_env_);
SsaEnv* if_match_env = Steal(decoder->zone(), ssa_env_);
if_no_match_env->control = if_no_match;
if_match_env->control = if_match;
@@ -536,6 +559,7 @@ class WasmGraphBuildingInterface {
void Catch(FullDecoder* decoder, Control* block, Value* exception) {
DCHECK(block->is_try_catch());
+ DCHECK_EQ(decoder->control_at(0), block);
current_catch_ = block->previous_catch; // Pop try scope.
@@ -543,7 +567,7 @@ class WasmGraphBuildingInterface {
// exist. We only build a landing pad if some node in the try block can
// (possibly) throw. Otherwise the catch environments remain empty.
if (!block->try_info->might_throw()) {
- block->reachability = kSpecOnlyReachable;
+ decoder->SetSucceedingCodeDynamicallyUnreachable();
return;
}
@@ -630,14 +654,15 @@ class WasmGraphBuildingInterface {
}
void StructGet(FullDecoder* decoder, const Value& struct_object,
- const FieldIndexImmediate<validate>& field, Value* result) {
+ const FieldIndexImmediate<validate>& field, bool is_signed,
+ Value* result) {
using CheckForNull = compiler::WasmGraphBuilder::CheckForNull;
CheckForNull null_check = struct_object.type.kind() == ValueType::kRef
? CheckForNull::kWithoutNullCheck
: CheckForNull::kWithNullCheck;
result->node =
BUILD(StructGet, struct_object.node, field.struct_index.struct_type,
- field.index, null_check, decoder->position());
+ field.index, null_check, is_signed, decoder->position());
}
void StructSet(FullDecoder* decoder, const Value& struct_object,
@@ -660,9 +685,9 @@ class WasmGraphBuildingInterface {
void ArrayGet(FullDecoder* decoder, const Value& array_obj,
const ArrayIndexImmediate<validate>& imm, const Value& index,
- Value* result) {
+ bool is_signed, Value* result) {
result->node = BUILD(ArrayGet, array_obj.node, imm.array_type, index.node,
- decoder->position());
+ is_signed, decoder->position());
}
void ArraySet(FullDecoder* decoder, const Value& array_obj,
@@ -676,6 +701,11 @@ class WasmGraphBuildingInterface {
result->node = BUILD(ArrayLen, array_obj.node, decoder->position());
}
+ void RttCanon(FullDecoder* decoder, const TypeIndexImmediate<validate>& imm,
+ Value* result) {
+ result->node = BUILD(RttCanon, imm.index);
+ }
+
void PassThrough(FullDecoder* decoder, const Value& from, Value* to) {
to->node = from.node;
}
@@ -755,7 +785,7 @@ class WasmGraphBuildingInterface {
SsaEnv* success_env = Steal(decoder->zone(), ssa_env_);
success_env->control = if_success;
- SsaEnv* exception_env = Split(decoder, success_env);
+ SsaEnv* exception_env = Split(decoder->zone(), success_env);
exception_env->control = if_exception;
exception_env->effect = if_exception;
SetEnv(exception_env);
@@ -777,6 +807,8 @@ class WasmGraphBuildingInterface {
TFNode* DefaultValue(ValueType type) {
switch (type.kind()) {
+ case ValueType::kI8:
+ case ValueType::kI16:
case ValueType::kI32:
return builder_->Int32Constant(0);
case ValueType::kI64:
@@ -787,14 +819,12 @@ class WasmGraphBuildingInterface {
return builder_->Float64Constant(0);
case ValueType::kS128:
return builder_->S128Zero();
- case ValueType::kAnyRef:
- case ValueType::kFuncRef:
- case ValueType::kNullRef:
- case ValueType::kExnRef:
case ValueType::kOptRef:
- case ValueType::kEqRef:
return builder_->RefNull();
- default:
+ case ValueType::kRtt:
+ case ValueType::kStmt:
+ case ValueType::kBottom:
+ case ValueType::kRef:
UNREACHABLE();
}
}
@@ -920,7 +950,7 @@ class WasmGraphBuildingInterface {
control());
}
- SetEnv(Split(decoder, ssa_env_));
+ SetEnv(Split(decoder->zone(), ssa_env_));
builder_->StackCheck(decoder->position());
return;
}
@@ -934,32 +964,19 @@ class WasmGraphBuildingInterface {
// Conservatively introduce phis for instance cache.
builder_->PrepareInstanceCacheForLoop(&ssa_env_->instance_cache, control());
- SetEnv(Split(decoder, ssa_env_));
+ SetEnv(Split(decoder->zone(), ssa_env_));
builder_->StackCheck(decoder->position());
}
// Create a complete copy of {from}.
- SsaEnv* Split(FullDecoder* decoder, SsaEnv* from) {
+ SsaEnv* Split(Zone* zone, SsaEnv* from) {
DCHECK_NOT_NULL(from);
if (from == ssa_env_) {
ssa_env_->control = control();
ssa_env_->effect = effect();
}
- SsaEnv* result =
- reinterpret_cast<SsaEnv*>(decoder->zone()->New(sizeof(SsaEnv)));
- size_t size = sizeof(TFNode*) * decoder->num_locals();
- result->control = from->control;
- result->effect = from->effect;
-
+ SsaEnv* result = new (zone) SsaEnv(*from);
result->state = SsaEnv::kReached;
- if (size > 0) {
- result->locals = reinterpret_cast<TFNode**>(decoder->zone()->New(size));
- memcpy(result->locals, from->locals, size);
- } else {
- result->locals = nullptr;
- }
- result->instance_cache = from->instance_cache;
-
return result;
}
@@ -971,25 +988,14 @@ class WasmGraphBuildingInterface {
ssa_env_->control = control();
ssa_env_->effect = effect();
}
- SsaEnv* result = reinterpret_cast<SsaEnv*>(zone->New(sizeof(SsaEnv)));
+ SsaEnv* result = new (zone) SsaEnv(std::move(*from));
result->state = SsaEnv::kReached;
- result->locals = from->locals;
- result->control = from->control;
- result->effect = from->effect;
- result->instance_cache = from->instance_cache;
- from->Kill(SsaEnv::kUnreachable);
return result;
}
// Create an unreachable environment.
SsaEnv* UnreachableEnv(Zone* zone) {
- SsaEnv* result = reinterpret_cast<SsaEnv*>(zone->New(sizeof(SsaEnv)));
- result->state = SsaEnv::kUnreachable;
- result->control = nullptr;
- result->effect = nullptr;
- result->locals = nullptr;
- result->instance_cache = {};
- return result;
+ return new (zone) SsaEnv(zone, SsaEnv::kUnreachable, nullptr, nullptr, 0);
}
void DoCall(FullDecoder* decoder, uint32_t table_index, TFNode* index_node,
diff --git a/chromium/v8/src/wasm/local-decl-encoder.cc b/chromium/v8/src/wasm/local-decl-encoder.cc
index 257f384bef3..aea6e573e9b 100644
--- a/chromium/v8/src/wasm/local-decl-encoder.cc
+++ b/chromium/v8/src/wasm/local-decl-encoder.cc
@@ -28,11 +28,17 @@ size_t LocalDeclEncoder::Emit(byte* buffer) const {
byte* pos = buffer;
LEBHelper::write_u32v(&pos, static_cast<uint32_t>(local_decls.size()));
for (auto& local_decl : local_decls) {
- LEBHelper::write_u32v(&pos, local_decl.first);
- *pos = local_decl.second.value_type_code();
+ uint32_t locals_count = local_decl.first;
+ ValueType locals_type = local_decl.second;
+ LEBHelper::write_u32v(&pos, locals_count);
+ *pos = locals_type.value_type_code();
++pos;
- if (local_decl.second.has_immediate()) {
- LEBHelper::write_u32v(&pos, local_decl.second.ref_index());
+ if (locals_type.has_depth()) {
+ *pos = locals_type.depth();
+ ++pos;
+ }
+ if (locals_type.encoding_needs_heap_type()) {
+ LEBHelper::write_u32v(&pos, locals_type.heap_type_code());
}
}
DCHECK_EQ(Size(), pos - buffer);
@@ -56,11 +62,12 @@ uint32_t LocalDeclEncoder::AddLocals(uint32_t count, ValueType type) {
size_t LocalDeclEncoder::Size() const {
size_t size = LEBHelper::sizeof_u32v(local_decls.size());
for (auto p : local_decls) {
- size +=
- LEBHelper::sizeof_u32v(p.first) + // number of locals
- 1 + // Opcode
- (p.second.has_immediate() ? LEBHelper::sizeof_u32v(p.second.ref_index())
- : 0); // immediate
+ size += LEBHelper::sizeof_u32v(p.first) + // number of locals
+ 1 + // Opcode
+ (p.second.has_depth() ? 1 : 0) + // Inheritance depth
+ (p.second.encoding_needs_heap_type()
+ ? LEBHelper::sizeof_u32v(p.second.heap_type_code())
+ : 0); // ref. index
}
return size;
}
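// ---------------------------------------------------------------------------
// Editor's note (illustrative sketch, not part of the patch above): the
// LocalDeclEncoder changes rely on LEBHelper::write_u32v to emit unsigned
// LEB128 values. A minimal, self-contained version of that encoding could
// look like the following; the real V8 helper may differ in name, signature,
// and bounds handling.
#include <cstdint>

inline void WriteU32LEB128(uint8_t** pos, uint32_t value) {
  do {
    uint8_t b = value & 0x7f;   // low 7 bits of the value
    value >>= 7;
    if (value != 0) b |= 0x80;  // continuation bit: more bytes follow
    *(*pos)++ = b;
  } while (value != 0);
}
// For example, 300 (0b1'0010'1100) encodes as the two bytes 0xAC 0x02.
// ---------------------------------------------------------------------------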
diff --git a/chromium/v8/src/wasm/memory-tracing.h b/chromium/v8/src/wasm/memory-tracing.h
index 15457399c17..9ea605b3563 100644
--- a/chromium/v8/src/wasm/memory-tracing.h
+++ b/chromium/v8/src/wasm/memory-tracing.h
@@ -30,8 +30,10 @@ struct MemoryTracingInfo {
// Callback for tracing a memory operation for debugging.
// Triggered by --wasm-trace-memory.
-void TraceMemoryOperation(ExecutionTier, const MemoryTracingInfo* info,
- int func_index, int position, uint8_t* mem_start);
+V8_EXPORT_PRIVATE void TraceMemoryOperation(ExecutionTier,
+ const MemoryTracingInfo* info,
+ int func_index, int position,
+ uint8_t* mem_start);
} // namespace wasm
} // namespace internal
diff --git a/chromium/v8/src/wasm/module-compiler.cc b/chromium/v8/src/wasm/module-compiler.cc
index 9f6e91c73ea..94cc15cb11b 100644
--- a/chromium/v8/src/wasm/module-compiler.cc
+++ b/chromium/v8/src/wasm/module-compiler.cc
@@ -627,18 +627,26 @@ void BackgroundCompileToken::PublishCode(
NativeModule* native_module, Vector<std::unique_ptr<WasmCode>> code) {
WasmCodeRefScope code_ref_scope;
std::vector<WasmCode*> published_code = native_module->PublishCode(code);
- native_module->engine()->LogCode(VectorOf(published_code));
+ // Defer logging code in case wire bytes were not fully received yet.
+ if (native_module->HasWireBytes()) {
+ native_module->engine()->LogCode(VectorOf(published_code));
+ }
Impl(native_module->compilation_state())
->OnFinishedUnits(VectorOf(published_code));
}
void UpdateFeatureUseCounts(Isolate* isolate, const WasmFeatures& detected) {
- if (detected.has_threads()) {
- isolate->CountUsage(v8::Isolate::UseCounterFeature::kWasmThreadOpcodes);
- }
- if (detected.has_simd()) {
- isolate->CountUsage(v8::Isolate::UseCounterFeature::kWasmSimdOpcodes);
+ using Feature = v8::Isolate::UseCounterFeature;
+ constexpr static std::pair<WasmFeature, Feature> kUseCounters[] = {
+ {kFeature_reftypes, Feature::kWasmRefTypes},
+ {kFeature_bulk_memory, Feature::kWasmBulkMemory},
+ {kFeature_mv, Feature::kWasmMultiValue},
+ {kFeature_simd, Feature::kWasmSimdOpcodes},
+ {kFeature_threads, Feature::kWasmThreadOpcodes}};
+
+ for (auto& feature : kUseCounters) {
+ if (detected.contains(feature.first)) isolate->CountUsage(feature.second);
}
}
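// ---------------------------------------------------------------------------
// Editor's note (illustrative sketch, not part of the patch): the loop above
// queries the detected feature set via contains(). Conceptually this is a
// small bitset keyed by the feature enum; the names below are assumptions for
// illustration only, and the real WasmFeatures / v8::base::EnumSet types
// differ in detail.
#include <cstdint>

enum FeatureSketch : int { kSketchReftypes = 0, kSketchSimd = 1, kSketchThreads = 2 };

class FeatureSetSketch {
 public:
  void Add(FeatureSketch f) { bits_ |= uint64_t{1} << f; }
  bool contains(FeatureSketch f) const { return (bits_ >> f) & 1; }

 private:
  uint64_t bits_ = 0;
};
// ---------------------------------------------------------------------------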
@@ -802,6 +810,9 @@ class CompilationUnitBuilder {
ExecutionTierPair tiers = GetRequestedExecutionTiers(
native_module_->module(), compilation_state()->compile_mode(),
native_module_->enabled_features(), func_index);
+ // Compile everything for non-debugging initially. If needed, we will tier
+ // down when the module is fully compiled. Synchronization would be pretty
+ // difficult otherwise.
baseline_units_.emplace_back(func_index, tiers.baseline_tier, kNoDebugging);
if (tiers.baseline_tier != tiers.top_tier) {
tiering_units_.emplace_back(func_index, tiers.top_tier, kNoDebugging);
@@ -1038,15 +1049,13 @@ bool ExecuteJSToWasmWrapperCompilationUnits(
return true;
}
-bool NeedsDeterministicCompile() { return FLAG_single_threaded; }
-
// Run by the main thread and background tasks to take part in compilation.
// Returns whether any units were executed.
bool ExecuteCompilationUnits(
const std::shared_ptr<BackgroundCompileToken>& token, Counters* counters,
int task_id, CompileBaselineOnly baseline_only) {
TRACE_COMPILE("Compiling (task %d)...\n", task_id);
- TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.wasm"), "ExecuteCompilationUnits");
+ TRACE_EVENT0("v8.wasm", "wasm.ExecuteCompilationUnits");
// Execute JS to Wasm wrapper units first, so that they are ready to be
// finalized by the main thread when the kFinishedBaselineCompilation event is
@@ -1067,7 +1076,6 @@ bool ExecuteCompilationUnits(
// These fields are initialized in a {BackgroundCompileScope} before
// starting compilation.
double deadline = 0;
- const bool deterministic = NeedsDeterministicCompile();
base::Optional<CompilationEnv> env;
std::shared_ptr<WireBytesStorage> wire_bytes;
std::shared_ptr<const WasmModule> module;
@@ -1108,8 +1116,9 @@ bool ExecuteCompilationUnits(
auto publish_results = [&results_to_publish](
BackgroundCompileScope* compile_scope) {
- TRACE_EVENT1(TRACE_DISABLED_BY_DEFAULT("v8.wasm"), "PublishResults",
- "num_results", results_to_publish.size());
+ TRACE_EVENT1(TRACE_DISABLED_BY_DEFAULT("v8.wasm.detailed"),
+ "wasm.PublishCompilationResults", "num_results",
+ results_to_publish.size());
if (results_to_publish.empty()) return;
std::vector<std::unique_ptr<WasmCode>> unpublished_code =
compile_scope->native_module()->AddCompiledCode(
@@ -1161,7 +1170,8 @@ bool ExecuteCompilationUnits(
}
// Get next unit.
- if (deterministic || deadline < platform->MonotonicallyIncreasingTime()) {
+ if (FLAG_predictable ||
+ deadline < platform->MonotonicallyIncreasingTime()) {
unit = {};
} else {
unit = compile_scope.compilation_state()->GetNextCompilationUnit(
@@ -1419,9 +1429,15 @@ std::shared_ptr<NativeModule> CompileToNativeModule(
std::shared_ptr<const WasmModule> module, const ModuleWireBytes& wire_bytes,
Handle<FixedArray>* export_wrappers_out) {
const WasmModule* wasm_module = module.get();
+ OwnedVector<uint8_t> wire_bytes_copy =
+ OwnedVector<uint8_t>::Of(wire_bytes.module_bytes());
+ // Prefer {wire_bytes_copy} to {wire_bytes.module_bytes()} for the temporary
+ // cache key. When we eventually install the module in the cache, the wire
+ // bytes of the temporary key and the new key have the same base pointer and
+ // we can skip the full bytes comparison.
std::shared_ptr<NativeModule> native_module =
isolate->wasm_engine()->MaybeGetNativeModule(
- wasm_module->origin, wire_bytes.module_bytes(), isolate);
+ wasm_module->origin, wire_bytes_copy.as_vector(), isolate);
if (native_module) {
// TODO(thibaudm): Look into sharing export wrappers.
CompileJsToWasmWrappers(isolate, wasm_module, export_wrappers_out);
@@ -1435,8 +1451,6 @@ std::shared_ptr<NativeModule> CompileToNativeModule(
if (wasm_module->has_shared_memory) {
isolate->CountUsage(v8::Isolate::UseCounterFeature::kWasmSharedMemory);
}
- OwnedVector<uint8_t> wire_bytes_copy =
- OwnedVector<uint8_t>::Of(wire_bytes.module_bytes());
// Create a new {NativeModule} first.
const bool uses_liftoff = module->origin == kWasmOrigin && FLAG_liftoff;
@@ -1481,20 +1495,17 @@ void RecompileNativeModule(NativeModule* native_module,
}
});
- // We only wait for tier down. Tier up can happen in the background.
- if (tiering_state == kTieredDown) {
- // The main thread contributes to the compilation.
- constexpr Counters* kNoCounters = nullptr;
- while (ExecuteCompilationUnits(
- compilation_state->background_compile_token(), kNoCounters,
- kMainThreadTaskId, kBaselineOnly)) {
- // Continue executing compilation units.
- }
-
- // Now wait until baseline recompilation finished.
- recompilation_finished_semaphore->Wait();
- DCHECK(!compilation_state->failed());
+ // The main thread contributes to the compilation.
+ constexpr Counters* kNoCounters = nullptr;
+ while (ExecuteCompilationUnits(compilation_state->background_compile_token(),
+ kNoCounters, kMainThreadTaskId,
+ kBaselineOnly)) {
+ // Continue executing compilation units.
}
+
+ // Now wait until all compilation units finished.
+ recompilation_finished_semaphore->Wait();
+ DCHECK(!compilation_state->failed());
}
AsyncCompileJob::AsyncCompileJob(
@@ -1510,7 +1521,9 @@ AsyncCompileJob::AsyncCompileJob(
bytes_copy_(std::move(bytes_copy)),
wire_bytes_(bytes_copy_.get(), bytes_copy_.get() + length),
resolver_(std::move(resolver)) {
- TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.wasm"), "new AsyncCompileJob");
+ TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.wasm.detailed"),
+ "wasm.AsyncCompileJob");
+ CHECK(FLAG_wasm_async_compilation);
CHECK(!FLAG_jitless);
v8::Isolate* v8_isolate = reinterpret_cast<v8::Isolate*>(isolate);
v8::Platform* platform = V8::GetCurrentPlatform();
@@ -1536,7 +1549,7 @@ class AsyncStreamingProcessor final : public StreamingProcessor {
std::shared_ptr<Counters> counters,
AccountingAllocator* allocator);
- ~AsyncStreamingProcessor();
+ ~AsyncStreamingProcessor() override;
bool ProcessModuleHeader(Vector<const uint8_t> bytes,
uint32_t offset) override;
@@ -1586,8 +1599,9 @@ class AsyncStreamingProcessor final : public StreamingProcessor {
std::shared_ptr<StreamingDecoder> AsyncCompileJob::CreateStreamingDecoder() {
DCHECK_NULL(stream_);
- stream_.reset(new StreamingDecoder(std::make_unique<AsyncStreamingProcessor>(
- this, isolate_->async_counters(), isolate_->allocator())));
+ stream_ = StreamingDecoder::CreateAsyncStreamingDecoder(
+ std::make_unique<AsyncStreamingProcessor>(
+ this, isolate_->async_counters(), isolate_->allocator()));
return stream_;
}
@@ -1656,8 +1670,8 @@ void AsyncCompileJob::PrepareRuntimeObjects() {
// This function assumes that it is executed in a HandleScope, and that a
// context is set on the isolate.
void AsyncCompileJob::FinishCompile(bool is_after_cache_hit) {
- TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.wasm"),
- "AsyncCompileJob::FinishCompile");
+ TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.wasm.detailed"),
+ "wasm.FinishAsyncCompile");
bool is_after_deserialization = !module_object_.is_null();
auto compilation_state = Impl(native_module_->compilation_state());
if (!is_after_deserialization) {
@@ -1689,7 +1703,8 @@ void AsyncCompileJob::FinishCompile(bool is_after_cache_hit) {
script->set_source_mapping_url(*src_map_str.ToHandleChecked());
}
{
- TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.wasm"), "Debug::OnAfterCompile");
+ TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.wasm.detailed"),
+ "wasm.Debug.OnAfterCompile");
isolate_->debug()->OnAfterCompile(script);
}
@@ -1736,8 +1751,8 @@ void AsyncCompileJob::AsyncCompileFailed() {
}
void AsyncCompileJob::AsyncCompileSucceeded(Handle<WasmModuleObject> result) {
- TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.wasm"),
- "CompilationResultResolver::OnCompilationSucceeded");
+ TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.wasm.detailed"),
+ "wasm.OnCompilationSucceeded");
resolver_->OnCompilationSucceeded(result);
}
@@ -1771,8 +1786,11 @@ class AsyncCompileJob::CompilationStateCallback {
case CompilationEvent::kFailedCompilation:
DCHECK(!last_event_.has_value());
if (job_->DecrementAndCheckFinisherCount()) {
+ // Don't update {job_->native_module_} to avoid data races with other
+ // compilation threads. Use a copy of the shared pointer instead.
+ std::shared_ptr<NativeModule> native_module = job_->native_module_;
job_->isolate_->wasm_engine()->UpdateNativeModuleCache(
- true, &job_->native_module_, job_->isolate_);
+ true, &native_module, job_->isolate_);
job_->DoSync<CompileFailed>();
}
break;
@@ -1781,8 +1799,6 @@ class AsyncCompileJob::CompilationStateCallback {
// {kFinishedTopTierCompilation}, hence don't remember this in
// {last_event_}.
return;
- default:
- UNREACHABLE();
}
#ifdef DEBUG
last_event_ = event;
@@ -1933,8 +1949,8 @@ class AsyncCompileJob::DecodeModule : public AsyncCompileJob::CompileStep {
DisallowHeapAllocation no_allocation;
// Decode the module bytes.
TRACE_COMPILE("(1) Decoding module...\n");
- TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.wasm"),
- "AsyncCompileJob::DecodeModule");
+ TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.wasm.detailed"),
+ "wasm.DecodeModule");
auto enabled_features = job->enabled_features_;
result = DecodeWasmModule(enabled_features, job->wire_bytes_.start(),
job->wire_bytes_.end(), false, kWasmOrigin,
@@ -2404,9 +2420,17 @@ void AsyncStreamingProcessor::OnFinishedStream(OwnedVector<uint8_t> bytes) {
} else {
job_->native_module_->SetWireBytes(
{std::move(job_->bytes_copy_), job_->wire_bytes_.length()});
+ job_->native_module_->LogWasmCodes(job_->isolate_);
}
const bool needs_finish = job_->DecrementAndCheckFinisherCount();
DCHECK_IMPLIES(!has_code_section, needs_finish);
+  // We might need to recompile the module for debugging if the debugger was
+ // enabled while streaming compilation was running. Since handling this while
+ // compiling via streaming is tricky, we just tier down now, before publishing
+ // the module.
+ if (job_->native_module_->IsTieredDown()) {
+ job_->native_module_->RecompileForTiering();
+ }
if (needs_finish) {
const bool failed = job_->native_module_->compilation_state()->failed();
if (!cache_hit) {
@@ -2434,6 +2458,7 @@ void AsyncStreamingProcessor::OnAbort() {
bool AsyncStreamingProcessor::Deserialize(Vector<const uint8_t> module_bytes,
Vector<const uint8_t> wire_bytes) {
+ TRACE_EVENT0("v8.wasm", "wasm.Deserialize");
// DeserializeNativeModule and FinishCompile assume that they are executed in
// a HandleScope, and that a context is set on the isolate.
HandleScope scope(job_->isolate_);
@@ -2453,7 +2478,6 @@ bool AsyncStreamingProcessor::Deserialize(Vector<const uint8_t> module_bytes,
}
int GetMaxBackgroundTasks() {
- if (NeedsDeterministicCompile()) return 0;
int num_worker_threads = V8::GetCurrentPlatform()->NumberOfWorkerThreads();
return std::min(FLAG_wasm_num_compilation_tasks, num_worker_threads);
}
@@ -2569,36 +2593,54 @@ void CompilationStateImpl::InitializeRecompilation(
// Generate necessary compilation units on the fly.
CompilationUnitBuilder builder(native_module_);
+ // Information about compilation progress is shared between this class and the
+ // NativeModule. Before updating information here, consult the NativeModule to
+ // find all functions that need recompilation.
+ // Since the current tiering state is updated on the NativeModule before
+ // triggering recompilation, it's OK if the information is slightly outdated.
+ // If we compile functions twice, the NativeModule will ignore all redundant
+ // code (or code compiled for the wrong tier).
+ std::vector<int> recompile_function_indexes =
+ native_module_->FindFunctionsToRecompile(new_tiering_state);
+
{
base::MutexGuard guard(&callbacks_mutex_);
- // Restart recompilation if another recompilation is already happening.
- outstanding_recompilation_functions_ = 0;
- // If compilation hasn't started yet then code would be kept as tiered-down
- // and don't need to recompile.
+ callbacks_.emplace_back(std::move(recompilation_finished_callback));
+ tiering_state_ = new_tiering_state;
+
+ // If compilation progress is not initialized yet, then compilation didn't
+ // start yet, and new code will be kept tiered-down from the start. For
+ // streaming compilation, there is a special path to tier down later, when
+ // the module is complete. In any case, we don't need to recompile here.
if (compilation_progress_.size() > 0) {
const WasmModule* module = native_module_->module();
+ DCHECK_EQ(module->num_declared_functions, compilation_progress_.size());
+ DCHECK_GE(module->num_declared_functions,
+ recompile_function_indexes.size());
+ outstanding_recompilation_functions_ =
+ static_cast<int>(recompile_function_indexes.size());
+ // Restart recompilation if another recompilation is already happening.
+ for (auto& progress : compilation_progress_) {
+ progress = MissingRecompilationField::update(progress, false);
+ }
+ auto new_tier = new_tiering_state == kTieredDown
+ ? ExecutionTier::kLiftoff
+ : ExecutionTier::kTurbofan;
int imported = module->num_imported_functions;
- int declared = module->num_declared_functions;
- outstanding_recompilation_functions_ = declared;
- DCHECK_EQ(declared, compilation_progress_.size());
- for (int slot_index = 0; slot_index < declared; ++slot_index) {
- compilation_progress_[slot_index] = MissingRecompilationField::update(
- compilation_progress_[slot_index], true);
- builder.AddRecompilationUnit(imported + slot_index,
- new_tiering_state == kTieredDown
- ? ExecutionTier::kLiftoff
- : ExecutionTier::kTurbofan);
+ for (int function_index : recompile_function_indexes) {
+ DCHECK_LE(imported, function_index);
+ int slot_index = function_index - imported;
+ auto& progress = compilation_progress_[slot_index];
+ progress = MissingRecompilationField::update(progress, true);
+ builder.AddRecompilationUnit(function_index, new_tier);
}
}
- // Trigger callback if module needs no recompilation. Add to the list of
- // callbacks (to be called later) otherwise.
+ // Trigger callback if module needs no recompilation.
if (outstanding_recompilation_functions_ == 0) {
- recompilation_finished_callback(CompilationEvent::kFinishedRecompilation);
- } else {
- callbacks_.emplace_back(std::move(recompilation_finished_callback));
- tiering_state_ = new_tiering_state;
+ TriggerCallbacks(base::EnumSet<CompilationEvent>(
+ {CompilationEvent::kFinishedRecompilation}));
}
}
@@ -2661,8 +2703,9 @@ void CompilationStateImpl::FinalizeJSToWasmWrappers(
// TODO(6792): Wrappers below are allocated with {Factory::NewCode}. As an
// optimization we keep the code space unlocked to avoid repeated unlocking
// because many such wrapper are allocated in sequence below.
- TRACE_EVENT1(TRACE_DISABLED_BY_DEFAULT("v8.wasm"), "FinalizeJSToWasmWrappers",
- "num_wrappers", js_to_wasm_wrapper_units_.size());
+ TRACE_EVENT1(TRACE_DISABLED_BY_DEFAULT("v8.wasm.detailed"),
+ "wasm.FinalizeJSToWasmWrappers", "num_wrappers",
+ js_to_wasm_wrapper_units_.size());
CodeSpaceMemoryModificationScope modification_scope(isolate->heap());
for (auto& unit : js_to_wasm_wrapper_units_) {
Handle<Code> code = unit->Finalize(isolate);
@@ -2680,8 +2723,8 @@ CompilationStateImpl::GetNextCompilationUnit(
}
void CompilationStateImpl::OnFinishedUnits(Vector<WasmCode*> code_vector) {
- TRACE_EVENT1(TRACE_DISABLED_BY_DEFAULT("v8.wasm"), "OnFinishedUnits",
- "num_units", code_vector.size());
+ TRACE_EVENT1(TRACE_DISABLED_BY_DEFAULT("v8.wasm.detailed"),
+ "wasm.OnFinishedUnits", "num_units", code_vector.size());
base::MutexGuard guard(&callbacks_mutex_);
@@ -2804,13 +2847,13 @@ void CompilationStateImpl::TriggerCallbacks(
for (auto event :
{std::make_pair(CompilationEvent::kFinishedBaselineCompilation,
- "BaselineFinished"),
+ "wasm.BaselineFinished"),
std::make_pair(CompilationEvent::kFinishedTopTierCompilation,
- "TopTierFinished"),
+ "wasm.TopTierFinished"),
std::make_pair(CompilationEvent::kFinishedRecompilation,
- "RecompilationFinished")}) {
+ "wasm.RecompilationFinished")}) {
if (!triggered_events.contains(event.first)) continue;
- TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.wasm"), event.second);
+ TRACE_EVENT0("v8.wasm", event.second);
for (auto& callback : callbacks_) {
callback(event.first);
}
@@ -2885,15 +2928,12 @@ void CompilationStateImpl::RestartBackgroundTasks() {
}
}
- if (baseline_compilation_finished() && recompilation_finished()) {
- for (auto& task : new_tasks) {
- V8::GetCurrentPlatform()->CallLowPriorityTaskOnWorkerThread(
- std::move(task));
- }
- } else {
- for (auto& task : new_tasks) {
- V8::GetCurrentPlatform()->CallOnWorkerThread(std::move(task));
- }
+  // Spawn all tasks with default priority (avoid
+ // {CallLowPriorityTaskOnWorkerThread}) even for tier up, because low priority
+ // tasks will be severely delayed even if background threads are idle (see
+ // https://crbug.com/1094928).
+ for (auto& task : new_tasks) {
+ V8::GetCurrentPlatform()->CallOnWorkerThread(std::move(task));
}
}
diff --git a/chromium/v8/src/wasm/module-compiler.h b/chromium/v8/src/wasm/module-compiler.h
index a3fc4037a21..845e7a343b7 100644
--- a/chromium/v8/src/wasm/module-compiler.h
+++ b/chromium/v8/src/wasm/module-compiler.h
@@ -63,7 +63,8 @@ WasmCode* CompileImportWrapper(
// Triggered by the WasmCompileLazy builtin. The return value indicates whether
// compilation was successful. Lazy compilation can fail only if validation is
// also lazy.
-bool CompileLazy(Isolate*, NativeModule*, int func_index);
+// TODO(clemensb): Stop calling this from the interpreter, and don't export.
+V8_EXPORT_PRIVATE bool CompileLazy(Isolate*, NativeModule*, int func_index);
int GetMaxBackgroundTasks();
diff --git a/chromium/v8/src/wasm/module-decoder.cc b/chromium/v8/src/wasm/module-decoder.cc
index e7ecd1396ba..defb3dea306 100644
--- a/chromium/v8/src/wasm/module-decoder.cc
+++ b/chromium/v8/src/wasm/module-decoder.cc
@@ -33,7 +33,7 @@ constexpr char kNameString[] = "name";
constexpr char kSourceMappingURLString[] = "sourceMappingURL";
constexpr char kCompilationHintsString[] = "compilationHints";
constexpr char kDebugInfoString[] = ".debug_info";
-constexpr char kExternalDebugInfoString[] = ".external_debug_info";
+constexpr char kExternalDebugInfoString[] = "external_debug_info";
const char* ExternalKindName(ImportExportKindCode kind) {
switch (kind) {
@@ -122,11 +122,13 @@ ValueType TypeOf(const WasmModule* module, const WasmInitExpr& expr) {
return kWasmF32;
case WasmInitExpr::kF64Const:
return kWasmF64;
- case WasmInitExpr::kRefNullConst:
- return kWasmNullRef;
case WasmInitExpr::kRefFuncConst:
- return kWasmFuncRef;
- default:
+ return ValueType::Ref(kHeapFunc, kNonNullable);
+ case WasmInitExpr::kRefNullConst:
+ // It is not possible to retrieve the full {ValueType} of a {WasmInitExpr}
+      // of kind {kRefNullConst}. As a WasmInitExpr of kind {kRefNullConst} is
+ // only valid in globals, the {ValueType} has to be retrieved from the
+ // global definition itself.
UNREACHABLE();
}
}
@@ -406,7 +408,6 @@ class ModuleDecoderImpl : public Decoder {
void DecodeSection(SectionCode section_code, Vector<const uint8_t> bytes,
uint32_t offset, bool verify_functions = true) {
- VerifyFunctionDeclarations(section_code);
if (failed()) return;
Reset(bytes, offset);
TRACE("Section: %s\n", SectionName(section_code));
@@ -447,7 +448,7 @@ class ModuleDecoderImpl : public Decoder {
// if produced by compiler. Its presence likely means that Wasm was
// built in a debug mode.
case kExternalDebugInfoSectionCode:
- // .external_debug_info is a custom section containing a reference to an
+ // external_debug_info is a custom section containing a reference to an
// external symbol file.
case kCompilationHintsSectionCode:
// TODO(frgossen): report out of place compilation hints section as a
@@ -559,7 +560,8 @@ class ModuleDecoderImpl : public Decoder {
uint8_t kind = consume_u8("type kind");
switch (kind) {
case kWasmFunctionTypeCode: {
- const FunctionSig* s = consume_sig(module_->signature_zone.get());
+ const FunctionSig* s = consume_sig(module_->signature_zone.get(),
+ DeferIndexCheckMode::kDeferCheck);
module_->add_signature(s);
break;
}
@@ -589,6 +591,27 @@ class ModuleDecoderImpl : public Decoder {
}
}
module_->signature_map.Freeze();
+ VerifyDeferredTypeOffsets();
+ }
+
+ // TODO(7748): When typed function references are allowed, this should be
+ // deleted altogether and replaced by an inline in-bounds check.
+ void VerifyDeferredTypeOffsets() {
+ for (auto& type_offset : deferred_check_type_index_) {
+ uint32_t type_index = type_offset.first;
+ uint32_t code_offset = type_offset.second;
+ if (type_index >= module_->type_kinds.size()) {
+ errorf(code_offset, "reference to undeclared struct/array #%u",
+ type_index);
+ break;
+ }
+ uint8_t type = module_->type_kinds[type_index];
+ if (type == kWasmFunctionTypeCode) {
+ errorf(code_offset, "cannot build reference to function type index #%u",
+ type_index);
+ break;
+ }
+ }
}
void DecodeImportSection() {
@@ -637,12 +660,6 @@ class ModuleDecoderImpl : public Decoder {
WasmTable* table = &module_->tables.back();
table->imported = true;
ValueType type = consume_reference_type();
- if (!enabled_features_.has_anyref()) {
- if (type != kWasmFuncRef) {
- error(pc_ - 1, "invalid table type");
- break;
- }
- }
table->type = type;
uint8_t flags = validate_table_flags("element count");
consume_resizable_limits(
@@ -723,9 +740,9 @@ class ModuleDecoderImpl : public Decoder {
void DecodeTableSection() {
// TODO(ahaas): Set the correct limit to {kV8MaxWasmTables} once the
- // implementation of AnyRef landed.
+ // implementation of ExternRef landed.
uint32_t max_count =
- enabled_features_.has_anyref() ? 100000 : kV8MaxWasmTables;
+ enabled_features_.has_reftypes() ? 100000 : kV8MaxWasmTables;
uint32_t table_count = consume_count("table count", max_count);
for (uint32_t i = 0; ok() && i < table_count; i++) {
@@ -793,8 +810,14 @@ class ModuleDecoderImpl : public Decoder {
WasmFunction* func = nullptr;
exp->index =
consume_func_index(module_.get(), &func, "export function index");
+
+ if (failed()) break;
+ DCHECK_NOT_NULL(func);
+
module_->num_exported_functions++;
- if (func) func->exported = true;
+ func->exported = true;
+ // Exported functions are considered "declared".
+ func->declared = true;
break;
}
case kExternalTable: {
@@ -899,10 +922,11 @@ class ModuleDecoderImpl : public Decoder {
errorf(pos, "out of bounds table index %u", table_index);
break;
}
- if (!type.IsSubTypeOf(module_->tables[table_index].type)) {
+ if (!IsSubtypeOf(type, module_->tables[table_index].type,
+ this->module_.get())) {
errorf(pos,
"Invalid element segment. Table %u is not a super-type of %s",
- table_index, type.type_name());
+ table_index, type.type_name().c_str());
break;
}
}
@@ -1203,41 +1227,7 @@ class ModuleDecoderImpl : public Decoder {
return true;
}
- void VerifyFunctionDeclarations(SectionCode section_code) {
- // Since we will only know if a function was properly declared after all the
- // element sections have been parsed, but we need to verify the proper use
- // within global initialization, we are deferring those checks.
- if (deferred_funcref_error_offsets_.empty()) {
- // No verifications to do be done.
- return;
- }
- if (!ok()) {
- // Previous errors exist.
- return;
- }
- // TODO(ecmziegler): Adjust logic if module order changes (e.g. event
- // section).
- if (section_code <= kElementSectionCode &&
- section_code != kUnknownSectionCode) {
- // Before the element section and not at end of decoding.
- return;
- }
- for (auto& func_offset : deferred_funcref_error_offsets_) {
- DCHECK_LT(func_offset.first, module_->functions.size());
- if (!module_->functions[func_offset.first].declared) {
- errorf(func_offset.second, "undeclared reference to function #%u",
- func_offset.first);
- break;
- }
- }
- deferred_funcref_error_offsets_.clear();
- }
-
ModuleResult FinishDecoding(bool verify_functions = true) {
- // Ensure that function verifications were done even if no section followed
- // the global section.
- VerifyFunctionDeclarations(kUnknownSectionCode);
-
if (ok() && CheckMismatchedCounts()) {
CalculateGlobalOffsets(module_.get());
}
@@ -1298,7 +1288,7 @@ class ModuleDecoderImpl : public Decoder {
pc_ = start_;
expect_u8("type form", kWasmFunctionTypeCode);
if (!ok()) return FunctionResult{std::move(intermediate_error_)};
- function->sig = consume_sig(zone);
+ function->sig = consume_sig(zone, DeferIndexCheckMode::kNoCheck);
function->code = {off(pc_), static_cast<uint32_t>(end_ - pc_)};
if (ok())
@@ -1316,7 +1306,8 @@ class ModuleDecoderImpl : public Decoder {
const FunctionSig* DecodeFunctionSignature(Zone* zone, const byte* start) {
pc_ = start;
if (!expect_u8("type form", kWasmFunctionTypeCode)) return nullptr;
- const FunctionSig* result = consume_sig(zone);
+ const FunctionSig* result =
+ consume_sig(zone, DeferIndexCheckMode::kNoCheck);
return ok() ? result : nullptr;
}
@@ -1357,10 +1348,10 @@ class ModuleDecoderImpl : public Decoder {
kLastKnownModuleSection,
"not enough bits");
WasmError intermediate_error_;
- // Map from function index to wire byte offset of first funcref initialization
- // in global section. Used for deferred checking and proper error reporting if
- // these were not properly declared in the element section.
- std::unordered_map<uint32_t, int> deferred_funcref_error_offsets_;
+  // Map from type indices discovered during type section decoding (in
+  // signatures and struct/array field types) to the code offset where they
+  // are referenced. Since struct types may be recursive, these indices are
+  // checked, and errors reported, only once the whole type section is parsed.
+ std::unordered_map<uint32_t, int> deferred_check_type_index_;
ModuleOrigin origin_;
bool has_seen_unordered_section(SectionCode section_code) {
@@ -1376,7 +1367,7 @@ class ModuleDecoderImpl : public Decoder {
}
bool AddTable(WasmModule* module) {
- if (enabled_features_.has_anyref()) return true;
+ if (enabled_features_.has_reftypes()) return true;
if (module->tables.size() > 0) {
error("At most one table is supported");
return false;
@@ -1401,7 +1392,7 @@ class ModuleDecoderImpl : public Decoder {
global->type = consume_value_type();
global->mutability = consume_mutability();
const byte* pos = pc();
- global->init = consume_init_expr(module, kWasmStmt);
+ global->init = consume_init_expr(module, global->type);
if (global->init.kind == WasmInitExpr::kGlobalIndex) {
uint32_t other_index = global->init.val.global_index;
if (other_index >= index) {
@@ -1413,14 +1404,8 @@ class ModuleDecoderImpl : public Decoder {
errorf(pos,
"type mismatch in global initialization "
"(from global #%u), expected %s, got %s",
- other_index, global->type.type_name(),
- module->globals[other_index].type.type_name());
- }
- } else {
- if (!TypeOf(module, global->init).IsSubTypeOf(global->type)) {
- errorf(pos, "type error in global initialization, expected %s, got %s",
- global->type.type_name(),
- TypeOf(module, global->init).type_name());
+ other_index, global->type.type_name().c_str(),
+ module->globals[other_index].type.type_name().c_str());
}
}
}
@@ -1433,7 +1418,7 @@ class ModuleDecoderImpl : public Decoder {
for (WasmGlobal& global : module->globals) {
if (global.mutability && global.imported) {
global.index = num_imported_mutable_globals++;
- } else if (global.type.IsReferenceType()) {
+ } else if (global.type.is_reference_type()) {
global.offset = tagged_offset;
// All entries in the tagged_globals_buffer have size 1.
tagged_offset++;
@@ -1675,24 +1660,36 @@ class ModuleDecoderImpl : public Decoder {
break;
}
case kExprRefNull: {
- if (enabled_features_.has_anyref() || enabled_features_.has_eh()) {
+ if (enabled_features_.has_reftypes() || enabled_features_.has_eh()) {
+ RefNullImmediate<Decoder::kValidate> imm(WasmFeatures::All(), this,
+ pc() - 1);
+ if (!imm.type.is_reference_type()) {
+ errorf(pc() - 1, "ref.null is not supported for %s",
+ imm.type.type_name().c_str());
+ break;
+ }
expr.kind = WasmInitExpr::kRefNullConst;
- len = 0;
+ len = imm.length;
+ if (expected != kWasmStmt &&
+ !IsSubtypeOf(imm.type, expected, module_.get())) {
+ errorf(pos, "type error in init expression, expected %s, got %s",
+ expected.type_name().c_str(), imm.type.type_name().c_str());
+ }
break;
}
V8_FALLTHROUGH;
}
case kExprRefFunc: {
- if (enabled_features_.has_anyref()) {
+ if (enabled_features_.has_reftypes()) {
FunctionIndexImmediate<Decoder::kValidate> imm(this, pc() - 1);
if (module->functions.size() <= imm.index) {
errorf(pc() - 1, "invalid function index: %u", imm.index);
break;
}
- // Defer check for declaration of function reference.
- deferred_funcref_error_offsets_.emplace(imm.index, pc_offset());
expr.kind = WasmInitExpr::kRefFuncConst;
expr.val.function_index = imm.index;
+ // Functions referenced in the globals section count as "declared".
+ module->functions[imm.index].declared = true;
len = imm.length;
break;
}
@@ -1708,9 +1705,13 @@ class ModuleDecoderImpl : public Decoder {
if (!expect_u8("end opcode", kExprEnd)) {
expr.kind = WasmInitExpr::kNone;
}
- if (expected != kWasmStmt && TypeOf(module, expr) != kWasmI32) {
+
+ // The type check of ref.null is special, and already done above.
+ if (expected != kWasmStmt && opcode != kExprRefNull &&
+ !IsSubtypeOf(TypeOf(module, expr), expected, module_.get())) {
errorf(pos, "type error in init expression, expected %s, got %s",
- expected.type_name(), TypeOf(module, expr).type_name());
+ expected.type_name().c_str(),
+ TypeOf(module, expr).type_name().c_str());
}
return expr;
}
@@ -1723,49 +1724,60 @@ class ModuleDecoderImpl : public Decoder {
}
ValueType consume_value_type() {
- ValueType result;
- uint32_t type_length = value_type_reader::read_value_type<kValidate>(
- this, this->pc(), &result,
+ uint32_t type_length;
+ ValueType result = value_type_reader::read_value_type<kValidate>(
+ this, this->pc(), &type_length,
origin_ == kWasmOrigin ? enabled_features_ : WasmFeatures::None());
- if (type_length == 0) error(pc_, "invalid value type");
- consume_bytes(type_length);
+ if (result == kWasmBottom) error(pc_, "invalid value type");
+ consume_bytes(type_length, "value type");
return result;
}
- // Reads a single 8-bit integer, interpreting it as a reference type.
- ValueType consume_reference_type() {
- byte val = consume_u8("reference type");
- ValueTypeCode t = static_cast<ValueTypeCode>(val);
- switch (t) {
- case kLocalFuncRef:
- return kWasmFuncRef;
- case kLocalAnyRef:
- if (!enabled_features_.has_anyref()) {
- error(pc_ - 1,
- "Invalid type. Set --experimental-wasm-anyref to use 'AnyRef'");
- }
- return kWasmAnyRef;
- case kLocalNullRef:
- if (!enabled_features_.has_anyref()) {
- error(
- pc_ - 1,
- "Invalid type. Set --experimental-wasm-anyref to use 'NullRef'");
- }
- return kWasmNullRef;
- case kLocalExnRef:
- if (!enabled_features_.has_eh()) {
- error(pc_ - 1,
- "Invalid type. Set --experimental-wasm-eh to use 'ExnRef'");
- }
- return kWasmExnRef;
+ ValueType consume_storage_type() {
+ uint8_t opcode = read_u8<kValidate>(this->pc());
+ switch (opcode) {
+ case kLocalI8:
+ consume_bytes(1, "i8");
+ return kWasmI8;
+ case kLocalI16:
+ consume_bytes(1, "i16");
+ return kWasmI16;
default:
- break;
+ // It is not a packed type, so it has to be a value type.
+ return consume_value_type();
+ }
+ }
+
+ // Reads a reference type for tables and element segment headers.
+  // Note that, unless the reftypes feature is enabled, only funcref is allowed.
+ ValueType consume_reference_type() {
+ if (!enabled_features_.has_reftypes()) {
+ uint8_t ref_type = consume_u8("reference type");
+ if (ref_type != kLocalFuncRef) {
+ error(pc_ - 1,
+ "invalid table type. Consider using experimental flags.");
+ return kWasmBottom;
+ }
+ return kWasmFuncRef;
+ } else {
+ const byte* position = pc();
+ ValueType result = consume_value_type();
+ if (!result.is_reference_type()) {
+ error(position, "expected reference type");
+ }
+ return result;
+ }
+ }
+
+ enum DeferIndexCheckMode { kNoCheck, kDeferCheck };
+
+ void defer_index_check(ValueType type) {
+ if (type.has_index()) {
+ deferred_check_type_index_.emplace(type.ref_index(), pc_offset());
}
- error(pc_ - 1, "invalid reference type");
- return kWasmStmt;
}
- const FunctionSig* consume_sig(Zone* zone) {
+ const FunctionSig* consume_sig(Zone* zone, DeferIndexCheckMode defer_check) {
// Parse parameter types.
uint32_t param_count =
consume_count("param count", kV8MaxWasmFunctionParams);
@@ -1773,6 +1785,9 @@ class ModuleDecoderImpl : public Decoder {
std::vector<ValueType> params;
for (uint32_t i = 0; ok() && i < param_count; ++i) {
ValueType param = consume_value_type();
+ if (defer_check == DeferIndexCheckMode::kDeferCheck) {
+ defer_index_check(param);
+ }
params.push_back(param);
}
std::vector<ValueType> returns;
@@ -1784,6 +1799,9 @@ class ModuleDecoderImpl : public Decoder {
if (failed()) return nullptr;
for (uint32_t i = 0; ok() && i < return_count; ++i) {
ValueType ret = consume_value_type();
+ if (defer_check == DeferIndexCheckMode::kDeferCheck) {
+ defer_index_check(ret);
+ }
returns.push_back(ret);
}
@@ -1802,22 +1820,29 @@ class ModuleDecoderImpl : public Decoder {
// TODO(7748): Introduce a proper maximum.
uint32_t field_count = consume_count("field count", 999);
if (failed()) return nullptr;
- std::vector<ValueType> fields;
+ ValueType* fields = zone->NewArray<ValueType>(field_count);
+ bool* mutabilities = zone->NewArray<bool>(field_count);
for (uint32_t i = 0; ok() && i < field_count; ++i) {
- ValueType field = consume_value_type();
- fields.push_back(field);
+ ValueType field = consume_storage_type();
+ defer_index_check(field);
+ fields[i] = field;
+ bool mutability = consume_mutability();
+ mutabilities[i] = mutability;
}
if (failed()) return nullptr;
- ValueType* buffer = zone->NewArray<ValueType>(field_count);
- for (uint32_t i = 0; i < field_count; i++) buffer[i] = fields[i];
uint32_t* offsets = zone->NewArray<uint32_t>(field_count);
- return new (zone) StructType(field_count, offsets, buffer);
+ return new (zone) StructType(field_count, offsets, fields, mutabilities);
}
const ArrayType* consume_array(Zone* zone) {
- ValueType field = consume_value_type();
+ ValueType field = consume_storage_type();
if (failed()) return nullptr;
- return new (zone) ArrayType(field);
+ defer_index_check(field);
+ bool mutability = consume_mutability();
+ if (!mutability) {
+ error(this->pc() - 1, "immutable arrays are not supported yet");
+ }
+ return new (zone) ArrayType(field, mutability);
}
// Consume the attribute field of an exception.
@@ -1837,15 +1862,16 @@ class ModuleDecoderImpl : public Decoder {
WasmInitExpr* offset) {
const byte* pos = pc();
uint8_t flag;
- if (enabled_features_.has_bulk_memory() || enabled_features_.has_anyref()) {
+ if (enabled_features_.has_bulk_memory() ||
+ enabled_features_.has_reftypes()) {
flag = consume_u8("flag");
} else {
uint32_t table_index = consume_u32v("table index");
- // The only valid flag value without bulk_memory or anyref is '0'.
+ // The only valid flag value without bulk_memory or externref is '0'.
if (table_index != 0) {
error(
"Element segments with table indices require "
- "--experimental-wasm-bulk-memory or --experimental-wasm-anyref");
+ "--experimental-wasm-bulk-memory or --experimental-wasm-reftypes");
return;
}
flag = 0;
@@ -1880,8 +1906,9 @@ class ModuleDecoderImpl : public Decoder {
*status == WasmElemSegment::kStatusActive;
if (*status == WasmElemSegment::kStatusDeclarative &&
- !enabled_features_.has_anyref()) {
- error("Declarative element segments require --experimental-wasm-anyref");
+ !enabled_features_.has_reftypes()) {
+ error(
+ "Declarative element segments require --experimental-wasm-reftypes");
return;
}
if (*status == WasmElemSegment::kStatusPassive &&
@@ -1896,10 +1923,10 @@ class ModuleDecoderImpl : public Decoder {
return;
}
if (flag != 0 && !enabled_features_.has_bulk_memory() &&
- !enabled_features_.has_anyref()) {
+ !enabled_features_.has_reftypes()) {
error(
"Invalid segment flag. Did you forget "
- "--experimental-wasm-bulk-memory or --experimental-wasm-anyref?");
+ "--experimental-wasm-bulk-memory or --experimental-wasm-reftypes?");
return;
}
if ((flag & kFullMask) != flag) {
@@ -1953,10 +1980,10 @@ class ModuleDecoderImpl : public Decoder {
}
} else if (flag == SegmentFlags::kActiveWithIndex) {
if (!(enabled_features_.has_bulk_memory() ||
- enabled_features_.has_anyref())) {
+ enabled_features_.has_reftypes())) {
error(
"Element segments with table indices require "
- "--experimental-wasm-bulk-memory or --experimental-wasm-anyref");
+ "--experimental-wasm-bulk-memory or --experimental-wasm-reftypes");
return;
}
} else if (flag != SegmentFlags::kActiveNoIndex) {
@@ -1999,9 +2026,13 @@ class ModuleDecoderImpl : public Decoder {
uint8_t opcode = consume_u8("element opcode");
if (failed()) return index;
switch (opcode) {
- case kExprRefNull:
+ case kExprRefNull: {
+ RefNullImmediate<kValidate> imm(WasmFeatures::All(), this,
+ this->pc() - 1);
+ consume_bytes(imm.length, "ref.null immediate");
index = WasmElemSegment::kNullIndex;
break;
+ }
case kExprRefFunc:
index = consume_element_func_index();
if (failed()) return index;
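
Under the reference-types encoding, ref.null carries a type immediate, which is why the kExprRefNull case above now consumes imm.length extra bytes before recording the null entry. A standalone sketch of measuring such a LEB128-encoded immediate (an illustrative helper, not the RefNullImmediate class used above):

#include <cstddef>
#include <cstdint>

// Returns the number of bytes occupied by one LEB128-encoded immediate
// starting at pc, or 0 if it is truncated (at most 5 bytes for 33 bits).
size_t Leb128Length(const uint8_t* pc, const uint8_t* end) {
  for (size_t i = 0; i < 5 && pc + i < end; ++i) {
    if ((pc[i] & 0x80) == 0) return i + 1;  // high bit clear: last byte
  }
  return 0;  // malformed or truncated
}

The element decoder advances by that length and then continues with the next entry.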
@@ -2134,7 +2165,7 @@ AsmJsOffsetsResult DecodeAsmJsOffsets(Vector<const uint8_t> encoded_offsets) {
Decoder decoder(encoded_offsets);
uint32_t functions_count = decoder.consume_u32v("functions count");
- // Sanity check.
+ // Consistency check.
DCHECK_GE(encoded_offsets.size(), functions_count);
functions.reserve(functions_count);
@@ -2297,7 +2328,8 @@ void GenerateNamesFromImportsAndExports(
names) {
DCHECK_NOT_NULL(names);
DCHECK(names->empty());
- DCHECK(kind == kExternalGlobal || kind == kExternalMemory);
+ DCHECK(kind == kExternalGlobal || kind == kExternalMemory ||
+ kind == kExternalTable);
// Extract from import table.
for (const WasmImport& imp : import_table) {
diff --git a/chromium/v8/src/wasm/module-instantiate.cc b/chromium/v8/src/wasm/module-instantiate.cc
index 9dfc1e16081..b48c9635880 100644
--- a/chromium/v8/src/wasm/module-instantiate.cc
+++ b/chromium/v8/src/wasm/module-instantiate.cc
@@ -16,6 +16,7 @@
#include "src/wasm/wasm-import-wrapper-cache.h"
#include "src/wasm/wasm-module.h"
#include "src/wasm/wasm-objects-inl.h"
+#include "src/wasm/wasm-subtyping.h"
#define TRACE(...) \
do { \
@@ -196,7 +197,7 @@ class InstanceBuilder {
void WriteGlobalValue(const WasmGlobal& global,
Handle<WasmGlobalObject> value);
- void WriteGlobalAnyRef(const WasmGlobal& global, Handle<Object> value);
+ void WriteGlobalExternRef(const WasmGlobal& global, Handle<Object> value);
void SanitizeImports();
@@ -304,7 +305,8 @@ InstanceBuilder::InstanceBuilder(Isolate* isolate, ErrorThrower* thrower,
// Build an instance, in all of its glory.
MaybeHandle<WasmInstanceObject> InstanceBuilder::Build() {
- TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.wasm"), "InstanceBuilder::Build");
+ TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.wasm.detailed"),
+ "wasm.InstanceBuilder.Build");
// Check that an imports argument was provided, if the module requires it.
// No point in continuing otherwise.
if (!module_->import_table.empty() && ffi_.is_null()) {
@@ -472,7 +474,7 @@ MaybeHandle<WasmInstanceObject> InstanceBuilder::Build() {
// iteration below.
for (int i = 1; i < table_count; ++i) {
const WasmTable& table = module_->tables[i];
- if (table.type == kWasmFuncRef) {
+ if (table.type.heap_type() == kHeapFunc) {
Handle<WasmIndirectFunctionTable> table_obj =
WasmIndirectFunctionTable::New(isolate_, table.initial_size);
tables->set(i, *table_obj);
@@ -524,8 +526,10 @@ MaybeHandle<WasmInstanceObject> InstanceBuilder::Build() {
auto table_object = handle(WasmTableObject::cast(instance->tables().get(
elem_segment.table_index)),
isolate_);
- size_t table_size = table_object->current_length();
- if (!base::IsInBounds(base, elem_segment.entries.size(), table_size)) {
+ uint32_t table_size = table_object->current_length();
+ if (!base::IsInBounds<uint32_t>(
+ base, static_cast<uint32_t>(elem_segment.entries.size()),
+ table_size)) {
thrower_->LinkError("table initializer is out of bounds");
return {};
}
@@ -537,8 +541,8 @@ MaybeHandle<WasmInstanceObject> InstanceBuilder::Build() {
for (const WasmDataSegment& seg : module_->data_segments) {
if (!seg.active) continue;
uint32_t base = EvalUint32InitExpr(instance, seg.dest_addr);
- if (!base::IsInBounds(base, seg.source.length(),
- instance->memory_size())) {
+ if (!base::IsInBounds<uint64_t>(base, seg.source.length(),
+ instance->memory_size())) {
thrower_->LinkError("data segment is out of bounds");
return {};
}
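
The bounds checks above now name an explicit integer width (IsInBounds<uint32_t>, IsInBounds<uint64_t>) so that offset, size and limit are compared in one well-defined type instead of whatever the mixed arguments promote to. A self-contained sketch of that contract, assuming the usual "offset + size <= max, evaluated without overflow" meaning:

#include <cstdint>

// True iff [offset, offset + size) fits inside [0, max), computed in T
// without overflow (the subtraction cannot underflow once size <= max).
template <typename T>
constexpr bool IsInBounds(T offset, T size, T max) {
  return size <= max && offset <= max - size;
}

// With uint64_t a 4GiB data segment against a 4GiB memory is handled
// exactly; a 32-bit `offset + size` would silently wrap instead.
static_assert(IsInBounds<uint64_t>(0, uint64_t{1} << 32, uint64_t{1} << 32),
              "4GiB segment at offset 0 fits a 4GiB memory");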
@@ -616,8 +620,8 @@ MaybeHandle<WasmInstanceObject> InstanceBuilder::Build() {
}
bool InstanceBuilder::ExecuteStartFunction() {
- TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.wasm"),
- "InstanceBuilder::ExecuteStartFunction");
+ TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.wasm.detailed"),
+ "wasm.ExecuteStartFunction");
if (start_function_.is_null()) return true; // No start function.
HandleScope scope(isolate_);
@@ -730,7 +734,8 @@ void InstanceBuilder::LoadDataSegments(Handle<WasmInstanceObject> instance) {
if (size == 0) continue;
uint32_t dest_offset = EvalUint32InitExpr(instance, segment.dest_addr);
- DCHECK(base::IsInBounds(dest_offset, size, instance->memory_size()));
+ DCHECK(base::IsInBounds<uint64_t>(dest_offset, size,
+ instance->memory_size()));
byte* dest = instance->memory_start() + dest_offset;
const byte* src = wire_bytes.begin() + segment.source.offset();
memcpy(dest, src, size);
@@ -741,7 +746,7 @@ void InstanceBuilder::LoadDataSegments(Handle<WasmInstanceObject> instance) {
void InstanceBuilder::WriteGlobalValue(const WasmGlobal& global, double num) {
TRACE("init [globals_start=%p + %u] = %lf, type = %s\n",
raw_buffer_ptr(untagged_globals_, 0), global.offset, num,
- global.type.type_name());
+ global.type.type_name().c_str());
switch (global.type.kind()) {
case ValueType::kI32:
WriteLittleEndianValue<int32_t>(GetRawGlobalPtr<int32_t>(global),
@@ -767,7 +772,7 @@ void InstanceBuilder::WriteGlobalValue(const WasmGlobal& global, double num) {
void InstanceBuilder::WriteGlobalValue(const WasmGlobal& global, int64_t num) {
TRACE("init [globals_start=%p + %u] = %" PRId64 ", type = %s\n",
raw_buffer_ptr(untagged_globals_, 0), global.offset, num,
- global.type.type_name());
+ global.type.type_name().c_str());
DCHECK_EQ(kWasmI64, global.type);
WriteLittleEndianValue<int64_t>(GetRawGlobalPtr<int64_t>(global), num);
}
@@ -801,27 +806,25 @@ void InstanceBuilder::WriteGlobalValue(const WasmGlobal& global,
TRACE("%lf", num);
break;
}
- case ValueType::kAnyRef:
- case ValueType::kFuncRef:
- case ValueType::kNullRef:
- case ValueType::kExnRef:
+ case ValueType::kRtt:
case ValueType::kRef:
- case ValueType::kOptRef:
- case ValueType::kEqRef: {
- DCHECK_IMPLIES(global.type == kWasmNullRef, value->GetRef()->IsNull());
+ case ValueType::kOptRef: {
tagged_globals_->set(global.offset, *value->GetRef());
break;
}
case ValueType::kStmt:
case ValueType::kS128:
case ValueType::kBottom:
+ case ValueType::kI8:
+ case ValueType::kI16:
UNREACHABLE();
}
- TRACE(", type = %s (from WebAssembly.Global)\n", global.type.type_name());
+ TRACE(", type = %s (from WebAssembly.Global)\n",
+ global.type.type_name().c_str());
}
-void InstanceBuilder::WriteGlobalAnyRef(const WasmGlobal& global,
- Handle<Object> value) {
+void InstanceBuilder::WriteGlobalExternRef(const WasmGlobal& global,
+ Handle<Object> value) {
tagged_globals_->set(global.offset, *value, UPDATE_WRITE_BARRIER);
}
@@ -1046,7 +1049,7 @@ bool InstanceBuilder::ProcessImportedTable(Handle<WasmInstanceObject> instance,
return false;
}
- if (table.type == kWasmFuncRef &&
+ if (table.type.heap_type() == kHeapFunc &&
!InitializeImportedIndirectFunctionTable(instance, table_index,
import_index, table_object)) {
return false;
@@ -1113,13 +1116,14 @@ bool InstanceBuilder::ProcessImportedWasmGlobalObject(
Handle<WasmInstanceObject> instance, int import_index,
Handle<String> module_name, Handle<String> import_name,
const WasmGlobal& global, Handle<WasmGlobalObject> global_object) {
- if (global_object->is_mutable() != global.mutability) {
+ if (static_cast<bool>(global_object->is_mutable()) != global.mutability) {
ReportLinkError("imported global does not match the expected mutability",
import_index, module_name, import_name);
return false;
}
- bool is_sub_type = global_object->type().IsSubTypeOf(global.type);
+ bool is_sub_type =
+ IsSubtypeOf(global_object->type(), global.type, instance->module());
bool is_same_type = global_object->type() == global.type;
bool valid_type = global.mutability ? is_same_type : is_sub_type;
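
The two flags above encode the link-time rule for imported globals: a mutable global must match the declared type exactly, while an immutable one may be any subtype of it. A compact restatement of that rule (hedged sketch; is_subtype stands in for the module-aware IsSubtypeOf call above):

// Returns true if an imported global of type `actual` satisfies an import
// declared as `expected` with the given mutability.
template <typename Type>
bool ImportedGlobalTypeOk(Type actual, Type expected, bool is_mutable,
                          bool (*is_subtype)(Type, Type)) {
  return is_mutable ? actual == expected : is_subtype(actual, expected);
}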
@@ -1132,12 +1136,13 @@ bool InstanceBuilder::ProcessImportedWasmGlobalObject(
DCHECK_LT(global.index, module_->num_imported_mutable_globals);
Handle<Object> buffer;
Address address_or_offset;
- if (global.type.IsReferenceType()) {
+ if (global.type.is_reference_type()) {
static_assert(sizeof(global_object->offset()) <= sizeof(Address),
"The offset into the globals buffer does not fit into "
"the imported_mutable_globals array");
buffer = handle(global_object->tagged_buffer(), isolate_);
- // For anyref globals we use a relative offset, not an absolute address.
+ // For externref globals we use a relative offset, not an absolute
+ // address.
address_or_offset = static_cast<Address>(global_object->offset());
} else {
buffer = handle(global_object->untagged_buffer(), isolate_);
@@ -1210,8 +1215,8 @@ bool InstanceBuilder::ProcessImportedGlobal(Handle<WasmInstanceObject> instance,
return false;
}
- if (global.type.IsReferenceType()) {
- if (global.type == kWasmFuncRef) {
+ if (global.type.is_reference_type()) {
+ if (global.type.heap_type() == kHeapFunc) {
if (!value->IsNull(isolate_) &&
!WasmExportedFunction::IsWasmExportedFunction(*value)) {
ReportLinkError(
@@ -1219,14 +1224,8 @@ bool InstanceBuilder::ProcessImportedGlobal(Handle<WasmInstanceObject> instance,
import_index, module_name, import_name);
return false;
}
- } else if (global.type == kWasmNullRef) {
- if (!value->IsNull(isolate_)) {
- ReportLinkError("imported nullref global must be null", import_index,
- module_name, import_name);
- return false;
- }
}
- WriteGlobalAnyRef(global, value);
+ WriteGlobalExternRef(global, value);
return true;
}
@@ -1412,7 +1411,7 @@ void InstanceBuilder::InitGlobals(Handle<WasmInstanceObject> instance) {
global.init.val.f64_const);
break;
case WasmInitExpr::kRefNullConst:
- DCHECK(enabled_.has_anyref() || enabled_.has_eh());
+ DCHECK(enabled_.has_reftypes() || enabled_.has_eh());
if (global.imported) break; // We already initialized imported globals.
tagged_globals_->set(global.offset,
@@ -1420,7 +1419,7 @@ void InstanceBuilder::InitGlobals(Handle<WasmInstanceObject> instance) {
SKIP_WRITE_BARRIER);
break;
case WasmInitExpr::kRefFuncConst: {
- DCHECK(enabled_.has_anyref());
+ DCHECK(enabled_.has_reftypes());
auto function = WasmInstanceObject::GetOrCreateWasmExternalFunction(
isolate_, instance, global.init.val.function_index);
tagged_globals_->set(global.offset, *function);
@@ -1432,8 +1431,8 @@ void InstanceBuilder::InitGlobals(Handle<WasmInstanceObject> instance) {
uint32_t old_offset =
module_->globals[global.init.val.global_index].offset;
TRACE("init [globals+%u] = [globals+%d]\n", global.offset, old_offset);
- if (global.type.IsReferenceType()) {
- DCHECK(enabled_.has_anyref() || enabled_.has_eh());
+ if (global.type.is_reference_type()) {
+ DCHECK(enabled_.has_reftypes() || enabled_.has_eh());
tagged_globals_->set(new_offset, tagged_globals_->get(old_offset));
} else {
size_t size = (global.type == kWasmI64 || global.type == kWasmF64)
@@ -1483,7 +1482,7 @@ bool InstanceBuilder::AllocateMemory() {
bool InstanceBuilder::NeedsWrappers() const {
if (module_->num_exported_functions > 0) return true;
for (auto& table : module_->tables) {
- if (table.type == kWasmFuncRef) return true;
+ if (table.type.heap_type() == kHeapFunc) return true;
}
return false;
}
@@ -1571,10 +1570,10 @@ void InstanceBuilder::ProcessExports(Handle<WasmInstanceObject> instance) {
if (global.mutability && global.imported) {
Handle<FixedArray> buffers_array(
instance->imported_mutable_globals_buffers(), isolate_);
- if (global.type.IsReferenceType()) {
+ if (global.type.is_reference_type()) {
tagged_buffer = handle(
FixedArray::cast(buffers_array->get(global.index)), isolate_);
- // For anyref globals we store the relative offset in the
+ // For externref globals we store the relative offset in the
// imported_mutable_globals array instead of an absolute address.
Address addr = instance->imported_mutable_globals()[global.index];
DCHECK_LE(addr, static_cast<Address>(
@@ -1595,7 +1594,7 @@ void InstanceBuilder::ProcessExports(Handle<WasmInstanceObject> instance) {
offset = static_cast<uint32_t>(global_addr - backing_store);
}
} else {
- if (global.type.IsReferenceType()) {
+ if (global.type.is_reference_type()) {
tagged_buffer = handle(instance->tagged_globals_buffer(), isolate_);
} else {
untagged_buffer =
@@ -1656,7 +1655,7 @@ void InstanceBuilder::InitializeIndirectFunctionTables(
for (int i = 0; i < static_cast<int>(module_->tables.size()); ++i) {
const WasmTable& table = module_->tables[i];
- if (table.type == kWasmFuncRef) {
+ if (table.type.heap_type() == kHeapFunc) {
WasmInstanceObject::EnsureIndirectFunctionTableWithMinimumSize(
instance, i, table.initial_size);
}
@@ -1672,11 +1671,12 @@ bool LoadElemSegmentImpl(Isolate* isolate, Handle<WasmInstanceObject> instance,
// TODO(wasm): Move this functionality into wasm-objects, since it is used
// for both instantiation and in the implementation of the table.init
// instruction.
- if (!base::IsInBounds(dst, count, table_object->current_length()) ||
- !base::IsInBounds(src, count,
- instance->dropped_elem_segments()[segment_index] == 0
- ? elem_segment.entries.size()
- : 0)) {
+ if (!base::IsInBounds<uint64_t>(dst, count, table_object->current_length()) ||
+ !base::IsInBounds<uint64_t>(
+ src, count,
+ instance->dropped_elem_segments()[segment_index] == 0
+ ? elem_segment.entries.size()
+ : 0)) {
return false;
}
@@ -1686,7 +1686,7 @@ bool LoadElemSegmentImpl(Isolate* isolate, Handle<WasmInstanceObject> instance,
int entry_index = static_cast<int>(dst + i);
if (func_index == WasmElemSegment::kNullIndex) {
- if (table_object->type() == kWasmFuncRef) {
+ if (table_object->type().heap_type() == kHeapFunc) {
IndirectFunctionTableEntry(instance, table_index, entry_index).clear();
}
WasmTableObject::Set(isolate, table_object, entry_index,
@@ -1697,15 +1697,15 @@ bool LoadElemSegmentImpl(Isolate* isolate, Handle<WasmInstanceObject> instance,
const WasmFunction* function = &module->functions[func_index];
// Update the local dispatch table first if necessary.
- if (table_object->type() == kWasmFuncRef) {
+ if (table_object->type().heap_type() == kHeapFunc) {
uint32_t sig_id = module->signature_ids[function->sig_index];
IndirectFunctionTableEntry(instance, table_index, entry_index)
.Set(sig_id, instance, func_index);
}
- // For AnyRef tables, we have to generate the WasmExternalFunction eagerly.
- // Later we cannot know if an entry is a placeholder or not.
- if (table_object->type() == kWasmAnyRef) {
+ // For ExternRef tables, we have to generate the WasmExternalFunction
+ // eagerly. Later we cannot know if an entry is a placeholder or not.
+ if (table_object->type().heap_type() == kHeapExtern) {
Handle<WasmExternalFunction> wasm_external_function =
WasmInstanceObject::GetOrCreateWasmExternalFunction(isolate, instance,
func_index);
@@ -1772,7 +1772,7 @@ void InstanceBuilder::LoadTableSegments(Handle<WasmInstanceObject> instance) {
int table_count = static_cast<int>(module_->tables.size());
for (int index = 0; index < table_count; ++index) {
- if (module_->tables[index].type == kWasmFuncRef) {
+ if (module_->tables[index].type.heap_type() == kHeapFunc) {
auto table_object = handle(
WasmTableObject::cast(instance->tables().get(index)), isolate_);
diff --git a/chromium/v8/src/wasm/streaming-decoder.cc b/chromium/v8/src/wasm/streaming-decoder.cc
index c88a2c77b89..eb297807007 100644
--- a/chromium/v8/src/wasm/streaming-decoder.cc
+++ b/chromium/v8/src/wasm/streaming-decoder.cc
@@ -25,7 +25,203 @@ namespace v8 {
namespace internal {
namespace wasm {
-void StreamingDecoder::OnBytesReceived(Vector<const uint8_t> bytes) {
+class V8_EXPORT_PRIVATE AsyncStreamingDecoder : public StreamingDecoder {
+ public:
+ explicit AsyncStreamingDecoder(std::unique_ptr<StreamingProcessor> processor);
+
+ // The buffer passed into OnBytesReceived is owned by the caller.
+ void OnBytesReceived(Vector<const uint8_t> bytes) override;
+
+ void Finish() override;
+
+ void Abort() override;
+
+ // Notify the StreamingDecoder that compilation ended and the
+ // StreamingProcessor should not be called anymore.
+ void NotifyCompilationEnded() override { Fail(); }
+
+ void NotifyNativeModuleCreated(
+ const std::shared_ptr<NativeModule>& native_module) override;
+
+ private:
+ // The SectionBuffer is the data object for the content of a single section.
+ // It stores all bytes of the section (including section id and section
+ // length), and the offset where the actual payload starts.
+ class SectionBuffer : public WireBytesStorage {
+ public:
+ // id: The section id.
+ // payload_length: The length of the payload.
+ // length_bytes: The section length, as it is encoded in the module bytes.
+ SectionBuffer(uint32_t module_offset, uint8_t id, size_t payload_length,
+ Vector<const uint8_t> length_bytes)
+ : // ID + length + payload
+ module_offset_(module_offset),
+ bytes_(OwnedVector<uint8_t>::NewForOverwrite(
+ 1 + length_bytes.length() + payload_length)),
+ payload_offset_(1 + length_bytes.length()) {
+ bytes_.start()[0] = id;
+ memcpy(bytes_.start() + 1, &length_bytes.first(), length_bytes.length());
+ }
+
+ SectionCode section_code() const {
+ return static_cast<SectionCode>(bytes_.start()[0]);
+ }
+
+ Vector<const uint8_t> GetCode(WireBytesRef ref) const final {
+ DCHECK_LE(module_offset_, ref.offset());
+ uint32_t offset_in_code_buffer = ref.offset() - module_offset_;
+ return bytes().SubVector(offset_in_code_buffer,
+ offset_in_code_buffer + ref.length());
+ }
+
+ uint32_t module_offset() const { return module_offset_; }
+ Vector<uint8_t> bytes() const { return bytes_.as_vector(); }
+ Vector<uint8_t> payload() const { return bytes() + payload_offset_; }
+ size_t length() const { return bytes_.size(); }
+ size_t payload_offset() const { return payload_offset_; }
+
+ private:
+ const uint32_t module_offset_;
+ const OwnedVector<uint8_t> bytes_;
+ const size_t payload_offset_;
+ };
+
+ // The decoding of a stream of wasm module bytes is organized in states. Each
+ // state provides a buffer to store the bytes required for the current state,
+ // information on how many bytes have already been received, how many bytes
+ // are needed, and a {Next} function which starts the next state once all
+ // bytes of the current state were received.
+ //
+ // The states change according to the following state diagram:
+ //
+ // Start
+ // |
+ // |
+ // v
+ // DecodeModuleHeader
+ // | _________________________________________
+ // | | |
+ // v v |
+ // DecodeSectionID --> DecodeSectionLength --> DecodeSectionPayload
+ // A |
+ // | | (if the section id == code)
+ // | v
+ // | DecodeNumberOfFunctions -- > DecodeFunctionLength
+ // | A |
+ // | | |
+ // | (after all functions were read) | v
+ // ------------------------------------- DecodeFunctionBody
+ //
+ class DecodingState {
+ public:
+ virtual ~DecodingState() = default;
+
+ // Reads the bytes for the current state and returns the number of read
+ // bytes.
+ virtual size_t ReadBytes(AsyncStreamingDecoder* streaming,
+ Vector<const uint8_t> bytes);
+
+ // Returns the next state of the streaming decoding.
+ virtual std::unique_ptr<DecodingState> Next(
+ AsyncStreamingDecoder* streaming) = 0;
+ // The buffer to store the received bytes.
+ virtual Vector<uint8_t> buffer() = 0;
+ // The number of bytes which were already received.
+ size_t offset() const { return offset_; }
+ void set_offset(size_t value) { offset_ = value; }
+ // A flag to indicate if finishing the streaming decoder is allowed without
+ // error.
+ virtual bool is_finishing_allowed() const { return false; }
+
+ private:
+ size_t offset_ = 0;
+ };
+
+ // Forward declarations of the concrete states. This is needed so that they
+ // can access private members of the AsyncStreamingDecoder.
+ class DecodeVarInt32;
+ class DecodeModuleHeader;
+ class DecodeSectionID;
+ class DecodeSectionLength;
+ class DecodeSectionPayload;
+ class DecodeNumberOfFunctions;
+ class DecodeFunctionLength;
+ class DecodeFunctionBody;
+
+ // Creates a buffer for the next section of the module.
+ SectionBuffer* CreateNewBuffer(uint32_t module_offset, uint8_t section_id,
+ size_t length,
+ Vector<const uint8_t> length_bytes);
+
+ std::unique_ptr<DecodingState> Error(const WasmError& error) {
+ if (ok()) processor_->OnError(error);
+ Fail();
+ return std::unique_ptr<DecodingState>(nullptr);
+ }
+
+ std::unique_ptr<DecodingState> Error(std::string message) {
+ return Error(WasmError{module_offset_ - 1, std::move(message)});
+ }
+
+ void ProcessModuleHeader() {
+ if (!ok()) return;
+ if (!processor_->ProcessModuleHeader(state_->buffer(), 0)) Fail();
+ }
+
+ void ProcessSection(SectionBuffer* buffer) {
+ if (!ok()) return;
+ if (!processor_->ProcessSection(
+ buffer->section_code(), buffer->payload(),
+ buffer->module_offset() +
+ static_cast<uint32_t>(buffer->payload_offset()))) {
+ Fail();
+ }
+ }
+
+ void StartCodeSection(int num_functions,
+ std::shared_ptr<WireBytesStorage> wire_bytes_storage,
+ int code_section_length) {
+ if (!ok()) return;
+ // The offset passed to {ProcessCodeSectionHeader} is an error offset and
+ // not the start offset of a buffer. Therefore we need the -1 here.
+ if (!processor_->ProcessCodeSectionHeader(
+ num_functions, module_offset() - 1, std::move(wire_bytes_storage),
+ code_section_length)) {
+ Fail();
+ }
+ }
+
+ void ProcessFunctionBody(Vector<const uint8_t> bytes,
+ uint32_t module_offset) {
+ if (!ok()) return;
+ if (!processor_->ProcessFunctionBody(bytes, module_offset)) Fail();
+ }
+
+ void Fail() {
+ // We reset the {processor_} field to represent failure. This also ensures
+ // that we do not accidentally call further methods on the processor after
+ // failure.
+ processor_.reset();
+ }
+
+ bool ok() const { return processor_ != nullptr; }
+
+ uint32_t module_offset() const { return module_offset_; }
+
+ std::unique_ptr<StreamingProcessor> processor_;
+ std::unique_ptr<DecodingState> state_;
+ std::vector<std::shared_ptr<SectionBuffer>> section_buffers_;
+ bool code_section_processed_ = false;
+ uint32_t module_offset_ = 0;
+ size_t total_size_ = 0;
+
+ // We need wire bytes in an array for deserializing cached modules.
+ std::vector<uint8_t> wire_bytes_for_deserializing_;
+
+ DISALLOW_COPY_AND_ASSIGN(AsyncStreamingDecoder);
+};
+
+void AsyncStreamingDecoder::OnBytesReceived(Vector<const uint8_t> bytes) {
if (deserializing()) {
wire_bytes_for_deserializing_.insert(wire_bytes_for_deserializing_.end(),
bytes.begin(), bytes.end());
@@ -50,8 +246,8 @@ void StreamingDecoder::OnBytesReceived(Vector<const uint8_t> bytes) {
}
}
-size_t StreamingDecoder::DecodingState::ReadBytes(StreamingDecoder* streaming,
- Vector<const uint8_t> bytes) {
+size_t AsyncStreamingDecoder::DecodingState::ReadBytes(
+ AsyncStreamingDecoder* streaming, Vector<const uint8_t> bytes) {
Vector<uint8_t> remaining_buf = buffer() + offset();
size_t num_bytes = std::min(bytes.size(), remaining_buf.size());
TRACE_STREAMING("ReadBytes(%zu bytes)\n", num_bytes);
@@ -60,7 +256,7 @@ size_t StreamingDecoder::DecodingState::ReadBytes(StreamingDecoder* streaming,
return num_bytes;
}
-void StreamingDecoder::Finish() {
+void AsyncStreamingDecoder::Finish() {
TRACE_STREAMING("Finish\n");
if (!ok()) return;
@@ -82,7 +278,8 @@ void StreamingDecoder::Finish() {
return;
}
- OwnedVector<uint8_t> bytes = OwnedVector<uint8_t>::New(total_size_);
+ OwnedVector<uint8_t> bytes =
+ OwnedVector<uint8_t>::NewForOverwrite(total_size_);
uint8_t* cursor = bytes.start();
{
#define BYTES(x) (x & 0xFF), (x >> 8) & 0xFF, (x >> 16) & 0xFF, (x >> 24) & 0xFF
@@ -99,31 +296,20 @@ void StreamingDecoder::Finish() {
processor_->OnFinishedStream(std::move(bytes));
}
-void StreamingDecoder::Abort() {
+void AsyncStreamingDecoder::Abort() {
TRACE_STREAMING("Abort\n");
if (!ok()) return; // Failed already.
processor_->OnAbort();
Fail();
}
-void StreamingDecoder::SetModuleCompiledCallback(
- ModuleCompiledCallback callback) {
- DCHECK_NULL(module_compiled_callback_);
- module_compiled_callback_ = callback;
-}
-
-bool StreamingDecoder::SetCompiledModuleBytes(
- Vector<const uint8_t> compiled_module_bytes) {
- compiled_module_bytes_ = compiled_module_bytes;
- return true;
-}
-
namespace {
class TopTierCompiledCallback {
public:
- TopTierCompiledCallback(std::weak_ptr<NativeModule> native_module,
- StreamingDecoder::ModuleCompiledCallback callback)
+ TopTierCompiledCallback(
+ std::weak_ptr<NativeModule> native_module,
+ AsyncStreamingDecoder::ModuleCompiledCallback callback)
: native_module_(std::move(native_module)),
callback_(std::move(callback)) {}
@@ -142,7 +328,7 @@ class TopTierCompiledCallback {
private:
const std::weak_ptr<NativeModule> native_module_;
- const StreamingDecoder::ModuleCompiledCallback callback_;
+ const AsyncStreamingDecoder::ModuleCompiledCallback callback_;
#ifdef DEBUG
mutable bool called_ = false;
#endif
@@ -150,7 +336,7 @@ class TopTierCompiledCallback {
} // namespace
-void StreamingDecoder::NotifyNativeModuleCreated(
+void AsyncStreamingDecoder::NotifyNativeModuleCreated(
const std::shared_ptr<NativeModule>& native_module) {
if (!module_compiled_callback_) return;
auto* comp_state = native_module->compilation_state();
@@ -162,20 +348,21 @@ void StreamingDecoder::NotifyNativeModuleCreated(
// An abstract class to share code among the states which decode VarInts. This
// class takes over the decoding of the VarInt and then calls the actual decode
// code with the decoded value.
-class StreamingDecoder::DecodeVarInt32 : public DecodingState {
+class AsyncStreamingDecoder::DecodeVarInt32 : public DecodingState {
public:
explicit DecodeVarInt32(size_t max_value, const char* field_name)
: max_value_(max_value), field_name_(field_name) {}
Vector<uint8_t> buffer() override { return ArrayVector(byte_buffer_); }
- size_t ReadBytes(StreamingDecoder* streaming,
+ size_t ReadBytes(AsyncStreamingDecoder* streaming,
Vector<const uint8_t> bytes) override;
- std::unique_ptr<DecodingState> Next(StreamingDecoder* streaming) override;
+ std::unique_ptr<DecodingState> Next(
+ AsyncStreamingDecoder* streaming) override;
virtual std::unique_ptr<DecodingState> NextWithValue(
- StreamingDecoder* streaming) = 0;
+ AsyncStreamingDecoder* streaming) = 0;
protected:
uint8_t byte_buffer_[kMaxVarInt32Size];
@@ -187,11 +374,12 @@ class StreamingDecoder::DecodeVarInt32 : public DecodingState {
size_t bytes_consumed_ = 0;
};
-class StreamingDecoder::DecodeModuleHeader : public DecodingState {
+class AsyncStreamingDecoder::DecodeModuleHeader : public DecodingState {
public:
Vector<uint8_t> buffer() override { return ArrayVector(byte_buffer_); }
- std::unique_ptr<DecodingState> Next(StreamingDecoder* streaming) override;
+ std::unique_ptr<DecodingState> Next(
+ AsyncStreamingDecoder* streaming) override;
private:
// Checks if the magic bytes of the module header are correct.
@@ -202,7 +390,7 @@ class StreamingDecoder::DecodeModuleHeader : public DecodingState {
uint8_t byte_buffer_[kModuleHeaderSize];
};
-class StreamingDecoder::DecodeSectionID : public DecodingState {
+class AsyncStreamingDecoder::DecodeSectionID : public DecodingState {
public:
explicit DecodeSectionID(uint32_t module_offset)
: module_offset_(module_offset) {}
@@ -210,7 +398,8 @@ class StreamingDecoder::DecodeSectionID : public DecodingState {
Vector<uint8_t> buffer() override { return {&id_, 1}; }
bool is_finishing_allowed() const override { return true; }
- std::unique_ptr<DecodingState> Next(StreamingDecoder* streaming) override;
+ std::unique_ptr<DecodingState> Next(
+ AsyncStreamingDecoder* streaming) override;
private:
uint8_t id_ = 0;
@@ -218,7 +407,7 @@ class StreamingDecoder::DecodeSectionID : public DecodingState {
const uint32_t module_offset_;
};
-class StreamingDecoder::DecodeSectionLength : public DecodeVarInt32 {
+class AsyncStreamingDecoder::DecodeSectionLength : public DecodeVarInt32 {
public:
explicit DecodeSectionLength(uint8_t id, uint32_t module_offset)
: DecodeVarInt32(kV8MaxWasmModuleSize, "section length"),
@@ -226,7 +415,7 @@ class StreamingDecoder::DecodeSectionLength : public DecodeVarInt32 {
module_offset_(module_offset) {}
std::unique_ptr<DecodingState> NextWithValue(
- StreamingDecoder* streaming) override;
+ AsyncStreamingDecoder* streaming) override;
private:
const uint8_t section_id_;
@@ -234,33 +423,34 @@ class StreamingDecoder::DecodeSectionLength : public DecodeVarInt32 {
const uint32_t module_offset_;
};
-class StreamingDecoder::DecodeSectionPayload : public DecodingState {
+class AsyncStreamingDecoder::DecodeSectionPayload : public DecodingState {
public:
explicit DecodeSectionPayload(SectionBuffer* section_buffer)
: section_buffer_(section_buffer) {}
Vector<uint8_t> buffer() override { return section_buffer_->payload(); }
- std::unique_ptr<DecodingState> Next(StreamingDecoder* streaming) override;
+ std::unique_ptr<DecodingState> Next(
+ AsyncStreamingDecoder* streaming) override;
private:
SectionBuffer* const section_buffer_;
};
-class StreamingDecoder::DecodeNumberOfFunctions : public DecodeVarInt32 {
+class AsyncStreamingDecoder::DecodeNumberOfFunctions : public DecodeVarInt32 {
public:
explicit DecodeNumberOfFunctions(SectionBuffer* section_buffer)
: DecodeVarInt32(kV8MaxWasmFunctions, "functions count"),
section_buffer_(section_buffer) {}
std::unique_ptr<DecodingState> NextWithValue(
- StreamingDecoder* streaming) override;
+ AsyncStreamingDecoder* streaming) override;
private:
SectionBuffer* const section_buffer_;
};
-class StreamingDecoder::DecodeFunctionLength : public DecodeVarInt32 {
+class AsyncStreamingDecoder::DecodeFunctionLength : public DecodeVarInt32 {
public:
explicit DecodeFunctionLength(SectionBuffer* section_buffer,
size_t buffer_offset,
@@ -274,7 +464,7 @@ class StreamingDecoder::DecodeFunctionLength : public DecodeVarInt32 {
}
std::unique_ptr<DecodingState> NextWithValue(
- StreamingDecoder* streaming) override;
+ AsyncStreamingDecoder* streaming) override;
private:
SectionBuffer* const section_buffer_;
@@ -282,7 +472,7 @@ class StreamingDecoder::DecodeFunctionLength : public DecodeVarInt32 {
const size_t num_remaining_functions_;
};
-class StreamingDecoder::DecodeFunctionBody : public DecodingState {
+class AsyncStreamingDecoder::DecodeFunctionBody : public DecodingState {
public:
explicit DecodeFunctionBody(SectionBuffer* section_buffer,
size_t buffer_offset, size_t function_body_length,
@@ -300,7 +490,8 @@ class StreamingDecoder::DecodeFunctionBody : public DecodingState {
return remaining_buffer.SubVector(0, function_body_length_);
}
- std::unique_ptr<DecodingState> Next(StreamingDecoder* streaming) override;
+ std::unique_ptr<DecodingState> Next(
+ AsyncStreamingDecoder* streaming) override;
private:
SectionBuffer* const section_buffer_;
@@ -310,8 +501,8 @@ class StreamingDecoder::DecodeFunctionBody : public DecodingState {
const uint32_t module_offset_;
};
-size_t StreamingDecoder::DecodeVarInt32::ReadBytes(
- StreamingDecoder* streaming, Vector<const uint8_t> bytes) {
+size_t AsyncStreamingDecoder::DecodeVarInt32::ReadBytes(
+ AsyncStreamingDecoder* streaming, Vector<const uint8_t> bytes) {
Vector<uint8_t> buf = buffer();
Vector<uint8_t> remaining_buf = buf + offset();
size_t new_bytes = std::min(bytes.size(), remaining_buf.size());
@@ -344,8 +535,8 @@ size_t StreamingDecoder::DecodeVarInt32::ReadBytes(
return new_bytes;
}
-std::unique_ptr<StreamingDecoder::DecodingState>
-StreamingDecoder::DecodeVarInt32::Next(StreamingDecoder* streaming) {
+std::unique_ptr<AsyncStreamingDecoder::DecodingState>
+AsyncStreamingDecoder::DecodeVarInt32::Next(AsyncStreamingDecoder* streaming) {
if (!streaming->ok()) return nullptr;
if (value_ > max_value_) {
@@ -358,16 +549,17 @@ StreamingDecoder::DecodeVarInt32::Next(StreamingDecoder* streaming) {
return NextWithValue(streaming);
}
-std::unique_ptr<StreamingDecoder::DecodingState>
-StreamingDecoder::DecodeModuleHeader::Next(StreamingDecoder* streaming) {
+std::unique_ptr<AsyncStreamingDecoder::DecodingState>
+AsyncStreamingDecoder::DecodeModuleHeader::Next(
+ AsyncStreamingDecoder* streaming) {
TRACE_STREAMING("DecodeModuleHeader\n");
streaming->ProcessModuleHeader();
if (!streaming->ok()) return nullptr;
return std::make_unique<DecodeSectionID>(streaming->module_offset());
}
-std::unique_ptr<StreamingDecoder::DecodingState>
-StreamingDecoder::DecodeSectionID::Next(StreamingDecoder* streaming) {
+std::unique_ptr<AsyncStreamingDecoder::DecodingState>
+AsyncStreamingDecoder::DecodeSectionID::Next(AsyncStreamingDecoder* streaming) {
TRACE_STREAMING("DecodeSectionID: %s section\n",
SectionName(static_cast<SectionCode>(id_)));
if (id_ == SectionCode::kCodeSectionCode) {
@@ -383,9 +575,9 @@ StreamingDecoder::DecodeSectionID::Next(StreamingDecoder* streaming) {
return std::make_unique<DecodeSectionLength>(id_, module_offset_);
}
-std::unique_ptr<StreamingDecoder::DecodingState>
-StreamingDecoder::DecodeSectionLength::NextWithValue(
- StreamingDecoder* streaming) {
+std::unique_ptr<AsyncStreamingDecoder::DecodingState>
+AsyncStreamingDecoder::DecodeSectionLength::NextWithValue(
+ AsyncStreamingDecoder* streaming) {
TRACE_STREAMING("DecodeSectionLength(%zu)\n", value_);
SectionBuffer* buf =
streaming->CreateNewBuffer(module_offset_, section_id_, value_,
@@ -410,17 +602,18 @@ StreamingDecoder::DecodeSectionLength::NextWithValue(
return std::make_unique<DecodeSectionPayload>(buf);
}
-std::unique_ptr<StreamingDecoder::DecodingState>
-StreamingDecoder::DecodeSectionPayload::Next(StreamingDecoder* streaming) {
+std::unique_ptr<AsyncStreamingDecoder::DecodingState>
+AsyncStreamingDecoder::DecodeSectionPayload::Next(
+ AsyncStreamingDecoder* streaming) {
TRACE_STREAMING("DecodeSectionPayload\n");
streaming->ProcessSection(section_buffer_);
if (!streaming->ok()) return nullptr;
return std::make_unique<DecodeSectionID>(streaming->module_offset());
}
-std::unique_ptr<StreamingDecoder::DecodingState>
-StreamingDecoder::DecodeNumberOfFunctions::NextWithValue(
- StreamingDecoder* streaming) {
+std::unique_ptr<AsyncStreamingDecoder::DecodingState>
+AsyncStreamingDecoder::DecodeNumberOfFunctions::NextWithValue(
+ AsyncStreamingDecoder* streaming) {
TRACE_STREAMING("DecodeNumberOfFunctions(%zu)\n", value_);
// Copy the bytes we read into the section buffer.
Vector<uint8_t> payload_buf = section_buffer_->payload();
@@ -449,9 +642,9 @@ StreamingDecoder::DecodeNumberOfFunctions::NextWithValue(
value_);
}
-std::unique_ptr<StreamingDecoder::DecodingState>
-StreamingDecoder::DecodeFunctionLength::NextWithValue(
- StreamingDecoder* streaming) {
+std::unique_ptr<AsyncStreamingDecoder::DecodingState>
+AsyncStreamingDecoder::DecodeFunctionLength::NextWithValue(
+ AsyncStreamingDecoder* streaming) {
TRACE_STREAMING("DecodeFunctionLength(%zu)\n", value_);
// Copy the bytes we consumed into the section buffer.
Vector<uint8_t> fun_length_buffer = section_buffer_->bytes() + buffer_offset_;
@@ -472,8 +665,9 @@ StreamingDecoder::DecodeFunctionLength::NextWithValue(
num_remaining_functions_, streaming->module_offset());
}
-std::unique_ptr<StreamingDecoder::DecodingState>
-StreamingDecoder::DecodeFunctionBody::Next(StreamingDecoder* streaming) {
+std::unique_ptr<AsyncStreamingDecoder::DecodingState>
+AsyncStreamingDecoder::DecodeFunctionBody::Next(
+ AsyncStreamingDecoder* streaming) {
TRACE_STREAMING("DecodeFunctionBody\n");
streaming->ProcessFunctionBody(buffer(), module_offset_);
if (!streaming->ok()) return nullptr;
@@ -490,13 +684,13 @@ StreamingDecoder::DecodeFunctionBody::Next(StreamingDecoder* streaming) {
return std::make_unique<DecodeSectionID>(streaming->module_offset());
}
-StreamingDecoder::StreamingDecoder(
+AsyncStreamingDecoder::AsyncStreamingDecoder(
std::unique_ptr<StreamingProcessor> processor)
: processor_(std::move(processor)),
// A module always starts with a module header.
state_(new DecodeModuleHeader()) {}
-StreamingDecoder::SectionBuffer* StreamingDecoder::CreateNewBuffer(
+AsyncStreamingDecoder::SectionBuffer* AsyncStreamingDecoder::CreateNewBuffer(
uint32_t module_offset, uint8_t section_id, size_t length,
Vector<const uint8_t> length_bytes) {
// Section buffers are allocated in the same order they appear in the module,
@@ -506,6 +700,11 @@ StreamingDecoder::SectionBuffer* StreamingDecoder::CreateNewBuffer(
return section_buffers_.back().get();
}
+std::unique_ptr<StreamingDecoder> StreamingDecoder::CreateAsyncStreamingDecoder(
+ std::unique_ptr<StreamingProcessor> processor) {
+ return std::make_unique<AsyncStreamingDecoder>(std::move(processor));
+}
+
} // namespace wasm
} // namespace internal
} // namespace v8
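
The AsyncStreamingDecoder above drives decoding through DecodingState objects: each incoming chunk is copied into the current state's buffer, and once the buffer is full the state's Next() yields the following state. A self-contained sketch of that control flow with toy classes (State, OnBytesReceived and the non-empty-buffer assumption are illustrative, not the V8 types):

#include <algorithm>
#include <cstdint>
#include <cstring>
#include <memory>
#include <vector>

// Minimal decoding state: a fixed-size buffer plus a transition to run
// once the buffer has been filled. Assumes every state wants >= 1 byte.
class State {
 public:
  virtual ~State() = default;
  virtual std::vector<uint8_t>& buffer() = 0;
  virtual std::unique_ptr<State> Next() = 0;  // called when buffer is full
  size_t offset = 0;                          // bytes received so far
};

// Feeds one chunk into the state machine, switching states as buffers fill.
// A null state signals that decoding has finished or failed.
void OnBytesReceived(std::unique_ptr<State>& state, const uint8_t* data,
                     size_t size) {
  size_t current = 0;
  while (state != nullptr && current < size) {
    std::vector<uint8_t>& buf = state->buffer();
    size_t n = std::min(buf.size() - state->offset, size - current);
    std::memcpy(buf.data() + state->offset, data + current, n);
    state->offset += n;
    current += n;
    if (state->offset == buf.size()) state = state->Next();
  }
}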
diff --git a/chromium/v8/src/wasm/streaming-decoder.h b/chromium/v8/src/wasm/streaming-decoder.h
index f3203e70274..bdf3218d1ef 100644
--- a/chromium/v8/src/wasm/streaming-decoder.h
+++ b/chromium/v8/src/wasm/streaming-decoder.h
@@ -12,6 +12,7 @@
#include "src/utils/vector.h"
#include "src/wasm/compilation-environment.h"
#include "src/wasm/wasm-constants.h"
+#include "src/wasm/wasm-engine.h"
#include "src/wasm/wasm-result.h"
namespace v8 {
@@ -66,220 +67,57 @@ class V8_EXPORT_PRIVATE StreamingProcessor {
// and function bodies.
class V8_EXPORT_PRIVATE StreamingDecoder {
public:
- explicit StreamingDecoder(std::unique_ptr<StreamingProcessor> processor);
+ virtual ~StreamingDecoder() = default;
// The buffer passed into OnBytesReceived is owned by the caller.
- void OnBytesReceived(Vector<const uint8_t> bytes);
+ virtual void OnBytesReceived(Vector<const uint8_t> bytes) = 0;
- void Finish();
+ virtual void Finish() = 0;
- void Abort();
+ virtual void Abort() = 0;
// Notify the StreamingDecoder that compilation ended and the
// StreamingProcessor should not be called anymore.
- void NotifyCompilationEnded() { Fail(); }
+ virtual void NotifyCompilationEnded() = 0;
// Caching support.
// Sets the callback that is called after the module is fully compiled.
using ModuleCompiledCallback =
std::function<void(const std::shared_ptr<NativeModule>&)>;
- void SetModuleCompiledCallback(ModuleCompiledCallback callback);
- // Passes previously compiled module bytes from the embedder's cache.
- bool SetCompiledModuleBytes(Vector<const uint8_t> compiled_module_bytes);
-
- void NotifyNativeModuleCreated(
- const std::shared_ptr<NativeModule>& native_module);
-
- Vector<const char> url() { return VectorOf(url_); }
- void SetUrl(Vector<const char> url) {
- url_.assign(url.begin(), url.length());
- }
-
- private:
- // TODO(ahaas): Put the whole private state of the StreamingDecoder into the
- // cc file (PIMPL design pattern).
-
- // The SectionBuffer is the data object for the content of a single section.
- // It stores all bytes of the section (including section id and section
- // length), and the offset where the actual payload starts.
- class SectionBuffer : public WireBytesStorage {
- public:
- // id: The section id.
- // payload_length: The length of the payload.
- // length_bytes: The section length, as it is encoded in the module bytes.
- SectionBuffer(uint32_t module_offset, uint8_t id, size_t payload_length,
- Vector<const uint8_t> length_bytes)
- : // ID + length + payload
- module_offset_(module_offset),
- bytes_(OwnedVector<uint8_t>::New(1 + length_bytes.length() +
- payload_length)),
- payload_offset_(1 + length_bytes.length()) {
- bytes_.start()[0] = id;
- memcpy(bytes_.start() + 1, &length_bytes.first(), length_bytes.length());
- }
-
- SectionCode section_code() const {
- return static_cast<SectionCode>(bytes_.start()[0]);
- }
-
- Vector<const uint8_t> GetCode(WireBytesRef ref) const final {
- DCHECK_LE(module_offset_, ref.offset());
- uint32_t offset_in_code_buffer = ref.offset() - module_offset_;
- return bytes().SubVector(offset_in_code_buffer,
- offset_in_code_buffer + ref.length());
- }
-
- uint32_t module_offset() const { return module_offset_; }
- Vector<uint8_t> bytes() const { return bytes_.as_vector(); }
- Vector<uint8_t> payload() const { return bytes() + payload_offset_; }
- size_t length() const { return bytes_.size(); }
- size_t payload_offset() const { return payload_offset_; }
-
- private:
- const uint32_t module_offset_;
- const OwnedVector<uint8_t> bytes_;
- const size_t payload_offset_;
- };
-
- // The decoding of a stream of wasm module bytes is organized in states. Each
- // state provides a buffer to store the bytes required for the current state,
- // information on how many bytes have already been received, how many bytes
- // are needed, and a {Next} function which starts the next state once all
- // bytes of the current state were received.
- //
- // The states change according to the following state diagram:
- //
- // Start
- // |
- // |
- // v
- // DecodeModuleHeader
- // | _________________________________________
- // | | |
- // v v |
- // DecodeSectionID --> DecodeSectionLength --> DecodeSectionPayload
- // A |
- // | | (if the section id == code)
- // | v
- // | DecodeNumberOfFunctions -- > DecodeFunctionLength
- // | A |
- // | | |
- // | (after all functions were read) | v
- // ------------------------------------- DecodeFunctionBody
- //
- class DecodingState {
- public:
- virtual ~DecodingState() = default;
-
- // Reads the bytes for the current state and returns the number of read
- // bytes.
- virtual size_t ReadBytes(StreamingDecoder* streaming,
- Vector<const uint8_t> bytes);
-
- // Returns the next state of the streaming decoding.
- virtual std::unique_ptr<DecodingState> Next(
- StreamingDecoder* streaming) = 0;
- // The buffer to store the received bytes.
- virtual Vector<uint8_t> buffer() = 0;
- // The number of bytes which were already received.
- size_t offset() const { return offset_; }
- void set_offset(size_t value) { offset_ = value; }
- // A flag to indicate if finishing the streaming decoder is allowed without
- // error.
- virtual bool is_finishing_allowed() const { return false; }
- private:
- size_t offset_ = 0;
- };
-
- // Forward declarations of the concrete states. This is needed so that they
- // can access private members of the StreamingDecoder.
- class DecodeVarInt32;
- class DecodeModuleHeader;
- class DecodeSectionID;
- class DecodeSectionLength;
- class DecodeSectionPayload;
- class DecodeNumberOfFunctions;
- class DecodeFunctionLength;
- class DecodeFunctionBody;
-
- // Creates a buffer for the next section of the module.
- SectionBuffer* CreateNewBuffer(uint32_t module_offset, uint8_t section_id,
- size_t length,
- Vector<const uint8_t> length_bytes);
-
- std::unique_ptr<DecodingState> Error(const WasmError& error) {
- if (ok()) processor_->OnError(error);
- Fail();
- return std::unique_ptr<DecodingState>(nullptr);
- }
-
- std::unique_ptr<DecodingState> Error(std::string message) {
- return Error(WasmError{module_offset_ - 1, std::move(message)});
- }
-
- void ProcessModuleHeader() {
- if (!ok()) return;
- if (!processor_->ProcessModuleHeader(state_->buffer(), 0)) Fail();
+ void SetModuleCompiledCallback(ModuleCompiledCallback callback) {
+ module_compiled_callback_ = callback;
}
- void ProcessSection(SectionBuffer* buffer) {
- if (!ok()) return;
- if (!processor_->ProcessSection(
- buffer->section_code(), buffer->payload(),
- buffer->module_offset() +
- static_cast<uint32_t>(buffer->payload_offset()))) {
- Fail();
- }
+ // Passes previously compiled module bytes from the embedder's cache.
+ bool SetCompiledModuleBytes(Vector<const uint8_t> compiled_module_bytes) {
+ compiled_module_bytes_ = compiled_module_bytes;
+ return true;
}
- void StartCodeSection(int num_functions,
- std::shared_ptr<WireBytesStorage> wire_bytes_storage,
- int code_section_length) {
- if (!ok()) return;
- // The offset passed to {ProcessCodeSectionHeader} is an error offset and
- // not the start offset of a buffer. Therefore we need the -1 here.
- if (!processor_->ProcessCodeSectionHeader(
- num_functions, module_offset() - 1, std::move(wire_bytes_storage),
- code_section_length)) {
- Fail();
- }
- }
+ virtual void NotifyNativeModuleCreated(
+ const std::shared_ptr<NativeModule>& native_module) = 0;
- void ProcessFunctionBody(Vector<const uint8_t> bytes,
- uint32_t module_offset) {
- if (!ok()) return;
- if (!processor_->ProcessFunctionBody(bytes, module_offset)) Fail();
- }
+ Vector<const char> url() { return VectorOf(url_); }
- void Fail() {
- // We reset the {processor_} field to represent failure. This also ensures
- // that we do not accidentally call further methods on the processor after
- // failure.
- processor_.reset();
+ void SetUrl(Vector<const char> url) {
+ url_.assign(url.begin(), url.length());
}
- bool ok() const { return processor_ != nullptr; }
+ static std::unique_ptr<StreamingDecoder> CreateAsyncStreamingDecoder(
+ std::unique_ptr<StreamingProcessor> processor);
- uint32_t module_offset() const { return module_offset_; }
+ static std::unique_ptr<StreamingDecoder> CreateSyncStreamingDecoder(
+ Isolate* isolate, const WasmFeatures& enabled, Handle<Context> context,
+ const char* api_method_name_for_errors,
+ std::shared_ptr<CompilationResultResolver> resolver);
+ protected:
bool deserializing() const { return !compiled_module_bytes_.empty(); }
- std::unique_ptr<StreamingProcessor> processor_;
- std::unique_ptr<DecodingState> state_;
- std::vector<std::shared_ptr<SectionBuffer>> section_buffers_;
- bool code_section_processed_ = false;
- uint32_t module_offset_ = 0;
- size_t total_size_ = 0;
std::string url_;
-
- // Caching support.
- ModuleCompiledCallback module_compiled_callback_ = nullptr;
- // We need wire bytes in an array for deserializing cached modules.
- std::vector<uint8_t> wire_bytes_for_deserializing_;
+ ModuleCompiledCallback module_compiled_callback_;
Vector<const uint8_t> compiled_module_bytes_;
-
- DISALLOW_COPY_AND_ASSIGN(StreamingDecoder);
};
} // namespace wasm
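
With StreamingDecoder reduced to a pure interface, callers obtain an implementation through the two factories declared above. A usage sketch of the async path (MyProcessor, HasMoreBytes and NextChunk are hypothetical embedder-side pieces; the sync factory additionally takes an isolate, context, API method name and result resolver):

// Hedged sketch, not taken from the V8 sources.
std::unique_ptr<wasm::StreamingDecoder> decoder =
    wasm::StreamingDecoder::CreateAsyncStreamingDecoder(
        std::make_unique<MyProcessor>());
while (HasMoreBytes()) {
  decoder->OnBytesReceived(NextChunk());  // Vector<const uint8_t> chunks
}
decoder->Finish();  // or decoder->Abort() if the fetch was cancelled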
diff --git a/chromium/v8/src/wasm/struct-types.h b/chromium/v8/src/wasm/struct-types.h
index 6cd4271c24b..cee563b89f4 100644
--- a/chromium/v8/src/wasm/struct-types.h
+++ b/chromium/v8/src/wasm/struct-types.h
@@ -18,8 +18,11 @@ namespace wasm {
class StructType : public ZoneObject {
public:
StructType(uint32_t field_count, uint32_t* field_offsets,
- const ValueType* reps)
- : field_count_(field_count), field_offsets_(field_offsets), reps_(reps) {
+ const ValueType* reps, const bool* mutabilities)
+ : field_count_(field_count),
+ field_offsets_(field_offsets),
+ reps_(reps),
+ mutabilities_(mutabilities) {
InitializeOffsets();
}
@@ -30,15 +33,26 @@ class StructType : public ZoneObject {
return reps_[index];
}
+ bool mutability(uint32_t index) const {
+ DCHECK_LT(index, field_count_);
+ return mutabilities_[index];
+ }
+
// Iteration support.
base::iterator_range<const ValueType*> fields() const {
return {reps_, reps_ + field_count_};
}
+ base::iterator_range<const bool*> mutabilities() const {
+ return {mutabilities_, mutabilities_ + field_count_};
+ }
bool operator==(const StructType& other) const {
if (this == &other) return true;
if (field_count() != other.field_count()) return false;
- return std::equal(fields().begin(), fields().end(), other.fields().begin());
+ return std::equal(fields().begin(), fields().end(),
+ other.fields().begin()) &&
+ std::equal(mutabilities().begin(), mutabilities().end(),
+ other.mutabilities().begin());
}
bool operator!=(const StructType& other) const { return !(*this == other); }
@@ -70,17 +84,20 @@ class StructType : public ZoneObject {
: field_count_(field_count),
zone_(zone),
cursor_(0),
- buffer_(zone->NewArray<ValueType>(static_cast<int>(field_count))) {}
+ buffer_(zone->NewArray<ValueType>(static_cast<int>(field_count))),
+ mutabilities_(zone->NewArray<bool>(static_cast<int>(field_count))) {}
- void AddField(ValueType type) {
+ void AddField(ValueType type, bool mutability) {
DCHECK_LT(cursor_, field_count_);
+ mutabilities_[cursor_] = mutability;
buffer_[cursor_++] = type;
}
StructType* Build() {
DCHECK_EQ(cursor_, field_count_);
uint32_t* offsets = zone_->NewArray<uint32_t>(field_count_);
- return new (zone_) StructType(field_count_, offsets, buffer_);
+ return new (zone_)
+ StructType(field_count_, offsets, buffer_, mutabilities_);
}
private:
@@ -88,25 +105,30 @@ class StructType : public ZoneObject {
Zone* zone_;
uint32_t cursor_;
ValueType* buffer_;
+ bool* mutabilities_;
};
private:
uint32_t field_count_;
uint32_t* field_offsets_;
const ValueType* reps_;
+ const bool* mutabilities_;
};
class ArrayType : public ZoneObject {
public:
- constexpr explicit ArrayType(ValueType rep) : rep_(rep) {}
+ constexpr explicit ArrayType(ValueType rep, bool mutability)
+ : rep_(rep), mutability_(mutability) {}
ValueType element_type() const { return rep_; }
+ bool mutability() const { return mutability_; }
bool operator==(const ArrayType& other) const { return rep_ == other.rep_; }
bool operator!=(const ArrayType& other) const { return rep_ != other.rep_; }
private:
const ValueType rep_;
+ const bool mutability_;
};
} // namespace wasm
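
StructType now carries a mutabilities array parallel to the field types, and operator== compares both, so two struct types with identical field types but different mutability are distinct. A self-contained sketch of that comparison (ToyStructType is illustrative, not the V8 class):

#include <cstdint>
#include <vector>

struct ToyStructType {
  std::vector<uint32_t> field_types;  // stand-in for the ValueType reps
  std::vector<bool> mutabilities;     // one flag per field

  bool operator==(const ToyStructType& other) const {
    return field_types == other.field_types &&
           mutabilities == other.mutabilities;
  }
  bool operator!=(const ToyStructType& other) const {
    return !(*this == other);
  }
};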
diff --git a/chromium/v8/src/wasm/sync-streaming-decoder.cc b/chromium/v8/src/wasm/sync-streaming-decoder.cc
new file mode 100644
index 00000000000..7152806d9d9
--- /dev/null
+++ b/chromium/v8/src/wasm/sync-streaming-decoder.cc
@@ -0,0 +1,112 @@
+// Copyright 2020 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/execution/isolate.h"
+#include "src/wasm/streaming-decoder.h"
+#include "src/wasm/wasm-engine.h"
+#include "src/wasm/wasm-objects-inl.h"
+#include "src/wasm/wasm-serialization.h"
+
+namespace v8 {
+namespace internal {
+namespace wasm {
+
+class V8_EXPORT_PRIVATE SyncStreamingDecoder : public StreamingDecoder {
+ public:
+ SyncStreamingDecoder(Isolate* isolate, const WasmFeatures& enabled,
+ Handle<Context> context,
+ const char* api_method_name_for_errors,
+ std::shared_ptr<CompilationResultResolver> resolver)
+ : isolate_(isolate),
+ enabled_(enabled),
+ context_(context),
+ api_method_name_for_errors_(api_method_name_for_errors),
+ resolver_(resolver) {}
+
+ // The buffer passed into OnBytesReceived is owned by the caller.
+ void OnBytesReceived(Vector<const uint8_t> bytes) override {
+ buffer_.emplace_back(bytes.size());
+ CHECK_EQ(buffer_.back().size(), bytes.size());
+ std::memcpy(buffer_.back().data(), bytes.data(), bytes.size());
+ buffer_size_ += bytes.size();
+ }
+
+ void Finish() override {
+ // We copy all received chunks into one byte buffer.
+ auto bytes = std::make_unique<uint8_t[]>(buffer_size_);
+ uint8_t* destination = bytes.get();
+ for (auto& chunk : buffer_) {
+ std::memcpy(destination, chunk.data(), chunk.size());
+ destination += chunk.size();
+ }
+ CHECK_EQ(destination - bytes.get(), buffer_size_);
+
+ // Check if we can deserialize the module from cache.
+ if (deserializing()) {
+ HandleScope scope(isolate_);
+ SaveAndSwitchContext saved_context(isolate_, *context_);
+
+ MaybeHandle<WasmModuleObject> module_object = DeserializeNativeModule(
+ isolate_, compiled_module_bytes_,
+ Vector<const uint8_t>(bytes.get(), buffer_size_), url());
+
+ if (!module_object.is_null()) {
+ Handle<WasmModuleObject> module = module_object.ToHandleChecked();
+ resolver_->OnCompilationSucceeded(module);
+ return;
+ }
+ }
+
+ // Compile the received bytes synchronously.
+ ModuleWireBytes wire_bytes(bytes.get(), bytes.get() + buffer_size_);
+ ErrorThrower thrower(isolate_, api_method_name_for_errors_);
+ MaybeHandle<WasmModuleObject> module_object =
+ isolate_->wasm_engine()->SyncCompile(isolate_, enabled_, &thrower,
+ wire_bytes);
+ if (thrower.error()) {
+ resolver_->OnCompilationFailed(thrower.Reify());
+ return;
+ }
+ Handle<WasmModuleObject> module = module_object.ToHandleChecked();
+ if (module_compiled_callback_) {
+ module_compiled_callback_(module->shared_native_module());
+ }
+ resolver_->OnCompilationSucceeded(module);
+ }
+
+ void Abort() override {

+    // Abort is fully handled by the API; we only clear the buffer.
+ buffer_.clear();
+ }
+
+ void NotifyCompilationEnded() override { buffer_.clear(); }
+
+ void NotifyNativeModuleCreated(
+ const std::shared_ptr<NativeModule>&) override {
+ // This function is only called from the {AsyncCompileJob}.
+ UNREACHABLE();
+ }
+
+ private:
+ Isolate* isolate_;
+ const WasmFeatures enabled_;
+ Handle<Context> context_;
+ const char* api_method_name_for_errors_;
+ std::shared_ptr<CompilationResultResolver> resolver_;
+
+ std::vector<std::vector<uint8_t>> buffer_;
+ size_t buffer_size_ = 0;
+};
+
+std::unique_ptr<StreamingDecoder> StreamingDecoder::CreateSyncStreamingDecoder(
+ Isolate* isolate, const WasmFeatures& enabled, Handle<Context> context,
+ const char* api_method_name_for_errors,
+ std::shared_ptr<CompilationResultResolver> resolver) {
+ return std::make_unique<SyncStreamingDecoder>(isolate, enabled, context,
+ api_method_name_for_errors,
+ std::move(resolver));
+}
+} // namespace wasm
+} // namespace internal
+} // namespace v8
diff --git a/chromium/v8/src/wasm/value-type.h b/chromium/v8/src/wasm/value-type.h
index 357dafbe2c7..3189629103a 100644
--- a/chromium/v8/src/wasm/value-type.h
+++ b/chromium/v8/src/wasm/value-type.h
@@ -20,44 +20,40 @@ namespace wasm {
// Type for holding simd values, defined in wasm-value.h.
class Simd128;
-// Type lattice: Given a fixed struct type S, the following lattice
-// defines the subtyping relation among types:
-// For every two types connected by a line, the top type is a
-// (direct) subtype of the bottom type.
-//
-// AnyRef
-// / \
-// / EqRef
-// / / \
-// FuncRef ExnRef OptRef(S)
-// \ | / \
-// I32 I64 F32 F64 NullRef Ref(S)
-// \ \ \ \ | /
-// ---------------------- Bottom ---------
// Format: kind, log2Size, code, machineType, shortName, typeName
//
// Some of these types are from proposals that are not standardized yet:
-// - "ref" types per https://github.com/WebAssembly/function-references
-// - "optref"/"eqref" per https://github.com/WebAssembly/gc
-//
-// TODO(7748): Extend this with struct and function subtyping.
-// Keep up to date with funcref vs. anyref subtyping.
-#define FOREACH_VALUE_TYPE(V) \
- V(Stmt, -1, Void, None, 'v', "<stmt>") \
- V(I32, 2, I32, Int32, 'i', "i32") \
- V(I64, 3, I64, Int64, 'l', "i64") \
- V(F32, 2, F32, Float32, 'f', "f32") \
- V(F64, 3, F64, Float64, 'd', "f64") \
- V(S128, 4, S128, Simd128, 's', "s128") \
- V(AnyRef, kSystemPointerSizeLog2, AnyRef, TaggedPointer, 'r', "anyref") \
- V(FuncRef, kSystemPointerSizeLog2, FuncRef, TaggedPointer, 'a', "funcref") \
- V(NullRef, kSystemPointerSizeLog2, NullRef, TaggedPointer, 'n', "nullref") \
- V(ExnRef, kSystemPointerSizeLog2, ExnRef, TaggedPointer, 'e', "exn") \
- V(Ref, kSystemPointerSizeLog2, Ref, TaggedPointer, '*', "ref") \
- V(OptRef, kSystemPointerSizeLog2, OptRef, TaggedPointer, 'o', "optref") \
- V(EqRef, kSystemPointerSizeLog2, EqRef, TaggedPointer, 'q', "eqref") \
+// - "ref"/"optref" (a.k.a. "ref null") per
+// https://github.com/WebAssembly/function-references
+// - "rtt" per https://github.com/WebAssembly/gc
+#define FOREACH_VALUE_TYPE(V) \
+ V(Stmt, -1, Void, None, 'v', "<stmt>") \
+ V(I32, 2, I32, Int32, 'i', "i32") \
+ V(I64, 3, I64, Int64, 'l', "i64") \
+ V(F32, 2, F32, Float32, 'f', "f32") \
+ V(F64, 3, F64, Float64, 'd', "f64") \
+ V(S128, 4, S128, Simd128, 's', "s128") \
+ V(I8, 0, I8, Int8, 'b', "i8") \
+ V(I16, 1, I16, Int16, 'h', "i16") \
+ V(Rtt, kSystemPointerSizeLog2, Rtt, TaggedPointer, 't', "rtt") \
+ V(Ref, kSystemPointerSizeLog2, Ref, TaggedPointer, 'r', "ref") \
+ V(OptRef, kSystemPointerSizeLog2, OptRef, TaggedPointer, 'n', "ref null") \
V(Bottom, -1, Void, None, '*', "<bot>")
+enum HeapType : uint32_t {
+ kHeapFunc = kV8MaxWasmTypes, // shorthand: c
+ kHeapExtern, // shorthand: e
+ kHeapEq, // shorthand: q
+ kHeapExn // shorthand: x
+};
+enum Nullability : bool { kNonNullable, kNullable };
+
+V8_INLINE constexpr bool is_generic_heap_type(HeapType ht) {
+ STATIC_ASSERT(kHeapExtern >= kHeapFunc && kHeapEq >= kHeapFunc &&
+ kHeapExn >= kHeapFunc);
+ return ht >= kHeapFunc;
+}
+
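
The HeapType and Nullability values above feed into the ValueType class below, which packs kind, heap type and rtt inheritance depth into a single 32-bit bit field. A self-contained sketch of that packing idea (the field widths chosen here are illustrative, not V8's actual layout):

#include <cstdint>

// Illustrative layout: kind in bits 0-4, heap type in bits 5-24,
// rtt inheritance depth in bits 25-31.
constexpr uint32_t EncodeType(uint8_t kind, uint32_t heap_type, uint8_t depth) {
  return (kind & 0x1Fu) | ((heap_type & 0xFFFFFu) << 5) |
         ((uint32_t{depth} & 0x7Fu) << 25);
}
constexpr uint8_t DecodeKind(uint32_t bits) { return bits & 0x1Fu; }
constexpr uint32_t DecodeHeapType(uint32_t bits) {
  return (bits >> 5) & 0xFFFFFu;
}
constexpr uint8_t DecodeDepth(uint32_t bits) { return (bits >> 25) & 0x7Fu; }

static_assert(DecodeHeapType(EncodeType(/*kind=*/9, /*heap=*/42,
                                        /*depth=*/3)) == 42,
              "kind, heap type and depth round-trip through one 32-bit word");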
class ValueType {
public:
enum Kind : uint8_t {
@@ -66,37 +62,68 @@ class ValueType {
#undef DEF_ENUM
};
- constexpr bool has_immediate() const {
- return kind() == kRef || kind() == kOptRef;
+ constexpr bool is_reference_type() const {
+ return kind() == kRef || kind() == kOptRef || kind() == kRtt;
+ }
+
+ constexpr bool is_packed() const { return kind() == kI8 || kind() == kI16; }
+
+ constexpr bool is_nullable() const { return kind() == kOptRef; }
+
+ constexpr bool is_reference_to(HeapType htype) const {
+ return (kind() == kRef || kind() == kOptRef) && heap_type() == htype;
+ }
+
+ constexpr ValueType Unpacked() const {
+ return is_packed() ? Primitive(kI32) : *this;
+ }
+
+ constexpr bool has_index() const {
+ return is_reference_type() && !is_generic_heap_type(heap_type());
}
+ constexpr bool has_depth() const { return kind() == kRtt; }
constexpr ValueType() : bit_field_(KindField::encode(kStmt)) {}
- explicit constexpr ValueType(Kind kind)
- : bit_field_(KindField::encode(kind)) {
-#if V8_HAS_CXX14_CONSTEXPR
- DCHECK(!has_immediate());
-#endif
+ static constexpr ValueType Primitive(Kind kind) {
+ CONSTEXPR_DCHECK(kind == kBottom || kind <= kI16);
+ return ValueType(KindField::encode(kind));
}
- constexpr ValueType(Kind kind, uint32_t ref_index)
- : bit_field_(KindField::encode(kind) | RefIndexField::encode(ref_index)) {
-#if V8_HAS_CXX14_CONSTEXPR
- DCHECK(has_immediate());
-#endif
+ static constexpr ValueType Ref(HeapType heap_type, Nullability nullability) {
+ return ValueType(
+ KindField::encode(nullability == kNullable ? kOptRef : kRef) |
+ HeapTypeField::encode(heap_type));
+ }
+
+ static constexpr ValueType Rtt(HeapType heap_type,
+ uint8_t inheritance_depth) {
+ return ValueType(KindField::encode(kRtt) |
+ HeapTypeField::encode(heap_type) |
+ DepthField::encode(inheritance_depth));
+ }
+
+ static constexpr ValueType FromRawBitField(uint32_t bit_field) {
+ return ValueType(bit_field);
}
constexpr Kind kind() const { return KindField::decode(bit_field_); }
+ constexpr HeapType heap_type() const {
+ CONSTEXPR_DCHECK(is_reference_type());
+ return HeapTypeField::decode(bit_field_);
+ }
+ constexpr uint8_t depth() const {
+ CONSTEXPR_DCHECK(has_depth());
+ return DepthField::decode(bit_field_);
+ }
constexpr uint32_t ref_index() const {
-#if V8_HAS_CXX14_CONSTEXPR
- DCHECK(has_immediate());
-#endif
- return RefIndexField::decode(bit_field_);
+ CONSTEXPR_DCHECK(has_index());
+ return static_cast<uint32_t>(heap_type());
}
+ constexpr uint32_t raw_bit_field() const { return bit_field_; }
+
constexpr int element_size_log2() const {
-#if V8_HAS_CXX14_CONSTEXPR
- DCHECK_NE(kStmt, kind());
- DCHECK_NE(kBottom, kind());
-#endif
+ CONSTEXPR_DCHECK(kStmt != kind());
+ CONSTEXPR_DCHECK(kBottom != kind());
constexpr int kElementSizeLog2[] = {
#define ELEM_SIZE_LOG2(kind, log2Size, ...) log2Size,
@@ -116,59 +143,8 @@ class ValueType {
return bit_field_ != other.bit_field_;
}
- // TODO(7748): Extend this with struct and function subtyping.
- // Keep up to date with funcref vs. anyref subtyping.
- constexpr bool IsSubTypeOf(ValueType other) const {
- return (*this == other) || (other.kind() == kAnyRef && IsReferenceType()) ||
- (kind() == kNullRef && other.kind() != kRef &&
- other.IsReferenceType()) ||
- (other.kind() == kEqRef &&
- (kind() == kExnRef || kind() == kOptRef || kind() == kRef)) ||
- (kind() == kRef && other.kind() == kOptRef &&
- ref_index() == other.ref_index());
- }
-
- constexpr bool IsReferenceType() const {
- return kind() == kAnyRef || kind() == kFuncRef || kind() == kNullRef ||
- kind() == kExnRef || kind() == kRef || kind() == kOptRef ||
- kind() == kEqRef;
- }
-
- // TODO(7748): Extend this with struct and function subtyping.
- // Keep up to date with funcref vs. anyref subtyping.
- static ValueType CommonSubType(ValueType a, ValueType b) {
- if (a == b) return a;
- // The only sub type of any value type is {bot}.
- if (!a.IsReferenceType() || !b.IsReferenceType()) {
- return ValueType(kBottom);
- }
- if (a.IsSubTypeOf(b)) return a;
- if (b.IsSubTypeOf(a)) return b;
- // {a} and {b} are not each other's subtype.
- // If one of them is not nullable, their greatest subtype is bottom,
- // otherwise null.
- if (a.kind() == kRef || b.kind() == kRef) return ValueType(kBottom);
- return ValueType(kNullRef);
- }
-
- constexpr ValueTypeCode value_type_code() const {
-#if V8_HAS_CXX14_CONSTEXPR
- DCHECK_NE(kBottom, kind());
-#endif
-
- constexpr ValueTypeCode kValueTypeCode[] = {
-#define TYPE_CODE(kind, log2Size, code, ...) kLocal##code,
- FOREACH_VALUE_TYPE(TYPE_CODE)
-#undef TYPE_CODE
- };
-
- return kValueTypeCode[kind()];
- }
-
constexpr MachineType machine_type() const {
-#if V8_HAS_CXX14_CONSTEXPR
- DCHECK_NE(kBottom, kind());
-#endif
+ CONSTEXPR_DCHECK(kBottom != kind());
constexpr MachineType kMachineType[] = {
#define MACH_TYPE(kind, log2Size, code, machineType, ...) \
@@ -184,22 +160,70 @@ class ValueType {
return machine_type().representation();
}
+ constexpr ValueTypeCode value_type_code() const {
+ CONSTEXPR_DCHECK(kind() != kBottom);
+ switch (kind()) {
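+ // Nullable references to generic heap types are encoded with their
+ // established shorthand codes (funcref, externref, eqref, exnref).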
+ case kOptRef:
+ switch (heap_type()) {
+ case kHeapFunc:
+ return kLocalFuncRef;
+ case kHeapExtern:
+ return kLocalExternRef;
+ case kHeapEq:
+ return kLocalEqRef;
+ case kHeapExn:
+ return kLocalExnRef;
+ default:
+ return kLocalOptRef;
+ }
+ case kRef:
+ return kLocalRef;
+ case kStmt:
+ return kLocalVoid;
+ case kRtt:
+ return kLocalRtt;
+ default:
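+ // Numeric types: the binary codes descend in the same order as the
+ // kind enum (kLocalI32 = 0x7f, kLocalI64 = 0x7e, ...).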
+ return static_cast<ValueTypeCode>(kLocalI32 - (kind() - kI32));
+ }
+ }
+
+ constexpr bool encoding_needs_heap_type() const {
+ return kind() == kRef || kind() == kRtt ||
+ (kind() == kOptRef && !is_generic_heap_type(heap_type()));
+ }
+
+ constexpr uint32_t heap_type_code() const {
+ CONSTEXPR_DCHECK(encoding_needs_heap_type());
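+ // Generic heap types reuse the value type shorthand codes; indexed types
+ // encode their type index directly.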
+ switch (heap_type()) {
+ case kHeapFunc:
+ return kLocalFuncRef;
+ case kHeapExn:
+ return kLocalExnRef;
+ case kHeapExtern:
+ return kLocalExternRef;
+ case kHeapEq:
+ return kLocalEqRef;
+ default:
+ return static_cast<uint32_t>(heap_type());
+ }
+ }
+
static ValueType For(MachineType type) {
switch (type.representation()) {
case MachineRepresentation::kWord8:
case MachineRepresentation::kWord16:
case MachineRepresentation::kWord32:
- return ValueType(kI32);
+ return Primitive(kI32);
case MachineRepresentation::kWord64:
- return ValueType(kI64);
+ return Primitive(kI64);
case MachineRepresentation::kFloat32:
- return ValueType(kF32);
+ return Primitive(kF32);
case MachineRepresentation::kFloat64:
- return ValueType(kF64);
+ return Primitive(kF64);
case MachineRepresentation::kTaggedPointer:
- return ValueType(kAnyRef);
+ return Ref(kHeapExtern, kNullable);
case MachineRepresentation::kSimd128:
- return ValueType(kS128);
+ return Primitive(kS128);
default:
UNREACHABLE();
}
@@ -215,20 +239,62 @@ class ValueType {
return kShortName[kind()];
}
- constexpr const char* type_name() const {
+ const std::string type_name() const {
+ std::ostringstream buf;
+ switch (kind()) {
+ case kRef:
+ buf << "(ref " << heap_name() << ")";
+ break;
+ case kOptRef:
+ if (is_generic_heap_type(heap_type())) {
+ // We prefer the shorthand to be backwards-compatible with previous
+ // proposals.
+ buf << heap_name() << "ref";
+ } else {
+ buf << "(ref null " << heap_name() << ")";
+ }
+ break;
+ case kRtt:
+ buf << "(rtt " << depth() << " " << heap_name() + ")";
+ break;
+ default:
+ buf << kind_name();
+ }
+ return buf.str();
+ }
+
+ private:
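+ // Bit field layout (32 bits total): kind in bits 0..4, heap type in bits
+ // 5..24, rtt inheritance depth in bits 25..31.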
+ using KindField = base::BitField<Kind, 0, 5>;
+ using HeapTypeField = base::BitField<HeapType, 5, 20>;
+ using DepthField = base::BitField<uint8_t, 25, 7>;
+
+ constexpr explicit ValueType(uint32_t bit_field) : bit_field_(bit_field) {}
+
+ constexpr const char* kind_name() const {
constexpr const char* kTypeName[] = {
-#define TYPE_NAME(kind, log2Size, code, machineType, shortName, typeName, ...) \
+#define KIND_NAME(kind, log2Size, code, machineType, shortName, typeName, ...) \
typeName,
- FOREACH_VALUE_TYPE(TYPE_NAME)
+ FOREACH_VALUE_TYPE(KIND_NAME)
-#undef TYPE_NAME
+#undef KIND_NAME
};
return kTypeName[kind()];
}
- private:
- using KindField = base::BitField<Kind, 0, 8>;
- using RefIndexField = base::BitField<uint32_t, 8, 24>;
+ const std::string heap_name() const {
+ switch (heap_type()) {
+ case kHeapFunc:
+ return std::string("func");
+ case kHeapExtern:
+ return std::string("extern");
+ case kHeapEq:
+ return std::string("eq");
+ case kHeapExn:
+ return std::string("exn");
+ default:
+ return std::to_string(static_cast<uint32_t>(heap_type()));
+ }
+ }
uint32_t bit_field_;
};
@@ -245,18 +311,20 @@ inline std::ostream& operator<<(std::ostream& oss, ValueType type) {
return oss << type.type_name();
}
-constexpr ValueType kWasmI32 = ValueType(ValueType::kI32);
-constexpr ValueType kWasmI64 = ValueType(ValueType::kI64);
-constexpr ValueType kWasmF32 = ValueType(ValueType::kF32);
-constexpr ValueType kWasmF64 = ValueType(ValueType::kF64);
-constexpr ValueType kWasmAnyRef = ValueType(ValueType::kAnyRef);
-constexpr ValueType kWasmEqRef = ValueType(ValueType::kEqRef);
-constexpr ValueType kWasmExnRef = ValueType(ValueType::kExnRef);
-constexpr ValueType kWasmFuncRef = ValueType(ValueType::kFuncRef);
-constexpr ValueType kWasmNullRef = ValueType(ValueType::kNullRef);
-constexpr ValueType kWasmS128 = ValueType(ValueType::kS128);
-constexpr ValueType kWasmStmt = ValueType(ValueType::kStmt);
-constexpr ValueType kWasmBottom = ValueType(ValueType::kBottom);
+constexpr ValueType kWasmI32 = ValueType::Primitive(ValueType::kI32);
+constexpr ValueType kWasmI64 = ValueType::Primitive(ValueType::kI64);
+constexpr ValueType kWasmF32 = ValueType::Primitive(ValueType::kF32);
+constexpr ValueType kWasmF64 = ValueType::Primitive(ValueType::kF64);
+constexpr ValueType kWasmS128 = ValueType::Primitive(ValueType::kS128);
+constexpr ValueType kWasmI8 = ValueType::Primitive(ValueType::kI8);
+constexpr ValueType kWasmI16 = ValueType::Primitive(ValueType::kI16);
+constexpr ValueType kWasmStmt = ValueType::Primitive(ValueType::kStmt);
+constexpr ValueType kWasmBottom = ValueType::Primitive(ValueType::kBottom);
+// Established wasm shorthands:
+constexpr ValueType kWasmFuncRef = ValueType::Ref(kHeapFunc, kNullable);
+constexpr ValueType kWasmExnRef = ValueType::Ref(kHeapExn, kNullable);
+constexpr ValueType kWasmExternRef = ValueType::Ref(kHeapExtern, kNullable);
+constexpr ValueType kWasmEqRef = ValueType::Ref(kHeapEq, kNullable);
#define FOREACH_WASMVALUE_CTYPES(V) \
V(kI32, int32_t) \
@@ -332,7 +400,7 @@ class LoadType {
};
static constexpr ValueType kValueType[] = {
-#define VALUE_TYPE(type, ...) ValueType(ValueType::k##type),
+#define VALUE_TYPE(type, ...) ValueType::Primitive(ValueType::k##type),
FOREACH_LOAD_TYPE(VALUE_TYPE)
#undef VALUE_TYPE
};
@@ -403,7 +471,7 @@ class StoreType {
};
static constexpr ValueType kValueType[] = {
-#define VALUE_TYPE(type, ...) ValueType(ValueType::k##type),
+#define VALUE_TYPE(type, ...) ValueType::Primitive(ValueType::k##type),
FOREACH_STORE_TYPE(VALUE_TYPE)
#undef VALUE_TYPE
};
diff --git a/chromium/v8/src/wasm/wasm-code-manager.cc b/chromium/v8/src/wasm/wasm-code-manager.cc
index 5477a18f33d..f79b98e5687 100644
--- a/chromium/v8/src/wasm/wasm-code-manager.cc
+++ b/chromium/v8/src/wasm/wasm-code-manager.cc
@@ -83,7 +83,7 @@ base::AddressRegion DisjointAllocationPool::Merge(
auto below = above;
--below;
- // Sanity check:
+ // Consistency check:
DCHECK(above == regions_.end() || below->end() < above->begin());
// Adjacent to {below}: merge and done.
@@ -327,6 +327,12 @@ void WasmCode::Print(const char* name) const {
StdoutStream os;
os << "--- WebAssembly code ---\n";
Disassemble(name, os);
+ if (native_module_->HasDebugInfo()) {
+ if (auto* debug_side_table =
+ native_module_->GetDebugInfo()->GetDebugSideTableIfExists(this)) {
+ debug_side_table->Print(os);
+ }
+ }
os << "--- End code ---\n";
}
@@ -849,13 +855,13 @@ WasmCode* NativeModule::AddCodeForTesting(Handle<Code> code) {
code->is_off_heap_trampoline() ? 0 : code->relocation_size();
OwnedVector<byte> reloc_info;
if (relocation_size > 0) {
- reloc_info = OwnedVector<byte>::New(relocation_size);
- memcpy(reloc_info.start(), code->relocation_start(), relocation_size);
+ reloc_info = OwnedVector<byte>::Of(
+ Vector<byte>{code->relocation_start(), relocation_size});
}
Handle<ByteArray> source_pos_table(code->SourcePositionTable(),
code->GetIsolate());
OwnedVector<byte> source_pos =
- OwnedVector<byte>::New(source_pos_table->length());
+ OwnedVector<byte>::NewForOverwrite(source_pos_table->length());
if (source_pos_table->length() > 0) {
source_pos_table->copy_out(0, source_pos.start(),
source_pos_table->length());
@@ -923,7 +929,7 @@ WasmCode* NativeModule::AddCodeForTesting(Handle<Code> code) {
WasmCode::kFunction, // kind
ExecutionTier::kNone, // tier
kNoDebugging}}; // for_debugging
- new_code->MaybePrint(nullptr);
+ new_code->MaybePrint();
new_code->Validate();
return PublishCode(std::move(new_code));
@@ -1347,7 +1353,9 @@ class NativeModuleWireBytesStorage final : public WireBytesStorage {
: wire_bytes_(std::move(wire_bytes)) {}
Vector<const uint8_t> GetCode(WireBytesRef ref) const final {
- return wire_bytes_->as_vector().SubVector(ref.offset(), ref.end_offset());
+ return std::atomic_load(&wire_bytes_)
+ ->as_vector()
+ .SubVector(ref.offset(), ref.end_offset());
}
private:
@@ -1358,7 +1366,7 @@ class NativeModuleWireBytesStorage final : public WireBytesStorage {
void NativeModule::SetWireBytes(OwnedVector<const uint8_t> wire_bytes) {
auto shared_wire_bytes =
std::make_shared<OwnedVector<const uint8_t>>(std::move(wire_bytes));
- wire_bytes_ = shared_wire_bytes;
+ std::atomic_store(&wire_bytes_, shared_wire_bytes);
if (!shared_wire_bytes->empty()) {
compilation_state_->SetWireBytesStorage(
std::make_shared<NativeModuleWireBytesStorage>(
@@ -1851,7 +1859,7 @@ bool NativeModule::IsTieredDown() {
return tiering_state_ == kTieredDown;
}
-void NativeModule::TriggerRecompilation() {
+void NativeModule::RecompileForTiering() {
// Read the tiering state under the lock, then trigger recompilation after
// releasing the lock. If the tiering state was changed when the triggered
// compilation units finish, code installation will handle that correctly.
@@ -1863,24 +1871,51 @@ void NativeModule::TriggerRecompilation() {
RecompileNativeModule(this, current_state);
}
+std::vector<int> NativeModule::FindFunctionsToRecompile(
+ TieringState new_tiering_state) {
+ base::MutexGuard guard(&allocation_mutex_);
+ std::vector<int> function_indexes;
+ int imported = module()->num_imported_functions;
+ int declared = module()->num_declared_functions;
+ for (int slot_index = 0; slot_index < declared; ++slot_index) {
+ int function_index = imported + slot_index;
+ WasmCode* code = code_table_[slot_index];
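+ // Code is already good if it matches the requested tier: debug code when
+ // tiering down, TurboFan code when tiering up.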
+ bool code_is_good = new_tiering_state == kTieredDown
+ ? code && code->for_debugging()
+ : code && code->tier() == ExecutionTier::kTurbofan;
+ if (!code_is_good) function_indexes.push_back(function_index);
+ }
+ return function_indexes;
+}
+
void NativeModule::FreeCode(Vector<WasmCode* const> codes) {
// Free the code space.
code_allocator_.FreeCode(codes);
- base::MutexGuard guard(&allocation_mutex_);
- // Remove debug side tables for all removed code objects.
- if (debug_info_) debug_info_->RemoveDebugSideTables(codes);
- // Free the {WasmCode} objects. This will also unregister trap handler data.
- for (WasmCode* code : codes) {
- DCHECK_EQ(1, owned_code_.count(code->instruction_start()));
- owned_code_.erase(code->instruction_start());
+ DebugInfo* debug_info = nullptr;
+ {
+ base::MutexGuard guard(&allocation_mutex_);
+ debug_info = debug_info_.get();
+ // Free the {WasmCode} objects. This will also unregister trap handler data.
+ for (WasmCode* code : codes) {
+ DCHECK_EQ(1, owned_code_.count(code->instruction_start()));
+ owned_code_.erase(code->instruction_start());
+ }
}
+ // Remove debug side tables for all removed code objects, after releasing our
+ // lock. This is to avoid lock order inversion.
+ if (debug_info) debug_info->RemoveDebugSideTables(codes);
}
size_t NativeModule::GetNumberOfCodeSpacesForTesting() const {
return code_allocator_.GetNumCodeSpaces();
}
+bool NativeModule::HasDebugInfo() const {
+ base::MutexGuard guard(&allocation_mutex_);
+ return debug_info_ != nullptr;
+}
+
DebugInfo* NativeModule::GetDebugInfo() {
base::MutexGuard guard(&allocation_mutex_);
if (!debug_info_) debug_info_ = std::make_unique<DebugInfo>(this);
diff --git a/chromium/v8/src/wasm/wasm-code-manager.h b/chromium/v8/src/wasm/wasm-code-manager.h
index 443f6f36059..d76adccad76 100644
--- a/chromium/v8/src/wasm/wasm-code-manager.h
+++ b/chromium/v8/src/wasm/wasm-code-manager.h
@@ -71,8 +71,9 @@ struct WasmModule;
V(WasmStackOverflow) \
V(WasmThrow) \
V(WasmRethrow) \
+ V(WasmTraceEnter) \
+ V(WasmTraceExit) \
V(WasmTraceMemory) \
- V(AllocateHeapNumber) \
V(ArgumentsAdaptorTrampoline) \
V(BigIntToI32Pair) \
V(BigIntToI64) \
@@ -566,7 +567,9 @@ class V8_EXPORT_PRIVATE NativeModule final {
UseTrapHandler use_trap_handler() const { return use_trap_handler_; }
void set_lazy_compile_frozen(bool frozen) { lazy_compile_frozen_ = frozen; }
bool lazy_compile_frozen() const { return lazy_compile_frozen_; }
- Vector<const uint8_t> wire_bytes() const { return wire_bytes_->as_vector(); }
+ Vector<const uint8_t> wire_bytes() const {
+ return std::atomic_load(&wire_bytes_)->as_vector();
+ }
const WasmModule* module() const { return module_.get(); }
std::shared_ptr<const WasmModule> shared_module() const { return module_; }
size_t committed_code_space() const {
@@ -574,6 +577,10 @@ class V8_EXPORT_PRIVATE NativeModule final {
}
WasmEngine* engine() const { return engine_; }
+ bool HasWireBytes() const {
+ auto wire_bytes = std::atomic_load(&wire_bytes_);
+ return wire_bytes && !wire_bytes->empty();
+ }
void SetWireBytes(OwnedVector<const uint8_t> wire_bytes);
WasmCode* Lookup(Address) const;
@@ -600,18 +607,23 @@ class V8_EXPORT_PRIVATE NativeModule final {
Vector<WasmCompilationResult>);
// Set a new tiering state, but don't trigger any recompilation yet; use
- // {TriggerRecompilation} for that. The two steps are split because In some
+ // {RecompileForTiering} for that. The two steps are split because in some
// scenarios we need to drop locks before triggering recompilation.
void SetTieringState(TieringState);
// Check whether this modules is tiered down for debugging.
bool IsTieredDown();
- // Trigger a full recompilation of this module, in the tier set previously via
- // {SetTieringState}. When tiering down, the calling thread contributes to
- // compilation and only returns once recompilation is done. Tiering up happens
- // concurrently, so this method might return before it is complete.
- void TriggerRecompilation();
+ // Fully recompile this module in the tier set previously via
+ // {SetTieringState}. The calling thread contributes to compilation and only
+ // returns once recompilation is done.
+ void RecompileForTiering();
+
+ // Find all functions that need to be recompiled for a new tier. Note that
+ // compilation jobs might run concurrently, so this method only considers the
+ // compilation state of this native module at the time of the call.
+ // Returns a vector of function indexes to recompile.
+ std::vector<int> FindFunctionsToRecompile(TieringState);
// Free a set of functions of this module. Uncommits whole pages if possible.
// The given vector must be ordered by the instruction start address, and all
@@ -623,6 +635,9 @@ class V8_EXPORT_PRIVATE NativeModule final {
// Retrieve the number of separately reserved code spaces for this module.
size_t GetNumberOfCodeSpacesForTesting() const;
+ // Check whether there is DebugInfo for this NativeModule.
+ bool HasDebugInfo() const;
+
// Get or create the debug info for this NativeModule.
DebugInfo* GetDebugInfo();
diff --git a/chromium/v8/src/wasm/wasm-constants.h b/chromium/v8/src/wasm/wasm-constants.h
index b860ae692ca..9304f116fcc 100644
--- a/chromium/v8/src/wasm/wasm-constants.h
+++ b/chromium/v8/src/wasm/wasm-constants.h
@@ -18,22 +18,28 @@ namespace wasm {
constexpr uint32_t kWasmMagic = 0x6d736100;
constexpr uint32_t kWasmVersion = 0x01;
-// Binary encoding of local types.
+// Binary encoding of value and heap types.
enum ValueTypeCode : uint8_t {
+ // Current wasm types
kLocalVoid = 0x40,
kLocalI32 = 0x7f,
kLocalI64 = 0x7e,
kLocalF32 = 0x7d,
kLocalF64 = 0x7c,
+ // Simd proposal
kLocalS128 = 0x7b,
+ // reftypes, typed-funcref, and GC proposals
+ kLocalI8 = 0x7a,
+ kLocalI16 = 0x79,
kLocalFuncRef = 0x70,
- kLocalAnyRef = 0x6f,
- kLocalNullRef = 0x6e,
- kLocalRef = 0x6d, // GC proposal
- kLocalOptRef = 0x6c, // GC proposal
- kLocalEqRef = 0x6b, // GC proposal
- kLocalI31Ref = 0x6a, // GC proposal
- kLocalRttRef = 0x69, // GC proposal
+ kLocalExternRef = 0x6f,
+ // kLocalAny = 0x6e, // TODO(7748): Implement
+ kLocalEqRef = 0x6d,
+ kLocalOptRef = 0x6c,
+ kLocalRef = 0x6b,
+ // kLocalI31 = 0x6a, // TODO(7748): Implement
+ kLocalRtt = 0x69,
+ // Exception handling proposal
kLocalExnRef = 0x68,
};
// Binary encoding of other types.
diff --git a/chromium/v8/src/wasm/wasm-debug-evaluate.cc b/chromium/v8/src/wasm/wasm-debug-evaluate.cc
index 019ae5f73ec..a8c4cf2c40d 100644
--- a/chromium/v8/src/wasm/wasm-debug-evaluate.cc
+++ b/chromium/v8/src/wasm/wasm-debug-evaluate.cc
@@ -9,6 +9,7 @@
#include "src/api/api-inl.h"
#include "src/codegen/machine-type.h"
+#include "src/compiler/wasm-compiler.h"
#include "src/execution/frames-inl.h"
#include "src/wasm/value-type.h"
#include "src/wasm/wasm-arguments.h"
@@ -33,15 +34,15 @@ static bool CheckSignature(ValueType return_type,
const FunctionSig* sig, ErrorThrower* thrower) {
if (sig->return_count() != 1 && return_type != kWasmBottom) {
thrower->CompileError("Invalid return type. Got none, expected %s",
- return_type.type_name());
+ return_type.type_name().c_str());
return false;
}
if (sig->return_count() == 1) {
if (sig->GetReturn(0) != return_type) {
thrower->CompileError("Invalid return type. Got %s, expected %s",
- sig->GetReturn(0).type_name(),
- return_type.type_name());
+ sig->GetReturn(0).type_name().c_str(),
+ return_type.type_name().c_str());
return false;
}
}
@@ -56,7 +57,8 @@ static bool CheckSignature(ValueType return_type,
if (sig->GetParam(p) != argument_type) {
thrower->CompileError(
"Invalid argument type for argument %zu. Got %s, expected %s", p,
- sig->GetParam(p).type_name(), argument_type.type_name());
+ sig->GetParam(p).type_name().c_str(),
+ argument_type.type_name().c_str());
return false;
}
++p;
@@ -202,8 +204,8 @@ class DebugEvaluatorProxy {
DCHECK(frame_->is_wasm());
wasm::DebugInfo* debug_info =
WasmFrame::cast(frame_)->native_module()->GetDebugInfo();
- return debug_info->GetLocalValue(local, isolate_, frame_->pc(),
- frame_->fp(), frame_->callee_fp());
+ return debug_info->GetLocalValue(local, frame_->pc(), frame_->fp(),
+ frame_->callee_fp());
}
uint32_t GetArgAsUInt32(const v8::FunctionCallbackInfo<v8::Value>& args,
@@ -350,10 +352,10 @@ Maybe<std::string> DebugEvaluateImpl(
Handle<WasmExportedFunction> entry_point =
Handle<WasmExportedFunction>::cast(entry_point_obj);
- Handle<WasmDebugInfo> debug_info =
- WasmInstanceObject::GetOrCreateDebugInfo(evaluator_instance);
+ // TODO(wasm): Cache this code.
Handle<Code> wasm_entry =
- WasmDebugInfo::GetCWasmEntry(debug_info, entry_point->sig());
+ compiler::CompileCWasmEntry(isolate, entry_point->sig());
+
CWasmArgumentsPacker packer(4 /* uint32_t return value, no parameters. */);
Execution::CallWasm(isolate, wasm_entry, entry_point->GetWasmCallTarget(),
evaluator_instance, packer.argv());
diff --git a/chromium/v8/src/wasm/wasm-debug-evaluate.h b/chromium/v8/src/wasm/wasm-debug-evaluate.h
index 31eba51a3cc..f4e3aef1754 100644
--- a/chromium/v8/src/wasm/wasm-debug-evaluate.h
+++ b/chromium/v8/src/wasm/wasm-debug-evaluate.h
@@ -7,7 +7,6 @@
#include "src/base/macros.h"
#include "src/handles/maybe-handles.h"
-#include "src/wasm/wasm-interpreter.h"
#include "src/wasm/wasm-objects.h"
namespace v8 {
diff --git a/chromium/v8/src/wasm/wasm-debug.cc b/chromium/v8/src/wasm/wasm-debug.cc
index a8fd6505f0e..61f3492af96 100644
--- a/chromium/v8/src/wasm/wasm-debug.cc
+++ b/chromium/v8/src/wasm/wasm-debug.cc
@@ -4,6 +4,7 @@
#include "src/wasm/wasm-debug.h"
+#include <iomanip>
#include <unordered_map>
#include "src/base/optional.h"
@@ -15,15 +16,14 @@
#include "src/execution/frames-inl.h"
#include "src/execution/isolate.h"
#include "src/heap/factory.h"
-#include "src/utils/identity-map.h"
#include "src/wasm/baseline/liftoff-compiler.h"
#include "src/wasm/baseline/liftoff-register.h"
#include "src/wasm/module-decoder.h"
#include "src/wasm/wasm-code-manager.h"
-#include "src/wasm/wasm-interpreter.h"
#include "src/wasm/wasm-limits.h"
#include "src/wasm/wasm-module.h"
#include "src/wasm/wasm-objects-inl.h"
+#include "src/wasm/wasm-opcodes-inl.h"
#include "src/wasm/wasm-value.h"
#include "src/zone/accounting-allocator.h"
@@ -49,29 +49,102 @@ Handle<String> PrintFToOneByteString(Isolate* isolate, const char* format,
: isolate->factory()->NewStringFromOneByte(name).ToHandleChecked();
}
+MaybeHandle<JSObject> CreateFunctionTablesObject(
+ Handle<WasmInstanceObject> instance) {
+ Isolate* isolate = instance->GetIsolate();
+ auto tables = handle(instance->tables(), isolate);
+ if (tables->length() == 0) return MaybeHandle<JSObject>();
+
+ const char* table_label = "table%d";
+ Handle<JSObject> tables_obj = isolate->factory()->NewJSObjectWithNullProto();
+ for (int table_index = 0; table_index < tables->length(); ++table_index) {
+ auto func_table =
+ handle(WasmTableObject::cast(tables->get(table_index)), isolate);
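+ // Only tables of function type are exposed in the debug scope; tables
+ // holding other reference types are skipped.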
+ if (func_table->type().heap_type() != kHeapFunc) continue;
+
+ Handle<String> table_name;
+ if (!WasmInstanceObject::GetTableNameOrNull(isolate, instance, table_index)
+ .ToHandle(&table_name)) {
+ table_name =
+ PrintFToOneByteString<true>(isolate, table_label, table_index);
+ }
+
+ Handle<JSObject> func_table_obj =
+ isolate->factory()->NewJSObjectWithNullProto();
+ JSObject::AddProperty(isolate, tables_obj, table_name, func_table_obj,
+ NONE);
+ for (int i = 0; i < func_table->current_length(); ++i) {
+ Handle<Object> func = WasmTableObject::Get(isolate, func_table, i);
+ DCHECK(!WasmCapiFunction::IsWasmCapiFunction(*func));
+ if (func->IsNull(isolate)) continue;
+
+ Handle<String> func_name;
+ Handle<JSObject> func_obj =
+ isolate->factory()->NewJSObjectWithNullProto();
+
+ if (WasmExportedFunction::IsWasmExportedFunction(*func)) {
+ auto target_func = Handle<WasmExportedFunction>::cast(func);
+ auto target_instance = handle(target_func->instance(), isolate);
+ auto module = handle(target_instance->module_object(), isolate);
+ func_name = WasmModuleObject::GetFunctionName(
+ isolate, module, target_func->function_index());
+ } else if (WasmJSFunction::IsWasmJSFunction(*func)) {
+ auto target_func = Handle<JSFunction>::cast(func);
+ func_name = JSFunction::GetName(target_func);
+ if (func_name->length() == 0) {
+ func_name = isolate->factory()->InternalizeUtf8String("anonymous");
+ }
+ }
+ JSObject::AddProperty(isolate, func_obj, func_name, func, NONE);
+ JSObject::AddDataElement(func_table_obj, i, func_obj, NONE);
+ }
+ }
+ return tables_obj;
+}
+
Handle<Object> WasmValueToValueObject(Isolate* isolate, WasmValue value) {
+ Handle<ByteArray> bytes;
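+ // Numeric values are copied into a ByteArray; the raw bytes plus the type
+ // kind are then wrapped via {NewWasmValue} below.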
switch (value.type().kind()) {
- case ValueType::kI32:
- if (Smi::IsValid(value.to<int32_t>()))
- return handle(Smi::FromInt(value.to<int32_t>()), isolate);
- return PrintFToOneByteString<false>(isolate, "%d", value.to<int32_t>());
+ case ValueType::kI32: {
+ int32_t val = value.to_i32();
+ bytes = isolate->factory()->NewByteArray(sizeof(val));
+ memcpy(bytes->GetDataStartAddress(), &val, sizeof(val));
+ break;
+ }
case ValueType::kI64: {
- int64_t i64 = value.to<int64_t>();
- int32_t i32 = static_cast<int32_t>(i64);
- if (i32 == i64 && Smi::IsValid(i32))
- return handle(Smi::FromIntptr(i32), isolate);
- return PrintFToOneByteString<false>(isolate, "%" PRId64, i64);
+ int64_t val = value.to_i64();
+ bytes = isolate->factory()->NewByteArray(sizeof(val));
+ memcpy(bytes->GetDataStartAddress(), &val, sizeof(val));
+ break;
+ }
+ case ValueType::kF32: {
+ float val = value.to_f32();
+ bytes = isolate->factory()->NewByteArray(sizeof(val));
+ memcpy(bytes->GetDataStartAddress(), &val, sizeof(val));
+ break;
+ }
+ case ValueType::kF64: {
+ double val = value.to_f64();
+ bytes = isolate->factory()->NewByteArray(sizeof(val));
+ memcpy(bytes->GetDataStartAddress(), &val, sizeof(val));
+ break;
}
- case ValueType::kF32:
- return isolate->factory()->NewNumber(value.to<float>());
- case ValueType::kF64:
- return isolate->factory()->NewNumber(value.to<double>());
- case ValueType::kAnyRef:
- return value.to_anyref();
- default:
+ case ValueType::kOptRef: {
+ if (value.type().heap_type() == kHeapExtern) {
+ return isolate->factory()->NewWasmValue(
+ static_cast<int32_t>(kHeapExtern), value.to_externref());
+ } else {
+ // TODO(7748): Implement.
+ UNIMPLEMENTED();
+ }
+ }
+ default: {
+ // TODO(7748): Implement.
UNIMPLEMENTED();
- return isolate->factory()->undefined_value();
+ }
}
+ return isolate->factory()->NewWasmValue(
+ static_cast<int32_t>(value.type().kind()), bytes);
}
MaybeHandle<String> GetLocalNameString(Isolate* isolate,
@@ -87,176 +160,10 @@ MaybeHandle<String> GetLocalNameString(Isolate* isolate,
return isolate->factory()->NewStringFromUtf8(name);
}
-class InterpreterHandle {
- Isolate* isolate_;
- const WasmModule* module_;
- WasmInterpreter interpreter_;
- std::unordered_map<Address, uint32_t> activations_;
-
- uint32_t StartActivation(Address frame_pointer) {
- WasmInterpreter::Thread* thread = interpreter_.GetThread(0);
- uint32_t activation_id = thread->StartActivation();
- DCHECK_EQ(0, activations_.count(frame_pointer));
- activations_.insert(std::make_pair(frame_pointer, activation_id));
- return activation_id;
- }
-
- void FinishActivation(Address frame_pointer, uint32_t activation_id) {
- WasmInterpreter::Thread* thread = interpreter_.GetThread(0);
- thread->FinishActivation(activation_id);
- DCHECK_EQ(1, activations_.count(frame_pointer));
- activations_.erase(frame_pointer);
- }
-
- bool HasActivation(Address frame_pointer) {
- return activations_.count(frame_pointer);
- }
-
- std::pair<uint32_t, uint32_t> GetActivationFrameRange(
- WasmInterpreter::Thread* thread, Address frame_pointer) {
- DCHECK_EQ(1, activations_.count(frame_pointer));
- uint32_t activation_id = activations_.find(frame_pointer)->second;
- uint32_t num_activations = static_cast<uint32_t>(activations_.size() - 1);
- uint32_t frame_base = thread->ActivationFrameBase(activation_id);
- uint32_t frame_limit = activation_id == num_activations
- ? thread->GetFrameCount()
- : thread->ActivationFrameBase(activation_id + 1);
- DCHECK_LE(frame_base, frame_limit);
- DCHECK_LE(frame_limit, thread->GetFrameCount());
- return {frame_base, frame_limit};
- }
-
- static ModuleWireBytes GetBytes(WasmDebugInfo debug_info) {
- // Return raw pointer into heap. The WasmInterpreter will make its own copy
- // of this data anyway, and there is no heap allocation in-between.
- NativeModule* native_module =
- debug_info.wasm_instance().module_object().native_module();
- return ModuleWireBytes{native_module->wire_bytes()};
- }
-
- public:
- InterpreterHandle(Isolate* isolate, Handle<WasmDebugInfo> debug_info)
- : isolate_(isolate),
- module_(debug_info->wasm_instance().module_object().module()),
- interpreter_(isolate, module_, GetBytes(*debug_info),
- handle(debug_info->wasm_instance(), isolate)) {}
-
- WasmInterpreter* interpreter() { return &interpreter_; }
- const WasmModule* module() const { return module_; }
-
- // Returns true if exited regularly, false if a trap/exception occurred and
- // was not handled inside this activation. In the latter case, a pending
- // exception will have been set on the isolate.
- bool Execute(Handle<WasmInstanceObject> instance_object,
- Address frame_pointer, uint32_t func_index,
- Vector<WasmValue> argument_values,
- Vector<WasmValue> return_values) {
- DCHECK_GE(module()->functions.size(), func_index);
- const FunctionSig* sig = module()->functions[func_index].sig;
- DCHECK_EQ(sig->parameter_count(), argument_values.size());
- DCHECK_EQ(sig->return_count(), return_values.size());
-
- uint32_t activation_id = StartActivation(frame_pointer);
-
- WasmCodeRefScope code_ref_scope;
- WasmInterpreter::Thread* thread = interpreter_.GetThread(0);
- thread->InitFrame(&module()->functions[func_index],
- argument_values.begin());
- bool finished = false;
- while (!finished) {
- // TODO(clemensb): Add occasional StackChecks.
- WasmInterpreter::State state = thread->Run();
- switch (state) {
- case WasmInterpreter::State::PAUSED:
- UNREACHABLE();
- case WasmInterpreter::State::FINISHED:
- // Perfect, just break the switch and exit the loop.
- finished = true;
- break;
- case WasmInterpreter::State::TRAPPED: {
- MessageTemplate message_id =
- WasmOpcodes::TrapReasonToMessageId(thread->GetTrapReason());
- Handle<JSObject> exception =
- isolate_->factory()->NewWasmRuntimeError(message_id);
- JSObject::AddProperty(isolate_, exception,
- isolate_->factory()->wasm_uncatchable_symbol(),
- isolate_->factory()->true_value(), NONE);
- auto result = thread->RaiseException(isolate_, exception);
- if (result == WasmInterpreter::Thread::HANDLED) break;
- // If no local handler was found, we fall-thru to {STOPPED}.
- DCHECK_EQ(WasmInterpreter::State::STOPPED, thread->state());
- V8_FALLTHROUGH;
- }
- case WasmInterpreter::State::STOPPED:
- // An exception happened, and the current activation was unwound
- // without hitting a local exception handler. All that remains to be
- // done is finish the activation and let the exception propagate.
- DCHECK_EQ(thread->ActivationFrameBase(activation_id),
- thread->GetFrameCount());
- DCHECK(isolate_->has_pending_exception());
- FinishActivation(frame_pointer, activation_id);
- return false;
- // RUNNING should never occur here.
- case WasmInterpreter::State::RUNNING:
- default:
- UNREACHABLE();
- }
- }
-
- // Copy back the return value.
-#ifdef DEBUG
- const int max_count = WasmFeatures::FromIsolate(isolate_).has_mv()
- ? kV8MaxWasmFunctionMultiReturns
- : kV8MaxWasmFunctionReturns;
-#endif
- DCHECK_GE(max_count, sig->return_count());
- for (unsigned i = 0; i < sig->return_count(); ++i) {
- return_values[i] = thread->GetReturnValue(i);
- }
-
- FinishActivation(frame_pointer, activation_id);
-
- return true;
- }
-
- std::vector<std::pair<uint32_t, int>> GetInterpretedStack(
- Address frame_pointer) {
- DCHECK_EQ(1, interpreter()->GetThreadCount());
- WasmInterpreter::Thread* thread = interpreter()->GetThread(0);
-
- std::pair<uint32_t, uint32_t> frame_range =
- GetActivationFrameRange(thread, frame_pointer);
-
- std::vector<std::pair<uint32_t, int>> stack;
- stack.reserve(frame_range.second - frame_range.first);
- for (uint32_t fp = frame_range.first; fp < frame_range.second; ++fp) {
- auto frame = thread->GetFrame(fp);
- stack.emplace_back(frame->function()->func_index, frame->pc());
- }
- return stack;
- }
-
- int NumberOfActiveFrames(Address frame_pointer) {
- if (!HasActivation(frame_pointer)) return 0;
-
- DCHECK_EQ(1, interpreter()->GetThreadCount());
- WasmInterpreter::Thread* thread = interpreter()->GetThread(0);
-
- std::pair<uint32_t, uint32_t> frame_range =
- GetActivationFrameRange(thread, frame_pointer);
-
- return frame_range.second - frame_range.first;
- }
-
- private:
- DISALLOW_COPY_AND_ASSIGN(InterpreterHandle);
-};
-
// Generate a sorted and deduplicated list of byte offsets for this function's
// current positions on the stack.
std::vector<int> StackFramePositions(int func_index, Isolate* isolate) {
std::vector<int> byte_offsets;
- WasmCodeRefScope code_ref_scope;
for (StackTraceFrameIterator it(isolate); !it.done(); it.Advance()) {
if (!it.is_wasm()) continue;
WasmFrame* frame = WasmFrame::cast(it.frame());
@@ -304,11 +211,43 @@ Address FindNewPC(WasmCode* wasm_code, int byte_offset,
} // namespace
+void DebugSideTable::Print(std::ostream& os) const {
+ os << "Debug side table (" << num_locals_ << " locals, " << entries_.size()
+ << " entries):\n";
+ for (auto& entry : entries_) entry.Print(os);
+ os << "\n";
+}
+
+void DebugSideTable::Entry::Print(std::ostream& os) const {
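+ // Output format: "<hex pc offset> [ <type>:<location> ... ]".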
+ os << std::setw(6) << std::hex << pc_offset_ << std::dec << " [";
+ for (auto& value : values_) {
+ os << " " << value.type.type_name() << ":";
+ switch (value.kind) {
+ case kConstant:
+ os << "const#" << value.i32_const;
+ break;
+ case kRegister:
+ os << "reg#" << value.reg_code;
+ break;
+ case kStack:
+ os << "stack#" << value.stack_offset;
+ break;
+ }
+ }
+ os << " ]\n";
+}
+
Handle<JSObject> GetModuleScopeObject(Handle<WasmInstanceObject> instance) {
Isolate* isolate = instance->GetIsolate();
Handle<JSObject> module_scope_object =
isolate->factory()->NewJSObjectWithNullProto();
+
+ Handle<String> instance_name =
+ isolate->factory()->InternalizeString(StaticCharVector("instance"));
+ JSObject::AddProperty(isolate, module_scope_object, instance_name, instance,
+ NONE);
+
if (instance->has_memory_object()) {
Handle<String> name;
// TODO(duongn): extend the logic when multiple memories are supported.
@@ -327,6 +266,14 @@ Handle<JSObject> GetModuleScopeObject(Handle<WasmInstanceObject> instance) {
NONE);
}
+ Handle<JSObject> function_tables_obj;
+ if (CreateFunctionTablesObject(instance).ToHandle(&function_tables_obj)) {
+ Handle<String> tables_name = isolate->factory()->InternalizeString(
+ StaticCharVector("function tables"));
+ JSObject::AddProperty(isolate, module_scope_object, tables_name,
+ function_tables_obj, NONE);
+ }
+
auto& globals = instance->module()->globals;
if (globals.size() > 0) {
Handle<JSObject> globals_obj =
@@ -357,29 +304,29 @@ class DebugInfoImpl {
explicit DebugInfoImpl(NativeModule* native_module)
: native_module_(native_module) {}
- int GetNumLocals(Isolate* isolate, Address pc) {
- FrameInspectionScope scope(this, isolate, pc);
+ int GetNumLocals(Address pc) {
+ FrameInspectionScope scope(this, pc);
if (!scope.is_inspectable()) return 0;
return scope.debug_side_table->num_locals();
}
- WasmValue GetLocalValue(int local, Isolate* isolate, Address pc, Address fp,
+ WasmValue GetLocalValue(int local, Address pc, Address fp,
Address debug_break_fp) {
- FrameInspectionScope scope(this, isolate, pc);
+ FrameInspectionScope scope(this, pc);
return GetValue(scope.debug_side_table_entry, local, fp, debug_break_fp);
}
- int GetStackDepth(Isolate* isolate, Address pc) {
- FrameInspectionScope scope(this, isolate, pc);
+ int GetStackDepth(Address pc) {
+ FrameInspectionScope scope(this, pc);
if (!scope.is_inspectable()) return 0;
int num_locals = static_cast<int>(scope.debug_side_table->num_locals());
int value_count = scope.debug_side_table_entry->num_values();
return value_count - num_locals;
}
- WasmValue GetStackValue(int index, Isolate* isolate, Address pc, Address fp,
+ WasmValue GetStackValue(int index, Address pc, Address fp,
Address debug_break_fp) {
- FrameInspectionScope scope(this, isolate, pc);
+ FrameInspectionScope scope(this, pc);
int num_locals = static_cast<int>(scope.debug_side_table->num_locals());
int value_count = scope.debug_side_table_entry->num_values();
if (num_locals + index >= value_count) return {};
@@ -389,7 +336,7 @@ class DebugInfoImpl {
Handle<JSObject> GetLocalScopeObject(Isolate* isolate, Address pc, Address fp,
Address debug_break_fp) {
- FrameInspectionScope scope(this, isolate, pc);
+ FrameInspectionScope scope(this, pc);
Handle<JSObject> local_scope_object =
isolate->factory()->NewJSObjectWithNullProto();
@@ -401,40 +348,32 @@ class DebugInfoImpl {
// Fill parameters and locals.
int num_locals = static_cast<int>(scope.debug_side_table->num_locals());
DCHECK_LE(static_cast<int>(function->sig->parameter_count()), num_locals);
- if (num_locals > 0) {
- Handle<JSObject> locals_obj =
- isolate->factory()->NewJSObjectWithNullProto();
- Handle<String> locals_name =
- isolate->factory()->InternalizeString(StaticCharVector("locals"));
- JSObject::AddProperty(isolate, local_scope_object, locals_name,
- locals_obj, NONE);
- for (int i = 0; i < num_locals; ++i) {
- Handle<Name> name;
- if (!GetLocalNameString(isolate, native_module_, function->func_index,
- i)
- .ToHandle(&name)) {
- name = PrintFToOneByteString<true>(isolate, "var%d", i);
- }
- WasmValue value =
- GetValue(scope.debug_side_table_entry, i, fp, debug_break_fp);
- Handle<Object> value_obj = WasmValueToValueObject(isolate, value);
- // {name} can be a string representation of an element index.
- LookupIterator::Key lookup_key{isolate, name};
- LookupIterator it(isolate, locals_obj, lookup_key, locals_obj,
- LookupIterator::OWN_SKIP_INTERCEPTOR);
- if (it.IsFound()) continue;
- Object::AddDataProperty(&it, value_obj, NONE,
- Just(ShouldThrow::kThrowOnError),
- StoreOrigin::kNamed)
- .Check();
+ for (int i = 0; i < num_locals; ++i) {
+ Handle<Name> name;
+ if (!GetLocalNameString(isolate, native_module_, function->func_index, i)
+ .ToHandle(&name)) {
+ name = PrintFToOneByteString<true>(isolate, "var%d", i);
}
+ WasmValue value =
+ GetValue(scope.debug_side_table_entry, i, fp, debug_break_fp);
+ Handle<Object> value_obj = WasmValueToValueObject(isolate, value);
+ // {name} can be a string representation of an element index.
+ LookupIterator::Key lookup_key{isolate, name};
+ LookupIterator it(isolate, local_scope_object, lookup_key,
+ local_scope_object,
+ LookupIterator::OWN_SKIP_INTERCEPTOR);
+ if (it.IsFound()) continue;
+ Object::AddDataProperty(&it, value_obj, NONE,
+ Just(ShouldThrow::kThrowOnError),
+ StoreOrigin::kNamed)
+ .Check();
}
return local_scope_object;
}
Handle<JSObject> GetStackScopeObject(Isolate* isolate, Address pc, Address fp,
Address debug_break_fp) {
- FrameInspectionScope scope(this, isolate, pc);
+ FrameInspectionScope scope(this, pc);
Handle<JSObject> stack_scope_obj =
isolate->factory()->NewJSObjectWithNullProto();
@@ -468,10 +407,7 @@ class DebugInfoImpl {
WasmCode* RecompileLiftoffWithBreakpoints(
int func_index, Vector<int> offsets, Vector<int> extra_source_positions) {
- // During compilation, we cannot hold the lock, since compilation takes the
- // {NativeModule} lock, which could lead to deadlocks.
- mutex_.AssertUnheld();
-
+ DCHECK(!mutex_.TryLock()); // Mutex is held externally.
// Recompile the function with Liftoff, setting the new breakpoints.
// Not thread-safe. The caller is responsible for locking {mutex_}.
CompilationEnv env = native_module_->CreateCompilationEnv();
@@ -484,9 +420,11 @@ class DebugInfoImpl {
ForDebugging for_debugging =
offsets.size() == 1 && offsets[0] == 0 ? kForStepping : kForDebugging;
+ Counters* counters = nullptr;
+ WasmFeatures unused_detected;
WasmCompilationResult result = ExecuteLiftoffCompilation(
native_module_->engine()->allocator(), &env, body, func_index,
- for_debugging, nullptr, nullptr, offsets, &debug_sidetable,
+ for_debugging, counters, &unused_detected, offsets, &debug_sidetable,
extra_source_positions);
// Liftoff compilation failure is a FATAL error. We rely on complete Liftoff
// support for debugging.
@@ -497,62 +435,99 @@ class DebugInfoImpl {
native_module_->AddCompiledCode(std::move(result)));
DCHECK(new_code->is_inspectable());
- bool added =
- debug_side_tables_.emplace(new_code, std::move(debug_sidetable)).second;
- DCHECK(added);
- USE(added);
+ DCHECK_EQ(0, debug_side_tables_.count(new_code));
+ debug_side_tables_.emplace(new_code, std::move(debug_sidetable));
return new_code;
}
- void SetBreakpoint(int func_index, int offset, Isolate* current_isolate) {
- std::vector<int> breakpoints_copy;
- {
- // Hold the mutex while modifying the set of breakpoints, but release it
- // before compiling the new code (see comment in
- // {RecompileLiftoffWithBreakpoints}). This needs to be revisited once we
- // support setting different breakpoints in different isolates
- // (https://crbug.com/v8/10351).
- base::MutexGuard guard(&mutex_);
+ void SetBreakpoint(int func_index, int offset, Isolate* isolate) {
+ // Put the code ref scope outside of the mutex, so we don't unnecessarily
+ // hold the mutex while freeing code.
+ WasmCodeRefScope wasm_code_ref_scope;
- // offset == 0 indicates flooding and should not happen here.
- DCHECK_NE(0, offset);
+ // Generate additional source positions for current stack frame positions.
+ // These source positions are used to find return addresses in the new code.
+ std::vector<int> stack_frame_positions =
+ StackFramePositions(func_index, isolate);
- std::vector<int>& breakpoints = breakpoints_per_function_[func_index];
- auto insertion_point =
- std::lower_bound(breakpoints.begin(), breakpoints.end(), offset);
- if (insertion_point != breakpoints.end() && *insertion_point == offset) {
- // The breakpoint is already set.
- return;
- }
- breakpoints.insert(insertion_point, offset);
- breakpoints_copy = breakpoints;
+ // Hold the mutex while modifying breakpoints, to ensure consistency when
+ // multiple isolates set/remove breakpoints at the same time.
+ base::MutexGuard guard(&mutex_);
+
+ // offset == 0 indicates flooding and should not happen here.
+ DCHECK_NE(0, offset);
+
+ // Get the set of previously set breakpoints, to check later whether a new
+ // breakpoint was actually added.
+ std::vector<int> all_breakpoints = FindAllBreakpoints(func_index);
+
+ auto& isolate_data = per_isolate_data_[isolate];
+ std::vector<int>& breakpoints =
+ isolate_data.breakpoints_per_function[func_index];
+ auto insertion_point =
+ std::lower_bound(breakpoints.begin(), breakpoints.end(), offset);
+ if (insertion_point != breakpoints.end() && *insertion_point == offset) {
+ // The breakpoint is already set for this isolate.
+ return;
}
+ breakpoints.insert(insertion_point, offset);
+
+ DCHECK(std::is_sorted(all_breakpoints.begin(), all_breakpoints.end()));
+ // Find the insertion position within {all_breakpoints}.
+ insertion_point = std::lower_bound(all_breakpoints.begin(),
+ all_breakpoints.end(), offset);
+ bool breakpoint_exists =
+ insertion_point != all_breakpoints.end() && *insertion_point == offset;
+ // If the breakpoint was already set before *and* we don't need any special
+ // positions for OSR, then we can just reuse the old code. Otherwise,
+ // recompile it. In any case, rewrite this isolate's stack to make sure that
+ // it uses up-to-date code containing the breakpoint.
+ WasmCode* new_code;
+ if (breakpoint_exists && stack_frame_positions.empty()) {
+ new_code = native_module_->GetCode(func_index);
+ } else {
+ // Add the new offset to the set of all breakpoints, then recompile.
+ if (!breakpoint_exists) all_breakpoints.insert(insertion_point, offset);
+ new_code =
+ RecompileLiftoffWithBreakpoints(func_index, VectorOf(all_breakpoints),
+ VectorOf(stack_frame_positions));
+ }
+ UpdateReturnAddresses(isolate, new_code, isolate_data.stepping_frame);
+ }
- UpdateBreakpoints(func_index, VectorOf(breakpoints_copy), current_isolate);
+ std::vector<int> FindAllBreakpoints(int func_index) {
+ DCHECK(!mutex_.TryLock()); // Mutex must be held externally.
+ std::set<int> breakpoints;
+ for (auto& data : per_isolate_data_) {
+ auto it = data.second.breakpoints_per_function.find(func_index);
+ if (it == data.second.breakpoints_per_function.end()) continue;
+ for (int offset : it->second) breakpoints.insert(offset);
+ }
+ return {breakpoints.begin(), breakpoints.end()};
}
void UpdateBreakpoints(int func_index, Vector<int> breakpoints,
- Isolate* current_isolate) {
+ Isolate* isolate, StackFrameId stepping_frame) {
+ DCHECK(!mutex_.TryLock()); // Mutex is held externally.
// Generate additional source positions for current stack frame positions.
// These source positions are used to find return addresses in the new code.
std::vector<int> stack_frame_positions =
- StackFramePositions(func_index, current_isolate);
+ StackFramePositions(func_index, isolate);
- WasmCodeRefScope wasm_code_ref_scope;
WasmCode* new_code = RecompileLiftoffWithBreakpoints(
func_index, breakpoints, VectorOf(stack_frame_positions));
- UpdateReturnAddresses(current_isolate, new_code);
+ UpdateReturnAddresses(isolate, new_code, stepping_frame);
}
- void FloodWithBreakpoints(WasmFrame* frame, Isolate* current_isolate,
- ReturnLocation return_location) {
+ void FloodWithBreakpoints(WasmFrame* frame, ReturnLocation return_location) {
// 0 is an invalid offset used to indicate flooding.
int offset = 0;
WasmCodeRefScope wasm_code_ref_scope;
DCHECK(frame->wasm_code()->is_liftoff());
// Generate an additional source position for the current byte offset.
int byte_offset = frame->byte_offset();
+ base::MutexGuard guard(&mutex_);
WasmCode* new_code = RecompileLiftoffWithBreakpoints(
frame->function_index(), VectorOf(&offset, 1),
VectorOf(&byte_offset, 1));
@@ -579,37 +554,55 @@ class DebugInfoImpl {
return_location = kAfterWasmCall;
}
- FloodWithBreakpoints(frame, isolate, return_location);
- stepping_frame_ = frame->id();
+ FloodWithBreakpoints(frame, return_location);
+
+ base::MutexGuard guard(&mutex_);
+ per_isolate_data_[isolate].stepping_frame = frame->id();
}
- void ClearStepping() { stepping_frame_ = NO_ID; }
+ void ClearStepping(Isolate* isolate) {
+ base::MutexGuard guard(&mutex_);
+ auto it = per_isolate_data_.find(isolate);
+ if (it != per_isolate_data_.end()) it->second.stepping_frame = NO_ID;
+ }
bool IsStepping(WasmFrame* frame) {
Isolate* isolate = frame->wasm_instance().GetIsolate();
- StepAction last_step_action = isolate->debug()->last_step_action();
- return stepping_frame_ == frame->id() || last_step_action == StepIn;
+ if (isolate->debug()->last_step_action() == StepIn) return true;
+ base::MutexGuard guard(&mutex_);
+ auto it = per_isolate_data_.find(isolate);
+ return it != per_isolate_data_.end() &&
+ it->second.stepping_frame == frame->id();
}
- void RemoveBreakpoint(int func_index, int position,
- Isolate* current_isolate) {
- std::vector<int> breakpoints_copy;
- {
- base::MutexGuard guard(&mutex_);
- const auto& function = native_module_->module()->functions[func_index];
- int offset = position - function.code.offset();
-
- std::vector<int>& breakpoints = breakpoints_per_function_[func_index];
- DCHECK_LT(0, offset);
- auto insertion_point =
- std::lower_bound(breakpoints.begin(), breakpoints.end(), offset);
- if (insertion_point == breakpoints.end()) return;
- if (*insertion_point != offset) return;
- breakpoints.erase(insertion_point);
- breakpoints_copy = breakpoints;
- }
+ void RemoveBreakpoint(int func_index, int position, Isolate* isolate) {
+ // Put the code ref scope outside of the mutex, so we don't unnecessarily
+ // hold the mutex while freeing code.
+ WasmCodeRefScope wasm_code_ref_scope;
- UpdateBreakpoints(func_index, VectorOf(breakpoints_copy), current_isolate);
+ // Hold the mutex while modifying breakpoints, to ensure consistency when
+ // multiple isolates set/remove breakpoints at the same time.
+ base::MutexGuard guard(&mutex_);
+
+ const auto& function = native_module_->module()->functions[func_index];
+ int offset = position - function.code.offset();
+
+ auto& isolate_data = per_isolate_data_[isolate];
+ std::vector<int>& breakpoints =
+ isolate_data.breakpoints_per_function[func_index];
+ DCHECK_LT(0, offset);
+ auto insertion_point =
+ std::lower_bound(breakpoints.begin(), breakpoints.end(), offset);
+ if (insertion_point == breakpoints.end()) return;
+ if (*insertion_point != offset) return;
+ breakpoints.erase(insertion_point);
+
+ std::vector<int> remaining = FindAllBreakpoints(func_index);
+ // If the breakpoint is still set in another isolate, don't remove it.
+ DCHECK(std::is_sorted(remaining.begin(), remaining.end()));
+ if (std::binary_search(remaining.begin(), remaining.end(), offset)) return;
+ UpdateBreakpoints(func_index, VectorOf(remaining), isolate,
+ isolate_data.stepping_frame);
}
void RemoveDebugSideTables(Vector<WasmCode* const> codes) {
@@ -619,15 +612,55 @@ class DebugInfoImpl {
}
}
+ DebugSideTable* GetDebugSideTableIfExists(const WasmCode* code) const {
+ base::MutexGuard guard(&mutex_);
+ auto it = debug_side_tables_.find(code);
+ return it == debug_side_tables_.end() ? nullptr : it->second.get();
+ }
+
+ static bool HasRemovedBreakpoints(const std::vector<int>& removed,
+ const std::vector<int>& remaining) {
+ DCHECK(std::is_sorted(remaining.begin(), remaining.end()));
+ for (int offset : removed) {
+ // Return true if we removed a breakpoint which is not part of remaining.
+ if (!std::binary_search(remaining.begin(), remaining.end(), offset)) {
+ return true;
+ }
+ }
+ return false;
+ }
+
+ void RemoveIsolate(Isolate* isolate) {
+ // Put the code ref scope outside of the mutex, so we don't unnecessarily
+ // hold the mutex while freeing code.
+ WasmCodeRefScope wasm_code_ref_scope;
+
+ base::MutexGuard guard(&mutex_);
+ auto per_isolate_data_it = per_isolate_data_.find(isolate);
+ if (per_isolate_data_it == per_isolate_data_.end()) return;
+ std::unordered_map<int, std::vector<int>> removed_per_function =
+ std::move(per_isolate_data_it->second.breakpoints_per_function);
+ per_isolate_data_.erase(per_isolate_data_it);
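+ // Recompile any function for which this isolate had breakpoints that no
+ // other isolate still has set, so the removed breakpoints disappear.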
+ for (auto& entry : removed_per_function) {
+ int func_index = entry.first;
+ std::vector<int>& removed = entry.second;
+ std::vector<int> remaining = FindAllBreakpoints(func_index);
+ if (HasRemovedBreakpoints(removed, remaining)) {
+ RecompileLiftoffWithBreakpoints(func_index, VectorOf(remaining), {});
+ }
+ }
+ }
+
private:
struct FrameInspectionScope {
- FrameInspectionScope(DebugInfoImpl* debug_info, Isolate* isolate,
- Address pc)
- : code(isolate->wasm_engine()->code_manager()->LookupCode(pc)),
+ FrameInspectionScope(DebugInfoImpl* debug_info, Address pc)
+ : code(debug_info->native_module_->engine()->code_manager()->LookupCode(
+ pc)),
pc_offset(static_cast<int>(pc - code->instruction_start())),
debug_side_table(
code->is_inspectable()
- ? debug_info->GetDebugSideTable(code, isolate->allocator())
+ ? debug_info->GetDebugSideTable(
+ code, debug_info->native_module_->engine()->allocator())
: nullptr),
debug_side_table_entry(debug_side_table
? debug_side_table->GetEntry(pc_offset)
@@ -667,11 +700,17 @@ class DebugInfoImpl {
GenerateLiftoffDebugSideTable(allocator, &env, func_body);
DebugSideTable* ret = debug_side_table.get();
- // Install into cache and return.
+ // Check cache again, maybe another thread concurrently generated a debug
+ // side table already.
{
base::MutexGuard guard(&mutex_);
- debug_side_tables_[code] = std::move(debug_side_table);
+ auto& slot = debug_side_tables_[code];
+ if (slot != nullptr) return slot.get();
+ slot = std::move(debug_side_table);
}
+
+ // Print the code together with the debug table, if requested.
+ code->MaybePrint();
return ret;
}
@@ -741,15 +780,15 @@ class DebugInfoImpl {
// After installing a Liftoff code object with a different set of breakpoints,
// update return addresses on the stack so that execution resumes in the new
// code. The frame layout itself should be independent of breakpoints.
- // TODO(thibaudm): update other threads as well.
- void UpdateReturnAddresses(Isolate* isolate, WasmCode* new_code) {
+ void UpdateReturnAddresses(Isolate* isolate, WasmCode* new_code,
+ StackFrameId stepping_frame) {
// The first return location is after the breakpoint, others are after wasm
// calls.
ReturnLocation return_location = kAfterBreakpoint;
for (StackTraceFrameIterator it(isolate); !it.done();
it.Advance(), return_location = kAfterWasmCall) {
// We still need the flooded function for stepping.
- if (it.frame()->id() == stepping_frame_) continue;
+ if (it.frame()->id() == stepping_frame) continue;
if (!it.is_wasm()) continue;
WasmFrame* frame = WasmFrame::cast(it.frame());
if (frame->native_module() != new_code->native_module()) continue;
@@ -788,25 +827,32 @@ class DebugInfoImpl {
return static_cast<size_t>(position) == code.end_offset() - 1;
}
+ // Isolate-specific data, for debugging modules that are shared by multiple
+ // isolates.
+ struct PerIsolateDebugData {
+ // Keeps track of the currently set breakpoints (by offset within that
+ // function).
+ std::unordered_map<int, std::vector<int>> breakpoints_per_function;
+
+ // Store the frame ID when stepping, to avoid overwriting that frame when
+ // setting or removing a breakpoint.
+ StackFrameId stepping_frame = NO_ID;
+ };
+
NativeModule* const native_module_;
// {mutex_} protects all fields below.
mutable base::Mutex mutex_;
// DebugSideTable per code object, lazily initialized.
- std::unordered_map<WasmCode*, std::unique_ptr<DebugSideTable>>
+ std::unordered_map<const WasmCode*, std::unique_ptr<DebugSideTable>>
debug_side_tables_;
// Names of locals, lazily decoded from the wire bytes.
std::unique_ptr<LocalNames> local_names_;
- // Keeps track of the currently set breakpoints (by offset within that
- // function).
- std::unordered_map<int, std::vector<int>> breakpoints_per_function_;
-
- // Store the frame ID when stepping, to avoid overwriting that frame when
- // setting or removing a breakpoint.
- StackFrameId stepping_frame_ = NO_ID;
+ // Isolate-specific data.
+ std::unordered_map<Isolate*, PerIsolateDebugData> per_isolate_data_;
DISALLOW_COPY_AND_ASSIGN(DebugInfoImpl);
};
@@ -816,22 +862,18 @@ DebugInfo::DebugInfo(NativeModule* native_module)
DebugInfo::~DebugInfo() = default;
-int DebugInfo::GetNumLocals(Isolate* isolate, Address pc) {
- return impl_->GetNumLocals(isolate, pc);
-}
+int DebugInfo::GetNumLocals(Address pc) { return impl_->GetNumLocals(pc); }
-WasmValue DebugInfo::GetLocalValue(int local, Isolate* isolate, Address pc,
- Address fp, Address debug_break_fp) {
- return impl_->GetLocalValue(local, isolate, pc, fp, debug_break_fp);
+WasmValue DebugInfo::GetLocalValue(int local, Address pc, Address fp,
+ Address debug_break_fp) {
+ return impl_->GetLocalValue(local, pc, fp, debug_break_fp);
}
-int DebugInfo::GetStackDepth(Isolate* isolate, Address pc) {
- return impl_->GetStackDepth(isolate, pc);
-}
+int DebugInfo::GetStackDepth(Address pc) { return impl_->GetStackDepth(pc); }
-WasmValue DebugInfo::GetStackValue(int index, Isolate* isolate, Address pc,
- Address fp, Address debug_break_fp) {
- return impl_->GetStackValue(index, isolate, pc, fp, debug_break_fp);
+WasmValue DebugInfo::GetStackValue(int index, Address pc, Address fp,
+ Address debug_break_fp) {
+ return impl_->GetStackValue(index, pc, fp, debug_break_fp);
}
Handle<JSObject> DebugInfo::GetLocalScopeObject(Isolate* isolate, Address pc,
@@ -859,7 +901,9 @@ void DebugInfo::PrepareStep(Isolate* isolate, StackFrameId break_frame_id) {
impl_->PrepareStep(isolate, break_frame_id);
}
-void DebugInfo::ClearStepping() { impl_->ClearStepping(); }
+void DebugInfo::ClearStepping(Isolate* isolate) {
+ impl_->ClearStepping(isolate);
+}
bool DebugInfo::IsStepping(WasmFrame* frame) {
return impl_->IsStepping(frame);
@@ -874,65 +918,16 @@ void DebugInfo::RemoveDebugSideTables(Vector<WasmCode* const> code) {
impl_->RemoveDebugSideTables(code);
}
-} // namespace wasm
-
-Handle<WasmDebugInfo> WasmDebugInfo::New(Handle<WasmInstanceObject> instance) {
- DCHECK(!instance->has_debug_info());
- Factory* factory = instance->GetIsolate()->factory();
- Handle<Cell> stack_cell = factory->NewCell(factory->empty_fixed_array());
- Handle<WasmDebugInfo> debug_info = Handle<WasmDebugInfo>::cast(
- factory->NewStruct(WASM_DEBUG_INFO_TYPE, AllocationType::kOld));
- debug_info->set_wasm_instance(*instance);
- debug_info->set_interpreter_reference_stack(*stack_cell);
- instance->set_debug_info(*debug_info);
- return debug_info;
+DebugSideTable* DebugInfo::GetDebugSideTableIfExists(
+ const WasmCode* code) const {
+ return impl_->GetDebugSideTableIfExists(code);
}
-wasm::WasmInterpreter* WasmDebugInfo::SetupForTesting(
- Handle<WasmInstanceObject> instance_obj) {
- Handle<WasmDebugInfo> debug_info = WasmDebugInfo::New(instance_obj);
- Isolate* isolate = instance_obj->GetIsolate();
- // Use the maximum stack size to estimate the maximum size of the interpreter.
- // The interpreter keeps its own stack internally, and the size of the stack
- // should dominate the overall size of the interpreter. We multiply by '2' to
- // account for the growing strategy for the backing store of the stack.
- size_t interpreter_size = FLAG_stack_size * KB * 2;
- auto interp_handle = Managed<wasm::InterpreterHandle>::Allocate(
- isolate, interpreter_size, isolate, debug_info);
- debug_info->set_interpreter_handle(*interp_handle);
- return interp_handle->raw()->interpreter();
+void DebugInfo::RemoveIsolate(Isolate* isolate) {
+ return impl_->RemoveIsolate(isolate);
}
-// static
-Handle<Code> WasmDebugInfo::GetCWasmEntry(Handle<WasmDebugInfo> debug_info,
- const wasm::FunctionSig* sig) {
- Isolate* isolate = debug_info->GetIsolate();
- DCHECK_EQ(debug_info->has_c_wasm_entries(),
- debug_info->has_c_wasm_entry_map());
- if (!debug_info->has_c_wasm_entries()) {
- auto entries = isolate->factory()->NewFixedArray(4, AllocationType::kOld);
- debug_info->set_c_wasm_entries(*entries);
- size_t map_size = 0; // size estimate not so important here.
- auto managed_map = Managed<wasm::SignatureMap>::Allocate(isolate, map_size);
- debug_info->set_c_wasm_entry_map(*managed_map);
- }
- Handle<FixedArray> entries(debug_info->c_wasm_entries(), isolate);
- wasm::SignatureMap* map = debug_info->c_wasm_entry_map().raw();
- int32_t index = map->Find(*sig);
- if (index == -1) {
- index = static_cast<int32_t>(map->FindOrInsert(*sig));
- if (index == entries->length()) {
- entries =
- isolate->factory()->CopyFixedArrayAndGrow(entries, entries->length());
- debug_info->set_c_wasm_entries(*entries);
- }
- DCHECK(entries->get(index).IsUndefined(isolate));
- Handle<Code> new_entry_code =
- compiler::CompileCWasmEntry(isolate, sig).ToHandleChecked();
- entries->set(index, *new_entry_code);
- }
- return handle(Code::cast(entries->get(index)), isolate);
-}
+} // namespace wasm
namespace {
diff --git a/chromium/v8/src/wasm/wasm-debug.h b/chromium/v8/src/wasm/wasm-debug.h
index 1eacd6ff526..6050cb3a58b 100644
--- a/chromium/v8/src/wasm/wasm-debug.h
+++ b/chromium/v8/src/wasm/wasm-debug.h
@@ -91,6 +91,8 @@ class DebugSideTable {
return values_[index].reg_code;
}
+ void Print(std::ostream&) const;
+
private:
int pc_offset_;
std::vector<Value> values_;
@@ -120,6 +122,8 @@ class DebugSideTable {
int num_locals() const { return num_locals_; }
+ void Print(std::ostream&) const;
+
private:
struct EntryPositionLess {
bool operator()(const Entry& a, const Entry& b) const {
@@ -145,11 +149,11 @@ class V8_EXPORT_PRIVATE DebugInfo {
// For the frame inspection methods below:
// {fp} is the frame pointer of the Liftoff frame, {debug_break_fp} that of
// the {WasmDebugBreak} frame (if any).
- int GetNumLocals(Isolate*, Address pc);
- WasmValue GetLocalValue(int local, Isolate*, Address pc, Address fp,
+ int GetNumLocals(Address pc);
+ WasmValue GetLocalValue(int local, Address pc, Address fp,
Address debug_break_fp);
- int GetStackDepth(Isolate*, Address pc);
- WasmValue GetStackValue(int index, Isolate*, Address pc, Address fp,
+ int GetStackDepth(Address pc);
+ WasmValue GetStackValue(int index, Address pc, Address fp,
Address debug_break_fp);
Handle<JSObject> GetLocalScopeObject(Isolate*, Address pc, Address fp,
@@ -164,7 +168,7 @@ class V8_EXPORT_PRIVATE DebugInfo {
void PrepareStep(Isolate*, StackFrameId);
- void ClearStepping();
+ void ClearStepping(Isolate*);
bool IsStepping(WasmFrame*);
@@ -172,6 +176,12 @@ class V8_EXPORT_PRIVATE DebugInfo {
void RemoveDebugSideTables(Vector<WasmCode* const>);
+ // Return the debug side table for the given code object, but only if it has
+ // already been created. This will never trigger generation of the table.
+ DebugSideTable* GetDebugSideTableIfExists(const WasmCode*) const;
+
+ void RemoveIsolate(Isolate*);
+
private:
std::unique_ptr<DebugInfoImpl> impl_;
};
diff --git a/chromium/v8/src/wasm/wasm-engine.cc b/chromium/v8/src/wasm/wasm-engine.cc
index 324d1b1d49c..133825122fd 100644
--- a/chromium/v8/src/wasm/wasm-engine.cc
+++ b/chromium/v8/src/wasm/wasm-engine.cc
@@ -22,6 +22,7 @@
#include "src/wasm/module-decoder.h"
#include "src/wasm/module-instantiate.h"
#include "src/wasm/streaming-decoder.h"
+#include "src/wasm/wasm-debug.h"
#include "src/wasm/wasm-limits.h"
#include "src/wasm/wasm-objects-inl.h"
@@ -54,7 +55,7 @@ class LogCodesTask : public Task {
DCHECK_NOT_NULL(isolate);
}
- ~LogCodesTask() {
+ ~LogCodesTask() override {
// If the platform deletes this task before executing it, we also deregister
// it to avoid use-after-free from still-running background threads.
if (!cancelled()) DeregisterTask();
@@ -343,9 +344,8 @@ struct WasmEngine::IsolateInfo {
}
#endif
- // All native modules that are being used by this Isolate (currently only
- // grows, never shrinks).
- std::set<NativeModule*> native_modules;
+ // All native modules that are being used by this Isolate.
+ std::unordered_map<NativeModule*, std::weak_ptr<NativeModule>> native_modules;
// Scripts created for each native module in this isolate.
std::unordered_map<NativeModule*, WeakScriptHandle> scripts;
@@ -409,6 +409,7 @@ WasmEngine::~WasmEngine() {
bool WasmEngine::SyncValidate(Isolate* isolate, const WasmFeatures& enabled,
const ModuleWireBytes& bytes) {
+ TRACE_EVENT0("v8.wasm", "wasm.SyncValidate");
// TODO(titzer): remove dependency on the isolate.
if (bytes.start() == nullptr || bytes.length() == 0) return false;
ModuleResult result =
@@ -421,6 +422,7 @@ MaybeHandle<AsmWasmData> WasmEngine::SyncCompileTranslatedAsmJs(
Isolate* isolate, ErrorThrower* thrower, const ModuleWireBytes& bytes,
Vector<const byte> asm_js_offset_table_bytes,
Handle<HeapNumber> uses_bitset, LanguageMode language_mode) {
+ TRACE_EVENT0("v8.wasm", "wasm.SyncCompileTranslatedAsmJs");
ModuleOrigin origin = language_mode == LanguageMode::kSloppy
? kAsmJsSloppyOrigin
: kAsmJsStrictOrigin;
@@ -464,6 +466,7 @@ Handle<WasmModuleObject> WasmEngine::FinalizeTranslatedAsmJs(
MaybeHandle<WasmModuleObject> WasmEngine::SyncCompile(
Isolate* isolate, const WasmFeatures& enabled, ErrorThrower* thrower,
const ModuleWireBytes& bytes) {
+ TRACE_EVENT0("v8.wasm", "wasm.SyncCompile");
ModuleResult result =
DecodeWasmModule(enabled, bytes.start(), bytes.end(), false, kWasmOrigin,
isolate->counters(), allocator());
@@ -509,6 +512,7 @@ MaybeHandle<WasmInstanceObject> WasmEngine::SyncInstantiate(
Isolate* isolate, ErrorThrower* thrower,
Handle<WasmModuleObject> module_object, MaybeHandle<JSReceiver> imports,
MaybeHandle<JSArrayBuffer> memory) {
+ TRACE_EVENT0("v8.wasm", "wasm.SyncInstantiate");
return InstantiateToInstanceObject(isolate, thrower, module_object, imports,
memory);
}
@@ -517,6 +521,7 @@ void WasmEngine::AsyncInstantiate(
Isolate* isolate, std::unique_ptr<InstantiationResultResolver> resolver,
Handle<WasmModuleObject> module_object, MaybeHandle<JSReceiver> imports) {
ErrorThrower thrower(isolate, "WebAssembly.instantiate()");
+ TRACE_EVENT0("v8.wasm", "wasm.AsyncInstantiate");
  // Instantiate a TryCatch so that caught exceptions won't propagate out.
// They will still be set as pending exceptions on the isolate.
// TODO(clemensb): Avoid TryCatch, use Execution::TryCall internally to invoke
@@ -552,6 +557,7 @@ void WasmEngine::AsyncCompile(
std::shared_ptr<CompilationResultResolver> resolver,
const ModuleWireBytes& bytes, bool is_shared,
const char* api_method_name_for_errors) {
+ TRACE_EVENT0("v8.wasm", "wasm.AsyncCompile");
if (!FLAG_wasm_async_compilation) {
// Asynchronous compilation disabled; fall back on synchronous compilation.
ErrorThrower thrower(isolate, api_method_name_for_errors);
@@ -600,10 +606,15 @@ std::shared_ptr<StreamingDecoder> WasmEngine::StartStreamingCompilation(
Isolate* isolate, const WasmFeatures& enabled, Handle<Context> context,
const char* api_method_name,
std::shared_ptr<CompilationResultResolver> resolver) {
- AsyncCompileJob* job =
- CreateAsyncCompileJob(isolate, enabled, std::unique_ptr<byte[]>(nullptr),
- 0, context, api_method_name, std::move(resolver));
- return job->CreateStreamingDecoder();
+ TRACE_EVENT0("v8.wasm", "wasm.StartStreamingCompilation");
+ if (FLAG_wasm_async_compilation) {
+ AsyncCompileJob* job = CreateAsyncCompileJob(
+ isolate, enabled, std::unique_ptr<byte[]>(nullptr), 0, context,
+ api_method_name, std::move(resolver));
+ return job->CreateStreamingDecoder();
+ }
+ return StreamingDecoder::CreateSyncStreamingDecoder(
+ isolate, enabled, context, api_method_name, std::move(resolver));
}
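With this change streaming compilation also works when asynchronous compilation is disabled: the usual path still creates an AsyncCompileJob, and the fallback hands out a synchronous streaming decoder instead. The control flow is just a flag-gated factory; a sketch with illustrative stand-in types (not the real StreamingDecoder interface):

#include <memory>

struct StreamingDecoderSketch {  // illustrative interface only
  virtual ~StreamingDecoderSketch() = default;
};
struct AsyncDecoderSketch : StreamingDecoderSketch {};
struct SyncDecoderSketch : StreamingDecoderSketch {};

// Mirrors the shape of StartStreamingCompilation: prefer the asynchronous
// decoder, fall back to a synchronous one when async compilation is off.
std::shared_ptr<StreamingDecoderSketch> MakeDecoder(bool async_enabled) {
  if (async_enabled) return std::make_shared<AsyncDecoderSketch>();
  return std::make_shared<SyncDecoderSketch>();
}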
void WasmEngine::CompileFunction(Isolate* isolate, NativeModule* native_module,
@@ -616,25 +627,27 @@ void WasmEngine::CompileFunction(Isolate* isolate, NativeModule* native_module,
}
void WasmEngine::TierDownAllModulesPerIsolate(Isolate* isolate) {
- std::vector<NativeModule*> native_modules;
+ std::vector<std::shared_ptr<NativeModule>> native_modules;
{
base::MutexGuard lock(&mutex_);
if (isolates_[isolate]->keep_tiered_down) return;
isolates_[isolate]->keep_tiered_down = true;
- for (auto* native_module : isolates_[isolate]->native_modules) {
- native_modules.push_back(native_module);
- native_module->SetTieringState(kTieredDown);
+ for (auto& entry : isolates_[isolate]->native_modules) {
+ entry.first->SetTieringState(kTieredDown);
+ if (auto shared_ptr = entry.second.lock()) {
+ native_modules.emplace_back(std::move(shared_ptr));
+ }
}
}
- for (auto* native_module : native_modules) {
- native_module->TriggerRecompilation();
+ for (auto& native_module : native_modules) {
+ native_module->RecompileForTiering();
}
}
void WasmEngine::TierUpAllModulesPerIsolate(Isolate* isolate) {
// Only trigger recompilation after releasing the mutex, otherwise we risk
// deadlocks because of lock inversion.
- std::vector<NativeModule*> native_modules_to_recompile;
+ std::vector<std::shared_ptr<NativeModule>> native_modules_to_recompile;
{
base::MutexGuard lock(&mutex_);
isolates_[isolate]->keep_tiered_down = false;
@@ -646,17 +659,20 @@ void WasmEngine::TierUpAllModulesPerIsolate(Isolate* isolate) {
}
return false;
};
- for (auto* native_module : isolates_[isolate]->native_modules) {
+ for (auto& entry : isolates_[isolate]->native_modules) {
+ auto* native_module = entry.first;
if (!native_module->IsTieredDown()) continue;
      // Only start tier-up if no other isolate needs this module in tiered
      // down state.
if (test_keep_tiered_down(native_module)) continue;
native_module->SetTieringState(kTieredUp);
- native_modules_to_recompile.push_back(native_module);
+ if (auto shared_ptr = entry.second.lock()) {
+ native_modules_to_recompile.emplace_back(std::move(shared_ptr));
+ }
}
}
- for (auto* native_module : native_modules_to_recompile) {
- native_module->TriggerRecompilation();
+ for (auto& native_module : native_modules_to_recompile) {
+ native_module->RecompileForTiering();
}
}
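Both tier-down and tier-up now read the isolate's native_modules map, which stores a std::weak_ptr next to each raw pointer: strong references are collected while the mutex is held, and RecompileForTiering runs afterwards only on modules that are still alive. A standalone sketch of that lock-then-work-outside-the-mutex pattern (stand-in Module/Registry types, not the WasmEngine classes):

#include <memory>
#include <mutex>
#include <unordered_map>
#include <utility>
#include <vector>

struct Module {  // stand-in for NativeModule
  void Recompile() {}
};

class Registry {
 public:
  void Add(const std::shared_ptr<Module>& module) {
    std::lock_guard<std::mutex> lock(mutex_);
    modules_.emplace(module.get(), module);
  }

  // Collect strong references under the lock, then do the expensive work
  // without it. Entries whose module has already been destroyed simply
  // fail to lock and are skipped.
  void RecompileAll() {
    std::vector<std::shared_ptr<Module>> alive;
    {
      std::lock_guard<std::mutex> lock(mutex_);
      for (auto& entry : modules_) {
        if (auto strong = entry.second.lock()) {
          alive.push_back(std::move(strong));
        }
      }
    }
    for (auto& module : alive) module->Recompile();
  }

 private:
  std::mutex mutex_;
  std::unordered_map<Module*, std::weak_ptr<Module>> modules_;
};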
@@ -762,11 +778,12 @@ Handle<WasmModuleObject> WasmEngine::ImportNativeModule(
Handle<FixedArray> export_wrappers;
CompileJsToWasmWrappers(isolate, native_module->module(), &export_wrappers);
Handle<WasmModuleObject> module_object = WasmModuleObject::New(
- isolate, std::move(shared_native_module), script, export_wrappers);
+ isolate, shared_native_module, script, export_wrappers);
{
base::MutexGuard lock(&mutex_);
DCHECK_EQ(1, isolates_.count(isolate));
- isolates_[isolate]->native_modules.insert(native_module);
+ isolates_[isolate]->native_modules.emplace(native_module,
+ std::move(shared_native_module));
DCHECK_EQ(1, native_modules_.count(native_module));
native_modules_[native_module]->isolates.insert(isolate);
}
@@ -885,8 +902,8 @@ void WasmEngine::AddIsolate(Isolate* isolate) {
WasmEngine* engine = isolate->wasm_engine();
base::MutexGuard lock(&engine->mutex_);
DCHECK_EQ(1, engine->isolates_.count(isolate));
- for (auto* native_module : engine->isolates_[isolate]->native_modules) {
- native_module->SampleCodeSize(counters, NativeModule::kSampling);
+ for (auto& entry : engine->isolates_[isolate]->native_modules) {
+ entry.first->SampleCodeSize(counters, NativeModule::kSampling);
}
};
isolate->heap()->AddGCEpilogueCallback(callback, v8::kGCTypeMarkSweepCompact,
@@ -910,7 +927,8 @@ void WasmEngine::RemoveIsolate(Isolate* isolate) {
DCHECK_NE(isolates_.end(), it);
std::unique_ptr<IsolateInfo> info = std::move(it->second);
isolates_.erase(it);
- for (NativeModule* native_module : info->native_modules) {
+ for (auto& entry : info->native_modules) {
+ auto* native_module = entry.first;
DCHECK_EQ(1, native_modules_.count(native_module));
DCHECK_EQ(1, native_modules_[native_module]->isolates.count(isolate));
auto* info = native_modules_[native_module].get();
@@ -920,6 +938,9 @@ void WasmEngine::RemoveIsolate(Isolate* isolate) {
current_gc_info_->dead_code.erase(code);
}
}
+ if (native_module->HasDebugInfo()) {
+ native_module->GetDebugInfo()->RemoveIsolate(isolate);
+ }
}
if (current_gc_info_) {
if (RemoveIsolateFromCurrentGC(isolate)) PotentiallyFinishCurrentGC();
@@ -1002,7 +1023,7 @@ std::shared_ptr<NativeModule> WasmEngine::NewNativeModule(
DCHECK(pair.second); // inserted new entry.
pair.first->second.get()->isolates.insert(isolate);
auto& modules_per_isolate = isolates_[isolate]->native_modules;
- modules_per_isolate.insert(native_module.get());
+ modules_per_isolate.emplace(native_module.get(), native_module);
if (isolates_[isolate]->keep_tiered_down) {
native_module->SetTieringState(kTieredDown);
}
@@ -1025,14 +1046,15 @@ std::shared_ptr<NativeModule> WasmEngine::MaybeGetNativeModule(
native_module_info = std::make_unique<NativeModuleInfo>();
}
native_module_info->isolates.insert(isolate);
- isolates_[isolate]->native_modules.insert(native_module.get());
+ isolates_[isolate]->native_modules.emplace(native_module.get(),
+ native_module);
if (isolates_[isolate]->keep_tiered_down) {
native_module->SetTieringState(kTieredDown);
recompile_module = true;
}
}
// Potentially recompile the module for tier down, after releasing the mutex.
- if (recompile_module) native_module->TriggerRecompilation();
+ if (recompile_module) native_module->RecompileForTiering();
return native_module;
}
@@ -1054,14 +1076,15 @@ bool WasmEngine::UpdateNativeModuleCache(
DCHECK_EQ(1, native_modules_.count(native_module->get()));
native_modules_[native_module->get()]->isolates.insert(isolate);
DCHECK_EQ(1, isolates_.count(isolate));
- isolates_[isolate]->native_modules.insert(native_module->get());
+ isolates_[isolate]->native_modules.emplace(native_module->get(),
+ *native_module);
if (isolates_[isolate]->keep_tiered_down) {
native_module->get()->SetTieringState(kTieredDown);
recompile_module = true;
}
}
// Potentially recompile the module for tier down, after releasing the mutex.
- if (recompile_module) native_module->get()->TriggerRecompilation();
+ if (recompile_module) native_module->get()->RecompileForTiering();
return false;
}
@@ -1154,7 +1177,7 @@ void WasmEngine::SampleTopTierCodeSizeInAllIsolates(
void WasmEngine::ReportLiveCodeForGC(Isolate* isolate,
Vector<WasmCode*> live_code) {
- TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.wasm"), "ReportLiveCodeForGC");
+ TRACE_EVENT0("v8.wasm", "wasm.ReportLiveCodeForGC");
TRACE_CODE_GC("Isolate %d reporting %zu live code objects.\n", isolate->id(),
live_code.size());
base::MutexGuard guard(&mutex_);
@@ -1227,7 +1250,7 @@ void WasmEngine::FreeDeadCode(const DeadCodeMap& dead_code) {
}
void WasmEngine::FreeDeadCodeLocked(const DeadCodeMap& dead_code) {
- TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.wasm"), "FreeDeadCode");
+ TRACE_EVENT0("v8.wasm", "wasm.FreeDeadCode");
DCHECK(!mutex_.TryLock());
for (auto& dead_code_entry : dead_code) {
NativeModule* native_module = dead_code_entry.first;
diff --git a/chromium/v8/src/wasm/wasm-external-refs.cc b/chromium/v8/src/wasm/wasm-external-refs.cc
index 6dbb9393849..43617a8599a 100644
--- a/chromium/v8/src/wasm/wasm-external-refs.cc
+++ b/chromium/v8/src/wasm/wasm-external-refs.cc
@@ -230,6 +230,82 @@ int32_t float64_to_uint64_wrapper(Address data) {
return 0;
}
+void float32_to_int64_sat_wrapper(Address data) {
+ float input = ReadUnalignedValue<float>(data);
+ // We use "<" here to check the upper bound because of rounding problems: With
+ // "<=" some inputs would be considered within int64 range which are actually
+ // not within int64 range.
+ if (input < static_cast<float>(std::numeric_limits<int64_t>::max()) &&
+ input >= static_cast<float>(std::numeric_limits<int64_t>::min())) {
+ WriteUnalignedValue<int64_t>(data, static_cast<int64_t>(input));
+ return;
+ }
+ if (std::isnan(input)) {
+ WriteUnalignedValue<int64_t>(data, 0);
+ return;
+ }
+ if (input < 0.0) {
+ WriteUnalignedValue<int64_t>(data, std::numeric_limits<int64_t>::min());
+ return;
+ }
+ WriteUnalignedValue<int64_t>(data, std::numeric_limits<int64_t>::max());
+}
+
+void float32_to_uint64_sat_wrapper(Address data) {
+ float input = ReadUnalignedValue<float>(data);
+ // We use "<" here to check the upper bound because of rounding problems: With
+ // "<=" some inputs would be considered within uint64 range which are actually
+ // not within uint64 range.
+ if (input < static_cast<float>(std::numeric_limits<uint64_t>::max()) &&
+ input >= 0.0) {
+ WriteUnalignedValue<uint64_t>(data, static_cast<uint64_t>(input));
+ return;
+ }
+ if (input >= std::numeric_limits<uint64_t>::max()) {
+ WriteUnalignedValue<uint64_t>(data, std::numeric_limits<uint64_t>::max());
+ return;
+ }
+ WriteUnalignedValue<uint64_t>(data, 0);
+}
+
+void float64_to_int64_sat_wrapper(Address data) {
+ double input = ReadUnalignedValue<double>(data);
+ // We use "<" here to check the upper bound because of rounding problems: With
+ // "<=" some inputs would be considered within int64 range which are actually
+ // not within int64 range.
+ if (input < static_cast<double>(std::numeric_limits<int64_t>::max()) &&
+ input >= static_cast<double>(std::numeric_limits<int64_t>::min())) {
+ WriteUnalignedValue<int64_t>(data, static_cast<int64_t>(input));
+ return;
+ }
+ if (std::isnan(input)) {
+ WriteUnalignedValue<int64_t>(data, 0);
+ return;
+ }
+ if (input < 0.0) {
+ WriteUnalignedValue<int64_t>(data, std::numeric_limits<int64_t>::min());
+ return;
+ }
+ WriteUnalignedValue<int64_t>(data, std::numeric_limits<int64_t>::max());
+}
+
+void float64_to_uint64_sat_wrapper(Address data) {
+ double input = ReadUnalignedValue<double>(data);
+ // We use "<" here to check the upper bound because of rounding problems: With
+ // "<=" some inputs would be considered within uint64 range which are actually
+ // not within uint64 range.
+ if (input < static_cast<double>(std::numeric_limits<uint64_t>::max()) &&
+ input >= 0.0) {
+ WriteUnalignedValue<uint64_t>(data, static_cast<uint64_t>(input));
+ return;
+ }
+ if (input >= std::numeric_limits<uint64_t>::max()) {
+ WriteUnalignedValue<uint64_t>(data, std::numeric_limits<uint64_t>::max());
+ return;
+ }
+ WriteUnalignedValue<uint64_t>(data, 0);
+}
+
int32_t int64_div_wrapper(Address data) {
int64_t dividend = ReadUnalignedValue<int64_t>(data);
int64_t divisor = ReadUnalignedValue<int64_t>(data + sizeof(dividend));
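The new *_sat_wrapper helpers clamp out-of-range and NaN inputs instead of reporting a trap, and the upper bound deliberately uses "<": casting the integer maximum to float/double rounds it up to a value that is itself not representable, so an input equal to that cast value must already saturate. The same logic extracted into a standalone function, without the Address/ReadUnalignedValue plumbing (an illustrative sketch, not the wrapper's actual signature):

#include <cmath>
#include <cstdint>
#include <limits>

// Saturating float -> int64 conversion: NaN maps to 0, values below the
// representable range clamp to INT64_MIN, values at or above it clamp to
// INT64_MAX. "<" on the upper bound matters because
// static_cast<float>(INT64_MAX) rounds up to 2^63, which does not fit in
// an int64_t, while static_cast<float>(INT64_MIN) is exactly -2^63 and
// therefore safe to accept with ">=".
int64_t SaturatingFloatToInt64(float input) {
  if (input < static_cast<float>(std::numeric_limits<int64_t>::max()) &&
      input >= static_cast<float>(std::numeric_limits<int64_t>::min())) {
    return static_cast<int64_t>(input);
  }
  if (std::isnan(input)) return 0;
  return input < 0.0f ? std::numeric_limits<int64_t>::min()
                      : std::numeric_limits<int64_t>::max();
}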
@@ -325,6 +401,28 @@ void float64_pow_wrapper(Address data) {
WriteUnalignedValue<double>(data, base::ieee754::pow(x, y));
}
+template <typename T, T (*float_round_op)(T)>
+void simd_float_round_wrapper(Address data) {
+ constexpr int n = kSimd128Size / sizeof(T);
+ for (int i = 0; i < n; i++) {
+ WriteUnalignedValue<T>(
+ data + (i * sizeof(T)),
+ float_round_op(ReadUnalignedValue<T>(data + (i * sizeof(T)))));
+ }
+}
+
+void f32x4_ceil_wrapper(Address data) {
+ simd_float_round_wrapper<float, &ceilf>(data);
+}
+
+void f32x4_floor_wrapper(Address data) {
+ simd_float_round_wrapper<float, &floorf>(data);
+}
+
+void f32x4_trunc_wrapper(Address data) {
+ simd_float_round_wrapper<float, &truncf>(data);
+}
+
namespace {
class ThreadNotInWasmScope {
// Asan on Windows triggers exceptions to allocate shadow memory lazily. When
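The simd_float_round_wrapper template above applies a scalar rounding function to every lane of a 128-bit value in place, addressing each lane at an explicit byte offset. A self-contained sketch of the same lane-wise update over a raw byte buffer, with std::memcpy standing in for the engine's unaligned read/write helpers (names here are illustrative):

#include <cstdint>
#include <cstring>
#include <math.h>

constexpr int kSimd128SizeBytes = 16;

// Rounds each lane of a 16-byte vector in place. memcpy performs the
// unaligned lane loads and stores.
template <typename T, T (*round_op)(T)>
void SimdRoundInPlace(uint8_t* data) {
  constexpr int kLanes = kSimd128SizeBytes / static_cast<int>(sizeof(T));
  for (int i = 0; i < kLanes; i++) {
    T lane;
    std::memcpy(&lane, data + i * sizeof(T), sizeof(T));
    lane = round_op(lane);
    std::memcpy(data + i * sizeof(T), &lane, sizeof(T));
  }
}

// Usage: round all four f32 lanes up, analogous to f32x4_ceil_wrapper.
inline void F32x4CeilInPlace(uint8_t* data) {
  SimdRoundInPlace<float, &ceilf>(data);
}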
@@ -402,13 +500,13 @@ int32_t memory_init_wrapper(Address data) {
uint32_t dst = ReadAndIncrementOffset<uint32_t>(data, &offset);
uint32_t src = ReadAndIncrementOffset<uint32_t>(data, &offset);
uint32_t seg_index = ReadAndIncrementOffset<uint32_t>(data, &offset);
- size_t size = ReadAndIncrementOffset<uint32_t>(data, &offset);
+ uint32_t size = ReadAndIncrementOffset<uint32_t>(data, &offset);
- size_t mem_size = instance.memory_size();
- if (!base::IsInBounds(dst, size, mem_size)) return kOutOfBounds;
+ uint64_t mem_size = instance.memory_size();
+ if (!base::IsInBounds<uint64_t>(dst, size, mem_size)) return kOutOfBounds;
- size_t seg_size = instance.data_segment_sizes()[seg_index];
- if (!base::IsInBounds(src, size, seg_size)) return kOutOfBounds;
+ uint32_t seg_size = instance.data_segment_sizes()[seg_index];
+ if (!base::IsInBounds<uint32_t>(src, size, seg_size)) return kOutOfBounds;
byte* seg_start =
reinterpret_cast<byte*>(instance.data_segment_starts()[seg_index]);
@@ -427,11 +525,11 @@ int32_t memory_copy_wrapper(Address data) {
WasmInstanceObject instance = WasmInstanceObject::cast(raw_instance);
uint32_t dst = ReadAndIncrementOffset<uint32_t>(data, &offset);
uint32_t src = ReadAndIncrementOffset<uint32_t>(data, &offset);
- size_t size = ReadAndIncrementOffset<uint32_t>(data, &offset);
+ uint32_t size = ReadAndIncrementOffset<uint32_t>(data, &offset);
- size_t mem_size = instance.memory_size();
- if (!base::IsInBounds(dst, size, mem_size)) return kOutOfBounds;
- if (!base::IsInBounds(src, size, mem_size)) return kOutOfBounds;
+ uint64_t mem_size = instance.memory_size();
+ if (!base::IsInBounds<uint64_t>(dst, size, mem_size)) return kOutOfBounds;
+ if (!base::IsInBounds<uint64_t>(src, size, mem_size)) return kOutOfBounds;
// Use std::memmove, because the ranges can overlap.
std::memmove(EffectiveAddress(instance, dst), EffectiveAddress(instance, src),
@@ -452,10 +550,10 @@ int32_t memory_fill_wrapper(Address data) {
uint32_t dst = ReadAndIncrementOffset<uint32_t>(data, &offset);
uint8_t value =
static_cast<uint8_t>(ReadAndIncrementOffset<uint32_t>(data, &offset));
- size_t size = ReadAndIncrementOffset<uint32_t>(data, &offset);
+ uint32_t size = ReadAndIncrementOffset<uint32_t>(data, &offset);
- size_t mem_size = instance.memory_size();
- if (!base::IsInBounds(dst, size, mem_size)) return kOutOfBounds;
+ uint64_t mem_size = instance.memory_size();
+ if (!base::IsInBounds<uint64_t>(dst, size, mem_size)) return kOutOfBounds;
std::memset(EffectiveAddress(instance, dst), value, size);
return kSuccess;
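The memory_init/copy/fill hunks pin down the integer widths of each bounds check: sizes decoded from the argument buffer stay uint32_t, the memory size is held as uint64_t, and base::IsInBounds is instantiated explicitly so no operand is narrowed silently. A standalone overflow-safe range check in the same spirit (my own helper, not the base::IsInBounds implementation):

#include <cstdint>

// True if [index, index + size) fits inside [0, max). Checking
// "index <= max - size" after "size <= max" avoids the wrap-around that a
// naive "index + size <= max" could produce.
inline bool IsInBoundsSketch(uint64_t index, uint64_t size, uint64_t max) {
  return size <= max && index <= max - size;
}

// Mirrors the shape of memory_fill_wrapper's check: 32-bit dst and size
// from the wasm side, 64-bit memory size from the engine.
inline bool FillIsInBounds(uint32_t dst, uint32_t size, uint64_t mem_size) {
  return IsInBoundsSketch(dst, size, mem_size);
}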
diff --git a/chromium/v8/src/wasm/wasm-external-refs.h b/chromium/v8/src/wasm/wasm-external-refs.h
index 0a2d5f30602..b41d44e4435 100644
--- a/chromium/v8/src/wasm/wasm-external-refs.h
+++ b/chromium/v8/src/wasm/wasm-external-refs.h
@@ -45,6 +45,14 @@ V8_EXPORT_PRIVATE int32_t float64_to_int64_wrapper(Address data);
V8_EXPORT_PRIVATE int32_t float64_to_uint64_wrapper(Address data);
+V8_EXPORT_PRIVATE void float32_to_int64_sat_wrapper(Address data);
+
+V8_EXPORT_PRIVATE void float32_to_uint64_sat_wrapper(Address data);
+
+V8_EXPORT_PRIVATE void float64_to_int64_sat_wrapper(Address data);
+
+V8_EXPORT_PRIVATE void float64_to_uint64_sat_wrapper(Address data);
+
V8_EXPORT_PRIVATE int32_t int64_div_wrapper(Address data);
V8_EXPORT_PRIVATE int32_t int64_mod_wrapper(Address data);
@@ -71,6 +79,12 @@ V8_EXPORT_PRIVATE void word64_ror_wrapper(Address data);
V8_EXPORT_PRIVATE void float64_pow_wrapper(Address data);
+V8_EXPORT_PRIVATE void f32x4_ceil_wrapper(Address data);
+
+V8_EXPORT_PRIVATE void f32x4_floor_wrapper(Address data);
+
+V8_EXPORT_PRIVATE void f32x4_trunc_wrapper(Address data);
+
// The return type is {int32_t} instead of {bool} to enforce the compiler to
// zero-extend the result in the return register.
int32_t memory_init_wrapper(Address data);
diff --git a/chromium/v8/src/wasm/wasm-feature-flags.h b/chromium/v8/src/wasm/wasm-feature-flags.h
index ab8eb612a85..2450608f141 100644
--- a/chromium/v8/src/wasm/wasm-feature-flags.h
+++ b/chromium/v8/src/wasm/wasm-feature-flags.h
@@ -33,7 +33,12 @@
/* Official proposal: https://github.com/WebAssembly/gc */ \
/* Prototype engineering spec: https://bit.ly/3cWcm6Q */ \
/* V8 side owner: jkummerow */ \
- V(gc, "garbage collection", false)
+ V(gc, "garbage collection", false) \
+ \
+ /* Typed function references proposal. */ \
+ /* Official proposal: https://github.com/WebAssembly/function-references */ \
+ /* V8 side owner: ahaas */ \
+ V(typed_funcref, "typed function references", false)
// #############################################################################
// Staged features (disabled by default, but enabled via --wasm-staging (also
@@ -44,24 +49,18 @@
// be shipped with enough lead time to the next branch to allow for
// stabilization.
#define FOREACH_WASM_STAGING_FEATURE_FLAG(V) /* (force 80 columns) */ \
- /* Reference Types, a.k.a. anyref proposal. */ \
- /* https://github.com/WebAssembly/reference-types */ \
- /* V8 side owner: ahaas */ \
- /* Staged in v7.8. */ \
- V(anyref, "anyref opcodes", false) \
- \
- /* JS BitInt to wasm i64 integration. */ \
- /* https://github.com/WebAssembly/JS-BigInt-integration */ \
- /* V8 side owner: ahaas, ssauleau@igalia.com */ \
- /* Staged in v7.9. */ \
- V(bigint, "JS BigInt support", false) \
- \
/* Multi-value proposal. */ \
/* https://github.com/WebAssembly/multi-value */ \
/* V8 side owner: thibaudm */ \
/* Staged in v8.0. */ \
V(mv, "multi-value support", false) \
\
+ /* Reference Types, a.k.a. reftypes proposal. */ \
+ /* https://github.com/WebAssembly/reference-types */ \
+ /* V8 side owner: ahaas */ \
+ /* Staged in v7.8. */ \
+ V(reftypes, "reference type opcodes", false) \
+ \
/* Threads proposal. */ \
/* https://github.com/webassembly/threads */ \
/* NOTE: This is enabled via chromium flag on desktop systems since v7.4 */ \
@@ -80,6 +79,14 @@
// Shipped features (enabled by default). Remove the feature flag once they hit
// stable and are expected to stay enabled.
#define FOREACH_WASM_SHIPPED_FEATURE_FLAG(V) /* (force 80 columns) */ \
+ /* JS BigInt to wasm i64 integration. */ \
+ /* https://github.com/WebAssembly/JS-BigInt-integration */ \
+ /* V8 side owner: ahaas, ssauleau@igalia.com */ \
+ /* Shipped in v8.5. */ \
+ /* ITS: https://groups.google.com/a/chromium.org/g/blink-dev/c/ */ \
+ /* g4QKRUQV1-0/m/jdWjD1uZAAAJ */ \
+ V(bigint, "JS BigInt support", true) \
+ \
/* Bulk memory operations. */ \
/* https://github.com/webassembly/bulk-memory-operations */ \
/* V8 side owner: binji */ \
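The feature lists in wasm-feature-flags.h are X-macros: every V(name, description, default) entry is expanded elsewhere into command-line flags, WasmFeatures members and so on, which is why moving bigint from the staging list into the shipped list with a default of true is the entire "ship" step. A tiny self-contained illustration of the expansion technique (the demo list and struct are made up, not V8's real flag machinery):

#include <cstdio>

// Hypothetical feature list in the same style as the FOREACH_WASM_* macros.
#define FOREACH_DEMO_FEATURE(V)                \
  V(reftypes, "reference type opcodes", false) \
  V(bigint, "JS BigInt support", true)

// One expansion: a struct with one bool per feature, initialized to the
// default given in the list.
struct DemoFeatures {
#define DECL_FIELD(name, desc, def) bool name = def;
  FOREACH_DEMO_FEATURE(DECL_FIELD)
#undef DECL_FIELD
};

// A second expansion over the same list: print each feature and its default.
inline void PrintDemoFeatures() {
#define PRINT_FEATURE(name, desc, def) \
  std::printf("%s (%s): default %s\n", #name, desc, def ? "on" : "off");
  FOREACH_DEMO_FEATURE(PRINT_FEATURE)
#undef PRINT_FEATURE
}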
diff --git a/chromium/v8/src/wasm/wasm-interpreter.cc b/chromium/v8/src/wasm/wasm-interpreter.cc
deleted file mode 100644
index 96255ef8180..00000000000
--- a/chromium/v8/src/wasm/wasm-interpreter.cc
+++ /dev/null
@@ -1,4456 +0,0 @@
-// Copyright 2016 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include <atomic>
-#include <type_traits>
-
-#include "src/wasm/wasm-interpreter.h"
-
-#include "src/base/overflowing-math.h"
-#include "src/codegen/assembler-inl.h"
-#include "src/compiler/wasm-compiler.h"
-#include "src/numbers/conversions.h"
-#include "src/objects/objects-inl.h"
-#include "src/utils/boxed-float.h"
-#include "src/utils/identity-map.h"
-#include "src/utils/utils.h"
-#include "src/wasm/decoder.h"
-#include "src/wasm/function-body-decoder-impl.h"
-#include "src/wasm/function-body-decoder.h"
-#include "src/wasm/memory-tracing.h"
-#include "src/wasm/module-compiler.h"
-#include "src/wasm/wasm-arguments.h"
-#include "src/wasm/wasm-engine.h"
-#include "src/wasm/wasm-external-refs.h"
-#include "src/wasm/wasm-limits.h"
-#include "src/wasm/wasm-module.h"
-#include "src/wasm/wasm-objects-inl.h"
-#include "src/wasm/wasm-opcodes.h"
-#include "src/zone/accounting-allocator.h"
-#include "src/zone/zone-containers.h"
-
-namespace v8 {
-namespace internal {
-namespace wasm {
-
-using base::ReadLittleEndianValue;
-using base::ReadUnalignedValue;
-using base::WriteLittleEndianValue;
-using base::WriteUnalignedValue;
-
-#define TRACE(...) \
- do { \
- if (FLAG_trace_wasm_interpreter) PrintF(__VA_ARGS__); \
- } while (false)
-
-#if V8_TARGET_BIG_ENDIAN
-#define LANE(i, type) ((sizeof(type.val) / sizeof(type.val[0])) - (i)-1)
-#else
-#define LANE(i, type) (i)
-#endif
-
-#define FOREACH_INTERNAL_OPCODE(V) V(Breakpoint, 0xFF)
-
-#define FOREACH_SIMPLE_BINOP(V) \
- V(I32Add, uint32_t, +) \
- V(I32Sub, uint32_t, -) \
- V(I32Mul, uint32_t, *) \
- V(I32And, uint32_t, &) \
- V(I32Ior, uint32_t, |) \
- V(I32Xor, uint32_t, ^) \
- V(I32Eq, uint32_t, ==) \
- V(I32Ne, uint32_t, !=) \
- V(I32LtU, uint32_t, <) \
- V(I32LeU, uint32_t, <=) \
- V(I32GtU, uint32_t, >) \
- V(I32GeU, uint32_t, >=) \
- V(I32LtS, int32_t, <) \
- V(I32LeS, int32_t, <=) \
- V(I32GtS, int32_t, >) \
- V(I32GeS, int32_t, >=) \
- V(I64Add, uint64_t, +) \
- V(I64Sub, uint64_t, -) \
- V(I64Mul, uint64_t, *) \
- V(I64And, uint64_t, &) \
- V(I64Ior, uint64_t, |) \
- V(I64Xor, uint64_t, ^) \
- V(I64Eq, uint64_t, ==) \
- V(I64Ne, uint64_t, !=) \
- V(I64LtU, uint64_t, <) \
- V(I64LeU, uint64_t, <=) \
- V(I64GtU, uint64_t, >) \
- V(I64GeU, uint64_t, >=) \
- V(I64LtS, int64_t, <) \
- V(I64LeS, int64_t, <=) \
- V(I64GtS, int64_t, >) \
- V(I64GeS, int64_t, >=) \
- V(F32Add, float, +) \
- V(F32Sub, float, -) \
- V(F32Eq, float, ==) \
- V(F32Ne, float, !=) \
- V(F32Lt, float, <) \
- V(F32Le, float, <=) \
- V(F32Gt, float, >) \
- V(F32Ge, float, >=) \
- V(F64Add, double, +) \
- V(F64Sub, double, -) \
- V(F64Eq, double, ==) \
- V(F64Ne, double, !=) \
- V(F64Lt, double, <) \
- V(F64Le, double, <=) \
- V(F64Gt, double, >) \
- V(F64Ge, double, >=) \
- V(F32Mul, float, *) \
- V(F64Mul, double, *) \
- V(F32Div, float, /) \
- V(F64Div, double, /)
-
-#define FOREACH_OTHER_BINOP(V) \
- V(I32DivS, int32_t) \
- V(I32DivU, uint32_t) \
- V(I32RemS, int32_t) \
- V(I32RemU, uint32_t) \
- V(I32Shl, uint32_t) \
- V(I32ShrU, uint32_t) \
- V(I32ShrS, int32_t) \
- V(I64DivS, int64_t) \
- V(I64DivU, uint64_t) \
- V(I64RemS, int64_t) \
- V(I64RemU, uint64_t) \
- V(I64Shl, uint64_t) \
- V(I64ShrU, uint64_t) \
- V(I64ShrS, int64_t) \
- V(I32Ror, int32_t) \
- V(I32Rol, int32_t) \
- V(I64Ror, int64_t) \
- V(I64Rol, int64_t) \
- V(F32Min, float) \
- V(F32Max, float) \
- V(F64Min, double) \
- V(F64Max, double) \
- V(I32AsmjsDivS, int32_t) \
- V(I32AsmjsDivU, uint32_t) \
- V(I32AsmjsRemS, int32_t) \
- V(I32AsmjsRemU, uint32_t) \
- V(F32CopySign, Float32) \
- V(F64CopySign, Float64)
-
-#define FOREACH_I32CONV_FLOATOP(V) \
- V(I32SConvertF32, int32_t, float) \
- V(I32SConvertF64, int32_t, double) \
- V(I32UConvertF32, uint32_t, float) \
- V(I32UConvertF64, uint32_t, double)
-
-#define FOREACH_OTHER_UNOP(V) \
- V(I32Clz, uint32_t) \
- V(I32Ctz, uint32_t) \
- V(I32Popcnt, uint32_t) \
- V(I32Eqz, uint32_t) \
- V(I64Clz, uint64_t) \
- V(I64Ctz, uint64_t) \
- V(I64Popcnt, uint64_t) \
- V(I64Eqz, uint64_t) \
- V(F32Abs, Float32) \
- V(F32Neg, Float32) \
- V(F32Ceil, float) \
- V(F32Floor, float) \
- V(F32Trunc, float) \
- V(F32NearestInt, float) \
- V(F64Abs, Float64) \
- V(F64Neg, Float64) \
- V(F64Ceil, double) \
- V(F64Floor, double) \
- V(F64Trunc, double) \
- V(F64NearestInt, double) \
- V(I32ConvertI64, int64_t) \
- V(I64SConvertF32, float) \
- V(I64SConvertF64, double) \
- V(I64UConvertF32, float) \
- V(I64UConvertF64, double) \
- V(I64SConvertI32, int32_t) \
- V(I64UConvertI32, uint32_t) \
- V(F32SConvertI32, int32_t) \
- V(F32UConvertI32, uint32_t) \
- V(F32SConvertI64, int64_t) \
- V(F32UConvertI64, uint64_t) \
- V(F32ConvertF64, double) \
- V(F32ReinterpretI32, int32_t) \
- V(F64SConvertI32, int32_t) \
- V(F64UConvertI32, uint32_t) \
- V(F64SConvertI64, int64_t) \
- V(F64UConvertI64, uint64_t) \
- V(F64ConvertF32, float) \
- V(F64ReinterpretI64, int64_t) \
- V(I32AsmjsSConvertF32, float) \
- V(I32AsmjsUConvertF32, float) \
- V(I32AsmjsSConvertF64, double) \
- V(I32AsmjsUConvertF64, double) \
- V(F32Sqrt, float) \
- V(F64Sqrt, double)
-
-namespace {
-
-constexpr uint32_t kFloat32SignBitMask = uint32_t{1} << 31;
-constexpr uint64_t kFloat64SignBitMask = uint64_t{1} << 63;
-
-inline int32_t ExecuteI32DivS(int32_t a, int32_t b, TrapReason* trap) {
- if (b == 0) {
- *trap = kTrapDivByZero;
- return 0;
- }
- if (b == -1 && a == std::numeric_limits<int32_t>::min()) {
- *trap = kTrapDivUnrepresentable;
- return 0;
- }
- return a / b;
-}
-
-inline uint32_t ExecuteI32DivU(uint32_t a, uint32_t b, TrapReason* trap) {
- if (b == 0) {
- *trap = kTrapDivByZero;
- return 0;
- }
- return a / b;
-}
-
-inline int32_t ExecuteI32RemS(int32_t a, int32_t b, TrapReason* trap) {
- if (b == 0) {
- *trap = kTrapRemByZero;
- return 0;
- }
- if (b == -1) return 0;
- return a % b;
-}
-
-inline uint32_t ExecuteI32RemU(uint32_t a, uint32_t b, TrapReason* trap) {
- if (b == 0) {
- *trap = kTrapRemByZero;
- return 0;
- }
- return a % b;
-}
-
-inline uint32_t ExecuteI32Shl(uint32_t a, uint32_t b, TrapReason* trap) {
- return a << (b & 0x1F);
-}
-
-inline uint32_t ExecuteI32ShrU(uint32_t a, uint32_t b, TrapReason* trap) {
- return a >> (b & 0x1F);
-}
-
-inline int32_t ExecuteI32ShrS(int32_t a, int32_t b, TrapReason* trap) {
- return a >> (b & 0x1F);
-}
-
-inline int64_t ExecuteI64DivS(int64_t a, int64_t b, TrapReason* trap) {
- if (b == 0) {
- *trap = kTrapDivByZero;
- return 0;
- }
- if (b == -1 && a == std::numeric_limits<int64_t>::min()) {
- *trap = kTrapDivUnrepresentable;
- return 0;
- }
- return a / b;
-}
-
-inline uint64_t ExecuteI64DivU(uint64_t a, uint64_t b, TrapReason* trap) {
- if (b == 0) {
- *trap = kTrapDivByZero;
- return 0;
- }
- return a / b;
-}
-
-inline int64_t ExecuteI64RemS(int64_t a, int64_t b, TrapReason* trap) {
- if (b == 0) {
- *trap = kTrapRemByZero;
- return 0;
- }
- if (b == -1) return 0;
- return a % b;
-}
-
-inline uint64_t ExecuteI64RemU(uint64_t a, uint64_t b, TrapReason* trap) {
- if (b == 0) {
- *trap = kTrapRemByZero;
- return 0;
- }
- return a % b;
-}
-
-inline uint64_t ExecuteI64Shl(uint64_t a, uint64_t b, TrapReason* trap) {
- return a << (b & 0x3F);
-}
-
-inline uint64_t ExecuteI64ShrU(uint64_t a, uint64_t b, TrapReason* trap) {
- return a >> (b & 0x3F);
-}
-
-inline int64_t ExecuteI64ShrS(int64_t a, int64_t b, TrapReason* trap) {
- return a >> (b & 0x3F);
-}
-
-inline uint32_t ExecuteI32Ror(uint32_t a, uint32_t b, TrapReason* trap) {
- return (a >> (b & 0x1F)) | (a << ((32 - b) & 0x1F));
-}
-
-inline uint32_t ExecuteI32Rol(uint32_t a, uint32_t b, TrapReason* trap) {
- return (a << (b & 0x1F)) | (a >> ((32 - b) & 0x1F));
-}
-
-inline uint64_t ExecuteI64Ror(uint64_t a, uint64_t b, TrapReason* trap) {
- return (a >> (b & 0x3F)) | (a << ((64 - b) & 0x3F));
-}
-
-inline uint64_t ExecuteI64Rol(uint64_t a, uint64_t b, TrapReason* trap) {
- return (a << (b & 0x3F)) | (a >> ((64 - b) & 0x3F));
-}
-
-inline float ExecuteF32Min(float a, float b, TrapReason* trap) {
- return JSMin(a, b);
-}
-
-inline float ExecuteF32Max(float a, float b, TrapReason* trap) {
- return JSMax(a, b);
-}
-
-inline Float32 ExecuteF32CopySign(Float32 a, Float32 b, TrapReason* trap) {
- return Float32::FromBits((a.get_bits() & ~kFloat32SignBitMask) |
- (b.get_bits() & kFloat32SignBitMask));
-}
-
-inline double ExecuteF64Min(double a, double b, TrapReason* trap) {
- return JSMin(a, b);
-}
-
-inline double ExecuteF64Max(double a, double b, TrapReason* trap) {
- return JSMax(a, b);
-}
-
-inline Float64 ExecuteF64CopySign(Float64 a, Float64 b, TrapReason* trap) {
- return Float64::FromBits((a.get_bits() & ~kFloat64SignBitMask) |
- (b.get_bits() & kFloat64SignBitMask));
-}
-
-inline int32_t ExecuteI32AsmjsDivS(int32_t a, int32_t b, TrapReason* trap) {
- if (b == 0) return 0;
- if (b == -1 && a == std::numeric_limits<int32_t>::min()) {
- return std::numeric_limits<int32_t>::min();
- }
- return a / b;
-}
-
-inline uint32_t ExecuteI32AsmjsDivU(uint32_t a, uint32_t b, TrapReason* trap) {
- if (b == 0) return 0;
- return a / b;
-}
-
-inline int32_t ExecuteI32AsmjsRemS(int32_t a, int32_t b, TrapReason* trap) {
- if (b == 0) return 0;
- if (b == -1) return 0;
- return a % b;
-}
-
-inline uint32_t ExecuteI32AsmjsRemU(uint32_t a, uint32_t b, TrapReason* trap) {
- if (b == 0) return 0;
- return a % b;
-}
-
-inline int32_t ExecuteI32AsmjsSConvertF32(float a, TrapReason* trap) {
- return DoubleToInt32(a);
-}
-
-inline uint32_t ExecuteI32AsmjsUConvertF32(float a, TrapReason* trap) {
- return DoubleToUint32(a);
-}
-
-inline int32_t ExecuteI32AsmjsSConvertF64(double a, TrapReason* trap) {
- return DoubleToInt32(a);
-}
-
-inline uint32_t ExecuteI32AsmjsUConvertF64(double a, TrapReason* trap) {
- return DoubleToUint32(a);
-}
-
-int32_t ExecuteI32Clz(uint32_t val, TrapReason* trap) {
- return base::bits::CountLeadingZeros(val);
-}
-
-uint32_t ExecuteI32Ctz(uint32_t val, TrapReason* trap) {
- return base::bits::CountTrailingZeros(val);
-}
-
-uint32_t ExecuteI32Popcnt(uint32_t val, TrapReason* trap) {
- return base::bits::CountPopulation(val);
-}
-
-inline uint32_t ExecuteI32Eqz(uint32_t val, TrapReason* trap) {
- return val == 0 ? 1 : 0;
-}
-
-int64_t ExecuteI64Clz(uint64_t val, TrapReason* trap) {
- return base::bits::CountLeadingZeros(val);
-}
-
-inline uint64_t ExecuteI64Ctz(uint64_t val, TrapReason* trap) {
- return base::bits::CountTrailingZeros(val);
-}
-
-inline int64_t ExecuteI64Popcnt(uint64_t val, TrapReason* trap) {
- return base::bits::CountPopulation(val);
-}
-
-inline int32_t ExecuteI64Eqz(uint64_t val, TrapReason* trap) {
- return val == 0 ? 1 : 0;
-}
-
-inline Float32 ExecuteF32Abs(Float32 a, TrapReason* trap) {
- return Float32::FromBits(a.get_bits() & ~kFloat32SignBitMask);
-}
-
-inline Float32 ExecuteF32Neg(Float32 a, TrapReason* trap) {
- return Float32::FromBits(a.get_bits() ^ kFloat32SignBitMask);
-}
-
-inline float ExecuteF32Ceil(float a, TrapReason* trap) { return ceilf(a); }
-
-inline float ExecuteF32Floor(float a, TrapReason* trap) { return floorf(a); }
-
-inline float ExecuteF32Trunc(float a, TrapReason* trap) { return truncf(a); }
-
-inline float ExecuteF32NearestInt(float a, TrapReason* trap) {
- return nearbyintf(a);
-}
-
-inline float ExecuteF32Sqrt(float a, TrapReason* trap) {
- float result = sqrtf(a);
- return result;
-}
-
-inline Float64 ExecuteF64Abs(Float64 a, TrapReason* trap) {
- return Float64::FromBits(a.get_bits() & ~kFloat64SignBitMask);
-}
-
-inline Float64 ExecuteF64Neg(Float64 a, TrapReason* trap) {
- return Float64::FromBits(a.get_bits() ^ kFloat64SignBitMask);
-}
-
-inline double ExecuteF64Ceil(double a, TrapReason* trap) { return ceil(a); }
-
-inline double ExecuteF64Floor(double a, TrapReason* trap) { return floor(a); }
-
-inline double ExecuteF64Trunc(double a, TrapReason* trap) { return trunc(a); }
-
-inline double ExecuteF64NearestInt(double a, TrapReason* trap) {
- return nearbyint(a);
-}
-
-inline double ExecuteF64Sqrt(double a, TrapReason* trap) { return sqrt(a); }
-
-template <typename int_type, typename float_type>
-int_type ExecuteConvert(float_type a, TrapReason* trap) {
- if (is_inbounds<int_type>(a)) {
- return static_cast<int_type>(a);
- }
- *trap = kTrapFloatUnrepresentable;
- return 0;
-}
-
-template <typename int_type, typename float_type>
-int_type ExecuteConvertSaturate(float_type a) {
- TrapReason base_trap = kTrapCount;
- int32_t val = ExecuteConvert<int_type>(a, &base_trap);
- if (base_trap == kTrapCount) {
- return val;
- }
- return std::isnan(a) ? 0
- : (a < static_cast<float_type>(0.0)
- ? std::numeric_limits<int_type>::min()
- : std::numeric_limits<int_type>::max());
-}
-
-template <typename dst_type, typename src_type, void (*fn)(Address)>
-inline dst_type CallExternalIntToFloatFunction(src_type input) {
- uint8_t data[std::max(sizeof(dst_type), sizeof(src_type))];
- Address data_addr = reinterpret_cast<Address>(data);
- WriteUnalignedValue<src_type>(data_addr, input);
- fn(data_addr);
- return ReadUnalignedValue<dst_type>(data_addr);
-}
-
-template <typename dst_type, typename src_type, int32_t (*fn)(Address)>
-inline dst_type CallExternalFloatToIntFunction(src_type input,
- TrapReason* trap) {
- uint8_t data[std::max(sizeof(dst_type), sizeof(src_type))];
- Address data_addr = reinterpret_cast<Address>(data);
- WriteUnalignedValue<src_type>(data_addr, input);
- if (!fn(data_addr)) *trap = kTrapFloatUnrepresentable;
- return ReadUnalignedValue<dst_type>(data_addr);
-}
-
-inline uint32_t ExecuteI32ConvertI64(int64_t a, TrapReason* trap) {
- return static_cast<uint32_t>(a & 0xFFFFFFFF);
-}
-
-int64_t ExecuteI64SConvertF32(float a, TrapReason* trap) {
- return CallExternalFloatToIntFunction<int64_t, float,
- float32_to_int64_wrapper>(a, trap);
-}
-
-int64_t ExecuteI64SConvertSatF32(float a) {
- TrapReason base_trap = kTrapCount;
- int64_t val = ExecuteI64SConvertF32(a, &base_trap);
- if (base_trap == kTrapCount) {
- return val;
- }
- return std::isnan(a) ? 0
- : (a < 0.0 ? std::numeric_limits<int64_t>::min()
- : std::numeric_limits<int64_t>::max());
-}
-
-int64_t ExecuteI64SConvertF64(double a, TrapReason* trap) {
- return CallExternalFloatToIntFunction<int64_t, double,
- float64_to_int64_wrapper>(a, trap);
-}
-
-int64_t ExecuteI64SConvertSatF64(double a) {
- TrapReason base_trap = kTrapCount;
- int64_t val = ExecuteI64SConvertF64(a, &base_trap);
- if (base_trap == kTrapCount) {
- return val;
- }
- return std::isnan(a) ? 0
- : (a < 0.0 ? std::numeric_limits<int64_t>::min()
- : std::numeric_limits<int64_t>::max());
-}
-
-uint64_t ExecuteI64UConvertF32(float a, TrapReason* trap) {
- return CallExternalFloatToIntFunction<uint64_t, float,
- float32_to_uint64_wrapper>(a, trap);
-}
-
-uint64_t ExecuteI64UConvertSatF32(float a) {
- TrapReason base_trap = kTrapCount;
- uint64_t val = ExecuteI64UConvertF32(a, &base_trap);
- if (base_trap == kTrapCount) {
- return val;
- }
- return std::isnan(a) ? 0
- : (a < 0.0 ? std::numeric_limits<uint64_t>::min()
- : std::numeric_limits<uint64_t>::max());
-}
-
-uint64_t ExecuteI64UConvertF64(double a, TrapReason* trap) {
- return CallExternalFloatToIntFunction<uint64_t, double,
- float64_to_uint64_wrapper>(a, trap);
-}
-
-uint64_t ExecuteI64UConvertSatF64(double a) {
- TrapReason base_trap = kTrapCount;
- int64_t val = ExecuteI64UConvertF64(a, &base_trap);
- if (base_trap == kTrapCount) {
- return val;
- }
- return std::isnan(a) ? 0
- : (a < 0.0 ? std::numeric_limits<uint64_t>::min()
- : std::numeric_limits<uint64_t>::max());
-}
-
-inline int64_t ExecuteI64SConvertI32(int32_t a, TrapReason* trap) {
- return static_cast<int64_t>(a);
-}
-
-inline int64_t ExecuteI64UConvertI32(uint32_t a, TrapReason* trap) {
- return static_cast<uint64_t>(a);
-}
-
-inline float ExecuteF32SConvertI32(int32_t a, TrapReason* trap) {
- return static_cast<float>(a);
-}
-
-inline float ExecuteF32UConvertI32(uint32_t a, TrapReason* trap) {
- return static_cast<float>(a);
-}
-
-inline float ExecuteF32SConvertI64(int64_t a, TrapReason* trap) {
- return static_cast<float>(a);
-}
-
-inline float ExecuteF32UConvertI64(uint64_t a, TrapReason* trap) {
- return CallExternalIntToFloatFunction<float, uint64_t,
- uint64_to_float32_wrapper>(a);
-}
-
-inline float ExecuteF32ConvertF64(double a, TrapReason* trap) {
- return DoubleToFloat32(a);
-}
-
-inline Float32 ExecuteF32ReinterpretI32(int32_t a, TrapReason* trap) {
- return Float32::FromBits(a);
-}
-
-inline double ExecuteF64SConvertI32(int32_t a, TrapReason* trap) {
- return static_cast<double>(a);
-}
-
-inline double ExecuteF64UConvertI32(uint32_t a, TrapReason* trap) {
- return static_cast<double>(a);
-}
-
-inline double ExecuteF64SConvertI64(int64_t a, TrapReason* trap) {
- return static_cast<double>(a);
-}
-
-inline double ExecuteF64UConvertI64(uint64_t a, TrapReason* trap) {
- return CallExternalIntToFloatFunction<double, uint64_t,
- uint64_to_float64_wrapper>(a);
-}
-
-inline double ExecuteF64ConvertF32(float a, TrapReason* trap) {
- return static_cast<double>(a);
-}
-
-inline Float64 ExecuteF64ReinterpretI64(int64_t a, TrapReason* trap) {
- return Float64::FromBits(a);
-}
-
-inline int32_t ExecuteI32ReinterpretF32(WasmValue a) {
- return a.to_f32_boxed().get_bits();
-}
-
-inline int64_t ExecuteI64ReinterpretF64(WasmValue a) {
- return a.to_f64_boxed().get_bits();
-}
-
-enum InternalOpcode {
-#define DECL_INTERNAL_ENUM(name, value) kInternal##name = value,
- FOREACH_INTERNAL_OPCODE(DECL_INTERNAL_ENUM)
-#undef DECL_INTERNAL_ENUM
-};
-
-const char* OpcodeName(uint32_t val) {
- switch (val) {
-#define DECL_INTERNAL_CASE(name, value) \
- case kInternal##name: \
- return "Internal" #name;
- FOREACH_INTERNAL_OPCODE(DECL_INTERNAL_CASE)
-#undef DECL_INTERNAL_CASE
- }
- return WasmOpcodes::OpcodeName(static_cast<WasmOpcode>(val));
-}
-
-constexpr int32_t kCatchInArity = 1;
-
-} // namespace
-
-class SideTable;
-
-// Code and metadata needed to execute a function.
-struct InterpreterCode {
- const WasmFunction* function; // wasm function
- BodyLocalDecls locals; // local declarations
- const byte* orig_start; // start of original code
- const byte* orig_end; // end of original code
- byte* start; // start of (maybe altered) code
- byte* end; // end of (maybe altered) code
- SideTable* side_table; // precomputed side table for control flow.
-
- const byte* at(pc_t pc) { return start + pc; }
-};
-
-// A helper class to compute the control transfers for each bytecode offset.
-// Control transfers allow Br, BrIf, BrTable, If, Else, and End bytecodes to
-// be directly executed without the need to dynamically track blocks.
-class SideTable : public ZoneObject {
- public:
- ControlTransferMap map_;
- int32_t max_stack_height_ = 0;
-
- SideTable(Zone* zone, const WasmModule* module, InterpreterCode* code)
- : map_(zone) {
- // Create a zone for all temporary objects.
- Zone control_transfer_zone(zone->allocator(), ZONE_NAME);
-
- // Represents a control flow label.
- class CLabel : public ZoneObject {
- explicit CLabel(Zone* zone, int32_t target_stack_height, uint32_t arity)
- : target_stack_height(target_stack_height),
- arity(arity),
- refs(zone) {}
-
- public:
- struct Ref {
- const byte* from_pc;
- const int32_t stack_height;
- };
- const byte* target = nullptr;
- int32_t target_stack_height;
- // Arity when branching to this label.
- const uint32_t arity;
- ZoneVector<Ref> refs;
-
- static CLabel* New(Zone* zone, int32_t stack_height, uint32_t arity) {
- return new (zone) CLabel(zone, stack_height, arity);
- }
-
- // Bind this label to the given PC.
- void Bind(const byte* pc) {
- DCHECK_NULL(target);
- target = pc;
- }
-
- // Reference this label from the given location.
- void Ref(const byte* from_pc, int32_t stack_height) {
- // Target being bound before a reference means this is a loop.
- DCHECK_IMPLIES(target, *target == kExprLoop);
- refs.push_back({from_pc, stack_height});
- }
-
- void Finish(ControlTransferMap* map, const byte* start) {
- DCHECK_NOT_NULL(target);
- for (auto ref : refs) {
- size_t offset = static_cast<size_t>(ref.from_pc - start);
- auto pcdiff = static_cast<pcdiff_t>(target - ref.from_pc);
- DCHECK_GE(ref.stack_height, target_stack_height);
- spdiff_t spdiff =
- static_cast<spdiff_t>(ref.stack_height - target_stack_height);
- TRACE("control transfer @%zu: Δpc %d, stack %u->%u = -%u\n", offset,
- pcdiff, ref.stack_height, target_stack_height, spdiff);
- ControlTransferEntry& entry = (*map)[offset];
- entry.pc_diff = pcdiff;
- entry.sp_diff = spdiff;
- entry.target_arity = arity;
- }
- }
- };
-
- // An entry in the control stack.
- struct Control {
- const byte* pc;
- CLabel* end_label;
- CLabel* else_label;
- // Arity (number of values on the stack) when exiting this control
- // structure via |end|.
- uint32_t exit_arity;
- // Track whether this block was already left, i.e. all further
- // instructions are unreachable.
- bool unreachable = false;
-
- Control(const byte* pc, CLabel* end_label, CLabel* else_label,
- uint32_t exit_arity)
- : pc(pc),
- end_label(end_label),
- else_label(else_label),
- exit_arity(exit_arity) {}
- Control(const byte* pc, CLabel* end_label, uint32_t exit_arity)
- : Control(pc, end_label, nullptr, exit_arity) {}
-
- void Finish(ControlTransferMap* map, const byte* start) {
- end_label->Finish(map, start);
- if (else_label) else_label->Finish(map, start);
- }
- };
-
- // Compute the ControlTransfer map.
- // This algorithm maintains a stack of control constructs similar to the
- // AST decoder. The {control_stack} allows matching {br,br_if,br_table}
- // bytecodes with their target, as well as determining whether the current
- // bytecodes are within the true or false block of an else.
- ZoneVector<Control> control_stack(&control_transfer_zone);
- // It also maintains a stack of all nested {try} blocks to resolve local
- // handler targets for potentially throwing operations. These exceptional
- // control transfers are treated just like other branches in the resulting
- // map. This stack contains indices into the above control stack.
- ZoneVector<size_t> exception_stack(zone);
- int32_t stack_height = 0;
- uint32_t func_arity =
- static_cast<uint32_t>(code->function->sig->return_count());
- CLabel* func_label =
- CLabel::New(&control_transfer_zone, stack_height, func_arity);
- control_stack.emplace_back(code->orig_start, func_label, func_arity);
- auto control_parent = [&]() -> Control& {
- DCHECK_LE(2, control_stack.size());
- return control_stack[control_stack.size() - 2];
- };
- auto copy_unreachable = [&] {
- control_stack.back().unreachable = control_parent().unreachable;
- };
- for (BytecodeIterator i(code->orig_start, code->orig_end, &code->locals);
- i.has_next(); i.next()) {
- WasmOpcode opcode = i.current();
- int32_t exceptional_stack_height = 0;
- if (WasmOpcodes::IsPrefixOpcode(opcode)) opcode = i.prefixed_opcode();
- bool unreachable = control_stack.back().unreachable;
- if (unreachable) {
- TRACE("@%u: %s (is unreachable)\n", i.pc_offset(),
- WasmOpcodes::OpcodeName(opcode));
- } else {
- auto stack_effect =
- StackEffect(module, code->function->sig, i.pc(), i.end());
- TRACE("@%u: %s (sp %d - %d + %d)\n", i.pc_offset(),
- WasmOpcodes::OpcodeName(opcode), stack_height, stack_effect.first,
- stack_effect.second);
- DCHECK_GE(stack_height, stack_effect.first);
- DCHECK_GE(kMaxUInt32, static_cast<uint64_t>(stack_height) -
- stack_effect.first + stack_effect.second);
- exceptional_stack_height = stack_height - stack_effect.first;
- stack_height = stack_height - stack_effect.first + stack_effect.second;
- if (stack_height > max_stack_height_) max_stack_height_ = stack_height;
- }
- if (!exception_stack.empty() && WasmOpcodes::IsThrowingOpcode(opcode)) {
- // Record exceptional control flow from potentially throwing opcodes to
- // the local handler if one is present. The stack height at the throw
- // point is assumed to have popped all operands and not pushed any yet.
- DCHECK_GE(control_stack.size() - 1, exception_stack.back());
- const Control* c = &control_stack[exception_stack.back()];
- if (!unreachable) c->else_label->Ref(i.pc(), exceptional_stack_height);
- if (exceptional_stack_height + kCatchInArity > max_stack_height_) {
- max_stack_height_ = exceptional_stack_height + kCatchInArity;
- }
- TRACE("handler @%u: %s -> try @%u\n", i.pc_offset(), OpcodeName(opcode),
- static_cast<uint32_t>(c->pc - code->start));
- }
- switch (opcode) {
- case kExprBlock:
- case kExprLoop: {
- bool is_loop = opcode == kExprLoop;
- BlockTypeImmediate<Decoder::kNoValidate> imm(WasmFeatures::All(), &i,
- i.pc());
- if (imm.type == kWasmBottom) {
- imm.sig = module->signature(imm.sig_index);
- }
- TRACE("control @%u: %s, arity %d->%d\n", i.pc_offset(),
- is_loop ? "Loop" : "Block", imm.in_arity(), imm.out_arity());
- CLabel* label =
- CLabel::New(&control_transfer_zone, stack_height - imm.in_arity(),
- is_loop ? imm.in_arity() : imm.out_arity());
- control_stack.emplace_back(i.pc(), label, imm.out_arity());
- copy_unreachable();
- if (is_loop) label->Bind(i.pc());
- break;
- }
- case kExprIf: {
- BlockTypeImmediate<Decoder::kNoValidate> imm(WasmFeatures::All(), &i,
- i.pc());
- if (imm.type == kWasmBottom) {
- imm.sig = module->signature(imm.sig_index);
- }
- TRACE("control @%u: If, arity %d->%d\n", i.pc_offset(),
- imm.in_arity(), imm.out_arity());
- CLabel* end_label =
- CLabel::New(&control_transfer_zone, stack_height - imm.in_arity(),
- imm.out_arity());
- CLabel* else_label =
- CLabel::New(&control_transfer_zone, stack_height, 0);
- control_stack.emplace_back(i.pc(), end_label, else_label,
- imm.out_arity());
- copy_unreachable();
- if (!unreachable) else_label->Ref(i.pc(), stack_height);
- break;
- }
- case kExprElse: {
- Control* c = &control_stack.back();
- copy_unreachable();
- TRACE("control @%u: Else\n", i.pc_offset());
- if (!control_parent().unreachable) {
- c->end_label->Ref(i.pc(), stack_height);
- }
- DCHECK_NOT_NULL(c->else_label);
- c->else_label->Bind(i.pc() + 1);
- c->else_label->Finish(&map_, code->orig_start);
- stack_height = c->else_label->target_stack_height;
- c->else_label = nullptr;
- DCHECK_IMPLIES(!unreachable,
- stack_height >= c->end_label->target_stack_height);
- break;
- }
- case kExprTry: {
- BlockTypeImmediate<Decoder::kNoValidate> imm(WasmFeatures::All(), &i,
- i.pc());
- if (imm.type == kWasmBottom) {
- imm.sig = module->signature(imm.sig_index);
- }
- TRACE("control @%u: Try, arity %d->%d\n", i.pc_offset(),
- imm.in_arity(), imm.out_arity());
- CLabel* end_label = CLabel::New(&control_transfer_zone, stack_height,
- imm.out_arity());
- CLabel* catch_label =
- CLabel::New(&control_transfer_zone, stack_height, kCatchInArity);
- control_stack.emplace_back(i.pc(), end_label, catch_label,
- imm.out_arity());
- exception_stack.push_back(control_stack.size() - 1);
- copy_unreachable();
- break;
- }
- case kExprCatch: {
- DCHECK_EQ(control_stack.size() - 1, exception_stack.back());
- Control* c = &control_stack.back();
- exception_stack.pop_back();
- copy_unreachable();
- TRACE("control @%u: Catch\n", i.pc_offset());
- if (!control_parent().unreachable) {
- c->end_label->Ref(i.pc(), stack_height);
- }
- DCHECK_NOT_NULL(c->else_label);
- c->else_label->Bind(i.pc() + 1);
- c->else_label->Finish(&map_, code->orig_start);
- c->else_label = nullptr;
- DCHECK_IMPLIES(!unreachable,
- stack_height >= c->end_label->target_stack_height);
- stack_height = c->end_label->target_stack_height + kCatchInArity;
- break;
- }
- case kExprBrOnExn: {
- BranchOnExceptionImmediate<Decoder::kNoValidate> imm(&i, i.pc());
- uint32_t depth = imm.depth.depth; // Extracted for convenience.
- imm.index.exception = &module->exceptions[imm.index.index];
- DCHECK_EQ(0, imm.index.exception->sig->return_count());
- size_t params = imm.index.exception->sig->parameter_count();
- // Taken branches pop the exception and push the encoded values.
- int32_t height = stack_height - 1 + static_cast<int32_t>(params);
- TRACE("control @%u: BrOnExn[depth=%u]\n", i.pc_offset(), depth);
- Control* c = &control_stack[control_stack.size() - depth - 1];
- if (!unreachable) c->end_label->Ref(i.pc(), height);
- break;
- }
- case kExprEnd: {
- Control* c = &control_stack.back();
- TRACE("control @%u: End\n", i.pc_offset());
- // Only loops have bound labels.
- DCHECK_IMPLIES(c->end_label->target, *c->pc == kExprLoop);
- if (!c->end_label->target) {
- if (c->else_label) c->else_label->Bind(i.pc());
- c->end_label->Bind(i.pc() + 1);
- }
- c->Finish(&map_, code->orig_start);
- DCHECK_IMPLIES(!unreachable,
- stack_height >= c->end_label->target_stack_height);
- stack_height = c->end_label->target_stack_height + c->exit_arity;
- control_stack.pop_back();
- break;
- }
- case kExprBr: {
- BranchDepthImmediate<Decoder::kNoValidate> imm(&i, i.pc());
- TRACE("control @%u: Br[depth=%u]\n", i.pc_offset(), imm.depth);
- Control* c = &control_stack[control_stack.size() - imm.depth - 1];
- if (!unreachable) c->end_label->Ref(i.pc(), stack_height);
- break;
- }
- case kExprBrIf: {
- BranchDepthImmediate<Decoder::kNoValidate> imm(&i, i.pc());
- TRACE("control @%u: BrIf[depth=%u]\n", i.pc_offset(), imm.depth);
- Control* c = &control_stack[control_stack.size() - imm.depth - 1];
- if (!unreachable) c->end_label->Ref(i.pc(), stack_height);
- break;
- }
- case kExprBrTable: {
- BranchTableImmediate<Decoder::kNoValidate> imm(&i, i.pc());
- BranchTableIterator<Decoder::kNoValidate> iterator(&i, imm);
- TRACE("control @%u: BrTable[count=%u]\n", i.pc_offset(),
- imm.table_count);
- if (!unreachable) {
- while (iterator.has_next()) {
- uint32_t j = iterator.cur_index();
- uint32_t target = iterator.next();
- Control* c = &control_stack[control_stack.size() - target - 1];
- c->end_label->Ref(i.pc() + j, stack_height);
- }
- }
- break;
- }
- default:
- break;
- }
- if (WasmOpcodes::IsUnconditionalJump(opcode)) {
- control_stack.back().unreachable = true;
- }
- }
- DCHECK_EQ(0, control_stack.size());
- DCHECK_EQ(func_arity, stack_height);
- }
-
- bool HasEntryAt(pc_t from) {
- auto result = map_.find(from);
- return result != map_.end();
- }
-
- ControlTransferEntry& Lookup(pc_t from) {
- auto result = map_.find(from);
- DCHECK(result != map_.end());
- return result->second;
- }
-};
-
-// The main storage for interpreter code. It maps {WasmFunction} to the
-// metadata needed to execute each function.
-class CodeMap {
- Zone* zone_;
- const WasmModule* module_;
- ZoneVector<InterpreterCode> interpreter_code_;
-
- public:
- CodeMap(const WasmModule* module, const uint8_t* module_start, Zone* zone)
- : zone_(zone), module_(module), interpreter_code_(zone) {
- if (module == nullptr) return;
- interpreter_code_.reserve(module->functions.size());
- for (const WasmFunction& function : module->functions) {
- if (function.imported) {
- DCHECK(!function.code.is_set());
- AddFunction(&function, nullptr, nullptr);
- } else {
- AddFunction(&function, module_start + function.code.offset(),
- module_start + function.code.end_offset());
- }
- }
- }
-
- const WasmModule* module() const { return module_; }
-
- InterpreterCode* GetCode(const WasmFunction* function) {
- InterpreterCode* code = GetCode(function->func_index);
- DCHECK_EQ(function, code->function);
- return code;
- }
-
- InterpreterCode* GetCode(uint32_t function_index) {
- DCHECK_LT(function_index, interpreter_code_.size());
- return Preprocess(&interpreter_code_[function_index]);
- }
-
- InterpreterCode* Preprocess(InterpreterCode* code) {
- DCHECK_EQ(code->function->imported, code->start == nullptr);
- if (!code->side_table && code->start) {
- // Compute the control targets map and the local declarations.
- code->side_table = new (zone_) SideTable(zone_, module_, code);
- }
- return code;
- }
-
- void AddFunction(const WasmFunction* function, const byte* code_start,
- const byte* code_end) {
- InterpreterCode code = {
- function, BodyLocalDecls(zone_), code_start,
- code_end, const_cast<byte*>(code_start), const_cast<byte*>(code_end),
- nullptr};
-
- DCHECK_EQ(interpreter_code_.size(), function->func_index);
- interpreter_code_.push_back(code);
- }
-
- void SetFunctionCode(const WasmFunction* function, const byte* start,
- const byte* end) {
- DCHECK_LT(function->func_index, interpreter_code_.size());
- InterpreterCode* code = &interpreter_code_[function->func_index];
- DCHECK_EQ(function, code->function);
- code->orig_start = start;
- code->orig_end = end;
- code->start = const_cast<byte*>(start);
- code->end = const_cast<byte*>(end);
- code->side_table = nullptr;
- Preprocess(code);
- }
-};
-
-namespace {
-
-struct ExternalCallResult {
- enum Type {
- // The function should be executed inside this interpreter.
- INTERNAL,
- // For indirect calls: Table or function does not exist.
- INVALID_FUNC,
- // For indirect calls: Signature does not match expected signature.
- SIGNATURE_MISMATCH,
- // The function was executed and returned normally.
- EXTERNAL_RETURNED,
- // The function was executed, threw an exception, and the stack was unwound.
- EXTERNAL_UNWOUND,
- // The function was executed and threw an exception that was locally caught.
- EXTERNAL_CAUGHT
- };
- Type type;
- // If type is INTERNAL, this field holds the function to call internally.
- InterpreterCode* interpreter_code;
-
- ExternalCallResult(Type type) : type(type) { // NOLINT
- DCHECK_NE(INTERNAL, type);
- }
- ExternalCallResult(Type type, InterpreterCode* code)
- : type(type), interpreter_code(code) {
- DCHECK_EQ(INTERNAL, type);
- }
-};
-
-// Like a static_cast from src to dst, but specialized for boxed floats.
-template <typename dst, typename src>
-struct converter {
- dst operator()(src val) const { return static_cast<dst>(val); }
-};
-template <>
-struct converter<Float64, uint64_t> {
- Float64 operator()(uint64_t val) const { return Float64::FromBits(val); }
-};
-template <>
-struct converter<Float32, uint32_t> {
- Float32 operator()(uint32_t val) const { return Float32::FromBits(val); }
-};
-template <>
-struct converter<uint64_t, Float64> {
- uint64_t operator()(Float64 val) const { return val.get_bits(); }
-};
-template <>
-struct converter<uint32_t, Float32> {
- uint32_t operator()(Float32 val) const { return val.get_bits(); }
-};
-
-template <typename T>
-V8_INLINE bool has_nondeterminism(T val) {
- static_assert(!std::is_floating_point<T>::value, "missing specialization");
- return false;
-}
-template <>
-V8_INLINE bool has_nondeterminism<float>(float val) {
- return std::isnan(val);
-}
-template <>
-V8_INLINE bool has_nondeterminism<double>(double val) {
- return std::isnan(val);
-}
-
-} // namespace
-
-// Responsible for executing code directly.
-class ThreadImpl {
- struct Activation {
- uint32_t fp;
- sp_t sp;
- Activation(uint32_t fp, sp_t sp) : fp(fp), sp(sp) {}
- };
-
- public:
- // The {ReferenceStackScope} sets up the reference stack in the interpreter.
- // The handle to the reference stack has to be re-initialized every time we
- // call into the interpreter because there is no HandleScope that could
- // contain that handle. A global handle is not an option because it can lead
- // to a memory leak if a reference to the {WasmInstanceObject} is put onto the
- // reference stack and thereby transitively keeps the interpreter alive.
- class ReferenceStackScope {
- public:
- explicit ReferenceStackScope(ThreadImpl* impl) : impl_(impl) {
- // The reference stack is already initialized, we don't have to do
- // anything.
- if (!impl_->reference_stack_cell_.is_null()) return;
- impl_->reference_stack_cell_ = handle(
- impl_->instance_object_->debug_info().interpreter_reference_stack(),
- impl_->isolate_);
- // We initialized the reference stack, so we also have to reset it later.
- do_reset_stack_ = true;
- }
-
- ~ReferenceStackScope() {
- if (do_reset_stack_) {
- impl_->reference_stack_cell_ = Handle<Cell>();
- }
- }
-
- private:
- ThreadImpl* impl_;
- bool do_reset_stack_ = false;
- };
-
- ThreadImpl(Zone* zone, CodeMap* codemap,
- Handle<WasmInstanceObject> instance_object)
- : codemap_(codemap),
- isolate_(instance_object->GetIsolate()),
- instance_object_(instance_object),
- frames_(zone),
- activations_(zone) {}
-
- //==========================================================================
- // Implementation of public interface for WasmInterpreter::Thread.
- //==========================================================================
-
- WasmInterpreter::State state() { return state_; }
-
- void InitFrame(const WasmFunction* function, WasmValue* args) {
- DCHECK_EQ(current_activation().fp, frames_.size());
- InterpreterCode* code = codemap()->GetCode(function);
- size_t num_params = function->sig->parameter_count();
- EnsureStackSpace(num_params);
- Push(args, num_params);
- PushFrame(code);
- }
-
- WasmInterpreter::State Run(int num_steps = -1) {
- DCHECK(state_ == WasmInterpreter::STOPPED ||
- state_ == WasmInterpreter::PAUSED);
- DCHECK(num_steps == -1 || num_steps > 0);
- if (num_steps == -1) {
- TRACE(" => Run()\n");
- } else if (num_steps == 1) {
- TRACE(" => Step()\n");
- } else {
- TRACE(" => Run(%d)\n", num_steps);
- }
- state_ = WasmInterpreter::RUNNING;
- Execute(frames_.back().code, frames_.back().pc, num_steps);
- // If state_ is STOPPED, the current activation must be fully unwound.
- DCHECK_IMPLIES(state_ == WasmInterpreter::STOPPED,
- current_activation().fp == frames_.size());
- return state_;
- }
-
- void Pause() { UNIMPLEMENTED(); }
-
- void Reset() {
- TRACE("----- RESET -----\n");
- ResetStack(0);
- frames_.clear();
- state_ = WasmInterpreter::STOPPED;
- trap_reason_ = kTrapCount;
- possible_nondeterminism_ = false;
- }
-
- int GetFrameCount() {
- DCHECK_GE(kMaxInt, frames_.size());
- return static_cast<int>(frames_.size());
- }
-
- WasmValue GetReturnValue(uint32_t index) {
- if (state_ == WasmInterpreter::TRAPPED) return WasmValue(0xDEADBEEF);
- DCHECK_EQ(WasmInterpreter::FINISHED, state_);
- Activation act = current_activation();
- // Current activation must be finished.
- DCHECK_EQ(act.fp, frames_.size());
- return GetStackValue(act.sp + index);
- }
-
- WasmValue GetStackValue(sp_t index) {
- DCHECK_GT(StackHeight(), index);
- return stack_[index].ExtractValue(this, index);
- }
-
- void SetStackValue(sp_t index, WasmValue value) {
- DCHECK_GT(StackHeight(), index);
- stack_[index] = StackValue(value, this, index);
- }
-
- TrapReason GetTrapReason() { return trap_reason_; }
-
- pc_t GetBreakpointPc() { return break_pc_; }
-
- bool PossibleNondeterminism() { return possible_nondeterminism_; }
-
- uint64_t NumInterpretedCalls() { return num_interpreted_calls_; }
-
- void AddBreakFlags(uint8_t flags) { break_flags_ |= flags; }
-
- void ClearBreakFlags() { break_flags_ = WasmInterpreter::BreakFlag::None; }
-
- Handle<Cell> reference_stack_cell() const { return reference_stack_cell_; }
-
- uint32_t NumActivations() {
- return static_cast<uint32_t>(activations_.size());
- }
-
- uint32_t StartActivation() {
- TRACE("----- START ACTIVATION %zu -----\n", activations_.size());
- // If you use activations, use them consistently:
- DCHECK_IMPLIES(activations_.empty(), frames_.empty());
- DCHECK_IMPLIES(activations_.empty(), StackHeight() == 0);
- uint32_t activation_id = static_cast<uint32_t>(activations_.size());
- activations_.emplace_back(static_cast<uint32_t>(frames_.size()),
- StackHeight());
- state_ = WasmInterpreter::STOPPED;
- return activation_id;
- }
-
- void FinishActivation(uint32_t id) {
- TRACE("----- FINISH ACTIVATION %zu -----\n", activations_.size() - 1);
- DCHECK_LT(0, activations_.size());
- DCHECK_EQ(activations_.size() - 1, id);
- // Stack height must match the start of this activation (otherwise unwind
- // first).
- DCHECK_EQ(activations_.back().fp, frames_.size());
- DCHECK_LE(activations_.back().sp, StackHeight());
- ResetStack(activations_.back().sp);
- activations_.pop_back();
- }
-
- uint32_t ActivationFrameBase(uint32_t id) {
- DCHECK_GT(activations_.size(), id);
- return activations_[id].fp;
- }
-
- WasmInterpreter::Thread::ExceptionHandlingResult RaiseException(
- Isolate* isolate, Handle<Object> exception) {
- DCHECK_EQ(WasmInterpreter::TRAPPED, state_);
- isolate->Throw(*exception); // Will check that none is pending.
- if (HandleException(isolate) == WasmInterpreter::Thread::UNWOUND) {
- DCHECK_EQ(WasmInterpreter::STOPPED, state_);
- return WasmInterpreter::Thread::UNWOUND;
- }
- state_ = WasmInterpreter::PAUSED;
- return WasmInterpreter::Thread::HANDLED;
- }
-
- private:
- // Handle a thrown exception. Returns whether the exception was handled inside
- // the current activation. Unwinds the interpreted stack accordingly.
- WasmInterpreter::Thread::ExceptionHandlingResult HandleException(
- Isolate* isolate) {
- DCHECK(isolate->has_pending_exception());
- bool catchable =
- isolate->is_catchable_by_wasm(isolate->pending_exception());
- DCHECK_LT(0, activations_.size());
- Activation& act = activations_.back();
- while (frames_.size() > act.fp) {
- Frame& frame = frames_.back();
- InterpreterCode* code = frame.code;
- if (catchable && code->side_table->HasEntryAt(frame.pc)) {
- TRACE("----- HANDLE -----\n");
- Push(WasmValue(handle(isolate->pending_exception(), isolate)));
- isolate->clear_pending_exception();
- frame.pc += JumpToHandlerDelta(code, frame.pc);
- TRACE(" => handler #%zu (#%u @%zu)\n", frames_.size() - 1,
- code->function->func_index, frame.pc);
- return WasmInterpreter::Thread::HANDLED;
- }
- TRACE(" => drop frame #%zu (#%u @%zu)\n", frames_.size() - 1,
- code->function->func_index, frame.pc);
- ResetStack(frame.sp);
- frames_.pop_back();
- }
- TRACE("----- UNWIND -----\n");
- DCHECK_EQ(act.fp, frames_.size());
- DCHECK_EQ(act.sp, StackHeight());
- state_ = WasmInterpreter::STOPPED;
- return WasmInterpreter::Thread::UNWOUND;
- }
-
- // Entries on the stack of functions being evaluated.
- struct Frame {
- InterpreterCode* code;
- pc_t pc;
- sp_t sp;
-
- // Limit of parameters.
- sp_t plimit() { return sp + code->function->sig->parameter_count(); }
- // Limit of locals.
- sp_t llimit() { return plimit() + code->locals.type_list.size(); }
- };
-
- // Safety wrapper for values on the operand stack represented as {WasmValue}.
- // Most values are stored directly on the stack; only reference values are
- // kept in a separate on-heap reference stack so that the GC can trace them.
- // TODO(wasm): Optimize simple stack operations (like "get_local",
- // "set_local", and "tee_local") so that they don't require a handle scope.
- // TODO(wasm): Consider optimizing activations that use no reference
- // values to avoid allocating the reference stack entirely.
- class StackValue {
- public:
- StackValue() = default; // Only needed for resizing the stack.
- StackValue(WasmValue v, ThreadImpl* thread, sp_t index) : value_(v) {
- if (IsReferenceValue()) {
- value_ = WasmValue(Handle<Object>::null());
- int ref_index = static_cast<int>(index);
- thread->reference_stack().set(ref_index, *v.to_anyref());
- }
- }
-
- WasmValue ExtractValue(ThreadImpl* thread, sp_t index) {
- if (!IsReferenceValue()) return value_;
- DCHECK(value_.to_anyref().is_null());
- int ref_index = static_cast<int>(index);
- Isolate* isolate = thread->isolate_;
- Handle<Object> ref(thread->reference_stack().get(ref_index), isolate);
- DCHECK(!ref->IsTheHole(isolate));
- return WasmValue(ref);
- }
-
- bool IsReferenceValue() const { return value_.type() == kWasmAnyRef; }
-
- void ClearValue(ThreadImpl* thread, sp_t index) {
- if (!IsReferenceValue()) return;
- int ref_index = static_cast<int>(index);
- Isolate* isolate = thread->isolate_;
- thread->reference_stack().set_the_hole(isolate, ref_index);
- }
-
- static void ClearValues(ThreadImpl* thread, sp_t index, int count) {
- int ref_index = static_cast<int>(index);
- thread->reference_stack().FillWithHoles(ref_index, ref_index + count);
- }
-
- static bool IsClearedValue(ThreadImpl* thread, sp_t index) {
- int ref_index = static_cast<int>(index);
- Isolate* isolate = thread->isolate_;
- return thread->reference_stack().is_the_hole(isolate, ref_index);
- }
-
- private:
- WasmValue value_;
- };
-
- friend class InterpretedFrameImpl;
- friend class ReferenceStackScope;
-
- CodeMap* codemap_;
- Isolate* isolate_;
- Handle<WasmInstanceObject> instance_object_;
- std::unique_ptr<StackValue[]> stack_;
- StackValue* stack_limit_ = nullptr; // End of allocated stack space.
- StackValue* sp_ = nullptr; // Current stack pointer.
- // The reference stack is pointed to by a {Cell} to be able to replace the
- // underlying {FixedArray} when growing the stack. This avoids having to
- // recreate or update the global handle keeping this object alive.
- Handle<Cell> reference_stack_cell_; // References are on an on-heap stack.
- ZoneVector<Frame> frames_;
- WasmInterpreter::State state_ = WasmInterpreter::STOPPED;
- pc_t break_pc_ = kInvalidPc;
- TrapReason trap_reason_ = kTrapCount;
- bool possible_nondeterminism_ = false;
- uint8_t break_flags_ = 0; // a combination of WasmInterpreter::BreakFlag
- uint64_t num_interpreted_calls_ = 0;
- // Store the stack height of each activation (for unwind and frame
- // inspection).
- ZoneVector<Activation> activations_;
-
- CodeMap* codemap() const { return codemap_; }
- const WasmModule* module() const { return codemap_->module(); }
- FixedArray reference_stack() const {
- return FixedArray::cast(reference_stack_cell_->value());
- }
-
- void DoTrap(TrapReason trap, pc_t pc) {
- TRACE("TRAP: %s\n", WasmOpcodes::TrapReasonMessage(trap));
- state_ = WasmInterpreter::TRAPPED;
- trap_reason_ = trap;
- CommitPc(pc);
- }
-
- // Check if there is room for a function's activation.
- void EnsureStackSpaceForCall(InterpreterCode* code) {
- EnsureStackSpace(code->side_table->max_stack_height_ +
- code->locals.type_list.size());
- DCHECK_GE(StackHeight(), code->function->sig->parameter_count());
- }
-
- // Push a frame with arguments already on the stack.
- void PushFrame(InterpreterCode* code) {
- DCHECK_NOT_NULL(code);
- DCHECK_NOT_NULL(code->side_table);
- EnsureStackSpaceForCall(code);
-
- ++num_interpreted_calls_;
- size_t arity = code->function->sig->parameter_count();
- // The parameters will overlap the arguments already on the stack.
- DCHECK_GE(StackHeight(), arity);
-
- frames_.push_back({code, 0, StackHeight() - arity});
- frames_.back().pc = InitLocals(code);
- TRACE(" => PushFrame #%zu (#%u @%zu)\n", frames_.size() - 1,
- code->function->func_index, frames_.back().pc);
- }
-
- pc_t InitLocals(InterpreterCode* code) {
- for (ValueType p : code->locals.type_list) {
- WasmValue val;
- switch (p.kind()) {
-#define CASE_TYPE(valuetype, ctype) \
- case ValueType::valuetype: \
- val = WasmValue(ctype{}); \
- break;
- FOREACH_WASMVALUE_CTYPES(CASE_TYPE)
-#undef CASE_TYPE
- case ValueType::kAnyRef:
- case ValueType::kFuncRef:
- case ValueType::kNullRef:
- case ValueType::kExnRef:
- case ValueType::kRef:
- case ValueType::kOptRef:
- case ValueType::kEqRef: {
- val = WasmValue(isolate_->factory()->null_value());
- break;
- }
- case ValueType::kStmt:
- case ValueType::kBottom:
- UNREACHABLE();
- break;
- }
- Push(val);
- }
- return code->locals.encoded_size;
- }
-
- void CommitPc(pc_t pc) {
- DCHECK(!frames_.empty());
- frames_.back().pc = pc;
- }
-
- bool SkipBreakpoint(InterpreterCode* code, pc_t pc) {
- if (pc == break_pc_) {
- // Skip the previously hit breakpoint when resuming.
- break_pc_ = kInvalidPc;
- return true;
- }
- return false;
- }
-
- void ReloadFromFrameOnException(Decoder* decoder, InterpreterCode** code,
- pc_t* pc, pc_t* limit) {
- Frame* top = &frames_.back();
- *code = top->code;
- *pc = top->pc;
- *limit = top->code->end - top->code->start;
- decoder->Reset(top->code->start, top->code->end);
- }
-
- int LookupTargetDelta(InterpreterCode* code, pc_t pc) {
- return static_cast<int>(code->side_table->Lookup(pc).pc_diff);
- }
-
- int JumpToHandlerDelta(InterpreterCode* code, pc_t pc) {
- ControlTransferEntry& control_transfer_entry = code->side_table->Lookup(pc);
- DoStackTransfer(control_transfer_entry.sp_diff + kCatchInArity,
- control_transfer_entry.target_arity);
- return control_transfer_entry.pc_diff;
- }
-
- int DoBreak(InterpreterCode* code, pc_t pc, size_t depth) {
- ControlTransferEntry& control_transfer_entry = code->side_table->Lookup(pc);
- DoStackTransfer(control_transfer_entry.sp_diff,
- control_transfer_entry.target_arity);
- return control_transfer_entry.pc_diff;
- }
-
- pc_t ReturnPc(Decoder* decoder, InterpreterCode* code, pc_t pc) {
- switch (code->orig_start[pc]) {
- case kExprCallFunction: {
- CallFunctionImmediate<Decoder::kNoValidate> imm(decoder, code->at(pc));
- return pc + 1 + imm.length;
- }
- case kExprCallIndirect: {
- CallIndirectImmediate<Decoder::kNoValidate> imm(WasmFeatures::All(),
- decoder, code->at(pc));
- return pc + 1 + imm.length;
- }
- default:
- UNREACHABLE();
- }
- }
-
- bool DoReturn(Decoder* decoder, InterpreterCode** code, pc_t* pc, pc_t* limit,
- size_t arity) {
- DCHECK_GT(frames_.size(), 0);
- spdiff_t sp_diff = static_cast<spdiff_t>(StackHeight() - frames_.back().sp);
- frames_.pop_back();
- if (frames_.size() == current_activation().fp) {
- // A return from the last frame terminates the execution.
- state_ = WasmInterpreter::FINISHED;
- DoStackTransfer(sp_diff, arity);
- TRACE(" => finish\n");
- return false;
- } else {
- // Return to caller frame.
- Frame* top = &frames_.back();
- *code = top->code;
- decoder->Reset((*code)->start, (*code)->end);
- *pc = ReturnPc(decoder, *code, top->pc);
- *limit = top->code->end - top->code->start;
- TRACE(" => Return to #%zu (#%u @%zu)\n", frames_.size() - 1,
- (*code)->function->func_index, *pc);
- DoStackTransfer(sp_diff, arity);
- return true;
- }
- }
-
- // Returns true if the call was successful, false if the stack check failed
- // and the current activation was fully unwound.
- bool DoCall(Decoder* decoder, InterpreterCode* target, pc_t* pc,
- pc_t* limit) V8_WARN_UNUSED_RESULT {
- frames_.back().pc = *pc;
- PushFrame(target);
- if (!DoStackCheck()) return false;
- *pc = frames_.back().pc;
- *limit = target->end - target->start;
- decoder->Reset(target->start, target->end);
- return true;
- }
-
- // Returns true if the tail call was successful, false if the stack check
- // failed.
- bool DoReturnCall(Decoder* decoder, InterpreterCode* target, pc_t* pc,
- pc_t* limit) V8_WARN_UNUSED_RESULT {
- DCHECK_NOT_NULL(target);
- DCHECK_NOT_NULL(target->side_table);
- EnsureStackSpaceForCall(target);
-
- ++num_interpreted_calls_;
-
- Frame* top = &frames_.back();
-
- // Drop everything except current parameters.
- spdiff_t sp_diff = static_cast<spdiff_t>(StackHeight() - top->sp);
- size_t arity = target->function->sig->parameter_count();
-
- DoStackTransfer(sp_diff, arity);
-
- *limit = target->end - target->start;
- decoder->Reset(target->start, target->end);
-
- // Rebuild current frame to look like a call to callee.
- top->code = target;
- top->pc = 0;
- top->sp = StackHeight() - arity;
- top->pc = InitLocals(target);
-
- *pc = top->pc;
-
- TRACE(" => ReturnCall #%zu (#%u @%zu)\n", frames_.size() - 1,
- target->function->func_index, top->pc);
-
- return true;
- }
-
- // Copies the {arity} values on top of the stack downwards, dropping a total
- // of {sp_diff} values from the stack.
- void DoStackTransfer(spdiff_t sp_diff, size_t arity) {
- // before: |---------------| pop_count | arity |
- // ^ 0 ^ dest ^ src ^ StackHeight()
- // ^----< sp_diff >----^
- //
- // after: |---------------| arity |
- // ^ 0 ^ StackHeight()
- sp_t stack_height = StackHeight();
- sp_t dest = stack_height - sp_diff;
- sp_t src = stack_height - arity;
- DCHECK_LE(dest, stack_height);
- DCHECK_LE(dest, src);
- if (arity && (dest != src)) {
- StackValue* stack = stack_.get();
- memmove(stack + dest, stack + src, arity * sizeof(StackValue));
- // Also move elements on the reference stack accordingly.
- reference_stack().MoveElements(
- isolate_, static_cast<int>(dest), static_cast<int>(src),
- static_cast<int>(arity), UPDATE_WRITE_BARRIER);
- }
- ResetStack(dest + arity);
- }
-
- inline Address EffectiveAddress(uint32_t index) {
- // Compute the effective address of the access, making sure to condition
- // the index even in the in-bounds case.
- return reinterpret_cast<Address>(instance_object_->memory_start()) +
- (index & instance_object_->memory_mask());
- }
-
- template <typename mtype>
- inline Address BoundsCheckMem(uint32_t offset, uint32_t index) {
- uint32_t effective_index = offset + index;
- if (effective_index < index) {
- return kNullAddress; // wraparound => oob
- }
- if (!base::IsInBounds(effective_index, sizeof(mtype),
- instance_object_->memory_size())) {
- return kNullAddress; // oob
- }
- return EffectiveAddress(effective_index);
- }
-
- inline bool BoundsCheckMemRange(uint32_t index, uint32_t* size,
- Address* out_address) {
- bool ok = base::ClampToBounds(
- index, size, static_cast<uint32_t>(instance_object_->memory_size()));
- *out_address = EffectiveAddress(index);
- return ok;
- }
-
- template <typename ctype, typename mtype>
- bool ExecuteLoad(Decoder* decoder, InterpreterCode* code, pc_t pc,
- int* const len, MachineRepresentation rep,
- int prefix_len = 0) {
- // Some opcodes have a prefix byte, and MemoryAccessImmediate assumes that
- // the memarg is 1 byte from pc. We don't increment pc at the caller,
- // because we want pc to stay at the start of the operation so that trap
- // reporting and tracing remain accurate; otherwise they would report in the
- // middle of an opcode.
- MemoryAccessImmediate<Decoder::kNoValidate> imm(
- decoder, code->at(pc + prefix_len), sizeof(ctype));
- uint32_t index = Pop().to<uint32_t>();
- Address addr = BoundsCheckMem<mtype>(imm.offset, index);
- if (!addr) {
- DoTrap(kTrapMemOutOfBounds, pc);
- return false;
- }
- WasmValue result(
- converter<ctype, mtype>{}(ReadLittleEndianValue<mtype>(addr)));
-
- Push(result);
- *len += imm.length;
-
- if (FLAG_trace_wasm_memory) {
- MemoryTracingInfo info(imm.offset + index, false, rep);
- TraceMemoryOperation(ExecutionTier::kInterpreter, &info,
- code->function->func_index, static_cast<int>(pc),
- instance_object_->memory_start());
- }
-
- return true;
- }
-
- template <typename ctype, typename mtype>
- bool ExecuteStore(Decoder* decoder, InterpreterCode* code, pc_t pc,
- int* const len, MachineRepresentation rep,
- int prefix_len = 0) {
- // Some opcodes have a prefix byte, and MemoryAccessImmediate assumes that
- // the memarg is 1 byte from pc. We don't increment pc at the caller,
- // because we want pc to stay at the start of the operation so that trap
- // reporting and tracing remain accurate; otherwise they would report in the
- // middle of an opcode.
- MemoryAccessImmediate<Decoder::kNoValidate> imm(
- decoder, code->at(pc + prefix_len), sizeof(ctype));
- ctype val = Pop().to<ctype>();
-
- uint32_t index = Pop().to<uint32_t>();
- Address addr = BoundsCheckMem<mtype>(imm.offset, index);
- if (!addr) {
- DoTrap(kTrapMemOutOfBounds, pc);
- return false;
- }
- WriteLittleEndianValue<mtype>(addr, converter<mtype, ctype>{}(val));
- *len += imm.length;
-
- if (FLAG_trace_wasm_memory) {
- MemoryTracingInfo info(imm.offset + index, true, rep);
- TraceMemoryOperation(ExecutionTier::kInterpreter, &info,
- code->function->func_index, static_cast<int>(pc),
- instance_object_->memory_start());
- }
-
- return true;
- }
-
- template <typename type, typename op_type>
- bool ExtractAtomicOpParams(Decoder* decoder, InterpreterCode* code,
- Address* address, pc_t pc, int* const len,
- type* val = nullptr, type* val2 = nullptr) {
- MemoryAccessImmediate<Decoder::kNoValidate> imm(decoder, code->at(pc + 1),
- sizeof(type));
- if (val2) *val2 = static_cast<type>(Pop().to<op_type>());
- if (val) *val = static_cast<type>(Pop().to<op_type>());
- uint32_t index = Pop().to<uint32_t>();
- *address = BoundsCheckMem<type>(imm.offset, index);
- if (!*address) {
- DoTrap(kTrapMemOutOfBounds, pc);
- return false;
- }
- if (!IsAligned(*address, sizeof(type))) {
- DoTrap(kTrapUnalignedAccess, pc);
- return false;
- }
- *len += imm.length;
- return true;
- }
-
- template <typename type>
- bool ExtractAtomicWaitNotifyParams(Decoder* decoder, InterpreterCode* code,
- pc_t pc, int* const len,
- uint32_t* buffer_offset, type* val,
- int64_t* timeout = nullptr) {
- MemoryAccessImmediate<Decoder::kValidate> imm(decoder, code->at(pc + 1),
- sizeof(type));
- if (timeout) {
- *timeout = Pop().to<int64_t>();
- }
- *val = Pop().to<type>();
- auto index = Pop().to<uint32_t>();
- // Check bounds.
- Address address = BoundsCheckMem<uint32_t>(imm.offset, index);
- *buffer_offset = index + imm.offset;
- if (!address) {
- DoTrap(kTrapMemOutOfBounds, pc);
- return false;
- }
- // Check alignment.
- const uint32_t align_mask = sizeof(type) - 1;
- if ((*buffer_offset & align_mask) != 0) {
- DoTrap(kTrapUnalignedAccess, pc);
- return false;
- }
- *len += imm.length;
- return true;
- }
-
- bool ExecuteNumericOp(WasmOpcode opcode, Decoder* decoder,
- InterpreterCode* code, pc_t pc, int* const len) {
- switch (opcode) {
- case kExprI32SConvertSatF32:
- Push(WasmValue(ExecuteConvertSaturate<int32_t>(Pop().to<float>())));
- return true;
- case kExprI32UConvertSatF32:
- Push(WasmValue(ExecuteConvertSaturate<uint32_t>(Pop().to<float>())));
- return true;
- case kExprI32SConvertSatF64:
- Push(WasmValue(ExecuteConvertSaturate<int32_t>(Pop().to<double>())));
- return true;
- case kExprI32UConvertSatF64:
- Push(WasmValue(ExecuteConvertSaturate<uint32_t>(Pop().to<double>())));
- return true;
- case kExprI64SConvertSatF32:
- Push(WasmValue(ExecuteI64SConvertSatF32(Pop().to<float>())));
- return true;
- case kExprI64UConvertSatF32:
- Push(WasmValue(ExecuteI64UConvertSatF32(Pop().to<float>())));
- return true;
- case kExprI64SConvertSatF64:
- Push(WasmValue(ExecuteI64SConvertSatF64(Pop().to<double>())));
- return true;
- case kExprI64UConvertSatF64:
- Push(WasmValue(ExecuteI64UConvertSatF64(Pop().to<double>())));
- return true;
- case kExprMemoryInit: {
- MemoryInitImmediate<Decoder::kNoValidate> imm(decoder, code->at(pc));
- // The data segment index must be in bounds since it is required by
- // validation.
- DCHECK_LT(imm.data_segment_index, module()->num_declared_data_segments);
- *len += imm.length;
- auto size = Pop().to<uint32_t>();
- auto src = Pop().to<uint32_t>();
- auto dst = Pop().to<uint32_t>();
- Address dst_addr;
- auto src_max =
- instance_object_->data_segment_sizes()[imm.data_segment_index];
- if (!BoundsCheckMemRange(dst, &size, &dst_addr) ||
- !base::IsInBounds(src, size, src_max)) {
- DoTrap(kTrapMemOutOfBounds, pc);
- return false;
- }
- Address src_addr =
- instance_object_->data_segment_starts()[imm.data_segment_index] +
- src;
- std::memmove(reinterpret_cast<void*>(dst_addr),
- reinterpret_cast<void*>(src_addr), size);
- return true;
- }
- case kExprDataDrop: {
- DataDropImmediate<Decoder::kNoValidate> imm(decoder, code->at(pc));
- // The data segment index must be in bounds since it is required by
- // validation.
- DCHECK_LT(imm.index, module()->num_declared_data_segments);
- *len += imm.length;
- instance_object_->data_segment_sizes()[imm.index] = 0;
- return true;
- }
- case kExprMemoryCopy: {
- MemoryCopyImmediate<Decoder::kNoValidate> imm(decoder, code->at(pc));
- *len += imm.length;
- auto size = Pop().to<uint32_t>();
- auto src = Pop().to<uint32_t>();
- auto dst = Pop().to<uint32_t>();
- Address dst_addr;
- Address src_addr;
- if (!BoundsCheckMemRange(dst, &size, &dst_addr) ||
- !BoundsCheckMemRange(src, &size, &src_addr)) {
- DoTrap(kTrapMemOutOfBounds, pc);
- return false;
- }
-
- std::memmove(reinterpret_cast<void*>(dst_addr),
- reinterpret_cast<void*>(src_addr), size);
- return true;
- }
- case kExprMemoryFill: {
- MemoryIndexImmediate<Decoder::kNoValidate> imm(decoder,
- code->at(pc + 1));
- *len += imm.length;
- auto size = Pop().to<uint32_t>();
- auto value = Pop().to<uint32_t>();
- auto dst = Pop().to<uint32_t>();
- Address dst_addr;
- bool ok = BoundsCheckMemRange(dst, &size, &dst_addr);
- if (!ok) {
- DoTrap(kTrapMemOutOfBounds, pc);
- return false;
- }
- std::memset(reinterpret_cast<void*>(dst_addr), value, size);
- return true;
- }
- case kExprTableInit: {
- TableInitImmediate<Decoder::kNoValidate> imm(decoder, code->at(pc));
- *len += imm.length;
- auto size = Pop().to<uint32_t>();
- auto src = Pop().to<uint32_t>();
- auto dst = Pop().to<uint32_t>();
- HandleScope scope(isolate_); // Avoid leaking handles.
- bool ok = WasmInstanceObject::InitTableEntries(
- instance_object_->GetIsolate(), instance_object_, imm.table.index,
- imm.elem_segment_index, dst, src, size);
- if (!ok) DoTrap(kTrapTableOutOfBounds, pc);
- return ok;
- }
- case kExprElemDrop: {
- ElemDropImmediate<Decoder::kNoValidate> imm(decoder, code->at(pc));
- *len += imm.length;
- instance_object_->dropped_elem_segments()[imm.index] = 1;
- return true;
- }
- case kExprTableCopy: {
- TableCopyImmediate<Decoder::kNoValidate> imm(decoder, code->at(pc));
- auto size = Pop().to<uint32_t>();
- auto src = Pop().to<uint32_t>();
- auto dst = Pop().to<uint32_t>();
- HandleScope handle_scope(isolate_); // Avoid leaking handles.
- bool ok = WasmInstanceObject::CopyTableEntries(
- isolate_, instance_object_, imm.table_dst.index,
- imm.table_src.index, dst, src, size);
- if (!ok) DoTrap(kTrapTableOutOfBounds, pc);
- *len += imm.length;
- return ok;
- }
- case kExprTableGrow: {
- TableIndexImmediate<Decoder::kNoValidate> imm(decoder,
- code->at(pc + 1));
- HandleScope handle_scope(isolate_);
- auto table = handle(
- WasmTableObject::cast(instance_object_->tables().get(imm.index)),
- isolate_);
- auto delta = Pop().to<uint32_t>();
- auto value = Pop().to_anyref();
- int32_t result = WasmTableObject::Grow(isolate_, table, delta, value);
- Push(WasmValue(result));
- *len += imm.length;
- return true;
- }
- case kExprTableSize: {
- TableIndexImmediate<Decoder::kNoValidate> imm(decoder,
- code->at(pc + 1));
- HandleScope handle_scope(isolate_);
- auto table = handle(
- WasmTableObject::cast(instance_object_->tables().get(imm.index)),
- isolate_);
- uint32_t table_size = table->current_length();
- Push(WasmValue(table_size));
- *len += imm.length;
- return true;
- }
- case kExprTableFill: {
- TableIndexImmediate<Decoder::kNoValidate> imm(decoder,
- code->at(pc + 1));
- HandleScope handle_scope(isolate_);
- auto count = Pop().to<uint32_t>();
- auto value = Pop().to_anyref();
- auto start = Pop().to<uint32_t>();
-
- auto table = handle(
- WasmTableObject::cast(instance_object_->tables().get(imm.index)),
- isolate_);
- uint32_t table_size = table->current_length();
- if (start > table_size) {
- DoTrap(kTrapTableOutOfBounds, pc);
- return false;
- }
-
- // Even when table.fill goes out of bounds, as many entries as possible are
- // put into the table; only afterwards do we trap.
- uint32_t fill_count = std::min(count, table_size - start);
- if (fill_count < count) {
- DoTrap(kTrapTableOutOfBounds, pc);
- return false;
- }
- WasmTableObject::Fill(isolate_, table, start, value, fill_count);
-
- *len += imm.length;
- return true;
- }
- default:
- FATAL("Unknown or unimplemented opcode #%d:%s", code->start[pc],
- OpcodeName(code->start[pc]));
- UNREACHABLE();
- }
- return false;
- }
-
- template <typename type, typename op_type, typename func>
- op_type ExecuteAtomicBinopBE(type val, Address addr, func op) {
- type old_val;
- type new_val;
- old_val = ReadUnalignedValue<type>(addr);
- do {
- new_val =
- ByteReverse(static_cast<type>(op(ByteReverse<type>(old_val), val)));
- } while (!(std::atomic_compare_exchange_strong(
- reinterpret_cast<std::atomic<type>*>(addr), &old_val, new_val)));
- return static_cast<op_type>(ByteReverse<type>(old_val));
- }
-
- template <typename type>
- type AdjustByteOrder(type param) {
-#if V8_TARGET_BIG_ENDIAN
- return ByteReverse(param);
-#else
- return param;
-#endif
- }
-
- bool ExecuteAtomicOp(WasmOpcode opcode, Decoder* decoder,
- InterpreterCode* code, pc_t pc, int* const len) {
-#if V8_TARGET_BIG_ENDIAN
- constexpr bool kBigEndian = true;
-#else
- constexpr bool kBigEndian = false;
-#endif
- WasmValue result;
- switch (opcode) {
-#define ATOMIC_BINOP_CASE(name, type, op_type, operation, op) \
- case kExpr##name: { \
- type val; \
- Address addr; \
- op_type result; \
- if (!ExtractAtomicOpParams<type, op_type>(decoder, code, &addr, pc, len, \
- &val)) { \
- return false; \
- } \
- static_assert(sizeof(std::atomic<type>) == sizeof(type), \
- "Size mismatch for types std::atomic<" #type \
- ">, and " #type); \
- if (kBigEndian) { \
- auto oplambda = [](type a, type b) { return a op b; }; \
- result = ExecuteAtomicBinopBE<type, op_type>(val, addr, oplambda); \
- } else { \
- result = static_cast<op_type>( \
- std::operation(reinterpret_cast<std::atomic<type>*>(addr), val)); \
- } \
- Push(WasmValue(result)); \
- break; \
- }
- ATOMIC_BINOP_CASE(I32AtomicAdd, uint32_t, uint32_t, atomic_fetch_add, +);
- ATOMIC_BINOP_CASE(I32AtomicAdd8U, uint8_t, uint32_t, atomic_fetch_add, +);
- ATOMIC_BINOP_CASE(I32AtomicAdd16U, uint16_t, uint32_t, atomic_fetch_add,
- +);
- ATOMIC_BINOP_CASE(I32AtomicSub, uint32_t, uint32_t, atomic_fetch_sub, -);
- ATOMIC_BINOP_CASE(I32AtomicSub8U, uint8_t, uint32_t, atomic_fetch_sub, -);
- ATOMIC_BINOP_CASE(I32AtomicSub16U, uint16_t, uint32_t, atomic_fetch_sub,
- -);
- ATOMIC_BINOP_CASE(I32AtomicAnd, uint32_t, uint32_t, atomic_fetch_and, &);
- ATOMIC_BINOP_CASE(I32AtomicAnd8U, uint8_t, uint32_t, atomic_fetch_and, &);
- ATOMIC_BINOP_CASE(I32AtomicAnd16U, uint16_t, uint32_t,
- atomic_fetch_and, &);
- ATOMIC_BINOP_CASE(I32AtomicOr, uint32_t, uint32_t, atomic_fetch_or, |);
- ATOMIC_BINOP_CASE(I32AtomicOr8U, uint8_t, uint32_t, atomic_fetch_or, |);
- ATOMIC_BINOP_CASE(I32AtomicOr16U, uint16_t, uint32_t, atomic_fetch_or, |);
- ATOMIC_BINOP_CASE(I32AtomicXor, uint32_t, uint32_t, atomic_fetch_xor, ^);
- ATOMIC_BINOP_CASE(I32AtomicXor8U, uint8_t, uint32_t, atomic_fetch_xor, ^);
- ATOMIC_BINOP_CASE(I32AtomicXor16U, uint16_t, uint32_t, atomic_fetch_xor,
- ^);
- ATOMIC_BINOP_CASE(I32AtomicExchange, uint32_t, uint32_t, atomic_exchange,
- =);
- ATOMIC_BINOP_CASE(I32AtomicExchange8U, uint8_t, uint32_t, atomic_exchange,
- =);
- ATOMIC_BINOP_CASE(I32AtomicExchange16U, uint16_t, uint32_t,
- atomic_exchange, =);
- ATOMIC_BINOP_CASE(I64AtomicAdd, uint64_t, uint64_t, atomic_fetch_add, +);
- ATOMIC_BINOP_CASE(I64AtomicAdd8U, uint8_t, uint64_t, atomic_fetch_add, +);
- ATOMIC_BINOP_CASE(I64AtomicAdd16U, uint16_t, uint64_t, atomic_fetch_add,
- +);
- ATOMIC_BINOP_CASE(I64AtomicAdd32U, uint32_t, uint64_t, atomic_fetch_add,
- +);
- ATOMIC_BINOP_CASE(I64AtomicSub, uint64_t, uint64_t, atomic_fetch_sub, -);
- ATOMIC_BINOP_CASE(I64AtomicSub8U, uint8_t, uint64_t, atomic_fetch_sub, -);
- ATOMIC_BINOP_CASE(I64AtomicSub16U, uint16_t, uint64_t, atomic_fetch_sub,
- -);
- ATOMIC_BINOP_CASE(I64AtomicSub32U, uint32_t, uint64_t, atomic_fetch_sub,
- -);
- ATOMIC_BINOP_CASE(I64AtomicAnd, uint64_t, uint64_t, atomic_fetch_and, &);
- ATOMIC_BINOP_CASE(I64AtomicAnd8U, uint8_t, uint64_t, atomic_fetch_and, &);
- ATOMIC_BINOP_CASE(I64AtomicAnd16U, uint16_t, uint64_t,
- atomic_fetch_and, &);
- ATOMIC_BINOP_CASE(I64AtomicAnd32U, uint32_t, uint64_t,
- atomic_fetch_and, &);
- ATOMIC_BINOP_CASE(I64AtomicOr, uint64_t, uint64_t, atomic_fetch_or, |);
- ATOMIC_BINOP_CASE(I64AtomicOr8U, uint8_t, uint64_t, atomic_fetch_or, |);
- ATOMIC_BINOP_CASE(I64AtomicOr16U, uint16_t, uint64_t, atomic_fetch_or, |);
- ATOMIC_BINOP_CASE(I64AtomicOr32U, uint32_t, uint64_t, atomic_fetch_or, |);
- ATOMIC_BINOP_CASE(I64AtomicXor, uint64_t, uint64_t, atomic_fetch_xor, ^);
- ATOMIC_BINOP_CASE(I64AtomicXor8U, uint8_t, uint64_t, atomic_fetch_xor, ^);
- ATOMIC_BINOP_CASE(I64AtomicXor16U, uint16_t, uint64_t, atomic_fetch_xor,
- ^);
- ATOMIC_BINOP_CASE(I64AtomicXor32U, uint32_t, uint64_t, atomic_fetch_xor,
- ^);
- ATOMIC_BINOP_CASE(I64AtomicExchange, uint64_t, uint64_t, atomic_exchange,
- =);
- ATOMIC_BINOP_CASE(I64AtomicExchange8U, uint8_t, uint64_t, atomic_exchange,
- =);
- ATOMIC_BINOP_CASE(I64AtomicExchange16U, uint16_t, uint64_t,
- atomic_exchange, =);
- ATOMIC_BINOP_CASE(I64AtomicExchange32U, uint32_t, uint64_t,
- atomic_exchange, =);
-#undef ATOMIC_BINOP_CASE
-#define ATOMIC_COMPARE_EXCHANGE_CASE(name, type, op_type) \
- case kExpr##name: { \
- type old_val; \
- type new_val; \
- Address addr; \
- if (!ExtractAtomicOpParams<type, op_type>(decoder, code, &addr, pc, len, \
- &old_val, &new_val)) { \
- return false; \
- } \
- static_assert(sizeof(std::atomic<type>) == sizeof(type), \
- "Size mismatch for types std::atomic<" #type \
- ">, and " #type); \
- old_val = AdjustByteOrder<type>(old_val); \
- new_val = AdjustByteOrder<type>(new_val); \
- std::atomic_compare_exchange_strong( \
- reinterpret_cast<std::atomic<type>*>(addr), &old_val, new_val); \
- Push(WasmValue(static_cast<op_type>(AdjustByteOrder<type>(old_val)))); \
- break; \
- }
- ATOMIC_COMPARE_EXCHANGE_CASE(I32AtomicCompareExchange, uint32_t,
- uint32_t);
- ATOMIC_COMPARE_EXCHANGE_CASE(I32AtomicCompareExchange8U, uint8_t,
- uint32_t);
- ATOMIC_COMPARE_EXCHANGE_CASE(I32AtomicCompareExchange16U, uint16_t,
- uint32_t);
- ATOMIC_COMPARE_EXCHANGE_CASE(I64AtomicCompareExchange, uint64_t,
- uint64_t);
- ATOMIC_COMPARE_EXCHANGE_CASE(I64AtomicCompareExchange8U, uint8_t,
- uint64_t);
- ATOMIC_COMPARE_EXCHANGE_CASE(I64AtomicCompareExchange16U, uint16_t,
- uint64_t);
- ATOMIC_COMPARE_EXCHANGE_CASE(I64AtomicCompareExchange32U, uint32_t,
- uint64_t);
-#undef ATOMIC_COMPARE_EXCHANGE_CASE
-#define ATOMIC_LOAD_CASE(name, type, op_type, operation) \
- case kExpr##name: { \
- Address addr; \
- if (!ExtractAtomicOpParams<type, op_type>(decoder, code, &addr, pc, \
- len)) { \
- return false; \
- } \
- static_assert(sizeof(std::atomic<type>) == sizeof(type), \
- "Size mismatch for types std::atomic<" #type \
- ">, and " #type); \
- result = WasmValue(static_cast<op_type>(AdjustByteOrder<type>( \
- std::operation(reinterpret_cast<std::atomic<type>*>(addr))))); \
- Push(result); \
- break; \
- }
- ATOMIC_LOAD_CASE(I32AtomicLoad, uint32_t, uint32_t, atomic_load);
- ATOMIC_LOAD_CASE(I32AtomicLoad8U, uint8_t, uint32_t, atomic_load);
- ATOMIC_LOAD_CASE(I32AtomicLoad16U, uint16_t, uint32_t, atomic_load);
- ATOMIC_LOAD_CASE(I64AtomicLoad, uint64_t, uint64_t, atomic_load);
- ATOMIC_LOAD_CASE(I64AtomicLoad8U, uint8_t, uint64_t, atomic_load);
- ATOMIC_LOAD_CASE(I64AtomicLoad16U, uint16_t, uint64_t, atomic_load);
- ATOMIC_LOAD_CASE(I64AtomicLoad32U, uint32_t, uint64_t, atomic_load);
-#undef ATOMIC_LOAD_CASE
-#define ATOMIC_STORE_CASE(name, type, op_type, operation) \
- case kExpr##name: { \
- type val; \
- Address addr; \
- if (!ExtractAtomicOpParams<type, op_type>(decoder, code, &addr, pc, len, \
- &val)) { \
- return false; \
- } \
- static_assert(sizeof(std::atomic<type>) == sizeof(type), \
- "Size mismatch for types std::atomic<" #type \
- ">, and " #type); \
- std::operation(reinterpret_cast<std::atomic<type>*>(addr), \
- AdjustByteOrder<type>(val)); \
- break; \
- }
- ATOMIC_STORE_CASE(I32AtomicStore, uint32_t, uint32_t, atomic_store);
- ATOMIC_STORE_CASE(I32AtomicStore8U, uint8_t, uint32_t, atomic_store);
- ATOMIC_STORE_CASE(I32AtomicStore16U, uint16_t, uint32_t, atomic_store);
- ATOMIC_STORE_CASE(I64AtomicStore, uint64_t, uint64_t, atomic_store);
- ATOMIC_STORE_CASE(I64AtomicStore8U, uint8_t, uint64_t, atomic_store);
- ATOMIC_STORE_CASE(I64AtomicStore16U, uint16_t, uint64_t, atomic_store);
- ATOMIC_STORE_CASE(I64AtomicStore32U, uint32_t, uint64_t, atomic_store);
-#undef ATOMIC_STORE_CASE
- case kExprAtomicFence:
- std::atomic_thread_fence(std::memory_order_seq_cst);
- *len += 1;
- break;
- case kExprI32AtomicWait: {
- int32_t val;
- int64_t timeout;
- uint32_t buffer_offset;
- if (!ExtractAtomicWaitNotifyParams<int32_t>(
- decoder, code, pc, len, &buffer_offset, &val, &timeout)) {
- return false;
- }
- HandleScope handle_scope(isolate_);
- Handle<JSArrayBuffer> array_buffer(
- instance_object_->memory_object().array_buffer(), isolate_);
- auto result = FutexEmulation::WaitWasm32(isolate_, array_buffer,
- buffer_offset, val, timeout);
- Push(WasmValue(result.ToSmi().value()));
- break;
- }
- case kExprI64AtomicWait: {
- int64_t val;
- int64_t timeout;
- uint32_t buffer_offset;
- if (!ExtractAtomicWaitNotifyParams<int64_t>(
- decoder, code, pc, len, &buffer_offset, &val, &timeout)) {
- return false;
- }
- HandleScope handle_scope(isolate_);
- Handle<JSArrayBuffer> array_buffer(
- instance_object_->memory_object().array_buffer(), isolate_);
- auto result = FutexEmulation::WaitWasm64(isolate_, array_buffer,
- buffer_offset, val, timeout);
- Push(WasmValue(result.ToSmi().value()));
- break;
- }
- case kExprAtomicNotify: {
- int32_t val;
- uint32_t buffer_offset;
- if (!ExtractAtomicWaitNotifyParams<int32_t>(decoder, code, pc, len,
- &buffer_offset, &val)) {
- return false;
- }
- HandleScope handle_scope(isolate_);
- Handle<JSArrayBuffer> array_buffer(
- instance_object_->memory_object().array_buffer(), isolate_);
- auto result = FutexEmulation::Wake(array_buffer, buffer_offset, val);
- Push(WasmValue(result.ToSmi().value()));
- break;
- }
- default:
- UNREACHABLE();
- return false;
- }
- return true;
- }
-
- bool ExecuteSimdOp(WasmOpcode opcode, Decoder* decoder, InterpreterCode* code,
- pc_t pc, int* const len, uint32_t opcode_length) {
- switch (opcode) {
-#define SPLAT_CASE(format, sType, valType, num) \
- case kExpr##format##Splat: { \
- WasmValue val = Pop(); \
- valType v = val.to<valType>(); \
- sType s; \
- for (int i = 0; i < num; i++) s.val[i] = v; \
- Push(WasmValue(Simd128(s))); \
- return true; \
- }
- SPLAT_CASE(F64x2, float2, double, 2)
- SPLAT_CASE(F32x4, float4, float, 4)
- SPLAT_CASE(I64x2, int2, int64_t, 2)
- SPLAT_CASE(I32x4, int4, int32_t, 4)
- SPLAT_CASE(I16x8, int8, int32_t, 8)
- SPLAT_CASE(I8x16, int16, int32_t, 16)
-#undef SPLAT_CASE
-#define EXTRACT_LANE_CASE(format, name) \
- case kExpr##format##ExtractLane: { \
- SimdLaneImmediate<Decoder::kNoValidate> imm(decoder, code->at(pc), \
- opcode_length); \
- *len += 1; \
- WasmValue val = Pop(); \
- Simd128 s = val.to_s128(); \
- auto ss = s.to_##name(); \
- Push(WasmValue(ss.val[LANE(imm.lane, ss)])); \
- return true; \
- }
- EXTRACT_LANE_CASE(F64x2, f64x2)
- EXTRACT_LANE_CASE(F32x4, f32x4)
- EXTRACT_LANE_CASE(I64x2, i64x2)
- EXTRACT_LANE_CASE(I32x4, i32x4)
-#undef EXTRACT_LANE_CASE
-#define EXTRACT_LANE_EXTEND_CASE(format, name, sign, type) \
- case kExpr##format##ExtractLane##sign: { \
- SimdLaneImmediate<Decoder::kNoValidate> imm(decoder, code->at(pc), \
- opcode_length); \
- *len += 1; \
- WasmValue val = Pop(); \
- Simd128 s = val.to_s128(); \
- auto ss = s.to_##name(); \
- Push(WasmValue(static_cast<type>(ss.val[LANE(imm.lane, ss)]))); \
- return true; \
- }
- EXTRACT_LANE_EXTEND_CASE(I16x8, i16x8, S, int32_t)
- EXTRACT_LANE_EXTEND_CASE(I16x8, i16x8, U, uint32_t)
- EXTRACT_LANE_EXTEND_CASE(I8x16, i8x16, S, int32_t)
- EXTRACT_LANE_EXTEND_CASE(I8x16, i8x16, U, uint32_t)
-#undef EXTRACT_LANE_EXTEND_CASE
-#define BINOP_CASE(op, name, stype, count, expr) \
- case kExpr##op: { \
- WasmValue v2 = Pop(); \
- WasmValue v1 = Pop(); \
- stype s1 = v1.to_s128().to_##name(); \
- stype s2 = v2.to_s128().to_##name(); \
- stype res; \
- for (size_t i = 0; i < count; ++i) { \
- auto a = s1.val[LANE(i, s1)]; \
- auto b = s2.val[LANE(i, s1)]; \
- auto result = expr; \
- possible_nondeterminism_ |= has_nondeterminism(result); \
- res.val[LANE(i, s1)] = expr; \
- } \
- Push(WasmValue(Simd128(res))); \
- return true; \
- }
- BINOP_CASE(F64x2Add, f64x2, float2, 2, a + b)
- BINOP_CASE(F64x2Sub, f64x2, float2, 2, a - b)
- BINOP_CASE(F64x2Mul, f64x2, float2, 2, a * b)
- BINOP_CASE(F64x2Div, f64x2, float2, 2, base::Divide(a, b))
- BINOP_CASE(F64x2Min, f64x2, float2, 2, JSMin(a, b))
- BINOP_CASE(F64x2Max, f64x2, float2, 2, JSMax(a, b))
- BINOP_CASE(F64x2Pmin, f64x2, float2, 2, std::min(a, b))
- BINOP_CASE(F64x2Pmax, f64x2, float2, 2, std::max(a, b))
- BINOP_CASE(F32x4Add, f32x4, float4, 4, a + b)
- BINOP_CASE(F32x4Sub, f32x4, float4, 4, a - b)
- BINOP_CASE(F32x4Mul, f32x4, float4, 4, a * b)
- BINOP_CASE(F32x4Div, f32x4, float4, 4, a / b)
- BINOP_CASE(F32x4Min, f32x4, float4, 4, JSMin(a, b))
- BINOP_CASE(F32x4Max, f32x4, float4, 4, JSMax(a, b))
- BINOP_CASE(F32x4Pmin, f32x4, float4, 4, std::min(a, b))
- BINOP_CASE(F32x4Pmax, f32x4, float4, 4, std::max(a, b))
- BINOP_CASE(I64x2Add, i64x2, int2, 2, base::AddWithWraparound(a, b))
- BINOP_CASE(I64x2Sub, i64x2, int2, 2, base::SubWithWraparound(a, b))
- BINOP_CASE(I64x2Mul, i64x2, int2, 2, base::MulWithWraparound(a, b))
- BINOP_CASE(I64x2MinS, i64x2, int2, 2, a < b ? a : b)
- BINOP_CASE(I64x2MinU, i64x2, int2, 2,
- static_cast<uint64_t>(a) < static_cast<uint64_t>(b) ? a : b)
- BINOP_CASE(I64x2MaxS, i64x2, int2, 2, a > b ? a : b)
- BINOP_CASE(I64x2MaxU, i64x2, int2, 2,
- static_cast<uint64_t>(a) > static_cast<uint64_t>(b) ? a : b)
- BINOP_CASE(I32x4Add, i32x4, int4, 4, base::AddWithWraparound(a, b))
- BINOP_CASE(I32x4Sub, i32x4, int4, 4, base::SubWithWraparound(a, b))
- BINOP_CASE(I32x4Mul, i32x4, int4, 4, base::MulWithWraparound(a, b))
- BINOP_CASE(I32x4MinS, i32x4, int4, 4, a < b ? a : b)
- BINOP_CASE(I32x4MinU, i32x4, int4, 4,
- static_cast<uint32_t>(a) < static_cast<uint32_t>(b) ? a : b)
- BINOP_CASE(I32x4MaxS, i32x4, int4, 4, a > b ? a : b)
- BINOP_CASE(I32x4MaxU, i32x4, int4, 4,
- static_cast<uint32_t>(a) > static_cast<uint32_t>(b) ? a : b)
- BINOP_CASE(S128And, i32x4, int4, 4, a & b)
- BINOP_CASE(S128Or, i32x4, int4, 4, a | b)
- BINOP_CASE(S128Xor, i32x4, int4, 4, a ^ b)
- BINOP_CASE(S128AndNot, i32x4, int4, 4, a & ~b)
- BINOP_CASE(I16x8Add, i16x8, int8, 8, base::AddWithWraparound(a, b))
- BINOP_CASE(I16x8Sub, i16x8, int8, 8, base::SubWithWraparound(a, b))
- BINOP_CASE(I16x8Mul, i16x8, int8, 8, base::MulWithWraparound(a, b))
- BINOP_CASE(I16x8MinS, i16x8, int8, 8, a < b ? a : b)
- BINOP_CASE(I16x8MinU, i16x8, int8, 8,
- static_cast<uint16_t>(a) < static_cast<uint16_t>(b) ? a : b)
- BINOP_CASE(I16x8MaxS, i16x8, int8, 8, a > b ? a : b)
- BINOP_CASE(I16x8MaxU, i16x8, int8, 8,
- static_cast<uint16_t>(a) > static_cast<uint16_t>(b) ? a : b)
- BINOP_CASE(I16x8AddSaturateS, i16x8, int8, 8, SaturateAdd<int16_t>(a, b))
- BINOP_CASE(I16x8AddSaturateU, i16x8, int8, 8, SaturateAdd<uint16_t>(a, b))
- BINOP_CASE(I16x8SubSaturateS, i16x8, int8, 8, SaturateSub<int16_t>(a, b))
- BINOP_CASE(I16x8SubSaturateU, i16x8, int8, 8, SaturateSub<uint16_t>(a, b))
- BINOP_CASE(I16x8RoundingAverageU, i16x8, int8, 8,
- base::RoundingAverageUnsigned<uint16_t>(a, b))
- BINOP_CASE(I8x16Add, i8x16, int16, 16, base::AddWithWraparound(a, b))
- BINOP_CASE(I8x16Sub, i8x16, int16, 16, base::SubWithWraparound(a, b))
- BINOP_CASE(I8x16Mul, i8x16, int16, 16, base::MulWithWraparound(a, b))
- BINOP_CASE(I8x16MinS, i8x16, int16, 16, a < b ? a : b)
- BINOP_CASE(I8x16MinU, i8x16, int16, 16,
- static_cast<uint8_t>(a) < static_cast<uint8_t>(b) ? a : b)
- BINOP_CASE(I8x16MaxS, i8x16, int16, 16, a > b ? a : b)
- BINOP_CASE(I8x16MaxU, i8x16, int16, 16,
- static_cast<uint8_t>(a) > static_cast<uint8_t>(b) ? a : b)
- BINOP_CASE(I8x16AddSaturateS, i8x16, int16, 16, SaturateAdd<int8_t>(a, b))
- BINOP_CASE(I8x16AddSaturateU, i8x16, int16, 16,
- SaturateAdd<uint8_t>(a, b))
- BINOP_CASE(I8x16SubSaturateS, i8x16, int16, 16, SaturateSub<int8_t>(a, b))
- BINOP_CASE(I8x16SubSaturateU, i8x16, int16, 16,
- SaturateSub<uint8_t>(a, b))
- BINOP_CASE(I8x16RoundingAverageU, i8x16, int16, 16,
- base::RoundingAverageUnsigned<uint8_t>(a, b))
-#undef BINOP_CASE
-#define UNOP_CASE(op, name, stype, count, expr) \
- case kExpr##op: { \
- WasmValue v = Pop(); \
- stype s = v.to_s128().to_##name(); \
- stype res; \
- for (size_t i = 0; i < count; ++i) { \
- auto a = s.val[i]; \
- auto result = expr; \
- possible_nondeterminism_ |= has_nondeterminism(result); \
- res.val[i] = result; \
- } \
- Push(WasmValue(Simd128(res))); \
- return true; \
- }
- UNOP_CASE(F64x2Abs, f64x2, float2, 2, std::abs(a))
- UNOP_CASE(F64x2Neg, f64x2, float2, 2, -a)
- UNOP_CASE(F64x2Sqrt, f64x2, float2, 2, std::sqrt(a))
- UNOP_CASE(F32x4Abs, f32x4, float4, 4, std::abs(a))
- UNOP_CASE(F32x4Neg, f32x4, float4, 4, -a)
- UNOP_CASE(F32x4Sqrt, f32x4, float4, 4, std::sqrt(a))
- UNOP_CASE(F32x4RecipApprox, f32x4, float4, 4, base::Recip(a))
- UNOP_CASE(F32x4RecipSqrtApprox, f32x4, float4, 4, base::RecipSqrt(a))
- UNOP_CASE(I64x2Neg, i64x2, int2, 2, base::NegateWithWraparound(a))
- UNOP_CASE(I32x4Neg, i32x4, int4, 4, base::NegateWithWraparound(a))
- UNOP_CASE(I32x4Abs, i32x4, int4, 4, std::abs(a))
- UNOP_CASE(S128Not, i32x4, int4, 4, ~a)
- UNOP_CASE(I16x8Neg, i16x8, int8, 8, base::NegateWithWraparound(a))
- UNOP_CASE(I16x8Abs, i16x8, int8, 8, std::abs(a))
- UNOP_CASE(I8x16Neg, i8x16, int16, 16, base::NegateWithWraparound(a))
- UNOP_CASE(I8x16Abs, i8x16, int16, 16, std::abs(a))
-#undef UNOP_CASE
-
-// Cast to double in the call to signbit is due to an MSVC issue; see
-// https://github.com/microsoft/STL/issues/519.
-#define BITMASK_CASE(op, name, stype, count) \
- case kExpr##op: { \
- WasmValue v = Pop(); \
- stype s = v.to_s128().to_##name(); \
- int32_t res = 0; \
- for (size_t i = 0; i < count; ++i) { \
- bool sign = std::signbit(static_cast<double>(s.val[i])); \
- res |= (sign << i); \
- } \
- Push(WasmValue(res)); \
- return true; \
- }
- BITMASK_CASE(I8x16BitMask, i8x16, int16, 16)
- BITMASK_CASE(I16x8BitMask, i16x8, int8, 8)
- BITMASK_CASE(I32x4BitMask, i32x4, int4, 4)
-#undef BITMASK_CASE
-
-#define CMPOP_CASE(op, name, stype, out_stype, count, expr) \
- case kExpr##op: { \
- WasmValue v2 = Pop(); \
- WasmValue v1 = Pop(); \
- stype s1 = v1.to_s128().to_##name(); \
- stype s2 = v2.to_s128().to_##name(); \
- out_stype res; \
- for (size_t i = 0; i < count; ++i) { \
- auto a = s1.val[i]; \
- auto b = s2.val[i]; \
- auto result = expr; \
- possible_nondeterminism_ |= has_nondeterminism(result); \
- res.val[i] = result ? -1 : 0; \
- } \
- Push(WasmValue(Simd128(res))); \
- return true; \
- }
- CMPOP_CASE(F64x2Eq, f64x2, float2, int2, 2, a == b)
- CMPOP_CASE(F64x2Ne, f64x2, float2, int2, 2, a != b)
- CMPOP_CASE(F64x2Gt, f64x2, float2, int2, 2, a > b)
- CMPOP_CASE(F64x2Ge, f64x2, float2, int2, 2, a >= b)
- CMPOP_CASE(F64x2Lt, f64x2, float2, int2, 2, a < b)
- CMPOP_CASE(F64x2Le, f64x2, float2, int2, 2, a <= b)
- CMPOP_CASE(F32x4Eq, f32x4, float4, int4, 4, a == b)
- CMPOP_CASE(F32x4Ne, f32x4, float4, int4, 4, a != b)
- CMPOP_CASE(F32x4Gt, f32x4, float4, int4, 4, a > b)
- CMPOP_CASE(F32x4Ge, f32x4, float4, int4, 4, a >= b)
- CMPOP_CASE(F32x4Lt, f32x4, float4, int4, 4, a < b)
- CMPOP_CASE(F32x4Le, f32x4, float4, int4, 4, a <= b)
- CMPOP_CASE(I64x2Eq, i64x2, int2, int2, 2, a == b)
- CMPOP_CASE(I64x2Ne, i64x2, int2, int2, 2, a != b)
- CMPOP_CASE(I64x2GtS, i64x2, int2, int2, 2, a > b)
- CMPOP_CASE(I64x2GeS, i64x2, int2, int2, 2, a >= b)
- CMPOP_CASE(I64x2LtS, i64x2, int2, int2, 2, a < b)
- CMPOP_CASE(I64x2LeS, i64x2, int2, int2, 2, a <= b)
- CMPOP_CASE(I64x2GtU, i64x2, int2, int2, 2,
- static_cast<uint64_t>(a) > static_cast<uint64_t>(b))
- CMPOP_CASE(I64x2GeU, i64x2, int2, int2, 2,
- static_cast<uint64_t>(a) >= static_cast<uint64_t>(b))
- CMPOP_CASE(I64x2LtU, i64x2, int2, int2, 2,
- static_cast<uint64_t>(a) < static_cast<uint64_t>(b))
- CMPOP_CASE(I64x2LeU, i64x2, int2, int2, 2,
- static_cast<uint64_t>(a) <= static_cast<uint64_t>(b))
- CMPOP_CASE(I32x4Eq, i32x4, int4, int4, 4, a == b)
- CMPOP_CASE(I32x4Ne, i32x4, int4, int4, 4, a != b)
- CMPOP_CASE(I32x4GtS, i32x4, int4, int4, 4, a > b)
- CMPOP_CASE(I32x4GeS, i32x4, int4, int4, 4, a >= b)
- CMPOP_CASE(I32x4LtS, i32x4, int4, int4, 4, a < b)
- CMPOP_CASE(I32x4LeS, i32x4, int4, int4, 4, a <= b)
- CMPOP_CASE(I32x4GtU, i32x4, int4, int4, 4,
- static_cast<uint32_t>(a) > static_cast<uint32_t>(b))
- CMPOP_CASE(I32x4GeU, i32x4, int4, int4, 4,
- static_cast<uint32_t>(a) >= static_cast<uint32_t>(b))
- CMPOP_CASE(I32x4LtU, i32x4, int4, int4, 4,
- static_cast<uint32_t>(a) < static_cast<uint32_t>(b))
- CMPOP_CASE(I32x4LeU, i32x4, int4, int4, 4,
- static_cast<uint32_t>(a) <= static_cast<uint32_t>(b))
- CMPOP_CASE(I16x8Eq, i16x8, int8, int8, 8, a == b)
- CMPOP_CASE(I16x8Ne, i16x8, int8, int8, 8, a != b)
- CMPOP_CASE(I16x8GtS, i16x8, int8, int8, 8, a > b)
- CMPOP_CASE(I16x8GeS, i16x8, int8, int8, 8, a >= b)
- CMPOP_CASE(I16x8LtS, i16x8, int8, int8, 8, a < b)
- CMPOP_CASE(I16x8LeS, i16x8, int8, int8, 8, a <= b)
- CMPOP_CASE(I16x8GtU, i16x8, int8, int8, 8,
- static_cast<uint16_t>(a) > static_cast<uint16_t>(b))
- CMPOP_CASE(I16x8GeU, i16x8, int8, int8, 8,
- static_cast<uint16_t>(a) >= static_cast<uint16_t>(b))
- CMPOP_CASE(I16x8LtU, i16x8, int8, int8, 8,
- static_cast<uint16_t>(a) < static_cast<uint16_t>(b))
- CMPOP_CASE(I16x8LeU, i16x8, int8, int8, 8,
- static_cast<uint16_t>(a) <= static_cast<uint16_t>(b))
- CMPOP_CASE(I8x16Eq, i8x16, int16, int16, 16, a == b)
- CMPOP_CASE(I8x16Ne, i8x16, int16, int16, 16, a != b)
- CMPOP_CASE(I8x16GtS, i8x16, int16, int16, 16, a > b)
- CMPOP_CASE(I8x16GeS, i8x16, int16, int16, 16, a >= b)
- CMPOP_CASE(I8x16LtS, i8x16, int16, int16, 16, a < b)
- CMPOP_CASE(I8x16LeS, i8x16, int16, int16, 16, a <= b)
- CMPOP_CASE(I8x16GtU, i8x16, int16, int16, 16,
- static_cast<uint8_t>(a) > static_cast<uint8_t>(b))
- CMPOP_CASE(I8x16GeU, i8x16, int16, int16, 16,
- static_cast<uint8_t>(a) >= static_cast<uint8_t>(b))
- CMPOP_CASE(I8x16LtU, i8x16, int16, int16, 16,
- static_cast<uint8_t>(a) < static_cast<uint8_t>(b))
- CMPOP_CASE(I8x16LeU, i8x16, int16, int16, 16,
- static_cast<uint8_t>(a) <= static_cast<uint8_t>(b))
-#undef CMPOP_CASE
-#define REPLACE_LANE_CASE(format, name, stype, ctype) \
- case kExpr##format##ReplaceLane: { \
- SimdLaneImmediate<Decoder::kNoValidate> imm(decoder, code->at(pc), \
- opcode_length); \
- *len += 1; \
- WasmValue new_val = Pop(); \
- WasmValue simd_val = Pop(); \
- stype s = simd_val.to_s128().to_##name(); \
- s.val[LANE(imm.lane, s)] = new_val.to<ctype>(); \
- Push(WasmValue(Simd128(s))); \
- return true; \
- }
- REPLACE_LANE_CASE(F64x2, f64x2, float2, double)
- REPLACE_LANE_CASE(F32x4, f32x4, float4, float)
- REPLACE_LANE_CASE(I64x2, i64x2, int2, int64_t)
- REPLACE_LANE_CASE(I32x4, i32x4, int4, int32_t)
- REPLACE_LANE_CASE(I16x8, i16x8, int8, int32_t)
- REPLACE_LANE_CASE(I8x16, i8x16, int16, int32_t)
-#undef REPLACE_LANE_CASE
- case kExprS128LoadMem:
- return ExecuteLoad<Simd128, Simd128>(decoder, code, pc, len,
- MachineRepresentation::kSimd128,
- /*prefix_len=*/opcode_length);
- case kExprS128StoreMem:
- return ExecuteStore<Simd128, Simd128>(decoder, code, pc, len,
- MachineRepresentation::kSimd128,
- /*prefix_len=*/opcode_length);
-#define SHIFT_CASE(op, name, stype, count, expr) \
- case kExpr##op: { \
- uint32_t shift = Pop().to<uint32_t>(); \
- WasmValue v = Pop(); \
- stype s = v.to_s128().to_##name(); \
- stype res; \
- for (size_t i = 0; i < count; ++i) { \
- auto a = s.val[i]; \
- res.val[i] = expr; \
- } \
- Push(WasmValue(Simd128(res))); \
- return true; \
- }
- SHIFT_CASE(I64x2Shl, i64x2, int2, 2,
- static_cast<uint64_t>(a) << (shift % 64))
- SHIFT_CASE(I64x2ShrS, i64x2, int2, 2, a >> (shift % 64))
- SHIFT_CASE(I64x2ShrU, i64x2, int2, 2,
- static_cast<uint64_t>(a) >> (shift % 64))
- SHIFT_CASE(I32x4Shl, i32x4, int4, 4,
- static_cast<uint32_t>(a) << (shift % 32))
- SHIFT_CASE(I32x4ShrS, i32x4, int4, 4, a >> (shift % 32))
- SHIFT_CASE(I32x4ShrU, i32x4, int4, 4,
- static_cast<uint32_t>(a) >> (shift % 32))
- SHIFT_CASE(I16x8Shl, i16x8, int8, 8,
- static_cast<uint16_t>(a) << (shift % 16))
- SHIFT_CASE(I16x8ShrS, i16x8, int8, 8, a >> (shift % 16))
- SHIFT_CASE(I16x8ShrU, i16x8, int8, 8,
- static_cast<uint16_t>(a) >> (shift % 16))
- SHIFT_CASE(I8x16Shl, i8x16, int16, 16,
- static_cast<uint8_t>(a) << (shift % 8))
- SHIFT_CASE(I8x16ShrS, i8x16, int16, 16, a >> (shift % 8))
- SHIFT_CASE(I8x16ShrU, i8x16, int16, 16,
- static_cast<uint8_t>(a) >> (shift % 8))
-#undef SHIFT_CASE
-#define CONVERT_CASE(op, src_type, name, dst_type, count, start_index, ctype, \
- expr) \
- case kExpr##op: { \
- WasmValue v = Pop(); \
- src_type s = v.to_s128().to_##name(); \
- dst_type res; \
- for (size_t i = 0; i < count; ++i) { \
- ctype a = s.val[LANE(start_index + i, s)]; \
- auto result = expr; \
- possible_nondeterminism_ |= has_nondeterminism(result); \
- res.val[LANE(i, res)] = expr; \
- } \
- Push(WasmValue(Simd128(res))); \
- return true; \
- }
- CONVERT_CASE(F32x4SConvertI32x4, int4, i32x4, float4, 4, 0, int32_t,
- static_cast<float>(a))
- CONVERT_CASE(F32x4UConvertI32x4, int4, i32x4, float4, 4, 0, uint32_t,
- static_cast<float>(a))
- CONVERT_CASE(I32x4SConvertF32x4, float4, f32x4, int4, 4, 0, double,
- std::isnan(a) ? 0
- : a < kMinInt ? kMinInt
- : a > kMaxInt ? kMaxInt
- : static_cast<int32_t>(a))
- CONVERT_CASE(I32x4UConvertF32x4, float4, f32x4, int4, 4, 0, double,
- std::isnan(a) ? 0
- : a < 0 ? 0
- : a > kMaxUInt32 ? kMaxUInt32
- : static_cast<uint32_t>(a))
- CONVERT_CASE(I32x4SConvertI16x8High, int8, i16x8, int4, 4, 4, int16_t,
- a)
- CONVERT_CASE(I32x4UConvertI16x8High, int8, i16x8, int4, 4, 4, uint16_t,
- a)
- CONVERT_CASE(I32x4SConvertI16x8Low, int8, i16x8, int4, 4, 0, int16_t, a)
- CONVERT_CASE(I32x4UConvertI16x8Low, int8, i16x8, int4, 4, 0, uint16_t,
- a)
- CONVERT_CASE(I16x8SConvertI8x16High, int16, i8x16, int8, 8, 8, int8_t,
- a)
- CONVERT_CASE(I16x8UConvertI8x16High, int16, i8x16, int8, 8, 8, uint8_t,
- a)
- CONVERT_CASE(I16x8SConvertI8x16Low, int16, i8x16, int8, 8, 0, int8_t, a)
- CONVERT_CASE(I16x8UConvertI8x16Low, int16, i8x16, int8, 8, 0, uint8_t,
- a)
-#undef CONVERT_CASE
-#define PACK_CASE(op, src_type, name, dst_type, count, ctype, dst_ctype) \
- case kExpr##op: { \
- WasmValue v2 = Pop(); \
- WasmValue v1 = Pop(); \
- src_type s1 = v1.to_s128().to_##name(); \
- src_type s2 = v2.to_s128().to_##name(); \
- dst_type res; \
- int64_t min = std::numeric_limits<ctype>::min(); \
- int64_t max = std::numeric_limits<ctype>::max(); \
- for (size_t i = 0; i < count; ++i) { \
- int64_t v = i < count / 2 ? s1.val[LANE(i, s1)] \
- : s2.val[LANE(i - count / 2, s2)]; \
- res.val[LANE(i, res)] = \
- static_cast<dst_ctype>(std::max(min, std::min(max, v))); \
- } \
- Push(WasmValue(Simd128(res))); \
- return true; \
- }
- PACK_CASE(I16x8SConvertI32x4, int4, i32x4, int8, 8, int16_t, int16_t)
- PACK_CASE(I16x8UConvertI32x4, int4, i32x4, int8, 8, uint16_t, int16_t)
- PACK_CASE(I8x16SConvertI16x8, int8, i16x8, int16, 16, int8_t, int8_t)
- PACK_CASE(I8x16UConvertI16x8, int8, i16x8, int16, 16, uint8_t, int8_t)
-#undef PACK_CASE
- case kExprS128Select: {
- int4 bool_val = Pop().to_s128().to_i32x4();
- int4 v2 = Pop().to_s128().to_i32x4();
- int4 v1 = Pop().to_s128().to_i32x4();
- int4 res;
- for (size_t i = 0; i < 4; ++i) {
- res.val[i] = v2.val[i] ^ ((v1.val[i] ^ v2.val[i]) & bool_val.val[i]);
- }
- Push(WasmValue(Simd128(res)));
- return true;
- }
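- // The select expression above is the branch-free bitwise blend: wherever a
- // mask bit is set the result takes that bit from v1, otherwise from v2. A
- // scalar sketch of the identity (helper name is illustrative only):
- //   uint32_t BitSelect(uint32_t v1, uint32_t v2, uint32_t mask) {
- //     return v2 ^ ((v1 ^ v2) & mask);  // == (v1 & mask) | (v2 & ~mask)
- //   }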
-#define ADD_HORIZ_CASE(op, name, stype, count) \
- case kExpr##op: { \
- WasmValue v2 = Pop(); \
- WasmValue v1 = Pop(); \
- stype s1 = v1.to_s128().to_##name(); \
- stype s2 = v2.to_s128().to_##name(); \
- stype res; \
- for (size_t i = 0; i < count / 2; ++i) { \
- auto result1 = s1.val[LANE(i * 2, s1)] + s1.val[LANE(i * 2 + 1, s1)]; \
- possible_nondeterminism_ |= has_nondeterminism(result1); \
- res.val[LANE(i, s1)] = result1; \
- auto result2 = s2.val[LANE(i * 2, s1)] + s2.val[LANE(i * 2 + 1, s1)]; \
- possible_nondeterminism_ |= has_nondeterminism(result2); \
- res.val[LANE(i + count / 2, s1)] = result2; \
- } \
- Push(WasmValue(Simd128(res))); \
- return true; \
- }
- ADD_HORIZ_CASE(I32x4AddHoriz, i32x4, int4, 4)
- ADD_HORIZ_CASE(F32x4AddHoriz, f32x4, float4, 4)
- ADD_HORIZ_CASE(I16x8AddHoriz, i16x8, int8, 8)
-#undef ADD_HORIZ_CASE
- case kExprS8x16Swizzle: {
- int16 v2 = Pop().to_s128().to_i8x16();
- int16 v1 = Pop().to_s128().to_i8x16();
- int16 res;
- for (size_t i = 0; i < kSimd128Size; ++i) {
- int lane = v2.val[LANE(i, v1)];
- res.val[LANE(i, v1)] =
- lane < kSimd128Size && lane >= 0 ? v1.val[LANE(lane, v1)] : 0;
- }
- Push(WasmValue(Simd128(res)));
- return true;
- }
- case kExprS8x16Shuffle: {
- Simd8x16ShuffleImmediate<Decoder::kNoValidate> imm(
- decoder, code->at(pc), opcode_length);
- *len += 16;
- int16 v2 = Pop().to_s128().to_i8x16();
- int16 v1 = Pop().to_s128().to_i8x16();
- int16 res;
- for (size_t i = 0; i < kSimd128Size; ++i) {
- int lane = imm.shuffle[i];
- res.val[LANE(i, v1)] = lane < kSimd128Size
- ? v1.val[LANE(lane, v1)]
- : v2.val[LANE(lane - kSimd128Size, v1)];
- }
- Push(WasmValue(Simd128(res)));
- return true;
- }
- case kExprS1x2AnyTrue:
- case kExprS1x4AnyTrue:
- case kExprS1x8AnyTrue:
- case kExprS1x16AnyTrue: {
- int4 s = Pop().to_s128().to_i32x4();
- bool res = s.val[0] | s.val[1] | s.val[2] | s.val[3];
- Push(WasmValue((res)));
- return true;
- }
-#define REDUCTION_CASE(op, name, stype, count, operation) \
- case kExpr##op: { \
- stype s = Pop().to_s128().to_##name(); \
- bool res = true; \
- for (size_t i = 0; i < count; ++i) { \
- res = res & static_cast<bool>(s.val[i]); \
- } \
- Push(WasmValue(res)); \
- return true; \
- }
- REDUCTION_CASE(S1x2AllTrue, i64x2, int2, 2, &)
- REDUCTION_CASE(S1x4AllTrue, i32x4, int4, 4, &)
- REDUCTION_CASE(S1x8AllTrue, i16x8, int8, 8, &)
- REDUCTION_CASE(S1x16AllTrue, i8x16, int16, 16, &)
-#undef REDUCTION_CASE
-#define QFM_CASE(op, name, stype, count, operation) \
- case kExpr##op: { \
- stype c = Pop().to_s128().to_##name(); \
- stype b = Pop().to_s128().to_##name(); \
- stype a = Pop().to_s128().to_##name(); \
- stype res; \
- for (size_t i = 0; i < count; i++) { \
- res.val[i] = a.val[i] operation(b.val[i] * c.val[i]); \
- } \
- Push(WasmValue(Simd128(res))); \
- return true; \
- }
- QFM_CASE(F32x4Qfma, f32x4, float4, 4, +)
- QFM_CASE(F32x4Qfms, f32x4, float4, 4, -)
- QFM_CASE(F64x2Qfma, f64x2, float2, 2, +)
- QFM_CASE(F64x2Qfms, f64x2, float2, 2, -)
-#undef QFM_CASE
- case kExprS8x16LoadSplat: {
- return DoSimdLoadSplat<int16, int32_t, int8_t>(
- decoder, code, pc, len, MachineRepresentation::kWord8);
- }
- case kExprS16x8LoadSplat: {
- return DoSimdLoadSplat<int8, int32_t, int16_t>(
- decoder, code, pc, len, MachineRepresentation::kWord16);
- }
- case kExprS32x4LoadSplat: {
- return DoSimdLoadSplat<int4, int32_t, int32_t>(
- decoder, code, pc, len, MachineRepresentation::kWord32);
- }
- case kExprS64x2LoadSplat: {
- return DoSimdLoadSplat<int2, int64_t, int64_t>(
- decoder, code, pc, len, MachineRepresentation::kWord64);
- }
- case kExprI16x8Load8x8S: {
- return DoSimdLoadExtend<int8, int16_t, int8_t>(
- decoder, code, pc, len, MachineRepresentation::kWord64);
- }
- case kExprI16x8Load8x8U: {
- return DoSimdLoadExtend<int8, uint16_t, uint8_t>(
- decoder, code, pc, len, MachineRepresentation::kWord64);
- }
- case kExprI32x4Load16x4S: {
- return DoSimdLoadExtend<int4, int32_t, int16_t>(
- decoder, code, pc, len, MachineRepresentation::kWord64);
- }
- case kExprI32x4Load16x4U: {
- return DoSimdLoadExtend<int4, uint32_t, uint16_t>(
- decoder, code, pc, len, MachineRepresentation::kWord64);
- }
- case kExprI64x2Load32x2S: {
- return DoSimdLoadExtend<int2, int64_t, int32_t>(
- decoder, code, pc, len, MachineRepresentation::kWord64);
- }
- case kExprI64x2Load32x2U: {
- return DoSimdLoadExtend<int2, uint64_t, uint32_t>(
- decoder, code, pc, len, MachineRepresentation::kWord64);
- }
- default:
- return false;
- }
- }
-
- template <typename s_type, typename result_type, typename load_type>
- bool DoSimdLoadSplat(Decoder* decoder, InterpreterCode* code, pc_t pc,
- int* const len, MachineRepresentation rep) {
- // len is the number of bytes that make up this op, including the prefix
- // byte, so the prefix_len for ExecuteLoad is len minus the prefix byte
- // itself. Think of prefix_len as the number of extra bytes that make up
- // this op.
- if (!ExecuteLoad<result_type, load_type>(decoder, code, pc, len, rep,
- /*prefix_len=*/*len - 1)) {
- return false;
- }
- result_type v = Pop().to<result_type>();
- s_type s;
- for (size_t i = 0; i < arraysize(s.val); i++) s.val[i] = v;
- Push(WasmValue(Simd128(s)));
- return true;
- }
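-
- // Worked example of the prefix_len bookkeeping above (byte counts are
- // illustrative): if the load-splat instruction is encoded as the SIMD
- // prefix byte plus a one-byte LEB-encoded opcode, *len is 2 on entry, so
- // ExecuteLoad is passed prefix_len = *len - 1 = 1, i.e. one extra byte
- // compared to a plain one-byte opcode.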
-
- template <typename s_type, typename wide_type, typename narrow_type>
- bool DoSimdLoadExtend(Decoder* decoder, InterpreterCode* code, pc_t pc,
- int* const len, MachineRepresentation rep) {
- static_assert(sizeof(wide_type) == sizeof(narrow_type) * 2,
- "size mismatch for wide and narrow types");
- if (!ExecuteLoad<uint64_t, uint64_t>(decoder, code, pc, len, rep,
- /*prefix_len=*/*len - 1)) {
- return false;
- }
- constexpr int lanes = kSimd128Size / sizeof(wide_type);
- uint64_t v = Pop().to_u64();
- s_type s;
- for (int i = 0; i < lanes; i++) {
- uint8_t shift = i * (sizeof(narrow_type) * 8);
- narrow_type el = static_cast<narrow_type>(v >> shift);
- s.val[i] = static_cast<wide_type>(el);
- }
- Push(WasmValue(Simd128(s)));
- return true;
- }
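-
- // A minimal scalar sketch of the per-lane widening above, for the signed
- // 8-to-16-bit case (the helper name is hypothetical):
- //   int16_t WidenLane8x8S(uint64_t packed, int lane) {
- //     // Take byte `lane` of the 64-bit load and sign-extend it.
- //     return static_cast<int16_t>(static_cast<int8_t>(packed >> (lane * 8)));
- //   }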
-
- // Check if our control stack (frames_) exceeds the limit. Trigger a stack
- // overflow if it does, and unwind the current frame.
- // Returns true if execution can continue, false if the current activation was
- // fully unwound.
- // Do call this function immediately *after* pushing a new frame. The pc of
- // the top frame will be reset to 0 if the stack check fails.
- bool DoStackCheck() V8_WARN_UNUSED_RESULT {
- // The goal of this stack check is not to prevent actual stack overflows,
- // but to simulate stack overflows during the execution of compiled code.
- // That is why this function uses FLAG_stack_size, even though the value
- // stack actually lies in zone memory.
- const size_t stack_size_limit = FLAG_stack_size * KB;
- // Sum up the value stack size and the control stack size.
- const size_t current_stack_size = (sp_ - stack_.get()) * sizeof(*sp_) +
- frames_.size() * sizeof(frames_[0]);
- if (V8_LIKELY(current_stack_size <= stack_size_limit)) {
- return true;
- }
- // The pc of the top frame is initialized to the first instruction. We reset
- // it to 0 here such that we report the same position as in compiled code.
- frames_.back().pc = 0;
- isolate_->StackOverflow();
- return HandleException(isolate_) == WasmInterpreter::Thread::HANDLED;
- }
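-
- // For illustration (the flag value is hypothetical): with --stack-size=984
- // the limit is 984 * KB bytes, and the current usage is the value stack,
- // (sp_ - stack_.get()) * sizeof(*sp_), plus frames_.size() * sizeof(Frame).
- // Deeply recursive interpreted code therefore reports a stack overflow at
- // roughly the depth at which compiled code would.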
-
- void EncodeI32ExceptionValue(Handle<FixedArray> encoded_values,
- uint32_t* encoded_index, uint32_t value) {
- encoded_values->set((*encoded_index)++, Smi::FromInt(value >> 16));
- encoded_values->set((*encoded_index)++, Smi::FromInt(value & 0xffff));
- }
-
- void EncodeI64ExceptionValue(Handle<FixedArray> encoded_values,
- uint32_t* encoded_index, uint64_t value) {
- EncodeI32ExceptionValue(encoded_values, encoded_index,
- static_cast<uint32_t>(value >> 32));
- EncodeI32ExceptionValue(encoded_values, encoded_index,
- static_cast<uint32_t>(value));
- }
-
- // Allocate, initialize and throw a new exception. The exception values are
- // being popped off the operand stack. Returns true if the exception is being
- // handled locally by the interpreter, false otherwise (interpreter exits).
- bool DoThrowException(const WasmException* exception,
- uint32_t index) V8_WARN_UNUSED_RESULT {
- HandleScope handle_scope(isolate_); // Avoid leaking handles.
- Handle<WasmExceptionTag> exception_tag(
- WasmExceptionTag::cast(instance_object_->exceptions_table().get(index)),
- isolate_);
- uint32_t encoded_size = WasmExceptionPackage::GetEncodedSize(exception);
- Handle<WasmExceptionPackage> exception_object =
- WasmExceptionPackage::New(isolate_, exception_tag, encoded_size);
- Handle<FixedArray> encoded_values = Handle<FixedArray>::cast(
- WasmExceptionPackage::GetExceptionValues(isolate_, exception_object));
- // Encode the exception values on the operand stack into the exception
- // package allocated above. This encoding has to be in sync with other
- // backends so that exceptions can be passed between them.
- const WasmExceptionSig* sig = exception->sig;
- uint32_t encoded_index = 0;
- sp_t base_index = StackHeight() - sig->parameter_count();
- for (size_t i = 0; i < sig->parameter_count(); ++i) {
- WasmValue value = GetStackValue(base_index + i);
- switch (sig->GetParam(i).kind()) {
- case ValueType::kI32: {
- uint32_t u32 = value.to_u32();
- EncodeI32ExceptionValue(encoded_values, &encoded_index, u32);
- break;
- }
- case ValueType::kF32: {
- uint32_t f32 = value.to_f32_boxed().get_bits();
- EncodeI32ExceptionValue(encoded_values, &encoded_index, f32);
- break;
- }
- case ValueType::kI64: {
- uint64_t u64 = value.to_u64();
- EncodeI64ExceptionValue(encoded_values, &encoded_index, u64);
- break;
- }
- case ValueType::kF64: {
- uint64_t f64 = value.to_f64_boxed().get_bits();
- EncodeI64ExceptionValue(encoded_values, &encoded_index, f64);
- break;
- }
- case ValueType::kS128: {
- int4 s128 = value.to_s128().to_i32x4();
- EncodeI32ExceptionValue(encoded_values, &encoded_index, s128.val[0]);
- EncodeI32ExceptionValue(encoded_values, &encoded_index, s128.val[1]);
- EncodeI32ExceptionValue(encoded_values, &encoded_index, s128.val[2]);
- EncodeI32ExceptionValue(encoded_values, &encoded_index, s128.val[3]);
- break;
- }
- case ValueType::kAnyRef:
- case ValueType::kFuncRef:
- case ValueType::kNullRef:
- case ValueType::kExnRef: {
- Handle<Object> anyref = value.to_anyref();
- DCHECK_IMPLIES(sig->GetParam(i) == kWasmNullRef, anyref->IsNull());
- encoded_values->set(encoded_index++, *anyref);
- break;
- }
- case ValueType::kRef:
- case ValueType::kOptRef:
- case ValueType::kEqRef:
- // TODO(7748): Implement these.
- UNIMPLEMENTED();
- case ValueType::kStmt:
- case ValueType::kBottom:
- UNREACHABLE();
- }
- }
- DCHECK_EQ(encoded_size, encoded_index);
- Drop(static_cast<int>(sig->parameter_count()));
- // Now that the exception is ready, set it as pending.
- isolate_->Throw(*exception_object);
- return HandleException(isolate_) == WasmInterpreter::Thread::HANDLED;
- }
-
- // Throw a given existing exception. Returns true if the exception is being
- // handled locally by the interpreter, false otherwise (interpreter exits).
- bool DoRethrowException(WasmValue exception) {
- isolate_->ReThrow(*exception.to_anyref());
- return HandleException(isolate_) == WasmInterpreter::Thread::HANDLED;
- }
-
- // Determines whether the given exception has a tag matching the expected tag
- // for the given index within the exception table of the current instance.
- bool MatchingExceptionTag(Handle<Object> exception_object, uint32_t index) {
- if (!exception_object->IsWasmExceptionPackage(isolate_)) return false;
- Handle<Object> caught_tag = WasmExceptionPackage::GetExceptionTag(
- isolate_, Handle<WasmExceptionPackage>::cast(exception_object));
- Handle<Object> expected_tag =
- handle(instance_object_->exceptions_table().get(index), isolate_);
- DCHECK(expected_tag->IsWasmExceptionTag());
- return expected_tag.is_identical_to(caught_tag);
- }
-
- void DecodeI32ExceptionValue(Handle<FixedArray> encoded_values,
- uint32_t* encoded_index, uint32_t* value) {
- uint32_t msb = Smi::cast(encoded_values->get((*encoded_index)++)).value();
- uint32_t lsb = Smi::cast(encoded_values->get((*encoded_index)++)).value();
- *value = (msb << 16) | (lsb & 0xffff);
- }
-
- void DecodeI64ExceptionValue(Handle<FixedArray> encoded_values,
- uint32_t* encoded_index, uint64_t* value) {
- uint32_t lsb = 0, msb = 0;
- DecodeI32ExceptionValue(encoded_values, encoded_index, &msb);
- DecodeI32ExceptionValue(encoded_values, encoded_index, &lsb);
- *value = (static_cast<uint64_t>(msb) << 32) | static_cast<uint64_t>(lsb);
- }
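-
- // The decoders above mirror EncodeI32/I64ExceptionValue: each 32-bit value
- // travels as two 16-bit Smis, high half first. A round-trip sketch
- // (hypothetical helper, for illustration only):
- //   uint32_t RoundTripU32(uint32_t v) {
- //     uint32_t msb = v >> 16, lsb = v & 0xffff;  // encode
- //     return (msb << 16) | (lsb & 0xffff);       // decode, yields v again
- //   }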
-
- // Unpack the values encoded in the given exception. The exception values are
- // pushed onto the operand stack. Callers must perform a tag check to ensure
- // the encoded values match the expected signature of the exception.
- void DoUnpackException(const WasmException* exception,
- Handle<Object> exception_object) {
- Handle<FixedArray> encoded_values =
- Handle<FixedArray>::cast(WasmExceptionPackage::GetExceptionValues(
- isolate_, Handle<WasmExceptionPackage>::cast(exception_object)));
- // Decode the exception values from the given exception package and push
- // them onto the operand stack. This encoding has to be in sync with other
- // backends so that exceptions can be passed between them.
- const WasmExceptionSig* sig = exception->sig;
- uint32_t encoded_index = 0;
- for (size_t i = 0; i < sig->parameter_count(); ++i) {
- WasmValue value;
- switch (sig->GetParam(i).kind()) {
- case ValueType::kI32: {
- uint32_t u32 = 0;
- DecodeI32ExceptionValue(encoded_values, &encoded_index, &u32);
- value = WasmValue(u32);
- break;
- }
- case ValueType::kF32: {
- uint32_t f32_bits = 0;
- DecodeI32ExceptionValue(encoded_values, &encoded_index, &f32_bits);
- value = WasmValue(Float32::FromBits(f32_bits));
- break;
- }
- case ValueType::kI64: {
- uint64_t u64 = 0;
- DecodeI64ExceptionValue(encoded_values, &encoded_index, &u64);
- value = WasmValue(u64);
- break;
- }
- case ValueType::kF64: {
- uint64_t f64_bits = 0;
- DecodeI64ExceptionValue(encoded_values, &encoded_index, &f64_bits);
- value = WasmValue(Float64::FromBits(f64_bits));
- break;
- }
- case ValueType::kS128: {
- int4 s128 = {0, 0, 0, 0};
- uint32_t* vals = reinterpret_cast<uint32_t*>(s128.val);
- DecodeI32ExceptionValue(encoded_values, &encoded_index, &vals[0]);
- DecodeI32ExceptionValue(encoded_values, &encoded_index, &vals[1]);
- DecodeI32ExceptionValue(encoded_values, &encoded_index, &vals[2]);
- DecodeI32ExceptionValue(encoded_values, &encoded_index, &vals[3]);
- value = WasmValue(Simd128(s128));
- break;
- }
- case ValueType::kAnyRef:
- case ValueType::kFuncRef:
- case ValueType::kNullRef:
- case ValueType::kExnRef: {
- Handle<Object> anyref(encoded_values->get(encoded_index++), isolate_);
- DCHECK_IMPLIES(sig->GetParam(i) == kWasmNullRef, anyref->IsNull());
- value = WasmValue(anyref);
- break;
- }
- case ValueType::kRef:
- case ValueType::kOptRef:
- case ValueType::kEqRef:
- // TODO(7748): Implement these.
- UNIMPLEMENTED();
- case ValueType::kStmt:
- case ValueType::kBottom:
- UNREACHABLE();
- }
- Push(value);
- }
- DCHECK_EQ(WasmExceptionPackage::GetEncodedSize(exception), encoded_index);
- }
-
- void Execute(InterpreterCode* code, pc_t pc, int max) {
- DCHECK_NOT_NULL(code->side_table);
- DCHECK(!frames_.empty());
- // There must be enough space on the stack to hold the arguments, locals,
- // and the value stack.
- DCHECK_LE(code->function->sig->parameter_count() +
- code->locals.type_list.size() +
- code->side_table->max_stack_height_,
- stack_limit_ - stack_.get() - frames_.back().sp);
- // Seal the surrounding {HandleScope} to ensure that all cases within the
- // interpreter switch below which deal with handles open their own scope.
- // This avoids leaking / accumulating handles in the surrounding scope.
- SealHandleScope shs(isolate_);
-
- Decoder decoder(code->start, code->end);
- pc_t limit = code->end - code->start;
- bool hit_break = false;
-
- while (true) {
-#define PAUSE_IF_BREAK_FLAG(flag) \
- if (V8_UNLIKELY(break_flags_ & WasmInterpreter::BreakFlag::flag)) { \
- hit_break = true; \
- max = 0; \
- }
-
- DCHECK_GT(limit, pc);
- DCHECK_NOT_NULL(code->start);
-
- // First, check for a breakpoint in order to set hit_break correctly.
- const char* skip = " ";
- int len = 1;
- // We need to store this because SIMD opcodes are LEB128-encoded; later,
- // when executing, we need to know where to read immediates from.
- uint32_t simd_opcode_length = 0;
- byte orig = code->start[pc];
- WasmOpcode opcode = static_cast<WasmOpcode>(orig);
- if (WasmOpcodes::IsPrefixOpcode(opcode)) {
- opcode = decoder.read_prefixed_opcode<Decoder::kNoValidate>(
- &code->start[pc], &simd_opcode_length);
- len += simd_opcode_length;
- }
- if (V8_UNLIKELY(orig == kInternalBreakpoint)) {
- orig = code->orig_start[pc];
- if (WasmOpcodes::IsPrefixOpcode(static_cast<WasmOpcode>(orig))) {
- opcode = decoder.read_prefixed_opcode<Decoder::kNoValidate>(
- &code->start[pc]);
- }
- if (SkipBreakpoint(code, pc)) {
- // skip breakpoint by switching on original code.
- skip = "[skip] ";
- } else {
- TRACE("@%-3zu: [break] %-24s:", pc, WasmOpcodes::OpcodeName(opcode));
- TraceValueStack();
- TRACE("\n");
- hit_break = true;
- break;
- }
- }
-
- // If max is 0, break. If max is positive (a limit is set), decrement it.
- if (max >= 0 && WasmOpcodes::IsBreakable(opcode)) {
- if (max == 0) break;
- --max;
- }
-
- USE(skip);
- TRACE("@%-3zu: %s%-24s:", pc, skip, WasmOpcodes::OpcodeName(opcode));
- TraceValueStack();
- TRACE("\n");
-
-#ifdef DEBUG
- // Compute the stack effect of this opcode, and verify later that the
- // stack was modified accordingly.
- std::pair<uint32_t, uint32_t> stack_effect =
- StackEffect(codemap_->module(), frames_.back().code->function->sig,
- code->orig_start + pc, code->orig_end);
- sp_t expected_new_stack_height =
- StackHeight() - stack_effect.first + stack_effect.second;
-#endif
-
- switch (orig) {
- case kExprNop:
- break;
- case kExprBlock:
- case kExprLoop:
- case kExprTry: {
- BlockTypeImmediate<Decoder::kNoValidate> imm(WasmFeatures::All(),
- &decoder, code->at(pc));
- len = 1 + imm.length;
- break;
- }
- case kExprIf: {
- BlockTypeImmediate<Decoder::kNoValidate> imm(WasmFeatures::All(),
- &decoder, code->at(pc));
- WasmValue cond = Pop();
- bool is_true = cond.to<uint32_t>() != 0;
- if (is_true) {
- // fall through to the true block.
- len = 1 + imm.length;
- TRACE(" true => fallthrough\n");
- } else {
- len = LookupTargetDelta(code, pc);
- TRACE(" false => @%zu\n", pc + len);
- }
- break;
- }
- case kExprElse:
- case kExprCatch: {
- len = LookupTargetDelta(code, pc);
- TRACE(" end => @%zu\n", pc + len);
- break;
- }
- case kExprThrow: {
- ExceptionIndexImmediate<Decoder::kNoValidate> imm(&decoder,
- code->at(pc));
- CommitPc(pc); // Needed for local unwinding.
- const WasmException* exception = &module()->exceptions[imm.index];
- if (!DoThrowException(exception, imm.index)) return;
- ReloadFromFrameOnException(&decoder, &code, &pc, &limit);
- continue; // Do not bump pc.
- }
- case kExprRethrow: {
- HandleScope handle_scope(isolate_); // Avoid leaking handles.
- WasmValue ex = Pop();
- if (ex.to_anyref()->IsNull()) return DoTrap(kTrapRethrowNullRef, pc);
- CommitPc(pc); // Needed for local unwinding.
- if (!DoRethrowException(ex)) return;
- ReloadFromFrameOnException(&decoder, &code, &pc, &limit);
- continue; // Do not bump pc.
- }
- case kExprBrOnExn: {
- BranchOnExceptionImmediate<Decoder::kNoValidate> imm(&decoder,
- code->at(pc));
- HandleScope handle_scope(isolate_); // Avoid leaking handles.
- WasmValue ex = Pop();
- Handle<Object> exception = ex.to_anyref();
- if (exception->IsNull()) return DoTrap(kTrapBrOnExnNullRef, pc);
- if (MatchingExceptionTag(exception, imm.index.index)) {
- imm.index.exception = &module()->exceptions[imm.index.index];
- DoUnpackException(imm.index.exception, exception);
- len = DoBreak(code, pc, imm.depth.depth);
- TRACE(" match => @%zu\n", pc + len);
- } else {
- Push(ex); // Exception remains on stack.
- TRACE(" false => fallthrough\n");
- len = 1 + imm.length;
- }
- break;
- }
- case kExprSelectWithType: {
- SelectTypeImmediate<Decoder::kNoValidate> imm(WasmFeatures::All(),
- &decoder, code->at(pc));
- len = 1 + imm.length;
- V8_FALLTHROUGH;
- }
- case kExprSelect: {
- HandleScope scope(isolate_); // Avoid leaking handles.
- WasmValue cond = Pop();
- WasmValue fval = Pop();
- WasmValue tval = Pop();
- Push(cond.to<int32_t>() != 0 ? tval : fval);
- break;
- }
- case kExprBr: {
- BranchDepthImmediate<Decoder::kNoValidate> imm(&decoder,
- code->at(pc));
- len = DoBreak(code, pc, imm.depth);
- TRACE(" br => @%zu\n", pc + len);
- break;
- }
- case kExprBrIf: {
- BranchDepthImmediate<Decoder::kNoValidate> imm(&decoder,
- code->at(pc));
- WasmValue cond = Pop();
- bool is_true = cond.to<uint32_t>() != 0;
- if (is_true) {
- len = DoBreak(code, pc, imm.depth);
- TRACE(" br_if => @%zu\n", pc + len);
- } else {
- TRACE(" false => fallthrough\n");
- len = 1 + imm.length;
- }
- break;
- }
- case kExprBrTable: {
- BranchTableImmediate<Decoder::kNoValidate> imm(&decoder,
- code->at(pc));
- BranchTableIterator<Decoder::kNoValidate> iterator(&decoder, imm);
- uint32_t key = Pop().to<uint32_t>();
- uint32_t depth = 0;
- if (key >= imm.table_count) key = imm.table_count;
- for (uint32_t i = 0; i <= key; i++) {
- DCHECK(iterator.has_next());
- depth = iterator.next();
- }
- len = key + DoBreak(code, pc + key, static_cast<size_t>(depth));
- TRACE(" br[%u] => @%zu\n", key, pc + key + len);
- break;
- }
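- // Example for the br_table lookup above (values illustrative): with three
- // table targets and one default (imm.table_count == 3), a popped key of 5
- // is clamped to 3, the iterator is advanced four times, and `depth` ends
- // up at the default target's depth.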
- case kExprReturn: {
- size_t arity = code->function->sig->return_count();
- if (!DoReturn(&decoder, &code, &pc, &limit, arity)) return;
- PAUSE_IF_BREAK_FLAG(AfterReturn);
- continue; // Do not bump pc.
- }
- case kExprUnreachable: {
- return DoTrap(kTrapUnreachable, pc);
- }
- case kExprEnd: {
- break;
- }
- case kExprI32Const: {
- ImmI32Immediate<Decoder::kNoValidate> imm(&decoder, code->at(pc));
- Push(WasmValue(imm.value));
- len = 1 + imm.length;
- break;
- }
- case kExprI64Const: {
- ImmI64Immediate<Decoder::kNoValidate> imm(&decoder, code->at(pc));
- Push(WasmValue(imm.value));
- len = 1 + imm.length;
- break;
- }
- case kExprF32Const: {
- ImmF32Immediate<Decoder::kNoValidate> imm(&decoder, code->at(pc));
- Push(WasmValue(imm.value));
- len = 1 + imm.length;
- break;
- }
- case kExprF64Const: {
- ImmF64Immediate<Decoder::kNoValidate> imm(&decoder, code->at(pc));
- Push(WasmValue(imm.value));
- len = 1 + imm.length;
- break;
- }
- case kExprRefNull: {
- Push(WasmValue(isolate_->factory()->null_value()));
- break;
- }
- case kExprRefFunc: {
- FunctionIndexImmediate<Decoder::kNoValidate> imm(&decoder,
- code->at(pc));
- HandleScope handle_scope(isolate_); // Avoid leaking handles.
-
- Handle<WasmExternalFunction> function =
- WasmInstanceObject::GetOrCreateWasmExternalFunction(
- isolate_, instance_object_, imm.index);
- Push(WasmValue(function));
- len = 1 + imm.length;
- break;
- }
- case kExprLocalGet: {
- LocalIndexImmediate<Decoder::kNoValidate> imm(&decoder, code->at(pc));
- HandleScope handle_scope(isolate_); // Avoid leaking handles.
- Push(GetStackValue(frames_.back().sp + imm.index));
- len = 1 + imm.length;
- break;
- }
- case kExprLocalSet: {
- LocalIndexImmediate<Decoder::kNoValidate> imm(&decoder, code->at(pc));
- HandleScope handle_scope(isolate_); // Avoid leaking handles.
- WasmValue val = Pop();
- SetStackValue(frames_.back().sp + imm.index, val);
- len = 1 + imm.length;
- break;
- }
- case kExprLocalTee: {
- LocalIndexImmediate<Decoder::kNoValidate> imm(&decoder, code->at(pc));
- HandleScope handle_scope(isolate_); // Avoid leaking handles.
- WasmValue val = Pop();
- SetStackValue(frames_.back().sp + imm.index, val);
- Push(val);
- len = 1 + imm.length;
- break;
- }
- case kExprDrop: {
- Drop();
- break;
- }
- case kExprCallFunction: {
- CallFunctionImmediate<Decoder::kNoValidate> imm(&decoder,
- code->at(pc));
- InterpreterCode* target = codemap()->GetCode(imm.index);
- if (target->function->imported) {
- CommitPc(pc);
- ExternalCallResult result =
- CallImportedFunction(target->function->func_index);
- switch (result.type) {
- case ExternalCallResult::INTERNAL:
- // The import is a function of this instance. Call it directly.
- DCHECK(!result.interpreter_code->function->imported);
- break;
- case ExternalCallResult::INVALID_FUNC:
- case ExternalCallResult::SIGNATURE_MISMATCH:
- // Direct calls are checked statically.
- UNREACHABLE();
- case ExternalCallResult::EXTERNAL_RETURNED:
- PAUSE_IF_BREAK_FLAG(AfterCall);
- len = 1 + imm.length;
- break;
- case ExternalCallResult::EXTERNAL_UNWOUND:
- return;
- case ExternalCallResult::EXTERNAL_CAUGHT:
- ReloadFromFrameOnException(&decoder, &code, &pc, &limit);
- continue; // Do not bump pc.
- }
- if (result.type != ExternalCallResult::INTERNAL) break;
- }
- // Execute an internal call.
- if (!DoCall(&decoder, target, &pc, &limit)) return;
- code = target;
- PAUSE_IF_BREAK_FLAG(AfterCall);
- continue; // Do not bump pc.
- } break;
-
- case kExprCallIndirect: {
- CallIndirectImmediate<Decoder::kNoValidate> imm(
- WasmFeatures::All(), &decoder, code->at(pc));
- uint32_t entry_index = Pop().to<uint32_t>();
- CommitPc(pc); // TODO(wasm): Be more disciplined about committing PC.
- ExternalCallResult result =
- CallIndirectFunction(imm.table_index, entry_index, imm.sig_index);
- switch (result.type) {
- case ExternalCallResult::INTERNAL:
- // The target is a function of this instance. Call it directly.
- if (!DoCall(&decoder, result.interpreter_code, &pc, &limit))
- return;
- code = result.interpreter_code;
- PAUSE_IF_BREAK_FLAG(AfterCall);
- continue; // Do not bump pc.
- case ExternalCallResult::INVALID_FUNC:
- return DoTrap(kTrapFuncInvalid, pc);
- case ExternalCallResult::SIGNATURE_MISMATCH:
- return DoTrap(kTrapFuncSigMismatch, pc);
- case ExternalCallResult::EXTERNAL_RETURNED:
- PAUSE_IF_BREAK_FLAG(AfterCall);
- len = 1 + imm.length;
- break;
- case ExternalCallResult::EXTERNAL_UNWOUND:
- return;
- case ExternalCallResult::EXTERNAL_CAUGHT:
- ReloadFromFrameOnException(&decoder, &code, &pc, &limit);
- continue; // Do not bump pc.
- }
- } break;
-
- case kExprReturnCall: {
- CallFunctionImmediate<Decoder::kNoValidate> imm(&decoder,
- code->at(pc));
- InterpreterCode* target = codemap()->GetCode(imm.index);
-
- if (!target->function->imported) {
- // Enter the found internal function.
- if (!DoReturnCall(&decoder, target, &pc, &limit)) return;
- code = target;
- PAUSE_IF_BREAK_FLAG(AfterCall);
-
- continue; // Do not bump pc.
- }
- // Function is imported.
- CommitPc(pc);
- ExternalCallResult result =
- CallImportedFunction(target->function->func_index);
- switch (result.type) {
- case ExternalCallResult::INTERNAL:
- // Cannot import internal functions.
- case ExternalCallResult::INVALID_FUNC:
- case ExternalCallResult::SIGNATURE_MISMATCH:
- // Direct calls are checked statically.
- UNREACHABLE();
- case ExternalCallResult::EXTERNAL_RETURNED:
- len = 1 + imm.length;
- break;
- case ExternalCallResult::EXTERNAL_UNWOUND:
- return;
- case ExternalCallResult::EXTERNAL_CAUGHT:
- ReloadFromFrameOnException(&decoder, &code, &pc, &limit);
- continue;
- }
- size_t arity = code->function->sig->return_count();
- if (!DoReturn(&decoder, &code, &pc, &limit, arity)) return;
- PAUSE_IF_BREAK_FLAG(AfterReturn);
- continue;
- } break;
-
- case kExprReturnCallIndirect: {
- CallIndirectImmediate<Decoder::kNoValidate> imm(
- WasmFeatures::All(), &decoder, code->at(pc));
- uint32_t entry_index = Pop().to<uint32_t>();
- CommitPc(pc); // TODO(wasm): Be more disciplined about committing PC.
-
- // TODO(wasm): Calling functions needs some refactoring to avoid
- // multi-exit code like this.
- ExternalCallResult result =
- CallIndirectFunction(imm.table_index, entry_index, imm.sig_index);
- switch (result.type) {
- case ExternalCallResult::INTERNAL: {
- InterpreterCode* target = result.interpreter_code;
-
- DCHECK(!target->function->imported);
-
- // The function belongs to this instance. Enter it directly.
- if (!DoReturnCall(&decoder, target, &pc, &limit)) return;
- code = result.interpreter_code;
- PAUSE_IF_BREAK_FLAG(AfterCall);
- continue; // Do not bump pc.
- }
- case ExternalCallResult::INVALID_FUNC:
- return DoTrap(kTrapFuncInvalid, pc);
- case ExternalCallResult::SIGNATURE_MISMATCH:
- return DoTrap(kTrapFuncSigMismatch, pc);
- case ExternalCallResult::EXTERNAL_RETURNED: {
- len = 1 + imm.length;
-
- size_t arity = code->function->sig->return_count();
- if (!DoReturn(&decoder, &code, &pc, &limit, arity)) return;
- PAUSE_IF_BREAK_FLAG(AfterCall);
- break;
- }
- case ExternalCallResult::EXTERNAL_UNWOUND:
- return;
-
- case ExternalCallResult::EXTERNAL_CAUGHT:
- ReloadFromFrameOnException(&decoder, &code, &pc, &limit);
- break;
- }
- } break;
-
- case kExprGlobalGet: {
- GlobalIndexImmediate<Decoder::kNoValidate> imm(&decoder,
- code->at(pc));
- HandleScope handle_scope(isolate_);
- Push(WasmInstanceObject::GetGlobalValue(
- instance_object_, module()->globals[imm.index]));
- len = 1 + imm.length;
- break;
- }
- case kExprGlobalSet: {
- GlobalIndexImmediate<Decoder::kNoValidate> imm(&decoder,
- code->at(pc));
- auto& global = module()->globals[imm.index];
- switch (global.type.kind()) {
-#define CASE_TYPE(valuetype, ctype) \
- case ValueType::valuetype: { \
- uint8_t* ptr = \
- WasmInstanceObject::GetGlobalStorage(instance_object_, global); \
- WriteLittleEndianValue<ctype>(reinterpret_cast<Address>(ptr), \
- Pop().to<ctype>()); \
- break; \
- }
- FOREACH_WASMVALUE_CTYPES(CASE_TYPE)
-#undef CASE_TYPE
- case ValueType::kAnyRef:
- case ValueType::kFuncRef:
- case ValueType::kNullRef:
- case ValueType::kExnRef:
- case ValueType::kRef:
- case ValueType::kOptRef:
- case ValueType::kEqRef: {
- // TODO(7748): Type checks or DCHECKs for ref types?
- HandleScope handle_scope(isolate_); // Avoid leaking handles.
- Handle<FixedArray> global_buffer; // The buffer of the global.
- uint32_t global_index; // The index into the buffer.
- std::tie(global_buffer, global_index) =
- WasmInstanceObject::GetGlobalBufferAndIndex(instance_object_,
- global);
- Handle<Object> ref = Pop().to_anyref();
- DCHECK_IMPLIES(global.type == kWasmNullRef, ref->IsNull());
- global_buffer->set(global_index, *ref);
- break;
- }
- case ValueType::kStmt:
- case ValueType::kBottom:
- UNREACHABLE();
- }
- len = 1 + imm.length;
- break;
- }
- case kExprTableGet: {
- TableIndexImmediate<Decoder::kNoValidate> imm(&decoder, code->at(pc));
- HandleScope handle_scope(isolate_);
- auto table = handle(
- WasmTableObject::cast(instance_object_->tables().get(imm.index)),
- isolate_);
- uint32_t table_size = table->current_length();
- uint32_t entry_index = Pop().to<uint32_t>();
- if (entry_index >= table_size) {
- return DoTrap(kTrapTableOutOfBounds, pc);
- }
- Handle<Object> value =
- WasmTableObject::Get(isolate_, table, entry_index);
- Push(WasmValue(value));
- len = 1 + imm.length;
- break;
- }
- case kExprTableSet: {
- TableIndexImmediate<Decoder::kNoValidate> imm(&decoder, code->at(pc));
- HandleScope handle_scope(isolate_);
- auto table = handle(
- WasmTableObject::cast(instance_object_->tables().get(imm.index)),
- isolate_);
- uint32_t table_size = table->current_length();
- Handle<Object> value = Pop().to_anyref();
- uint32_t entry_index = Pop().to<uint32_t>();
- if (entry_index >= table_size) {
- return DoTrap(kTrapTableOutOfBounds, pc);
- }
- WasmTableObject::Set(isolate_, table, entry_index, value);
- len = 1 + imm.length;
- break;
- }
-#define LOAD_CASE(name, ctype, mtype, rep) \
- case kExpr##name: { \
- if (!ExecuteLoad<ctype, mtype>(&decoder, code, pc, &len, \
- MachineRepresentation::rep)) \
- return; \
- break; \
- }
-
- LOAD_CASE(I32LoadMem8S, int32_t, int8_t, kWord8);
- LOAD_CASE(I32LoadMem8U, int32_t, uint8_t, kWord8);
- LOAD_CASE(I32LoadMem16S, int32_t, int16_t, kWord16);
- LOAD_CASE(I32LoadMem16U, int32_t, uint16_t, kWord16);
- LOAD_CASE(I64LoadMem8S, int64_t, int8_t, kWord8);
- LOAD_CASE(I64LoadMem8U, int64_t, uint8_t, kWord8);
- LOAD_CASE(I64LoadMem16S, int64_t, int16_t, kWord16);
- LOAD_CASE(I64LoadMem16U, int64_t, uint16_t, kWord16);
- LOAD_CASE(I64LoadMem32S, int64_t, int32_t, kWord32);
- LOAD_CASE(I64LoadMem32U, int64_t, uint32_t, kWord32);
- LOAD_CASE(I32LoadMem, int32_t, int32_t, kWord32);
- LOAD_CASE(I64LoadMem, int64_t, int64_t, kWord64);
- LOAD_CASE(F32LoadMem, Float32, uint32_t, kFloat32);
- LOAD_CASE(F64LoadMem, Float64, uint64_t, kFloat64);
-#undef LOAD_CASE
-
-#define STORE_CASE(name, ctype, mtype, rep) \
- case kExpr##name: { \
- if (!ExecuteStore<ctype, mtype>(&decoder, code, pc, &len, \
- MachineRepresentation::rep)) \
- return; \
- break; \
- }
-
- STORE_CASE(I32StoreMem8, int32_t, int8_t, kWord8);
- STORE_CASE(I32StoreMem16, int32_t, int16_t, kWord16);
- STORE_CASE(I64StoreMem8, int64_t, int8_t, kWord8);
- STORE_CASE(I64StoreMem16, int64_t, int16_t, kWord16);
- STORE_CASE(I64StoreMem32, int64_t, int32_t, kWord32);
- STORE_CASE(I32StoreMem, int32_t, int32_t, kWord32);
- STORE_CASE(I64StoreMem, int64_t, int64_t, kWord64);
- STORE_CASE(F32StoreMem, Float32, uint32_t, kFloat32);
- STORE_CASE(F64StoreMem, Float64, uint64_t, kFloat64);
-#undef STORE_CASE
-
-#define ASMJS_LOAD_CASE(name, ctype, mtype, defval) \
- case kExpr##name: { \
- uint32_t index = Pop().to<uint32_t>(); \
- ctype result; \
- Address addr = BoundsCheckMem<mtype>(0, index); \
- if (!addr) { \
- result = defval; \
- } else { \
- /* TODO(titzer): alignment for asmjs load mem? */ \
- result = static_cast<ctype>(*reinterpret_cast<mtype*>(addr)); \
- } \
- Push(WasmValue(result)); \
- break; \
- }
- ASMJS_LOAD_CASE(I32AsmjsLoadMem8S, int32_t, int8_t, 0);
- ASMJS_LOAD_CASE(I32AsmjsLoadMem8U, int32_t, uint8_t, 0);
- ASMJS_LOAD_CASE(I32AsmjsLoadMem16S, int32_t, int16_t, 0);
- ASMJS_LOAD_CASE(I32AsmjsLoadMem16U, int32_t, uint16_t, 0);
- ASMJS_LOAD_CASE(I32AsmjsLoadMem, int32_t, int32_t, 0);
- ASMJS_LOAD_CASE(F32AsmjsLoadMem, float, float,
- std::numeric_limits<float>::quiet_NaN());
- ASMJS_LOAD_CASE(F64AsmjsLoadMem, double, double,
- std::numeric_limits<double>::quiet_NaN());
-#undef ASMJS_LOAD_CASE
-
-#define ASMJS_STORE_CASE(name, ctype, mtype) \
- case kExpr##name: { \
- WasmValue val = Pop(); \
- uint32_t index = Pop().to<uint32_t>(); \
- Address addr = BoundsCheckMem<mtype>(0, index); \
- if (addr) { \
- *(reinterpret_cast<mtype*>(addr)) = static_cast<mtype>(val.to<ctype>()); \
- } \
- Push(val); \
- break; \
- }
-
- ASMJS_STORE_CASE(I32AsmjsStoreMem8, int32_t, int8_t);
- ASMJS_STORE_CASE(I32AsmjsStoreMem16, int32_t, int16_t);
- ASMJS_STORE_CASE(I32AsmjsStoreMem, int32_t, int32_t);
- ASMJS_STORE_CASE(F32AsmjsStoreMem, float, float);
- ASMJS_STORE_CASE(F64AsmjsStoreMem, double, double);
-#undef ASMJS_STORE_CASE
- case kExprMemoryGrow: {
- MemoryIndexImmediate<Decoder::kNoValidate> imm(&decoder,
- code->at(pc));
- uint32_t delta_pages = Pop().to<uint32_t>();
- HandleScope handle_scope(isolate_); // Avoid leaking handles.
- Handle<WasmMemoryObject> memory(instance_object_->memory_object(),
- isolate_);
- int32_t result =
- WasmMemoryObject::Grow(isolate_, memory, delta_pages);
- Push(WasmValue(result));
- len = 1 + imm.length;
- // Treat one grow_memory instruction like 1000 other instructions,
- // because it is a really expensive operation.
- if (max > 0) max = std::max(0, max - 1000);
- break;
- }
- case kExprMemorySize: {
- MemoryIndexImmediate<Decoder::kNoValidate> imm(&decoder,
- code->at(pc));
- Push(WasmValue(static_cast<uint32_t>(instance_object_->memory_size() /
- kWasmPageSize)));
- len = 1 + imm.length;
- break;
- }
- // We need to treat kExprI32ReinterpretF32 and kExprI64ReinterpretF64
- // specially to guarantee that the quiet bit of a NaN is preserved on
- // ia32 by the reinterpret casts.
- case kExprI32ReinterpretF32: {
- WasmValue val = Pop();
- Push(WasmValue(ExecuteI32ReinterpretF32(val)));
- break;
- }
- case kExprI64ReinterpretF64: {
- WasmValue val = Pop();
- Push(WasmValue(ExecuteI64ReinterpretF64(val)));
- break;
- }
-#define SIGN_EXTENSION_CASE(name, wtype, ntype) \
- case kExpr##name: { \
- ntype val = static_cast<ntype>(Pop().to<wtype>()); \
- Push(WasmValue(static_cast<wtype>(val))); \
- break; \
- }
- SIGN_EXTENSION_CASE(I32SExtendI8, int32_t, int8_t);
- SIGN_EXTENSION_CASE(I32SExtendI16, int32_t, int16_t);
- SIGN_EXTENSION_CASE(I64SExtendI8, int64_t, int8_t);
- SIGN_EXTENSION_CASE(I64SExtendI16, int64_t, int16_t);
- SIGN_EXTENSION_CASE(I64SExtendI32, int64_t, int32_t);
-#undef SIGN_EXTENSION_CASE
- case kExprRefIsNull: {
- HandleScope handle_scope(isolate_); // Avoid leaking handles.
- uint32_t result = Pop().to_anyref()->IsNull() ? 1 : 0;
- Push(WasmValue(result));
- break;
- }
- case kNumericPrefix: {
- if (!ExecuteNumericOp(opcode, &decoder, code, pc, &len)) return;
- break;
- }
- case kAtomicPrefix: {
- if (!ExecuteAtomicOp(opcode, &decoder, code, pc, &len)) return;
- break;
- }
- case kSimdPrefix: {
- if (!ExecuteSimdOp(opcode, &decoder, code, pc, &len,
- simd_opcode_length))
- return;
- break;
- }
-
-#define EXECUTE_SIMPLE_BINOP(name, ctype, op) \
- case kExpr##name: { \
- WasmValue rval = Pop(); \
- WasmValue lval = Pop(); \
- auto result = lval.to<ctype>() op rval.to<ctype>(); \
- possible_nondeterminism_ |= has_nondeterminism(result); \
- Push(WasmValue(result)); \
- break; \
- }
- FOREACH_SIMPLE_BINOP(EXECUTE_SIMPLE_BINOP)
-#undef EXECUTE_SIMPLE_BINOP
-
-#define EXECUTE_OTHER_BINOP(name, ctype) \
- case kExpr##name: { \
- TrapReason trap = kTrapCount; \
- ctype rval = Pop().to<ctype>(); \
- ctype lval = Pop().to<ctype>(); \
- auto result = Execute##name(lval, rval, &trap); \
- possible_nondeterminism_ |= has_nondeterminism(result); \
- if (trap != kTrapCount) return DoTrap(trap, pc); \
- Push(WasmValue(result)); \
- break; \
- }
- FOREACH_OTHER_BINOP(EXECUTE_OTHER_BINOP)
-#undef EXECUTE_OTHER_BINOP
-
-#define EXECUTE_UNOP(name, ctype, exec_fn) \
- case kExpr##name: { \
- TrapReason trap = kTrapCount; \
- ctype val = Pop().to<ctype>(); \
- auto result = exec_fn(val, &trap); \
- possible_nondeterminism_ |= has_nondeterminism(result); \
- if (trap != kTrapCount) return DoTrap(trap, pc); \
- Push(WasmValue(result)); \
- break; \
- }
-
-#define EXECUTE_OTHER_UNOP(name, ctype) EXECUTE_UNOP(name, ctype, Execute##name)
- FOREACH_OTHER_UNOP(EXECUTE_OTHER_UNOP)
-#undef EXECUTE_OTHER_UNOP
-
-#define EXECUTE_I32CONV_FLOATOP(name, out_type, in_type) \
- EXECUTE_UNOP(name, in_type, ExecuteConvert<out_type>)
- FOREACH_I32CONV_FLOATOP(EXECUTE_I32CONV_FLOATOP)
-#undef EXECUTE_I32CONV_FLOATOP
-#undef EXECUTE_UNOP
-
- default:
- FATAL("Unknown or unimplemented opcode #%d:%s", code->start[pc],
- OpcodeName(code->start[pc]));
- UNREACHABLE();
- }
-
-#ifdef DEBUG
- if (!WasmOpcodes::IsControlOpcode(opcode)) {
- DCHECK_EQ(expected_new_stack_height, StackHeight());
- }
-#endif
-
- pc += len;
- if (pc == limit) {
- // Fell off end of code; do an implicit return.
- TRACE("@%-3zu: ImplicitReturn\n", pc);
- size_t arity = code->function->sig->return_count();
- DCHECK_EQ(StackHeight() - arity, frames_.back().llimit());
- if (!DoReturn(&decoder, &code, &pc, &limit, arity)) return;
- PAUSE_IF_BREAK_FLAG(AfterReturn);
- }
-#undef PAUSE_IF_BREAK_FLAG
- }
-
- state_ = WasmInterpreter::PAUSED;
- break_pc_ = hit_break ? pc : kInvalidPc;
- CommitPc(pc);
- }
-
- WasmValue Pop() {
- DCHECK_GT(frames_.size(), 0);
- DCHECK_GT(StackHeight(), frames_.back().llimit()); // can't pop into locals
- StackValue stack_value = *--sp_;
- // Note that {StackHeight} depends on the current {sp} value, hence this
- // operation is split into two statements to ensure proper evaluation order.
- WasmValue val = stack_value.ExtractValue(this, StackHeight());
- stack_value.ClearValue(this, StackHeight());
- return val;
- }
-
- void Drop(int n = 1) {
- DCHECK_GE(StackHeight(), n);
- DCHECK_GT(frames_.size(), 0);
- // Check that we don't pop into locals.
- DCHECK_GE(StackHeight() - n, frames_.back().llimit());
- StackValue::ClearValues(this, StackHeight() - n, n);
- sp_ -= n;
- }
-
- WasmValue PopArity(size_t arity) {
- if (arity == 0) return WasmValue();
- CHECK_EQ(1, arity);
- return Pop();
- }
-
- void Push(WasmValue val) {
- DCHECK_NE(kWasmStmt, val.type());
- DCHECK_LE(1, stack_limit_ - sp_);
- DCHECK(StackValue::IsClearedValue(this, StackHeight()));
- StackValue stack_value(val, this, StackHeight());
- // Note that {StackHeight} depends on the current {sp} value, hence this
- // operation is split into two statements to ensure proper evaluation order.
- *sp_++ = stack_value;
- }
-
- void Push(WasmValue* vals, size_t arity) {
- DCHECK_LE(arity, stack_limit_ - sp_);
- for (WasmValue *val = vals, *end = vals + arity; val != end; ++val) {
- DCHECK_NE(kWasmStmt, val->type());
- Push(*val);
- }
- }
-
- void ResetStack(sp_t new_height) {
- DCHECK_LE(new_height, StackHeight()); // Only allowed to shrink.
- int count = static_cast<int>(StackHeight() - new_height);
- StackValue::ClearValues(this, new_height, count);
- sp_ = stack_.get() + new_height;
- }
-
- void EnsureStackSpace(size_t size) {
- if (V8_LIKELY(static_cast<size_t>(stack_limit_ - sp_) >= size)) return;
- size_t old_size = stack_limit_ - stack_.get();
- size_t requested_size =
- base::bits::RoundUpToPowerOfTwo64((sp_ - stack_.get()) + size);
- size_t new_size = Max(size_t{8}, Max(2 * old_size, requested_size));
- std::unique_ptr<StackValue[]> new_stack(new StackValue[new_size]);
- if (old_size > 0) {
- memcpy(new_stack.get(), stack_.get(), old_size * sizeof(*sp_));
- }
- sp_ = new_stack.get() + (sp_ - stack_.get());
- stack_ = std::move(new_stack);
- stack_limit_ = stack_.get() + new_size;
- // Also resize the reference stack to the same size.
- int grow_by = static_cast<int>(new_size - old_size);
- HandleScope handle_scope(isolate_); // Avoid leaking handles.
- Handle<FixedArray> old_ref_stack(reference_stack(), isolate_);
- Handle<FixedArray> new_ref_stack =
- isolate_->factory()->CopyFixedArrayAndGrow(old_ref_stack, grow_by);
- new_ref_stack->FillWithHoles(static_cast<int>(old_size),
- static_cast<int>(new_size));
- reference_stack_cell_->set_value(*new_ref_stack);
- }
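-
- // Growth example (slot counts are illustrative): with old_size == 24 and a
- // request for 10 free slots while sp_ is 20 slots in, requested_size rounds
- // 30 up to 32 and new_size = Max(8, Max(48, 32)) == 48; the reference stack
- // is then grown by the same 24 slots so value-stack and reference-stack
- // indices stay aligned.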
-
- sp_t StackHeight() { return sp_ - stack_.get(); }
-
- void TraceValueStack() {
-#ifdef DEBUG
- if (!FLAG_trace_wasm_interpreter) return;
- HandleScope handle_scope(isolate_); // Avoid leaking handles.
- Frame* top = frames_.size() > 0 ? &frames_.back() : nullptr;
- sp_t sp = top ? top->sp : 0;
- sp_t plimit = top ? top->plimit() : 0;
- sp_t llimit = top ? top->llimit() : 0;
- for (size_t i = sp; i < StackHeight(); ++i) {
- if (i < plimit) {
- PrintF(" p%zu:", i);
- } else if (i < llimit) {
- PrintF(" l%zu:", i);
- } else {
- PrintF(" s%zu:", i);
- }
- WasmValue val = GetStackValue(i);
- switch (val.type().kind()) {
- case ValueType::kI32:
- PrintF("i32:%d", val.to<int32_t>());
- break;
- case ValueType::kI64:
- PrintF("i64:%" PRId64 "", val.to<int64_t>());
- break;
- case ValueType::kF32:
- PrintF("f32:%f", val.to<float>());
- break;
- case ValueType::kF64:
- PrintF("f64:%lf", val.to<double>());
- break;
- case ValueType::kS128: {
- // This defaults to tracing all S128 values as i32x4 values for now;
- // once there is enough state to know what type of values are on the
- // stack, the right format should be printed here.
- int4 s = val.to_s128().to_i32x4();
- PrintF("i32x4:%d,%d,%d,%d", s.val[0], s.val[1], s.val[2], s.val[3]);
- break;
- }
- case ValueType::kAnyRef: {
- Handle<Object> ref = val.to_anyref();
- if (ref->IsNull()) {
- PrintF("ref:null");
- } else {
- PrintF("ref:0x%" V8PRIxPTR, ref->ptr());
- }
- break;
- }
- case ValueType::kStmt:
- PrintF("void");
- break;
- case ValueType::kFuncRef:
- case ValueType::kExnRef:
- case ValueType::kNullRef:
- case ValueType::kRef:
- case ValueType::kOptRef:
- case ValueType::kEqRef:
- PrintF("(func|null|exn|opt|eq|)ref:unimplemented");
- break;
- case ValueType::kBottom:
- UNREACHABLE();
- break;
- }
- }
-#endif // DEBUG
- }
-
- ExternalCallResult TryHandleException(Isolate* isolate) {
- DCHECK(isolate->has_pending_exception()); // Assume exceptional return.
- if (HandleException(isolate) == WasmInterpreter::Thread::UNWOUND) {
- return {ExternalCallResult::EXTERNAL_UNWOUND};
- }
- return {ExternalCallResult::EXTERNAL_CAUGHT};
- }
-
- ExternalCallResult CallExternalWasmFunction(Isolate* isolate,
- Handle<Object> object_ref,
- const WasmCode* code,
- const FunctionSig* sig) {
- int num_args = static_cast<int>(sig->parameter_count());
- WasmFeatures enabled_features = WasmFeatures::FromIsolate(isolate);
-
- if (code->kind() == WasmCode::kWasmToJsWrapper &&
- !IsJSCompatibleSignature(sig, enabled_features)) {
- Drop(num_args); // Pop arguments before throwing.
- isolate->Throw(*isolate->factory()->NewTypeError(
- MessageTemplate::kWasmTrapTypeError));
- return TryHandleException(isolate);
- }
-
- Handle<WasmDebugInfo> debug_info(instance_object_->debug_info(), isolate);
- Handle<Code> wasm_entry = WasmDebugInfo::GetCWasmEntry(debug_info, sig);
-
- TRACE(" => Calling external wasm function\n");
-
- // Copy the arguments to one buffer.
- CWasmArgumentsPacker packer(CWasmArgumentsPacker::TotalSize(sig));
- sp_t base_index = StackHeight() - num_args;
- for (int i = 0; i < num_args; ++i) {
- WasmValue arg = GetStackValue(base_index + i);
- switch (sig->GetParam(i).kind()) {
- case ValueType::kI32:
- packer.Push(arg.to<uint32_t>());
- break;
- case ValueType::kI64:
- packer.Push(arg.to<uint64_t>());
- break;
- case ValueType::kF32:
- packer.Push(arg.to<float>());
- break;
- case ValueType::kF64:
- packer.Push(arg.to<double>());
- break;
- case ValueType::kAnyRef:
- case ValueType::kFuncRef:
- case ValueType::kNullRef:
- case ValueType::kExnRef:
- DCHECK_IMPLIES(sig->GetParam(i) == kWasmNullRef,
- arg.to_anyref()->IsNull());
- packer.Push(arg.to_anyref()->ptr());
- break;
- default:
- UNIMPLEMENTED();
- }
- }
-
- Address call_target = code->instruction_start();
- Execution::CallWasm(isolate, wasm_entry, call_target, object_ref,
- packer.argv());
- TRACE(" => External wasm function returned%s\n",
- isolate->has_pending_exception() ? " with exception" : "");
-
- // Pop arguments off the stack.
- Drop(num_args);
-
- if (isolate->has_pending_exception()) {
- return TryHandleException(isolate);
- }
-
- // Push return values.
- packer.Reset();
- for (size_t i = 0; i < sig->return_count(); i++) {
- switch (sig->GetReturn(i).kind()) {
- case ValueType::kI32:
- Push(WasmValue(packer.Pop<uint32_t>()));
- break;
- case ValueType::kI64:
- Push(WasmValue(packer.Pop<uint64_t>()));
- break;
- case ValueType::kF32:
- Push(WasmValue(packer.Pop<float>()));
- break;
- case ValueType::kF64:
- Push(WasmValue(packer.Pop<double>()));
- break;
- case ValueType::kAnyRef:
- case ValueType::kFuncRef:
- case ValueType::kNullRef:
- case ValueType::kExnRef: {
- Handle<Object> ref(Object(packer.Pop<Address>()), isolate);
- DCHECK_IMPLIES(sig->GetReturn(i) == kWasmNullRef, ref->IsNull());
- Push(WasmValue(ref));
- break;
- }
- default:
- UNIMPLEMENTED();
- }
- }
- return {ExternalCallResult::EXTERNAL_RETURNED};
- }
-
- static WasmCode* GetTargetCode(Isolate* isolate, Address target) {
- WasmCodeManager* code_manager = isolate->wasm_engine()->code_manager();
- NativeModule* native_module = code_manager->LookupNativeModule(target);
- WasmCode* code = native_module->Lookup(target);
- if (code->kind() == WasmCode::kJumpTable) {
- uint32_t func_index =
- native_module->GetFunctionIndexFromJumpTableSlot(target);
-
- if (!native_module->HasCode(func_index)) {
- bool success = CompileLazy(isolate, native_module, func_index);
- if (!success) {
- DCHECK(isolate->has_pending_exception());
- return nullptr;
- }
- }
-
- return native_module->GetCode(func_index);
- }
- DCHECK_EQ(code->instruction_start(), target);
- return code;
- }
-
- ExternalCallResult CallImportedFunction(uint32_t function_index) {
- DCHECK_GT(module()->num_imported_functions, function_index);
- HandleScope handle_scope(isolate_); // Avoid leaking handles.
-
- ImportedFunctionEntry entry(instance_object_, function_index);
- Handle<Object> object_ref(entry.object_ref(), isolate_);
- WasmCode* code = GetTargetCode(isolate_, entry.target());
-
- // In case a function's body is invalid and the function is lazily
- // validated and compiled, we may get an exception.
- if (code == nullptr) return TryHandleException(isolate_);
-
- const FunctionSig* sig = module()->functions[function_index].sig;
- return CallExternalWasmFunction(isolate_, object_ref, code, sig);
- }
-
- ExternalCallResult CallIndirectFunction(uint32_t table_index,
- uint32_t entry_index,
- uint32_t sig_index) {
- HandleScope handle_scope(isolate_); // Avoid leaking handles.
- uint32_t expected_sig_id = module()->signature_ids[sig_index];
- DCHECK_EQ(expected_sig_id,
- module()->signature_map.Find(*module()->signature(sig_index)));
- // Bounds check against table size.
- if (entry_index >=
- static_cast<uint32_t>(WasmInstanceObject::IndirectFunctionTableSize(
- isolate_, instance_object_, table_index))) {
- return {ExternalCallResult::INVALID_FUNC};
- }
-
- IndirectFunctionTableEntry entry(instance_object_, table_index,
- entry_index);
- // Signature check.
- if (entry.sig_id() != static_cast<int32_t>(expected_sig_id)) {
- return {ExternalCallResult::SIGNATURE_MISMATCH};
- }
-
- const FunctionSig* signature = module()->signature(sig_index);
- Handle<Object> object_ref = handle(entry.object_ref(), isolate_);
- WasmCode* code = GetTargetCode(isolate_, entry.target());
-
- // In case a function's body is invalid and the function is lazily
- // validated and compiled, we may get an exception.
- if (code == nullptr) return TryHandleException(isolate_);
-
- if (!object_ref->IsWasmInstanceObject() || /* call to an import */
- !instance_object_.is_identical_to(object_ref) /* cross-instance */) {
- return CallExternalWasmFunction(isolate_, object_ref, code, signature);
- }
-
- DCHECK_EQ(WasmCode::kFunction, code->kind());
- return {ExternalCallResult::INTERNAL, codemap()->GetCode(code->index())};
- }
-
- inline Activation current_activation() {
- return activations_.empty() ? Activation(0, 0) : activations_.back();
- }
-};
-
-class InterpretedFrameImpl {
- public:
- InterpretedFrameImpl(ThreadImpl* thread, int index)
- : thread_(thread), index_(index) {
- DCHECK_LE(0, index);
- }
-
- const WasmFunction* function() const { return frame()->code->function; }
-
- int pc() const {
- DCHECK_LE(0, frame()->pc);
- DCHECK_GE(kMaxInt, frame()->pc);
- return static_cast<int>(frame()->pc);
- }
-
- int GetParameterCount() const {
- DCHECK_GE(kMaxInt, function()->sig->parameter_count());
- return static_cast<int>(function()->sig->parameter_count());
- }
-
- int GetLocalCount() const {
- size_t num_locals = function()->sig->parameter_count() +
- frame()->code->locals.type_list.size();
- DCHECK_GE(kMaxInt, num_locals);
- return static_cast<int>(num_locals);
- }
-
- int GetStackHeight() const {
- bool is_top_frame =
- static_cast<size_t>(index_) + 1 == thread_->frames_.size();
- size_t stack_limit =
- is_top_frame ? thread_->StackHeight() : thread_->frames_[index_ + 1].sp;
- DCHECK_LE(frame()->sp, stack_limit);
- size_t frame_size = stack_limit - frame()->sp;
- DCHECK_LE(GetLocalCount(), frame_size);
- return static_cast<int>(frame_size) - GetLocalCount();
- }
-
- WasmValue GetLocalValue(int index) const {
- ThreadImpl::ReferenceStackScope stack_scope(thread_);
- DCHECK_LE(0, index);
- DCHECK_GT(GetLocalCount(), index);
- return thread_->GetStackValue(static_cast<int>(frame()->sp) + index);
- }
-
- WasmValue GetStackValue(int index) const {
- ThreadImpl::ReferenceStackScope stack_scope(thread_);
- DCHECK_LE(0, index);
- // Index must be within the number of stack values of this frame.
- DCHECK_GT(GetStackHeight(), index);
- return thread_->GetStackValue(static_cast<int>(frame()->sp) +
- GetLocalCount() + index);
- }
-
- private:
- ThreadImpl* thread_;
- int index_;
-
- ThreadImpl::Frame* frame() const {
- DCHECK_GT(thread_->frames_.size(), index_);
- return &thread_->frames_[index_];
- }
-};
-
-namespace {
-
-// Converters between WasmInterpreter::Thread and WasmInterpreter::ThreadImpl.
-// Thread* is the public interface, without knowledge of the object layout.
-// This cast is potentially risky, but as long as we always cast it back before
-// accessing any data, it should be fine. UBSan is not complaining.
-WasmInterpreter::Thread* ToThread(ThreadImpl* impl) {
- return reinterpret_cast<WasmInterpreter::Thread*>(impl);
-}
-ThreadImpl* ToImpl(WasmInterpreter::Thread* thread) {
- return reinterpret_cast<ThreadImpl*>(thread);
-}
-
-// Same conversion for InterpretedFrame and InterpretedFrameImpl.
-InterpretedFrame* ToFrame(InterpretedFrameImpl* impl) {
- return reinterpret_cast<InterpretedFrame*>(impl);
-}
-const InterpretedFrameImpl* ToImpl(const InterpretedFrame* frame) {
- return reinterpret_cast<const InterpretedFrameImpl*>(frame);
-}
-
-} // namespace
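
// A minimal, self-contained sketch (hypothetical Widget/WidgetImpl names, not
// part of this patch) of the cast-based pimpl pattern used by the converters
// above: the public handle type has no fields of its own, is never constructed
// directly, and every member function immediately casts back to the
// implementation type before touching any data.
#include <cstdio>

class WidgetImpl;  // implementation type, private to one translation unit

class Widget {     // public handle; only ever obtained by casting
 public:
  Widget() = delete;
  int value();
};

class WidgetImpl {
 public:
  int value() const { return 42; }
};

Widget* ToWidget(WidgetImpl* impl) {
  return reinterpret_cast<Widget*>(impl);
}
WidgetImpl* ToImpl(Widget* widget) {
  return reinterpret_cast<WidgetImpl*>(widget);
}

int Widget::value() { return ToImpl(this)->value(); }

int main() {
  WidgetImpl impl;
  std::printf("%d\n", ToWidget(&impl)->value());  // prints 42
  return 0;
}
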
-
-//============================================================================
-// Implementation of the pimpl idiom for WasmInterpreter::Thread.
-// Instead of placing a pointer to the ThreadImpl inside of the Thread object,
-// we just reinterpret_cast them. ThreadImpls are only allocated inside this
-// translation unit anyway.
-//============================================================================
-WasmInterpreter::State WasmInterpreter::Thread::state() {
- ThreadImpl* impl = ToImpl(this);
- ThreadImpl::ReferenceStackScope stack_scope(impl);
- return impl->state();
-}
-void WasmInterpreter::Thread::InitFrame(const WasmFunction* function,
- WasmValue* args) {
- ThreadImpl* impl = ToImpl(this);
- ThreadImpl::ReferenceStackScope stack_scope(impl);
- impl->InitFrame(function, args);
-}
-WasmInterpreter::State WasmInterpreter::Thread::Run(int num_steps) {
- ThreadImpl* impl = ToImpl(this);
- ThreadImpl::ReferenceStackScope stack_scope(impl);
- return impl->Run(num_steps);
-}
-void WasmInterpreter::Thread::Pause() { return ToImpl(this)->Pause(); }
-void WasmInterpreter::Thread::Reset() {
- ThreadImpl* impl = ToImpl(this);
- ThreadImpl::ReferenceStackScope stack_scope(impl);
- return impl->Reset();
-}
-WasmInterpreter::Thread::ExceptionHandlingResult
-WasmInterpreter::Thread::RaiseException(Isolate* isolate,
- Handle<Object> exception) {
- ThreadImpl* impl = ToImpl(this);
- ThreadImpl::ReferenceStackScope stack_scope(impl);
- return impl->RaiseException(isolate, exception);
-}
-pc_t WasmInterpreter::Thread::GetBreakpointPc() {
- return ToImpl(this)->GetBreakpointPc();
-}
-int WasmInterpreter::Thread::GetFrameCount() {
- return ToImpl(this)->GetFrameCount();
-}
-WasmInterpreter::FramePtr WasmInterpreter::Thread::GetFrame(int index) {
- DCHECK_LE(0, index);
- DCHECK_GT(GetFrameCount(), index);
- return FramePtr(ToFrame(new InterpretedFrameImpl(ToImpl(this), index)));
-}
-WasmValue WasmInterpreter::Thread::GetReturnValue(int index) {
- ThreadImpl* impl = ToImpl(this);
- ThreadImpl::ReferenceStackScope stack_scope(impl);
- return impl->GetReturnValue(index);
-}
-TrapReason WasmInterpreter::Thread::GetTrapReason() {
- return ToImpl(this)->GetTrapReason();
-}
-bool WasmInterpreter::Thread::PossibleNondeterminism() {
- return ToImpl(this)->PossibleNondeterminism();
-}
-uint64_t WasmInterpreter::Thread::NumInterpretedCalls() {
- return ToImpl(this)->NumInterpretedCalls();
-}
-void WasmInterpreter::Thread::AddBreakFlags(uint8_t flags) {
- ToImpl(this)->AddBreakFlags(flags);
-}
-void WasmInterpreter::Thread::ClearBreakFlags() {
- ToImpl(this)->ClearBreakFlags();
-}
-uint32_t WasmInterpreter::Thread::NumActivations() {
- return ToImpl(this)->NumActivations();
-}
-uint32_t WasmInterpreter::Thread::StartActivation() {
- ThreadImpl* impl = ToImpl(this);
- ThreadImpl::ReferenceStackScope stack_scope(impl);
- return impl->StartActivation();
-}
-void WasmInterpreter::Thread::FinishActivation(uint32_t id) {
- ThreadImpl* impl = ToImpl(this);
- ThreadImpl::ReferenceStackScope stack_scope(impl);
- impl->FinishActivation(id);
-}
-uint32_t WasmInterpreter::Thread::ActivationFrameBase(uint32_t id) {
- ThreadImpl* impl = ToImpl(this);
- ThreadImpl::ReferenceStackScope stack_scope(impl);
- return impl->ActivationFrameBase(id);
-}
-
-//============================================================================
-// The implementation details of the interpreter.
-//============================================================================
-class WasmInterpreterInternals {
- public:
- // Create a copy of the module bytes for the interpreter, since the passed
- // pointer might be invalidated after constructing the interpreter.
- const ZoneVector<uint8_t> module_bytes_;
- CodeMap codemap_;
- std::vector<ThreadImpl> threads_;
-
- WasmInterpreterInternals(Zone* zone, const WasmModule* module,
- const ModuleWireBytes& wire_bytes,
- Handle<WasmInstanceObject> instance_object)
- : module_bytes_(wire_bytes.start(), wire_bytes.end(), zone),
- codemap_(module, module_bytes_.data(), zone) {
- threads_.emplace_back(zone, &codemap_, instance_object);
- }
-};
-
-namespace {
-void NopFinalizer(const v8::WeakCallbackInfo<void>& data) {
- Address* global_handle_location =
- reinterpret_cast<Address*>(data.GetParameter());
- GlobalHandles::Destroy(global_handle_location);
-}
-
-Handle<WasmInstanceObject> MakeWeak(
- Isolate* isolate, Handle<WasmInstanceObject> instance_object) {
- Handle<WasmInstanceObject> weak_instance =
- isolate->global_handles()->Create<WasmInstanceObject>(*instance_object);
- Address* global_handle_location = weak_instance.location();
- GlobalHandles::MakeWeak(global_handle_location, global_handle_location,
- &NopFinalizer, v8::WeakCallbackType::kParameter);
- return weak_instance;
-}
-} // namespace
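
// A hedged sketch of the same "hold the instance weakly" idea using the
// *public* embedder API (v8::Global / SetWeak) rather than the internal
// GlobalHandles calls above; WeakHolder, HoldWeakly and OnGone are made-up
// names for illustration only.
#include <v8.h>

struct WeakHolder {
  v8::Global<v8::Object> handle;
};

void OnGone(const v8::WeakCallbackInfo<WeakHolder>& info) {
  WeakHolder* holder = info.GetParameter();
  holder->handle.Reset();  // dispose the weak handle
  delete holder;           // analogous to GlobalHandles::Destroy above
}

WeakHolder* HoldWeakly(v8::Isolate* isolate, v8::Local<v8::Object> object) {
  auto* holder = new WeakHolder;
  holder->handle.Reset(isolate, object);
  // Weak: the handle no longer keeps {object} alive on its own.
  holder->handle.SetWeak(holder, OnGone, v8::WeakCallbackType::kParameter);
  return holder;
}
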
-
-//============================================================================
-// Implementation of the public interface of the interpreter.
-//============================================================================
-WasmInterpreter::WasmInterpreter(Isolate* isolate, const WasmModule* module,
- const ModuleWireBytes& wire_bytes,
- Handle<WasmInstanceObject> instance_object)
- : zone_(isolate->allocator(), ZONE_NAME),
- internals_(new WasmInterpreterInternals(
- &zone_, module, wire_bytes, MakeWeak(isolate, instance_object))) {}
-
-// The destructor is here so we can forward declare {WasmInterpreterInternals}
-// used in the {unique_ptr} in the header.
-WasmInterpreter::~WasmInterpreter() {}
-
-void WasmInterpreter::Run() { internals_->threads_[0].Run(); }
-
-void WasmInterpreter::Pause() { internals_->threads_[0].Pause(); }
-
-void WasmInterpreter::PrepareStepIn(const WasmFunction* function) {
-  // Set a breakpoint at the start of the function.
- InterpreterCode* code = internals_->codemap_.GetCode(function);
- pc_t pc = code->locals.encoded_size;
- SetBreakpoint(function, pc, true);
-}
-
-bool WasmInterpreter::SetBreakpoint(const WasmFunction* function, pc_t pc,
- bool enabled) {
- InterpreterCode* code = internals_->codemap_.GetCode(function);
- size_t size = static_cast<size_t>(code->end - code->start);
- // Check bounds for {pc}.
- if (pc < code->locals.encoded_size || pc >= size) return false;
- // Make a copy of the code before enabling a breakpoint.
- if (enabled && code->orig_start == code->start) {
- code->start = reinterpret_cast<byte*>(zone_.New(size));
- memcpy(code->start, code->orig_start, size);
- code->end = code->start + size;
- }
- bool prev = code->start[pc] == kInternalBreakpoint;
- if (enabled) {
- code->start[pc] = kInternalBreakpoint;
- } else {
- code->start[pc] = code->orig_start[pc];
- }
- return prev;
-}
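
// A generic, self-contained sketch (not the V8 API) of the copy-on-write
// breakpoint patching performed by SetBreakpoint above: the original bytes
// stay untouched, a private copy is made when the first breakpoint is set,
// and the return value reports the previous breakpoint state at {pc}.
#include <cstddef>
#include <vector>

constexpr unsigned char kBreakOpcode = 0xFF;  // stand-in for kInternalBreakpoint

struct PatchableCode {
  std::vector<unsigned char> orig;     // immutable original bytecode
  std::vector<unsigned char> patched;  // lazily created copy with breakpoints
};

bool SetBreak(PatchableCode* code, size_t pc, bool enabled) {
  if (pc >= code->orig.size()) return false;               // bounds check
  if (code->patched.empty()) code->patched = code->orig;   // copy on first use
  bool prev = code->patched[pc] == kBreakOpcode;
  code->patched[pc] = enabled ? kBreakOpcode : code->orig[pc];
  return prev;
}
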
-
-bool WasmInterpreter::GetBreakpoint(const WasmFunction* function, pc_t pc) {
- InterpreterCode* code = internals_->codemap_.GetCode(function);
- size_t size = static_cast<size_t>(code->end - code->start);
- // Check bounds for {pc}.
- if (pc < code->locals.encoded_size || pc >= size) return false;
- // Check if a breakpoint is present at that place in the code.
- return code->start[pc] == kInternalBreakpoint;
-}
-
-bool WasmInterpreter::SetTracing(const WasmFunction* function, bool enabled) {
- UNIMPLEMENTED();
- return false;
-}
-
-int WasmInterpreter::GetThreadCount() {
- return 1; // only one thread for now.
-}
-
-WasmInterpreter::Thread* WasmInterpreter::GetThread(int id) {
- CHECK_EQ(0, id); // only one thread for now.
- return ToThread(&internals_->threads_[id]);
-}
-
-void WasmInterpreter::AddFunctionForTesting(const WasmFunction* function) {
- internals_->codemap_.AddFunction(function, nullptr, nullptr);
-}
-
-void WasmInterpreter::SetFunctionCodeForTesting(const WasmFunction* function,
- const byte* start,
- const byte* end) {
- internals_->codemap_.SetFunctionCode(function, start, end);
-}
-
-ControlTransferMap WasmInterpreter::ComputeControlTransfersForTesting(
- Zone* zone, const WasmModule* module, const byte* start, const byte* end) {
- // Create some dummy structures, to avoid special-casing the implementation
- // just for testing.
- FunctionSig sig(0, 0, nullptr);
- WasmFunction function{&sig, // sig
- 0, // func_index
- 0, // sig_index
- {0, 0}, // code
- false, // imported
- false, // exported
- false}; // declared
- InterpreterCode code{
- &function, BodyLocalDecls(zone), start, end, nullptr, nullptr, nullptr};
-
- // Now compute and return the control transfers.
- SideTable side_table(zone, module, &code);
- return side_table.map_;
-}
-
-//============================================================================
-// Implementation of the frame inspection interface.
-//============================================================================
-const WasmFunction* InterpretedFrame::function() const {
- return ToImpl(this)->function();
-}
-int InterpretedFrame::pc() const { return ToImpl(this)->pc(); }
-int InterpretedFrame::GetParameterCount() const {
- return ToImpl(this)->GetParameterCount();
-}
-int InterpretedFrame::GetLocalCount() const {
- return ToImpl(this)->GetLocalCount();
-}
-int InterpretedFrame::GetStackHeight() const {
- return ToImpl(this)->GetStackHeight();
-}
-WasmValue InterpretedFrame::GetLocalValue(int index) const {
- return ToImpl(this)->GetLocalValue(index);
-}
-WasmValue InterpretedFrame::GetStackValue(int index) const {
- return ToImpl(this)->GetStackValue(index);
-}
-void InterpretedFrameDeleter::operator()(InterpretedFrame* ptr) {
- delete ToImpl(ptr);
-}
-
-#undef TRACE
-#undef LANE
-#undef FOREACH_INTERNAL_OPCODE
-#undef FOREACH_SIMPLE_BINOP
-#undef FOREACH_OTHER_BINOP
-#undef FOREACH_I32CONV_FLOATOP
-#undef FOREACH_OTHER_UNOP
-
-} // namespace wasm
-} // namespace internal
-} // namespace v8
diff --git a/chromium/v8/src/wasm/wasm-interpreter.h b/chromium/v8/src/wasm/wasm-interpreter.h
deleted file mode 100644
index 5a154be6982..00000000000
--- a/chromium/v8/src/wasm/wasm-interpreter.h
+++ /dev/null
@@ -1,228 +0,0 @@
-// Copyright 2016 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef V8_WASM_WASM_INTERPRETER_H_
-#define V8_WASM_WASM_INTERPRETER_H_
-
-#include <memory>
-
-#include "src/wasm/wasm-opcodes.h"
-#include "src/wasm/wasm-value.h"
-#include "src/zone/zone-containers.h"
-
-namespace v8 {
-
-namespace internal {
-class WasmInstanceObject;
-
-namespace wasm {
-
-// Forward declarations.
-struct ModuleWireBytes;
-struct WasmFunction;
-struct WasmModule;
-class WasmInterpreterInternals;
-
-using pc_t = size_t;
-using sp_t = size_t;
-using pcdiff_t = int32_t;
-using spdiff_t = uint32_t;
-
-constexpr pc_t kInvalidPc = 0x80000000;
-
-struct ControlTransferEntry {
-  // Distance from the instruction to the label to jump to (positive for
-  // forward jumps, negative for backward jumps).
- pcdiff_t pc_diff;
- // Delta by which to decrease the stack height.
- spdiff_t sp_diff;
- // Arity of the block we jump to.
- uint32_t target_arity;
-};
-
-using ControlTransferMap = ZoneMap<pc_t, ControlTransferEntry>;
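
// Illustrative only: the numbers below are made up, not derived from real
// bytecode. They show how a taken branch could be described by a
// ControlTransferEntry and recorded in the ControlTransferMap, keyed by the
// pc of the branch instruction.
#include "src/wasm/wasm-interpreter.h"  // the header being removed above

namespace wi = v8::internal::wasm;

void RecordExampleTransfer(wi::ControlTransferMap* map) {
  wi::ControlTransferEntry entry;
  entry.pc_diff = 14;       // jump 14 bytes forward from the branch
  entry.sp_diff = 3;        // drop 3 stack values when taking the branch
  entry.target_arity = 1;   // the target block keeps 1 result value
  map->emplace(20, entry);  // the branch instruction sits at pc 20
}
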
-
-// Representation of frames within the interpreter.
-//
-// Layout of a frame:
-// -----------------
-// stack slot #N  ‾\.
-// ...             |  stack entries: GetStackHeight(); GetStackValue()
-// stack slot #0  _/·
-// local #L       ‾\.
-// ...             |  locals: GetLocalCount(); GetLocalValue()
-// local #P+1      |
-// param #P        |   ‾\.
-// ...             |    |  parameters: GetParameterCount(); GetLocalValue()
-// param #0       _/·  _/·
-// -----------------
-//
-class V8_EXPORT_PRIVATE InterpretedFrame {
- public:
- const WasmFunction* function() const;
- int pc() const;
-
- int GetParameterCount() const;
- int GetLocalCount() const;
- int GetStackHeight() const;
- WasmValue GetLocalValue(int index) const;
- WasmValue GetStackValue(int index) const;
-
- private:
- friend class WasmInterpreter;
-  // Don't instantiate InterpretedFrames; they will be allocated as
- // InterpretedFrameImpl in the interpreter implementation.
- InterpretedFrame() = delete;
- DISALLOW_COPY_AND_ASSIGN(InterpretedFrame);
-};
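
// A hedged usage sketch (DumpFrame and the loop bodies are illustrative, not
// part of this patch) of the frame layout documented above: the parameters
// are the first locals, and the operand stack sits on top of the locals.
#include "src/wasm/wasm-interpreter.h"  // the header being removed above

namespace wi = v8::internal::wasm;

void DumpFrame(const wi::InterpretedFrame& frame) {
  int params = frame.GetParameterCount();
  int locals = frame.GetLocalCount();   // includes the parameters
  int stack = frame.GetStackHeight();   // entries above the locals
  for (int i = 0; i < locals; ++i) {
    wi::WasmValue v = frame.GetLocalValue(i);
    (void)v;  // slots [0, params) hold the parameters
  }
  for (int i = 0; i < stack; ++i) {
    wi::WasmValue v = frame.GetStackValue(i);
    (void)v;  // operand stack entry i of this frame
  }
  (void)params;
}
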
-
-// Deleter struct to delete the underlying InterpretedFrameImpl without
-// violating language specifications.
-struct V8_EXPORT_PRIVATE InterpretedFrameDeleter {
- void operator()(InterpretedFrame* ptr);
-};
-
-// An interpreter capable of executing WebAssembly.
-class V8_EXPORT_PRIVATE WasmInterpreter {
- public:
- // State machine for a Thread:
-  //    +----------------------------------------------------------+
-  //    |                       +--------Run()/Step()---------+    |
-  //    V                       V                             |    |
-  // STOPPED ---Run()-->  RUNNING ------Pause()-----+-> PAUSED <--+
-  //    ^                   | | |                   |       /       |
-  //    +--- Exception ---+ | | +--- Breakpoint ---+   RaiseException() <--+
-  //                        | |                                            |
-  //                        | +---------- Trap --------------> TRAPPED ----+
-  //                        +----------- Finish -------------> FINISHED
- enum State { STOPPED, RUNNING, PAUSED, FINISHED, TRAPPED };
-
- // Tells a thread to pause after certain instructions.
- enum BreakFlag : uint8_t {
- None = 0,
- AfterReturn = 1 << 0,
- AfterCall = 1 << 1
- };
-
- using FramePtr = std::unique_ptr<InterpretedFrame, InterpretedFrameDeleter>;
-
- // Representation of a thread in the interpreter.
- class V8_EXPORT_PRIVATE Thread {
-    // Don't instantiate Threads; they will be allocated as ThreadImpl in the
- // interpreter implementation.
- Thread() = delete;
-
- public:
- enum ExceptionHandlingResult { HANDLED, UNWOUND };
-
- // Execution control.
- State state();
- void InitFrame(const WasmFunction* function, WasmValue* args);
- // Pass -1 as num_steps to run till completion, pause or breakpoint.
- State Run(int num_steps = -1);
- State Step() { return Run(1); }
- void Pause();
- void Reset();
-
- // Raise an exception in the current activation and unwind the stack
- // accordingly. Return whether the exception was handled inside wasm:
- // - HANDLED: Activation at handler position and in {PAUSED} state.
- // - UNWOUND: Frames unwound, exception pending, and in {STOPPED} state.
- ExceptionHandlingResult RaiseException(Isolate*, Handle<Object> exception);
-
- // Stack inspection and modification.
- pc_t GetBreakpointPc();
- int GetFrameCount();
- // The InterpretedFrame is only valid as long as the Thread is paused.
- FramePtr GetFrame(int index);
- WasmValue GetReturnValue(int index = 0);
- TrapReason GetTrapReason();
-
- // Returns true if the thread executed an instruction which may produce
- // nondeterministic results, e.g. float div, float sqrt, and float mul,
- // where the sign bit of a NaN is nondeterministic.
- bool PossibleNondeterminism();
-
- // Returns the number of calls / function frames executed on this thread.
- uint64_t NumInterpretedCalls();
-
- // Thread-specific breakpoints.
- // TODO(wasm): Implement this once we support multiple threads.
- // bool SetBreakpoint(const WasmFunction* function, int pc, bool enabled);
- // bool GetBreakpoint(const WasmFunction* function, int pc);
-
- void AddBreakFlags(uint8_t flags);
- void ClearBreakFlags();
-
- // Each thread can have multiple activations, each represented by a portion
- // of the stack frames of this thread. StartActivation returns the id
- // (counting from 0 up) of the started activation.
- // Activations must be properly stacked, i.e. if FinishActivation is called,
-  // the given id must be the latest activation on the stack.
- uint32_t NumActivations();
- uint32_t StartActivation();
- void FinishActivation(uint32_t activation_id);
- // Return the frame base of the given activation, i.e. the number of frames
- // when this activation was started.
- uint32_t ActivationFrameBase(uint32_t activation_id);
- };
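
// A hedged sketch (RunOnce is illustrative, not part of this patch) of the
// activation protocol described above: activations nest LIFO, so the id
// returned by StartActivation is the one handed back to FinishActivation.
#include "src/wasm/wasm-interpreter.h"  // the header being removed above

namespace wi = v8::internal::wasm;

void RunOnce(wi::WasmInterpreter::Thread* thread,
             const wi::WasmFunction* function, wi::WasmValue* args) {
  uint32_t activation = thread->StartActivation();
  thread->InitFrame(function, args);                  // push the entry frame
  wi::WasmInterpreter::State state = thread->Run();   // run to pause/trap/end
  if (state == wi::WasmInterpreter::FINISHED) {
    wi::WasmValue result = thread->GetReturnValue(0);
    (void)result;
  }
  thread->FinishActivation(activation);  // must be the latest activation id
}
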
-
- WasmInterpreter(Isolate* isolate, const WasmModule* module,
- const ModuleWireBytes& wire_bytes,
- Handle<WasmInstanceObject> instance);
-
- ~WasmInterpreter();
-
- //==========================================================================
- // Execution controls.
- //==========================================================================
- void Run();
- void Pause();
-
-  // Prepare {function} for stepping in from JavaScript.
- void PrepareStepIn(const WasmFunction* function);
-
- // Set a breakpoint at {pc} in {function} to be {enabled}. Returns the
- // previous state of the breakpoint at {pc}.
- bool SetBreakpoint(const WasmFunction* function, pc_t pc, bool enabled);
-
-  // Gets the current state of the breakpoint at {pc} in {function}.
- bool GetBreakpoint(const WasmFunction* function, pc_t pc);
-
- // Enable or disable tracing for {function}. Return the previous state.
- bool SetTracing(const WasmFunction* function, bool enabled);
-
- //==========================================================================
- // Thread iteration and inspection.
- //==========================================================================
- int GetThreadCount();
- Thread* GetThread(int id);
-
- //==========================================================================
- // Testing functionality.
- //==========================================================================
- // Manually adds a function to this interpreter. The func_index of the
- // function must match the current number of functions.
- void AddFunctionForTesting(const WasmFunction* function);
- // Manually adds code to the interpreter for the given function.
- void SetFunctionCodeForTesting(const WasmFunction* function,
- const byte* start, const byte* end);
-
- // Computes the control transfers for the given bytecode. Used internally in
- // the interpreter, but exposed for testing.
- static ControlTransferMap ComputeControlTransfersForTesting(
- Zone* zone, const WasmModule* module, const byte* start, const byte* end);
-
- private:
- Zone zone_;
- std::unique_ptr<WasmInterpreterInternals> internals_;
-
- DISALLOW_COPY_AND_ASSIGN(WasmInterpreter);
-};
-
-} // namespace wasm
-} // namespace internal
-} // namespace v8
-
-#endif // V8_WASM_WASM_INTERPRETER_H_
diff --git a/chromium/v8/src/wasm/wasm-js.cc b/chromium/v8/src/wasm/wasm-js.cc
index 64719fb59a3..25109bd3969 100644
--- a/chromium/v8/src/wasm/wasm-js.cc
+++ b/chromium/v8/src/wasm/wasm-js.cc
@@ -93,37 +93,48 @@ class WasmStreaming::WasmStreamingImpl {
};
WasmStreaming::WasmStreaming(std::unique_ptr<WasmStreamingImpl> impl)
- : impl_(std::move(impl)) {}
+ : impl_(std::move(impl)) {
+ TRACE_EVENT0("v8.wasm", "wasm.InitializeStreaming");
+}
// The destructor is defined here because we have a unique_ptr with forward
// declaration.
WasmStreaming::~WasmStreaming() = default;
void WasmStreaming::OnBytesReceived(const uint8_t* bytes, size_t size) {
+ TRACE_EVENT1("v8.wasm", "wasm.OnBytesReceived", "num_bytes", size);
impl_->OnBytesReceived(bytes, size);
}
-void WasmStreaming::Finish() { impl_->Finish(); }
+void WasmStreaming::Finish() {
+ TRACE_EVENT0("v8.wasm", "wasm.FinishStreaming");
+ impl_->Finish();
+}
void WasmStreaming::Abort(MaybeLocal<Value> exception) {
+ TRACE_EVENT0("v8.wasm", "wasm.AbortStreaming");
impl_->Abort(exception);
}
bool WasmStreaming::SetCompiledModuleBytes(const uint8_t* bytes, size_t size) {
+ TRACE_EVENT0("v8.wasm", "wasm.SetCompiledModuleBytes");
return impl_->SetCompiledModuleBytes(bytes, size);
}
void WasmStreaming::SetClient(std::shared_ptr<Client> client) {
+ TRACE_EVENT0("v8.wasm", "wasm.WasmStreaming.SetClient");
impl_->SetClient(client);
}
void WasmStreaming::SetUrl(const char* url, size_t length) {
+ TRACE_EVENT0("v8.wasm", "wasm.SetUrl");
impl_->SetUrl(internal::VectorOf(url, length));
}
// static
std::shared_ptr<WasmStreaming> WasmStreaming::Unpack(Isolate* isolate,
Local<Value> value) {
+ TRACE_EVENT0("v8.wasm", "wasm.WasmStreaming.Unpack");
i::HandleScope scope(reinterpret_cast<i::Isolate*>(isolate));
auto managed =
i::Handle<i::Managed<WasmStreaming>>::cast(Utils::OpenHandle(*value));
@@ -1066,16 +1077,15 @@ void WebAssemblyTable(const v8::FunctionCallbackInfo<v8::Value>& args) {
v8::Local<v8::String> string;
if (!value->ToString(context).ToLocal(&string)) return;
auto enabled_features = i::wasm::WasmFeatures::FromFlags();
+  // The JS API uses 'anyfunc' instead of 'funcref'.
if (string->StringEquals(v8_str(isolate, "anyfunc"))) {
type = i::wasm::kWasmFuncRef;
- } else if (enabled_features.has_anyref() &&
- string->StringEquals(v8_str(isolate, "anyref"))) {
- type = i::wasm::kWasmAnyRef;
- } else if (enabled_features.has_anyref() &&
- string->StringEquals(v8_str(isolate, "nullref"))) {
- type = i::wasm::kWasmNullRef;
+ } else if (enabled_features.has_reftypes() &&
+ string->StringEquals(v8_str(isolate, "externref"))) {
+ type = i::wasm::kWasmExternRef;
} else {
- thrower.TypeError("Descriptor property 'element' must be 'anyfunc'");
+ thrower.TypeError(
+ "Descriptor property 'element' must be a WebAssembly reference type");
return;
}
}
@@ -1198,15 +1208,13 @@ bool GetValueType(Isolate* isolate, MaybeLocal<Value> maybe,
*type = i::wasm::kWasmI64;
} else if (string->StringEquals(v8_str(isolate, "f64"))) {
*type = i::wasm::kWasmF64;
- } else if (enabled_features.has_anyref() &&
- string->StringEquals(v8_str(isolate, "anyref"))) {
- *type = i::wasm::kWasmAnyRef;
- } else if (enabled_features.has_anyref() &&
+ } else if (enabled_features.has_reftypes() &&
+ string->StringEquals(v8_str(isolate, "externref"))) {
+ *type = i::wasm::kWasmExternRef;
+    // The JS API spec uses 'anyfunc' instead of 'funcref'.
+ } else if (enabled_features.has_reftypes() &&
string->StringEquals(v8_str(isolate, "anyfunc"))) {
*type = i::wasm::kWasmFuncRef;
- } else if (enabled_features.has_anyref() &&
- string->StringEquals(v8_str(isolate, "nullref"))) {
- *type = i::wasm::kWasmNullRef;
} else if (enabled_features.has_eh() &&
string->StringEquals(v8_str(isolate, "exnref"))) {
*type = i::wasm::kWasmExnRef;
@@ -1259,8 +1267,7 @@ void WebAssemblyGlobal(const v8::FunctionCallbackInfo<v8::Value>& args) {
if (!GetValueType(isolate, maybe, context, &type, enabled_features)) return;
if (type == i::wasm::kWasmStmt) {
thrower.TypeError(
- "Descriptor property 'value' must be 'i32', 'i64', 'f32', or "
- "'f64'");
+ "Descriptor property 'value' must be a WebAssembly type");
return;
}
}
@@ -1327,48 +1334,48 @@ void WebAssemblyGlobal(const v8::FunctionCallbackInfo<v8::Value>& args) {
global_obj->SetF64(f64_value);
break;
}
- case i::wasm::ValueType::kAnyRef:
- case i::wasm::ValueType::kExnRef: {
- if (args.Length() < 2) {
- // When no initial value is provided, we have to use the WebAssembly
- // default value 'null', and not the JS default value 'undefined'.
- global_obj->SetAnyRef(i_isolate->factory()->null_value());
- break;
- }
- global_obj->SetAnyRef(Utils::OpenHandle(*value));
- break;
- }
- case i::wasm::ValueType::kNullRef:
- if (args.Length() < 2) {
- // When no initial value is provided, we have to use the WebAssembly
- // default value 'null', and not the JS default value 'undefined'.
- global_obj->SetNullRef(i_isolate->factory()->null_value());
- break;
- }
- if (!global_obj->SetNullRef(Utils::OpenHandle(*value))) {
- thrower.TypeError("The value of nullref globals must be null");
- }
- break;
- case i::wasm::ValueType::kFuncRef: {
- if (args.Length() < 2) {
- // When no initial value is provided, we have to use the WebAssembly
- // default value 'null', and not the JS default value 'undefined'.
- global_obj->SetFuncRef(i_isolate, i_isolate->factory()->null_value());
- break;
- }
-
- if (!global_obj->SetFuncRef(i_isolate, Utils::OpenHandle(*value))) {
- thrower.TypeError(
- "The value of anyfunc globals must be null or an "
- "exported function");
+ case i::wasm::ValueType::kRef:
+ case i::wasm::ValueType::kOptRef: {
+ switch (type.heap_type()) {
+ case i::wasm::kHeapExtern:
+ case i::wasm::kHeapExn: {
+ if (args.Length() < 2) {
+ // When no initial value is provided, we have to use the WebAssembly
+ // default value 'null', and not the JS default value 'undefined'.
+ global_obj->SetExternRef(i_isolate->factory()->null_value());
+ break;
+ }
+ global_obj->SetExternRef(Utils::OpenHandle(*value));
+ break;
+ }
+ case i::wasm::kHeapFunc: {
+ if (args.Length() < 2) {
+ // When no initial value is provided, we have to use the WebAssembly
+ // default value 'null', and not the JS default value 'undefined'.
+ global_obj->SetFuncRef(i_isolate,
+ i_isolate->factory()->null_value());
+ break;
+ }
+
+ if (!global_obj->SetFuncRef(i_isolate, Utils::OpenHandle(*value))) {
+ thrower.TypeError(
+ "The value of funcref globals must be null or an "
+ "exported function");
+ }
+ break;
+ }
+ case i::wasm::kHeapEq:
+ default:
+ // TODO(7748): Implement these.
+ UNIMPLEMENTED();
}
break;
}
- case i::wasm::ValueType::kRef:
- case i::wasm::ValueType::kOptRef:
- case i::wasm::ValueType::kEqRef:
- // TODO(7748): Implement these.
+ case i::wasm::ValueType::kRtt:
+ // TODO(7748): Implement.
UNIMPLEMENTED();
+ case i::wasm::ValueType::kI8:
+ case i::wasm::ValueType::kI16:
case i::wasm::ValueType::kStmt:
case i::wasm::ValueType::kS128:
case i::wasm::ValueType::kBottom:
@@ -1589,7 +1596,7 @@ void WebAssemblyTableGetLength(
v8::Number::New(isolate, receiver->current_length()));
}
-// WebAssembly.Table.grow(num) -> num
+// WebAssembly.Table.grow(num, init_value = null) -> num
void WebAssemblyTableGrow(const v8::FunctionCallbackInfo<v8::Value>& args) {
v8::Isolate* isolate = args.GetIsolate();
i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(isolate);
@@ -1603,8 +1610,20 @@ void WebAssemblyTableGrow(const v8::FunctionCallbackInfo<v8::Value>& args) {
return;
}
- int old_size = i::WasmTableObject::Grow(i_isolate, receiver, grow_by,
- i_isolate->factory()->null_value());
+ i::Handle<i::Object> init_value = i_isolate->factory()->null_value();
+ auto enabled_features = i::wasm::WasmFeatures::FromIsolate(i_isolate);
+ if (enabled_features.has_typed_funcref()) {
+ if (args.Length() >= 2 && !args[1]->IsUndefined()) {
+ init_value = Utils::OpenHandle(*args[1]);
+ }
+ if (!i::WasmTableObject::IsValidElement(i_isolate, receiver, init_value)) {
+ thrower.TypeError("Argument 1 must be a valid type for the table");
+ return;
+ }
+ }
+
+ int old_size =
+ i::WasmTableObject::Grow(i_isolate, receiver, grow_by, init_value);
if (old_size < 0) {
thrower.RangeError("failed to grow table by %u", grow_by);
@@ -1809,22 +1828,31 @@ void WebAssemblyGlobalGetValueCommon(
case i::wasm::ValueType::kF64:
return_value.Set(receiver->GetF64());
break;
- case i::wasm::ValueType::kAnyRef:
- case i::wasm::ValueType::kFuncRef:
- case i::wasm::ValueType::kNullRef:
- case i::wasm::ValueType::kExnRef:
- DCHECK_IMPLIES(receiver->type() == i::wasm::kWasmNullRef,
- receiver->GetRef()->IsNull());
- return_value.Set(Utils::ToLocal(receiver->GetRef()));
+ case i::wasm::ValueType::kS128:
+ thrower.TypeError("Can't get the value of s128 WebAssembly.Global");
break;
case i::wasm::ValueType::kRef:
case i::wasm::ValueType::kOptRef:
- case i::wasm::ValueType::kEqRef:
- // TODO(7748): Implement these.
- UNIMPLEMENTED();
+ switch (receiver->type().heap_type()) {
+ case i::wasm::kHeapExtern:
+ case i::wasm::kHeapFunc:
+ case i::wasm::kHeapExn:
+ return_value.Set(Utils::ToLocal(receiver->GetRef()));
+ break;
+ case i::wasm::kHeapEq:
+ default:
+ // TODO(7748): Implement these.
+ UNIMPLEMENTED();
+ break;
+ }
+ break;
+ case i::wasm::ValueType::kRtt:
+ UNIMPLEMENTED(); // TODO(7748): Implement.
+ break;
+ case i::wasm::ValueType::kI8:
+ case i::wasm::ValueType::kI16:
case i::wasm::ValueType::kBottom:
case i::wasm::ValueType::kStmt:
- case i::wasm::ValueType::kS128:
UNREACHABLE();
}
}
@@ -1889,32 +1917,40 @@ void WebAssemblyGlobalSetValue(
receiver->SetF64(f64_value);
break;
}
- case i::wasm::ValueType::kAnyRef:
- case i::wasm::ValueType::kExnRef: {
- receiver->SetAnyRef(Utils::OpenHandle(*args[0]));
- break;
- }
- case i::wasm::ValueType::kNullRef:
- if (!receiver->SetNullRef(Utils::OpenHandle(*args[0]))) {
- thrower.TypeError("The value of nullref must be null");
- }
- break;
- case i::wasm::ValueType::kFuncRef: {
- if (!receiver->SetFuncRef(i_isolate, Utils::OpenHandle(*args[0]))) {
- thrower.TypeError(
- "value of an anyfunc reference must be either null or an "
- "exported function");
- }
+ case i::wasm::ValueType::kS128:
+ thrower.TypeError("Can't set the value of s128 WebAssembly.Global");
break;
- }
case i::wasm::ValueType::kRef:
case i::wasm::ValueType::kOptRef:
- case i::wasm::ValueType::kEqRef:
- // TODO(7748): Implement these.
+ switch (receiver->type().heap_type()) {
+ case i::wasm::kHeapExtern:
+ case i::wasm::kHeapExn:
+ receiver->SetExternRef(Utils::OpenHandle(*args[0]));
+ break;
+ case i::wasm::kHeapFunc: {
+ if (!receiver->SetFuncRef(i_isolate, Utils::OpenHandle(*args[0]))) {
+ thrower.TypeError(
+              "value of a funcref reference must be either null or an "
+ "exported function");
+ }
+ break;
+ }
+
+ case i::wasm::kHeapEq:
+ default:
+ // TODO(7748): Implement these.
+ UNIMPLEMENTED();
+ break;
+ }
+ break;
+ case i::wasm::ValueType::kRtt:
+ // TODO(7748): Implement.
UNIMPLEMENTED();
+ break;
+ case i::wasm::ValueType::kI8:
+ case i::wasm::ValueType::kI16:
case i::wasm::ValueType::kBottom:
case i::wasm::ValueType::kStmt:
- case i::wasm::ValueType::kS128:
UNREACHABLE();
}
}
diff --git a/chromium/v8/src/wasm/wasm-module-builder.cc b/chromium/v8/src/wasm/wasm-module-builder.cc
index bcfc49dcbaa..8ea63ef4b8e 100644
--- a/chromium/v8/src/wasm/wasm-module-builder.cc
+++ b/chromium/v8/src/wasm/wasm-module-builder.cc
@@ -414,8 +414,11 @@ void WasmModuleBuilder::SetHasSharedMemory() { has_shared_memory_ = true; }
namespace {
void WriteValueType(ZoneBuffer* buffer, const ValueType& type) {
buffer->write_u8(type.value_type_code());
- if (type.has_immediate()) {
- buffer->write_u32v(type.ref_index());
+ if (type.has_depth()) {
+ buffer->write_u32v(type.depth());
+ }
+ if (type.encoding_needs_heap_type()) {
+ buffer->write_u32v(type.heap_type_code());
}
}
@@ -450,8 +453,9 @@ void WasmModuleBuilder::WriteTo(ZoneBuffer* buffer) const {
StructType* struct_type = type.struct_type;
buffer->write_u8(kWasmStructTypeCode);
buffer->write_size(struct_type->field_count());
- for (auto field : struct_type->fields()) {
- WriteValueType(buffer, field);
+ for (uint32_t i = 0; i < struct_type->field_count(); i++) {
+ WriteValueType(buffer, struct_type->field(i));
+ buffer->write_u8(struct_type->mutability(i) ? 1 : 0);
}
break;
}
@@ -459,6 +463,7 @@ void WasmModuleBuilder::WriteTo(ZoneBuffer* buffer) const {
ArrayType* array_type = type.array_type;
buffer->write_u8(kWasmArrayTypeCode);
WriteValueType(buffer, array_type->element_type());
+ buffer->write_u8(array_type->mutability() ? 1 : 0);
break;
}
}
@@ -564,6 +569,7 @@ void WasmModuleBuilder::WriteTo(ZoneBuffer* buffer) const {
break;
case WasmInitExpr::kRefNullConst:
buffer->write_u8(kExprRefNull);
+ WriteValueType(buffer, global.type);
break;
case WasmInitExpr::kRefFuncConst:
UNIMPLEMENTED();
@@ -590,12 +596,15 @@ void WasmModuleBuilder::WriteTo(ZoneBuffer* buffer) const {
buffer->write_f64(0.);
break;
case ValueType::kOptRef:
- case ValueType::kFuncRef:
- case ValueType::kExnRef:
- case ValueType::kEqRef:
buffer->write_u8(kExprRefNull);
break;
- default:
+ case ValueType::kI8:
+ case ValueType::kI16:
+ case ValueType::kStmt:
+ case ValueType::kS128:
+ case ValueType::kBottom:
+ case ValueType::kRef:
+ case ValueType::kRtt:
UNREACHABLE();
}
}
diff --git a/chromium/v8/src/wasm/wasm-module.cc b/chromium/v8/src/wasm/wasm-module.cc
index 5111a783728..405586107a2 100644
--- a/chromium/v8/src/wasm/wasm-module.cc
+++ b/chromium/v8/src/wasm/wasm-module.cc
@@ -50,8 +50,11 @@ LazilyGeneratedNames::LookupNameFromImportsAndExports(
Vector<const WasmImport> import_table,
Vector<const WasmExport> export_table) const {
base::MutexGuard lock(&mutex_);
- DCHECK(kind == kExternalGlobal || kind == kExternalMemory);
- auto& names = kind == kExternalGlobal ? global_names_ : memory_names_;
+ DCHECK(kind == kExternalGlobal || kind == kExternalMemory ||
+ kind == kExternalTable);
+ auto& names = kind == kExternalGlobal
+ ? global_names_
+ : kind == kExternalMemory ? memory_names_ : table_names_;
if (!names) {
names.reset(
new std::unordered_map<uint32_t,
@@ -215,7 +218,16 @@ std::ostream& operator<<(std::ostream& os, const WasmFunctionName& name) {
}
WasmModule::WasmModule(std::unique_ptr<Zone> signature_zone)
- : signature_zone(std::move(signature_zone)) {}
+ : signature_zone(std::move(signature_zone)),
+ subtyping_cache(this->signature_zone.get() == nullptr
+ ? nullptr
+ : new ZoneUnorderedSet<std::pair<uint32_t, uint32_t>>(
+ this->signature_zone.get())),
+ type_equivalence_cache(
+ this->signature_zone.get() == nullptr
+ ? nullptr
+ : new ZoneUnorderedSet<std::pair<uint32_t, uint32_t>>(
+ this->signature_zone.get())) {}
bool IsWasmCodegenAllowed(Isolate* isolate, Handle<Context> context) {
// TODO(wasm): Once wasm has its own CSP policy, we should introduce a
@@ -239,50 +251,10 @@ namespace {
// Converts the given {type} into a string representation that can be used in
// reflective functions. Should be kept in sync with the {GetValueType} helper.
Handle<String> ToValueTypeString(Isolate* isolate, ValueType type) {
- // TODO(ahaas/jkummerow): This could be as simple as:
- // return isolate->factory()->InternalizeUtf8String(type.type_name());
- // if we clean up all occurrences of "anyfunc" in favor of "funcref".
- Factory* factory = isolate->factory();
- Handle<String> string;
- switch (type.kind()) {
- case i::wasm::ValueType::kI32: {
- string = factory->InternalizeUtf8String("i32");
- break;
- }
- case i::wasm::ValueType::kI64: {
- string = factory->InternalizeUtf8String("i64");
- break;
- }
- case i::wasm::ValueType::kF32: {
- string = factory->InternalizeUtf8String("f32");
- break;
- }
- case i::wasm::ValueType::kF64: {
- string = factory->InternalizeUtf8String("f64");
- break;
- }
- case i::wasm::ValueType::kAnyRef: {
- string = factory->InternalizeUtf8String("anyref");
- break;
- }
- case i::wasm::ValueType::kFuncRef: {
- string = factory->InternalizeUtf8String("anyfunc");
- break;
- }
- case i::wasm::ValueType::kNullRef: {
- string = factory->InternalizeUtf8String("nullref");
- break;
- }
- case i::wasm::ValueType::kExnRef: {
- string = factory->InternalizeUtf8String("exnref");
- break;
- }
- default:
- UNREACHABLE();
- }
- return string;
+ return isolate->factory()->InternalizeUtf8String(
+ type == kWasmFuncRef ? CStrVector("anyfunc")
+ : VectorOf(type.type_name()));
}
-
} // namespace
Handle<JSObject> GetTypeForFunction(Isolate* isolate, const FunctionSig* sig) {
@@ -357,13 +329,14 @@ Handle<JSObject> GetTypeForTable(Isolate* isolate, ValueType type,
Factory* factory = isolate->factory();
Handle<String> element;
- if (type == kWasmFuncRef) {
- // TODO(wasm): We should define the "anyfunc" string in one central place
- // and then use that constant everywhere.
+ if (type.is_reference_to(kHeapFunc)) {
+ // TODO(wasm): We should define the "anyfunc" string in one central
+ // place and then use that constant everywhere.
element = factory->InternalizeUtf8String("anyfunc");
} else {
- DCHECK(WasmFeatures::FromFlags().has_anyref() && type == kWasmAnyRef);
- element = factory->InternalizeUtf8String("anyref");
+ DCHECK(WasmFeatures::FromFlags().has_reftypes() &&
+ type.is_reference_to(kHeapExtern));
+ element = factory->InternalizeUtf8String("externref");
}
Handle<JSFunction> object_function = isolate->object_function();
@@ -458,9 +431,8 @@ Handle<JSArray> GetImports(Isolate* isolate,
case kExternalException:
import_kind = exception_string;
break;
- default:
- UNREACHABLE();
}
+ DCHECK(!import_kind->is_null());
Handle<String> import_module =
WasmModuleObject::ExtractUtf8StringFromModuleBytes(
diff --git a/chromium/v8/src/wasm/wasm-module.h b/chromium/v8/src/wasm/wasm-module.h
index a189964ad73..f0f8db890b4 100644
--- a/chromium/v8/src/wasm/wasm-module.h
+++ b/chromium/v8/src/wasm/wasm-module.h
@@ -15,6 +15,7 @@
#include "src/wasm/struct-types.h"
#include "src/wasm/wasm-constants.h"
#include "src/wasm/wasm-opcodes.h"
+#include "src/zone/zone-containers.h"
namespace v8 {
@@ -117,7 +118,7 @@ struct WasmElemSegment {
// Construct an active segment.
WasmElemSegment(uint32_t table_index, WasmInitExpr offset)
- : type(kWasmFuncRef),
+ : type(ValueType::Ref(kHeapFunc, kNullable)),
table_index(table_index),
offset(offset),
status(kStatusActive) {}
@@ -125,7 +126,7 @@ struct WasmElemSegment {
// Construct a passive or declarative segment, which has no table index or
// offset.
explicit WasmElemSegment(bool declarative)
- : type(kWasmFuncRef),
+ : type(ValueType::Ref(kHeapFunc, kNullable)),
table_index(0),
status(declarative ? kStatusDeclarative : kStatusPassive) {}
@@ -206,7 +207,7 @@ class V8_EXPORT_PRIVATE LazilyGeneratedNames {
void AddForTesting(int function_index, WireBytesRef name);
private:
- // {function_names_}, {global_names_} and {memory_names_} are
+ // {function_names_}, {global_names_}, {memory_names_} and {table_names_} are
// populated lazily after decoding, and therefore need a mutex to protect
// concurrent modifications from multiple {WasmModuleObject}.
mutable base::Mutex mutex_;
@@ -218,6 +219,9 @@ class V8_EXPORT_PRIVATE LazilyGeneratedNames {
mutable std::unique_ptr<
std::unordered_map<uint32_t, std::pair<WireBytesRef, WireBytesRef>>>
memory_names_;
+ mutable std::unique_ptr<
+ std::unordered_map<uint32_t, std::pair<WireBytesRef, WireBytesRef>>>
+ table_names_;
};
class V8_EXPORT_PRIVATE AsmJsOffsetInformation {
@@ -327,6 +331,28 @@ struct V8_EXPORT_PRIVATE WasmModule {
bool has_array(uint32_t index) const {
return index < types.size() && type_kinds[index] == kWasmArrayTypeCode;
}
+ bool is_cached_subtype(uint32_t subtype, uint32_t supertype) const {
+ return subtyping_cache->count(std::make_pair(subtype, supertype)) == 1;
+ }
+ void cache_subtype(uint32_t subtype, uint32_t supertype) const {
+ subtyping_cache->emplace(subtype, supertype);
+ }
+ void uncache_subtype(uint32_t subtype, uint32_t supertype) const {
+ subtyping_cache->erase(std::make_pair(subtype, supertype));
+ }
+ bool is_cached_equivalent_type(uint32_t type1, uint32_t type2) const {
+ if (type1 > type2) std::swap(type1, type2);
+ return type_equivalence_cache->count(std::make_pair(type1, type2)) == 1;
+ }
+ void cache_type_equivalence(uint32_t type1, uint32_t type2) const {
+ if (type1 > type2) std::swap(type1, type2);
+ type_equivalence_cache->emplace(type1, type2);
+ }
+ void uncache_type_equivalence(uint32_t type1, uint32_t type2) const {
+ if (type1 > type2) std::swap(type1, type2);
+ type_equivalence_cache->erase(std::make_pair(type1, type2));
+ }
+
std::vector<WasmFunction> functions;
std::vector<WasmDataSegment> data_segments;
std::vector<WasmTable> tables;
@@ -347,6 +373,15 @@ struct V8_EXPORT_PRIVATE WasmModule {
explicit WasmModule(std::unique_ptr<Zone> signature_zone = nullptr);
+ private:
+ // Cache for discovered subtyping pairs.
+ std::unique_ptr<ZoneUnorderedSet<std::pair<uint32_t, uint32_t>>>
+ subtyping_cache;
+ // Cache for discovered equivalent type pairs.
+ // Indexes are stored in increasing order.
+ std::unique_ptr<ZoneUnorderedSet<std::pair<uint32_t, uint32_t>>>
+ type_equivalence_cache;
+
DISALLOW_COPY_AND_ASSIGN(WasmModule);
};
diff --git a/chromium/v8/src/wasm/wasm-objects-inl.h b/chromium/v8/src/wasm/wasm-objects-inl.h
index 93234493445..d832be25b83 100644
--- a/chromium/v8/src/wasm/wasm-objects-inl.h
+++ b/chromium/v8/src/wasm/wasm-objects-inl.h
@@ -30,7 +30,6 @@ namespace internal {
OBJECT_CONSTRUCTORS_IMPL(WasmExceptionObject, JSObject)
TQ_OBJECT_CONSTRUCTORS_IMPL(WasmExceptionTag)
OBJECT_CONSTRUCTORS_IMPL(WasmExportedFunctionData, Struct)
-OBJECT_CONSTRUCTORS_IMPL(WasmDebugInfo, Struct)
OBJECT_CONSTRUCTORS_IMPL(WasmGlobalObject, JSObject)
OBJECT_CONSTRUCTORS_IMPL(WasmInstanceObject, JSObject)
OBJECT_CONSTRUCTORS_IMPL(WasmMemoryObject, JSObject)
@@ -40,9 +39,6 @@ OBJECT_CONSTRUCTORS_IMPL(AsmWasmData, Struct)
TQ_OBJECT_CONSTRUCTORS_IMPL(WasmStruct)
TQ_OBJECT_CONSTRUCTORS_IMPL(WasmArray)
-NEVER_READ_ONLY_SPACE_IMPL(WasmDebugInfo)
-
-CAST_ACCESSOR(WasmDebugInfo)
CAST_ACCESSOR(WasmExceptionObject)
CAST_ACCESSOR(WasmExportedFunctionData)
CAST_ACCESSOR(WasmGlobalObject)
@@ -126,20 +122,22 @@ ACCESSORS(WasmGlobalObject, untagged_buffer, JSArrayBuffer,
kUntaggedBufferOffset)
ACCESSORS(WasmGlobalObject, tagged_buffer, FixedArray, kTaggedBufferOffset)
SMI_ACCESSORS(WasmGlobalObject, offset, kOffsetOffset)
-SMI_ACCESSORS(WasmGlobalObject, flags, kFlagsOffset)
+// TODO(7748): This will not suffice to hold the 32-bit encoding of a ValueType.
+// We need to devise an encoding that does, and also encodes is_mutable.
+SMI_ACCESSORS(WasmGlobalObject, raw_type, kRawTypeOffset)
+SMI_ACCESSORS(WasmGlobalObject, is_mutable, kIsMutableOffset)
+
wasm::ValueType WasmGlobalObject::type() const {
- return wasm::ValueType(TypeBits::decode(flags()));
+ return wasm::ValueType::FromRawBitField(raw_type());
}
void WasmGlobalObject::set_type(wasm::ValueType value) {
- set_flags(TypeBits::update(flags(), value.kind()));
+ set_raw_type(static_cast<int>(value.raw_bit_field()));
}
-BIT_FIELD_ACCESSORS(WasmGlobalObject, flags, is_mutable,
- WasmGlobalObject::IsMutableBit)
int WasmGlobalObject::type_size() const { return type().element_size_bytes(); }
Address WasmGlobalObject::address() const {
- DCHECK_NE(type(), wasm::kWasmAnyRef);
+ DCHECK_NE(type(), wasm::kWasmExternRef);
DCHECK_LE(offset() + type_size(), untagged_buffer().byte_length());
return Address(untagged_buffer().backing_store()) + offset();
}
@@ -161,8 +159,8 @@ double WasmGlobalObject::GetF64() {
}
Handle<Object> WasmGlobalObject::GetRef() {
- // We use this getter for anyref, funcref, and exnref.
- DCHECK(type().IsReferenceType());
+ // We use this getter for externref, funcref, and exnref.
+ DCHECK(type().is_reference_type());
return handle(tagged_buffer().get(offset()), GetIsolate());
}
@@ -182,21 +180,13 @@ void WasmGlobalObject::SetF64(double value) {
base::WriteLittleEndianValue<double>(address(), value);
}
-void WasmGlobalObject::SetAnyRef(Handle<Object> value) {
- // We use this getter anyref and exnref.
- DCHECK(type() == wasm::kWasmAnyRef || type() == wasm::kWasmExnRef);
+void WasmGlobalObject::SetExternRef(Handle<Object> value) {
+  // We use this setter for externref and exnref.
+ DCHECK(type().is_reference_to(wasm::kHeapExtern) ||
+ type().is_reference_to(wasm::kHeapExn));
tagged_buffer().set(offset(), *value);
}
-bool WasmGlobalObject::SetNullRef(Handle<Object> value) {
- DCHECK_EQ(type(), wasm::kWasmNullRef);
- if (!value->IsNull()) {
- return false;
- }
- tagged_buffer().set(offset(), *value);
- return true;
-}
-
bool WasmGlobalObject::SetFuncRef(Isolate* isolate, Handle<Object> value) {
DCHECK_EQ(type(), wasm::kWasmFuncRef);
if (!value->IsNull(isolate) &&
@@ -253,8 +243,6 @@ OPTIONAL_ACCESSORS(WasmInstanceObject, tagged_globals_buffer, FixedArray,
kTaggedGlobalsBufferOffset)
OPTIONAL_ACCESSORS(WasmInstanceObject, imported_mutable_globals_buffers,
FixedArray, kImportedMutableGlobalsBuffersOffset)
-OPTIONAL_ACCESSORS(WasmInstanceObject, debug_info, WasmDebugInfo,
- kDebugInfoOffset)
OPTIONAL_ACCESSORS(WasmInstanceObject, tables, FixedArray, kTablesOffset)
OPTIONAL_ACCESSORS(WasmInstanceObject, indirect_function_tables, FixedArray,
kIndirectFunctionTablesOffset)
@@ -391,23 +379,15 @@ OPTIONAL_ACCESSORS(WasmIndirectFunctionTable, managed_native_allocations,
Foreign, kManagedNativeAllocationsOffset)
ACCESSORS(WasmIndirectFunctionTable, refs, FixedArray, kRefsOffset)
-// WasmDebugInfo
-ACCESSORS(WasmDebugInfo, wasm_instance, WasmInstanceObject, kInstanceOffset)
-ACCESSORS(WasmDebugInfo, interpreter_handle, Object, kInterpreterHandleOffset)
-ACCESSORS(WasmDebugInfo, interpreter_reference_stack, Cell,
- kInterpreterReferenceStackOffset)
-OPTIONAL_ACCESSORS(WasmDebugInfo, c_wasm_entries, FixedArray,
- kCWasmEntriesOffset)
-OPTIONAL_ACCESSORS(WasmDebugInfo, c_wasm_entry_map, Managed<wasm::SignatureMap>,
- kCWasmEntryMapOffset)
-
#undef OPTIONAL_ACCESSORS
#undef READ_PRIMITIVE_FIELD
#undef WRITE_PRIMITIVE_FIELD
#undef PRIMITIVE_ACCESSORS
wasm::ValueType WasmTableObject::type() {
- return wasm::ValueType(static_cast<wasm::ValueType::Kind>(raw_type()));
+ // TODO(7748): Support non-nullable tables?
+ return wasm::ValueType::Ref(static_cast<wasm::HeapType>(raw_type()),
+ wasm::kNullable);
}
bool WasmMemoryObject::has_maximum_pages() { return maximum_pages() >= 0; }
diff --git a/chromium/v8/src/wasm/wasm-objects.cc b/chromium/v8/src/wasm/wasm-objects.cc
index 28834678893..ae9d64b956d 100644
--- a/chromium/v8/src/wasm/wasm-objects.cc
+++ b/chromium/v8/src/wasm/wasm-objects.cc
@@ -283,7 +283,9 @@ Handle<WasmTableObject> WasmTableObject::New(Isolate* isolate,
table_obj->set_entries(*backing_store);
table_obj->set_current_length(initial);
table_obj->set_maximum_length(*max);
- table_obj->set_raw_type(static_cast<int>(type.kind()));
+ // TODO(7748): Make this work with other table types.
+ CHECK(type.is_nullable());
+ table_obj->set_raw_type(static_cast<int>(type.heap_type()));
table_obj->set_dispatch_tables(ReadOnlyRoots(isolate).empty_fixed_array());
if (entries != nullptr) {
@@ -384,14 +386,10 @@ bool WasmTableObject::IsValidElement(Isolate* isolate,
Handle<WasmTableObject> table,
Handle<Object> entry) {
// Anyref and exnref tables take everything.
- if (table->type() == wasm::kWasmAnyRef ||
- table->type() == wasm::kWasmExnRef) {
+ if (table->type().heap_type() == wasm::kHeapExtern ||
+ table->type().heap_type() == wasm::kHeapExn) {
return true;
}
- // Nullref only takes {null}.
- if (table->type() == wasm::kWasmNullRef) {
- return entry->IsNull(isolate);
- }
// FuncRef tables can store {null}, {WasmExportedFunction}, {WasmJSFunction},
// or {WasmCapiFunction} objects.
if (entry->IsNull(isolate)) return true;
@@ -409,8 +407,8 @@ void WasmTableObject::Set(Isolate* isolate, Handle<WasmTableObject> table,
Handle<FixedArray> entries(table->entries(), isolate);
// The FixedArray is addressed with int's.
int entry_index = static_cast<int>(index);
- if (table->type() == wasm::kWasmAnyRef ||
- table->type() == wasm::kWasmExnRef) {
+ if (table->type().heap_type() == wasm::kHeapExtern ||
+ table->type().heap_type() == wasm::kHeapExn) {
entries->set(entry_index, *entry);
return;
}
@@ -454,9 +452,9 @@ Handle<Object> WasmTableObject::Get(Isolate* isolate,
Handle<Object> entry(entries->get(entry_index), isolate);
- // First we handle the easy anyref and exnref table case.
- if (table->type() == wasm::kWasmAnyRef ||
- table->type() == wasm::kWasmExnRef) {
+ // First we handle the easy externref and exnref table case.
+ if (table->type().heap_type() == wasm::kHeapExtern ||
+ table->type().heap_type() == wasm::kHeapExn) {
return entry;
}
@@ -634,7 +632,7 @@ void WasmTableObject::GetFunctionTableEntry(
Isolate* isolate, Handle<WasmTableObject> table, int entry_index,
bool* is_valid, bool* is_null, MaybeHandle<WasmInstanceObject>* instance,
int* function_index, MaybeHandle<WasmJSFunction>* maybe_js_function) {
- DCHECK_EQ(table->type(), wasm::kWasmFuncRef);
+ DCHECK_EQ(table->type().heap_type(), wasm::kHeapFunc);
DCHECK_LT(entry_index, table->current_length());
// We initialize {is_valid} with {true}. We may change it later.
*is_valid = true;
@@ -856,7 +854,7 @@ void WasmMemoryObject::update_instances(Isolate* isolate,
int32_t WasmMemoryObject::Grow(Isolate* isolate,
Handle<WasmMemoryObject> memory_object,
uint32_t pages) {
- TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.wasm"), "GrowMemory");
+ TRACE_EVENT0("v8.wasm", "wasm.GrowMemory");
Handle<JSArrayBuffer> old_buffer(memory_object->array_buffer(), isolate);
// Any buffer used as an asmjs memory cannot be detached, and
// therefore this memory cannot be grown.
@@ -951,13 +949,13 @@ MaybeHandle<WasmGlobalObject> WasmGlobalObject::New(
// Disallow GC until all fields have acceptable types.
DisallowHeapAllocation no_gc;
- global_obj->set_flags(0);
+ global_obj->set_raw_type(0);
global_obj->set_type(type);
global_obj->set_offset(offset);
global_obj->set_is_mutable(is_mutable);
}
- if (type.IsReferenceType()) {
+ if (type.is_reference_type()) {
DCHECK(maybe_untagged_buffer.is_null());
Handle<FixedArray> tagged_buffer;
if (!maybe_tagged_buffer.ToHandle(&tagged_buffer)) {
@@ -1175,16 +1173,6 @@ const WasmModule* WasmInstanceObject::module() {
return module_object().module();
}
-Handle<WasmDebugInfo> WasmInstanceObject::GetOrCreateDebugInfo(
- Handle<WasmInstanceObject> instance) {
- if (instance->has_debug_info()) {
- return handle(instance->debug_info(), instance->GetIsolate());
- }
- Handle<WasmDebugInfo> new_info = WasmDebugInfo::New(instance);
- DCHECK(instance->has_debug_info());
- return new_info;
-}
-
Handle<WasmInstanceObject> WasmInstanceObject::New(
Isolate* isolate, Handle<WasmModuleObject> module_object) {
Handle<JSFunction> instance_cons(
@@ -1483,7 +1471,7 @@ void WasmInstanceObject::ImportWasmJSFunctionIntoTable(
// static
uint8_t* WasmInstanceObject::GetGlobalStorage(
Handle<WasmInstanceObject> instance, const wasm::WasmGlobal& global) {
- DCHECK(!global.type.IsReferenceType());
+ DCHECK(!global.type.is_reference_type());
if (global.mutability && global.imported) {
return reinterpret_cast<byte*>(
instance->imported_mutable_globals()[global.index]);
@@ -1496,7 +1484,7 @@ uint8_t* WasmInstanceObject::GetGlobalStorage(
std::pair<Handle<FixedArray>, uint32_t>
WasmInstanceObject::GetGlobalBufferAndIndex(Handle<WasmInstanceObject> instance,
const wasm::WasmGlobal& global) {
- DCHECK(global.type.IsReferenceType());
+ DCHECK(global.type.is_reference_type());
Isolate* isolate = instance->GetIsolate();
if (global.mutability && global.imported) {
Handle<FixedArray> buffer(
@@ -1522,10 +1510,19 @@ MaybeHandle<String> WasmInstanceObject::GetGlobalNameOrNull(
// static
MaybeHandle<String> WasmInstanceObject::GetMemoryNameOrNull(
Isolate* isolate, Handle<WasmInstanceObject> instance,
- uint32_t global_index) {
+ uint32_t memory_index) {
return WasmInstanceObject::GetNameFromImportsAndExportsOrNull(
isolate, instance, wasm::ImportExportKindCode::kExternalMemory,
- global_index);
+ memory_index);
+}
+
+// static
+MaybeHandle<String> WasmInstanceObject::GetTableNameOrNull(
+ Isolate* isolate, Handle<WasmInstanceObject> instance,
+ uint32_t table_index) {
+ return WasmInstanceObject::GetNameFromImportsAndExportsOrNull(
+ isolate, instance, wasm::ImportExportKindCode::kExternalTable,
+ table_index);
}
// static
@@ -1533,7 +1530,8 @@ MaybeHandle<String> WasmInstanceObject::GetNameFromImportsAndExportsOrNull(
Isolate* isolate, Handle<WasmInstanceObject> instance,
wasm::ImportExportKindCode kind, uint32_t index) {
DCHECK(kind == wasm::ImportExportKindCode::kExternalGlobal ||
- kind == wasm::ImportExportKindCode::kExternalMemory);
+ kind == wasm::ImportExportKindCode::kExternalMemory ||
+ kind == wasm::ImportExportKindCode::kExternalTable);
wasm::ModuleWireBytes wire_bytes(
instance->module_object().native_module()->wire_bytes());
@@ -1562,7 +1560,7 @@ MaybeHandle<String> WasmInstanceObject::GetNameFromImportsAndExportsOrNull(
wasm::WasmValue WasmInstanceObject::GetGlobalValue(
Handle<WasmInstanceObject> instance, const wasm::WasmGlobal& global) {
Isolate* isolate = instance->GetIsolate();
- if (global.type.IsReferenceType()) {
+ if (global.type.is_reference_type()) {
Handle<FixedArray> global_buffer; // The buffer of the global.
uint32_t global_index = 0; // The index into the buffer.
std::tie(global_buffer, global_index) =
@@ -1727,17 +1725,15 @@ uint32_t WasmExceptionPackage::GetEncodedSize(
DCHECK_EQ(8, ComputeEncodedElementSize(sig->GetParam(i)));
encoded_size += 8;
break;
- case wasm::ValueType::kAnyRef:
- case wasm::ValueType::kFuncRef:
- case wasm::ValueType::kNullRef:
- case wasm::ValueType::kExnRef:
case wasm::ValueType::kRef:
case wasm::ValueType::kOptRef:
- case wasm::ValueType::kEqRef:
encoded_size += 1;
break;
+ case wasm::ValueType::kRtt:
case wasm::ValueType::kStmt:
case wasm::ValueType::kBottom:
+ case wasm::ValueType::kI8:
+ case wasm::ValueType::kI16:
UNREACHABLE();
}
}
diff --git a/chromium/v8/src/wasm/wasm-objects.h b/chromium/v8/src/wasm/wasm-objects.h
index 217bd50d154..f8ead0fe3e7 100644
--- a/chromium/v8/src/wasm/wasm-objects.h
+++ b/chromium/v8/src/wasm/wasm-objects.h
@@ -42,7 +42,6 @@ class BreakPoint;
class JSArrayBuffer;
class SeqOneByteString;
class WasmCapiFunction;
-class WasmDebugInfo;
class WasmExceptionTag;
class WasmExportedFunction;
class WasmExternalFunction;
@@ -67,7 +66,7 @@ class Managed;
// - object = target instance, if a Wasm function, tuple if imported
// - sig_id = signature id of function
// - target = entrypoint to Wasm code or import wrapper code
-class IndirectFunctionTableEntry {
+class V8_EXPORT_PRIVATE IndirectFunctionTableEntry {
public:
inline IndirectFunctionTableEntry(Handle<WasmInstanceObject>, int table_index,
int entry_index);
@@ -76,9 +75,8 @@ class IndirectFunctionTableEntry {
int entry_index);
void clear();
- V8_EXPORT_PRIVATE void Set(int sig_id,
- Handle<WasmInstanceObject> target_instance,
- int target_func_index);
+ void Set(int sig_id, Handle<WasmInstanceObject> target_instance,
+ int target_func_index);
void Set(int sig_id, Address call_target, Object ref);
Object object_ref() const;
@@ -324,16 +322,16 @@ class WasmGlobalObject : public JSObject {
DECL_ACCESSORS(untagged_buffer, JSArrayBuffer)
DECL_ACCESSORS(tagged_buffer, FixedArray)
DECL_INT32_ACCESSORS(offset)
- DECL_INT_ACCESSORS(flags)
+ DECL_INT_ACCESSORS(raw_type)
DECL_PRIMITIVE_ACCESSORS(type, wasm::ValueType)
- DECL_BOOLEAN_ACCESSORS(is_mutable)
+ // TODO(7748): Once we improve the encoding of mutability/type, turn this back
+ // into a boolean accessor.
+ DECL_INT_ACCESSORS(is_mutable)
// Dispatched behavior.
DECL_PRINTER(WasmGlobalObject)
DECL_VERIFIER(WasmGlobalObject)
- DEFINE_TORQUE_GENERATED_WASM_GLOBAL_OBJECT_FLAGS()
-
DEFINE_FIELD_OFFSET_CONSTANTS(JSObject::kHeaderSize,
TORQUE_GENERATED_WASM_GLOBAL_OBJECT_FIELDS)
@@ -354,8 +352,7 @@ class WasmGlobalObject : public JSObject {
inline void SetI64(int64_t value);
inline void SetF32(float value);
inline void SetF64(double value);
- inline void SetAnyRef(Handle<Object> value);
- inline bool SetNullRef(Handle<Object> value);
+ inline void SetExternRef(Handle<Object> value);
inline bool SetFuncRef(Isolate* isolate, Handle<Object> value);
private:
@@ -368,7 +365,7 @@ class WasmGlobalObject : public JSObject {
};
// Representation of a WebAssembly.Instance JavaScript-level object.
-class WasmInstanceObject : public JSObject {
+class V8_EXPORT_PRIVATE WasmInstanceObject : public JSObject {
public:
DECL_CAST(WasmInstanceObject)
@@ -379,7 +376,6 @@ class WasmInstanceObject : public JSObject {
DECL_OPTIONAL_ACCESSORS(untagged_globals_buffer, JSArrayBuffer)
DECL_OPTIONAL_ACCESSORS(tagged_globals_buffer, FixedArray)
DECL_OPTIONAL_ACCESSORS(imported_mutable_globals_buffers, FixedArray)
- DECL_OPTIONAL_ACCESSORS(debug_info, WasmDebugInfo)
DECL_OPTIONAL_ACCESSORS(tables, FixedArray)
DECL_OPTIONAL_ACCESSORS(indirect_function_tables, FixedArray)
DECL_ACCESSORS(imported_function_refs, FixedArray)
@@ -441,7 +437,6 @@ class WasmInstanceObject : public JSObject {
V(kUntaggedGlobalsBufferOffset, kTaggedSize) \
V(kTaggedGlobalsBufferOffset, kTaggedSize) \
V(kImportedMutableGlobalsBuffersOffset, kTaggedSize) \
- V(kDebugInfoOffset, kTaggedSize) \
V(kTablesOffset, kTaggedSize) \
V(kIndirectFunctionTablesOffset, kTaggedSize) \
V(kManagedNativeAllocationsOffset, kTaggedSize) \
@@ -480,7 +475,6 @@ class WasmInstanceObject : public JSObject {
kUntaggedGlobalsBufferOffset,
kTaggedGlobalsBufferOffset,
kImportedMutableGlobalsBuffersOffset,
- kDebugInfoOffset,
kTablesOffset,
kIndirectFunctionTablesOffset,
kManagedNativeAllocationsOffset,
@@ -488,21 +482,15 @@ class WasmInstanceObject : public JSObject {
kWasmExternalFunctionsOffset,
kManagedObjectMapsOffset};
- V8_EXPORT_PRIVATE const wasm::WasmModule* module();
+ const wasm::WasmModule* module();
- V8_EXPORT_PRIVATE static bool EnsureIndirectFunctionTableWithMinimumSize(
+ static bool EnsureIndirectFunctionTableWithMinimumSize(
Handle<WasmInstanceObject> instance, int table_index,
uint32_t minimum_size);
- V8_EXPORT_PRIVATE void SetRawMemory(byte* mem_start, size_t mem_size);
-
- // Get the debug info associated with the given wasm object.
- // If no debug info exists yet, it is created automatically.
- V8_EXPORT_PRIVATE static Handle<WasmDebugInfo> GetOrCreateDebugInfo(
- Handle<WasmInstanceObject>);
+ void SetRawMemory(byte* mem_start, size_t mem_size);
- V8_EXPORT_PRIVATE static Handle<WasmInstanceObject> New(
- Isolate*, Handle<WasmModuleObject>);
+ static Handle<WasmInstanceObject> New(Isolate*, Handle<WasmModuleObject>);
Address GetCallTarget(uint32_t func_index);
@@ -536,10 +524,9 @@ class WasmInstanceObject : public JSObject {
// cache of the given {instance}, or creates a new {WasmExportedFunction} if
// it does not exist yet. The new {WasmExportedFunction} is added to the
// cache of the {instance} immediately.
- V8_EXPORT_PRIVATE static Handle<WasmExternalFunction>
- GetOrCreateWasmExternalFunction(Isolate* isolate,
- Handle<WasmInstanceObject> instance,
- int function_index);
+ static Handle<WasmExternalFunction> GetOrCreateWasmExternalFunction(
+ Isolate* isolate, Handle<WasmInstanceObject> instance,
+ int function_index);
static void SetWasmExternalFunction(Isolate* isolate,
Handle<WasmInstanceObject> instance,
@@ -578,6 +565,11 @@ class WasmInstanceObject : public JSObject {
Handle<WasmInstanceObject>,
uint32_t memory_index);
+ // Get the name of a table in the given instance by index.
+ static MaybeHandle<String> GetTableNameOrNull(Isolate*,
+ Handle<WasmInstanceObject>,
+ uint32_t table_index);
+
OBJECT_CONSTRUCTORS(WasmInstanceObject, JSObject);
private:
@@ -619,7 +611,7 @@ class WasmExceptionObject : public JSObject {
};
// A Wasm exception that has been thrown out of Wasm code.
-class WasmExceptionPackage : public JSReceiver {
+class V8_EXPORT_PRIVATE WasmExceptionPackage : public JSReceiver {
public:
static Handle<WasmExceptionPackage> New(
Isolate* isolate, Handle<WasmExceptionTag> exception_tag,
@@ -812,42 +804,6 @@ class WasmJSFunctionData : public Struct {
OBJECT_CONSTRUCTORS(WasmJSFunctionData, Struct);
};
-// Debug info used for wasm debugging in the interpreter. For Liftoff debugging,
-// all information is held off-heap in {wasm::DebugInfo}.
-class WasmDebugInfo : public Struct {
- public:
- NEVER_READ_ONLY_SPACE
- DECL_ACCESSORS(wasm_instance, WasmInstanceObject)
- DECL_ACCESSORS(interpreter_handle, Object) // Foreign or undefined
- DECL_ACCESSORS(interpreter_reference_stack, Cell)
- DECL_OPTIONAL_ACCESSORS(c_wasm_entries, FixedArray)
- DECL_OPTIONAL_ACCESSORS(c_wasm_entry_map, Managed<wasm::SignatureMap>)
-
- DECL_CAST(WasmDebugInfo)
-
- // Dispatched behavior.
- DECL_PRINTER(WasmDebugInfo)
- DECL_VERIFIER(WasmDebugInfo)
-
- // Layout description.
- DEFINE_FIELD_OFFSET_CONSTANTS(HeapObject::kHeaderSize,
- TORQUE_GENERATED_WASM_DEBUG_INFO_FIELDS)
-
- static Handle<WasmDebugInfo> New(Handle<WasmInstanceObject>);
-
- // Setup a WasmDebugInfo with an existing WasmInstance struct.
- // Returns a pointer to the interpreter instantiated inside this
- // WasmDebugInfo.
- // Use for testing only.
- V8_EXPORT_PRIVATE static wasm::WasmInterpreter* SetupForTesting(
- Handle<WasmInstanceObject>);
-
- V8_EXPORT_PRIVATE static Handle<Code> GetCWasmEntry(Handle<WasmDebugInfo>,
- const wasm::FunctionSig*);
-
- OBJECT_CONSTRUCTORS(WasmDebugInfo, Struct);
-};
-
class WasmScript : public AllStatic {
public:
// Set a breakpoint on the given byte position inside the given module.
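Note on the export-macro churn in the wasm-objects.h hunks above: V8_EXPORT_PRIVATE moves from individual WasmInstanceObject members onto the class declarations themselves. A minimal sketch of why a class-level annotation covers every member, using a generic visibility macro; EXPORT, WidgetA and WidgetB are illustrative stand-ins, not V8 names, and V8_EXPORT_PRIVATE's actual expansion may differ:

    // Minimal sketch of class-level vs. member-level export annotations.
    #if defined(__GNUC__)
    #define EXPORT __attribute__((visibility("default")))
    #else
    #define EXPORT
    #endif

    // Before: each externally used member is annotated individually.
    class WidgetA {
     public:
      EXPORT static WidgetA* New();
      EXPORT void Reset();
      void InternalOnly();  // not exported
    };

    // After: one annotation on the class gives every member default
    // visibility, which is simpler to maintain once most members need to
    // cross the component boundary.
    class EXPORT WidgetB {
     public:
      static WidgetB* New();
      void Reset();
      void InternalOnly();  // now exported too - acceptable when the whole
                            // class is part of the component interface
    };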
diff --git a/chromium/v8/src/wasm/wasm-objects.tq b/chromium/v8/src/wasm/wasm-objects.tq
index e611ced16ef..cbaa35b47d9 100644
--- a/chromium/v8/src/wasm/wasm-objects.tq
+++ b/chromium/v8/src/wasm/wasm-objects.tq
@@ -41,14 +41,6 @@ extern class WasmIndirectFunctionTable extends Struct {
refs: FixedArray;
}
-extern class WasmDebugInfo extends Struct {
- instance: WasmInstanceObject;
- interpreter_handle: Foreign|Undefined;
- interpreter_reference_stack: Cell;
- c_wasm_entries: FixedArray|Undefined;
- c_wasm_entry_map: Foreign|Undefined; // Managed<wasm::SignatureMap>
-}
-
@generateCppClass
extern class WasmExceptionTag extends Struct {
// Note that this index is only useful for debugging purposes and it is not
@@ -78,16 +70,13 @@ extern class WasmMemoryObject extends JSObject {
}
type WasmValueType extends uint8 constexpr 'wasm::ValueType::Kind';
-bitfield struct WasmGlobalObjectFlags extends uint31 {
- Type: WasmValueType: 8 bit; // "type" is a reserved word.
- is_mutable: bool: 1 bit;
-}
extern class WasmGlobalObject extends JSObject {
untagged_buffer: JSArrayBuffer|Undefined;
tagged_buffer: FixedArray|Undefined;
offset: Smi;
- flags: SmiTagged<WasmGlobalObjectFlags>;
+ raw_type: Smi;
+ is_mutable: Smi;
}
extern class WasmExceptionObject extends JSObject {
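The Torque change above drops the packed WasmGlobalObjectFlags bitfield (an 8-bit type plus a 1-bit mutability flag in one SmiTagged word) in favour of two separate Smi fields, raw_type and is_mutable, matching the accessor change in wasm-objects.h. A rough sketch of the two encodings using plain shifts; PackedGlobalFlags and UnpackedGlobal are illustrative only, not V8's BitField/SmiTagged machinery:

    #include <cassert>
    #include <cstdint>

    // Old-style packing: type in bits [0..7], is_mutable in bit 8, stored
    // together in a single small-integer field.
    struct PackedGlobalFlags {
      uint32_t bits = 0;
      void set_type(uint8_t t)    { bits = (bits & ~0xFFu) | t; }
      void set_is_mutable(bool m) { bits = (bits & ~0x100u) | (m ? 0x100u : 0); }
      uint8_t type() const        { return bits & 0xFFu; }
      bool is_mutable() const     { return (bits >> 8) & 1u; }
    };

    // New-style layout: each property gets its own Smi-sized field, which is
    // simpler to read and write at the cost of one extra field slot.
    struct UnpackedGlobal {
      int32_t raw_type = 0;    // corresponds to the raw_type: Smi field
      int32_t is_mutable = 0;  // corresponds to the is_mutable: Smi field
    };

    int main() {
      PackedGlobalFlags packed;
      packed.set_type(7);
      packed.set_is_mutable(true);
      UnpackedGlobal unpacked{7, 1};
      assert(packed.type() == unpacked.raw_type);
      assert(packed.is_mutable() == (unpacked.is_mutable != 0));
    }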
diff --git a/chromium/v8/src/wasm/wasm-opcodes-inl.h b/chromium/v8/src/wasm/wasm-opcodes-inl.h
new file mode 100644
index 00000000000..2d9268a9bc7
--- /dev/null
+++ b/chromium/v8/src/wasm/wasm-opcodes-inl.h
@@ -0,0 +1,631 @@
+// Copyright 2020 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_WASM_WASM_OPCODES_INL_H_
+#define V8_WASM_WASM_OPCODES_INL_H_
+
+#include <array>
+
+#include "src/base/template-utils.h"
+#include "src/codegen/signature.h"
+#include "src/execution/messages.h"
+#include "src/runtime/runtime.h"
+#include "src/wasm/wasm-opcodes.h"
+
+namespace v8 {
+namespace internal {
+namespace wasm {
+
+#define CASE_OP(name, str) \
+ case kExpr##name: \
+ return str;
+#define CASE_I32_OP(name, str) CASE_OP(I32##name, "i32." str)
+#define CASE_I64_OP(name, str) CASE_OP(I64##name, "i64." str)
+#define CASE_F32_OP(name, str) CASE_OP(F32##name, "f32." str)
+#define CASE_F64_OP(name, str) CASE_OP(F64##name, "f64." str)
+#define CASE_REF_OP(name, str) CASE_OP(Ref##name, "ref." str)
+#define CASE_F64x2_OP(name, str) CASE_OP(F64x2##name, "f64x2." str)
+#define CASE_F32x4_OP(name, str) CASE_OP(F32x4##name, "f32x4." str)
+#define CASE_I64x2_OP(name, str) CASE_OP(I64x2##name, "i64x2." str)
+#define CASE_I32x4_OP(name, str) CASE_OP(I32x4##name, "i32x4." str)
+#define CASE_I16x8_OP(name, str) CASE_OP(I16x8##name, "i16x8." str)
+#define CASE_I8x16_OP(name, str) CASE_OP(I8x16##name, "i8x16." str)
+#define CASE_S128_OP(name, str) CASE_OP(S128##name, "s128." str)
+#define CASE_S64x2_OP(name, str) CASE_OP(S64x2##name, "s64x2." str)
+#define CASE_S32x4_OP(name, str) CASE_OP(S32x4##name, "s32x4." str)
+#define CASE_S16x8_OP(name, str) CASE_OP(S16x8##name, "s16x8." str)
+#define CASE_S8x16_OP(name, str) CASE_OP(S8x16##name, "s8x16." str)
+#define CASE_V64x2_OP(name, str) CASE_OP(V64x2##name, "v64x2." str)
+#define CASE_V32x4_OP(name, str) CASE_OP(V32x4##name, "v32x4." str)
+#define CASE_V16x8_OP(name, str) CASE_OP(V16x8##name, "v16x8." str)
+#define CASE_V8x16_OP(name, str) CASE_OP(V8x16##name, "v8x16." str)
+#define CASE_INT_OP(name, str) CASE_I32_OP(name, str) CASE_I64_OP(name, str)
+#define CASE_FLOAT_OP(name, str) CASE_F32_OP(name, str) CASE_F64_OP(name, str)
+#define CASE_ALL_OP(name, str) CASE_FLOAT_OP(name, str) CASE_INT_OP(name, str)
+#define CASE_SIMD_OP(name, str) \
+ CASE_F64x2_OP(name, str) CASE_I64x2_OP(name, str) CASE_F32x4_OP(name, str) \
+ CASE_I32x4_OP(name, str) CASE_I16x8_OP(name, str) \
+ CASE_I8x16_OP(name, str)
+#define CASE_SIMDF_OP(name, str) \
+ CASE_F32x4_OP(name, str) CASE_F64x2_OP(name, str)
+#define CASE_SIMDI_OP(name, str) \
+ CASE_I32x4_OP(name, str) CASE_I16x8_OP(name, str) CASE_I8x16_OP(name, str)
+#define CASE_SIMDV_OP(name, str) \
+ CASE_V32x4_OP(name, str) CASE_V16x8_OP(name, str) CASE_V8x16_OP(name, str)
+#define CASE_SIGN_OP(TYPE, name, str) \
+ CASE_##TYPE##_OP(name##S, str "_s") CASE_##TYPE##_OP(name##U, str "_u")
+#define CASE_UNSIGNED_OP(TYPE, name, str) CASE_##TYPE##_OP(name##U, str "_u")
+#define CASE_ALL_SIGN_OP(name, str) \
+ CASE_FLOAT_OP(name, str) CASE_SIGN_OP(INT, name, str)
+#define CASE_CONVERT_OP(name, RES, SRC, src_suffix, str) \
+ CASE_##RES##_OP(U##name##SRC, str "_" src_suffix "_u") \
+ CASE_##RES##_OP(S##name##SRC, str "_" src_suffix "_s")
+#define CASE_CONVERT_SAT_OP(name, RES, SRC, src_suffix, str) \
+ CASE_##RES##_OP(U##name##Sat##SRC, str "_sat_" src_suffix "_u") \
+ CASE_##RES##_OP(S##name##Sat##SRC, str "_sat_" src_suffix "_s")
+#define CASE_L32_OP(name, str) \
+ CASE_SIGN_OP(I32, name##8, str "8") \
+ CASE_SIGN_OP(I32, name##16, str "16") \
+ CASE_I32_OP(name, str "32")
+#define CASE_U32_OP(name, str) \
+ CASE_I32_OP(name, str "32") \
+ CASE_UNSIGNED_OP(I32, name##8, str "8") \
+ CASE_UNSIGNED_OP(I32, name##16, str "16")
+#define CASE_UNSIGNED_ALL_OP(name, str) \
+ CASE_U32_OP(name, str) \
+ CASE_I64_OP(name, str "64") \
+ CASE_UNSIGNED_OP(I64, name##8, str "8") \
+ CASE_UNSIGNED_OP(I64, name##16, str "16") \
+ CASE_UNSIGNED_OP(I64, name##32, str "32")
+
+// static
+constexpr const char* WasmOpcodes::OpcodeName(WasmOpcode opcode) {
+ switch (opcode) {
+ // clang-format off
+
+ // Standard opcodes
+ CASE_INT_OP(Eqz, "eqz")
+ CASE_ALL_OP(Eq, "eq")
+ CASE_ALL_OP(Ne, "ne")
+ CASE_ALL_OP(Add, "add")
+ CASE_ALL_OP(Sub, "sub")
+ CASE_ALL_OP(Mul, "mul")
+ CASE_ALL_SIGN_OP(Lt, "lt")
+ CASE_ALL_SIGN_OP(Gt, "gt")
+ CASE_ALL_SIGN_OP(Le, "le")
+ CASE_ALL_SIGN_OP(Ge, "ge")
+ CASE_INT_OP(Clz, "clz")
+ CASE_INT_OP(Ctz, "ctz")
+ CASE_INT_OP(Popcnt, "popcnt")
+ CASE_ALL_SIGN_OP(Div, "div")
+ CASE_SIGN_OP(INT, Rem, "rem")
+ CASE_INT_OP(And, "and")
+ CASE_INT_OP(Ior, "or")
+ CASE_INT_OP(Xor, "xor")
+ CASE_INT_OP(Shl, "shl")
+ CASE_SIGN_OP(INT, Shr, "shr")
+ CASE_INT_OP(Rol, "rol")
+ CASE_INT_OP(Ror, "ror")
+ CASE_FLOAT_OP(Abs, "abs")
+ CASE_FLOAT_OP(Neg, "neg")
+ CASE_FLOAT_OP(Ceil, "ceil")
+ CASE_FLOAT_OP(Floor, "floor")
+ CASE_FLOAT_OP(Trunc, "trunc")
+ CASE_FLOAT_OP(NearestInt, "nearest")
+ CASE_FLOAT_OP(Sqrt, "sqrt")
+ CASE_FLOAT_OP(Min, "min")
+ CASE_FLOAT_OP(Max, "max")
+ CASE_FLOAT_OP(CopySign, "copysign")
+ CASE_REF_OP(Null, "null")
+ CASE_REF_OP(IsNull, "is_null")
+ CASE_REF_OP(Func, "func")
+ CASE_REF_OP(AsNonNull, "as_non_null")
+ CASE_I32_OP(ConvertI64, "wrap_i64")
+ CASE_CONVERT_OP(Convert, INT, F32, "f32", "trunc")
+ CASE_CONVERT_OP(Convert, INT, F64, "f64", "trunc")
+ CASE_CONVERT_OP(Convert, I64, I32, "i32", "extend")
+ CASE_CONVERT_OP(Convert, F32, I32, "i32", "convert")
+ CASE_CONVERT_OP(Convert, F32, I64, "i64", "convert")
+ CASE_F32_OP(ConvertF64, "demote_f64")
+ CASE_CONVERT_OP(Convert, F64, I32, "i32", "convert")
+ CASE_CONVERT_OP(Convert, F64, I64, "i64", "convert")
+ CASE_F64_OP(ConvertF32, "promote_f32")
+ CASE_I32_OP(ReinterpretF32, "reinterpret_f32")
+ CASE_I64_OP(ReinterpretF64, "reinterpret_f64")
+ CASE_F32_OP(ReinterpretI32, "reinterpret_i32")
+ CASE_F64_OP(ReinterpretI64, "reinterpret_i64")
+ CASE_INT_OP(SExtendI8, "extend8_s")
+ CASE_INT_OP(SExtendI16, "extend16_s")
+ CASE_I64_OP(SExtendI32, "extend32_s")
+ CASE_OP(Unreachable, "unreachable")
+ CASE_OP(Nop, "nop")
+ CASE_OP(Block, "block")
+ CASE_OP(Loop, "loop")
+ CASE_OP(If, "if")
+ CASE_OP(Else, "else")
+ CASE_OP(End, "end")
+ CASE_OP(Br, "br")
+ CASE_OP(BrIf, "br_if")
+ CASE_OP(BrTable, "br_table")
+ CASE_OP(Return, "return")
+ CASE_OP(CallFunction, "call")
+ CASE_OP(CallIndirect, "call_indirect")
+ CASE_OP(ReturnCall, "return_call")
+ CASE_OP(ReturnCallIndirect, "return_call_indirect")
+ CASE_OP(BrOnNull, "br_on_null")
+ CASE_OP(Drop, "drop")
+ CASE_OP(Select, "select")
+ CASE_OP(SelectWithType, "select")
+ CASE_OP(LocalGet, "local.get")
+ CASE_OP(LocalSet, "local.set")
+ CASE_OP(LocalTee, "local.tee")
+ CASE_OP(GlobalGet, "global.get")
+ CASE_OP(GlobalSet, "global.set")
+ CASE_OP(TableGet, "table.get")
+ CASE_OP(TableSet, "table.set")
+ CASE_ALL_OP(Const, "const")
+ CASE_OP(MemorySize, "memory.size")
+ CASE_OP(MemoryGrow, "memory.grow")
+ CASE_ALL_OP(LoadMem, "load")
+ CASE_SIGN_OP(INT, LoadMem8, "load8")
+ CASE_SIGN_OP(INT, LoadMem16, "load16")
+ CASE_SIGN_OP(I64, LoadMem32, "load32")
+ CASE_S128_OP(LoadMem, "load128")
+ CASE_ALL_OP(StoreMem, "store")
+ CASE_INT_OP(StoreMem8, "store8")
+ CASE_INT_OP(StoreMem16, "store16")
+ CASE_I64_OP(StoreMem32, "store32")
+ CASE_S128_OP(StoreMem, "store128")
+
+ // Exception handling opcodes.
+ CASE_OP(Try, "try")
+ CASE_OP(Catch, "catch")
+ CASE_OP(Throw, "throw")
+ CASE_OP(Rethrow, "rethrow")
+ CASE_OP(BrOnExn, "br_on_exn")
+
+ // asm.js-only opcodes.
+ CASE_F64_OP(Acos, "acos")
+ CASE_F64_OP(Asin, "asin")
+ CASE_F64_OP(Atan, "atan")
+ CASE_F64_OP(Cos, "cos")
+ CASE_F64_OP(Sin, "sin")
+ CASE_F64_OP(Tan, "tan")
+ CASE_F64_OP(Exp, "exp")
+ CASE_F64_OP(Log, "log")
+ CASE_F64_OP(Atan2, "atan2")
+ CASE_F64_OP(Pow, "pow")
+ CASE_F64_OP(Mod, "mod")
+ CASE_F32_OP(AsmjsLoadMem, "asmjs_load")
+ CASE_F64_OP(AsmjsLoadMem, "asmjs_load")
+ CASE_L32_OP(AsmjsLoadMem, "asmjs_load")
+ CASE_I32_OP(AsmjsStoreMem, "asmjs_store")
+ CASE_F32_OP(AsmjsStoreMem, "asmjs_store")
+ CASE_F64_OP(AsmjsStoreMem, "asmjs_store")
+ CASE_I32_OP(AsmjsStoreMem8, "asmjs_store8")
+ CASE_I32_OP(AsmjsStoreMem16, "asmjs_store16")
+ CASE_SIGN_OP(I32, AsmjsDiv, "asmjs_div")
+ CASE_SIGN_OP(I32, AsmjsRem, "asmjs_rem")
+ CASE_I32_OP(AsmjsSConvertF32, "asmjs_convert_f32_s")
+ CASE_I32_OP(AsmjsUConvertF32, "asmjs_convert_f32_u")
+ CASE_I32_OP(AsmjsSConvertF64, "asmjs_convert_f64_s")
+ CASE_I32_OP(AsmjsUConvertF64, "asmjs_convert_f64_u")
+
+ // Numeric Opcodes.
+ CASE_CONVERT_SAT_OP(Convert, I32, F32, "f32", "trunc")
+ CASE_CONVERT_SAT_OP(Convert, I32, F64, "f64", "trunc")
+ CASE_CONVERT_SAT_OP(Convert, I64, F32, "f32", "trunc")
+ CASE_CONVERT_SAT_OP(Convert, I64, F64, "f64", "trunc")
+ CASE_OP(MemoryInit, "memory.init")
+ CASE_OP(DataDrop, "data.drop")
+ CASE_OP(MemoryCopy, "memory.copy")
+ CASE_OP(MemoryFill, "memory.fill")
+ CASE_OP(TableInit, "table.init")
+ CASE_OP(ElemDrop, "elem.drop")
+ CASE_OP(TableCopy, "table.copy")
+ CASE_OP(TableGrow, "table.grow")
+ CASE_OP(TableSize, "table.size")
+ CASE_OP(TableFill, "table.fill")
+
+ // SIMD opcodes.
+ CASE_SIMD_OP(Splat, "splat")
+ CASE_SIMD_OP(Neg, "neg")
+ CASE_SIMDF_OP(Sqrt, "sqrt")
+ CASE_SIMD_OP(Eq, "eq")
+ CASE_SIMD_OP(Ne, "ne")
+ CASE_SIMD_OP(Add, "add")
+ CASE_SIMD_OP(Sub, "sub")
+ CASE_SIMD_OP(Mul, "mul")
+ CASE_SIMDF_OP(Div, "div")
+ CASE_SIMDF_OP(Lt, "lt")
+ CASE_SIMDF_OP(Le, "le")
+ CASE_SIMDF_OP(Gt, "gt")
+ CASE_SIMDF_OP(Ge, "ge")
+ CASE_SIMDF_OP(Abs, "abs")
+ CASE_F32x4_OP(AddHoriz, "add_horizontal")
+ CASE_F32x4_OP(RecipApprox, "recip_approx")
+ CASE_F32x4_OP(RecipSqrtApprox, "recip_sqrt_approx")
+ CASE_SIMDF_OP(Min, "min")
+ CASE_SIMDF_OP(Max, "max")
+ CASE_CONVERT_OP(Convert, F32x4, I32x4, "i32", "convert")
+ CASE_CONVERT_OP(Convert, I32x4, F32x4, "f32", "convert")
+ CASE_CONVERT_OP(Convert, I32x4, I16x8Low, "i32", "convert")
+ CASE_CONVERT_OP(Convert, I32x4, I16x8High, "i32", "convert")
+ CASE_CONVERT_OP(Convert, I16x8, I32x4, "i32", "convert")
+ CASE_CONVERT_OP(Convert, I16x8, I8x16Low, "i32", "convert")
+ CASE_CONVERT_OP(Convert, I16x8, I8x16High, "i32", "convert")
+ CASE_CONVERT_OP(Convert, I8x16, I16x8, "i32", "convert")
+ CASE_SIMDF_OP(ExtractLane, "extract_lane")
+ CASE_SIMDF_OP(ReplaceLane, "replace_lane")
+ CASE_I64x2_OP(ExtractLane, "extract_lane")
+ CASE_I64x2_OP(ReplaceLane, "replace_lane")
+ CASE_I32x4_OP(ExtractLane, "extract_lane")
+ CASE_SIGN_OP(I16x8, ExtractLane, "extract_lane")
+ CASE_SIGN_OP(I8x16, ExtractLane, "extract_lane")
+ CASE_SIMDI_OP(ReplaceLane, "replace_lane")
+ CASE_SIGN_OP(SIMDI, Min, "min")
+ CASE_SIGN_OP(I64x2, Min, "min")
+ CASE_SIGN_OP(SIMDI, Max, "max")
+ CASE_SIGN_OP(I64x2, Max, "max")
+ CASE_SIGN_OP(SIMDI, Lt, "lt")
+ CASE_SIGN_OP(I64x2, Lt, "lt")
+ CASE_SIGN_OP(SIMDI, Le, "le")
+ CASE_SIGN_OP(I64x2, Le, "le")
+ CASE_SIGN_OP(SIMDI, Gt, "gt")
+ CASE_SIGN_OP(I64x2, Gt, "gt")
+ CASE_SIGN_OP(SIMDI, Ge, "ge")
+ CASE_SIGN_OP(I64x2, Ge, "ge")
+ CASE_SIGN_OP(SIMDI, Shr, "shr")
+ CASE_SIGN_OP(I64x2, Shr, "shr")
+ CASE_SIMDI_OP(Shl, "shl")
+ CASE_I64x2_OP(Shl, "shl")
+ CASE_I32x4_OP(AddHoriz, "add_horizontal")
+ CASE_I16x8_OP(AddHoriz, "add_horizontal")
+ CASE_SIGN_OP(I16x8, AddSaturate, "add_saturate")
+ CASE_SIGN_OP(I8x16, AddSaturate, "add_saturate")
+ CASE_SIGN_OP(I16x8, SubSaturate, "sub_saturate")
+ CASE_SIGN_OP(I8x16, SubSaturate, "sub_saturate")
+ CASE_S128_OP(And, "and")
+ CASE_S128_OP(Or, "or")
+ CASE_S128_OP(Xor, "xor")
+ CASE_S128_OP(Not, "not")
+ CASE_S128_OP(Select, "select")
+ CASE_S128_OP(AndNot, "andnot")
+ CASE_S8x16_OP(Swizzle, "swizzle")
+ CASE_S8x16_OP(Shuffle, "shuffle")
+ CASE_SIMDV_OP(AnyTrue, "any_true")
+ CASE_SIMDV_OP(AllTrue, "all_true")
+ CASE_V64x2_OP(AnyTrue, "any_true")
+ CASE_V64x2_OP(AllTrue, "all_true")
+ CASE_SIMDF_OP(Qfma, "qfma")
+ CASE_SIMDF_OP(Qfms, "qfms")
+
+ CASE_S8x16_OP(LoadSplat, "load_splat")
+ CASE_S16x8_OP(LoadSplat, "load_splat")
+ CASE_S32x4_OP(LoadSplat, "load_splat")
+ CASE_S64x2_OP(LoadSplat, "load_splat")
+ CASE_I16x8_OP(Load8x8S, "load8x8_s")
+ CASE_I16x8_OP(Load8x8U, "load8x8_u")
+ CASE_I32x4_OP(Load16x4S, "load16x4_s")
+ CASE_I32x4_OP(Load16x4U, "load16x4_u")
+ CASE_I64x2_OP(Load32x2S, "load32x2_s")
+ CASE_I64x2_OP(Load32x2U, "load32x2_u")
+
+ CASE_I8x16_OP(RoundingAverageU, "avgr_u")
+ CASE_I16x8_OP(RoundingAverageU, "avgr_u")
+
+ CASE_I8x16_OP(Abs, "abs")
+ CASE_I16x8_OP(Abs, "abs")
+ CASE_I32x4_OP(Abs, "abs")
+
+ CASE_I8x16_OP(BitMask, "bitmask")
+ CASE_I16x8_OP(BitMask, "bitmask")
+ CASE_I32x4_OP(BitMask, "bitmask")
+
+ CASE_F32x4_OP(Pmin, "pmin")
+ CASE_F32x4_OP(Pmax, "pmax")
+ CASE_F64x2_OP(Pmin, "pmin")
+ CASE_F64x2_OP(Pmax, "pmax")
+
+ CASE_F32x4_OP(Ceil, "ceil")
+ CASE_F32x4_OP(Floor, "floor")
+ CASE_F32x4_OP(Trunc, "trunc")
+ CASE_F32x4_OP(NearestInt, "nearest")
+ CASE_F64x2_OP(Ceil, "ceil")
+ CASE_F64x2_OP(Floor, "floor")
+ CASE_F64x2_OP(Trunc, "trunc")
+ CASE_F64x2_OP(NearestInt, "nearest")
+
+ CASE_I32x4_OP(DotI16x8S, "dot_i16x8_s")
+
+ // Atomic operations.
+ CASE_OP(AtomicNotify, "atomic.notify")
+ CASE_INT_OP(AtomicWait, "atomic.wait")
+ CASE_OP(AtomicFence, "atomic.fence")
+ CASE_UNSIGNED_ALL_OP(AtomicLoad, "atomic.load")
+ CASE_UNSIGNED_ALL_OP(AtomicStore, "atomic.store")
+ CASE_UNSIGNED_ALL_OP(AtomicAdd, "atomic.add")
+ CASE_UNSIGNED_ALL_OP(AtomicSub, "atomic.sub")
+ CASE_UNSIGNED_ALL_OP(AtomicAnd, "atomic.and")
+ CASE_UNSIGNED_ALL_OP(AtomicOr, "atomic.or")
+ CASE_UNSIGNED_ALL_OP(AtomicXor, "atomic.xor")
+ CASE_UNSIGNED_ALL_OP(AtomicExchange, "atomic.xchng")
+ CASE_UNSIGNED_ALL_OP(AtomicCompareExchange, "atomic.cmpxchng")
+
+ // GC operations.
+ CASE_OP(StructNew, "struct.new")
+ CASE_OP(StructNewSub, "struct.new_sub")
+ CASE_OP(StructNewDefault, "struct.new_default")
+ CASE_OP(StructGet, "struct.get")
+ CASE_OP(StructGetS, "struct.get_s")
+ CASE_OP(StructGetU, "struct.get_u")
+ CASE_OP(StructSet, "struct.set")
+ CASE_OP(ArrayNew, "array.new")
+ CASE_OP(ArrayNewSub, "array.new_sub")
+ CASE_OP(ArrayNewDefault, "array.new_default")
+ CASE_OP(ArrayGet, "array.get")
+ CASE_OP(ArrayGetS, "array.get_s")
+ CASE_OP(ArrayGetU, "array.get_u")
+ CASE_OP(ArrayLen, "array.len")
+ CASE_OP(ArraySet, "array.set")
+ CASE_OP(I31New, "i31.new")
+ CASE_OP(I31GetS, "i31.get_s")
+ CASE_OP(I31GetU, "i31.get_u")
+ CASE_OP(RttCanon, "rtt.canon")
+ CASE_OP(RttSub, "rtt.sub")
+ CASE_OP(RefTest, "ref.test")
+ CASE_OP(RefCast, "ref.cast")
+ CASE_OP(BrOnCast, "br_on_cast")
+ CASE_OP(RefEq, "ref.eq")
+ CASE_OP(Let, "let")
+
+
+ case kNumericPrefix:
+ case kSimdPrefix:
+ case kAtomicPrefix:
+ case kGCPrefix:
+ return "unknown";
+ // clang-format on
+ }
+ // Even though the switch above handles all well-defined enum values,
+ // random modules (e.g. fuzzer generated) can call this function with
+ // random (invalid) opcodes. Handle those here:
+ return "invalid opcode";
+}
+
+#undef CASE_OP
+#undef CASE_I32_OP
+#undef CASE_I64_OP
+#undef CASE_F32_OP
+#undef CASE_F64_OP
+#undef CASE_REF_OP
+#undef CASE_F64x2_OP
+#undef CASE_F32x4_OP
+#undef CASE_I64x2_OP
+#undef CASE_I32x4_OP
+#undef CASE_I16x8_OP
+#undef CASE_I8x16_OP
+#undef CASE_S128_OP
+#undef CASE_S64x2_OP
+#undef CASE_S32x4_OP
+#undef CASE_S16x8_OP
+#undef CASE_S8x16_OP
+#undef CASE_INT_OP
+#undef CASE_FLOAT_OP
+#undef CASE_ALL_OP
+#undef CASE_SIMD_OP
+#undef CASE_SIMDI_OP
+#undef CASE_SIGN_OP
+#undef CASE_UNSIGNED_OP
+#undef CASE_UNSIGNED_ALL_OP
+#undef CASE_ALL_SIGN_OP
+#undef CASE_CONVERT_OP
+#undef CASE_CONVERT_SAT_OP
+#undef CASE_L32_OP
+#undef CASE_U32_OP
+
+// static
+constexpr bool WasmOpcodes::IsPrefixOpcode(WasmOpcode opcode) {
+ switch (opcode) {
+#define CHECK_PREFIX(name, opcode) case k##name##Prefix:
+ FOREACH_PREFIX(CHECK_PREFIX)
+#undef CHECK_PREFIX
+ return true;
+ default:
+ return false;
+ }
+}
+
+// static
+constexpr bool WasmOpcodes::IsControlOpcode(WasmOpcode opcode) {
+ switch (opcode) {
+#define CHECK_OPCODE(name, opcode, _) case kExpr##name:
+ FOREACH_CONTROL_OPCODE(CHECK_OPCODE)
+#undef CHECK_OPCODE
+ return true;
+ default:
+ return false;
+ }
+}
+
+// static
+constexpr bool WasmOpcodes::IsUnconditionalJump(WasmOpcode opcode) {
+ switch (opcode) {
+ case kExprUnreachable:
+ case kExprBr:
+ case kExprBrTable:
+ case kExprReturn:
+ return true;
+ default:
+ return false;
+ }
+}
+
+// static
+constexpr bool WasmOpcodes::IsBreakable(WasmOpcode opcode) {
+ switch (opcode) {
+ case kExprBlock:
+ case kExprTry:
+ case kExprCatch:
+ case kExprLoop:
+ case kExprElse:
+ return false;
+ default:
+ return true;
+ }
+}
+
+// static
+constexpr bool WasmOpcodes::IsExternRefOpcode(WasmOpcode opcode) {
+ switch (opcode) {
+ case kExprRefNull:
+ case kExprRefIsNull:
+ case kExprRefFunc:
+ case kExprRefAsNonNull:
+ return true;
+ default:
+ return false;
+ }
+}
+
+// static
+constexpr bool WasmOpcodes::IsThrowingOpcode(WasmOpcode opcode) {
+ // TODO(8729): Trapping opcodes are not yet considered to be throwing.
+ switch (opcode) {
+ case kExprThrow:
+ case kExprRethrow:
+ case kExprCallFunction:
+ case kExprCallIndirect:
+ return true;
+ default:
+ return false;
+ }
+}
+
+// static
+constexpr bool WasmOpcodes::IsSimdPostMvpOpcode(WasmOpcode opcode) {
+ switch (opcode) {
+#define CHECK_OPCODE(name, opcode, _) case kExpr##name:
+ FOREACH_SIMD_POST_MVP_OPCODE(CHECK_OPCODE)
+#undef CHECK_OPCODE
+ return true;
+ default:
+ return false;
+ }
+}
+
+namespace impl {
+
+#define DECLARE_SIG_ENUM(name, ...) kSigEnum_##name,
+enum WasmOpcodeSig : byte {
+ kSigEnum_None,
+ FOREACH_SIGNATURE(DECLARE_SIG_ENUM)
+};
+#undef DECLARE_SIG_ENUM
+#define DECLARE_SIG(name, ...) \
+ constexpr ValueType kTypes_##name[] = {__VA_ARGS__}; \
+ constexpr int kReturnsCount_##name = kTypes_##name[0] == kWasmStmt ? 0 : 1; \
+ constexpr FunctionSig kSig_##name( \
+ kReturnsCount_##name, static_cast<int>(arraysize(kTypes_##name)) - 1, \
+ kTypes_##name + (1 - kReturnsCount_##name));
+FOREACH_SIGNATURE(DECLARE_SIG)
+#undef DECLARE_SIG
+
+#define DECLARE_SIG_ENTRY(name, ...) &kSig_##name,
+constexpr const FunctionSig* kCachedSigs[] = {
+ nullptr, FOREACH_SIGNATURE(DECLARE_SIG_ENTRY)};
+#undef DECLARE_SIG_ENTRY
+
+constexpr WasmOpcodeSig GetShortOpcodeSigIndex(byte opcode) {
+#define CASE(name, opc, sig) opcode == opc ? kSigEnum_##sig:
+ return FOREACH_SIMPLE_OPCODE(CASE) FOREACH_SIMPLE_PROTOTYPE_OPCODE(CASE)
+ kSigEnum_None;
+#undef CASE
+}
+
+constexpr WasmOpcodeSig GetAsmJsOpcodeSigIndex(byte opcode) {
+#define CASE(name, opc, sig) opcode == opc ? kSigEnum_##sig:
+ return FOREACH_ASMJS_COMPAT_OPCODE(CASE) kSigEnum_None;
+#undef CASE
+}
+
+constexpr WasmOpcodeSig GetSimdOpcodeSigIndex(byte opcode) {
+#define CASE(name, opc, sig) opcode == (opc & 0xFF) ? kSigEnum_##sig:
+ return FOREACH_SIMD_0_OPERAND_OPCODE(CASE) FOREACH_SIMD_MEM_OPCODE(CASE)
+ kSigEnum_None;
+#undef CASE
+}
+
+constexpr WasmOpcodeSig GetAtomicOpcodeSigIndex(byte opcode) {
+#define CASE(name, opc, sig) opcode == (opc & 0xFF) ? kSigEnum_##sig:
+ return FOREACH_ATOMIC_OPCODE(CASE) FOREACH_ATOMIC_0_OPERAND_OPCODE(CASE)
+ kSigEnum_None;
+#undef CASE
+}
+
+constexpr WasmOpcodeSig GetNumericOpcodeSigIndex(byte opcode) {
+#define CASE(name, opc, sig) opcode == (opc & 0xFF) ? kSigEnum_##sig:
+ return FOREACH_NUMERIC_OPCODE(CASE) kSigEnum_None;
+#undef CASE
+}
+
+constexpr std::array<WasmOpcodeSig, 256> kShortSigTable =
+ base::make_array<256>(GetShortOpcodeSigIndex);
+constexpr std::array<WasmOpcodeSig, 256> kSimpleAsmjsExprSigTable =
+ base::make_array<256>(GetAsmJsOpcodeSigIndex);
+constexpr std::array<WasmOpcodeSig, 256> kSimdExprSigTable =
+ base::make_array<256>(GetSimdOpcodeSigIndex);
+constexpr std::array<WasmOpcodeSig, 256> kAtomicExprSigTable =
+ base::make_array<256>(GetAtomicOpcodeSigIndex);
+constexpr std::array<WasmOpcodeSig, 256> kNumericExprSigTable =
+ base::make_array<256>(GetNumericOpcodeSigIndex);
+
+} // namespace impl
+
+constexpr const FunctionSig* WasmOpcodes::Signature(WasmOpcode opcode) {
+ switch (opcode >> 8) {
+ case 0:
+ return impl::kCachedSigs[impl::kShortSigTable[opcode]];
+ case kSimdPrefix:
+ return impl::kCachedSigs[impl::kSimdExprSigTable[opcode & 0xFF]];
+ case kAtomicPrefix:
+ return impl::kCachedSigs[impl::kAtomicExprSigTable[opcode & 0xFF]];
+ case kNumericPrefix:
+ return impl::kCachedSigs[impl::kNumericExprSigTable[opcode & 0xFF]];
+ default:
+#if V8_HAS_CXX14_CONSTEXPR
+ UNREACHABLE(); // invalid prefix.
+#else
+ return nullptr;
+#endif
+ }
+}
+
+constexpr const FunctionSig* WasmOpcodes::AsmjsSignature(WasmOpcode opcode) {
+ CONSTEXPR_DCHECK(opcode < impl::kSimpleAsmjsExprSigTable.size());
+ return impl::kCachedSigs[impl::kSimpleAsmjsExprSigTable[opcode]];
+}
+
+constexpr MessageTemplate WasmOpcodes::TrapReasonToMessageId(
+ TrapReason reason) {
+ switch (reason) {
+#define TRAPREASON_TO_MESSAGE(name) \
+ case k##name: \
+ return MessageTemplate::kWasm##name;
+ FOREACH_WASM_TRAPREASON(TRAPREASON_TO_MESSAGE)
+#undef TRAPREASON_TO_MESSAGE
+ default:
+ return MessageTemplate::kNone;
+ }
+}
+
+const char* WasmOpcodes::TrapReasonMessage(TrapReason reason) {
+ return MessageFormatter::TemplateString(TrapReasonToMessageId(reason));
+}
+
+} // namespace wasm
+} // namespace internal
+} // namespace v8
+
+#endif // V8_WASM_WASM_OPCODES_INL_H_
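The new wasm-opcodes-inl.h builds its 256-entry signature tables at compile time: each Get*OpcodeSigIndex function expands, via the FOREACH_* macros, into a chain of "opcode == X ? kSigEnum_Y :" ternaries, and base::make_array<256> evaluates that function once per index. A self-contained sketch of the same pattern with a hand-rolled MakeArray; GetSigIndex, Sig and the opcode values are toy stand-ins, and V8's real helper lives in src/base/template-utils.h:

    #include <array>
    #include <cstddef>
    #include <utility>

    enum Sig : unsigned char { kNone, kUnary, kBinary };

    // Chained ternaries, the shape the CASE macros expand to: the first
    // matching "opcode == X ?" wins, everything else falls through to kNone.
    constexpr Sig GetSigIndex(unsigned char opcode) {
      return opcode == 0x20 ? kUnary :
             opcode == 0x21 ? kBinary :
             kNone;
    }

    // Minimal stand-in for base::make_array<N>(f): builds {f(0), ..., f(N-1)}.
    template <typename F, std::size_t... I>
    constexpr auto MakeArrayImpl(F f, std::index_sequence<I...>)
        -> std::array<Sig, sizeof...(I)> {
      return {{f(static_cast<unsigned char>(I))...}};
    }
    template <std::size_t N, typename F>
    constexpr auto MakeArray(F f) {
      return MakeArrayImpl(f, std::make_index_sequence<N>{});
    }

    constexpr std::array<Sig, 256> kSigTable = MakeArray<256>(GetSigIndex);

    // The whole table is a compile-time constant, so lookups can be checked
    // with static_assert and cost a single indexed load at runtime.
    static_assert(kSigTable[0x20] == kUnary, "table built at compile time");
    static_assert(kSigTable[0x00] == kNone, "unlisted opcodes map to kNone");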
diff --git a/chromium/v8/src/wasm/wasm-opcodes.cc b/chromium/v8/src/wasm/wasm-opcodes.cc
index 53869e86a58..1bf29e241ee 100644
--- a/chromium/v8/src/wasm/wasm-opcodes.cc
+++ b/chromium/v8/src/wasm/wasm-opcodes.cc
@@ -6,500 +6,14 @@
#include <array>
-#include "src/base/template-utils.h"
#include "src/codegen/signature.h"
-#include "src/execution/messages.h"
-#include "src/runtime/runtime.h"
#include "src/wasm/wasm-features.h"
+#include "src/wasm/wasm-opcodes-inl.h"
namespace v8 {
namespace internal {
namespace wasm {
-#define CASE_OP(name, str) \
- case kExpr##name: \
- return str;
-#define CASE_I32_OP(name, str) CASE_OP(I32##name, "i32." str)
-#define CASE_I64_OP(name, str) CASE_OP(I64##name, "i64." str)
-#define CASE_F32_OP(name, str) CASE_OP(F32##name, "f32." str)
-#define CASE_F64_OP(name, str) CASE_OP(F64##name, "f64." str)
-#define CASE_REF_OP(name, str) CASE_OP(Ref##name, "ref." str)
-#define CASE_F64x2_OP(name, str) CASE_OP(F64x2##name, "f64x2." str)
-#define CASE_F32x4_OP(name, str) CASE_OP(F32x4##name, "f32x4." str)
-#define CASE_I64x2_OP(name, str) CASE_OP(I64x2##name, "i64x2." str)
-#define CASE_I32x4_OP(name, str) CASE_OP(I32x4##name, "i32x4." str)
-#define CASE_I16x8_OP(name, str) CASE_OP(I16x8##name, "i16x8." str)
-#define CASE_I8x16_OP(name, str) CASE_OP(I8x16##name, "i8x16." str)
-#define CASE_S128_OP(name, str) CASE_OP(S128##name, "s128." str)
-#define CASE_S64x2_OP(name, str) CASE_OP(S64x2##name, "s64x2." str)
-#define CASE_S32x4_OP(name, str) CASE_OP(S32x4##name, "s32x4." str)
-#define CASE_S16x8_OP(name, str) CASE_OP(S16x8##name, "s16x8." str)
-#define CASE_S8x16_OP(name, str) CASE_OP(S8x16##name, "s8x16." str)
-#define CASE_S1x2_OP(name, str) CASE_OP(S1x2##name, "s1x2." str)
-#define CASE_S1x4_OP(name, str) CASE_OP(S1x4##name, "s1x4." str)
-#define CASE_S1x8_OP(name, str) CASE_OP(S1x8##name, "s1x8." str)
-#define CASE_S1x16_OP(name, str) CASE_OP(S1x16##name, "s1x16." str)
-#define CASE_INT_OP(name, str) CASE_I32_OP(name, str) CASE_I64_OP(name, str)
-#define CASE_FLOAT_OP(name, str) CASE_F32_OP(name, str) CASE_F64_OP(name, str)
-#define CASE_ALL_OP(name, str) CASE_FLOAT_OP(name, str) CASE_INT_OP(name, str)
-#define CASE_SIMD_OP(name, str) \
- CASE_F64x2_OP(name, str) CASE_I64x2_OP(name, str) CASE_F32x4_OP(name, str) \
- CASE_I32x4_OP(name, str) CASE_I16x8_OP(name, str) \
- CASE_I8x16_OP(name, str)
-#define CASE_SIMDF_OP(name, str) \
- CASE_F32x4_OP(name, str) CASE_F64x2_OP(name, str)
-#define CASE_SIMDI_OP(name, str) \
- CASE_I32x4_OP(name, str) CASE_I16x8_OP(name, str) CASE_I8x16_OP(name, str)
-#define CASE_SIGN_OP(TYPE, name, str) \
- CASE_##TYPE##_OP(name##S, str "_s") CASE_##TYPE##_OP(name##U, str "_u")
-#define CASE_UNSIGNED_OP(TYPE, name, str) CASE_##TYPE##_OP(name##U, str "_u")
-#define CASE_ALL_SIGN_OP(name, str) \
- CASE_FLOAT_OP(name, str) CASE_SIGN_OP(INT, name, str)
-#define CASE_CONVERT_OP(name, RES, SRC, src_suffix, str) \
- CASE_##RES##_OP(U##name##SRC, str "_" src_suffix "_u") \
- CASE_##RES##_OP(S##name##SRC, str "_" src_suffix "_s")
-#define CASE_CONVERT_SAT_OP(name, RES, SRC, src_suffix, str) \
- CASE_##RES##_OP(U##name##Sat##SRC, str "_sat_" src_suffix "_u") \
- CASE_##RES##_OP(S##name##Sat##SRC, str "_sat_" src_suffix "_s")
-#define CASE_L32_OP(name, str) \
- CASE_SIGN_OP(I32, name##8, str "8") \
- CASE_SIGN_OP(I32, name##16, str "16") \
- CASE_I32_OP(name, str "32")
-#define CASE_U32_OP(name, str) \
- CASE_I32_OP(name, str "32") \
- CASE_UNSIGNED_OP(I32, name##8, str "8") \
- CASE_UNSIGNED_OP(I32, name##16, str "16")
-#define CASE_UNSIGNED_ALL_OP(name, str) \
- CASE_U32_OP(name, str) \
- CASE_I64_OP(name, str "64") \
- CASE_UNSIGNED_OP(I64, name##8, str "8") \
- CASE_UNSIGNED_OP(I64, name##16, str "16") \
- CASE_UNSIGNED_OP(I64, name##32, str "32")
-
-const char* WasmOpcodes::OpcodeName(WasmOpcode opcode) {
- switch (opcode) {
- // clang-format off
-
- // Standard opcodes
- CASE_INT_OP(Eqz, "eqz")
- CASE_ALL_OP(Eq, "eq")
- CASE_ALL_OP(Ne, "ne")
- CASE_ALL_OP(Add, "add")
- CASE_ALL_OP(Sub, "sub")
- CASE_ALL_OP(Mul, "mul")
- CASE_ALL_SIGN_OP(Lt, "lt")
- CASE_ALL_SIGN_OP(Gt, "gt")
- CASE_ALL_SIGN_OP(Le, "le")
- CASE_ALL_SIGN_OP(Ge, "ge")
- CASE_INT_OP(Clz, "clz")
- CASE_INT_OP(Ctz, "ctz")
- CASE_INT_OP(Popcnt, "popcnt")
- CASE_ALL_SIGN_OP(Div, "div")
- CASE_SIGN_OP(INT, Rem, "rem")
- CASE_INT_OP(And, "and")
- CASE_INT_OP(Ior, "or")
- CASE_INT_OP(Xor, "xor")
- CASE_INT_OP(Shl, "shl")
- CASE_SIGN_OP(INT, Shr, "shr")
- CASE_INT_OP(Rol, "rol")
- CASE_INT_OP(Ror, "ror")
- CASE_FLOAT_OP(Abs, "abs")
- CASE_FLOAT_OP(Neg, "neg")
- CASE_FLOAT_OP(Ceil, "ceil")
- CASE_FLOAT_OP(Floor, "floor")
- CASE_FLOAT_OP(Trunc, "trunc")
- CASE_FLOAT_OP(NearestInt, "nearest")
- CASE_FLOAT_OP(Sqrt, "sqrt")
- CASE_FLOAT_OP(Min, "min")
- CASE_FLOAT_OP(Max, "max")
- CASE_FLOAT_OP(CopySign, "copysign")
- CASE_REF_OP(Null, "null")
- CASE_REF_OP(IsNull, "is_null")
- CASE_REF_OP(Func, "func")
- CASE_REF_OP(AsNonNull, "as_non_null")
- CASE_I32_OP(ConvertI64, "wrap_i64")
- CASE_CONVERT_OP(Convert, INT, F32, "f32", "trunc")
- CASE_CONVERT_OP(Convert, INT, F64, "f64", "trunc")
- CASE_CONVERT_OP(Convert, I64, I32, "i32", "extend")
- CASE_CONVERT_OP(Convert, F32, I32, "i32", "convert")
- CASE_CONVERT_OP(Convert, F32, I64, "i64", "convert")
- CASE_F32_OP(ConvertF64, "demote_f64")
- CASE_CONVERT_OP(Convert, F64, I32, "i32", "convert")
- CASE_CONVERT_OP(Convert, F64, I64, "i64", "convert")
- CASE_F64_OP(ConvertF32, "promote_f32")
- CASE_I32_OP(ReinterpretF32, "reinterpret_f32")
- CASE_I64_OP(ReinterpretF64, "reinterpret_f64")
- CASE_F32_OP(ReinterpretI32, "reinterpret_i32")
- CASE_F64_OP(ReinterpretI64, "reinterpret_i64")
- CASE_INT_OP(SExtendI8, "extend8_s")
- CASE_INT_OP(SExtendI16, "extend16_s")
- CASE_I64_OP(SExtendI32, "extend32_s")
- CASE_OP(Unreachable, "unreachable")
- CASE_OP(Nop, "nop")
- CASE_OP(Block, "block")
- CASE_OP(Loop, "loop")
- CASE_OP(If, "if")
- CASE_OP(Else, "else")
- CASE_OP(End, "end")
- CASE_OP(Br, "br")
- CASE_OP(BrIf, "br_if")
- CASE_OP(BrTable, "br_table")
- CASE_OP(Return, "return")
- CASE_OP(CallFunction, "call")
- CASE_OP(CallIndirect, "call_indirect")
- CASE_OP(ReturnCall, "return_call")
- CASE_OP(ReturnCallIndirect, "return_call_indirect")
- CASE_OP(BrOnNull, "br_on_null")
- CASE_OP(Drop, "drop")
- CASE_OP(Select, "select")
- CASE_OP(SelectWithType, "select")
- CASE_OP(LocalGet, "local.get")
- CASE_OP(LocalSet, "local.set")
- CASE_OP(LocalTee, "local.tee")
- CASE_OP(GlobalGet, "global.get")
- CASE_OP(GlobalSet, "global.set")
- CASE_OP(TableGet, "table.get")
- CASE_OP(TableSet, "table.set")
- CASE_ALL_OP(Const, "const")
- CASE_OP(MemorySize, "memory.size")
- CASE_OP(MemoryGrow, "memory.grow")
- CASE_ALL_OP(LoadMem, "load")
- CASE_SIGN_OP(INT, LoadMem8, "load8")
- CASE_SIGN_OP(INT, LoadMem16, "load16")
- CASE_SIGN_OP(I64, LoadMem32, "load32")
- CASE_S128_OP(LoadMem, "load128")
- CASE_ALL_OP(StoreMem, "store")
- CASE_INT_OP(StoreMem8, "store8")
- CASE_INT_OP(StoreMem16, "store16")
- CASE_I64_OP(StoreMem32, "store32")
- CASE_S128_OP(StoreMem, "store128")
-
- // Exception handling opcodes.
- CASE_OP(Try, "try")
- CASE_OP(Catch, "catch")
- CASE_OP(Throw, "throw")
- CASE_OP(Rethrow, "rethrow")
- CASE_OP(BrOnExn, "br_on_exn")
-
- // asm.js-only opcodes.
- CASE_F64_OP(Acos, "acos")
- CASE_F64_OP(Asin, "asin")
- CASE_F64_OP(Atan, "atan")
- CASE_F64_OP(Cos, "cos")
- CASE_F64_OP(Sin, "sin")
- CASE_F64_OP(Tan, "tan")
- CASE_F64_OP(Exp, "exp")
- CASE_F64_OP(Log, "log")
- CASE_F64_OP(Atan2, "atan2")
- CASE_F64_OP(Pow, "pow")
- CASE_F64_OP(Mod, "mod")
- CASE_F32_OP(AsmjsLoadMem, "asmjs_load")
- CASE_F64_OP(AsmjsLoadMem, "asmjs_load")
- CASE_L32_OP(AsmjsLoadMem, "asmjs_load")
- CASE_I32_OP(AsmjsStoreMem, "asmjs_store")
- CASE_F32_OP(AsmjsStoreMem, "asmjs_store")
- CASE_F64_OP(AsmjsStoreMem, "asmjs_store")
- CASE_I32_OP(AsmjsStoreMem8, "asmjs_store8")
- CASE_I32_OP(AsmjsStoreMem16, "asmjs_store16")
- CASE_SIGN_OP(I32, AsmjsDiv, "asmjs_div")
- CASE_SIGN_OP(I32, AsmjsRem, "asmjs_rem")
- CASE_I32_OP(AsmjsSConvertF32, "asmjs_convert_f32_s")
- CASE_I32_OP(AsmjsUConvertF32, "asmjs_convert_f32_u")
- CASE_I32_OP(AsmjsSConvertF64, "asmjs_convert_f64_s")
- CASE_I32_OP(AsmjsUConvertF64, "asmjs_convert_f64_u")
-
- // Numeric Opcodes.
- CASE_CONVERT_SAT_OP(Convert, I32, F32, "f32", "trunc")
- CASE_CONVERT_SAT_OP(Convert, I32, F64, "f64", "trunc")
- CASE_CONVERT_SAT_OP(Convert, I64, F32, "f32", "trunc")
- CASE_CONVERT_SAT_OP(Convert, I64, F64, "f64", "trunc")
- CASE_OP(MemoryInit, "memory.init")
- CASE_OP(DataDrop, "data.drop")
- CASE_OP(MemoryCopy, "memory.copy")
- CASE_OP(MemoryFill, "memory.fill")
- CASE_OP(TableInit, "table.init")
- CASE_OP(ElemDrop, "elem.drop")
- CASE_OP(TableCopy, "table.copy")
- CASE_OP(TableGrow, "table.grow")
- CASE_OP(TableSize, "table.size")
- CASE_OP(TableFill, "table.fill")
-
- // SIMD opcodes.
- CASE_SIMD_OP(Splat, "splat")
- CASE_SIMD_OP(Neg, "neg")
- CASE_SIMDF_OP(Sqrt, "sqrt")
- CASE_SIMD_OP(Eq, "eq")
- CASE_SIMD_OP(Ne, "ne")
- CASE_SIMD_OP(Add, "add")
- CASE_SIMD_OP(Sub, "sub")
- CASE_SIMD_OP(Mul, "mul")
- CASE_SIMDF_OP(Div, "div")
- CASE_SIMDF_OP(Lt, "lt")
- CASE_SIMDF_OP(Le, "le")
- CASE_SIMDF_OP(Gt, "gt")
- CASE_SIMDF_OP(Ge, "ge")
- CASE_SIMDF_OP(Abs, "abs")
- CASE_F32x4_OP(AddHoriz, "add_horizontal")
- CASE_F32x4_OP(RecipApprox, "recip_approx")
- CASE_F32x4_OP(RecipSqrtApprox, "recip_sqrt_approx")
- CASE_SIMDF_OP(Min, "min")
- CASE_SIMDF_OP(Max, "max")
- CASE_CONVERT_OP(Convert, F32x4, I32x4, "i32", "convert")
- CASE_CONVERT_OP(Convert, I32x4, F32x4, "f32", "convert")
- CASE_CONVERT_OP(Convert, I32x4, I16x8Low, "i32", "convert")
- CASE_CONVERT_OP(Convert, I32x4, I16x8High, "i32", "convert")
- CASE_CONVERT_OP(Convert, I16x8, I32x4, "i32", "convert")
- CASE_CONVERT_OP(Convert, I16x8, I8x16Low, "i32", "convert")
- CASE_CONVERT_OP(Convert, I16x8, I8x16High, "i32", "convert")
- CASE_CONVERT_OP(Convert, I8x16, I16x8, "i32", "convert")
- CASE_SIMDF_OP(ExtractLane, "extract_lane")
- CASE_SIMDF_OP(ReplaceLane, "replace_lane")
- CASE_I64x2_OP(ExtractLane, "extract_lane")
- CASE_I64x2_OP(ReplaceLane, "replace_lane")
- CASE_I32x4_OP(ExtractLane, "extract_lane")
- CASE_SIGN_OP(I16x8, ExtractLane, "extract_lane")
- CASE_SIGN_OP(I8x16, ExtractLane, "extract_lane")
- CASE_SIMDI_OP(ReplaceLane, "replace_lane")
- CASE_SIGN_OP(SIMDI, Min, "min")
- CASE_SIGN_OP(I64x2, Min, "min")
- CASE_SIGN_OP(SIMDI, Max, "max")
- CASE_SIGN_OP(I64x2, Max, "max")
- CASE_SIGN_OP(SIMDI, Lt, "lt")
- CASE_SIGN_OP(I64x2, Lt, "lt")
- CASE_SIGN_OP(SIMDI, Le, "le")
- CASE_SIGN_OP(I64x2, Le, "le")
- CASE_SIGN_OP(SIMDI, Gt, "gt")
- CASE_SIGN_OP(I64x2, Gt, "gt")
- CASE_SIGN_OP(SIMDI, Ge, "ge")
- CASE_SIGN_OP(I64x2, Ge, "ge")
- CASE_SIGN_OP(SIMDI, Shr, "shr")
- CASE_SIGN_OP(I64x2, Shr, "shr")
- CASE_SIMDI_OP(Shl, "shl")
- CASE_I64x2_OP(Shl, "shl")
- CASE_I32x4_OP(AddHoriz, "add_horizontal")
- CASE_I16x8_OP(AddHoriz, "add_horizontal")
- CASE_SIGN_OP(I16x8, AddSaturate, "add_saturate")
- CASE_SIGN_OP(I8x16, AddSaturate, "add_saturate")
- CASE_SIGN_OP(I16x8, SubSaturate, "sub_saturate")
- CASE_SIGN_OP(I8x16, SubSaturate, "sub_saturate")
- CASE_S128_OP(And, "and")
- CASE_S128_OP(Or, "or")
- CASE_S128_OP(Xor, "xor")
- CASE_S128_OP(Not, "not")
- CASE_S128_OP(Select, "select")
- CASE_S128_OP(AndNot, "andnot")
- CASE_S8x16_OP(Swizzle, "swizzle")
- CASE_S8x16_OP(Shuffle, "shuffle")
- CASE_S1x2_OP(AnyTrue, "any_true")
- CASE_S1x2_OP(AllTrue, "all_true")
- CASE_S1x4_OP(AnyTrue, "any_true")
- CASE_S1x4_OP(AllTrue, "all_true")
- CASE_S1x8_OP(AnyTrue, "any_true")
- CASE_S1x8_OP(AllTrue, "all_true")
- CASE_S1x16_OP(AnyTrue, "any_true")
- CASE_S1x16_OP(AllTrue, "all_true")
- CASE_SIMDF_OP(Qfma, "qfma")
- CASE_SIMDF_OP(Qfms, "qfms")
-
- CASE_S8x16_OP(LoadSplat, "load_splat")
- CASE_S16x8_OP(LoadSplat, "load_splat")
- CASE_S32x4_OP(LoadSplat, "load_splat")
- CASE_S64x2_OP(LoadSplat, "load_splat")
- CASE_I16x8_OP(Load8x8S, "load8x8_s")
- CASE_I16x8_OP(Load8x8U, "load8x8_u")
- CASE_I32x4_OP(Load16x4S, "load16x4_s")
- CASE_I32x4_OP(Load16x4U, "load16x4_u")
- CASE_I64x2_OP(Load32x2S, "load32x2_s")
- CASE_I64x2_OP(Load32x2U, "load32x2_u")
-
- CASE_I8x16_OP(RoundingAverageU, "avgr_u")
- CASE_I16x8_OP(RoundingAverageU, "avgr_u")
-
- CASE_I8x16_OP(Abs, "abs")
- CASE_I16x8_OP(Abs, "abs")
- CASE_I32x4_OP(Abs, "abs")
-
- CASE_I8x16_OP(BitMask, "bitmask")
- CASE_I16x8_OP(BitMask, "bitmask")
- CASE_I32x4_OP(BitMask, "bitmask")
-
- CASE_F32x4_OP(Pmin, "pmin")
- CASE_F32x4_OP(Pmax, "pmax")
- CASE_F64x2_OP(Pmin, "pmin")
- CASE_F64x2_OP(Pmax, "pmax")
-
- // Atomic operations.
- CASE_OP(AtomicNotify, "atomic.notify")
- CASE_INT_OP(AtomicWait, "atomic.wait")
- CASE_OP(AtomicFence, "atomic.fence")
- CASE_UNSIGNED_ALL_OP(AtomicLoad, "atomic.load")
- CASE_UNSIGNED_ALL_OP(AtomicStore, "atomic.store")
- CASE_UNSIGNED_ALL_OP(AtomicAdd, "atomic.add")
- CASE_UNSIGNED_ALL_OP(AtomicSub, "atomic.sub")
- CASE_UNSIGNED_ALL_OP(AtomicAnd, "atomic.and")
- CASE_UNSIGNED_ALL_OP(AtomicOr, "atomic.or")
- CASE_UNSIGNED_ALL_OP(AtomicXor, "atomic.xor")
- CASE_UNSIGNED_ALL_OP(AtomicExchange, "atomic.xchng")
- CASE_UNSIGNED_ALL_OP(AtomicCompareExchange, "atomic.cmpxchng")
-
- // GC operations.
- CASE_OP(StructNew, "struct.new")
- CASE_OP(StructNewSub, "struct.new_sub")
- CASE_OP(StructNewDefault, "struct.new_default")
- CASE_OP(StructGet, "struct.get")
- CASE_OP(StructGetS, "struct.get_s")
- CASE_OP(StructGetU, "struct.get_u")
- CASE_OP(StructSet, "struct.set")
- CASE_OP(ArrayNew, "array.new")
- CASE_OP(ArrayNewSub, "array.new_sub")
- CASE_OP(ArrayNewDefault, "array.new_default")
- CASE_OP(ArrayGet, "array.get")
- CASE_OP(ArrayGetS, "array.get_s")
- CASE_OP(ArrayGetU, "array.get_u")
- CASE_OP(ArrayLen, "array.len")
- CASE_OP(ArraySet, "array.set")
- CASE_OP(I31New, "i31.new")
- CASE_OP(I31GetS, "i31.get_s")
- CASE_OP(I31GetU, "i31.get_u")
- CASE_OP(RttGet, "rtt.get")
- CASE_OP(RttSub, "rtt.sub")
- CASE_OP(RefTest, "ref.test")
- CASE_OP(RefCast, "ref.cast")
- CASE_OP(BrOnCast, "br_on_cast")
- CASE_OP(RefEq, "ref.eq")
-
-
- case kNumericPrefix:
- case kSimdPrefix:
- case kAtomicPrefix:
- case kGCPrefix:
- return "unknown";
- // clang-format on
- }
- // Even though the switch above handles all well-defined enum values,
- // random modules (e.g. fuzzer generated) can call this function with
- // random (invalid) opcodes. Handle those here:
- return "invalid opcode";
-}
-
-#undef CASE_OP
-#undef CASE_I32_OP
-#undef CASE_I64_OP
-#undef CASE_F32_OP
-#undef CASE_F64_OP
-#undef CASE_REF_OP
-#undef CASE_F64x2_OP
-#undef CASE_F32x4_OP
-#undef CASE_I64x2_OP
-#undef CASE_I32x4_OP
-#undef CASE_I16x8_OP
-#undef CASE_I8x16_OP
-#undef CASE_S128_OP
-#undef CASE_S64x2_OP
-#undef CASE_S32x4_OP
-#undef CASE_S16x8_OP
-#undef CASE_S8x16_OP
-#undef CASE_S1x2_OP
-#undef CASE_S1x4_OP
-#undef CASE_S1x8_OP
-#undef CASE_S1x16_OP
-#undef CASE_INT_OP
-#undef CASE_FLOAT_OP
-#undef CASE_ALL_OP
-#undef CASE_SIMD_OP
-#undef CASE_SIMDI_OP
-#undef CASE_SIGN_OP
-#undef CASE_UNSIGNED_OP
-#undef CASE_UNSIGNED_ALL_OP
-#undef CASE_ALL_SIGN_OP
-#undef CASE_CONVERT_OP
-#undef CASE_CONVERT_SAT_OP
-#undef CASE_L32_OP
-#undef CASE_U32_OP
-
-bool WasmOpcodes::IsPrefixOpcode(WasmOpcode opcode) {
- switch (opcode) {
-#define CHECK_PREFIX(name, opcode) case k##name##Prefix:
- FOREACH_PREFIX(CHECK_PREFIX)
-#undef CHECK_PREFIX
- return true;
- default:
- return false;
- }
-}
-
-bool WasmOpcodes::IsControlOpcode(WasmOpcode opcode) {
- switch (opcode) {
-#define CHECK_OPCODE(name, opcode, _) case kExpr##name:
- FOREACH_CONTROL_OPCODE(CHECK_OPCODE)
-#undef CHECK_OPCODE
- return true;
- default:
- return false;
- }
-}
-
-bool WasmOpcodes::IsUnconditionalJump(WasmOpcode opcode) {
- switch (opcode) {
- case kExprUnreachable:
- case kExprBr:
- case kExprBrTable:
- case kExprReturn:
- return true;
- default:
- return false;
- }
-}
-
-bool WasmOpcodes::IsBreakable(WasmOpcode opcode) {
- switch (opcode) {
- case kExprBlock:
- case kExprTry:
- case kExprCatch:
- case kExprLoop:
- case kExprElse:
- return false;
- default:
- return true;
- }
-}
-
-bool WasmOpcodes::IsAnyRefOpcode(WasmOpcode opcode) {
- switch (opcode) {
- case kExprRefNull:
- case kExprRefIsNull:
- case kExprRefFunc:
- case kExprRefAsNonNull:
- return true;
- default:
- return false;
- }
-}
-
-bool WasmOpcodes::IsThrowingOpcode(WasmOpcode opcode) {
- // TODO(8729): Trapping opcodes are not yet considered to be throwing.
- switch (opcode) {
- case kExprThrow:
- case kExprRethrow:
- case kExprCallFunction:
- case kExprCallIndirect:
- return true;
- default:
- return false;
- }
-}
-
-bool WasmOpcodes::IsSimdPostMvpOpcode(WasmOpcode opcode) {
- switch (opcode) {
-#define CHECK_OPCODE(name, opcode, _) case kExpr##name:
- FOREACH_SIMD_POST_MVP_OPCODE(CHECK_OPCODE)
-#undef CHECK_OPCODE
- return true;
- default:
- return false;
- }
-}
-
std::ostream& operator<<(std::ostream& os, const FunctionSig& sig) {
if (sig.return_count() == 0) os << "v";
for (auto ret : sig.returns()) {
@@ -528,95 +42,6 @@ bool IsJSCompatibleSignature(const FunctionSig* sig,
return true;
}
-namespace {
-
-#define DECLARE_SIG_ENUM(name, ...) kSigEnum_##name,
-enum WasmOpcodeSig : byte {
- kSigEnum_None,
- FOREACH_SIGNATURE(DECLARE_SIG_ENUM)
-};
-#undef DECLARE_SIG_ENUM
-#define DECLARE_SIG(name, ...) \
- constexpr ValueType kTypes_##name[] = {__VA_ARGS__}; \
- constexpr int kReturnsCount_##name = kTypes_##name[0] == kWasmStmt ? 0 : 1; \
- constexpr FunctionSig kSig_##name( \
- kReturnsCount_##name, static_cast<int>(arraysize(kTypes_##name)) - 1, \
- kTypes_##name + (1 - kReturnsCount_##name));
-FOREACH_SIGNATURE(DECLARE_SIG)
-#undef DECLARE_SIG
-
-#define DECLARE_SIG_ENTRY(name, ...) &kSig_##name,
-constexpr const FunctionSig* kCachedSigs[] = {
- nullptr, FOREACH_SIGNATURE(DECLARE_SIG_ENTRY)};
-#undef DECLARE_SIG_ENTRY
-
-constexpr WasmOpcodeSig GetShortOpcodeSigIndex(byte opcode) {
-#define CASE(name, opc, sig) opcode == opc ? kSigEnum_##sig:
- return FOREACH_SIMPLE_OPCODE(CASE) FOREACH_SIMPLE_PROTOTYPE_OPCODE(CASE)
- kSigEnum_None;
-#undef CASE
-}
-
-constexpr WasmOpcodeSig GetAsmJsOpcodeSigIndex(byte opcode) {
-#define CASE(name, opc, sig) opcode == opc ? kSigEnum_##sig:
- return FOREACH_ASMJS_COMPAT_OPCODE(CASE) kSigEnum_None;
-#undef CASE
-}
-
-constexpr WasmOpcodeSig GetSimdOpcodeSigIndex(byte opcode) {
-#define CASE(name, opc, sig) opcode == (opc & 0xFF) ? kSigEnum_##sig:
- return FOREACH_SIMD_0_OPERAND_OPCODE(CASE) FOREACH_SIMD_MEM_OPCODE(CASE)
- kSigEnum_None;
-#undef CASE
-}
-
-constexpr WasmOpcodeSig GetAtomicOpcodeSigIndex(byte opcode) {
-#define CASE(name, opc, sig) opcode == (opc & 0xFF) ? kSigEnum_##sig:
- return FOREACH_ATOMIC_OPCODE(CASE) FOREACH_ATOMIC_0_OPERAND_OPCODE(CASE)
- kSigEnum_None;
-#undef CASE
-}
-
-constexpr WasmOpcodeSig GetNumericOpcodeSigIndex(byte opcode) {
-#define CASE(name, opc, sig) opcode == (opc & 0xFF) ? kSigEnum_##sig:
- return FOREACH_NUMERIC_OPCODE(CASE) kSigEnum_None;
-#undef CASE
-}
-
-constexpr std::array<WasmOpcodeSig, 256> kShortSigTable =
- base::make_array<256>(GetShortOpcodeSigIndex);
-constexpr std::array<WasmOpcodeSig, 256> kSimpleAsmjsExprSigTable =
- base::make_array<256>(GetAsmJsOpcodeSigIndex);
-constexpr std::array<WasmOpcodeSig, 256> kSimdExprSigTable =
- base::make_array<256>(GetSimdOpcodeSigIndex);
-constexpr std::array<WasmOpcodeSig, 256> kAtomicExprSigTable =
- base::make_array<256>(GetAtomicOpcodeSigIndex);
-constexpr std::array<WasmOpcodeSig, 256> kNumericExprSigTable =
- base::make_array<256>(GetNumericOpcodeSigIndex);
-
-} // namespace
-
-const FunctionSig* WasmOpcodes::Signature(WasmOpcode opcode) {
- switch (opcode >> 8) {
- case 0:
- return kCachedSigs[kShortSigTable[opcode]];
- case kSimdPrefix:
- return kCachedSigs[kSimdExprSigTable[opcode & 0xFF]];
- case kAtomicPrefix:
- return kCachedSigs[kAtomicExprSigTable[opcode & 0xFF]];
- case kNumericPrefix:
- return kCachedSigs[kNumericExprSigTable[opcode & 0xFF]];
- default:
- UNREACHABLE(); // invalid prefix.
- return nullptr;
- }
-}
-
-const FunctionSig* WasmOpcodes::AsmjsSignature(WasmOpcode opcode) {
- DCHECK_GT(kSimpleAsmjsExprSigTable.size(), opcode);
- return kCachedSigs[kSimpleAsmjsExprSigTable[opcode]];
-}
-
// Define constexpr arrays.
constexpr uint8_t LoadType::kLoadSizeLog2[];
constexpr ValueType LoadType::kValueType[];
@@ -625,21 +50,6 @@ constexpr uint8_t StoreType::kStoreSizeLog2[];
constexpr ValueType StoreType::kValueType[];
constexpr MachineRepresentation StoreType::kMemRep[];
-MessageTemplate WasmOpcodes::TrapReasonToMessageId(TrapReason reason) {
- switch (reason) {
-#define TRAPREASON_TO_MESSAGE(name) \
- case k##name: \
- return MessageTemplate::kWasm##name;
- FOREACH_WASM_TRAPREASON(TRAPREASON_TO_MESSAGE)
-#undef TRAPREASON_TO_MESSAGE
- default:
- return MessageTemplate::kNone;
- }
-}
-
-const char* WasmOpcodes::TrapReasonMessage(TrapReason reason) {
- return MessageFormatter::TemplateString(TrapReasonToMessageId(reason));
-}
} // namespace wasm
} // namespace internal
} // namespace v8
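With the table machinery moved into the -inl.h header, WasmOpcodes::Signature dispatches in two levels: opcode >> 8 selects the opcode space (0 for single-byte opcodes, otherwise the prefix byte), and opcode & 0xFF indexes the selected table. A toy version of that dispatch with made-up table contents; Sig, MakeTable and the sample opcodes are illustrative, and the sketch needs C++17 for the constexpr std::array mutation:

    #include <array>
    #include <cstdint>

    enum Sig : unsigned char { kNone, kSigA, kSigB };

    constexpr unsigned kSimdPrefix = 0xfd;

    // Per-prefix 256-entry tables; the real tables are generated from the
    // opcode macro lists, these are filled by hand for two sample opcodes.
    constexpr std::array<Sig, 256> MakeTable(unsigned idx, Sig sig) {
      std::array<Sig, 256> t{};
      t[idx] = sig;
      return t;
    }
    constexpr auto kShortTable = MakeTable(0x20, kSigA);
    constexpr auto kSimdTable = MakeTable(0x0b, kSigB);

    constexpr Sig Signature(uint32_t opcode) {
      switch (opcode >> 8) {                 // high byte selects the space
        case 0:
          return kShortTable[opcode];
        case kSimdPrefix:
          return kSimdTable[opcode & 0xFF];  // low byte indexes within it
        default:
          return kNone;                      // unknown prefix
      }
    }

    static_assert(Signature(0x20) == kSigA, "single-byte opcode");
    static_assert(Signature(0xfd0b) == kSigB, "prefixed opcode 0xfd 0x0b");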
diff --git a/chromium/v8/src/wasm/wasm-opcodes.h b/chromium/v8/src/wasm/wasm-opcodes.h
index 8a17b9984e8..4728ee76b0c 100644
--- a/chromium/v8/src/wasm/wasm-opcodes.h
+++ b/chromium/v8/src/wasm/wasm-opcodes.h
@@ -38,7 +38,8 @@ bool IsJSCompatibleSignature(const FunctionSig* sig, const WasmFeatures&);
V(BrIf, 0x0d, _) \
V(BrTable, 0x0e, _) \
V(Return, 0x0f, _) \
- V(BrOnNull, 0xd4, _) /* gc prototype */
+ V(Let, 0x17, _ /* gc prototype */) \
+ V(BrOnNull, 0xd4, _ /* gc prototype */)
// Constants, locals, globals, and calls.
#define FOREACH_MISC_OPCODE(V) \
@@ -61,6 +62,7 @@ bool IsJSCompatibleSignature(const FunctionSig* sig, const WasmFeatures&);
V(F32Const, 0x43, _) \
V(F64Const, 0x44, _) \
V(RefNull, 0xd0, _) \
+ V(RefIsNull, 0xd1, _) \
V(RefFunc, 0xd2, _) \
V(RefAsNonNull, 0xd3, _)
@@ -229,9 +231,7 @@ bool IsJSCompatibleSignature(const FunctionSig* sig, const WasmFeatures&);
V(I64SExtendI16, 0xc3, l_l) \
V(I64SExtendI32, 0xc4, l_l)
-#define FOREACH_SIMPLE_PROTOTYPE_OPCODE(V) \
- V(RefIsNull, 0xd1, i_r) \
- V(RefEq, 0xd5, i_rr) // made-up opcode, guessing future spec (GC)
+#define FOREACH_SIMPLE_PROTOTYPE_OPCODE(V) V(RefEq, 0xd5, i_qq)
// For compatibility with Asm.js.
// These opcodes are not spec'ed (or visible) externally; the idea is
@@ -343,8 +343,8 @@ bool IsJSCompatibleSignature(const FunctionSig* sig, const WasmFeatures&);
V(S128Select, 0xfd52, s_sss) \
V(I8x16Abs, 0xfd60, s_s) \
V(I8x16Neg, 0xfd61, s_s) \
- V(S1x16AnyTrue, 0xfd62, i_s) \
- V(S1x16AllTrue, 0xfd63, i_s) \
+ V(V8x16AnyTrue, 0xfd62, i_s) \
+ V(V8x16AllTrue, 0xfd63, i_s) \
V(I8x16SConvertI16x8, 0xfd65, s_ss) \
V(I8x16UConvertI16x8, 0xfd66, s_ss) \
V(I8x16Shl, 0xfd6b, s_si) \
@@ -363,8 +363,8 @@ bool IsJSCompatibleSignature(const FunctionSig* sig, const WasmFeatures&);
V(I8x16RoundingAverageU, 0xfd7b, s_ss) \
V(I16x8Abs, 0xfd80, s_s) \
V(I16x8Neg, 0xfd81, s_s) \
- V(S1x8AnyTrue, 0xfd82, i_s) \
- V(S1x8AllTrue, 0xfd83, i_s) \
+ V(V16x8AnyTrue, 0xfd82, i_s) \
+ V(V16x8AllTrue, 0xfd83, i_s) \
V(I16x8SConvertI32x4, 0xfd85, s_ss) \
V(I16x8UConvertI32x4, 0xfd86, s_ss) \
V(I16x8SConvertI8x16Low, 0xfd87, s_s) \
@@ -388,8 +388,8 @@ bool IsJSCompatibleSignature(const FunctionSig* sig, const WasmFeatures&);
V(I16x8RoundingAverageU, 0xfd9b, s_ss) \
V(I32x4Abs, 0xfda0, s_s) \
V(I32x4Neg, 0xfda1, s_s) \
- V(S1x4AnyTrue, 0xfda2, i_s) \
- V(S1x4AllTrue, 0xfda3, i_s) \
+ V(V32x4AnyTrue, 0xfda2, i_s) \
+ V(V32x4AllTrue, 0xfda3, i_s) \
V(I32x4SConvertI16x8Low, 0xfda7, s_s) \
V(I32x4SConvertI16x8High, 0xfda8, s_s) \
V(I32x4UConvertI16x8Low, 0xfda9, s_s) \
@@ -439,8 +439,8 @@ bool IsJSCompatibleSignature(const FunctionSig* sig, const WasmFeatures&);
V(I8x16BitMask, 0xfd64, i_s) \
V(I16x8BitMask, 0xfd84, i_s) \
V(I32x4BitMask, 0xfda4, i_s) \
- V(S1x2AnyTrue, 0xfdc2, i_s) \
- V(S1x2AllTrue, 0xfdc3, i_s) \
+ V(V64x2AnyTrue, 0xfdc2, i_s) \
+ V(V64x2AllTrue, 0xfdc3, i_s) \
V(I64x2Eq, 0xfdc0, s_ss) \
V(I64x2Ne, 0xfdc4, s_ss) \
V(I64x2LtS, 0xfdc5, s_ss) \
@@ -453,21 +453,30 @@ bool IsJSCompatibleSignature(const FunctionSig* sig, const WasmFeatures&);
V(I64x2GeU, 0xfdd0, s_ss) \
V(I64x2MinS, 0xfdd6, s_ss) \
V(I64x2MinU, 0xfdd7, s_ss) \
- V(I64x2MaxS, 0xfdd8, s_ss) \
- V(I64x2MaxU, 0xfdd9, s_ss) \
+ V(I64x2MaxS, 0xfde2, s_ss) \
+ V(I64x2MaxU, 0xfdee, s_ss) \
V(F32x4Qfma, 0xfdfc, s_sss) \
V(F32x4Qfms, 0xfdfd, s_sss) \
V(F64x2Qfma, 0xfdfe, s_sss) \
V(F64x2Qfms, 0xfdff, s_sss) \
V(I16x8AddHoriz, 0xfdaf, s_ss) \
V(I32x4AddHoriz, 0xfdb0, s_ss) \
+ V(I32x4DotI16x8S, 0xfdba, s_ss) \
V(F32x4AddHoriz, 0xfdb2, s_ss) \
V(F32x4RecipApprox, 0xfdb3, s_s) \
- V(F32x4RecipSqrtApprox, 0xfdba, s_s) \
- V(F32x4Pmin, 0xfdda, s_ss) \
- V(F32x4Pmax, 0xfddb, s_ss) \
- V(F64x2Pmin, 0xfddc, s_ss) \
- V(F64x2Pmax, 0xfddd, s_ss)
+ V(F32x4RecipSqrtApprox, 0xfdbc, s_s) \
+ V(F32x4Pmin, 0xfdea, s_ss) \
+ V(F32x4Pmax, 0xfdeb, s_ss) \
+ V(F32x4Ceil, 0xfdd8, s_s) \
+ V(F32x4Floor, 0xfdd9, s_s) \
+ V(F32x4Trunc, 0xfdda, s_s) \
+ V(F32x4NearestInt, 0xfddb, s_s) \
+ V(F64x2Pmin, 0xfdf6, s_ss) \
+ V(F64x2Pmax, 0xfdf7, s_ss) \
+ V(F64x2Ceil, 0xfddc, s_s) \
+ V(F64x2Floor, 0xfddd, s_s) \
+ V(F64x2Trunc, 0xfdde, s_s) \
+ V(F64x2NearestInt, 0xfddf, s_s)
#define FOREACH_SIMD_1_OPERAND_1_PARAM_OPCODE(V) \
V(I8x16ExtractLaneS, 0xfd15, _) \
@@ -495,25 +504,28 @@ bool IsJSCompatibleSignature(const FunctionSig* sig, const WasmFeatures&);
FOREACH_SIMD_1_OPERAND_1_PARAM_OPCODE(V) \
FOREACH_SIMD_1_OPERAND_2_PARAM_OPCODE(V)
-#define FOREACH_NUMERIC_OPCODE(V) \
- V(I32SConvertSatF32, 0xfc00, i_f) \
- V(I32UConvertSatF32, 0xfc01, i_f) \
- V(I32SConvertSatF64, 0xfc02, i_d) \
- V(I32UConvertSatF64, 0xfc03, i_d) \
- V(I64SConvertSatF32, 0xfc04, l_f) \
- V(I64UConvertSatF32, 0xfc05, l_f) \
- V(I64SConvertSatF64, 0xfc06, l_d) \
- V(I64UConvertSatF64, 0xfc07, l_d) \
- V(MemoryInit, 0xfc08, v_iii) \
- V(DataDrop, 0xfc09, v_v) \
- V(MemoryCopy, 0xfc0a, v_iii) \
- V(MemoryFill, 0xfc0b, v_iii) \
- V(TableInit, 0xfc0c, v_iii) \
- V(ElemDrop, 0xfc0d, v_v) \
- V(TableCopy, 0xfc0e, v_iii) \
- V(TableGrow, 0xfc0f, i_ai) \
- V(TableSize, 0xfc10, i_v) \
- /*TableFill is polymorph in the second parameter. It's anyref or funcref.*/ \
+#define FOREACH_NUMERIC_OPCODE(V) \
+ V(I32SConvertSatF32, 0xfc00, i_f) \
+ V(I32UConvertSatF32, 0xfc01, i_f) \
+ V(I32SConvertSatF64, 0xfc02, i_d) \
+ V(I32UConvertSatF64, 0xfc03, i_d) \
+ V(I64SConvertSatF32, 0xfc04, l_f) \
+ V(I64UConvertSatF32, 0xfc05, l_f) \
+ V(I64SConvertSatF64, 0xfc06, l_d) \
+ V(I64UConvertSatF64, 0xfc07, l_d) \
+ V(MemoryInit, 0xfc08, v_iii) \
+ V(DataDrop, 0xfc09, v_v) \
+ V(MemoryCopy, 0xfc0a, v_iii) \
+ V(MemoryFill, 0xfc0b, v_iii) \
+ V(TableInit, 0xfc0c, v_iii) \
+ V(ElemDrop, 0xfc0d, v_v) \
+ V(TableCopy, 0xfc0e, v_iii) \
+ /* TableGrow is polymorphic in the first parameter. */ \
+ /* It's whatever the table type is. */ \
+ V(TableGrow, 0xfc0f, i_ci) \
+ V(TableSize, 0xfc10, i_v) \
+ /* TableFill is polymorphic in the second parameter. */ \
+ /* It's whatever the table type is. */ \
V(TableFill, 0xfc11, v_iii)
#define FOREACH_ATOMIC_OPCODE(V) \
@@ -605,7 +617,7 @@ bool IsJSCompatibleSignature(const FunctionSig* sig, const WasmFeatures&);
V(I31New, 0xfb20, _) \
V(I31GetS, 0xfb21, _) \
V(I31GetU, 0xfb22, _) \
- V(RttGet, 0xfb30, _) \
+ V(RttCanon, 0xfb30, _) \
V(RttSub, 0xfb31, _) \
V(RefTest, 0xfb40, _) \
V(RefCast, 0xfb41, _) \
@@ -674,9 +686,9 @@ bool IsJSCompatibleSignature(const FunctionSig* sig, const WasmFeatures&);
V(l_ill, kWasmI64, kWasmI32, kWasmI64, kWasmI64) \
V(i_iil, kWasmI32, kWasmI32, kWasmI32, kWasmI64) \
V(i_ill, kWasmI32, kWasmI32, kWasmI64, kWasmI64) \
- V(i_r, kWasmI32, kWasmAnyRef) \
- V(i_ai, kWasmI32, kWasmFuncRef, kWasmI32) \
- V(i_rr, kWasmI32, kWasmEqRef, kWasmEqRef)
+ V(i_e, kWasmI32, kWasmExternRef) \
+ V(i_ci, kWasmI32, kWasmFuncRef, kWasmI32) \
+ V(i_qq, kWasmI32, kWasmEqRef, kWasmEqRef)
#define FOREACH_SIMD_SIGNATURE(V) \
V(s_s, kWasmS128, kWasmS128) \
@@ -716,21 +728,21 @@ enum TrapReason {
// A collection of opcode-related static methods.
class V8_EXPORT_PRIVATE WasmOpcodes {
public:
- static const char* OpcodeName(WasmOpcode);
- static const FunctionSig* Signature(WasmOpcode);
- static const FunctionSig* AsmjsSignature(WasmOpcode);
- static bool IsPrefixOpcode(WasmOpcode);
- static bool IsControlOpcode(WasmOpcode);
- static bool IsAnyRefOpcode(WasmOpcode);
- static bool IsThrowingOpcode(WasmOpcode);
- static bool IsSimdPostMvpOpcode(WasmOpcode);
+ static constexpr const char* OpcodeName(WasmOpcode);
+ static constexpr const FunctionSig* Signature(WasmOpcode);
+ static constexpr const FunctionSig* AsmjsSignature(WasmOpcode);
+ static constexpr bool IsPrefixOpcode(WasmOpcode);
+ static constexpr bool IsControlOpcode(WasmOpcode);
+ static constexpr bool IsExternRefOpcode(WasmOpcode);
+ static constexpr bool IsThrowingOpcode(WasmOpcode);
+ static constexpr bool IsSimdPostMvpOpcode(WasmOpcode);
// Check whether the given opcode always jumps, i.e. all instructions after
// this one in the current block are dead. Returns false for |end|.
- static bool IsUnconditionalJump(WasmOpcode);
- static bool IsBreakable(WasmOpcode);
+ static constexpr bool IsUnconditionalJump(WasmOpcode);
+ static constexpr bool IsBreakable(WasmOpcode);
- static MessageTemplate TrapReasonToMessageId(TrapReason);
- static const char* TrapReasonMessage(TrapReason);
+ static constexpr MessageTemplate TrapReasonToMessageId(TrapReason);
+ static inline const char* TrapReasonMessage(TrapReason);
};
// Representation of an initializer expression.
@@ -760,13 +772,16 @@ struct WasmInitExpr {
explicit WasmInitExpr(int64_t v) : kind(kI64Const) { val.i64_const = v; }
explicit WasmInitExpr(float v) : kind(kF32Const) { val.f32_const = v; }
explicit WasmInitExpr(double v) : kind(kF64Const) { val.f64_const = v; }
+
+ explicit WasmInitExpr(WasmInitKind kind) : kind(kind) {
+ DCHECK_EQ(kind, kRefNullConst);
+ }
+
WasmInitExpr(WasmInitKind kind, uint32_t index) : kind(kind) {
if (kind == kGlobalIndex) {
val.global_index = index;
} else if (kind == kRefFuncConst) {
val.function_index = index;
- } else if (kind == kRefNullConst) {
- // Nothing to do.
} else {
// For the other types, the other initializers should be used.
UNREACHABLE();
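The WasmInitExpr hunk above gives the index-free kRefNullConst case its own single-argument constructor instead of a silent branch in the (kind, index) constructor, so passing an index for a kind that has none can be rejected. A cut-down sketch of the same pattern with a plain assert standing in for DCHECK_EQ; InitExpr here is illustrative, not the real struct:

    #include <cassert>
    #include <cstdint>

    // Minimal tagged-initializer sketch: the payload-free kind gets its own
    // one-argument constructor, so the two-argument constructor no longer
    // needs a "nothing to do" branch.
    struct InitExpr {
      enum Kind { kGlobalIndex, kRefFuncConst, kRefNullConst } kind;
      union { uint32_t global_index; uint32_t function_index; } val{};

      explicit InitExpr(Kind k) : kind(k) {
        assert(k == kRefNullConst);  // only the index-free kind allowed here
      }
      InitExpr(Kind k, uint32_t index) : kind(k) {
        if (k == kGlobalIndex) {
          val.global_index = index;
        } else if (k == kRefFuncConst) {
          val.function_index = index;
        } else {
          assert(false && "kinds without an index use the other constructor");
        }
      }
    };

    int main() {
      InitExpr null_init(InitExpr::kRefNullConst);      // no index needed
      InitExpr func_init(InitExpr::kRefFuncConst, 42);  // index required
      (void)null_init; (void)func_init;
    }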
diff --git a/chromium/v8/src/wasm/wasm-subtyping.cc b/chromium/v8/src/wasm/wasm-subtyping.cc
new file mode 100644
index 00000000000..6be554b24c7
--- /dev/null
+++ b/chromium/v8/src/wasm/wasm-subtyping.cc
@@ -0,0 +1,167 @@
+// Copyright 2020 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/wasm/wasm-subtyping.h"
+
+#include "src/wasm/wasm-module.h"
+
+namespace v8 {
+namespace internal {
+namespace wasm {
+
+namespace {
+
+bool IsEquivalent(ValueType type1, ValueType type2, const WasmModule* module);
+
+bool IsArrayTypeEquivalent(uint32_t type_index_1, uint32_t type_index_2,
+ const WasmModule* module) {
+ if (module->type_kinds[type_index_1] != kWasmArrayTypeCode ||
+ module->type_kinds[type_index_2] != kWasmArrayTypeCode) {
+ return false;
+ }
+
+ const ArrayType* sub_array = module->types[type_index_1].array_type;
+ const ArrayType* super_array = module->types[type_index_2].array_type;
+ if (sub_array->mutability() != super_array->mutability()) return false;
+
+ // Temporarily cache type equivalence for the recursive call.
+ module->cache_type_equivalence(type_index_1, type_index_2);
+ if (IsEquivalent(sub_array->element_type(), super_array->element_type(),
+ module)) {
+ return true;
+ } else {
+ module->uncache_type_equivalence(type_index_1, type_index_2);
+ // TODO(7748): Consider caching negative results as well.
+ return false;
+ }
+}
+
+bool IsStructTypeEquivalent(uint32_t type_index_1, uint32_t type_index_2,
+ const WasmModule* module) {
+ if (module->type_kinds[type_index_1] != kWasmStructTypeCode ||
+ module->type_kinds[type_index_2] != kWasmStructTypeCode) {
+ return false;
+ }
+ const StructType* sub_struct = module->types[type_index_1].struct_type;
+ const StructType* super_struct = module->types[type_index_2].struct_type;
+
+ if (sub_struct->field_count() != super_struct->field_count()) {
+ return false;
+ }
+
+ // Temporarily cache type equivalence for the recursive call.
+ module->cache_type_equivalence(type_index_1, type_index_2);
+ for (uint32_t i = 0; i < sub_struct->field_count(); i++) {
+ if (sub_struct->mutability(i) != super_struct->mutability(i) ||
+ !IsEquivalent(sub_struct->field(i), super_struct->field(i), module)) {
+ module->uncache_type_equivalence(type_index_1, type_index_2);
+ return false;
+ }
+ }
+ return true;
+}
+
+bool IsEquivalent(ValueType type1, ValueType type2, const WasmModule* module) {
+ if (type1 == type2) return true;
+ if (type1.kind() != type2.kind()) return false;
+ // At this point, the types can only be both rtts, refs, or optrefs,
+ // but with different indexed types.
+
+ // Rtts need to have the same depth.
+ if (type1.has_depth() && type1.depth() != type2.depth()) return false;
+ // In all three cases, the indexed types have to be equivalent.
+ if (module->is_cached_equivalent_type(type1.ref_index(), type2.ref_index())) {
+ return true;
+ }
+ return IsArrayTypeEquivalent(type1.ref_index(), type2.ref_index(), module) ||
+ IsStructTypeEquivalent(type1.ref_index(), type2.ref_index(), module);
+}
+
+bool IsStructSubtype(uint32_t subtype_index, uint32_t supertype_index,
+ const WasmModule* module) {
+ if (module->type_kinds[subtype_index] != kWasmStructTypeCode ||
+ module->type_kinds[supertype_index] != kWasmStructTypeCode) {
+ return false;
+ }
+ const StructType* sub_struct = module->types[subtype_index].struct_type;
+ const StructType* super_struct = module->types[supertype_index].struct_type;
+
+ if (sub_struct->field_count() < super_struct->field_count()) {
+ return false;
+ }
+
+ module->cache_subtype(subtype_index, supertype_index);
+ for (uint32_t i = 0; i < super_struct->field_count(); i++) {
+ bool sub_mut = sub_struct->mutability(i);
+ bool super_mut = super_struct->mutability(i);
+ if (sub_mut != super_mut ||
+ (sub_mut &&
+ !IsEquivalent(sub_struct->field(i), super_struct->field(i), module)) ||
+ (!sub_mut &&
+ !IsSubtypeOf(sub_struct->field(i), super_struct->field(i), module))) {
+ module->uncache_subtype(subtype_index, supertype_index);
+ return false;
+ }
+ }
+ return true;
+}
+
+bool IsArraySubtype(uint32_t subtype_index, uint32_t supertype_index,
+ const WasmModule* module) {
+ if (module->type_kinds[subtype_index] != kWasmArrayTypeCode ||
+ module->type_kinds[supertype_index] != kWasmArrayTypeCode) {
+ return false;
+ }
+ const ArrayType* sub_array = module->types[subtype_index].array_type;
+ const ArrayType* super_array = module->types[supertype_index].array_type;
+ bool sub_mut = sub_array->mutability();
+ bool super_mut = super_array->mutability();
+ module->cache_subtype(subtype_index, supertype_index);
+ if (sub_mut != super_mut ||
+ (sub_mut && !IsEquivalent(sub_array->element_type(),
+ super_array->element_type(), module)) ||
+ (!sub_mut && !IsSubtypeOf(sub_array->element_type(),
+ super_array->element_type(), module))) {
+ module->uncache_subtype(subtype_index, supertype_index);
+ return false;
+ } else {
+ return true;
+ }
+}
+} // namespace
+
+// TODO(7748): Extend this with function and any-heap subtyping.
+V8_EXPORT_PRIVATE bool IsSubtypeOfHeap(HeapType subtype, HeapType supertype,
+ const WasmModule* module) {
+ DCHECK(!module->has_signature(subtype) && !module->has_signature(supertype));
+ if (subtype == supertype) {
+ return true;
+ }
+ // eqref is a supertype of all reference types except funcref.
+ if (supertype == kHeapEq) {
+ return subtype != kHeapFunc;
+ }
+ // At the moment, generic heap types are not subtyping-related otherwise.
+ if (is_generic_heap_type(subtype) || is_generic_heap_type(supertype)) {
+ return false;
+ }
+
+ if (module->is_cached_subtype(subtype, supertype)) {
+ return true;
+ }
+ return IsStructSubtype(subtype, supertype, module) ||
+ IsArraySubtype(subtype, supertype, module);
+}
+
+// TODO(7748): Extend this with function subtyping.
+ValueType CommonSubtype(ValueType a, ValueType b, const WasmModule* module) {
+ if (a == b) return a;
+ if (IsSubtypeOf(a, b, module)) return a;
+ if (IsSubtypeOf(b, a, module)) return b;
+ return kWasmBottom;
+}
+
+} // namespace wasm
+} // namespace internal
+} // namespace v8
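
IsArrayTypeEquivalent and IsStructTypeEquivalent above record the candidate pair in the module's equivalence cache before recursing into element/field types, so checks over recursive type definitions terminate, and they remove the entry again when the recursive check fails. Below is a minimal, self-contained sketch of that pattern on a toy type table; it is not V8 code, and TinyType/EquivCache and the std::set-based cache are hypothetical stand-ins for the module's caching API.

#include <cstdint>
#include <set>
#include <utility>
#include <vector>

struct TinyType {
  bool is_array;    // true: array whose element type is types[ref]; false: plain i32
  uint32_t ref;     // element type index (only meaningful if is_array)
  bool mutability;
};

using EquivCache = std::set<std::pair<uint32_t, uint32_t>>;

bool IsEquivalent(uint32_t a, uint32_t b, const std::vector<TinyType>& types,
                  EquivCache* cache) {
  if (a == b) return true;
  if (cache->count({a, b})) return true;  // already assumed equivalent
  const TinyType& ta = types[a];
  const TinyType& tb = types[b];
  if (ta.is_array != tb.is_array || ta.mutability != tb.mutability) return false;
  if (!ta.is_array) return true;  // both plain i32
  cache->insert({a, b});          // optimistic assumption for the recursion
  if (IsEquivalent(ta.ref, tb.ref, types, cache)) return true;
  cache->erase({a, b});           // roll back on failure
  return false;
}

int main() {
  // Two mutually recursive array types: 0 -> array of 1, 1 -> array of 0.
  std::vector<TinyType> types = {{true, 1, true}, {true, 0, true}};
  EquivCache cache;
  return IsEquivalent(0, 1, types, &cache) ? 0 : 1;  // terminates, returns equivalent
}
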
diff --git a/chromium/v8/src/wasm/wasm-subtyping.h b/chromium/v8/src/wasm/wasm-subtyping.h
new file mode 100644
index 00000000000..6edf52dd31b
--- /dev/null
+++ b/chromium/v8/src/wasm/wasm-subtyping.h
@@ -0,0 +1,42 @@
+// Copyright 2020 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_WASM_WASM_SUBTYPING_H_
+#define V8_WASM_WASM_SUBTYPING_H_
+
+#include "src/wasm/value-type.h"
+
+namespace v8 {
+namespace internal {
+namespace wasm {
+
+struct WasmModule;
+V8_EXPORT_PRIVATE bool IsSubtypeOfHeap(HeapType subtype, HeapType supertype,
+ const WasmModule* module);
+
+// The subtyping between value types is described by the following rules:
+// - All types are a supertype of bottom.
+// - All reference types, except funcref, are subtypes of eqref.
+// - optref(ht1) <: optref(ht2) iff ht1 <: ht2.
+// - ref(ht1) <: ref/optref(ht2) iff ht1 <: ht2.
+V8_INLINE bool IsSubtypeOf(ValueType subtype, ValueType supertype,
+ const WasmModule* module) {
+ if (subtype == supertype) return true;
+ bool compatible_references = (subtype.kind() == ValueType::kRef &&
+ supertype.kind() == ValueType::kRef) ||
+ (subtype.kind() == ValueType::kRef &&
+ supertype.kind() == ValueType::kOptRef) ||
+ (subtype.kind() == ValueType::kOptRef &&
+ supertype.kind() == ValueType::kOptRef);
+ if (!compatible_references) return false;
+ return IsSubtypeOfHeap(subtype.heap_type(), supertype.heap_type(), module);
+}
+
+ValueType CommonSubtype(ValueType a, ValueType b, const WasmModule* module);
+
+} // namespace wasm
+} // namespace internal
+} // namespace v8
+
+#endif // V8_WASM_WASM_SUBTYPING_H_
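
The comment block above fully determines IsSubtypeOf for reference value types: nullability may only widen (a ref may be used where an optref is expected, never the reverse), and the referenced heap types must themselves be subtyping-related. Below is a minimal, self-contained sketch of just that nullability rule; it is not V8 code, and the Ref/Nullability names and the identity-only IsHeapSubtype stub are hypothetical.

#include <cassert>
#include <cstdint>

enum class Nullability { kRef, kOptRef };

struct Ref {
  Nullability nullability;
  uint32_t heap_type;  // index into some heap-type table
};

// Stand-in for a real heap-type subtyping check; here, identity only.
bool IsHeapSubtype(uint32_t sub, uint32_t super) { return sub == super; }

bool IsRefSubtype(Ref sub, Ref super) {
  // optref(ht) is not a subtype of ref(ht): the null value would be lost.
  if (sub.nullability == Nullability::kOptRef &&
      super.nullability == Nullability::kRef) {
    return false;
  }
  return IsHeapSubtype(sub.heap_type, super.heap_type);
}

int main() {
  Ref r{Nullability::kRef, 7};
  Ref opt{Nullability::kOptRef, 7};
  assert(IsRefSubtype(r, opt));    // ref <: optref
  assert(!IsRefSubtype(opt, r));   // optref is not <: ref
  assert(IsRefSubtype(opt, opt));  // reflexivity
  return 0;
}
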
diff --git a/chromium/v8/src/wasm/wasm-value.h b/chromium/v8/src/wasm/wasm-value.h
index 9a6f0ca7262..5189eb86768 100644
--- a/chromium/v8/src/wasm/wasm-value.h
+++ b/chromium/v8/src/wasm/wasm-value.h
@@ -63,7 +63,7 @@ class Simd128 {
V(f64, kWasmF64, double) \
V(f64_boxed, kWasmF64, Float64) \
V(s128, kWasmS128, Simd128) \
- V(anyref, kWasmAnyRef, Handle<Object>)
+ V(externref, kWasmExternRef, Handle<Object>)
ASSERT_TRIVIALLY_COPYABLE(Handle<Object>);
diff --git a/chromium/v8/src/zone/OWNERS b/chromium/v8/src/zone/OWNERS
index e4e653da5ba..04bfcc5ec5a 100644
--- a/chromium/v8/src/zone/OWNERS
+++ b/chromium/v8/src/zone/OWNERS
@@ -1,3 +1,4 @@
clemensb@chromium.org
+ishell@chromium.org
sigurds@chromium.org
verwaest@chromium.org
diff --git a/chromium/v8/src/zone/accounting-allocator.h b/chromium/v8/src/zone/accounting-allocator.h
index 69a649e75ea..bd2590df35d 100644
--- a/chromium/v8/src/zone/accounting-allocator.h
+++ b/chromium/v8/src/zone/accounting-allocator.h
@@ -8,6 +8,7 @@
#include <atomic>
#include "src/base/macros.h"
+#include "src/logging/tracing-flags.h"
namespace v8 {
namespace internal {
@@ -21,11 +22,11 @@ class V8_EXPORT_PRIVATE AccountingAllocator {
virtual ~AccountingAllocator();
// Allocates a new segment. Returns nullptr on failed allocation.
- virtual Segment* AllocateSegment(size_t bytes);
+ Segment* AllocateSegment(size_t bytes);
// Return unneeded segments to either insert them into the pool or release
// them if the pool is already full or memory pressure is high.
- virtual void ReturnSegment(Segment* memory);
+ void ReturnSegment(Segment* memory);
size_t GetCurrentMemoryUsage() const {
return current_memory_usage_.load(std::memory_order_relaxed);
@@ -35,8 +36,25 @@ class V8_EXPORT_PRIVATE AccountingAllocator {
return max_memory_usage_.load(std::memory_order_relaxed);
}
- virtual void ZoneCreation(const Zone* zone) {}
- virtual void ZoneDestruction(const Zone* zone) {}
+ void TraceZoneCreation(const Zone* zone) {
+ if (V8_LIKELY(!TracingFlags::is_zone_stats_enabled())) return;
+ TraceZoneCreationImpl(zone);
+ }
+
+ void TraceZoneDestruction(const Zone* zone) {
+ if (V8_LIKELY(!TracingFlags::is_zone_stats_enabled())) return;
+ TraceZoneDestructionImpl(zone);
+ }
+
+ void TraceAllocateSegment(Segment* segment) {
+ if (V8_LIKELY(!TracingFlags::is_zone_stats_enabled())) return;
+ TraceAllocateSegmentImpl(segment);
+ }
+
+ protected:
+ virtual void TraceZoneCreationImpl(const Zone* zone) {}
+ virtual void TraceZoneDestructionImpl(const Zone* zone) {}
+ virtual void TraceAllocateSegmentImpl(Segment* segment) {}
private:
std::atomic<size_t> current_memory_usage_{0};
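
The new TraceZoneCreation/TraceZoneDestruction/TraceAllocateSegment wrappers above replace the old virtual ZoneCreation/ZoneDestruction hooks with non-virtual inline methods that check the tracing flag first and only then dispatch to a protected virtual *Impl hook, keeping the common tracing-off path free of virtual calls. Below is a minimal, self-contained sketch of that shape; it is not V8 code, and TracingEnabled/Allocator/TracingAllocator are hypothetical names.

#include <atomic>
#include <cstddef>
#include <cstdio>

std::atomic<bool> g_tracing_enabled{false};
bool TracingEnabled() {
  return g_tracing_enabled.load(std::memory_order_relaxed);
}

class Allocator {
 public:
  virtual ~Allocator() = default;

  // Fast path: no virtual dispatch when tracing is disabled.
  void TraceAllocation(size_t bytes) {
    if (!TracingEnabled()) return;  // expected common case
    TraceAllocationImpl(bytes);
  }

 protected:
  // Subclasses override only the slow-path hook.
  virtual void TraceAllocationImpl(size_t bytes) {}
};

class TracingAllocator : public Allocator {
 protected:
  void TraceAllocationImpl(size_t bytes) override {
    std::printf("allocated %zu bytes\n", bytes);
  }
};

int main() {
  TracingAllocator alloc;
  alloc.TraceAllocation(128);  // silent: tracing disabled
  g_tracing_enabled = true;
  alloc.TraceAllocation(256);  // prints via the virtual hook
  return 0;
}
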
diff --git a/chromium/v8/src/zone/zone.cc b/chromium/v8/src/zone/zone.cc
index 81fc9c7d8b3..34882d966ca 100644
--- a/chromium/v8/src/zone/zone.cc
+++ b/chromium/v8/src/zone/zone.cc
@@ -36,11 +36,10 @@ Zone::Zone(AccountingAllocator* allocator, const char* name)
segment_head_(nullptr),
name_(name),
sealed_(false) {
- allocator_->ZoneCreation(this);
+ allocator_->TraceZoneCreation(this);
}
Zone::~Zone() {
- allocator_->ZoneDestruction(this);
DeleteAll();
DCHECK_EQ(segment_bytes_allocated_, 0);
@@ -74,14 +73,23 @@ void* Zone::AsanNew(size_t size) {
}
void Zone::ReleaseMemory() {
- allocator_->ZoneDestruction(this);
DeleteAll();
- allocator_->ZoneCreation(this);
+ allocator_->TraceZoneCreation(this);
}
void Zone::DeleteAll() {
+ Segment* current = segment_head_;
+ if (current) {
+ // Commit the allocation_size_ of segment_head_ and disconnect the segment
+ // list from the zone so that the tracing accounting allocator observes a
+ // value that includes the memory from the head segment.
+ allocation_size_ = allocation_size();
+ segment_head_ = nullptr;
+ }
+ allocator_->TraceZoneDestruction(this);
+
// Traverse the chained list of segments and return them all to the allocator.
- for (Segment* current = segment_head_; current;) {
+ while (current) {
Segment* next = current->next();
size_t size = current->total_size();
@@ -96,30 +104,14 @@ void Zone::DeleteAll() {
position_ = limit_ = 0;
allocation_size_ = 0;
- segment_head_ = nullptr;
-}
-
-// Creates a new segment, sets its size, and pushes it to the front
-// of the segment chain. Returns the new segment.
-Segment* Zone::NewSegment(size_t requested_size) {
- Segment* result = allocator_->AllocateSegment(requested_size);
- if (!result) return nullptr;
- DCHECK_GE(result->total_size(), requested_size);
- segment_bytes_allocated_ += result->total_size();
- result->set_zone(this);
- result->set_next(segment_head_);
- segment_head_ = result;
- return result;
}
Address Zone::NewExpand(size_t size) {
// Make sure the requested size is already properly aligned and that
// there isn't enough room in the Zone to satisfy the request.
DCHECK_EQ(size, RoundDown(size, kAlignmentInBytes));
- DCHECK(limit_ - position_ < size);
+ DCHECK_LT(limit_ - position_, size);
- // Commit the allocation_size_ of segment_head_ if any.
- allocation_size_ = allocation_size();
// Compute the new segment size. We use a 'high water mark'
// strategy, where we increase the segment size every time we expand
// except that we employ a maximum segment size when we delete. This
@@ -148,12 +140,24 @@ Address Zone::NewExpand(size_t size) {
V8::FatalProcessOutOfMemory(nullptr, "Zone");
return kNullAddress;
}
- Segment* segment = NewSegment(new_size);
+
+ Segment* segment = allocator_->AllocateSegment(new_size);
if (segment == nullptr) {
V8::FatalProcessOutOfMemory(nullptr, "Zone");
return kNullAddress;
}
+ DCHECK_GE(segment->total_size(), new_size);
+ segment_bytes_allocated_ += segment->total_size();
+ segment->set_zone(this);
+ segment->set_next(segment_head_);
+ // Commit the allocation_size_ of segment_head_, if any, so that the tracing
+ // accounting allocator observes a value that includes the memory from the
+ // previous head segment.
+ allocation_size_ = allocation_size();
+ segment_head_ = segment;
+ allocator_->TraceAllocateSegment(segment);
+
// Recompute 'top' and 'limit' based on the new segment.
Address result = RoundUp(segment->start(), kAlignmentInBytes);
position_ = result + size;
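
The reordered NewExpand above commits allocation_size_ (which excludes the live head segment) before the new segment becomes segment_head_ and before TraceAllocateSegment fires, and DeleteAll does the same before tracing destruction, so a tracing observer reading the committed counter does not miss the outgoing head's usage. Below is a minimal, self-contained sketch of that accounting split on a toy bump allocator; it is not V8 code, and MiniZone/Segment are hypothetical names.

#include <cstddef>
#include <cstdint>
#include <vector>

struct Segment {
  std::vector<uint8_t> data;
  size_t used = 0;
};

class MiniZone {
 public:
  void* Allocate(size_t size) {
    if (segments_.empty() || Remaining() < size) Expand(size);
    Segment& head = segments_.back();
    void* result = head.data.data() + head.used;
    head.used += size;
    return result;
  }

  // Precise usage: committed counter plus the live head segment.
  size_t allocation_size() const {
    return committed_ + (segments_.empty() ? 0 : segments_.back().used);
  }

  // Tracing-friendly usage: committed counter only (head segment excluded).
  size_t allocation_size_for_tracing() const { return committed_; }

 private:
  size_t Remaining() const {
    const Segment& head = segments_.back();
    return head.data.size() - head.used;
  }

  void Expand(size_t min_size) {
    // Commit the outgoing head's usage *before* installing the new head.
    if (!segments_.empty()) committed_ += segments_.back().used;
    Segment seg;
    seg.data.resize(min_size < 4096 ? 4096 : min_size);
    segments_.push_back(std::move(seg));
  }

  std::vector<Segment> segments_;
  size_t committed_ = 0;
};

int main() {
  MiniZone zone;
  zone.Allocate(100);
  zone.Allocate(8000);  // forces a new segment; the first 100 bytes get committed
  return zone.allocation_size() == 8100 &&
                 zone.allocation_size_for_tracing() == 100
             ? 0
             : 1;
}
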
diff --git a/chromium/v8/src/zone/zone.h b/chromium/v8/src/zone/zone.h
index df72864c5a9..6c30af8132a 100644
--- a/chromium/v8/src/zone/zone.h
+++ b/chromium/v8/src/zone/zone.h
@@ -79,13 +79,21 @@ class V8_EXPORT_PRIVATE Zone final {
return segment_bytes_allocated_ > kExcessLimit;
}
+ size_t segment_bytes_allocated() const { return segment_bytes_allocated_; }
+
const char* name() const { return name_; }
+ // Returns the precise amount of used zone memory; may be called only from
+ // the thread owning the zone.
size_t allocation_size() const {
size_t extra = segment_head_ ? position_ - segment_head_->start() : 0;
return allocation_size_ + extra;
}
+ // Returns the used zone memory excluding the head segment; may be called
+ // from threads that do not own the zone.
+ size_t allocation_size_for_tracing() const { return allocation_size_; }
+
AccountingAllocator* allocator() const { return allocator_; }
private:
@@ -118,10 +126,6 @@ class V8_EXPORT_PRIVATE Zone final {
// room in the Zone already.
Address NewExpand(size_t size);
- // Creates a new segment, sets it size, and pushes it to the front
- // of the segment chain. Returns the new segment.
- inline Segment* NewSegment(size_t requested_size);
-
// The free region in the current (front) segment is represented as
// the half-open interval [position, limit). The 'position' variable
// is guaranteed to be aligned as dictated by kAlignment.