Diffstat (limited to 'deps/v8/src')
-rw-r--r--deps/v8/src/api/api-inl.h1
-rw-r--r--deps/v8/src/api/api-natives.cc13
-rw-r--r--deps/v8/src/api/api.cc282
-rw-r--r--deps/v8/src/api/api.h3
-rw-r--r--deps/v8/src/asmjs/asm-js.cc2
-rw-r--r--deps/v8/src/asmjs/asm-parser.cc2
-rw-r--r--deps/v8/src/ast/ast-value-factory.cc17
-rw-r--r--deps/v8/src/ast/ast-value-factory.h7
-rw-r--r--deps/v8/src/ast/ast.h26
-rw-r--r--deps/v8/src/ast/prettyprinter.cc4
-rw-r--r--deps/v8/src/ast/scopes.cc113
-rw-r--r--deps/v8/src/ast/scopes.h27
-rw-r--r--deps/v8/src/base/atomic-utils.h25
-rw-r--r--deps/v8/src/base/atomicops.h58
-rw-r--r--deps/v8/src/base/bounded-page-allocator.cc5
-rw-r--r--deps/v8/src/base/emulated-virtual-address-subspace.cc138
-rw-r--r--deps/v8/src/base/emulated-virtual-address-subspace.h113
-rw-r--r--deps/v8/src/base/platform/platform-fuchsia.cc280
-rw-r--r--deps/v8/src/base/platform/platform-posix.cc96
-rw-r--r--deps/v8/src/base/platform/platform-win32.cc160
-rw-r--r--deps/v8/src/base/platform/platform.h98
-rw-r--r--deps/v8/src/base/platform/yield-processor.h55
-rw-r--r--deps/v8/src/base/region-allocator.cc30
-rw-r--r--deps/v8/src/base/region-allocator.h34
-rw-r--r--deps/v8/src/base/sanitizer/lsan-virtual-address-space.cc61
-rw-r--r--deps/v8/src/base/sanitizer/lsan-virtual-address-space.h63
-rw-r--r--deps/v8/src/base/small-vector.h72
-rw-r--r--deps/v8/src/base/virtual-address-space-page-allocator.cc69
-rw-r--r--deps/v8/src/base/virtual-address-space-page-allocator.h72
-rw-r--r--deps/v8/src/base/virtual-address-space.cc262
-rw-r--r--deps/v8/src/base/virtual-address-space.h136
-rw-r--r--deps/v8/src/base/win32-headers.h4
-rw-r--r--deps/v8/src/baseline/baseline-batch-compiler.cc20
-rw-r--r--deps/v8/src/baseline/baseline-compiler.cc46
-rw-r--r--deps/v8/src/baseline/loong64/baseline-assembler-loong64-inl.h21
-rw-r--r--deps/v8/src/baseline/mips/baseline-assembler-mips-inl.h24
-rw-r--r--deps/v8/src/baseline/mips64/baseline-assembler-mips64-inl.h24
-rw-r--r--deps/v8/src/baseline/riscv64/baseline-assembler-riscv64-inl.h1
-rw-r--r--deps/v8/src/baseline/s390/baseline-assembler-s390-inl.h224
-rw-r--r--deps/v8/src/bigint/CPPLINT.cfg1
-rw-r--r--deps/v8/src/bigint/bigint-internal.cc4
-rw-r--r--deps/v8/src/bigint/bigint.h19
-rw-r--r--deps/v8/src/bigint/bitwise.cc106
-rw-r--r--deps/v8/src/bigint/digit-arithmetic.h4
-rw-r--r--deps/v8/src/bigint/div-barrett.cc28
-rw-r--r--deps/v8/src/bigint/div-burnikel.cc14
-rw-r--r--deps/v8/src/bigint/div-helpers.cc6
-rw-r--r--deps/v8/src/bigint/div-schoolbook.cc7
-rw-r--r--deps/v8/src/bigint/fromstring.cc10
-rw-r--r--deps/v8/src/bigint/mul-fft.cc40
-rw-r--r--deps/v8/src/bigint/mul-karatsuba.cc8
-rw-r--r--deps/v8/src/bigint/mul-schoolbook.cc4
-rw-r--r--deps/v8/src/bigint/tostring.cc18
-rw-r--r--deps/v8/src/bigint/vector-arithmetic.cc2
-rw-r--r--deps/v8/src/builtins/arm/builtins-arm.cc8
-rw-r--r--deps/v8/src/builtins/arm64/builtins-arm64.cc29
-rw-r--r--deps/v8/src/builtins/array-from.tq8
-rw-r--r--deps/v8/src/builtins/array-join.tq4
-rw-r--r--deps/v8/src/builtins/base.tq9
-rw-r--r--deps/v8/src/builtins/builtins-async-function-gen.cc1
-rw-r--r--deps/v8/src/builtins/builtins-async-gen.cc5
-rw-r--r--deps/v8/src/builtins/builtins-async-generator-gen.cc2
-rw-r--r--deps/v8/src/builtins/builtins-collections-gen.cc5
-rw-r--r--deps/v8/src/builtins/builtins-constructor-gen.cc8
-rw-r--r--deps/v8/src/builtins/builtins-dataview.cc70
-rw-r--r--deps/v8/src/builtins/builtins-date.cc12
-rw-r--r--deps/v8/src/builtins/builtins-definitions.h12
-rw-r--r--deps/v8/src/builtins/builtins-function.cc6
-rw-r--r--deps/v8/src/builtins/builtins-internal-gen.cc19
-rw-r--r--deps/v8/src/builtins/builtins-intl.cc3
-rw-r--r--deps/v8/src/builtins/builtins-iterator-gen.cc5
-rw-r--r--deps/v8/src/builtins/builtins-lazy-gen.cc38
-rw-r--r--deps/v8/src/builtins/builtins-lazy-gen.h2
-rw-r--r--deps/v8/src/builtins/builtins-microtask-queue-gen.cc23
-rw-r--r--deps/v8/src/builtins/builtins-number-gen.cc29
-rw-r--r--deps/v8/src/builtins/builtins-object-gen.cc6
-rw-r--r--deps/v8/src/builtins/builtins-regexp-gen.cc12
-rw-r--r--deps/v8/src/builtins/builtins-regexp-gen.h2
-rw-r--r--deps/v8/src/builtins/builtins-string.tq25
-rw-r--r--deps/v8/src/builtins/builtins-temporal.cc2
-rw-r--r--deps/v8/src/builtins/builtins-trace.cc4
-rw-r--r--deps/v8/src/builtins/builtins-typed-array-gen.cc19
-rw-r--r--deps/v8/src/builtins/builtins-typed-array-gen.h3
-rw-r--r--deps/v8/src/builtins/builtins-utils.h11
-rw-r--r--deps/v8/src/builtins/builtins-wasm-gen.cc4
-rw-r--r--deps/v8/src/builtins/builtins-wasm-gen.h2
-rw-r--r--deps/v8/src/builtins/builtins.cc48
-rw-r--r--deps/v8/src/builtins/builtins.h18
-rw-r--r--deps/v8/src/builtins/collections.tq7
-rw-r--r--deps/v8/src/builtins/convert.tq6
-rw-r--r--deps/v8/src/builtins/data-view.tq102
-rw-r--r--deps/v8/src/builtins/finalization-registry.tq4
-rw-r--r--deps/v8/src/builtins/ia32/builtins-ia32.cc8
-rw-r--r--deps/v8/src/builtins/iterator.tq2
-rw-r--r--deps/v8/src/builtins/loong64/builtins-loong64.cc11
-rw-r--r--deps/v8/src/builtins/mips/builtins-mips.cc11
-rw-r--r--deps/v8/src/builtins/mips64/builtins-mips64.cc11
-rw-r--r--deps/v8/src/builtins/object-fromentries.tq4
-rw-r--r--deps/v8/src/builtins/ppc/builtins-ppc.cc161
-rw-r--r--deps/v8/src/builtins/promise-abstract-operations.tq2
-rw-r--r--deps/v8/src/builtins/promise-all.tq12
-rw-r--r--deps/v8/src/builtins/promise-any.tq10
-rw-r--r--deps/v8/src/builtins/promise-constructor.tq2
-rw-r--r--deps/v8/src/builtins/promise-jobs.tq2
-rw-r--r--deps/v8/src/builtins/promise-misc.tq4
-rw-r--r--deps/v8/src/builtins/promise-race.tq12
-rw-r--r--deps/v8/src/builtins/promise-reaction-job.tq4
-rw-r--r--deps/v8/src/builtins/promise-resolve.tq2
-rw-r--r--deps/v8/src/builtins/riscv64/builtins-riscv64.cc11
-rw-r--r--deps/v8/src/builtins/s390/builtins-s390.cc680
-rw-r--r--deps/v8/src/builtins/setup-builtins-internal.cc7
-rw-r--r--deps/v8/src/builtins/torque-internal.tq31
-rw-r--r--deps/v8/src/builtins/typed-array-createtypedarray.tq7
-rw-r--r--deps/v8/src/builtins/typed-array.tq11
-rw-r--r--deps/v8/src/builtins/wasm.tq59
-rw-r--r--deps/v8/src/builtins/x64/builtins-x64.cc102
-rw-r--r--deps/v8/src/codegen/OWNERS2
-rw-r--r--deps/v8/src/codegen/arm/assembler-arm-inl.h2
-rw-r--r--deps/v8/src/codegen/arm/assembler-arm.cc10
-rw-r--r--deps/v8/src/codegen/arm/assembler-arm.h12
-rw-r--r--deps/v8/src/codegen/arm/macro-assembler-arm.cc2
-rw-r--r--deps/v8/src/codegen/arm/macro-assembler-arm.h2
-rw-r--r--deps/v8/src/codegen/arm64/assembler-arm64-inl.h4
-rw-r--r--deps/v8/src/codegen/arm64/assembler-arm64.cc8
-rw-r--r--deps/v8/src/codegen/arm64/assembler-arm64.h12
-rw-r--r--deps/v8/src/codegen/arm64/macro-assembler-arm64.cc2
-rw-r--r--deps/v8/src/codegen/arm64/macro-assembler-arm64.h2
-rw-r--r--deps/v8/src/codegen/arm64/register-arm64.h2
-rw-r--r--deps/v8/src/codegen/assembler.h7
-rw-r--r--deps/v8/src/codegen/code-reference.cc24
-rw-r--r--deps/v8/src/codegen/code-reference.h21
-rw-r--r--deps/v8/src/codegen/code-stub-assembler.cc226
-rw-r--r--deps/v8/src/codegen/code-stub-assembler.h51
-rw-r--r--deps/v8/src/codegen/compilation-cache.cc14
-rw-r--r--deps/v8/src/codegen/compiler.cc813
-rw-r--r--deps/v8/src/codegen/compiler.h89
-rw-r--r--deps/v8/src/codegen/constant-pool.h8
-rw-r--r--deps/v8/src/codegen/cpu-features.h2
-rw-r--r--deps/v8/src/codegen/external-reference-table.cc8
-rw-r--r--deps/v8/src/codegen/external-reference.cc49
-rw-r--r--deps/v8/src/codegen/external-reference.h34
-rw-r--r--deps/v8/src/codegen/ia32/assembler-ia32-inl.h6
-rw-r--r--deps/v8/src/codegen/ia32/assembler-ia32.cc55
-rw-r--r--deps/v8/src/codegen/ia32/assembler-ia32.h199
-rw-r--r--deps/v8/src/codegen/ia32/fma-instr.h58
-rw-r--r--deps/v8/src/codegen/ia32/macro-assembler-ia32.cc4
-rw-r--r--deps/v8/src/codegen/ia32/macro-assembler-ia32.h2
-rw-r--r--deps/v8/src/codegen/loong64/assembler-loong64.cc6
-rw-r--r--deps/v8/src/codegen/loong64/assembler-loong64.h11
-rw-r--r--deps/v8/src/codegen/loong64/macro-assembler-loong64.cc25
-rw-r--r--deps/v8/src/codegen/loong64/macro-assembler-loong64.h8
-rw-r--r--deps/v8/src/codegen/machine-type.h4
-rw-r--r--deps/v8/src/codegen/mips/assembler-mips.cc12
-rw-r--r--deps/v8/src/codegen/mips/assembler-mips.h15
-rw-r--r--deps/v8/src/codegen/mips/macro-assembler-mips.cc25
-rw-r--r--deps/v8/src/codegen/mips/macro-assembler-mips.h6
-rw-r--r--deps/v8/src/codegen/mips64/assembler-mips64.cc8
-rw-r--r--deps/v8/src/codegen/mips64/assembler-mips64.h15
-rw-r--r--deps/v8/src/codegen/mips64/macro-assembler-mips64.cc26
-rw-r--r--deps/v8/src/codegen/mips64/macro-assembler-mips64.h6
-rw-r--r--deps/v8/src/codegen/pending-optimization-table.cc7
-rw-r--r--deps/v8/src/codegen/ppc/assembler-ppc-inl.h2
-rw-r--r--deps/v8/src/codegen/ppc/assembler-ppc.cc17
-rw-r--r--deps/v8/src/codegen/ppc/assembler-ppc.h16
-rw-r--r--deps/v8/src/codegen/ppc/interface-descriptors-ppc-inl.h14
-rw-r--r--deps/v8/src/codegen/ppc/macro-assembler-ppc.cc51
-rw-r--r--deps/v8/src/codegen/ppc/macro-assembler-ppc.h6
-rw-r--r--deps/v8/src/codegen/reloc-info.cc6
-rw-r--r--deps/v8/src/codegen/reloc-info.h6
-rw-r--r--deps/v8/src/codegen/riscv64/assembler-riscv64.cc285
-rw-r--r--deps/v8/src/codegen/riscv64/assembler-riscv64.h121
-rw-r--r--deps/v8/src/codegen/riscv64/constants-riscv64.h97
-rw-r--r--deps/v8/src/codegen/riscv64/macro-assembler-riscv64.cc101
-rw-r--r--deps/v8/src/codegen/riscv64/macro-assembler-riscv64.h13
-rw-r--r--deps/v8/src/codegen/riscv64/register-riscv64.h14
-rw-r--r--deps/v8/src/codegen/s390/assembler-s390-inl.h2
-rw-r--r--deps/v8/src/codegen/s390/assembler-s390.cc6
-rw-r--r--deps/v8/src/codegen/s390/assembler-s390.h10
-rw-r--r--deps/v8/src/codegen/s390/interface-descriptors-s390-inl.h20
-rw-r--r--deps/v8/src/codegen/s390/macro-assembler-s390.cc43
-rw-r--r--deps/v8/src/codegen/s390/macro-assembler-s390.h4
-rw-r--r--deps/v8/src/codegen/safepoint-table.cc340
-rw-r--r--deps/v8/src/codegen/safepoint-table.h244
-rw-r--r--deps/v8/src/codegen/script-details.h2
-rw-r--r--deps/v8/src/codegen/shared-ia32-x64/macro-assembler-shared-ia32-x64.cc95
-rw-r--r--deps/v8/src/codegen/shared-ia32-x64/macro-assembler-shared-ia32-x64.h11
-rw-r--r--deps/v8/src/codegen/turbo-assembler.cc12
-rw-r--r--deps/v8/src/codegen/turbo-assembler.h1
-rw-r--r--deps/v8/src/codegen/unoptimized-compilation-info.cc5
-rw-r--r--deps/v8/src/codegen/unoptimized-compilation-info.h8
-rw-r--r--deps/v8/src/codegen/x64/assembler-x64-inl.h4
-rw-r--r--deps/v8/src/codegen/x64/assembler-x64.cc109
-rw-r--r--deps/v8/src/codegen/x64/assembler-x64.h47
-rw-r--r--deps/v8/src/codegen/x64/fma-instr.h42
-rw-r--r--deps/v8/src/codegen/x64/macro-assembler-x64.cc111
-rw-r--r--deps/v8/src/codegen/x64/macro-assembler-x64.h13
-rw-r--r--deps/v8/src/common/globals.h88
-rw-r--r--deps/v8/src/common/high-allocation-throughput-scope.h37
-rw-r--r--deps/v8/src/common/message-template.h13
-rw-r--r--deps/v8/src/common/ptr-compr-inl.h2
-rw-r--r--deps/v8/src/compiler-dispatcher/lazy-compile-dispatcher.cc522
-rw-r--r--deps/v8/src/compiler-dispatcher/lazy-compile-dispatcher.h134
-rw-r--r--deps/v8/src/compiler/access-builder.cc10
-rw-r--r--deps/v8/src/compiler/access-info.cc29
-rw-r--r--deps/v8/src/compiler/access-info.h5
-rw-r--r--deps/v8/src/compiler/backend/arm64/instruction-selector-arm64.cc76
-rw-r--r--deps/v8/src/compiler/backend/code-generator.cc5
-rw-r--r--deps/v8/src/compiler/backend/ia32/code-generator-ia32.cc60
-rw-r--r--deps/v8/src/compiler/backend/ia32/instruction-codes-ia32.h21
-rw-r--r--deps/v8/src/compiler/backend/ia32/instruction-scheduler-ia32.cc21
-rw-r--r--deps/v8/src/compiler/backend/ia32/instruction-selector-ia32.cc143
-rw-r--r--deps/v8/src/compiler/backend/instruction-selector-impl.h4
-rw-r--r--deps/v8/src/compiler/backend/instruction-selector.cc11
-rw-r--r--deps/v8/src/compiler/backend/instruction.h4
-rw-r--r--deps/v8/src/compiler/backend/loong64/instruction-selector-loong64.cc5
-rw-r--r--deps/v8/src/compiler/backend/mips64/instruction-selector-mips64.cc6
-rw-r--r--deps/v8/src/compiler/backend/ppc/code-generator-ppc.cc75
-rw-r--r--deps/v8/src/compiler/backend/riscv64/code-generator-riscv64.cc1036
-rw-r--r--deps/v8/src/compiler/backend/riscv64/instruction-codes-riscv64.h26
-rw-r--r--deps/v8/src/compiler/backend/riscv64/instruction-scheduler-riscv64.cc26
-rw-r--r--deps/v8/src/compiler/backend/riscv64/instruction-selector-riscv64.cc227
-rw-r--r--deps/v8/src/compiler/backend/s390/code-generator-s390.cc375
-rw-r--r--deps/v8/src/compiler/backend/x64/instruction-selector-x64.cc19
-rw-r--r--deps/v8/src/compiler/compilation-dependencies.cc510
-rw-r--r--deps/v8/src/compiler/compilation-dependencies.h17
-rw-r--r--deps/v8/src/compiler/csa-load-elimination.cc3
-rw-r--r--deps/v8/src/compiler/diamond.h4
-rw-r--r--deps/v8/src/compiler/effect-control-linearizer.cc11
-rw-r--r--deps/v8/src/compiler/fast-api-calls.cc1
-rw-r--r--deps/v8/src/compiler/functional-list.h12
-rw-r--r--deps/v8/src/compiler/globals.h4
-rw-r--r--deps/v8/src/compiler/graph-assembler.cc9
-rw-r--r--deps/v8/src/compiler/graph-assembler.h1
-rw-r--r--deps/v8/src/compiler/heap-refs.cc40
-rw-r--r--deps/v8/src/compiler/heap-refs.h1
-rw-r--r--deps/v8/src/compiler/int64-lowering.cc10
-rw-r--r--deps/v8/src/compiler/int64-lowering.h2
-rw-r--r--deps/v8/src/compiler/js-call-reducer.cc134
-rw-r--r--deps/v8/src/compiler/js-call-reducer.h1
-rw-r--r--deps/v8/src/compiler/js-create-lowering.cc2
-rw-r--r--deps/v8/src/compiler/js-heap-broker.cc18
-rw-r--r--deps/v8/src/compiler/js-heap-broker.h14
-rw-r--r--deps/v8/src/compiler/js-inlining-heuristic.cc20
-rw-r--r--deps/v8/src/compiler/js-inlining.cc18
-rw-r--r--deps/v8/src/compiler/js-native-context-specialization.cc23
-rw-r--r--deps/v8/src/compiler/js-type-hint-lowering.cc5
-rw-r--r--deps/v8/src/compiler/js-typed-lowering.cc7
-rw-r--r--deps/v8/src/compiler/linkage.cc23
-rw-r--r--deps/v8/src/compiler/linkage.h18
-rw-r--r--deps/v8/src/compiler/load-elimination.cc49
-rw-r--r--deps/v8/src/compiler/loop-analysis.cc34
-rw-r--r--deps/v8/src/compiler/machine-graph-verifier.cc28
-rw-r--r--deps/v8/src/compiler/machine-operator-reducer.cc2
-rw-r--r--deps/v8/src/compiler/machine-operator.cc2
-rw-r--r--deps/v8/src/compiler/memory-lowering.cc12
-rw-r--r--deps/v8/src/compiler/node-properties.cc16
-rw-r--r--deps/v8/src/compiler/persistent-map.h9
-rw-r--r--deps/v8/src/compiler/pipeline.cc3
-rw-r--r--deps/v8/src/compiler/simplified-lowering.cc1
-rw-r--r--deps/v8/src/compiler/typer.cc15
-rw-r--r--deps/v8/src/compiler/types.cc31
-rw-r--r--deps/v8/src/compiler/types.h100
-rw-r--r--deps/v8/src/compiler/value-numbering-reducer.cc14
-rw-r--r--deps/v8/src/compiler/wasm-compiler.cc543
-rw-r--r--deps/v8/src/compiler/wasm-compiler.h52
-rw-r--r--deps/v8/src/compiler/wasm-escape-analysis.cc35
-rw-r--r--deps/v8/src/compiler/wasm-inlining.cc43
-rw-r--r--deps/v8/src/compiler/wasm-inlining.h4
-rw-r--r--deps/v8/src/d8/d8-test.cc187
-rw-r--r--deps/v8/src/d8/d8.cc404
-rw-r--r--deps/v8/src/d8/d8.h46
-rw-r--r--deps/v8/src/date/dateparser-inl.h12
-rw-r--r--deps/v8/src/debug/debug-coverage.cc2
-rw-r--r--deps/v8/src/debug/debug-interface.cc71
-rw-r--r--deps/v8/src/debug/debug-interface.h31
-rw-r--r--deps/v8/src/debug/debug-property-iterator.cc15
-rw-r--r--deps/v8/src/debug/debug-scopes.cc7
-rw-r--r--deps/v8/src/debug/debug-scopes.h2
-rw-r--r--deps/v8/src/debug/debug-stack-trace-iterator.cc3
-rw-r--r--deps/v8/src/debug/debug-wasm-objects.cc15
-rw-r--r--deps/v8/src/debug/debug.cc118
-rw-r--r--deps/v8/src/debug/debug.h12
-rw-r--r--deps/v8/src/debug/liveedit.cc20
-rw-r--r--deps/v8/src/debug/wasm/gdb-server/gdb-server.cc3
-rw-r--r--deps/v8/src/debug/wasm/gdb-server/gdb-server.h7
-rw-r--r--deps/v8/src/deoptimizer/deoptimizer.cc5
-rw-r--r--deps/v8/src/deoptimizer/translated-state.cc44
-rw-r--r--deps/v8/src/deoptimizer/translated-state.h18
-rw-r--r--deps/v8/src/diagnostics/basic-block-profiler.cc4
-rw-r--r--deps/v8/src/diagnostics/disassembler.cc8
-rw-r--r--deps/v8/src/diagnostics/ia32/disasm-ia32.cc92
-rw-r--r--deps/v8/src/diagnostics/objects-debug.cc44
-rw-r--r--deps/v8/src/diagnostics/objects-printer.cc41
-rw-r--r--deps/v8/src/diagnostics/perf-jit.cc2
-rw-r--r--deps/v8/src/diagnostics/ppc/disasm-ppc.cc4
-rw-r--r--deps/v8/src/diagnostics/riscv64/disasm-riscv64.cc132
-rw-r--r--deps/v8/src/diagnostics/unwinding-info-win64.cc30
-rw-r--r--deps/v8/src/diagnostics/x64/disasm-x64.cc123
-rw-r--r--deps/v8/src/execution/DEPS5
-rw-r--r--deps/v8/src/execution/arm64/simulator-arm64.cc148
-rw-r--r--deps/v8/src/execution/arm64/simulator-arm64.h3
-rw-r--r--deps/v8/src/execution/embedder-state.cc45
-rw-r--r--deps/v8/src/execution/embedder-state.h39
-rw-r--r--deps/v8/src/execution/encoded-c-signature.cc41
-rw-r--r--deps/v8/src/execution/encoded-c-signature.h60
-rw-r--r--deps/v8/src/execution/execution.cc25
-rw-r--r--deps/v8/src/execution/execution.h2
-rw-r--r--deps/v8/src/execution/frame-constants.h16
-rw-r--r--deps/v8/src/execution/frames-inl.h4
-rw-r--r--deps/v8/src/execution/frames.cc146
-rw-r--r--deps/v8/src/execution/frames.h27
-rw-r--r--deps/v8/src/execution/isolate-data.h39
-rw-r--r--deps/v8/src/execution/isolate-inl.h1
-rw-r--r--deps/v8/src/execution/isolate-utils-inl.h22
-rw-r--r--deps/v8/src/execution/isolate.cc446
-rw-r--r--deps/v8/src/execution/isolate.h111
-rw-r--r--deps/v8/src/execution/local-isolate.cc18
-rw-r--r--deps/v8/src/execution/local-isolate.h20
-rw-r--r--deps/v8/src/execution/messages.cc46
-rw-r--r--deps/v8/src/execution/ppc/simulator-ppc.cc61
-rw-r--r--deps/v8/src/execution/riscv64/simulator-riscv64.cc756
-rw-r--r--deps/v8/src/execution/riscv64/simulator-riscv64.h24
-rw-r--r--deps/v8/src/execution/runtime-profiler.cc19
-rw-r--r--deps/v8/src/execution/s390/simulator-s390.cc62
-rw-r--r--deps/v8/src/execution/simulator-base.cc25
-rw-r--r--deps/v8/src/execution/simulator-base.h53
-rw-r--r--deps/v8/src/execution/simulator.h10
-rw-r--r--deps/v8/src/execution/thread-local-top.cc1
-rw-r--r--deps/v8/src/execution/thread-local-top.h6
-rw-r--r--deps/v8/src/flags/flag-definitions.h129
-rw-r--r--deps/v8/src/handles/handles-inl.h4
-rw-r--r--deps/v8/src/handles/handles.h8
-rw-r--r--deps/v8/src/heap/allocation-observer.cc4
-rw-r--r--deps/v8/src/heap/base/worklist.h38
-rw-r--r--deps/v8/src/heap/code-range.cc10
-rw-r--r--deps/v8/src/heap/code-range.h2
-rw-r--r--deps/v8/src/heap/code-stats.cc13
-rw-r--r--deps/v8/src/heap/collection-barrier.cc23
-rw-r--r--deps/v8/src/heap/collection-barrier.h23
-rw-r--r--deps/v8/src/heap/concurrent-allocator.cc38
-rw-r--r--deps/v8/src/heap/concurrent-allocator.h6
-rw-r--r--deps/v8/src/heap/concurrent-marking.cc78
-rw-r--r--deps/v8/src/heap/concurrent-marking.h1
-rw-r--r--deps/v8/src/heap/cppgc-js/cpp-heap.cc100
-rw-r--r--deps/v8/src/heap/cppgc-js/cpp-heap.h37
-rw-r--r--deps/v8/src/heap/cppgc-js/cpp-snapshot.cc6
-rw-r--r--deps/v8/src/heap/cppgc/caged-heap.h9
-rw-r--r--deps/v8/src/heap/cppgc/compactor.cc10
-rw-r--r--deps/v8/src/heap/cppgc/compactor.h1
-rw-r--r--deps/v8/src/heap/cppgc/heap-base.cc9
-rw-r--r--deps/v8/src/heap/cppgc/heap-base.h16
-rw-r--r--deps/v8/src/heap/cppgc/heap-object-header.h35
-rw-r--r--deps/v8/src/heap/cppgc/heap-page.cc11
-rw-r--r--deps/v8/src/heap/cppgc/heap-space.h3
-rw-r--r--deps/v8/src/heap/cppgc/heap-state.cc7
-rw-r--r--deps/v8/src/heap/cppgc/heap.cc31
-rw-r--r--deps/v8/src/heap/cppgc/heap.h3
-rw-r--r--deps/v8/src/heap/cppgc/incremental-marking-schedule.cc1
-rw-r--r--deps/v8/src/heap/cppgc/marker.cc37
-rw-r--r--deps/v8/src/heap/cppgc/marker.h5
-rw-r--r--deps/v8/src/heap/cppgc/marking-state.h18
-rw-r--r--deps/v8/src/heap/cppgc/marking-verifier.cc10
-rw-r--r--deps/v8/src/heap/cppgc/marking-visitor.cc3
-rw-r--r--deps/v8/src/heap/cppgc/object-poisoner.h3
-rw-r--r--deps/v8/src/heap/cppgc/object-size-trait.cc6
-rw-r--r--deps/v8/src/heap/cppgc/object-view.h21
-rw-r--r--deps/v8/src/heap/cppgc/persistent-node.cc12
-rw-r--r--deps/v8/src/heap/cppgc/prefinalizer-handler.cc4
-rw-r--r--deps/v8/src/heap/cppgc/sweeper.cc90
-rw-r--r--deps/v8/src/heap/cppgc/visitor.cc2
-rw-r--r--deps/v8/src/heap/embedder-tracing.cc72
-rw-r--r--deps/v8/src/heap/embedder-tracing.h33
-rw-r--r--deps/v8/src/heap/factory-base.cc128
-rw-r--r--deps/v8/src/heap/factory-base.h33
-rw-r--r--deps/v8/src/heap/factory.cc168
-rw-r--r--deps/v8/src/heap/factory.h53
-rw-r--r--deps/v8/src/heap/gc-tracer.cc50
-rw-r--r--deps/v8/src/heap/gc-tracer.h16
-rw-r--r--deps/v8/src/heap/heap-controller.cc4
-rw-r--r--deps/v8/src/heap/heap-inl.h29
-rw-r--r--deps/v8/src/heap/heap-layout-tracer.cc73
-rw-r--r--deps/v8/src/heap/heap-layout-tracer.h33
-rw-r--r--deps/v8/src/heap/heap-write-barrier-inl.h10
-rw-r--r--deps/v8/src/heap/heap-write-barrier.cc13
-rw-r--r--deps/v8/src/heap/heap-write-barrier.h3
-rw-r--r--deps/v8/src/heap/heap.cc353
-rw-r--r--deps/v8/src/heap/heap.h96
-rw-r--r--deps/v8/src/heap/incremental-marking-inl.h8
-rw-r--r--deps/v8/src/heap/incremental-marking.cc33
-rw-r--r--deps/v8/src/heap/incremental-marking.h20
-rw-r--r--deps/v8/src/heap/invalidated-slots-inl.h4
-rw-r--r--deps/v8/src/heap/large-spaces.cc16
-rw-r--r--deps/v8/src/heap/linear-allocation-area.h8
-rw-r--r--deps/v8/src/heap/local-allocator-inl.h2
-rw-r--r--deps/v8/src/heap/local-factory.cc3
-rw-r--r--deps/v8/src/heap/local-factory.h20
-rw-r--r--deps/v8/src/heap/local-heap-inl.h20
-rw-r--r--deps/v8/src/heap/local-heap.cc152
-rw-r--r--deps/v8/src/heap/local-heap.h16
-rw-r--r--deps/v8/src/heap/mark-compact-inl.h4
-rw-r--r--deps/v8/src/heap/mark-compact.cc736
-rw-r--r--deps/v8/src/heap/mark-compact.h107
-rw-r--r--deps/v8/src/heap/marking-barrier-inl.h2
-rw-r--r--deps/v8/src/heap/marking-barrier.cc14
-rw-r--r--deps/v8/src/heap/marking-barrier.h1
-rw-r--r--deps/v8/src/heap/marking-visitor-inl.h26
-rw-r--r--deps/v8/src/heap/marking-visitor.h43
-rw-r--r--deps/v8/src/heap/memory-allocator.cc14
-rw-r--r--deps/v8/src/heap/memory-allocator.h10
-rw-r--r--deps/v8/src/heap/memory-chunk.cc9
-rw-r--r--deps/v8/src/heap/memory-chunk.h5
-rw-r--r--deps/v8/src/heap/memory-measurement-inl.h1
-rw-r--r--deps/v8/src/heap/memory-measurement.cc9
-rw-r--r--deps/v8/src/heap/new-spaces-inl.h12
-rw-r--r--deps/v8/src/heap/new-spaces.cc84
-rw-r--r--deps/v8/src/heap/new-spaces.h23
-rw-r--r--deps/v8/src/heap/object-stats.cc27
-rw-r--r--deps/v8/src/heap/objects-visiting.h5
-rw-r--r--deps/v8/src/heap/paged-spaces-inl.h21
-rw-r--r--deps/v8/src/heap/paged-spaces.cc106
-rw-r--r--deps/v8/src/heap/paged-spaces.h30
-rw-r--r--deps/v8/src/heap/parked-scope.h2
-rw-r--r--deps/v8/src/heap/read-only-spaces.cc11
-rw-r--r--deps/v8/src/heap/remembered-set.h6
-rw-r--r--deps/v8/src/heap/safepoint.cc267
-rw-r--r--deps/v8/src/heap/safepoint.h109
-rw-r--r--deps/v8/src/heap/scavenger-inl.h102
-rw-r--r--deps/v8/src/heap/scavenger.cc62
-rw-r--r--deps/v8/src/heap/scavenger.h73
-rw-r--r--deps/v8/src/heap/setup-heap-internal.cc14
-rw-r--r--deps/v8/src/heap/slot-set.h2
-rw-r--r--deps/v8/src/heap/spaces.cc43
-rw-r--r--deps/v8/src/heap/spaces.h24
-rw-r--r--deps/v8/src/heap/sweeper.cc2
-rw-r--r--deps/v8/src/heap/weak-object-worklists.cc21
-rw-r--r--deps/v8/src/heap/weak-object-worklists.h22
-rw-r--r--deps/v8/src/heap/worklist.h453
-rw-r--r--deps/v8/src/ic/accessor-assembler.cc51
-rw-r--r--deps/v8/src/ic/accessor-assembler.h2
-rw-r--r--deps/v8/src/ic/binary-op-assembler.cc35
-rw-r--r--deps/v8/src/ic/binary-op-assembler.h39
-rw-r--r--deps/v8/src/ic/ic-inl.h6
-rw-r--r--deps/v8/src/ic/ic.cc24
-rw-r--r--deps/v8/src/ic/ic.h2
-rw-r--r--deps/v8/src/ic/keyed-store-generic.cc2
-rw-r--r--deps/v8/src/init/bootstrapper.cc106
-rw-r--r--deps/v8/src/init/heap-symbols.h12
-rw-r--r--deps/v8/src/init/v8.cc19
-rw-r--r--deps/v8/src/init/v8.h6
-rw-r--r--deps/v8/src/inspector/DEPS1
-rw-r--r--deps/v8/src/inspector/string-util.h4
-rw-r--r--deps/v8/src/inspector/v8-debugger-agent-impl.cc59
-rw-r--r--deps/v8/src/inspector/v8-debugger-agent-impl.h4
-rw-r--r--deps/v8/src/inspector/v8-debugger.cc160
-rw-r--r--deps/v8/src/inspector/v8-debugger.h37
-rw-r--r--deps/v8/src/inspector/v8-stack-trace-impl.cc24
-rw-r--r--deps/v8/src/inspector/v8-stack-trace-impl.h5
-rw-r--r--deps/v8/src/inspector/value-mirror.cc88
-rw-r--r--deps/v8/src/interpreter/bytecode-array-builder.h11
-rw-r--r--deps/v8/src/interpreter/bytecode-array-writer.cc2
-rw-r--r--deps/v8/src/interpreter/bytecode-generator.cc132
-rw-r--r--deps/v8/src/interpreter/bytecode-generator.h24
-rw-r--r--deps/v8/src/interpreter/interpreter-assembler.cc49
-rw-r--r--deps/v8/src/interpreter/interpreter-assembler.h35
-rw-r--r--deps/v8/src/interpreter/interpreter-generator.cc86
-rw-r--r--deps/v8/src/interpreter/interpreter.cc18
-rw-r--r--deps/v8/src/interpreter/interpreter.h2
-rw-r--r--deps/v8/src/json/json-parser.cc7
-rw-r--r--deps/v8/src/json/json-parser.h3
-rw-r--r--deps/v8/src/json/json-stringifier.cc272
-rw-r--r--deps/v8/src/libsampler/sampler.cc4
-rw-r--r--deps/v8/src/logging/counters-definitions.h25
-rw-r--r--deps/v8/src/logging/counters.cc130
-rw-r--r--deps/v8/src/logging/counters.h252
-rw-r--r--deps/v8/src/logging/log.cc14
-rw-r--r--deps/v8/src/logging/runtime-call-stats-scope.h9
-rw-r--r--deps/v8/src/logging/runtime-call-stats.h11
-rw-r--r--deps/v8/src/numbers/conversions.cc2
-rw-r--r--deps/v8/src/numbers/conversions.h2
-rw-r--r--deps/v8/src/numbers/hash-seed-inl.h7
-rw-r--r--deps/v8/src/objects/all-objects-inl.h2
-rw-r--r--deps/v8/src/objects/allocation-site-inl.h9
-rw-r--r--deps/v8/src/objects/allocation-site.h2
-rw-r--r--deps/v8/src/objects/api-callbacks.h10
-rw-r--r--deps/v8/src/objects/arguments.h4
-rw-r--r--deps/v8/src/objects/backing-store.cc5
-rw-r--r--deps/v8/src/objects/backing-store.h5
-rw-r--r--deps/v8/src/objects/bigint.cc95
-rw-r--r--deps/v8/src/objects/code-inl.h142
-rw-r--r--deps/v8/src/objects/code.cc236
-rw-r--r--deps/v8/src/objects/code.h192
-rw-r--r--deps/v8/src/objects/contexts.cc11
-rw-r--r--deps/v8/src/objects/contexts.h3
-rw-r--r--deps/v8/src/objects/contexts.tq4
-rw-r--r--deps/v8/src/objects/debug-objects.h7
-rw-r--r--deps/v8/src/objects/descriptor-array.h3
-rw-r--r--deps/v8/src/objects/descriptor-array.tq6
-rw-r--r--deps/v8/src/objects/elements.cc54
-rw-r--r--deps/v8/src/objects/feedback-vector.cc92
-rw-r--r--deps/v8/src/objects/feedback-vector.h12
-rw-r--r--deps/v8/src/objects/feedback-vector.tq2
-rw-r--r--deps/v8/src/objects/field-index.h2
-rw-r--r--deps/v8/src/objects/fixed-array-inl.h6
-rw-r--r--deps/v8/src/objects/fixed-array.h14
-rw-r--r--deps/v8/src/objects/foreign.tq2
-rw-r--r--deps/v8/src/objects/free-space.h2
-rw-r--r--deps/v8/src/objects/function-kind.h5
-rw-r--r--deps/v8/src/objects/heap-number.h2
-rw-r--r--deps/v8/src/objects/heap-object.h14
-rw-r--r--deps/v8/src/objects/instance-type-inl.h50
-rw-r--r--deps/v8/src/objects/instance-type.h39
-rw-r--r--deps/v8/src/objects/intl-objects.cc107
-rw-r--r--deps/v8/src/objects/intl-objects.h7
-rw-r--r--deps/v8/src/objects/intl-objects.tq125
-rw-r--r--deps/v8/src/objects/js-array-buffer-inl.h101
-rw-r--r--deps/v8/src/objects/js-array-buffer.cc45
-rw-r--r--deps/v8/src/objects/js-array-buffer.h48
-rw-r--r--deps/v8/src/objects/js-array-buffer.tq45
-rw-r--r--deps/v8/src/objects/js-collator.cc6
-rw-r--r--deps/v8/src/objects/js-date-time-format.cc34
-rw-r--r--deps/v8/src/objects/js-display-names.cc53
-rw-r--r--deps/v8/src/objects/js-function-inl.h12
-rw-r--r--deps/v8/src/objects/js-function.cc6
-rw-r--r--deps/v8/src/objects/js-function.h8
-rw-r--r--deps/v8/src/objects/js-generator.h3
-rw-r--r--deps/v8/src/objects/js-locale.cc47
-rw-r--r--deps/v8/src/objects/js-objects-inl.h12
-rw-r--r--deps/v8/src/objects/js-objects.cc45
-rw-r--r--deps/v8/src/objects/js-objects.h4
-rw-r--r--deps/v8/src/objects/keys.cc6
-rw-r--r--deps/v8/src/objects/literal-objects.cc22
-rw-r--r--deps/v8/src/objects/literal-objects.h5
-rw-r--r--deps/v8/src/objects/lookup-inl.h8
-rw-r--r--deps/v8/src/objects/lookup.cc62
-rw-r--r--deps/v8/src/objects/map-inl.h10
-rw-r--r--deps/v8/src/objects/map-updater.cc93
-rw-r--r--deps/v8/src/objects/map-updater.h7
-rw-r--r--deps/v8/src/objects/map.cc77
-rw-r--r--deps/v8/src/objects/map.h9
-rw-r--r--deps/v8/src/objects/map.tq4
-rw-r--r--deps/v8/src/objects/microtask.h6
-rw-r--r--deps/v8/src/objects/module.cc40
-rw-r--r--deps/v8/src/objects/module.h9
-rw-r--r--deps/v8/src/objects/object-list-macros.h13
-rw-r--r--deps/v8/src/objects/objects-body-descriptors-inl.h276
-rw-r--r--deps/v8/src/objects/objects-body-descriptors.h6
-rw-r--r--deps/v8/src/objects/objects-definitions.h16
-rw-r--r--deps/v8/src/objects/objects-inl.h113
-rw-r--r--deps/v8/src/objects/objects.cc116
-rw-r--r--deps/v8/src/objects/objects.h14
-rw-r--r--deps/v8/src/objects/oddball.tq8
-rw-r--r--deps/v8/src/objects/promise.h14
-rw-r--r--deps/v8/src/objects/property-cell-inl.h4
-rw-r--r--deps/v8/src/objects/property-descriptor-object.h2
-rw-r--r--deps/v8/src/objects/property-descriptor.cc8
-rw-r--r--deps/v8/src/objects/property-details.h111
-rw-r--r--deps/v8/src/objects/property.cc20
-rw-r--r--deps/v8/src/objects/prototype-info.tq4
-rw-r--r--deps/v8/src/objects/scope-info.cc8
-rw-r--r--deps/v8/src/objects/scope-info.h2
-rw-r--r--deps/v8/src/objects/script.h3
-rw-r--r--deps/v8/src/objects/shared-function-info-inl.h63
-rw-r--r--deps/v8/src/objects/shared-function-info.cc107
-rw-r--r--deps/v8/src/objects/shared-function-info.h59
-rw-r--r--deps/v8/src/objects/shared-function-info.tq44
-rw-r--r--deps/v8/src/objects/source-text-module.cc95
-rw-r--r--deps/v8/src/objects/source-text-module.h9
-rw-r--r--deps/v8/src/objects/stack-frame-info.cc55
-rw-r--r--deps/v8/src/objects/stack-frame-info.h10
-rw-r--r--deps/v8/src/objects/string-inl.h230
-rw-r--r--deps/v8/src/objects/string-table.cc28
-rw-r--r--deps/v8/src/objects/string.cc340
-rw-r--r--deps/v8/src/objects/string.h104
-rw-r--r--deps/v8/src/objects/string.tq3
-rw-r--r--deps/v8/src/objects/struct.h8
-rw-r--r--deps/v8/src/objects/synthetic-module.cc28
-rw-r--r--deps/v8/src/objects/tagged-field.h2
-rw-r--r--deps/v8/src/objects/template-objects.h6
-rw-r--r--deps/v8/src/objects/templates.cc4
-rw-r--r--deps/v8/src/objects/templates.h10
-rw-r--r--deps/v8/src/objects/transitions-inl.h2
-rw-r--r--deps/v8/src/objects/transitions.cc8
-rw-r--r--deps/v8/src/objects/turbofan-types.h9
-rw-r--r--deps/v8/src/objects/turbofan-types.tq82
-rw-r--r--deps/v8/src/objects/value-serializer.cc51
-rw-r--r--deps/v8/src/objects/visitors.h9
-rw-r--r--deps/v8/src/parsing/expression-scope.h8
-rw-r--r--deps/v8/src/parsing/parse-info.cc108
-rw-r--r--deps/v8/src/parsing/parse-info.h164
-rw-r--r--deps/v8/src/parsing/parser-base.h49
-rw-r--r--deps/v8/src/parsing/parser.cc384
-rw-r--r--deps/v8/src/parsing/parser.h30
-rw-r--r--deps/v8/src/parsing/parsing.cc14
-rw-r--r--deps/v8/src/parsing/preparse-data-impl.h4
-rw-r--r--deps/v8/src/parsing/preparse-data.cc18
-rw-r--r--deps/v8/src/parsing/preparse-data.h2
-rw-r--r--deps/v8/src/parsing/scanner-character-streams.cc145
-rw-r--r--deps/v8/src/parsing/scanner.cc2
-rw-r--r--deps/v8/src/profiler/cpu-profiler.cc52
-rw-r--r--deps/v8/src/profiler/cpu-profiler.h22
-rw-r--r--deps/v8/src/profiler/heap-snapshot-generator.cc83
-rw-r--r--deps/v8/src/profiler/heap-snapshot-generator.h4
-rw-r--r--deps/v8/src/profiler/profile-generator.cc25
-rw-r--r--deps/v8/src/profiler/profile-generator.h17
-rw-r--r--deps/v8/src/profiler/profiler-listener.cc33
-rw-r--r--deps/v8/src/profiler/sampling-heap-profiler.cc10
-rw-r--r--deps/v8/src/profiler/symbolizer.cc4
-rw-r--r--deps/v8/src/profiler/tick-sample.cc21
-rw-r--r--deps/v8/src/profiler/tick-sample.h9
-rw-r--r--deps/v8/src/regexp/experimental/experimental-bytecode.cc12
-rw-r--r--deps/v8/src/regexp/experimental/experimental-bytecode.h4
-rw-r--r--deps/v8/src/regexp/experimental/experimental-compiler.cc2
-rw-r--r--deps/v8/src/regexp/experimental/experimental-interpreter.cc17
-rw-r--r--deps/v8/src/regexp/loong64/regexp-macro-assembler-loong64.cc174
-rw-r--r--deps/v8/src/regexp/loong64/regexp-macro-assembler-loong64.h14
-rw-r--r--deps/v8/src/regexp/mips/regexp-macro-assembler-mips.cc175
-rw-r--r--deps/v8/src/regexp/mips/regexp-macro-assembler-mips.h14
-rw-r--r--deps/v8/src/regexp/mips64/regexp-macro-assembler-mips64.cc175
-rw-r--r--deps/v8/src/regexp/mips64/regexp-macro-assembler-mips64.h14
-rw-r--r--deps/v8/src/regexp/regexp-ast.cc16
-rw-r--r--deps/v8/src/regexp/regexp-ast.h10
-rw-r--r--deps/v8/src/regexp/regexp-compiler-tonode.cc56
-rw-r--r--deps/v8/src/regexp/regexp-compiler.cc10
-rw-r--r--deps/v8/src/regexp/regexp-dotprinter.cc4
-rw-r--r--deps/v8/src/regexp/regexp-interpreter.cc3
-rw-r--r--deps/v8/src/regexp/regexp-parser.cc230
-rw-r--r--deps/v8/src/roots/roots.h14
-rw-r--r--deps/v8/src/runtime/runtime-classes.cc8
-rw-r--r--deps/v8/src/runtime/runtime-compiler.cc16
-rw-r--r--deps/v8/src/runtime/runtime-debug.cc10
-rw-r--r--deps/v8/src/runtime/runtime-internal.cc21
-rw-r--r--deps/v8/src/runtime/runtime-literals.cc12
-rw-r--r--deps/v8/src/runtime/runtime-object.cc36
-rw-r--r--deps/v8/src/runtime/runtime-scopes.cc7
-rw-r--r--deps/v8/src/runtime/runtime-strings.cc14
-rw-r--r--deps/v8/src/runtime/runtime-symbol.cc2
-rw-r--r--deps/v8/src/runtime/runtime-test.cc99
-rw-r--r--deps/v8/src/runtime/runtime-trace.cc33
-rw-r--r--deps/v8/src/runtime/runtime-wasm.cc80
-rw-r--r--deps/v8/src/runtime/runtime.cc2
-rw-r--r--deps/v8/src/runtime/runtime.h8
-rw-r--r--deps/v8/src/security/caged-pointer-inl.h23
-rw-r--r--deps/v8/src/security/caged-pointer.h10
-rw-r--r--deps/v8/src/security/vm-cage.cc271
-rw-r--r--deps/v8/src/security/vm-cage.h64
-rw-r--r--deps/v8/src/snapshot/code-serializer.cc43
-rw-r--r--deps/v8/src/snapshot/context-serializer.cc6
-rw-r--r--deps/v8/src/snapshot/deserializer.cc34
-rw-r--r--deps/v8/src/snapshot/deserializer.h2
-rw-r--r--deps/v8/src/snapshot/embedded/embedded-data.cc38
-rw-r--r--deps/v8/src/snapshot/embedded/embedded-data.h33
-rw-r--r--deps/v8/src/snapshot/mksnapshot.cc2
-rw-r--r--deps/v8/src/snapshot/read-only-serializer.cc2
-rw-r--r--deps/v8/src/snapshot/roots-serializer.cc4
-rw-r--r--deps/v8/src/snapshot/serializer-deserializer.h4
-rw-r--r--deps/v8/src/snapshot/serializer.cc21
-rw-r--r--deps/v8/src/snapshot/serializer.h17
-rw-r--r--deps/v8/src/snapshot/snapshot-source-sink.h4
-rw-r--r--deps/v8/src/snapshot/snapshot.cc43
-rw-r--r--deps/v8/src/snapshot/snapshot.h3
-rw-r--r--deps/v8/src/strings/string-builder-inl.h56
-rw-r--r--deps/v8/src/strings/string-builder.cc4
-rw-r--r--deps/v8/src/strings/string-stream.cc2
-rw-r--r--deps/v8/src/temporal/OWNERS2
-rw-r--r--deps/v8/src/temporal/temporal-parser.cc1220
-rw-r--r--deps/v8/src/temporal/temporal-parser.h147
-rw-r--r--deps/v8/src/torque/ast.h2
-rw-r--r--deps/v8/src/torque/constants.h21
-rw-r--r--deps/v8/src/torque/earley-parser.cc6
-rw-r--r--deps/v8/src/torque/earley-parser.h7
-rw-r--r--deps/v8/src/torque/implementation-visitor.cc45
-rw-r--r--deps/v8/src/torque/instance-type-generator.cc31
-rw-r--r--deps/v8/src/torque/source-positions.h3
-rw-r--r--deps/v8/src/torque/torque-parser.cc128
-rw-r--r--deps/v8/src/torque/type-visitor.cc26
-rw-r--r--deps/v8/src/torque/types.cc14
-rw-r--r--deps/v8/src/torque/types.h20
-rw-r--r--deps/v8/src/trap-handler/trap-handler.h2
-rw-r--r--deps/v8/src/utils/allocation.cc21
-rw-r--r--deps/v8/src/utils/allocation.h4
-rw-r--r--deps/v8/src/utils/identity-map.h1
-rw-r--r--deps/v8/src/wasm/baseline/arm/liftoff-assembler-arm.h4
-rw-r--r--deps/v8/src/wasm/baseline/ia32/liftoff-assembler-ia32.h2
-rw-r--r--deps/v8/src/wasm/baseline/liftoff-assembler.h2
-rw-r--r--deps/v8/src/wasm/baseline/liftoff-compiler.cc198
-rw-r--r--deps/v8/src/wasm/baseline/mips/liftoff-assembler-mips.h2
-rw-r--r--deps/v8/src/wasm/baseline/ppc/liftoff-assembler-ppc.h247
-rw-r--r--deps/v8/src/wasm/baseline/riscv64/liftoff-assembler-riscv64.h866
-rw-r--r--deps/v8/src/wasm/baseline/s390/liftoff-assembler-s390.h60
-rw-r--r--deps/v8/src/wasm/baseline/x64/liftoff-assembler-x64.h4
-rw-r--r--deps/v8/src/wasm/c-api.cc26
-rw-r--r--deps/v8/src/wasm/code-space-access.cc5
-rw-r--r--deps/v8/src/wasm/compilation-environment.h21
-rw-r--r--deps/v8/src/wasm/function-body-decoder-impl.h110
-rw-r--r--deps/v8/src/wasm/graph-builder-interface.cc69
-rw-r--r--deps/v8/src/wasm/init-expr-interface.cc4
-rw-r--r--deps/v8/src/wasm/init-expr-interface.h5
-rw-r--r--deps/v8/src/wasm/jump-table-assembler.cc22
-rw-r--r--deps/v8/src/wasm/memory-protection-key.cc8
-rw-r--r--deps/v8/src/wasm/memory-protection-key.h5
-rw-r--r--deps/v8/src/wasm/module-compiler.cc201
-rw-r--r--deps/v8/src/wasm/module-compiler.h8
-rw-r--r--deps/v8/src/wasm/module-instantiate.cc114
-rw-r--r--deps/v8/src/wasm/stacks.h63
-rw-r--r--deps/v8/src/wasm/streaming-decoder.cc23
-rw-r--r--deps/v8/src/wasm/wasm-code-manager.cc70
-rw-r--r--deps/v8/src/wasm/wasm-code-manager.h21
-rw-r--r--deps/v8/src/wasm/wasm-constants.h4
-rw-r--r--deps/v8/src/wasm/wasm-engine.cc37
-rw-r--r--deps/v8/src/wasm/wasm-external-refs.cc15
-rw-r--r--deps/v8/src/wasm/wasm-init-expr.cc2
-rw-r--r--deps/v8/src/wasm/wasm-init-expr.h87
-rw-r--r--deps/v8/src/wasm/wasm-js.cc130
-rw-r--r--deps/v8/src/wasm/wasm-limits.h2
-rw-r--r--deps/v8/src/wasm/wasm-module-builder.cc14
-rw-r--r--deps/v8/src/wasm/wasm-module-builder.h2
-rw-r--r--deps/v8/src/wasm/wasm-module.h8
-rw-r--r--deps/v8/src/wasm/wasm-objects-inl.h81
-rw-r--r--deps/v8/src/wasm/wasm-objects.cc445
-rw-r--r--deps/v8/src/wasm/wasm-objects.h132
-rw-r--r--deps/v8/src/wasm/wasm-objects.tq45
-rw-r--r--deps/v8/src/wasm/wasm-serialization.cc4
-rw-r--r--deps/v8/src/web-snapshot/web-snapshot.cc759
-rw-r--r--deps/v8/src/web-snapshot/web-snapshot.h48
724 files changed, 24857 insertions, 11971 deletions
diff --git a/deps/v8/src/api/api-inl.h b/deps/v8/src/api/api-inl.h
index 20a3d910ce..17f8bd94bc 100644
--- a/deps/v8/src/api/api-inl.h
+++ b/deps/v8/src/api/api-inl.h
@@ -318,6 +318,7 @@ inline bool V8_EXPORT TryToCopyAndConvertArrayToCppBuffer(Local<Array> src,
namespace internal {
Handle<Context> HandleScopeImplementer::LastEnteredContext() {
+ DCHECK_EQ(entered_contexts_.capacity(), is_microtask_context_.capacity());
DCHECK_EQ(entered_contexts_.size(), is_microtask_context_.size());
for (size_t i = 0; i < entered_contexts_.size(); ++i) {
diff --git a/deps/v8/src/api/api-natives.cc b/deps/v8/src/api/api-natives.cc
index c64107f3b8..75109e35b7 100644
--- a/deps/v8/src/api/api-natives.cc
+++ b/deps/v8/src/api/api-natives.cc
@@ -244,7 +244,7 @@ MaybeHandle<JSObject> ConfigureInstance(Isolate* isolate, Handle<JSObject> obj,
PropertyAttributes attributes = details.attributes();
PropertyKind kind = details.kind();
- if (kind == kData) {
+ if (kind == PropertyKind::kData) {
auto prop_data = handle(properties->get(i++), isolate);
RETURN_ON_EXCEPTION(
isolate,
@@ -263,7 +263,7 @@ MaybeHandle<JSObject> ConfigureInstance(Isolate* isolate, Handle<JSObject> obj,
// context.
PropertyDetails details(Smi::cast(properties->get(i++)));
PropertyAttributes attributes = details.attributes();
- DCHECK_EQ(kData, details.kind());
+ DCHECK_EQ(PropertyKind::kData, details.kind());
v8::Intrinsic intrinsic =
static_cast<v8::Intrinsic>(Smi::ToInt(properties->get(i++)));
@@ -625,7 +625,8 @@ MaybeHandle<JSObject> ApiNatives::InstantiateRemoteObject(
void ApiNatives::AddDataProperty(Isolate* isolate, Handle<TemplateInfo> info,
Handle<Name> name, Handle<Object> value,
PropertyAttributes attributes) {
- PropertyDetails details(kData, attributes, PropertyConstness::kMutable);
+ PropertyDetails details(PropertyKind::kData, attributes,
+ PropertyConstness::kMutable);
auto details_handle = handle(details.AsSmi(), isolate);
Handle<Object> data[] = {name, details_handle, value};
AddPropertyToPropertyList(isolate, info, arraysize(data), data);
@@ -636,7 +637,8 @@ void ApiNatives::AddDataProperty(Isolate* isolate, Handle<TemplateInfo> info,
PropertyAttributes attributes) {
auto value = handle(Smi::FromInt(intrinsic), isolate);
auto intrinsic_marker = isolate->factory()->true_value();
- PropertyDetails details(kData, attributes, PropertyConstness::kMutable);
+ PropertyDetails details(PropertyKind::kData, attributes,
+ PropertyConstness::kMutable);
auto details_handle = handle(details.AsSmi(), isolate);
Handle<Object> data[] = {name, intrinsic_marker, details_handle, value};
AddPropertyToPropertyList(isolate, info, arraysize(data), data);
@@ -650,7 +652,8 @@ void ApiNatives::AddAccessorProperty(Isolate* isolate,
PropertyAttributes attributes) {
if (!getter.is_null()) getter->set_published(true);
if (!setter.is_null()) setter->set_published(true);
- PropertyDetails details(kAccessor, attributes, PropertyConstness::kMutable);
+ PropertyDetails details(PropertyKind::kAccessor, attributes,
+ PropertyConstness::kMutable);
auto details_handle = handle(details.AsSmi(), isolate);
Handle<Object> data[] = {name, details_handle, getter, setter};
AddPropertyToPropertyList(isolate, info, arraysize(data), data);
diff --git a/deps/v8/src/api/api.cc b/deps/v8/src/api/api.cc
index 3cc4f2b61e..7e9c504f8e 100644
--- a/deps/v8/src/api/api.cc
+++ b/deps/v8/src/api/api.cc
@@ -15,6 +15,7 @@
#include "include/v8-callbacks.h"
#include "include/v8-cppgc.h"
#include "include/v8-date.h"
+#include "include/v8-embedder-state-scope.h"
#include "include/v8-extension.h"
#include "include/v8-fast-api-calls.h"
#include "include/v8-function.h"
@@ -43,9 +44,13 @@
#include "src/common/globals.h"
#include "src/compiler-dispatcher/lazy-compile-dispatcher.h"
#include "src/date/date.h"
+#if V8_ENABLE_WEBASSEMBLY
+#include "src/debug/debug-wasm-objects.h"
+#endif // V8_ENABLE_WEBASSEMBLY
#include "src/debug/liveedit.h"
#include "src/deoptimizer/deoptimizer.h"
#include "src/diagnostics/gdb-jit.h"
+#include "src/execution/embedder-state.h"
#include "src/execution/execution.h"
#include "src/execution/frames-inl.h"
#include "src/execution/isolate-inl.h"
@@ -59,6 +64,8 @@
#include "src/handles/persistent-handles.h"
#include "src/heap/embedder-tracing.h"
#include "src/heap/heap-inl.h"
+#include "src/heap/heap-write-barrier.h"
+#include "src/heap/safepoint.h"
#include "src/init/bootstrapper.h"
#include "src/init/icu_util.h"
#include "src/init/startup-data-util.h"
@@ -170,8 +177,8 @@ static ScriptOrigin GetScriptOriginForScript(i::Isolate* isolate,
i::Handle<i::Script> script) {
i::Handle<i::Object> scriptName(script->GetNameOrSourceURL(), isolate);
i::Handle<i::Object> source_map_url(script->source_mapping_url(), isolate);
- i::Handle<i::FixedArray> host_defined_options(script->host_defined_options(),
- isolate);
+ i::Handle<i::Object> host_defined_options(script->host_defined_options(),
+ isolate);
ScriptOriginOptions options(script->origin_options());
bool is_wasm = false;
#if V8_ENABLE_WEBASSEMBLY
@@ -182,7 +189,7 @@ static ScriptOrigin GetScriptOriginForScript(i::Isolate* isolate,
script->line_offset(), script->column_offset(),
options.IsSharedCrossOrigin(), script->id(),
Utils::ToLocal(source_map_url), options.IsOpaque(), is_wasm,
- options.IsModule(), Utils::PrimitiveArrayToLocal(host_defined_options));
+ options.IsModule(), Utils::ToLocal(host_defined_options));
return origin;
}
@@ -191,7 +198,7 @@ ScriptOrigin::ScriptOrigin(
Local<Integer> column_offset, Local<Boolean> is_shared_cross_origin,
Local<Integer> script_id, Local<Value> source_map_url,
Local<Boolean> is_opaque, Local<Boolean> is_wasm, Local<Boolean> is_module,
- Local<PrimitiveArray> host_defined_options)
+ Local<Data> host_defined_options)
: ScriptOrigin(
Isolate::GetCurrent(), resource_name,
line_offset.IsEmpty() ? 0 : static_cast<int>(line_offset->Value()),
@@ -207,7 +214,7 @@ ScriptOrigin::ScriptOrigin(Local<Value> resource_name, int line_offset,
int column_offset, bool is_shared_cross_origin,
int script_id, Local<Value> source_map_url,
bool is_opaque, bool is_wasm, bool is_module,
- Local<PrimitiveArray> host_defined_options)
+ Local<Data> host_defined_options)
: isolate_(Isolate::GetCurrent()),
resource_name_(resource_name),
resource_line_offset_(line_offset),
@@ -217,16 +224,15 @@ ScriptOrigin::ScriptOrigin(Local<Value> resource_name, int line_offset,
source_map_url_(source_map_url),
host_defined_options_(host_defined_options) {}
-Local<Integer> ScriptOrigin::ResourceLineOffset() const {
- return v8::Integer::New(isolate_, resource_line_offset_);
-}
-
-Local<Integer> ScriptOrigin::ResourceColumnOffset() const {
- return v8::Integer::New(isolate_, resource_column_offset_);
-}
-
-Local<Integer> ScriptOrigin::ScriptID() const {
- return v8::Integer::New(isolate_, script_id_);
+Local<PrimitiveArray> ScriptOrigin::HostDefinedOptions() const {
+ // TODO(cbruni, chromium:1244145): remove once migrated to the context.
+ Utils::ApiCheck(!host_defined_options_->IsFixedArray(),
+ "ScriptOrigin::HostDefinedOptions",
+ "HostDefinedOptions is not a PrimitiveArray, please use "
+ "ScriptOrigin::GetHostDefinedOptions()");
+ i::Handle<i::FixedArray> options =
+ Utils::OpenHandle(*host_defined_options_.As<FixedArray>());
+ return Utils::PrimitiveArrayToLocal(options);
}
// --- E x c e p t i o n B e h a v i o r ---
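The hunk above narrows HostDefinedOptions() to a checked PrimitiveArray view now that the stored options are a generic Local<Data>. A minimal embedder-side sketch of constructing an origin under the new signature, assuming the matching v8.h declarations that accompany this change; the helper name MakeOriginWithOptions and the "tag" payload are illustrative, not part of this diff:

#include <v8.h>

// Builds a ScriptOrigin whose host-defined options travel as the new
// Local<Data> parameter. A PrimitiveArray is still accepted, since it is a
// v8::Data, and can be read back later via GetHostDefinedOptions().
v8::ScriptOrigin MakeOriginWithOptions(v8::Isolate* isolate,
                                       v8::Local<v8::Value> resource_name) {
  v8::Local<v8::PrimitiveArray> options = v8::PrimitiveArray::New(isolate, 1);
  options->Set(isolate, 0, v8::String::NewFromUtf8Literal(isolate, "tag"));
  return v8::ScriptOrigin(isolate, resource_name,
                          /*resource_line_offset=*/0,
                          /*resource_column_offset=*/0,
                          /*resource_is_shared_cross_origin=*/false,
                          /*script_id=*/-1,
                          /*source_map_url=*/v8::Local<v8::Value>(),
                          /*resource_is_opaque=*/false,
                          /*is_wasm=*/false,
                          /*is_module=*/false, options);
}

GetHostDefinedOptions(), used further down in this diff for streamed compilation, is the accessor that returns the stored value without the PrimitiveArray-only check.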
@@ -665,6 +671,7 @@ StartupData SnapshotCreator::CreateBlob(
i::Snapshot::ClearReconstructableDataForSerialization(
isolate, function_code_handling == FunctionCodeHandling::kClear);
+ i::GlobalSafepointScope global_safepoint(isolate);
i::DisallowGarbageCollection no_gc_from_here_on;
// Create a vector with all contexts and clear associated Persistent fields.
@@ -702,7 +709,7 @@ StartupData SnapshotCreator::CreateBlob(
data->created_ = true;
return i::Snapshot::Create(isolate, &contexts, embedder_fields_serializers,
- no_gc_from_here_on);
+ global_safepoint, no_gc_from_here_on);
}
bool StartupData::CanBeRehashed() const {
@@ -1033,6 +1040,9 @@ void SealHandleScope::operator delete(void*, size_t) { base::OS::Abort(); }
void SealHandleScope::operator delete[](void*, size_t) { base::OS::Abort(); }
bool Data::IsModule() const { return Utils::OpenHandle(this)->IsModule(); }
+bool Data::IsFixedArray() const {
+ return Utils::OpenHandle(this)->IsFixedArray();
+}
bool Data::IsValue() const {
i::DisallowGarbageCollection no_gc;
@@ -1365,6 +1375,14 @@ Local<FunctionTemplate> FunctionTemplate::New(
// Changes to the environment cannot be captured in the snapshot. Expect no
// function templates when the isolate is created for serialization.
LOG_API(i_isolate, FunctionTemplate, New);
+
+ if (!Utils::ApiCheck(
+ !c_function || behavior == ConstructorBehavior::kThrow,
+ "FunctionTemplate::New",
+ "Fast API calls are not supported for constructor functions.")) {
+ return Local<FunctionTemplate>();
+ }
+
ENTER_V8_NO_SCRIPT_NO_EXCEPTION(i_isolate);
return FunctionTemplateNew(
i_isolate, callback, data, signature, length, behavior, false,
@@ -1380,17 +1398,17 @@ Local<FunctionTemplate> FunctionTemplate::NewWithCFunctionOverloads(
v8::Local<Signature> signature, int length, ConstructorBehavior behavior,
SideEffectType side_effect_type,
const MemorySpan<const CFunction>& c_function_overloads) {
- // TODO(mslekova): Once runtime overload resolution between sequences is
- // supported, check that if (c_function_overloads.size() == 2), then
- // c_function_overloads.data()[0].
- // CanResolveOverload(c_function_overloads.data()[1]). We won't support
- // the case where the size is greater than 2 for runtime resolution, until
- // we've added support for ArrayBuffers and ArrayBufferViews. OTOH the
- // overloads list might contain more than 2 functions with different arity,
- // the resolution between which is available at compile time.
-
i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(isolate);
LOG_API(i_isolate, FunctionTemplate, New);
+
+ if (!Utils::ApiCheck(
+ c_function_overloads.size() == 0 ||
+ behavior == ConstructorBehavior::kThrow,
+ "FunctionTemplate::NewWithCFunctionOverloads",
+ "Fast API calls are not supported for constructor functions.")) {
+ return Local<FunctionTemplate>();
+ }
+
ENTER_V8_NO_SCRIPT_NO_EXCEPTION(i_isolate);
return FunctionTemplateNew(i_isolate, callback, data, signature, length,
behavior, false, Local<Private>(),
@@ -1488,23 +1506,27 @@ i::Handle<i::AccessorInfo> MakeAccessorInfo(
if (redirected != i::kNullAddress) {
SET_FIELD_WRAPPED(isolate, obj, set_js_getter, redirected);
}
- if (data.IsEmpty()) {
- data = v8::Undefined(reinterpret_cast<v8::Isolate*>(isolate));
- }
- obj->set_data(*Utils::OpenHandle(*data));
- obj->set_is_special_data_property(is_special_data_property);
- obj->set_replace_on_access(replace_on_access);
+
i::Handle<i::Name> accessor_name = Utils::OpenHandle(*name);
if (!accessor_name->IsUniqueName()) {
accessor_name = isolate->factory()->InternalizeString(
i::Handle<i::String>::cast(accessor_name));
}
- obj->set_name(*accessor_name);
- if (settings & ALL_CAN_READ) obj->set_all_can_read(true);
- if (settings & ALL_CAN_WRITE) obj->set_all_can_write(true);
- obj->set_initial_property_attributes(i::NONE);
+ i::DisallowGarbageCollection no_gc;
+ i::AccessorInfo raw_obj = *obj;
+ if (data.IsEmpty()) {
+ raw_obj.set_data(i::ReadOnlyRoots(isolate).undefined_value());
+ } else {
+ raw_obj.set_data(*Utils::OpenHandle(*data));
+ }
+ raw_obj.set_name(*accessor_name);
+ raw_obj.set_is_special_data_property(is_special_data_property);
+ raw_obj.set_replace_on_access(replace_on_access);
+ if (settings & ALL_CAN_READ) raw_obj.set_all_can_read(true);
+ if (settings & ALL_CAN_WRITE) raw_obj.set_all_can_write(true);
+ raw_obj.set_initial_property_attributes(i::NONE);
if (!signature.IsEmpty()) {
- obj->set_expected_receiver_type(*Utils::OpenHandle(*signature));
+ raw_obj.set_expected_receiver_type(*Utils::OpenHandle(*signature));
}
return obj;
}
@@ -1637,10 +1659,14 @@ static void TemplateSetAccessor(
i::Handle<i::AccessorInfo> accessor_info =
MakeAccessorInfo(isolate, name, getter, setter, data, settings, signature,
is_special_data_property, replace_on_access);
- accessor_info->set_initial_property_attributes(
- static_cast<i::PropertyAttributes>(attribute));
- accessor_info->set_getter_side_effect_type(getter_side_effect_type);
- accessor_info->set_setter_side_effect_type(setter_side_effect_type);
+ {
+ i::DisallowGarbageCollection no_gc;
+ i::AccessorInfo raw = *accessor_info;
+ raw.set_initial_property_attributes(
+ static_cast<i::PropertyAttributes>(attribute));
+ raw.set_getter_side_effect_type(getter_side_effect_type);
+ raw.set_setter_side_effect_type(setter_side_effect_type);
+ }
i::ApiNatives::AddNativeDataProperty(isolate, info, accessor_info);
}
@@ -2051,6 +2077,11 @@ Local<Value> UnboundScript::GetSourceMappingURL() {
}
MaybeLocal<Value> Script::Run(Local<Context> context) {
+ return Run(context, Local<Data>());
+}
+
+MaybeLocal<Value> Script::Run(Local<Context> context,
+ Local<Data> host_defined_options) {
auto v8_isolate = context->GetIsolate();
auto isolate = reinterpret_cast<i::Isolate*>(v8_isolate);
TRACE_EVENT_CALL_STATS_SCOPED(isolate, "v8", "V8.Execute");
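The new overload above lets host-defined options be passed explicitly when a script is run. A hedged usage sketch, assuming the caller already has an entered context and an active HandleScope; CompileAndRun is an illustrative helper, not part of this diff:

#include <v8.h>

// Compiles a script and forwards the origin's host-defined options through
// the new Script::Run(context, host_defined_options) overload.
v8::MaybeLocal<v8::Value> CompileAndRun(v8::Local<v8::Context> context,
                                        v8::Local<v8::String> source,
                                        v8::ScriptOrigin* origin) {
  v8::Local<v8::Script> script;
  if (!v8::Script::Compile(context, source, origin).ToLocal(&script)) {
    return {};  // Compilation failed; the exception is pending on the isolate.
  }
  return script->Run(context, origin->GetHostDefinedOptions());
}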
@@ -2096,12 +2127,12 @@ MaybeLocal<Value> Script::Run(Local<Context> context) {
}
i::Handle<i::Object> receiver = isolate->global_proxy();
- i::Handle<i::FixedArray> host_defined_options(
+ // TODO(cbruni, chromium:1244145): Remove once migrated to the context.
+ i::Handle<i::Object> options(
i::Script::cast(fun->shared().script()).host_defined_options(), isolate);
Local<Value> result;
has_pending_exception = !ToLocal<Value>(
- i::Execution::CallScript(isolate, fun, receiver, host_defined_options),
- &result);
+ i::Execution::CallScript(isolate, fun, receiver, options), &result);
if (i::FLAG_script_delay_fraction > 0.0) {
delta = v8::base::TimeDelta::FromMillisecondsD(
@@ -2125,11 +2156,15 @@ Local<Value> ScriptOrModule::GetResourceName() {
}
Local<PrimitiveArray> ScriptOrModule::GetHostDefinedOptions() {
+ return HostDefinedOptions().As<PrimitiveArray>();
+}
+
+Local<Data> ScriptOrModule::HostDefinedOptions() {
i::Handle<i::ScriptOrModule> obj = Utils::OpenHandle(this);
i::Isolate* isolate = i::GetIsolateFromWritableObject(*obj);
ENTER_V8_NO_SCRIPT_NO_EXCEPTION(isolate);
- i::Handle<i::FixedArray> val(obj->host_defined_options(), isolate);
- return ToApiHandle<PrimitiveArray>(val);
+ i::Handle<i::Object> val(obj->host_defined_options(), isolate);
+ return ToApiHandle<Data>(val);
}
Local<UnboundScript> Script::GetUnboundScript() {
@@ -2190,6 +2225,14 @@ Local<Primitive> PrimitiveArray::Get(Isolate* v8_isolate, int index) {
return ToApiHandle<Primitive>(i_item);
}
+void v8::PrimitiveArray::CheckCast(v8::Data* that) {
+ i::Handle<i::Object> obj = Utils::OpenHandle(that);
+ Utils::ApiCheck(
+ obj->IsFixedArray(), "v8::PrimitiveArray::Cast",
+ "Value is not a PrimitiveArray. This is a temporary issue, v8::Data and "
+ "v8::PrimitiveArray will not be compatible in the future.");
+}
+
int FixedArray::Length() const {
i::Handle<i::FixedArray> self = Utils::OpenHandle(this);
return self->length();
@@ -2469,13 +2512,10 @@ Maybe<bool> Module::SetSyntheticModuleExport(Isolate* isolate,
namespace {
-i::ScriptDetails GetScriptDetails(i::Isolate* isolate,
- Local<Value> resource_name,
- int resource_line_offset,
- int resource_column_offset,
- Local<Value> source_map_url,
- Local<PrimitiveArray> host_defined_options,
- ScriptOriginOptions origin_options) {
+i::ScriptDetails GetScriptDetails(
+ i::Isolate* isolate, Local<Value> resource_name, int resource_line_offset,
+ int resource_column_offset, Local<Value> source_map_url,
+ Local<Data> host_defined_options, ScriptOriginOptions origin_options) {
i::ScriptDetails script_details(Utils::OpenHandle(*(resource_name), true),
origin_options);
script_details.line_offset = resource_line_offset;
@@ -2767,7 +2807,7 @@ i::MaybeHandle<i::SharedFunctionInfo> CompileStreamedSource(
i::ScriptDetails script_details =
GetScriptDetails(isolate, origin.ResourceName(), origin.LineOffset(),
origin.ColumnOffset(), origin.SourceMapUrl(),
- origin.HostDefinedOptions(), origin.Options());
+ origin.GetHostDefinedOptions(), origin.Options());
i::ScriptStreamingData* data = v8_source->impl();
return i::Compiler::GetSharedFunctionInfoForStreamedScript(
isolate, str, script_details, data);
@@ -3021,6 +3061,20 @@ ScriptOrigin Message::GetScriptOrigin() const {
return GetScriptOriginForScript(isolate, script);
}
+void ScriptOrigin::VerifyHostDefinedOptions() const {
+ // TODO(cbruni, chromium:1244145): Remove checks once we allow arbitrary
+ // host-defined options.
+ if (host_defined_options_.IsEmpty()) return;
+ Utils::ApiCheck(host_defined_options_->IsFixedArray(), "ScriptOrigin()",
+ "Host-defined options has to be a PrimitiveArray");
+ i::Handle<i::FixedArray> options =
+ Utils::OpenHandle(*host_defined_options_.As<FixedArray>());
+ for (int i = 0; i < options->length(); i++) {
+ Utils::ApiCheck(options->get(i).IsPrimitive(), "ScriptOrigin()",
+ "PrimitiveArray can only contain primtive values");
+ }
+}
+
v8::Local<Value> Message::GetScriptResourceName() const {
ASSERT_NO_SCRIPT_NO_EXCEPTION(Utils::OpenHandle(this)->GetIsolate());
return GetScriptOrigin().ResourceName();
@@ -3243,6 +3297,15 @@ Local<String> StackFrame::GetScriptSourceMappingURL() const {
Local<String> StackFrame::GetFunctionName() const {
auto self = Utils::OpenHandle(this);
+#if V8_ENABLE_WEBASSEMBLY
+ if (self->IsWasm()) {
+ auto isolate = self->GetIsolate();
+ auto instance = handle(self->GetWasmInstance(), isolate);
+ auto func_index = self->GetWasmFunctionIndex();
+ return Utils::ToLocal(
+ i::GetWasmFunctionDebugName(isolate, instance, func_index));
+ }
+#endif // V8_ENABLE_WEBASSEMBLY
auto name = i::StackFrameInfo::GetFunctionName(self);
if (!name->IsString()) return {};
return Local<String>::Cast(Utils::ToLocal(name));
@@ -3890,6 +3953,12 @@ void v8::Private::CheckCast(v8::Data* that) {
"v8::Private::Cast", "Value is not a Private");
}
+void v8::FixedArray::CheckCast(v8::Data* that) {
+ i::Handle<i::Object> obj = Utils::OpenHandle(that);
+ Utils::ApiCheck(obj->IsFixedArray(), "v8::FixedArray::Cast",
+ "Value is not a FixedArray");
+}
+
void v8::ModuleRequest::CheckCast(v8::Data* that) {
i::Handle<i::Object> obj = Utils::OpenHandle(that);
Utils::ApiCheck(obj->IsModuleRequest(), "v8::ModuleRequest::Cast",
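Together with the Data::IsFixedArray() predicate added earlier in this file, the new FixedArray cast makes generic host-defined options inspectable from the embedder side. A small sketch; the helper name is illustrative only:

#include <v8.h>

// Counts the slots of a host-defined-options value using Data::IsFixedArray()
// and the FixedArray cast added in this diff. Returns -1 when the options are
// empty or not a FixedArray.
int CountHostDefinedOptionSlots(v8::Local<v8::Data> options) {
  if (options.IsEmpty() || !options->IsFixedArray()) return -1;
  return options.As<v8::FixedArray>()->Length();
}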
@@ -5813,7 +5882,8 @@ String::ExternalStringResourceBase* String::GetExternalStringResourceBaseSlow(
}
internal::Address string = str.ptr();
- int type = I::GetInstanceType(string) & I::kFullStringRepresentationMask;
+ int type =
+ I::GetInstanceType(string) & I::kStringRepresentationAndEncodingMask;
*encoding_out = static_cast<Encoding>(type & I::kStringEncodingMask);
if (i::StringShape(str).IsExternalOneByte() ||
i::StringShape(str).IsExternalTwoByte()) {
@@ -5966,6 +6036,7 @@ void v8::Object::SetAlignedPointerInInternalField(int index, void* value) {
.store_aligned_pointer(obj->GetIsolate(), value),
location, "Unaligned pointer");
DCHECK_EQ(value, GetAlignedPointerFromInternalField(index));
+ internal::WriteBarrier::MarkingFromInternalFields(i::JSObject::cast(*obj));
}
void v8::Object::SetAlignedPointerInInternalFields(int argc, int indices[],
@@ -5987,6 +6058,7 @@ void v8::Object::SetAlignedPointerInInternalFields(int argc, int indices[],
location, "Unaligned pointer");
DCHECK_EQ(value, GetAlignedPointerFromInternalField(index));
}
+ internal::WriteBarrier::MarkingFromInternalFields(js_obj);
}
static void* ExternalValue(i::Object obj) {
@@ -6010,7 +6082,7 @@ bool v8::V8::InitializeVirtualMemoryCage() {
}
#endif
-void v8::V8::ShutdownPlatform() { i::V8::ShutdownPlatform(); }
+void v8::V8::DisposePlatform() { i::V8::DisposePlatform(); }
bool v8::V8::Initialize(const int build_config) {
const bool kEmbedderPointerCompression =
@@ -6105,7 +6177,7 @@ void v8::V8::SetReturnAddressLocationResolver(
}
bool v8::V8::Dispose() {
- i::V8::TearDown();
+ i::V8::Dispose();
return true;
}
@@ -6144,7 +6216,8 @@ HeapObjectStatistics::HeapObjectStatistics()
HeapCodeStatistics::HeapCodeStatistics()
: code_and_metadata_size_(0),
bytecode_and_metadata_size_(0),
- external_script_source_size_(0) {}
+ external_script_source_size_(0),
+ cpu_profiler_metadata_size_(0) {}
bool v8::V8::InitializeICU(const char* icu_data_file) {
return i::InitializeICU(icu_data_file);
@@ -7804,6 +7877,37 @@ MaybeLocal<WasmModuleObject> WasmModuleObject::FromCompiledModule(
#endif // V8_ENABLE_WEBASSEMBLY
}
+MaybeLocal<WasmModuleObject> WasmModuleObject::Compile(
+ Isolate* isolate, MemorySpan<const uint8_t> wire_bytes) {
+#if V8_ENABLE_WEBASSEMBLY
+ const uint8_t* start = wire_bytes.data();
+ size_t length = wire_bytes.size();
+ i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(isolate);
+ if (!i::wasm::IsWasmCodegenAllowed(i_isolate, i_isolate->native_context())) {
+ return MaybeLocal<WasmModuleObject>();
+ }
+ i::MaybeHandle<i::JSObject> maybe_compiled;
+ {
+ i::wasm::ErrorThrower thrower(i_isolate, "WasmModuleObject::Compile()");
+ auto enabled_features = i::wasm::WasmFeatures::FromIsolate(i_isolate);
+ maybe_compiled = i::wasm::GetWasmEngine()->SyncCompile(
+ i_isolate, enabled_features, &thrower,
+ i::wasm::ModuleWireBytes(start, start + length));
+ }
+ CHECK_EQ(maybe_compiled.is_null(), i_isolate->has_pending_exception());
+ if (maybe_compiled.is_null()) {
+ i_isolate->OptionalRescheduleException(false);
+ return MaybeLocal<WasmModuleObject>();
+ }
+ return Local<WasmModuleObject>::Cast(
+ Utils::ToLocal(maybe_compiled.ToHandleChecked()));
+#else
+ Utils::ApiCheck(false, "WasmModuleObject::Compile",
+ "WebAssembly support is not enabled.");
+ UNREACHABLE();
+#endif // V8_ENABLE_WEBASSEMBLY
+}
+
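A short usage sketch of the synchronous compile entry point added above; `isolate` and the `LoadWireBytes()` helper are assumptions:

    // Sketch: compile a Wasm module directly from wire bytes.
    std::vector<uint8_t> wire_bytes = LoadWireBytes();  // assumed helper
    v8::MaybeLocal<v8::WasmModuleObject> maybe_module =
        v8::WasmModuleObject::Compile(
            isolate, {wire_bytes.data(), wire_bytes.size()});
    v8::Local<v8::WasmModuleObject> module;
    if (!maybe_module.ToLocal(&module)) {
      // Compilation failed or Wasm codegen is disallowed; an exception is
      // pending on the isolate.
    }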
WasmModuleObjectBuilderStreaming::WasmModuleObjectBuilderStreaming(
Isolate* isolate) {
USE(isolate_);
@@ -7945,21 +8049,20 @@ Local<ArrayBuffer> v8::ArrayBufferView::Buffer() {
size_t v8::ArrayBufferView::CopyContents(void* dest, size_t byte_length) {
i::Handle<i::JSArrayBufferView> self = Utils::OpenHandle(this);
- size_t byte_offset = self->byte_offset();
size_t bytes_to_copy = std::min(byte_length, self->byte_length());
if (bytes_to_copy) {
i::DisallowGarbageCollection no_gc;
i::Isolate* isolate = self->GetIsolate();
- i::Handle<i::JSArrayBuffer> buffer(i::JSArrayBuffer::cast(self->buffer()),
- isolate);
- const char* source = reinterpret_cast<char*>(buffer->backing_store());
- if (source == nullptr) {
- DCHECK(self->IsJSTypedArray());
- i::Handle<i::JSTypedArray> typed_array(i::JSTypedArray::cast(*self),
- isolate);
- source = reinterpret_cast<char*>(typed_array->DataPtr());
+ const char* source;
+ if (self->IsJSTypedArray()) {
+ i::Handle<i::JSTypedArray> array(i::JSTypedArray::cast(*self), isolate);
+ source = reinterpret_cast<char*>(array->DataPtr());
+ } else {
+ DCHECK(self->IsJSDataView());
+ i::Handle<i::JSDataView> data_view(i::JSDataView::cast(*self), isolate);
+ source = reinterpret_cast<char*>(data_view->data_pointer());
}
- memcpy(dest, source + byte_offset, bytes_to_copy);
+ memcpy(dest, source, bytes_to_copy);
}
return bytes_to_copy;
}
@@ -8397,6 +8500,7 @@ void Isolate::RemoveGCEpilogueCallback(GCCallback callback) {
void Isolate::SetEmbedderHeapTracer(EmbedderHeapTracer* tracer) {
i::Isolate* isolate = reinterpret_cast<i::Isolate*>(this);
+ CHECK_NULL(isolate->heap()->cpp_heap());
isolate->heap()->SetEmbedderHeapTracer(tracer);
}
@@ -8412,6 +8516,7 @@ void Isolate::SetEmbedderRootsHandler(EmbedderRootsHandler* handler) {
void Isolate::AttachCppHeap(CppHeap* cpp_heap) {
i::Isolate* isolate = reinterpret_cast<i::Isolate*>(this);
+ CHECK_NULL(GetEmbedderHeapTracer());
isolate->heap()->AttachCppHeap(cpp_heap);
}
@@ -8477,6 +8582,17 @@ void Isolate::RequestGarbageCollectionForTesting(GarbageCollectionType type) {
}
}
+void Isolate::RequestGarbageCollectionForTesting(
+ GarbageCollectionType type,
+ EmbedderHeapTracer::EmbedderStackState stack_state) {
+ if (type == kFullGarbageCollection) {
+ reinterpret_cast<i::Isolate*>(this)
+ ->heap()
+ ->SetEmbedderStackStateForNextFinalization(stack_state);
+ }
+ RequestGarbageCollectionForTesting(type);
+}
+
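Usage sketch for the new overload, which lets a test state explicitly whether the current stack may still reference traced objects:

    // Sketch: full GC for testing with an explicit embedder stack state.
    isolate->RequestGarbageCollectionForTesting(
        v8::Isolate::kFullGarbageCollection,
        v8::EmbedderHeapTracer::EmbedderStackState::kNoHeapPointers);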
Isolate* Isolate::GetCurrent() {
i::Isolate* isolate = i::Isolate::Current();
return reinterpret_cast<Isolate*>(isolate);
@@ -8505,6 +8621,7 @@ Isolate::CreateParams::~CreateParams() = default;
void Isolate::Initialize(Isolate* isolate,
const v8::Isolate::CreateParams& params) {
i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(isolate);
+ TRACE_EVENT_CALL_STATS_SCOPED(i_isolate, "v8", "V8.IsolateInitialize");
if (auto allocator = params.array_buffer_allocator_shared) {
CHECK(params.array_buffer_allocator == nullptr ||
params.array_buffer_allocator == allocator.get());
@@ -8646,6 +8763,12 @@ void Isolate::SetHostImportModuleDynamicallyCallback(
isolate->SetHostImportModuleDynamicallyCallback(callback);
}
+void Isolate::SetHostImportModuleDynamicallyCallback(
+ HostImportModuleDynamicallyCallback callback) {
+ i::Isolate* isolate = reinterpret_cast<i::Isolate*>(this);
+ isolate->SetHostImportModuleDynamicallyCallback(callback);
+}
+
void Isolate::SetHostInitializeImportMetaObjectCallback(
HostInitializeImportMetaObjectCallback callback) {
i::Isolate* isolate = reinterpret_cast<i::Isolate*>(this);
@@ -8897,6 +9020,10 @@ bool Isolate::GetHeapCodeAndMetadataStatistics(
isolate->bytecode_and_metadata_size();
code_statistics->external_script_source_size_ =
isolate->external_script_source_size();
+ code_statistics->cpu_profiler_metadata_size_ =
+ i::CpuProfiler::GetAllProfilersMemorySize(
+ reinterpret_cast<i::Isolate*>(isolate));
+
return true;
}
@@ -9427,7 +9554,6 @@ void v8::Isolate::LocaleConfigurationChangeNotification() {
#ifdef V8_INTL_SUPPORT
i_isolate->ResetDefaultLocale();
- i_isolate->clear_cached_icu_objects();
#endif // V8_INTL_SUPPORT
}
@@ -9752,6 +9878,16 @@ int64_t CpuProfile::GetSampleTimestamp(int index) const {
return profile->sample(index).timestamp.since_origin().InMicroseconds();
}
+StateTag CpuProfile::GetSampleState(int index) const {
+ const i::CpuProfile* profile = reinterpret_cast<const i::CpuProfile*>(this);
+ return profile->sample(index).state_tag;
+}
+
+EmbedderStateTag CpuProfile::GetSampleEmbedderState(int index) const {
+ const i::CpuProfile* profile = reinterpret_cast<const i::CpuProfile*>(this);
+ return profile->sample(index).embedder_state_tag;
+}
+
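The new per-sample accessors combine with the existing ones; a sketch over a finished profile, assuming `profile` was returned by CpuProfiler::StopProfiling:

    // Sketch: read the VM and embedder state recorded with each sample.
    for (int i = 0; i < profile->GetSamplesCount(); i++) {
      v8::StateTag vm_state = profile->GetSampleState(i);
      v8::EmbedderStateTag embedder_state = profile->GetSampleEmbedderState(i);
      int64_t timestamp_us = profile->GetSampleTimestamp(i);
      // ... bucket samples by (vm_state, embedder_state, timestamp_us) ...
    }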
int64_t CpuProfile::GetStartTime() const {
const i::CpuProfile* profile = reinterpret_cast<const i::CpuProfile*>(this);
return profile->start_time().since_origin().InMicroseconds();
@@ -10220,6 +10356,11 @@ void EmbedderHeapTracer::ResetHandleInNonTracingGC(
UNREACHABLE();
}
+EmbedderStateScope::EmbedderStateScope(Isolate* isolate,
+ Local<v8::Context> context,
+ EmbedderStateTag tag)
+ : embedder_state_(new internal::EmbedderState(isolate, context, tag)) {}
+
void TracedReferenceBase::CheckValue() const {
#ifdef V8_HOST_ARCH_64_BIT
if (!val_) return;
@@ -10388,6 +10529,11 @@ void HandleScopeImplementer::IterateThis(RootVisitor* v) {
v->VisitRootPointers(Root::kHandleScope, nullptr, start,
start + static_cast<int>(context_lists[i]->size()));
}
+ // The shape of |entered_contexts_| and |is_microtask_context_| stacks must
+ // be in sync.
+ is_microtask_context_.shrink_to_fit();
+ DCHECK_EQ(entered_contexts_.capacity(), is_microtask_context_.capacity());
+ DCHECK_EQ(entered_contexts_.size(), is_microtask_context_.size());
}
void HandleScopeImplementer::Iterate(RootVisitor* v) {
diff --git a/deps/v8/src/api/api.h b/deps/v8/src/api/api.h
index 48f549bbb0..320346b22f 100644
--- a/deps/v8/src/api/api.h
+++ b/deps/v8/src/api/api.h
@@ -468,6 +468,7 @@ bool HandleScopeImplementer::HasSavedContexts() {
}
void HandleScopeImplementer::EnterContext(Context context) {
+ DCHECK_EQ(entered_contexts_.capacity(), is_microtask_context_.capacity());
DCHECK_EQ(entered_contexts_.size(), is_microtask_context_.size());
entered_contexts_.push_back(context);
is_microtask_context_.push_back(0);
@@ -475,6 +476,7 @@ void HandleScopeImplementer::EnterContext(Context context) {
void HandleScopeImplementer::LeaveContext() {
DCHECK(!entered_contexts_.empty());
+ DCHECK_EQ(entered_contexts_.capacity(), is_microtask_context_.capacity());
DCHECK_EQ(entered_contexts_.size(), is_microtask_context_.size());
entered_contexts_.pop_back();
is_microtask_context_.pop_back();
@@ -485,6 +487,7 @@ bool HandleScopeImplementer::LastEnteredContextWas(Context context) {
}
void HandleScopeImplementer::EnterMicrotaskContext(Context context) {
+ DCHECK_EQ(entered_contexts_.capacity(), is_microtask_context_.capacity());
DCHECK_EQ(entered_contexts_.size(), is_microtask_context_.size());
entered_contexts_.push_back(context);
is_microtask_context_.push_back(1);
diff --git a/deps/v8/src/asmjs/asm-js.cc b/deps/v8/src/asmjs/asm-js.cc
index 28b44bf088..8791e4eae2 100644
--- a/deps/v8/src/asmjs/asm-js.cc
+++ b/deps/v8/src/asmjs/asm-js.cc
@@ -77,7 +77,7 @@ bool AreStdlibMembersValid(Isolate* isolate, Handle<JSReceiver> stdlib,
return false; \
} \
DCHECK_EQ(shared.GetCode(), \
- isolate->builtins()->code(Builtin::kMath##FName)); \
+ isolate->builtins()->codet(Builtin::kMath##FName)); \
}
STDLIB_MATH_FUNCTION_LIST(STDLIB_MATH_FUNC)
#undef STDLIB_MATH_FUNC
diff --git a/deps/v8/src/asmjs/asm-parser.cc b/deps/v8/src/asmjs/asm-parser.cc
index c782bbaae7..3ff2a44201 100644
--- a/deps/v8/src/asmjs/asm-parser.cc
+++ b/deps/v8/src/asmjs/asm-parser.cc
@@ -239,7 +239,7 @@ void AsmJsParser::DeclareGlobal(VarInfo* info, bool mutable_variable,
WasmInitExpr init) {
info->kind = VarKind::kGlobal;
info->type = type;
- info->index = module_builder_->AddGlobal(vtype, true, std::move(init));
+ info->index = module_builder_->AddGlobal(vtype, true, init);
info->mutable_variable = mutable_variable;
}
diff --git a/deps/v8/src/ast/ast-value-factory.cc b/deps/v8/src/ast/ast-value-factory.cc
index 307e1d06b7..4dab59fdae 100644
--- a/deps/v8/src/ast/ast-value-factory.cc
+++ b/deps/v8/src/ast/ast-value-factory.cc
@@ -338,10 +338,11 @@ const AstRawString* AstValueFactory::GetTwoByteStringInternal(
base::Vector<const byte>::cast(literal));
}
-const AstRawString* AstValueFactory::GetString(Handle<String> literal) {
+const AstRawString* AstValueFactory::GetString(
+ String literal, const SharedStringAccessGuardIfNeeded& access_guard) {
const AstRawString* result = nullptr;
DisallowGarbageCollection no_gc;
- String::FlatContent content = literal->GetFlatContent(no_gc);
+ String::FlatContent content = literal.GetFlatContent(no_gc, access_guard);
if (content.IsOneByte()) {
result = GetOneByteStringInternal(content.ToOneByteVector());
} else {
@@ -351,15 +352,6 @@ const AstRawString* AstValueFactory::GetString(Handle<String> literal) {
return result;
}
-const AstRawString* AstValueFactory::CloneFromOtherFactory(
- const AstRawString* raw_string) {
- const AstRawString* result =
- GetString(raw_string->raw_hash_field(), raw_string->is_one_byte(),
- base::Vector<const byte>(raw_string->raw_data(),
- raw_string->byte_length()));
- return result;
-}
-
AstConsString* AstValueFactory::NewConsString() {
return zone()->New<AstConsString>();
}
@@ -375,8 +367,6 @@ AstConsString* AstValueFactory::NewConsString(const AstRawString* str1,
template <typename IsolateT>
void AstValueFactory::Internalize(IsolateT* isolate) {
- if (!zone_) return;
-
// Strings need to be internalized before values, because values refer to
// strings.
for (AstRawString* current = strings_; current != nullptr;) {
@@ -386,7 +376,6 @@ void AstValueFactory::Internalize(IsolateT* isolate) {
}
ResetStrings();
- zone_ = nullptr;
}
template EXPORT_TEMPLATE_DEFINE(
V8_EXPORT_PRIVATE) void AstValueFactory::Internalize(Isolate* isolate);
diff --git a/deps/v8/src/ast/ast-value-factory.h b/deps/v8/src/ast/ast-value-factory.h
index 67c761a8f8..d036d99604 100644
--- a/deps/v8/src/ast/ast-value-factory.h
+++ b/deps/v8/src/ast/ast-value-factory.h
@@ -340,11 +340,8 @@ class AstValueFactory {
const AstRawString* GetTwoByteString(base::Vector<const uint16_t> literal) {
return GetTwoByteStringInternal(literal);
}
- const AstRawString* GetString(Handle<String> literal);
-
- // Clones an AstRawString from another ast value factory, adding it to this
- // factory and returning the clone.
- const AstRawString* CloneFromOtherFactory(const AstRawString* raw_string);
+ const AstRawString* GetString(String literal,
+ const SharedStringAccessGuardIfNeeded&);
V8_EXPORT_PRIVATE AstConsString* NewConsString();
V8_EXPORT_PRIVATE AstConsString* NewConsString(const AstRawString* str);
diff --git a/deps/v8/src/ast/ast.h b/deps/v8/src/ast/ast.h
index 0b2320860e..f7b3f247f7 100644
--- a/deps/v8/src/ast/ast.h
+++ b/deps/v8/src/ast/ast.h
@@ -2192,6 +2192,13 @@ class FunctionLiteral final : public Expression {
return HasDuplicateParameters::decode(bit_field_);
}
+ bool should_parallel_compile() const {
+ return ShouldParallelCompileField::decode(bit_field_);
+ }
+ void set_should_parallel_compile() {
+ bit_field_ = ShouldParallelCompileField::update(bit_field_, true);
+ }
+
// This is used as a heuristic on when to eagerly compile a function
// literal. We consider the following constructs as hints that the
// function will be called immediately:
@@ -2205,16 +2212,6 @@ class FunctionLiteral final : public Expression {
}
FunctionKind kind() const;
- bool dont_optimize() {
- return dont_optimize_reason() != BailoutReason::kNoReason;
- }
- BailoutReason dont_optimize_reason() {
- return DontOptimizeReasonField::decode(bit_field_);
- }
- void set_dont_optimize_reason(BailoutReason reason) {
- bit_field_ = DontOptimizeReasonField::update(bit_field_, reason);
- }
-
bool IsAnonymousFunctionDefinition() const {
return is_anonymous_expression();
}
@@ -2290,9 +2287,9 @@ class FunctionLiteral final : public Expression {
Pretenure::encode(false) |
HasDuplicateParameters::encode(has_duplicate_parameters ==
kHasDuplicateParameters) |
- DontOptimizeReasonField::encode(BailoutReason::kNoReason) |
RequiresInstanceMembersInitializer::encode(false) |
- HasBracesField::encode(has_braces);
+ HasBracesField::encode(has_braces) |
+ ShouldParallelCompileField::encode(false);
if (eager_compile_hint == kShouldEagerCompile) SetShouldEagerCompile();
}
@@ -2300,15 +2297,14 @@ class FunctionLiteral final : public Expression {
Expression::NextBitField<FunctionSyntaxKind, 3>;
using Pretenure = FunctionSyntaxKindBits::Next<bool, 1>;
using HasDuplicateParameters = Pretenure::Next<bool, 1>;
- using DontOptimizeReasonField =
- HasDuplicateParameters::Next<BailoutReason, 8>;
using RequiresInstanceMembersInitializer =
- DontOptimizeReasonField::Next<bool, 1>;
+ HasDuplicateParameters::Next<bool, 1>;
using ClassScopeHasPrivateBrandField =
RequiresInstanceMembersInitializer::Next<bool, 1>;
using HasStaticPrivateMethodsOrAccessorsField =
ClassScopeHasPrivateBrandField::Next<bool, 1>;
using HasBracesField = HasStaticPrivateMethodsOrAccessorsField::Next<bool, 1>;
+ using ShouldParallelCompileField = HasBracesField::Next<bool, 1>;
// expected_property_count_ is the sum of instance fields and properties.
// It can vary depending on whether a function is lazily or eagerly parsed.
diff --git a/deps/v8/src/ast/prettyprinter.cc b/deps/v8/src/ast/prettyprinter.cc
index 44f4ea155f..c8be8bf47a 100644
--- a/deps/v8/src/ast/prettyprinter.cc
+++ b/deps/v8/src/ast/prettyprinter.cc
@@ -35,7 +35,7 @@ CallPrinter::CallPrinter(Isolate* isolate, bool is_user_js,
is_user_js_ = is_user_js;
error_in_spread_args_ = error_in_spread_args;
spread_arg_ = nullptr;
- function_kind_ = kNormalFunction;
+ function_kind_ = FunctionKind::kNormalFunction;
InitializeAstVisitor(isolate);
}
@@ -823,7 +823,7 @@ const char* AstPrinter::PrintProgram(FunctionLiteral* program) {
Init();
{ IndentedScope indent(this, "FUNC", program->position());
PrintIndented("KIND");
- Print(" %d\n", program->kind());
+ Print(" %d\n", static_cast<uint32_t>(program->kind()));
PrintIndented("LITERAL ID");
Print(" %d\n", program->function_literal_id());
PrintIndented("SUSPEND COUNT");
diff --git a/deps/v8/src/ast/scopes.cc b/deps/v8/src/ast/scopes.cc
index c179776571..6758079823 100644
--- a/deps/v8/src/ast/scopes.cc
+++ b/deps/v8/src/ast/scopes.cc
@@ -143,8 +143,9 @@ DeclarationScope::DeclarationScope(Zone* zone,
AstValueFactory* ast_value_factory,
REPLMode repl_mode)
: Scope(zone),
- function_kind_(repl_mode == REPLMode::kYes ? kAsyncFunction
- : kNormalFunction),
+ function_kind_(repl_mode == REPLMode::kYes
+ ? FunctionKind::kAsyncFunction
+ : FunctionKind::kNormalFunction),
params_(4, zone) {
DCHECK_EQ(scope_type_, SCRIPT_SCOPE);
SetDefaults();
@@ -165,14 +166,15 @@ DeclarationScope::DeclarationScope(Zone* zone, Scope* outer_scope,
ModuleScope::ModuleScope(DeclarationScope* script_scope,
AstValueFactory* avfactory)
- : DeclarationScope(avfactory->zone(), script_scope, MODULE_SCOPE, kModule),
+ : DeclarationScope(avfactory->zone(), script_scope, MODULE_SCOPE,
+ FunctionKind::kModule),
module_descriptor_(avfactory->zone()->New<SourceTextModuleDescriptor>(
avfactory->zone())) {
set_language_mode(LanguageMode::kStrict);
DeclareThis(avfactory);
}
-ModuleScope::ModuleScope(Isolate* isolate, Handle<ScopeInfo> scope_info,
+ModuleScope::ModuleScope(Handle<ScopeInfo> scope_info,
AstValueFactory* avfactory)
: DeclarationScope(avfactory->zone(), MODULE_SCOPE, avfactory, scope_info),
module_descriptor_(nullptr) {
@@ -186,7 +188,8 @@ ClassScope::ClassScope(Zone* zone, Scope* outer_scope, bool is_anonymous)
set_language_mode(LanguageMode::kStrict);
}
-ClassScope::ClassScope(Isolate* isolate, Zone* zone,
+template <typename IsolateT>
+ClassScope::ClassScope(IsolateT* isolate, Zone* zone,
AstValueFactory* ast_value_factory,
Handle<ScopeInfo> scope_info)
: Scope(zone, CLASS_SCOPE, ast_value_factory, scope_info),
@@ -212,12 +215,20 @@ ClassScope::ClassScope(Isolate* isolate, Zone* zone,
DCHECK_EQ(scope_info->ContextLocalMaybeAssignedFlag(index),
MaybeAssignedFlag::kMaybeAssigned);
Variable* var = DeclareClassVariable(
- ast_value_factory, ast_value_factory->GetString(handle(name, isolate)),
+ ast_value_factory,
+ ast_value_factory->GetString(name,
+ SharedStringAccessGuardIfNeeded(isolate)),
kNoSourcePosition);
var->AllocateTo(VariableLocation::CONTEXT,
Context::MIN_CONTEXT_SLOTS + index);
}
}
+template ClassScope::ClassScope(Isolate* isolate, Zone* zone,
+ AstValueFactory* ast_value_factory,
+ Handle<ScopeInfo> scope_info);
+template ClassScope::ClassScope(LocalIsolate* isolate, Zone* zone,
+ AstValueFactory* ast_value_factory,
+ Handle<ScopeInfo> scope_info);
Scope::Scope(Zone* zone, ScopeType scope_type,
AstValueFactory* ast_value_factory, Handle<ScopeInfo> scope_info)
@@ -394,7 +405,8 @@ bool Scope::ContainsAsmModule() const {
}
#endif // V8_ENABLE_WEBASSEMBLY
-Scope* Scope::DeserializeScopeChain(Isolate* isolate, Zone* zone,
+template <typename IsolateT>
+Scope* Scope::DeserializeScopeChain(IsolateT* isolate, Zone* zone,
ScopeInfo scope_info,
DeclarationScope* script_scope,
AstValueFactory* ast_value_factory,
@@ -450,7 +462,7 @@ Scope* Scope::DeserializeScopeChain(Isolate* isolate, Zone* zone,
handle(scope_info, isolate));
}
} else if (scope_info.scope_type() == MODULE_SCOPE) {
- outer_scope = zone->New<ModuleScope>(isolate, handle(scope_info, isolate),
+ outer_scope = zone->New<ModuleScope>(handle(scope_info, isolate),
ast_value_factory);
} else {
DCHECK_EQ(scope_info.scope_type(), CATCH_SCOPE);
@@ -460,9 +472,11 @@ Scope* Scope::DeserializeScopeChain(Isolate* isolate, Zone* zone,
String name = scope_info.ContextLocalName(0);
MaybeAssignedFlag maybe_assigned =
scope_info.ContextLocalMaybeAssignedFlag(0);
- outer_scope = zone->New<Scope>(
- zone, ast_value_factory->GetString(handle(name, isolate)),
- maybe_assigned, handle(scope_info, isolate));
+ outer_scope =
+ zone->New<Scope>(zone,
+ ast_value_factory->GetString(
+ name, SharedStringAccessGuardIfNeeded(isolate)),
+ maybe_assigned, handle(scope_info, isolate));
}
if (deserialization_mode == DeserializationMode::kScopesOnly) {
outer_scope->scope_info_ = Handle<ScopeInfo>::null();
@@ -496,6 +510,26 @@ Scope* Scope::DeserializeScopeChain(Isolate* isolate, Zone* zone,
return innermost_scope;
}
+template EXPORT_TEMPLATE_DEFINE(V8_EXPORT_PRIVATE)
+ Scope* Scope::DeserializeScopeChain(
+ Isolate* isolate, Zone* zone, ScopeInfo scope_info,
+ DeclarationScope* script_scope, AstValueFactory* ast_value_factory,
+ DeserializationMode deserialization_mode);
+template EXPORT_TEMPLATE_DEFINE(V8_EXPORT_PRIVATE)
+ Scope* Scope::DeserializeScopeChain(
+ LocalIsolate* isolate, Zone* zone, ScopeInfo scope_info,
+ DeclarationScope* script_scope, AstValueFactory* ast_value_factory,
+ DeserializationMode deserialization_mode);
+
+#ifdef DEBUG
+bool Scope::IsReparsedMemberInitializerScope() const {
+ return is_declaration_scope() &&
+ IsClassMembersInitializerFunction(
+ AsDeclarationScope()->function_kind()) &&
+ outer_scope()->AsClassScope()->is_reparsed_class_scope();
+}
+#endif
+
DeclarationScope* Scope::AsDeclarationScope() {
DCHECK(is_declaration_scope());
return static_cast<DeclarationScope*>(this);
@@ -639,8 +673,10 @@ bool DeclarationScope::Analyze(ParseInfo* info) {
// We are compiling one of four cases:
// 1) top-level code,
// 2) a function/eval/module on the top-level
- // 3) a function/eval in a scope that was already resolved.
+ // 3) a function/eval in a scope that was already resolved, or
+ // 4) a class member initializer function scope.
DCHECK(scope->is_script_scope() || scope->outer_scope()->is_script_scope() ||
+ scope->IsReparsedMemberInitializerScope() ||
scope->outer_scope()->already_resolved_);
// The outer scope is never lazy.
@@ -1819,7 +1855,7 @@ void Scope::Print(int n) {
// Print header.
FunctionKind function_kind = is_function_scope()
? AsDeclarationScope()->function_kind()
- : kNormalFunction;
+ : FunctionKind::kNormalFunction;
Indent(n0, Header(scope_type_, function_kind, is_declaration_scope()));
if (scope_name_ != nullptr && !scope_name_->IsEmpty()) {
PrintF(" ");
@@ -1868,6 +1904,8 @@ void Scope::Print(int n) {
if (scope->needs_private_name_context_chain_recalc()) {
Indent(n1, "// needs #-name context chain recalc\n");
}
+ Indent(n1, "// ");
+ PrintF("%s\n", FunctionKind2String(scope->function_kind()));
}
if (num_stack_slots_ > 0) {
Indent(n1, "// ");
@@ -2657,6 +2695,55 @@ bool IsComplementaryAccessorPair(VariableMode a, VariableMode b) {
}
}
+void ClassScope::ReplaceReparsedClassScope(Isolate* isolate,
+ AstValueFactory* ast_value_factory,
+ ClassScope* old_scope) {
+ DCHECK_EQ(outer_scope_, old_scope->outer_scope());
+ Scope* outer = outer_scope_;
+
+ outer->RemoveInnerScope(old_scope);
+ // The outer scope should only have this deserialized inner scope,
+ // otherwise we have to update the sibling scopes.
+ DCHECK_EQ(outer->inner_scope_, this);
+ DCHECK_NULL(sibling_);
+
+ DCHECK_NULL(old_scope->inner_scope_);
+
+ Handle<ScopeInfo> scope_info = old_scope->scope_info_;
+ DCHECK(!scope_info.is_null());
+ DCHECK(!scope_info->IsEmpty());
+
+ // Restore variable allocation results for context-allocated variables in
+ // the class scope from ScopeInfo, so that we don't need to run
+ // resolution and allocation on these variables again when generating
+ // code for the initializer function.
+ int context_local_count = scope_info->ContextLocalCount();
+ int context_header_length = scope_info->ContextHeaderLength();
+ DisallowGarbageCollection no_gc;
+ for (int i = 0; i < context_local_count; ++i) {
+ int slot_index = context_header_length + i;
+ DCHECK_LT(slot_index, scope_info->ContextLength());
+
+ String name = scope_info->ContextLocalName(i);
+ const AstRawString* string = ast_value_factory->GetString(
+ name, SharedStringAccessGuardIfNeeded(isolate));
+ Variable* var = nullptr;
+
+ var = string->IsPrivateName() ? LookupLocalPrivateName(string)
+ : LookupLocal(string);
+ DCHECK_NOT_NULL(var);
+ var->AllocateTo(VariableLocation::CONTEXT, slot_index);
+ }
+
+ scope_info_ = scope_info;
+
+ // Set this bit so that DeclarationScope::Analyze recognizes
+ // the reparsed instance member initializer scope.
+#ifdef DEBUG
+ is_reparsed_class_scope_ = true;
+#endif
+}
+
Variable* ClassScope::DeclarePrivateName(const AstRawString* name,
VariableMode mode,
IsStaticFlag is_static_flag,
diff --git a/deps/v8/src/ast/scopes.h b/deps/v8/src/ast/scopes.h
index 2aa0c23767..c04d99b4b0 100644
--- a/deps/v8/src/ast/scopes.h
+++ b/deps/v8/src/ast/scopes.h
@@ -163,7 +163,9 @@ class V8_EXPORT_PRIVATE Scope : public NON_EXPORTED_BASE(ZoneObject) {
enum class DeserializationMode { kIncludingVariables, kScopesOnly };
- static Scope* DeserializeScopeChain(Isolate* isolate, Zone* zone,
+ template <typename IsolateT>
+ EXPORT_TEMPLATE_DECLARE(V8_EXPORT_PRIVATE)
+ static Scope* DeserializeScopeChain(IsolateT* isolate, Zone* zone,
ScopeInfo scope_info,
DeclarationScope* script_scope,
AstValueFactory* ast_value_factory,
@@ -422,6 +424,9 @@ class V8_EXPORT_PRIVATE Scope : public NON_EXPORTED_BASE(ZoneObject) {
return num_heap_slots() > 0;
}
+#ifdef DEBUG
+ bool IsReparsedMemberInitializerScope() const;
+#endif
// Use Scope::ForEach for depth first traversal of scopes.
// Before:
// void Scope::VisitRecursively() {
@@ -850,7 +855,7 @@ class V8_EXPORT_PRIVATE Scope : public NON_EXPORTED_BASE(ZoneObject) {
class V8_EXPORT_PRIVATE DeclarationScope : public Scope {
public:
DeclarationScope(Zone* zone, Scope* outer_scope, ScopeType scope_type,
- FunctionKind function_kind = kNormalFunction);
+ FunctionKind function_kind = FunctionKind::kNormalFunction);
DeclarationScope(Zone* zone, ScopeType scope_type,
AstValueFactory* ast_value_factory,
Handle<ScopeInfo> scope_info);
@@ -987,7 +992,7 @@ class V8_EXPORT_PRIVATE DeclarationScope : public Scope {
void set_is_async_module() {
DCHECK(IsModule(function_kind_));
- function_kind_ = kAsyncModule;
+ function_kind_ = FunctionKind::kAsyncModule;
}
void DeclareThis(AstValueFactory* ast_value_factory);
@@ -1363,8 +1368,7 @@ class ModuleScope final : public DeclarationScope {
ModuleScope(DeclarationScope* script_scope, AstValueFactory* avfactory);
// Deserialization. Does not restore the module descriptor.
- ModuleScope(Isolate* isolate, Handle<ScopeInfo> scope_info,
- AstValueFactory* avfactory);
+ ModuleScope(Handle<ScopeInfo> scope_info, AstValueFactory* avfactory);
// Returns nullptr in a deserialized scope.
SourceTextModuleDescriptor* module() const { return module_descriptor_; }
@@ -1381,7 +1385,8 @@ class V8_EXPORT_PRIVATE ClassScope : public Scope {
public:
ClassScope(Zone* zone, Scope* outer_scope, bool is_anonymous);
// Deserialization.
- ClassScope(Isolate* isolate, Zone* zone, AstValueFactory* ast_value_factory,
+ template <typename IsolateT>
+ ClassScope(IsolateT* isolate, Zone* zone, AstValueFactory* ast_value_factory,
Handle<ScopeInfo> scope_info);
struct HeritageParsingScope {
@@ -1472,6 +1477,13 @@ class V8_EXPORT_PRIVATE ClassScope : public Scope {
should_save_class_variable_index_ = true;
}
+ void ReplaceReparsedClassScope(Isolate* isolate,
+ AstValueFactory* ast_value_factory,
+ ClassScope* old_scope);
+#ifdef DEBUG
+ bool is_reparsed_class_scope() const { return is_reparsed_class_scope_; }
+#endif
+
private:
friend class Scope;
friend class PrivateNameScopeIterator;
@@ -1517,6 +1529,9 @@ class V8_EXPORT_PRIVATE ClassScope : public Scope {
// This is only maintained during reparsing, restored from the
// preparsed data.
bool should_save_class_variable_index_ = false;
+#ifdef DEBUG
+ bool is_reparsed_class_scope_ = false;
+#endif
};
// Iterate over the private name scope chain. The iteration proceeds from the
diff --git a/deps/v8/src/base/atomic-utils.h b/deps/v8/src/base/atomic-utils.h
index ed034bfe06..84015af362 100644
--- a/deps/v8/src/base/atomic-utils.h
+++ b/deps/v8/src/base/atomic-utils.h
@@ -178,6 +178,27 @@ using AsAtomic8 = AsAtomicImpl<base::Atomic8>;
using AsAtomic32 = AsAtomicImpl<base::Atomic32>;
using AsAtomicWord = AsAtomicImpl<base::AtomicWord>;
+template <int Width>
+struct AtomicTypeFromByteWidth {};
+template <>
+struct AtomicTypeFromByteWidth<1> {
+ using type = base::Atomic8;
+};
+template <>
+struct AtomicTypeFromByteWidth<2> {
+ using type = base::Atomic16;
+};
+template <>
+struct AtomicTypeFromByteWidth<4> {
+ using type = base::Atomic32;
+};
+#if V8_HOST_ARCH_64_BIT
+template <>
+struct AtomicTypeFromByteWidth<8> {
+ using type = base::Atomic64;
+};
+#endif
+
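A small illustration of the width-to-type trait added above; a sketch only:

    // Sketch: map a byte width to the matching atomic storage type.
    using PtrSizedAtomic =
        v8::base::AtomicTypeFromByteWidth<sizeof(void*)>::type;
    static_assert(sizeof(PtrSizedAtomic) == sizeof(void*),
                  "atomic storage must match pointer width");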
// This is similar to AsAtomicWord but it explicitly deletes functionality
// provided atomic access to bit representation of stored values.
template <typename TAtomicStorageType>
@@ -211,11 +232,15 @@ inline void CheckedDecrement(
template <typename T>
V8_INLINE std::atomic<T>* AsAtomicPtr(T* t) {
+ STATIC_ASSERT(sizeof(T) == sizeof(std::atomic<T>));
+ STATIC_ASSERT(alignof(T) >= alignof(std::atomic<T>));
return reinterpret_cast<std::atomic<T>*>(t);
}
template <typename T>
V8_INLINE const std::atomic<T>* AsAtomicPtr(const T* t) {
+ STATIC_ASSERT(sizeof(T) == sizeof(std::atomic<T>));
+ STATIC_ASSERT(alignof(T) >= alignof(std::atomic<T>));
return reinterpret_cast<const std::atomic<T>*>(t);
}
diff --git a/deps/v8/src/base/atomicops.h b/deps/v8/src/base/atomicops.h
index 20efe3479c..56fd5f3094 100644
--- a/deps/v8/src/base/atomicops.h
+++ b/deps/v8/src/base/atomicops.h
@@ -378,6 +378,64 @@ inline void Relaxed_Memmove(volatile Atomic8* dst, volatile const Atomic8* src,
}
}
+namespace helper {
+inline int MemcmpNotEqualFundamental(Atomic8 u1, Atomic8 u2) {
+ DCHECK_NE(u1, u2);
+ return u1 < u2 ? -1 : 1;
+}
+inline int MemcmpNotEqualFundamental(AtomicWord u1, AtomicWord u2) {
+ DCHECK_NE(u1, u2);
+#if defined(V8_TARGET_BIG_ENDIAN)
+ return u1 < u2 ? -1 : 1;
+#else
+ for (size_t i = 0; i < sizeof(AtomicWord); ++i) {
+ uint8_t byte1 = u1 & 0xFF;
+ uint8_t byte2 = u2 & 0xFF;
+ if (byte1 != byte2) return byte1 < byte2 ? -1 : 1;
+ u1 >>= 8;
+ u2 >>= 8;
+ }
+ UNREACHABLE();
+#endif
+}
+} // namespace helper
+
+inline int Relaxed_Memcmp(volatile const Atomic8* s1,
+ volatile const Atomic8* s2, size_t len) {
+ constexpr size_t kAtomicWordSize = sizeof(AtomicWord);
+ while (len > 0 &&
+ !(IsAligned(reinterpret_cast<uintptr_t>(s1), kAtomicWordSize) &&
+ IsAligned(reinterpret_cast<uintptr_t>(s2), kAtomicWordSize))) {
+ Atomic8 u1 = Relaxed_Load(s1++);
+ Atomic8 u2 = Relaxed_Load(s2++);
+ if (u1 != u2) return helper::MemcmpNotEqualFundamental(u1, u2);
+ --len;
+ }
+
+ if (IsAligned(reinterpret_cast<uintptr_t>(s1), kAtomicWordSize) &&
+ IsAligned(reinterpret_cast<uintptr_t>(s2), kAtomicWordSize)) {
+ while (len >= kAtomicWordSize) {
+ AtomicWord u1 =
+ Relaxed_Load(reinterpret_cast<const volatile AtomicWord*>(s1));
+ AtomicWord u2 =
+ Relaxed_Load(reinterpret_cast<const volatile AtomicWord*>(s2));
+ if (u1 != u2) return helper::MemcmpNotEqualFundamental(u1, u2);
+ s1 += kAtomicWordSize;
+ s2 += kAtomicWordSize;
+ len -= kAtomicWordSize;
+ }
+ }
+
+ while (len > 0) {
+ Atomic8 u1 = Relaxed_Load(s1++);
+ Atomic8 u2 = Relaxed_Load(s2++);
+ if (u1 != u2) return helper::MemcmpNotEqualFundamental(u1, u2);
+ --len;
+ }
+
+ return 0;
+}
+
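The word-sized fast path above defers to MemcmpNotEqualFundamental so the result keeps byte-wise memcmp ordering even on little-endian targets. A usage sketch, with the buffers assumed to be written concurrently elsewhere:

    // Sketch: compare two shared byte buffers without data races.
    const v8::base::Atomic8* a =
        reinterpret_cast<const v8::base::Atomic8*>(buffer_a);
    const v8::base::Atomic8* b =
        reinterpret_cast<const v8::base::Atomic8*>(buffer_b);
    int cmp = v8::base::Relaxed_Memcmp(a, b, length);  // <0, 0, or >0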
} // namespace base
} // namespace v8
diff --git a/deps/v8/src/base/bounded-page-allocator.cc b/deps/v8/src/base/bounded-page-allocator.cc
index d33857845a..a51206aec6 100644
--- a/deps/v8/src/base/bounded-page-allocator.cc
+++ b/deps/v8/src/base/bounded-page-allocator.cc
@@ -142,6 +142,9 @@ bool BoundedPageAllocator::ReleasePages(void* raw_address, size_t size,
DCHECK_LT(new_size, size);
DCHECK(IsAligned(size - new_size, commit_page_size_));
+ // This must be held until the page permissions are updated.
+ MutexGuard guard(&mutex_);
+
// Check if we freed any allocatable pages by this release.
size_t allocated_size = RoundUp(size, allocate_page_size_);
size_t new_allocated_size = RoundUp(new_size, allocate_page_size_);
@@ -150,13 +153,11 @@ bool BoundedPageAllocator::ReleasePages(void* raw_address, size_t size,
{
// There must be an allocated region at given |address| of a size not
// smaller than |size|.
- MutexGuard guard(&mutex_);
DCHECK_EQ(allocated_size, region_allocator_.CheckRegion(address));
}
#endif
if (new_allocated_size < allocated_size) {
- MutexGuard guard(&mutex_);
region_allocator_.TrimRegion(address, new_allocated_size);
}
diff --git a/deps/v8/src/base/emulated-virtual-address-subspace.cc b/deps/v8/src/base/emulated-virtual-address-subspace.cc
new file mode 100644
index 0000000000..fbfb125569
--- /dev/null
+++ b/deps/v8/src/base/emulated-virtual-address-subspace.cc
@@ -0,0 +1,138 @@
+// Copyright 2021 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/base/emulated-virtual-address-subspace.h"
+
+#include "src/base/bits.h"
+#include "src/base/platform/platform.h"
+#include "src/base/platform/wrappers.h"
+
+namespace v8 {
+namespace base {
+
+EmulatedVirtualAddressSubspace::EmulatedVirtualAddressSubspace(
+ VirtualAddressSpace* parent_space, Address base, size_t mapped_size,
+ size_t total_size)
+ : VirtualAddressSpace(parent_space->page_size(),
+ parent_space->allocation_granularity(), base,
+ total_size),
+ mapped_size_(mapped_size),
+ parent_space_(parent_space),
+ region_allocator_(base, mapped_size, parent_space_->page_size()) {
+ // For simplicity, we currently require both the mapped and total size to be
+ // a power of two. This simplifies some things later on, for example, random
+ // addresses can be generated with a simple bitmask, and will then be inside
+ // the unmapped space with a probability >= 50% (mapped size == unmapped
+ // size) or never (mapped size == total size).
+ DCHECK(base::bits::IsPowerOfTwo(mapped_size));
+ DCHECK(base::bits::IsPowerOfTwo(total_size));
+}
+
+EmulatedVirtualAddressSubspace::~EmulatedVirtualAddressSubspace() {
+ CHECK(parent_space_->FreePages(base(), mapped_size_));
+}
+
+void EmulatedVirtualAddressSubspace::SetRandomSeed(int64_t seed) {
+ MutexGuard guard(&mutex_);
+ rng_.SetSeed(seed);
+}
+
+Address EmulatedVirtualAddressSubspace::RandomPageAddress() {
+ MutexGuard guard(&mutex_);
+ Address addr = base() + (rng_.NextInt64() % size());
+ return RoundDown(addr, allocation_granularity());
+}
+
+Address EmulatedVirtualAddressSubspace::AllocatePages(
+ Address hint, size_t size, size_t alignment, PagePermissions permissions) {
+ if (hint == kNoHint || MappedRegionContains(hint, size)) {
+ MutexGuard guard(&mutex_);
+
+ // Attempt to find a region in the mapped region.
+ Address address = region_allocator_.AllocateRegion(hint, size, alignment);
+ if (address != RegionAllocator::kAllocationFailure) {
+ // Success. Only need to adjust the page permissions.
+ if (parent_space_->SetPagePermissions(address, size, permissions)) {
+ return address;
+ }
+ // Probably ran out of memory, but still try to allocate in the unmapped
+ // space.
+ CHECK_EQ(size, region_allocator_.FreeRegion(address));
+ }
+ }
+
+ // No luck, or the hint is outside the mapped region. Try to allocate pages in
+ // the unmapped space using page allocation hints instead.
+
+ // Somewhat arbitrary size limitation to ensure that the loop below for
+ // finding a fitting base address hint terminates quickly.
+ if (size >= (unmapped_size() / 2)) return kNullAddress;
+
+ static constexpr int kMaxAttempts = 10;
+ for (int i = 0; i < kMaxAttempts; i++) {
+ // If the hint wouldn't result in the entire allocation being inside the
+ // managed region, simply retry. There is at least a 50% chance of
+ // getting a usable address due to the size restriction above.
+ while (!UnmappedRegionContains(hint, size)) {
+ hint = RandomPageAddress();
+ }
+
+ Address region =
+ parent_space_->AllocatePages(hint, size, alignment, permissions);
+ if (region && UnmappedRegionContains(region, size)) {
+ return region;
+ } else if (region) {
+ CHECK(parent_space_->FreePages(region, size));
+ }
+
+ // Retry at a different address.
+ hint = RandomPageAddress();
+ }
+
+ return kNullAddress;
+}
+
+bool EmulatedVirtualAddressSubspace::FreePages(Address address, size_t size) {
+ if (MappedRegionContains(address, size)) {
+ MutexGuard guard(&mutex_);
+ if (region_allocator_.FreeRegion(address) != size) return false;
+ CHECK(parent_space_->DecommitPages(address, size));
+ return true;
+ }
+ if (!UnmappedRegionContains(address, size)) return false;
+ return parent_space_->FreePages(address, size);
+}
+
+bool EmulatedVirtualAddressSubspace::SetPagePermissions(
+ Address address, size_t size, PagePermissions permissions) {
+ DCHECK(Contains(address, size));
+ return parent_space_->SetPagePermissions(address, size, permissions);
+}
+
+bool EmulatedVirtualAddressSubspace::CanAllocateSubspaces() {
+ // This is not supported, mostly because it's not (yet) needed in practice.
+ return false;
+}
+
+std::unique_ptr<v8::VirtualAddressSpace>
+EmulatedVirtualAddressSubspace::AllocateSubspace(
+ Address hint, size_t size, size_t alignment,
+ PagePermissions max_permissions) {
+ UNREACHABLE();
+}
+
+bool EmulatedVirtualAddressSubspace::DiscardSystemPages(Address address,
+ size_t size) {
+ DCHECK(Contains(address, size));
+ return parent_space_->DiscardSystemPages(address, size);
+}
+
+bool EmulatedVirtualAddressSubspace::DecommitPages(Address address,
+ size_t size) {
+ DCHECK(Contains(address, size));
+ return parent_space_->DecommitPages(address, size);
+}
+
+} // namespace base
+} // namespace v8
diff --git a/deps/v8/src/base/emulated-virtual-address-subspace.h b/deps/v8/src/base/emulated-virtual-address-subspace.h
new file mode 100644
index 0000000000..480c3e1ae0
--- /dev/null
+++ b/deps/v8/src/base/emulated-virtual-address-subspace.h
@@ -0,0 +1,113 @@
+// Copyright 2021 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_BASE_EMULATED_VIRTUAL_ADDRESS_SUBSPACE_H_
+#define V8_BASE_EMULATED_VIRTUAL_ADDRESS_SUBSPACE_H_
+
+#include "include/v8-platform.h"
+#include "src/base/base-export.h"
+#include "src/base/compiler-specific.h"
+#include "src/base/platform/mutex.h"
+#include "src/base/region-allocator.h"
+#include "src/base/virtual-address-space.h"
+
+namespace v8 {
+namespace base {
+
+/**
+ * Emulates a virtual address subspace.
+ *
+ * This class is (optionally) backed by a page allocation and emulates a virtual
+ * address space that is potentially larger than that mapping. It generally
+ * first attempts to satisfy page allocation requests from its backing mapping,
+ * but will also attempt to obtain new page mappings inside the unmapped space
+ * through page allocation hints if necessary.
+ *
+ * Caveat: an emulated subspace violates the invariant that page allocations in
+ * an address space will never end up inside a child space and so does not
+ * provide the same security guarantees.
+ */
+class V8_BASE_EXPORT EmulatedVirtualAddressSubspace final
+ : public NON_EXPORTED_BASE(::v8::VirtualAddressSpace) {
+ public:
+ // Construct an emulated virtual address subspace of the specified total size,
+ // potentially backed by a page allocation from the parent space. The newly
+ // created instance takes ownership of the page allocation (if any) and frees
+ // it during destruction.
+ EmulatedVirtualAddressSubspace(v8::VirtualAddressSpace* parent_space,
+ Address base, size_t mapped_size,
+ size_t total_size);
+
+ ~EmulatedVirtualAddressSubspace() override;
+
+ void SetRandomSeed(int64_t seed) override;
+
+ Address RandomPageAddress() override;
+
+ Address AllocatePages(Address hint, size_t size, size_t alignment,
+ PagePermissions permissions) override;
+
+ bool FreePages(Address address, size_t size) override;
+
+ bool SetPagePermissions(Address address, size_t size,
+ PagePermissions permissions) override;
+
+ bool CanAllocateSubspaces() override;
+
+ std::unique_ptr<v8::VirtualAddressSpace> AllocateSubspace(
+ Address hint, size_t size, size_t alignment,
+ PagePermissions max_permissions) override;
+
+ bool DiscardSystemPages(Address address, size_t size) override;
+
+ bool DecommitPages(Address address, size_t size) override;
+
+ private:
+ size_t mapped_size() const { return mapped_size_; }
+ size_t unmapped_size() const { return size() - mapped_size_; }
+
+ Address mapped_base() const { return base(); }
+ Address unmapped_base() const { return base() + mapped_size_; }
+
+ bool Contains(Address outer_start, size_t outer_size, Address inner_start,
+ size_t inner_size) const {
+ return (inner_start >= outer_start) &&
+ ((inner_start + inner_size) <= (outer_start + outer_size));
+ }
+
+ bool Contains(Address addr, size_t length) const {
+ return Contains(base(), size(), addr, length);
+ }
+
+ bool MappedRegionContains(Address addr, size_t length) const {
+ return Contains(mapped_base(), mapped_size(), addr, length);
+ }
+
+ bool UnmappedRegionContains(Address addr, size_t length) const {
+ return Contains(unmapped_base(), unmapped_size(), addr, length);
+ }
+
+ // Size of the mapped region located at the beginning of this address space.
+ const size_t mapped_size_;
+
+ // Pointer to the parent space from which the backing pages were allocated.
+ // Must be kept alive by the owner of this instance.
+ v8::VirtualAddressSpace* parent_space_;
+
+ // Mutex guarding the non-threadsafe RegionAllocator and
+ // RandomNumberGenerator.
+ Mutex mutex_;
+
+ // RegionAllocator to manage the page allocation and divide it into further
+ // regions as necessary.
+ RegionAllocator region_allocator_;
+
+ // Random number generator for generating random addresses.
+ RandomNumberGenerator rng_;
+};
+
+} // namespace base
+} // namespace v8
+
+#endif // V8_BASE_EMULATED_VIRTUAL_ADDRESS_SUBSPACE_H_
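For orientation, a sketch of wiring up such a subspace on top of a parent space; `parent_space` is an assumed, already-initialized v8::VirtualAddressSpace*, and both sizes must be powers of two, as the constructor DCHECKs:

    // Sketch: back the first kMappedSize bytes with real pages from the
    // parent space; the rest of the range is only emulated.
    constexpr size_t kMappedSize = size_t{1} << 30;  // 1 GiB, power of two
    constexpr size_t kTotalSize = size_t{1} << 32;   // 4 GiB (64-bit hosts)
    uintptr_t base = parent_space->AllocatePages(
        /*hint=*/0, kMappedSize, parent_space->allocation_granularity(),
        v8::PagePermissions::kNoAccess);
    auto subspace = std::make_unique<v8::base::EmulatedVirtualAddressSubspace>(
        parent_space, base, kMappedSize, kTotalSize);
    // The subspace owns the backing pages and frees them on destruction.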
diff --git a/deps/v8/src/base/platform/platform-fuchsia.cc b/deps/v8/src/base/platform/platform-fuchsia.cc
index a0fd83e939..f090ea5b6a 100644
--- a/deps/v8/src/base/platform/platform-fuchsia.cc
+++ b/deps/v8/src/base/platform/platform-fuchsia.cc
@@ -7,6 +7,7 @@
#include <lib/zx/vmar.h>
#include <lib/zx/vmo.h>
+#include "src/base/bits.h"
#include "src/base/macros.h"
#include "src/base/platform/platform-posix-time.h"
#include "src/base/platform/platform-posix.h"
@@ -34,24 +35,37 @@ zx_vm_option_t GetProtectionFromMemoryPermission(OS::MemoryPermission access) {
UNREACHABLE();
}
-} // namespace
-
-TimezoneCache* OS::CreateTimezoneCache() {
- return new PosixDefaultTimezoneCache();
+// Determine ZX_VM_ALIGN_X constant corresponding to the specified alignment.
+// Returns 0 if there is none.
+zx_vm_option_t GetAlignmentOptionFromAlignment(size_t alignment) {
+ // The alignment must be one of the ZX_VM_ALIGN_X constants.
+ // See zircon/system/public/zircon/types.h.
+ static_assert(
+ ZX_VM_ALIGN_1KB == (10 << ZX_VM_ALIGN_BASE),
+ "Fuchsia's ZX_VM_ALIGN_1KB constant doesn't match expected value");
+ static_assert(
+ ZX_VM_ALIGN_4GB == (32 << ZX_VM_ALIGN_BASE),
+ "Fuchsia's ZX_VM_ALIGN_4GB constant doesn't match expected value");
+ zx_vm_option_t alignment_log2 = 0;
+ for (int shift = 10; shift <= 32; shift++) {
+ if (alignment == (size_t{1} << shift)) {
+ alignment_log2 = shift;
+ break;
+ }
+ }
+ return alignment_log2 << ZX_VM_ALIGN_BASE;
}
-// static
-void* OS::Allocate(void* address, size_t size, size_t alignment,
- OS::MemoryPermission access) {
- size_t page_size = OS::AllocatePageSize();
+void* AllocateInternal(const zx::vmar& vmar, size_t page_size,
+ size_t vmar_offset, bool vmar_offset_is_hint,
+ size_t size, size_t alignment,
+ OS::MemoryPermission access) {
DCHECK_EQ(0, size % page_size);
DCHECK_EQ(0, alignment % page_size);
- address = AlignedAddress(address, alignment);
- // Add the maximum misalignment so we are guaranteed an aligned base address.
- size_t request_size = size + (alignment - page_size);
+ DCHECK_EQ(0, vmar_offset % page_size);
zx::vmo vmo;
- if (zx::vmo::create(request_size, 0, &vmo) != ZX_OK) {
+ if (zx::vmo::create(size, 0, &vmo) != ZX_OK) {
return nullptr;
}
static const char kVirtualMemoryName[] = "v8-virtualmem";
@@ -68,85 +82,130 @@ void* OS::Allocate(void* address, size_t size, size_t alignment,
zx_vm_option_t options = GetProtectionFromMemoryPermission(access);
- uint64_t vmar_offset = 0;
- if (address) {
- vmar_offset = reinterpret_cast<uint64_t>(address);
+ zx_vm_option_t alignment_option = GetAlignmentOptionFromAlignment(alignment);
+ CHECK_NE(0, alignment_option); // Invalid alignment specified
+ options |= alignment_option;
+
+ if (vmar_offset != 0) {
options |= ZX_VM_SPECIFIC;
}
- zx_vaddr_t reservation;
- zx_status_t status = zx::vmar::root_self()->map(options, vmar_offset, vmo, 0,
- request_size, &reservation);
- if (status != ZX_OK && address != nullptr) {
- // Retry without the hint, if we supplied one.
+ zx_vaddr_t address;
+ zx_status_t status = vmar.map(options, vmar_offset, vmo, 0, size, &address);
+
+ if (status != ZX_OK && vmar_offset != 0 && vmar_offset_is_hint) {
+ // If a vmar_offset was specified and the allocation failed (for example,
+ // because the offset overlapped another mapping), then we should retry
+ // again without a vmar_offset if that offset was just meant to be a hint.
options &= ~(ZX_VM_SPECIFIC);
- status = zx::vmar::root_self()->map(options, 0, vmo, 0, request_size,
- &reservation);
+ status = vmar.map(options, 0, vmo, 0, size, &address);
}
+
if (status != ZX_OK) {
return nullptr;
}
- uint8_t* base = reinterpret_cast<uint8_t*>(reservation);
- uint8_t* aligned_base = reinterpret_cast<uint8_t*>(
- RoundUp(reinterpret_cast<uintptr_t>(base), alignment));
-
- // Unmap extra memory reserved before and after the desired block.
- if (aligned_base != base) {
- DCHECK_LT(base, aligned_base);
- size_t prefix_size = static_cast<size_t>(aligned_base - base);
- zx::vmar::root_self()->unmap(reinterpret_cast<uintptr_t>(base),
- prefix_size);
- request_size -= prefix_size;
- }
+ return reinterpret_cast<void*>(address);
+}
+
+bool FreeInternal(const zx::vmar& vmar, size_t page_size, void* address,
+ const size_t size) {
+ DCHECK_EQ(0, reinterpret_cast<uintptr_t>(address) % page_size);
+ DCHECK_EQ(0, size % page_size);
+ return vmar.unmap(reinterpret_cast<uintptr_t>(address), size) == ZX_OK;
+}
+
+bool SetPermissionsInternal(const zx::vmar& vmar, size_t page_size,
+ void* address, size_t size,
+ OS::MemoryPermission access) {
+ DCHECK_EQ(0, reinterpret_cast<uintptr_t>(address) % page_size);
+ DCHECK_EQ(0, size % page_size);
+ uint32_t prot = GetProtectionFromMemoryPermission(access);
+ return vmar.protect(prot, reinterpret_cast<uintptr_t>(address), size) ==
+ ZX_OK;
+}
+
+bool DiscardSystemPagesInternal(const zx::vmar& vmar, size_t page_size,
+ void* address, size_t size) {
+ DCHECK_EQ(0, reinterpret_cast<uintptr_t>(address) % page_size);
+ DCHECK_EQ(0, size % page_size);
+ uint64_t address_int = reinterpret_cast<uint64_t>(address);
+ return vmar.op_range(ZX_VMO_OP_DECOMMIT, address_int, size, nullptr, 0) ==
+ ZX_OK;
+}
+
+zx_status_t CreateAddressSpaceReservationInternal(
+ const zx::vmar& vmar, size_t page_size, size_t vmar_offset,
+ bool vmar_offset_is_hint, size_t size, size_t alignment,
+ OS::MemoryPermission max_permission, zx::vmar* child,
+ zx_vaddr_t* child_addr) {
+ DCHECK_EQ(0, size % page_size);
+ DCHECK_EQ(0, alignment % page_size);
+ DCHECK_EQ(0, vmar_offset % page_size);
+
+ // TODO(v8) determine these based on max_permission.
+ zx_vm_option_t options = ZX_VM_CAN_MAP_READ | ZX_VM_CAN_MAP_WRITE |
+ ZX_VM_CAN_MAP_EXECUTE | ZX_VM_CAN_MAP_SPECIFIC;
- size_t aligned_size = RoundUp(size, page_size);
+ zx_vm_option_t alignment_option = GetAlignmentOptionFromAlignment(alignment);
+ CHECK_NE(0, alignment_option); // Invalid alignment specified
+ options |= alignment_option;
- if (aligned_size != request_size) {
- DCHECK_LT(aligned_size, request_size);
- size_t suffix_size = request_size - aligned_size;
- zx::vmar::root_self()->unmap(
- reinterpret_cast<uintptr_t>(aligned_base + aligned_size), suffix_size);
- request_size -= suffix_size;
+ if (vmar_offset != 0) {
+ options |= ZX_VM_SPECIFIC;
}
- DCHECK(aligned_size == request_size);
- return static_cast<void*>(aligned_base);
+ zx_status_t status =
+ vmar.allocate(options, vmar_offset, size, child, child_addr);
+ if (status != ZX_OK && vmar_offset != 0 && vmar_offset_is_hint) {
+ // If a vmar_offset was specified and the allocation failed (for example,
+ // because the offset overlapped another mapping), then we should retry
+ // again without a vmar_offset if that offset was just meant to be a hint.
+ options &= ~(ZX_VM_SPECIFIC);
+ status = vmar.allocate(options, 0, size, child, child_addr);
+ }
+
+ return status;
+}
+
+} // namespace
+
+TimezoneCache* OS::CreateTimezoneCache() {
+ return new PosixDefaultTimezoneCache();
}
// static
-bool OS::Free(void* address, const size_t size) {
- DCHECK_EQ(0, reinterpret_cast<uintptr_t>(address) % AllocatePageSize());
- DCHECK_EQ(0, size % AllocatePageSize());
- return zx::vmar::root_self()->unmap(reinterpret_cast<uintptr_t>(address),
- size) == ZX_OK;
+void* OS::Allocate(void* address, size_t size, size_t alignment,
+ MemoryPermission access) {
+ constexpr bool vmar_offset_is_hint = true;
+ DCHECK_EQ(0, reinterpret_cast<Address>(address) % alignment);
+ return AllocateInternal(*zx::vmar::root_self(), AllocatePageSize(),
+ reinterpret_cast<uint64_t>(address),
+ vmar_offset_is_hint, size, alignment, access);
}
// static
-bool OS::Release(void* address, size_t size) {
- DCHECK_EQ(0, reinterpret_cast<uintptr_t>(address) % CommitPageSize());
- DCHECK_EQ(0, size % CommitPageSize());
- return zx::vmar::root_self()->unmap(reinterpret_cast<uintptr_t>(address),
- size) == ZX_OK;
+bool OS::Free(void* address, const size_t size) {
+ return FreeInternal(*zx::vmar::root_self(), AllocatePageSize(), address,
+ size);
}
// static
+bool OS::Release(void* address, size_t size) { return Free(address, size); }
+
+// static
bool OS::SetPermissions(void* address, size_t size, MemoryPermission access) {
- DCHECK_EQ(0, reinterpret_cast<uintptr_t>(address) % CommitPageSize());
- DCHECK_EQ(0, size % CommitPageSize());
- uint32_t prot = GetProtectionFromMemoryPermission(access);
- return zx::vmar::root_self()->protect(
- prot, reinterpret_cast<uintptr_t>(address), size) == ZX_OK;
+ return SetPermissionsInternal(*zx::vmar::root_self(), CommitPageSize(),
+ address, size, access);
}
// static
bool OS::DiscardSystemPages(void* address, size_t size) {
- uint64_t address_int = reinterpret_cast<uint64_t>(address);
- zx_status_t status = zx::vmar::root_self()->op_range(
- ZX_VMO_OP_DECOMMIT, address_int, size, nullptr, 0);
- return status == ZX_OK;
+ return DiscardSystemPagesInternal(*zx::vmar::root_self(), CommitPageSize(),
+ address, size);
}
+// static
bool OS::DecommitPages(void* address, size_t size) {
// We rely on DiscardSystemPages decommitting the pages immediately (via
// ZX_VMO_OP_DECOMMIT) so that they are guaranteed to be zero-initialized
@@ -156,6 +215,34 @@ bool OS::DecommitPages(void* address, size_t size) {
}
// static
+bool OS::CanReserveAddressSpace() { return true; }
+
+// static
+Optional<AddressSpaceReservation> OS::CreateAddressSpaceReservation(
+ void* hint, size_t size, size_t alignment,
+ MemoryPermission max_permission) {
+ DCHECK_EQ(0, reinterpret_cast<Address>(hint) % alignment);
+ zx::vmar child;
+ zx_vaddr_t child_addr;
+ uint64_t vmar_offset = reinterpret_cast<uint64_t>(hint);
+ constexpr bool vmar_offset_is_hint = true;
+ zx_status_t status = CreateAddressSpaceReservationInternal(
+ *zx::vmar::root_self(), AllocatePageSize(), vmar_offset,
+ vmar_offset_is_hint, size, alignment, max_permission, &child,
+ &child_addr);
+ if (status != ZX_OK) return {};
+ return AddressSpaceReservation(reinterpret_cast<void*>(child_addr), size,
+ child.release());
+}
+
+// static
+bool OS::FreeAddressSpaceReservation(AddressSpaceReservation reservation) {
+ // Destroy the vmar and release the handle.
+ zx::vmar vmar(reservation.vmar_);
+ return vmar.destroy() == ZX_OK;
+}
+
+// static
bool OS::HasLazyCommits() { return true; }
std::vector<OS::SharedLibraryAddress> OS::GetSharedLibraryAddresses() {
@@ -194,5 +281,74 @@ std::vector<OS::MemoryRange> OS::GetFreeMemoryRangesWithin(
return {};
}
+Optional<AddressSpaceReservation> AddressSpaceReservation::CreateSubReservation(
+ void* address, size_t size, OS::MemoryPermission max_permission) {
+ DCHECK(Contains(address, size));
+
+ zx::vmar child;
+ zx_vaddr_t child_addr;
+ size_t vmar_offset = 0;
+ if (address != 0) {
+ vmar_offset =
+ reinterpret_cast<size_t>(address) - reinterpret_cast<size_t>(base());
+ }
+ constexpr bool vmar_offset_is_hint = false;
+ zx_status_t status = CreateAddressSpaceReservationInternal(
+ *zx::unowned_vmar(vmar_), OS::AllocatePageSize(), vmar_offset,
+ vmar_offset_is_hint, size, OS::AllocatePageSize(), max_permission, &child,
+ &child_addr);
+ if (status != ZX_OK) return {};
+ DCHECK_EQ(reinterpret_cast<void*>(child_addr), address);
+ return AddressSpaceReservation(reinterpret_cast<void*>(child_addr), size,
+ child.release());
+}
+
+bool AddressSpaceReservation::FreeSubReservation(
+ AddressSpaceReservation reservation) {
+ return OS::FreeAddressSpaceReservation(reservation);
+}
+
+bool AddressSpaceReservation::Allocate(void* address, size_t size,
+ OS::MemoryPermission access) {
+ DCHECK(Contains(address, size));
+ size_t vmar_offset = 0;
+ if (address != 0) {
+ vmar_offset =
+ reinterpret_cast<size_t>(address) - reinterpret_cast<size_t>(base());
+ }
+ constexpr bool vmar_offset_is_hint = false;
+ void* allocation = AllocateInternal(
+ *zx::unowned_vmar(vmar_), OS::AllocatePageSize(), vmar_offset,
+ vmar_offset_is_hint, size, OS::AllocatePageSize(), access);
+ DCHECK(!allocation || allocation == address);
+ return allocation != nullptr;
+}
+
+bool AddressSpaceReservation::Free(void* address, size_t size) {
+ DCHECK(Contains(address, size));
+ return FreeInternal(*zx::unowned_vmar(vmar_), OS::AllocatePageSize(), address,
+ size);
+}
+
+bool AddressSpaceReservation::SetPermissions(void* address, size_t size,
+ OS::MemoryPermission access) {
+ DCHECK(Contains(address, size));
+ return SetPermissionsInternal(*zx::unowned_vmar(vmar_), OS::CommitPageSize(),
+ address, size, access);
+}
+
+bool AddressSpaceReservation::DiscardSystemPages(void* address, size_t size) {
+ DCHECK(Contains(address, size));
+ return DiscardSystemPagesInternal(*zx::unowned_vmar(vmar_),
+ OS::CommitPageSize(), address, size);
+}
+
+bool AddressSpaceReservation::DecommitPages(void* address, size_t size) {
+ DCHECK(Contains(address, size));
+ // See comment in OS::DecommitPages.
+ return SetPermissions(address, size, OS::MemoryPermission::kNoAccess) &&
+ DiscardSystemPages(address, size);
+}
+
} // namespace base
} // namespace v8
diff --git a/deps/v8/src/base/platform/platform-posix.cc b/deps/v8/src/base/platform/platform-posix.cc
index f05f22c913..155af37155 100644
--- a/deps/v8/src/base/platform/platform-posix.cc
+++ b/deps/v8/src/base/platform/platform-posix.cc
@@ -153,11 +153,15 @@ int GetFlagsForMemoryPermission(OS::MemoryPermission access,
flags |= MAP_LAZY;
#endif // V8_OS_QNX
}
-#if V8_HAS_PTHREAD_JIT_WRITE_PROTECT
+#if V8_OS_MACOSX
+ // MAP_JIT is required to obtain writable and executable pages when the
+ // hardened runtime/memory protection is enabled, which is optional (via code
+ // signing) on Intel-based Macs but mandatory on Apple silicon ones. See also
+ // https://developer.apple.com/documentation/apple-silicon/porting-just-in-time-compilers-to-apple-silicon.
if (access == OS::MemoryPermission::kNoAccessWillJitLater) {
flags |= MAP_JIT;
}
-#endif
+#endif // V8_OS_MACOSX
return flags;
}
@@ -467,6 +471,7 @@ bool OS::SetPermissions(void* address, size_t size, MemoryPermission access) {
return ret == 0;
}
+// static
bool OS::DiscardSystemPages(void* address, size_t size) {
DCHECK_EQ(0, reinterpret_cast<uintptr_t>(address) % CommitPageSize());
DCHECK_EQ(0, size % CommitPageSize());
@@ -495,6 +500,7 @@ bool OS::DiscardSystemPages(void* address, size_t size) {
return ret == 0;
}
+// static
bool OS::DecommitPages(void* address, size_t size) {
DCHECK_EQ(0, reinterpret_cast<uintptr_t>(address) % CommitPageSize());
DCHECK_EQ(0, size % CommitPageSize());
@@ -510,6 +516,36 @@ bool OS::DecommitPages(void* address, size_t size) {
}
// static
+bool OS::CanReserveAddressSpace() { return true; }
+
+// static
+Optional<AddressSpaceReservation> OS::CreateAddressSpaceReservation(
+ void* hint, size_t size, size_t alignment,
+ MemoryPermission max_permission) {
+ // On POSIX, address space reservations are backed by private memory mappings.
+ MemoryPermission permission = MemoryPermission::kNoAccess;
+ if (max_permission == MemoryPermission::kReadWriteExecute) {
+ permission = MemoryPermission::kNoAccessWillJitLater;
+ }
+
+ void* reservation = Allocate(hint, size, alignment, permission);
+ if (!reservation && permission == MemoryPermission::kNoAccessWillJitLater) {
+ // Retry without MAP_JIT, for example in case we are running on an old OS X.
+ permission = MemoryPermission::kNoAccess;
+ reservation = Allocate(hint, size, alignment, permission);
+ }
+
+ if (!reservation) return {};
+
+ return AddressSpaceReservation(reservation, size);
+}
+
+// static
+bool OS::FreeAddressSpaceReservation(AddressSpaceReservation reservation) {
+ return Free(reservation.base(), reservation.size());
+}
+
+// static
bool OS::HasLazyCommits() {
#if V8_OS_AIX || V8_OS_LINUX || V8_OS_MACOSX
return true;
@@ -823,6 +859,57 @@ void OS::StrNCpy(char* dest, int length, const char* src, size_t n) {
strncpy(dest, src, n);
}
+// ----------------------------------------------------------------------------
+// POSIX Address space reservation support.
+//
+
+#if !V8_OS_CYGWIN && !V8_OS_FUCHSIA
+
+Optional<AddressSpaceReservation> AddressSpaceReservation::CreateSubReservation(
+ void* address, size_t size, OS::MemoryPermission max_permission) {
+ DCHECK(Contains(address, size));
+ DCHECK_EQ(0, size % OS::AllocatePageSize());
+ DCHECK_EQ(0, reinterpret_cast<uintptr_t>(address) % OS::AllocatePageSize());
+
+ return AddressSpaceReservation(address, size);
+}
+
+bool AddressSpaceReservation::FreeSubReservation(
+ AddressSpaceReservation reservation) {
+ // Nothing to do.
+ // Pages allocated inside the reservation must've already been freed.
+ return true;
+}
+
+bool AddressSpaceReservation::Allocate(void* address, size_t size,
+ OS::MemoryPermission access) {
+ // The region is already mmap'ed, so it just has to be made accessible now.
+ DCHECK(Contains(address, size));
+ return OS::SetPermissions(address, size, access);
+}
+
+bool AddressSpaceReservation::Free(void* address, size_t size) {
+ DCHECK(Contains(address, size));
+ return OS::DecommitPages(address, size);
+}
+
+bool AddressSpaceReservation::SetPermissions(void* address, size_t size,
+ OS::MemoryPermission access) {
+ DCHECK(Contains(address, size));
+ return OS::SetPermissions(address, size, access);
+}
+
+bool AddressSpaceReservation::DiscardSystemPages(void* address, size_t size) {
+ DCHECK(Contains(address, size));
+ return OS::DiscardSystemPages(address, size);
+}
+
+bool AddressSpaceReservation::DecommitPages(void* address, size_t size) {
+ DCHECK(Contains(address, size));
+ return OS::DecommitPages(address, size);
+}
+
+#endif // !V8_OS_CYGWIN && !V8_OS_FUCHSIA
// ----------------------------------------------------------------------------
// POSIX thread support.
@@ -840,9 +927,8 @@ Thread::Thread(const Options& options)
: data_(new PlatformData),
stack_size_(options.stack_size()),
start_semaphore_(nullptr) {
- if (stack_size_ > 0 && static_cast<size_t>(stack_size_) < PTHREAD_STACK_MIN) {
- stack_size_ = PTHREAD_STACK_MIN;
- }
+ const int min_stack_size = static_cast<int>(PTHREAD_STACK_MIN);
+ if (stack_size_ > 0) stack_size_ = std::max(stack_size_, min_stack_size);
set_name(options.name());
}
diff --git a/deps/v8/src/base/platform/platform-win32.cc b/deps/v8/src/base/platform/platform-win32.cc
index 919c3ef4df..d00c4f5ebb 100644
--- a/deps/v8/src/base/platform/platform-win32.cc
+++ b/deps/v8/src/base/platform/platform-win32.cc
@@ -722,6 +722,20 @@ void OS::Initialize(bool hard_abort, const char* const gc_fake_mmap) {
g_hard_abort = hard_abort;
}
+typedef PVOID (*VirtualAlloc2_t)(HANDLE, PVOID, SIZE_T, ULONG, ULONG,
+ MEM_EXTENDED_PARAMETER*, ULONG);
+VirtualAlloc2_t VirtualAlloc2;
+
+void OS::EnsureWin32MemoryAPILoaded() {
+ static bool loaded = false;
+ if (!loaded) {
+ VirtualAlloc2 = (VirtualAlloc2_t)GetProcAddress(
+ GetModuleHandle(L"kernelbase.dll"), "VirtualAlloc2");
+
+ loaded = true;
+ }
+}
+
// static
size_t OS::AllocatePageSize() {
static size_t allocate_alignment = 0;
@@ -801,6 +815,14 @@ DWORD GetProtectionFromMemoryPermission(OS::MemoryPermission access) {
UNREACHABLE();
}
+void* VirtualAllocWrapper(void* hint, size_t size, DWORD flags, DWORD protect) {
+ if (VirtualAlloc2) {
+ return VirtualAlloc2(nullptr, hint, size, flags, protect, NULL, 0);
+ } else {
+ return VirtualAlloc(hint, size, flags, protect);
+ }
+}
+
uint8_t* RandomizedVirtualAlloc(size_t size, DWORD flags, DWORD protect,
void* hint) {
LPVOID base = nullptr;
@@ -816,32 +838,18 @@ uint8_t* RandomizedVirtualAlloc(size_t size, DWORD flags, DWORD protect,
if (use_aslr && protect != PAGE_READWRITE) {
// For executable or reserved pages try to randomize the allocation address.
- base = VirtualAlloc(hint, size, flags, protect);
+ base = VirtualAllocWrapper(hint, size, flags, protect);
}
// On failure, let the OS find an address to use.
if (base == nullptr) {
- base = VirtualAlloc(nullptr, size, flags, protect);
+ base = VirtualAllocWrapper(nullptr, size, flags, protect);
}
return reinterpret_cast<uint8_t*>(base);
}
-} // namespace
-
-// static
-void* OS::Allocate(void* hint, size_t size, size_t alignment,
- MemoryPermission access) {
- size_t page_size = AllocatePageSize();
- DCHECK_EQ(0, size % page_size);
- DCHECK_EQ(0, alignment % page_size);
- DCHECK_LE(page_size, alignment);
- hint = AlignedAddress(hint, alignment);
-
- DWORD flags = (access == OS::MemoryPermission::kNoAccess)
- ? MEM_RESERVE
- : MEM_RESERVE | MEM_COMMIT;
- DWORD protect = GetProtectionFromMemoryPermission(access);
-
+void* AllocateInternal(void* hint, size_t size, size_t alignment,
+ size_t page_size, DWORD flags, DWORD protect) {
// First, try an exact size aligned allocation.
uint8_t* base = RandomizedVirtualAlloc(size, flags, protect, hint);
if (base == nullptr) return nullptr; // Can't allocate, we're OOM.
@@ -852,7 +860,7 @@ void* OS::Allocate(void* hint, size_t size, size_t alignment,
if (base == aligned_base) return reinterpret_cast<void*>(base);
// Otherwise, free it and try a larger allocation.
- CHECK(Free(base, size));
+ CHECK(VirtualFree(base, 0, MEM_RELEASE));
// Clear the hint. It's unlikely we can allocate at this address.
hint = nullptr;
@@ -868,11 +876,11 @@ void* OS::Allocate(void* hint, size_t size, size_t alignment,
// Try to trim the allocation by freeing the padded allocation and then
// calling VirtualAlloc at the aligned base.
- CHECK(Free(base, padded_size));
+ CHECK(VirtualFree(base, 0, MEM_RELEASE));
aligned_base = reinterpret_cast<uint8_t*>(
RoundUp(reinterpret_cast<uintptr_t>(base), alignment));
base = reinterpret_cast<uint8_t*>(
- VirtualAlloc(aligned_base, size, flags, protect));
+ VirtualAllocWrapper(aligned_base, size, flags, protect));
// We might not get the reduced allocation due to a race. In that case,
// base will be nullptr.
if (base != nullptr) break;
@@ -881,6 +889,25 @@ void* OS::Allocate(void* hint, size_t size, size_t alignment,
return reinterpret_cast<void*>(base);
}
+} // namespace
+
+// static
+void* OS::Allocate(void* hint, size_t size, size_t alignment,
+ MemoryPermission access) {
+ size_t page_size = AllocatePageSize();
+ DCHECK_EQ(0, size % page_size);
+ DCHECK_EQ(0, alignment % page_size);
+ DCHECK_LE(page_size, alignment);
+ hint = AlignedAddress(hint, alignment);
+
+ DWORD flags = (access == OS::MemoryPermission::kNoAccess)
+ ? MEM_RESERVE
+ : MEM_RESERVE | MEM_COMMIT;
+ DWORD protect = GetProtectionFromMemoryPermission(access);
+
+ return AllocateInternal(hint, size, alignment, page_size, flags, protect);
+}
+
// static
bool OS::Free(void* address, const size_t size) {
DCHECK_EQ(0, reinterpret_cast<uintptr_t>(address) % AllocatePageSize());
@@ -904,7 +931,7 @@ bool OS::SetPermissions(void* address, size_t size, MemoryPermission access) {
return VirtualFree(address, size, MEM_DECOMMIT) != 0;
}
DWORD protect = GetProtectionFromMemoryPermission(access);
- return VirtualAlloc(address, size, MEM_COMMIT, protect) != nullptr;
+ return VirtualAllocWrapper(address, size, MEM_COMMIT, protect) != nullptr;
}
// static
@@ -929,7 +956,7 @@ bool OS::DiscardSystemPages(void* address, size_t size) {
}
// DiscardVirtualMemory is buggy in Win10 SP0, so fall back to MEM_RESET on
// failure.
- void* ptr = VirtualAlloc(address, size, MEM_RESET, PAGE_READWRITE);
+ void* ptr = VirtualAllocWrapper(address, size, MEM_RESET, PAGE_READWRITE);
CHECK(ptr);
return ptr;
}
@@ -950,6 +977,35 @@ bool OS::DecommitPages(void* address, size_t size) {
}
// static
+bool OS::CanReserveAddressSpace() { return VirtualAlloc2 != nullptr; }
+
+// static
+Optional<AddressSpaceReservation> OS::CreateAddressSpaceReservation(
+ void* hint, size_t size, size_t alignment,
+ MemoryPermission max_permission) {
+ CHECK(CanReserveAddressSpace());
+
+ size_t page_size = AllocatePageSize();
+ DCHECK_EQ(0, size % page_size);
+ DCHECK_EQ(0, alignment % page_size);
+ DCHECK_LE(page_size, alignment);
+ hint = AlignedAddress(hint, alignment);
+
+ // On Windows, address space reservations are backed by placeholder mappings.
+ void* reservation =
+ AllocateInternal(hint, size, alignment, page_size,
+ MEM_RESERVE | MEM_RESERVE_PLACEHOLDER, PAGE_NOACCESS);
+ if (!reservation) return {};
+
+ return AddressSpaceReservation(reservation, size);
+}
+
+// static
+bool OS::FreeAddressSpaceReservation(AddressSpaceReservation reservation) {
+ return OS::Free(reservation.base(), reservation.size());
+}
+
+// static
bool OS::HasLazyCommits() {
// TODO(alph): implement for the platform.
return false;
@@ -1068,6 +1124,64 @@ Win32MemoryMappedFile::~Win32MemoryMappedFile() {
CloseHandle(file_);
}
+Optional<AddressSpaceReservation> AddressSpaceReservation::CreateSubReservation(
+ void* address, size_t size, OS::MemoryPermission max_permission) {
+ // Nothing to do, the sub reservation must already have been split by now.
+ DCHECK(Contains(address, size));
+ DCHECK_EQ(0, size % OS::AllocatePageSize());
+ DCHECK_EQ(0, reinterpret_cast<uintptr_t>(address) % OS::AllocatePageSize());
+
+ return AddressSpaceReservation(address, size);
+}
+
+bool AddressSpaceReservation::FreeSubReservation(
+ AddressSpaceReservation reservation) {
+ // Nothing to do.
+ // Pages allocated inside the reservation must've already been freed.
+ return true;
+}
+
+bool AddressSpaceReservation::SplitPlaceholder(void* address, size_t size) {
+ DCHECK(Contains(address, size));
+ return VirtualFree(address, size, MEM_RELEASE | MEM_PRESERVE_PLACEHOLDER);
+}
+
+bool AddressSpaceReservation::MergePlaceholders(void* address, size_t size) {
+ DCHECK(Contains(address, size));
+ return VirtualFree(address, size, MEM_RELEASE | MEM_COALESCE_PLACEHOLDERS);
+}
+
+bool AddressSpaceReservation::Allocate(void* address, size_t size,
+ OS::MemoryPermission access) {
+ DCHECK(Contains(address, size));
+ CHECK(VirtualAlloc2);
+ DWORD flags = (access == OS::MemoryPermission::kNoAccess)
+ ? MEM_RESERVE | MEM_REPLACE_PLACEHOLDER
+ : MEM_RESERVE | MEM_COMMIT | MEM_REPLACE_PLACEHOLDER;
+ DWORD protect = GetProtectionFromMemoryPermission(access);
+ return VirtualAlloc2(nullptr, address, size, flags, protect, NULL, 0);
+}
+
+bool AddressSpaceReservation::Free(void* address, size_t size) {
+ DCHECK(Contains(address, size));
+ return VirtualFree(address, size, MEM_RELEASE | MEM_PRESERVE_PLACEHOLDER);
+}
+
+bool AddressSpaceReservation::SetPermissions(void* address, size_t size,
+ OS::MemoryPermission access) {
+ DCHECK(Contains(address, size));
+ return OS::SetPermissions(address, size, access);
+}
+
+bool AddressSpaceReservation::DiscardSystemPages(void* address, size_t size) {
+ DCHECK(Contains(address, size));
+ return OS::DiscardSystemPages(address, size);
+}
+
+bool AddressSpaceReservation::DecommitPages(void* address, size_t size) {
+ DCHECK(Contains(address, size));
+ return OS::DecommitPages(address, size);
+}
// The following code loads functions defined in DbgHelp.h and TlHelp32.h
// dynamically. This is to avoid depending on dbghelp.dll and
diff --git a/deps/v8/src/base/platform/platform.h b/deps/v8/src/base/platform/platform.h
index bc1edc9c03..53a7267889 100644
--- a/deps/v8/src/base/platform/platform.h
+++ b/deps/v8/src/base/platform/platform.h
@@ -29,6 +29,7 @@
#include "src/base/base-export.h"
#include "src/base/build_config.h"
#include "src/base/compiler-specific.h"
+#include "src/base/optional.h"
#include "src/base/platform/mutex.h"
#include "src/base/platform/semaphore.h"
@@ -36,6 +37,10 @@
#include "src/base/qnx-math.h"
#endif
+#if V8_OS_FUCHSIA
+#include <zircon/types.h>
+#endif // V8_OS_FUCHSIA
+
#ifdef V8_USE_ADDRESS_SANITIZER
#include <sanitizer/asan_interface.h>
#endif // V8_USE_ADDRESS_SANITIZER
@@ -115,8 +120,11 @@ inline intptr_t InternalGetExistingThreadLocal(intptr_t index) {
#endif // V8_NO_FAST_TLS
+class AddressSpaceReservation;
class PageAllocator;
class TimezoneCache;
+class VirtualAddressSpace;
+class VirtualAddressSubspace;
// ----------------------------------------------------------------------------
// OS
@@ -132,6 +140,17 @@ class V8_BASE_EXPORT OS {
// - gc_fake_mmap: Name of the file for fake gc mmap used in ll_prof.
static void Initialize(bool hard_abort, const char* const gc_fake_mmap);
+#if V8_OS_WIN
+ // On Windows, ensure the newer memory API is loaded if available. This
+ // includes functions like VirtualAlloc2 and MapViewOfFile3.
+ // TODO(chromium:1218005) this should probably happen as part of Initialize,
+ // but that is currently invoked too late, after the virtual memory cage
+ // is initialized. However, eventually the virtual memory cage initialization
+ // will happen as part of V8::Initialize, at which point this function can
+ // probably be merged into OS::Initialize.
+ static void EnsureWin32MemoryAPILoaded();
+#endif
+
// Returns the accumulated user time for thread. This routine
// can be used for profiling. The implementation should
// strive for high-precision timer resolution, preferable
@@ -291,9 +310,12 @@ class V8_BASE_EXPORT OS {
private:
// These classes use the private memory management API below.
+ friend class AddressSpaceReservation;
friend class MemoryMappedFile;
friend class PosixMemoryMappedFile;
friend class v8::base::PageAllocator;
+ friend class v8::base::VirtualAddressSpace;
+ friend class v8::base::VirtualAddressSubspace;
static size_t AllocatePageSize();
@@ -326,6 +348,15 @@ class V8_BASE_EXPORT OS {
V8_WARN_UNUSED_RESULT static bool DecommitPages(void* address, size_t size);
+ V8_WARN_UNUSED_RESULT static bool CanReserveAddressSpace();
+
+ V8_WARN_UNUSED_RESULT static Optional<AddressSpaceReservation>
+ CreateAddressSpaceReservation(void* hint, size_t size, size_t alignment,
+ MemoryPermission max_permission);
+
+ V8_WARN_UNUSED_RESULT static bool FreeAddressSpaceReservation(
+ AddressSpaceReservation reservation);
+
static const int msPerSecond = 1000;
#if V8_OS_POSIX
@@ -348,6 +379,73 @@ inline void EnsureConsoleOutput() {
}
// ----------------------------------------------------------------------------
+// AddressSpaceReservation
+//
+// This class provides the same memory management functions as OS but operates
+// inside a previously reserved contiguous region of virtual address space.
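+//
+// Illustrative usage sketch (not part of this change; the size below is a
+// placeholder). This API is private to OS and its friend classes, so the
+// sketch roughly mirrors how e.g. VirtualAddressSubspace uses it:
+//
+//   size_t size = 16 * OS::AllocatePageSize();
+//   Optional<AddressSpaceReservation> reservation =
+//       OS::CreateAddressSpaceReservation(nullptr, size, OS::AllocatePageSize(),
+//                                         OS::MemoryPermission::kReadWrite);
+//   if (reservation.has_value()) {
+//     // Back the whole reserved range with accessible pages, use it, then
+//     // return it to the reserved state and release the reservation.
+//     CHECK(reservation->Allocate(reservation->base(), size,
+//                                 OS::MemoryPermission::kReadWrite));
+//     CHECK(reservation->Free(reservation->base(), size));
+//     CHECK(OS::FreeAddressSpaceReservation(*reservation));
+//   }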
+class V8_BASE_EXPORT AddressSpaceReservation {
+ public:
+ using Address = uintptr_t;
+
+ void* base() const { return base_; }
+ size_t size() const { return size_; }
+
+ bool Contains(void* region_addr, size_t region_size) const {
+ Address base = reinterpret_cast<Address>(base_);
+ Address region_base = reinterpret_cast<Address>(region_addr);
+ return (region_base >= base) &&
+ ((region_base + region_size) <= (base + size_));
+ }
+
+ V8_WARN_UNUSED_RESULT bool Allocate(void* address, size_t size,
+ OS::MemoryPermission access);
+
+ V8_WARN_UNUSED_RESULT bool Free(void* address, size_t size);
+
+ V8_WARN_UNUSED_RESULT bool SetPermissions(void* address, size_t size,
+ OS::MemoryPermission access);
+
+ V8_WARN_UNUSED_RESULT bool DiscardSystemPages(void* address, size_t size);
+
+ V8_WARN_UNUSED_RESULT bool DecommitPages(void* address, size_t size);
+
+ V8_WARN_UNUSED_RESULT Optional<AddressSpaceReservation> CreateSubReservation(
+ void* address, size_t size, OS::MemoryPermission max_permission);
+
+ V8_WARN_UNUSED_RESULT static bool FreeSubReservation(
+ AddressSpaceReservation reservation);
+
+#if V8_OS_WIN
+ // On Windows, the placeholder mappings backing address space reservations
+ // need to be split and merged as page allocations can only replace an entire
+ // placeholder mapping, not parts of it. This must be done by the users of
+ // this API as it requires a RegionAllocator (or equivalent) to keep track of
+ // sub-regions and decide when to split and when to coalesce multiple free
+ // regions into a single one.
+ V8_WARN_UNUSED_RESULT bool SplitPlaceholder(void* address, size_t size);
+ V8_WARN_UNUSED_RESULT bool MergePlaceholders(void* address, size_t size);
+#endif // V8_OS_WIN
+
+ private:
+ friend class OS;
+
+#if V8_OS_FUCHSIA
+ AddressSpaceReservation(void* base, size_t size, zx_handle_t vmar)
+ : base_(base), size_(size), vmar_(vmar) {}
+#else
+ AddressSpaceReservation(void* base, size_t size) : base_(base), size_(size) {}
+#endif // V8_OS_FUCHSIA
+
+ void* base_ = nullptr;
+ size_t size_ = 0;
+
+#if V8_OS_FUCHSIA
+ // On Fuchsia, address space reservations are backed by VMARs.
+ zx_handle_t vmar_ = ZX_HANDLE_INVALID;
+#endif // V8_OS_FUCHSIA
+};
+
+// ----------------------------------------------------------------------------
// Thread
//
// Thread objects are used for creating and running threads. When the start()
diff --git a/deps/v8/src/base/platform/yield-processor.h b/deps/v8/src/base/platform/yield-processor.h
new file mode 100644
index 0000000000..a2f4b2d413
--- /dev/null
+++ b/deps/v8/src/base/platform/yield-processor.h
@@ -0,0 +1,55 @@
+// Copyright 2021 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_BASE_PLATFORM_YIELD_PROCESSOR_H_
+#define V8_BASE_PLATFORM_YIELD_PROCESSOR_H_
+
+// The YIELD_PROCESSOR macro wraps an architecture-specific instruction that
+// informs the processor we're in a busy wait, so it can handle the branch more
+// intelligently and e.g. reduce power to our core or give more resources to the
+// other hyper-thread on this core. See the following for context:
+// https://software.intel.com/en-us/articles/benefitting-power-and-performance-sleep-loops
+
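+// Illustrative usage sketch (not part of this header): a spin-wait loop that
+// polls an atomic flag, yielding the processor between polls. |flag| is a
+// hypothetical std::atomic<bool>.
+//
+//   while (!flag.load(std::memory_order_acquire)) {
+//     YIELD_PROCESSOR;
+//   }
+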
+#if defined(V8_CC_MSVC)
+// MSVC does not support inline assembly via __asm__ and provides compiler
+// intrinsics instead. Check if there is a usable intrinsic.
+//
+// intrin.h is an expensive header, so only include it if we're on a host
+// architecture that has a usable intrinsic.
+#if defined(V8_HOST_ARCH_IA32) || defined(V8_HOST_ARCH_X64)
+#include <intrin.h>
+#define YIELD_PROCESSOR _mm_pause()
+#elif defined(V8_HOST_ARCH_ARM64) || \
+ (defined(V8_HOST_ARCH_ARM) && __ARM_ARCH >= 6)
+#include <intrin.h>
+#define YIELD_PROCESSOR __yield()
+#endif // V8_HOST_ARCH
+
+#else // !V8_CC_MSVC
+
+#if defined(V8_HOST_ARCH_IA32) || defined(V8_HOST_ARCH_X64)
+#define YIELD_PROCESSOR __asm__ __volatile__("pause")
+#elif defined(V8_HOST_ARCH_ARM64) || \
+ (defined(V8_HOST_ARCH_ARM) && __ARM_ARCH >= 6)
+#define YIELD_PROCESSOR __asm__ __volatile__("yield")
+#elif defined(V8_HOST_ARCH_MIPS)
+// The MIPS32 docs state that the PAUSE instruction is a no-op on older
+// architectures (first added in MIPS32r2). To avoid assembler errors when
+// targeting pre-r2, we must encode the instruction manually.
+#define YIELD_PROCESSOR __asm__ __volatile__(".word 0x00000140")
+#elif defined(V8_HOST_ARCH_MIPS64EL) && __mips_isa_rev >= 2
+// Don't bother using .word here since r2 is the lowest mips64 revision that
+// Chromium supports.
+#define YIELD_PROCESSOR __asm__ __volatile__("pause")
+#elif defined(V8_HOST_ARCH_PPC64)
+#define YIELD_PROCESSOR __asm__ __volatile__("or 31,31,31")
+#endif // V8_HOST_ARCH
+
+#endif // V8_CC_MSVC
+
+#ifndef YIELD_PROCESSOR
+#define YIELD_PROCESSOR ((void)0)
+#endif
+
+#endif // V8_BASE_PLATFORM_YIELD_PROCESSOR_H_
diff --git a/deps/v8/src/base/region-allocator.cc b/deps/v8/src/base/region-allocator.cc
index 53932d2864..d4d443cacf 100644
--- a/deps/v8/src/base/region-allocator.cc
+++ b/deps/v8/src/base/region-allocator.cc
@@ -41,6 +41,8 @@ RegionAllocator::RegionAllocator(Address memory_region_begin,
}
RegionAllocator::~RegionAllocator() {
+ // TODO(chromium:1218005) either (D)CHECK that all allocated regions have
+ // been freed again (and thus merged into a single region) or do that now.
for (Region* region : all_regions_) {
delete region;
}
@@ -87,6 +89,8 @@ RegionAllocator::Region* RegionAllocator::Split(Region* region,
DCHECK_NE(new_size, 0);
DCHECK_GT(region->size(), new_size);
+ if (on_split_) on_split_(region->begin(), new_size);
+
// Create new region and put it to the lists after the |region|.
DCHECK(!region->is_excluded());
RegionState state = region->state();
@@ -112,6 +116,9 @@ void RegionAllocator::Merge(AllRegionsSet::iterator prev_iter,
Region* prev = *prev_iter;
Region* next = *next_iter;
DCHECK_EQ(prev->end(), next->begin());
+
+ if (on_merge_) on_merge_(prev->begin(), prev->size() + next->size());
+
prev->set_size(prev->size() + next->size());
all_regions_.erase(next_iter); // prev_iter stays valid.
@@ -229,6 +236,29 @@ RegionAllocator::Address RegionAllocator::AllocateAlignedRegion(
return region->begin();
}
+RegionAllocator::Address RegionAllocator::AllocateRegion(Address hint,
+ size_t size,
+ size_t alignment) {
+ DCHECK(IsAligned(alignment, page_size()));
+ DCHECK(IsAligned(hint, alignment));
+
+ if (hint && contains(hint, size)) {
+ if (AllocateRegionAt(hint, size)) {
+ return hint;
+ }
+ }
+
+ Address address;
+ if (alignment <= page_size()) {
+ // TODO(chromium:1218005): Consider using randomized version here.
+ address = AllocateRegion(size);
+ } else {
+ address = AllocateAlignedRegion(size, alignment);
+ }
+
+ return address;
+}
+
size_t RegionAllocator::TrimRegion(Address address, size_t new_size) {
DCHECK(IsAligned(new_size, page_size_));
diff --git a/deps/v8/src/base/region-allocator.h b/deps/v8/src/base/region-allocator.h
index f80524870f..13df2aa7ef 100644
--- a/deps/v8/src/base/region-allocator.h
+++ b/deps/v8/src/base/region-allocator.h
@@ -27,6 +27,8 @@ class V8_BASE_EXPORT RegionAllocator final {
public:
using Address = uintptr_t;
+ using SplitMergeCallback = std::function<void(Address start, size_t size)>;
+
static constexpr Address kAllocationFailure = static_cast<Address>(-1);
enum class RegionState {
@@ -43,6 +45,27 @@ class V8_BASE_EXPORT RegionAllocator final {
RegionAllocator& operator=(const RegionAllocator&) = delete;
~RegionAllocator();
+ // Split and merge callbacks.
+ //
+ // These callbacks can be installed to perform additional logic when regions
+ // are split or merged. For example, when managing Windows placeholder
+ // regions, a region must be split into sub-regions (using
+ // VirtualFree(MEM_PRESERVE_PLACEHOLDER)) before a part of it can be replaced
+ // with an actual memory mapping. Similarly, multiple sub-regions must be
+ // merged (using VirtualFree(MEM_COALESCE_PLACEHOLDERS)) when coalescing them
+ // into a larger, free region again.
+ //
+ // The on_split callback is called to signal that an existing region is split
+ // so that [start, start+size) becomes a new region.
+ void set_on_split_callback(SplitMergeCallback callback) {
+ on_split_ = callback;
+ }
+ // The on_merge callback is called to signal that all regions in the range
+ // [start, start+size) are merged into a single one.
+ void set_on_merge_callback(SplitMergeCallback callback) {
+ on_merge_ = callback;
+ }
+
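+ // Illustrative sketch (not part of this change): a client managing Windows
+ // placeholder regions would typically forward these callbacks to the
+ // AddressSpaceReservation backing the same range, e.g.
+ //
+ //   allocator.set_on_split_callback([&](Address start, size_t size) {
+ //     CHECK(reservation.SplitPlaceholder(reinterpret_cast<void*>(start),
+ //                                        size));
+ //   });
+ //   allocator.set_on_merge_callback([&](Address start, size_t size) {
+ //     CHECK(reservation.MergePlaceholders(reinterpret_cast<void*>(start),
+ //                                         size));
+ //   });
+ //
+ // Here |allocator| is a RegionAllocator and |reservation| an
+ // AddressSpaceReservation covering the same range; both names are
+ // placeholders.
+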
// Allocates region of |size| (must be |page_size|-aligned). Returns
// the address of the region on success or kAllocationFailure.
Address AllocateRegion(size_t size);
@@ -66,6 +89,11 @@ class V8_BASE_EXPORT RegionAllocator final {
// success or kAllocationFailure.
Address AllocateAlignedRegion(size_t size, size_t alignment);
+ // Attempts to allocate a region of the given size and alignment at the
+ // specified address but falls back to allocating the region elsewhere if
+ // necessary.
+ Address AllocateRegion(Address hint, size_t size, size_t alignment);
+
// Frees region at given |address|, returns the size of the region.
// There must be a used region starting at given address otherwise nothing
// will be freed and 0 will be returned.
@@ -114,9 +142,9 @@ class V8_BASE_EXPORT RegionAllocator final {
bool is_free() const { return state_ == RegionState::kFree; }
bool is_allocated() const { return state_ == RegionState::kAllocated; }
bool is_excluded() const { return state_ == RegionState::kExcluded; }
- void set_state(RegionState state) { state_ = state; }
RegionState state() { return state_; }
+ void set_state(RegionState state) { state_ = state; }
void Print(std::ostream& os) const;
@@ -158,6 +186,10 @@ class V8_BASE_EXPORT RegionAllocator final {
// Free regions ordered by sizes and addresses.
std::set<Region*, SizeAddressOrder> free_regions_;
+ // Callbacks called when regions are split or merged.
+ SplitMergeCallback on_split_;
+ SplitMergeCallback on_merge_;
+
// Returns region containing given address or nullptr.
AllRegionsSet::iterator FindRegion(Address address);
diff --git a/deps/v8/src/base/sanitizer/lsan-virtual-address-space.cc b/deps/v8/src/base/sanitizer/lsan-virtual-address-space.cc
new file mode 100644
index 0000000000..1877c44b7b
--- /dev/null
+++ b/deps/v8/src/base/sanitizer/lsan-virtual-address-space.cc
@@ -0,0 +1,61 @@
+// Copyright 2021 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/base/sanitizer/lsan-virtual-address-space.h"
+
+#include "include/v8-platform.h"
+#include "src/base/logging.h"
+
+#if defined(LEAK_SANITIZER)
+#include <sanitizer/lsan_interface.h>
+#endif
+
+namespace v8 {
+namespace base {
+
+LsanVirtualAddressSpace::LsanVirtualAddressSpace(
+ std::unique_ptr<v8::VirtualAddressSpace> vas)
+ : VirtualAddressSpace(vas->page_size(), vas->allocation_granularity(),
+ vas->base(), vas->size()),
+ vas_(std::move(vas)) {
+ DCHECK_NOT_NULL(vas_);
+}
+
+Address LsanVirtualAddressSpace::AllocatePages(Address hint, size_t size,
+ size_t alignment,
+ PagePermissions permissions) {
+ Address result = vas_->AllocatePages(hint, size, alignment, permissions);
+#if defined(LEAK_SANITIZER)
+ if (result != 0) {
+ __lsan_register_root_region(reinterpret_cast<void*>(result), size);
+ }
+#endif // defined(LEAK_SANITIZER)
+ return result;
+}
+
+bool LsanVirtualAddressSpace::FreePages(Address address, size_t size) {
+ bool result = vas_->FreePages(address, size);
+#if defined(LEAK_SANITIZER)
+ if (result) {
+ __lsan_unregister_root_region(reinterpret_cast<void*>(address), size);
+ }
+#endif // defined(LEAK_SANITIZER)
+ return result;
+}
+
+std::unique_ptr<VirtualAddressSpace> LsanVirtualAddressSpace::AllocateSubspace(
+ Address hint, size_t size, size_t alignment,
+ PagePermissions max_permissions) {
+ auto subspace =
+ vas_->AllocateSubspace(hint, size, alignment, max_permissions);
+#if defined(LEAK_SANITIZER)
+ if (subspace) {
+ subspace = std::make_unique<LsanVirtualAddressSpace>(std::move(subspace));
+ }
+#endif // defined(LEAK_SANITIZER)
+ return subspace;
+}
+
+} // namespace base
+} // namespace v8
diff --git a/deps/v8/src/base/sanitizer/lsan-virtual-address-space.h b/deps/v8/src/base/sanitizer/lsan-virtual-address-space.h
new file mode 100644
index 0000000000..cc16561710
--- /dev/null
+++ b/deps/v8/src/base/sanitizer/lsan-virtual-address-space.h
@@ -0,0 +1,63 @@
+// Copyright 2021 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_BASE_SANITIZER_LSAN_VIRTUAL_ADDRESS_SPACE_H_
+#define V8_BASE_SANITIZER_LSAN_VIRTUAL_ADDRESS_SPACE_H_
+
+#include "include/v8-platform.h"
+#include "src/base/base-export.h"
+#include "src/base/compiler-specific.h"
+
+namespace v8 {
+namespace base {
+
+using Address = uintptr_t;
+
+// This is a v8::VirtualAddressSpace implementation that decorates the provided
+// virtual address space with leak sanitizer notifications when LEAK_SANITIZER
+// is defined.
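+//
+// Illustrative usage sketch (not part of this change): wrap a concrete address
+// space (e.g. the base::VirtualAddressSpace added in this patch) so that its
+// page allocations are registered as LSan root regions.
+//
+//   std::unique_ptr<v8::VirtualAddressSpace> vas =
+//       std::make_unique<base::VirtualAddressSpace>();
+//   LsanVirtualAddressSpace lsan_vas(std::move(vas));
+//   Address pages = lsan_vas.AllocatePages(
+//       0, lsan_vas.allocation_granularity(),
+//       lsan_vas.allocation_granularity(), PagePermissions::kReadWrite);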
+class V8_BASE_EXPORT LsanVirtualAddressSpace final
+ : public v8::VirtualAddressSpace {
+ public:
+ explicit LsanVirtualAddressSpace(
+ std::unique_ptr<v8::VirtualAddressSpace> vas);
+ ~LsanVirtualAddressSpace() override = default;
+
+ void SetRandomSeed(int64_t seed) override {
+ return vas_->SetRandomSeed(seed);
+ }
+
+ Address RandomPageAddress() override { return vas_->RandomPageAddress(); }
+
+ Address AllocatePages(Address hint, size_t size, size_t alignment,
+ PagePermissions permissions) override;
+
+ bool FreePages(Address address, size_t size) override;
+
+ bool SetPagePermissions(Address address, size_t size,
+ PagePermissions permissions) override {
+ return vas_->SetPagePermissions(address, size, permissions);
+ }
+
+ bool CanAllocateSubspaces() override { return vas_->CanAllocateSubspaces(); }
+
+ std::unique_ptr<VirtualAddressSpace> AllocateSubspace(
+ Address hint, size_t size, size_t alignment,
+ PagePermissions max_permissions) override;
+
+ bool DiscardSystemPages(Address address, size_t size) override {
+ return vas_->DiscardSystemPages(address, size);
+ }
+
+ bool DecommitPages(Address address, size_t size) override {
+ return vas_->DecommitPages(address, size);
+ }
+
+ private:
+ std::unique_ptr<v8::VirtualAddressSpace> vas_;
+};
+
+} // namespace base
+} // namespace v8
+#endif // V8_BASE_SANITIZER_LSAN_VIRTUAL_ADDRESS_SPACE_H_
diff --git a/deps/v8/src/base/small-vector.h b/deps/v8/src/base/small-vector.h
index 9b866dde6b..30850013dc 100644
--- a/deps/v8/src/base/small-vector.h
+++ b/deps/v8/src/base/small-vector.h
@@ -11,14 +11,13 @@
#include "src/base/bits.h"
#include "src/base/macros.h"
-#include "src/base/platform/wrappers.h"
namespace v8 {
namespace base {
// Minimal SmallVector implementation. Uses inline storage first, switches to
-// malloc when it overflows.
-template <typename T, size_t kSize>
+// dynamic storage when it overflows.
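+//
+// Illustrative usage sketch (not part of this change): the Allocator parameter
+// defaults to std::allocator<T>, so existing uses are unchanged; a custom
+// allocator type (|MyAllocator| is hypothetical) can be supplied explicitly:
+//
+//   base::SmallVector<int, 8> a;
+//   base::SmallVector<int, 8, MyAllocator> b(MyAllocator{});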
+template <typename T, size_t kSize, typename Allocator = std::allocator<T>>
class SmallVector {
// Currently only support trivially copyable and trivially destructible data
// types, as it uses memcpy to copy elements and never calls destructors.
@@ -28,17 +27,31 @@ class SmallVector {
public:
static constexpr size_t kInlineSize = kSize;
- SmallVector() = default;
- explicit SmallVector(size_t size) { resize_no_init(size); }
- SmallVector(const SmallVector& other) V8_NOEXCEPT { *this = other; }
- SmallVector(SmallVector&& other) V8_NOEXCEPT { *this = std::move(other); }
- SmallVector(std::initializer_list<T> init) {
+ explicit SmallVector(const Allocator& allocator = Allocator())
+ : allocator_(allocator) {}
+ explicit SmallVector(size_t size, const Allocator& allocator = Allocator())
+ : allocator_(allocator) {
+ resize_no_init(size);
+ }
+ SmallVector(const SmallVector& other,
+ const Allocator& allocator = Allocator()) V8_NOEXCEPT
+ : allocator_(allocator) {
+ *this = other;
+ }
+ SmallVector(SmallVector&& other,
+ const Allocator& allocator = Allocator()) V8_NOEXCEPT
+ : allocator_(allocator) {
+ *this = std::move(other);
+ }
+ SmallVector(std::initializer_list<T> init,
+ const Allocator& allocator = Allocator())
+ : allocator_(allocator) {
resize_no_init(init.size());
memcpy(begin_, init.begin(), sizeof(T) * init.size());
}
~SmallVector() {
- if (is_big()) base::Free(begin_);
+ if (is_big()) FreeDynamicStorage();
}
SmallVector& operator=(const SmallVector& other) V8_NOEXCEPT {
@@ -46,8 +59,8 @@ class SmallVector {
size_t other_size = other.size();
if (capacity() < other_size) {
// Create large-enough heap-allocated storage.
- if (is_big()) base::Free(begin_);
- begin_ = reinterpret_cast<T*>(base::Malloc(sizeof(T) * other_size));
+ if (is_big()) FreeDynamicStorage();
+ begin_ = AllocateDynamicStorage(other_size);
end_of_storage_ = begin_ + other_size;
}
memcpy(begin_, other.begin_, sizeof(T) * other_size);
@@ -58,11 +71,11 @@ class SmallVector {
SmallVector& operator=(SmallVector&& other) V8_NOEXCEPT {
if (this == &other) return *this;
if (other.is_big()) {
- if (is_big()) base::Free(begin_);
+ if (is_big()) FreeDynamicStorage();
begin_ = other.begin_;
end_ = other.end_;
end_of_storage_ = other.end_of_storage_;
- other.reset();
+ other.reset_to_inline_storage();
} else {
DCHECK_GE(capacity(), other.size()); // Sanity check.
size_t other_size = other.size();
@@ -126,17 +139,12 @@ class SmallVector {
end_ = begin_ + new_size;
}
- // Clear without freeing any storage.
+ // Clear without reverting back to inline storage.
void clear() { end_ = begin_; }
- // Clear and go back to inline storage.
- void reset() {
- begin_ = inline_storage_begin();
- end_ = begin_;
- end_of_storage_ = begin_ + kInlineSize;
- }
-
private:
+ V8_NO_UNIQUE_ADDRESS Allocator allocator_;
+
T* begin_ = inline_storage_begin();
T* end_ = begin_;
T* end_of_storage_ = begin_ + kInlineSize;
@@ -152,8 +160,7 @@ class SmallVector {
size_t in_use = end_ - begin_;
size_t new_capacity =
base::bits::RoundUpToPowerOfTwo(std::max(min_capacity, 2 * capacity()));
- T* new_storage =
- reinterpret_cast<T*>(base::Malloc(sizeof(T) * new_capacity));
+ T* new_storage = AllocateDynamicStorage(new_capacity);
if (new_storage == nullptr) {
// Should be: V8::FatalProcessOutOfMemory, but we don't include V8 from
// base. The message is intentionally the same as FatalProcessOutOfMemory
@@ -162,13 +169,30 @@ class SmallVector {
FATAL("Fatal process out of memory: base::SmallVector::Grow");
}
memcpy(new_storage, begin_, sizeof(T) * in_use);
- if (is_big()) base::Free(begin_);
+ if (is_big()) FreeDynamicStorage();
begin_ = new_storage;
end_ = new_storage + in_use;
end_of_storage_ = new_storage + new_capacity;
return end_;
}
+ T* AllocateDynamicStorage(size_t number_of_elements) {
+ return allocator_.allocate(number_of_elements);
+ }
+
+ void FreeDynamicStorage() {
+ DCHECK(is_big());
+ allocator_.deallocate(begin_, end_of_storage_ - begin_);
+ }
+
+ // Clear and go back to inline storage. Dynamic storage is *not* freed. For
+ // internal use only.
+ void reset_to_inline_storage() {
+ begin_ = inline_storage_begin();
+ end_ = begin_;
+ end_of_storage_ = begin_ + kInlineSize;
+ }
+
bool is_big() const { return begin_ != inline_storage_begin(); }
T* inline_storage_begin() { return reinterpret_cast<T*>(&inline_storage_); }
diff --git a/deps/v8/src/base/virtual-address-space-page-allocator.cc b/deps/v8/src/base/virtual-address-space-page-allocator.cc
new file mode 100644
index 0000000000..297b9adbf9
--- /dev/null
+++ b/deps/v8/src/base/virtual-address-space-page-allocator.cc
@@ -0,0 +1,69 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/base/virtual-address-space-page-allocator.h"
+
+namespace v8 {
+namespace base {
+
+VirtualAddressSpacePageAllocator::VirtualAddressSpacePageAllocator(
+ v8::VirtualAddressSpace* vas)
+ : vas_(vas) {}
+
+void* VirtualAddressSpacePageAllocator::AllocatePages(
+ void* hint, size_t size, size_t alignment,
+ PageAllocator::Permission access) {
+ return reinterpret_cast<void*>(
+ vas_->AllocatePages(reinterpret_cast<Address>(hint), size, alignment,
+ static_cast<PagePermissions>(access)));
+}
+
+bool VirtualAddressSpacePageAllocator::FreePages(void* ptr, size_t size) {
+ MutexGuard guard(&mutex_);
+ Address address = reinterpret_cast<Address>(ptr);
+ // Was this allocation resized previously? If so, use the original size.
+ auto result = resized_allocations_.find(address);
+ if (result != resized_allocations_.end()) {
+ size = result->second;
+ resized_allocations_.erase(result);
+ }
+ return vas_->FreePages(address, size);
+}
+
+bool VirtualAddressSpacePageAllocator::ReleasePages(void* ptr, size_t size,
+ size_t new_size) {
+ // The VirtualAddressSpace class doesn't support this method because it can't
+ // be properly implemented on top of Windows placeholder mappings (they cannot
+ // be partially freed or resized while being allocated). Instead, we emulate
+ // this behaviour by decommitting the released pages, which in effect achieves
+ // exactly what ReleasePages would normally do as well. However, we still need
+ // to pass the original size to FreePages eventually, so we'll need to keep
+ // track of that.
+ DCHECK_LE(new_size, size);
+
+ MutexGuard guard(&mutex_);
+ // Will fail if the allocation was resized previously, which is desired.
+ Address address = reinterpret_cast<Address>(ptr);
+ resized_allocations_.insert({address, size});
+ return vas_->DecommitPages(address + new_size, size - new_size);
+}
+
+bool VirtualAddressSpacePageAllocator::SetPermissions(
+ void* address, size_t size, PageAllocator::Permission access) {
+ return vas_->SetPagePermissions(reinterpret_cast<Address>(address), size,
+ static_cast<PagePermissions>(access));
+}
+
+bool VirtualAddressSpacePageAllocator::DiscardSystemPages(void* address,
+ size_t size) {
+ return vas_->DiscardSystemPages(reinterpret_cast<Address>(address), size);
+}
+
+bool VirtualAddressSpacePageAllocator::DecommitPages(void* address,
+ size_t size) {
+ return vas_->DecommitPages(reinterpret_cast<Address>(address), size);
+}
+
+} // namespace base
+} // namespace v8
diff --git a/deps/v8/src/base/virtual-address-space-page-allocator.h b/deps/v8/src/base/virtual-address-space-page-allocator.h
new file mode 100644
index 0000000000..46368783cc
--- /dev/null
+++ b/deps/v8/src/base/virtual-address-space-page-allocator.h
@@ -0,0 +1,72 @@
+// Copyright 2021 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_BASE_VIRTUAL_ADDRESS_SPACE_PAGE_ALLOCATOR_H_
+#define V8_BASE_VIRTUAL_ADDRESS_SPACE_PAGE_ALLOCATOR_H_
+
+#include <unordered_map>
+
+#include "include/v8-platform.h"
+#include "src/base/base-export.h"
+#include "src/base/platform/platform.h"
+
+namespace v8 {
+namespace base {
+
+// This class bridges a VirtualAddressSpace, the future memory management API,
+// to a PageAllocator, the current API.
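+//
+// Illustrative usage sketch (not part of this change); the wrapped
+// VirtualAddressSpace must outlive the adapter:
+//
+//   v8::base::VirtualAddressSpace vas;
+//   v8::base::VirtualAddressSpacePageAllocator page_allocator(&vas);
+//   // |page_allocator| can now be handed to code that expects a
+//   // v8::PageAllocator.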
+class V8_BASE_EXPORT VirtualAddressSpacePageAllocator
+ : public v8::PageAllocator {
+ public:
+ using Address = uintptr_t;
+
+ explicit VirtualAddressSpacePageAllocator(v8::VirtualAddressSpace* vas);
+
+ VirtualAddressSpacePageAllocator(const VirtualAddressSpacePageAllocator&) =
+ delete;
+ VirtualAddressSpacePageAllocator& operator=(
+ const VirtualAddressSpacePageAllocator&) = delete;
+ ~VirtualAddressSpacePageAllocator() override = default;
+
+ size_t AllocatePageSize() override { return vas_->allocation_granularity(); }
+
+ size_t CommitPageSize() override { return vas_->page_size(); }
+
+ void SetRandomMmapSeed(int64_t seed) override { vas_->SetRandomSeed(seed); }
+
+ void* GetRandomMmapAddr() override {
+ return reinterpret_cast<void*>(vas_->RandomPageAddress());
+ }
+
+ void* AllocatePages(void* hint, size_t size, size_t alignment,
+ Permission access) override;
+
+ bool FreePages(void* address, size_t size) override;
+
+ bool ReleasePages(void* address, size_t size, size_t new_size) override;
+
+ bool SetPermissions(void* address, size_t size, Permission access) override;
+
+ bool DiscardSystemPages(void* address, size_t size) override;
+
+ bool DecommitPages(void* address, size_t size) override;
+
+ private:
+ // Clients of this class must keep the VirtualAddressSpace alive during the
+ // lifetime of this instance.
+ v8::VirtualAddressSpace* vas_;
+
+ // As the VirtualAddressSpace class doesn't support ReleasePages, this map is
+ // required to keep track of the original size of resized page allocations.
+ // See the ReleasePages implementation.
+ std::unordered_map<Address, size_t> resized_allocations_;
+
+ // Mutex guarding the above map.
+ Mutex mutex_;
+};
+
+} // namespace base
+} // namespace v8
+
+#endif // V8_BASE_VIRTUAL_ADDRESS_SPACE_PAGE_ALLOCATOR_H_
diff --git a/deps/v8/src/base/virtual-address-space.cc b/deps/v8/src/base/virtual-address-space.cc
new file mode 100644
index 0000000000..9907facb57
--- /dev/null
+++ b/deps/v8/src/base/virtual-address-space.cc
@@ -0,0 +1,262 @@
+// Copyright 2021 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/base/virtual-address-space.h"
+
+#include "include/v8-platform.h"
+#include "src/base/bits.h"
+#include "src/base/platform/platform.h"
+#include "src/base/platform/wrappers.h"
+
+namespace v8 {
+namespace base {
+
+#define STATIC_ASSERT_ENUM(a, b) \
+ static_assert(static_cast<int>(a) == static_cast<int>(b), \
+ "mismatching enum: " #a)
+
+STATIC_ASSERT_ENUM(PagePermissions::kNoAccess, OS::MemoryPermission::kNoAccess);
+STATIC_ASSERT_ENUM(PagePermissions::kReadWrite,
+ OS::MemoryPermission::kReadWrite);
+STATIC_ASSERT_ENUM(PagePermissions::kReadWriteExecute,
+ OS::MemoryPermission::kReadWriteExecute);
+STATIC_ASSERT_ENUM(PagePermissions::kReadExecute,
+ OS::MemoryPermission::kReadExecute);
+
+#undef STATIC_ASSERT_ENUM
+
+VirtualAddressSpace::VirtualAddressSpace()
+ : VirtualAddressSpaceBase(OS::CommitPageSize(), OS::AllocatePageSize(),
+ kNullAddress,
+ std::numeric_limits<uintptr_t>::max()) {
+#if V8_OS_WIN
+ // On Windows, this additional step is required to look up VirtualAlloc2 and
+ // related functions.
+ OS::EnsureWin32MemoryAPILoaded();
+#endif // V8_OS_WIN
+ DCHECK(bits::IsPowerOfTwo(page_size()));
+ DCHECK(bits::IsPowerOfTwo(allocation_granularity()));
+ DCHECK_GE(allocation_granularity(), page_size());
+ DCHECK(IsAligned(allocation_granularity(), page_size()));
+}
+
+void VirtualAddressSpace::SetRandomSeed(int64_t seed) {
+ OS::SetRandomMmapSeed(seed);
+}
+
+Address VirtualAddressSpace::RandomPageAddress() {
+ return reinterpret_cast<Address>(OS::GetRandomMmapAddr());
+}
+
+Address VirtualAddressSpace::AllocatePages(Address hint, size_t size,
+ size_t alignment,
+ PagePermissions permissions) {
+ DCHECK(IsAligned(alignment, allocation_granularity()));
+ DCHECK(IsAligned(hint, alignment));
+ DCHECK(IsAligned(size, allocation_granularity()));
+
+ return reinterpret_cast<Address>(
+ OS::Allocate(reinterpret_cast<void*>(hint), size, alignment,
+ static_cast<OS::MemoryPermission>(permissions)));
+}
+
+bool VirtualAddressSpace::FreePages(Address address, size_t size) {
+ DCHECK(IsAligned(address, allocation_granularity()));
+ DCHECK(IsAligned(size, allocation_granularity()));
+
+ return OS::Free(reinterpret_cast<void*>(address), size);
+}
+
+bool VirtualAddressSpace::SetPagePermissions(Address address, size_t size,
+ PagePermissions permissions) {
+ DCHECK(IsAligned(address, page_size()));
+ DCHECK(IsAligned(size, page_size()));
+
+ return OS::SetPermissions(reinterpret_cast<void*>(address), size,
+ static_cast<OS::MemoryPermission>(permissions));
+}
+
+bool VirtualAddressSpace::CanAllocateSubspaces() {
+ return OS::CanReserveAddressSpace();
+}
+
+std::unique_ptr<v8::VirtualAddressSpace> VirtualAddressSpace::AllocateSubspace(
+ Address hint, size_t size, size_t alignment,
+ PagePermissions max_permissions) {
+ DCHECK(IsAligned(alignment, allocation_granularity()));
+ DCHECK(IsAligned(hint, alignment));
+ DCHECK(IsAligned(size, allocation_granularity()));
+
+ base::Optional<AddressSpaceReservation> reservation =
+ OS::CreateAddressSpaceReservation(
+ reinterpret_cast<void*>(hint), size, alignment,
+ static_cast<OS::MemoryPermission>(max_permissions));
+ if (!reservation.has_value())
+ return std::unique_ptr<v8::VirtualAddressSpace>();
+ return std::unique_ptr<v8::VirtualAddressSpace>(
+ new VirtualAddressSubspace(*reservation, this));
+}
+
+bool VirtualAddressSpace::DiscardSystemPages(Address address, size_t size) {
+ DCHECK(IsAligned(address, page_size()));
+ DCHECK(IsAligned(size, page_size()));
+
+ return OS::DiscardSystemPages(reinterpret_cast<void*>(address), size);
+}
+
+bool VirtualAddressSpace::DecommitPages(Address address, size_t size) {
+ DCHECK(IsAligned(address, page_size()));
+ DCHECK(IsAligned(size, page_size()));
+
+ return OS::DecommitPages(reinterpret_cast<void*>(address), size);
+}
+
+bool VirtualAddressSpace::FreeSubspace(VirtualAddressSubspace* subspace) {
+ return OS::FreeAddressSpaceReservation(subspace->reservation_);
+}
+
+VirtualAddressSubspace::VirtualAddressSubspace(
+ AddressSpaceReservation reservation, VirtualAddressSpaceBase* parent_space)
+ : VirtualAddressSpaceBase(
+ parent_space->page_size(), parent_space->allocation_granularity(),
+ reinterpret_cast<Address>(reservation.base()), reservation.size()),
+ reservation_(reservation),
+ region_allocator_(reinterpret_cast<Address>(reservation.base()),
+ reservation.size(),
+ parent_space->allocation_granularity()),
+ parent_space_(parent_space) {
+#if V8_OS_WIN
+ // On Windows, the address space reservation needs to be split and merged at
+ // the OS level as well.
+ region_allocator_.set_on_split_callback([this](Address start, size_t size) {
+ DCHECK(IsAligned(start, allocation_granularity()));
+ CHECK(reservation_.SplitPlaceholder(reinterpret_cast<void*>(start), size));
+ });
+ region_allocator_.set_on_merge_callback([this](Address start, size_t size) {
+ DCHECK(IsAligned(start, allocation_granularity()));
+ CHECK(reservation_.MergePlaceholders(reinterpret_cast<void*>(start), size));
+ });
+#endif // V8_OS_WIN
+}
+
+VirtualAddressSubspace::~VirtualAddressSubspace() {
+ CHECK(parent_space_->FreeSubspace(this));
+}
+
+void VirtualAddressSubspace::SetRandomSeed(int64_t seed) {
+ MutexGuard guard(&mutex_);
+ rng_.SetSeed(seed);
+}
+
+Address VirtualAddressSubspace::RandomPageAddress() {
+ MutexGuard guard(&mutex_);
+ // Note: the random numbers generated here aren't uniformly distributed if the
+ // size isn't a power of two.
+ Address addr = base() + (rng_.NextInt64() % size());
+ return RoundDown(addr, allocation_granularity());
+}
+
+Address VirtualAddressSubspace::AllocatePages(Address hint, size_t size,
+ size_t alignment,
+ PagePermissions permissions) {
+ DCHECK(IsAligned(alignment, allocation_granularity()));
+ DCHECK(IsAligned(hint, alignment));
+ DCHECK(IsAligned(size, allocation_granularity()));
+
+ MutexGuard guard(&mutex_);
+
+ Address address = region_allocator_.AllocateRegion(hint, size, alignment);
+ if (address == RegionAllocator::kAllocationFailure) return kNullAddress;
+
+ if (!reservation_.Allocate(reinterpret_cast<void*>(address), size,
+ static_cast<OS::MemoryPermission>(permissions))) {
+ // This most likely means that we ran out of memory.
+ CHECK_EQ(size, region_allocator_.FreeRegion(address));
+ return kNullAddress;
+ }
+
+ return address;
+}
+
+bool VirtualAddressSubspace::FreePages(Address address, size_t size) {
+ DCHECK(IsAligned(address, allocation_granularity()));
+ DCHECK(IsAligned(size, allocation_granularity()));
+
+ MutexGuard guard(&mutex_);
+ if (region_allocator_.CheckRegion(address) != size) return false;
+
+ // The order here is important: on Windows, the allocation first has to be
+ // freed to a placeholder before the placeholder can be merged (during the
+ // merge_callback) with any surrounding placeholder mappings.
+ CHECK(reservation_.Free(reinterpret_cast<void*>(address), size));
+ CHECK_EQ(size, region_allocator_.FreeRegion(address));
+ return true;
+}
+
+bool VirtualAddressSubspace::SetPagePermissions(Address address, size_t size,
+ PagePermissions permissions) {
+ DCHECK(IsAligned(address, page_size()));
+ DCHECK(IsAligned(size, page_size()));
+
+ return reservation_.SetPermissions(
+ reinterpret_cast<void*>(address), size,
+ static_cast<OS::MemoryPermission>(permissions));
+}
+
+std::unique_ptr<v8::VirtualAddressSpace>
+VirtualAddressSubspace::AllocateSubspace(Address hint, size_t size,
+ size_t alignment,
+ PagePermissions max_permissions) {
+ DCHECK(IsAligned(alignment, allocation_granularity()));
+ DCHECK(IsAligned(hint, alignment));
+ DCHECK(IsAligned(size, allocation_granularity()));
+
+ MutexGuard guard(&mutex_);
+
+ Address address = region_allocator_.AllocateRegion(hint, size, alignment);
+ if (address == RegionAllocator::kAllocationFailure) {
+ return std::unique_ptr<v8::VirtualAddressSpace>();
+ }
+
+ base::Optional<AddressSpaceReservation> reservation =
+ reservation_.CreateSubReservation(
+ reinterpret_cast<void*>(address), size,
+ static_cast<OS::MemoryPermission>(max_permissions));
+ if (!reservation.has_value()) {
+ CHECK_EQ(size, region_allocator_.FreeRegion(address));
+ return nullptr;
+ }
+ return std::unique_ptr<v8::VirtualAddressSpace>(
+ new VirtualAddressSubspace(*reservation, this));
+}
+
+bool VirtualAddressSubspace::DiscardSystemPages(Address address, size_t size) {
+ DCHECK(IsAligned(address, page_size()));
+ DCHECK(IsAligned(size, page_size()));
+
+ return reservation_.DiscardSystemPages(reinterpret_cast<void*>(address),
+ size);
+}
+
+bool VirtualAddressSubspace::DecommitPages(Address address, size_t size) {
+ DCHECK(IsAligned(address, page_size()));
+ DCHECK(IsAligned(size, page_size()));
+
+ return reservation_.DecommitPages(reinterpret_cast<void*>(address), size);
+}
+
+bool VirtualAddressSubspace::FreeSubspace(VirtualAddressSubspace* subspace) {
+ MutexGuard guard(&mutex_);
+
+ AddressSpaceReservation reservation = subspace->reservation_;
+ Address base = reinterpret_cast<Address>(reservation.base());
+ if (region_allocator_.FreeRegion(base) != reservation.size()) {
+ return false;
+ }
+
+ return reservation_.FreeSubReservation(reservation);
+}
+
+} // namespace base
+} // namespace v8
diff --git a/deps/v8/src/base/virtual-address-space.h b/deps/v8/src/base/virtual-address-space.h
new file mode 100644
index 0000000000..5cfe462079
--- /dev/null
+++ b/deps/v8/src/base/virtual-address-space.h
@@ -0,0 +1,136 @@
+// Copyright 2021 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_BASE_VIRTUAL_ADDRESS_SPACE_H_
+#define V8_BASE_VIRTUAL_ADDRESS_SPACE_H_
+
+#include "include/v8-platform.h"
+#include "src/base/base-export.h"
+#include "src/base/compiler-specific.h"
+#include "src/base/platform/platform.h"
+#include "src/base/region-allocator.h"
+
+namespace v8 {
+namespace base {
+
+using Address = uintptr_t;
+constexpr Address kNullAddress = 0;
+
+class VirtualAddressSubspace;
+
+/*
+ * Common parent class to implement deletion of subspaces.
+ */
+class VirtualAddressSpaceBase
+ : public NON_EXPORTED_BASE(::v8::VirtualAddressSpace) {
+ public:
+ using VirtualAddressSpace::VirtualAddressSpace;
+
+ private:
+ friend VirtualAddressSubspace;
+ // Called by a subspace during destruction. Responsible for freeing the
+ // address space reservation and any other data associated with the subspace
+ // in the parent space.
+ virtual bool FreeSubspace(VirtualAddressSubspace* subspace) = 0;
+};
+
+/*
+ * The virtual address space of the current process. Conceptually, there
+ * should only be one such "root" instance. However, in practice there is no
+ * issue with having multiple instances as the actual resources are managed by
+ * the OS kernel.
+ */
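+/*
+ * Illustrative usage sketch (not part of this change); the subspace size below
+ * is a placeholder:
+ *
+ *   VirtualAddressSpace root_space;
+ *   if (root_space.CanAllocateSubspaces()) {
+ *     std::unique_ptr<v8::VirtualAddressSpace> subspace =
+ *         root_space.AllocateSubspace(kNullAddress, 1ULL << 30,
+ *                                     root_space.allocation_granularity(),
+ *                                     PagePermissions::kReadWrite);
+ *     // Pages can now be allocated from the reserved region via
+ *     // subspace->AllocatePages(...).
+ *   }
+ */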
+class V8_BASE_EXPORT VirtualAddressSpace : public VirtualAddressSpaceBase {
+ public:
+ VirtualAddressSpace();
+ ~VirtualAddressSpace() override = default;
+
+ void SetRandomSeed(int64_t seed) override;
+
+ Address RandomPageAddress() override;
+
+ Address AllocatePages(Address hint, size_t size, size_t alignment,
+ PagePermissions access) override;
+
+ bool FreePages(Address address, size_t size) override;
+
+ bool SetPagePermissions(Address address, size_t size,
+ PagePermissions access) override;
+
+ bool CanAllocateSubspaces() override;
+
+ std::unique_ptr<v8::VirtualAddressSpace> AllocateSubspace(
+ Address hint, size_t size, size_t alignment,
+ PagePermissions max_permissions) override;
+
+ bool DiscardSystemPages(Address address, size_t size) override;
+
+ bool DecommitPages(Address address, size_t size) override;
+
+ private:
+ bool FreeSubspace(VirtualAddressSubspace* subspace) override;
+};
+
+/*
+ * A subspace of a parent virtual address space. This represents a reserved
+ * contiguous region of virtual address space in the current process.
+ */
+class V8_BASE_EXPORT VirtualAddressSubspace : public VirtualAddressSpaceBase {
+ public:
+ ~VirtualAddressSubspace() override;
+
+ void SetRandomSeed(int64_t seed) override;
+
+ Address RandomPageAddress() override;
+
+ Address AllocatePages(Address hint, size_t size, size_t alignment,
+ PagePermissions permissions) override;
+
+ bool FreePages(Address address, size_t size) override;
+
+ bool SetPagePermissions(Address address, size_t size,
+ PagePermissions permissions) override;
+
+ bool CanAllocateSubspaces() override { return true; }
+
+ std::unique_ptr<v8::VirtualAddressSpace> AllocateSubspace(
+ Address hint, size_t size, size_t alignment,
+ PagePermissions max_permissions) override;
+
+ bool DiscardSystemPages(Address address, size_t size) override;
+
+ bool DecommitPages(Address address, size_t size) override;
+
+ private:
+ // The VirtualAddressSpace class creates instances of this class when
+ // allocating subspaces.
+ friend class v8::base::VirtualAddressSpace;
+
+ bool FreeSubspace(VirtualAddressSubspace* subspace) override;
+
+ VirtualAddressSubspace(AddressSpaceReservation reservation,
+ VirtualAddressSpaceBase* parent_space);
+
+ // The address space reservation backing this subspace.
+ AddressSpaceReservation reservation_;
+
+ // Mutex guarding the non-threadsafe RegionAllocator and
+ // RandomNumberGenerator.
+ Mutex mutex_;
+
+ // RegionAllocator to manage the virtual address reservation and divide it
+ // into further regions as necessary.
+ RegionAllocator region_allocator_;
+
+ // Random number generator for generating random addresses.
+ RandomNumberGenerator rng_;
+
+ // Pointer to the parent space. Must be kept alive by the owner of this
+ // instance during its lifetime.
+ VirtualAddressSpaceBase* parent_space_;
+};
+
+} // namespace base
+} // namespace v8
+#endif // V8_BASE_VIRTUAL_ADDRESS_SPACE_H_
diff --git a/deps/v8/src/base/win32-headers.h b/deps/v8/src/base/win32-headers.h
index 95aedd8c95..08eb44dc58 100644
--- a/deps/v8/src/base/win32-headers.h
+++ b/deps/v8/src/base/win32-headers.h
@@ -33,10 +33,8 @@
#ifndef NOMCX
#define NOMCX
#endif
-// Require Windows Vista or higher (this is required for the
-// QueryThreadCycleTime function to be present).
#ifndef _WIN32_WINNT
-#define _WIN32_WINNT 0x0600
+#error This should be set in build config files. See build\config\win\BUILD.gn
#endif
#include <signal.h> // For raise().
diff --git a/deps/v8/src/baseline/baseline-batch-compiler.cc b/deps/v8/src/baseline/baseline-batch-compiler.cc
index a34764744b..fe0e9d84cc 100644
--- a/deps/v8/src/baseline/baseline-batch-compiler.cc
+++ b/deps/v8/src/baseline/baseline-batch-compiler.cc
@@ -9,6 +9,8 @@
#include "src/flags/flags.h"
#if ENABLE_SPARKPLUG
+#include <algorithm>
+
#include "src/baseline/baseline-compiler.h"
#include "src/codegen/compiler.h"
#include "src/execution/isolate.h"
@@ -56,7 +58,13 @@ class BaselineCompilerTask {
if (FLAG_print_code) {
code->Print();
}
- shared_function_info_->set_baseline_code(*code, kReleaseStore);
+ // Don't install the code if the bytecode has been flushed or baseline
+ // code has already been installed.
+ if (!shared_function_info_->is_compiled() ||
+ shared_function_info_->HasBaselineCode()) {
+ return;
+ }
+ shared_function_info_->set_baseline_code(ToCodeT(*code), kReleaseStore);
if (V8_LIKELY(FLAG_use_osr)) {
// Arm back edges for OSR
shared_function_info_->GetBytecodeArray(isolate)
@@ -162,8 +170,12 @@ class ConcurrentBaselineCompiler {
void Run(JobDelegate* delegate) override {
while (!incoming_queue_->IsEmpty() && !delegate->ShouldYield()) {
+ // Since we're going to compile an entire batch, this guarantees that
+ // we only switch the memory chunks back to RX at the end.
+ CodePageCollectionMemoryModificationScope batch_alloc(isolate_->heap());
std::unique_ptr<BaselineBatchCompilerJob> job;
- incoming_queue_->Dequeue(&job);
+ if (!incoming_queue_->Dequeue(&job)) break;
+ DCHECK_NOT_NULL(job);
job->Compile();
outgoing_queue_->Enqueue(std::move(job));
}
@@ -171,6 +183,10 @@ class ConcurrentBaselineCompiler {
}
size_t GetMaxConcurrency(size_t worker_count) const override {
+ size_t max_threads = FLAG_concurrent_sparkplug_max_threads;
+ if (max_threads > 0) {
+ return std::min(max_threads, incoming_queue_->size());
+ }
return incoming_queue_->size();
}
diff --git a/deps/v8/src/baseline/baseline-compiler.cc b/deps/v8/src/baseline/baseline-compiler.cc
index 071e46268e..3ef0c68727 100644
--- a/deps/v8/src/baseline/baseline-compiler.cc
+++ b/deps/v8/src/baseline/baseline-compiler.cc
@@ -1043,62 +1043,62 @@ void BaselineCompiler::VisitShiftRightLogical() {
}
void BaselineCompiler::VisitAddSmi() {
- CallBuiltin<Builtin::kAdd_Baseline>(kInterpreterAccumulatorRegister,
- IntAsSmi(0), Index(1));
+ CallBuiltin<Builtin::kAddSmi_Baseline>(kInterpreterAccumulatorRegister,
+ IntAsSmi(0), Index(1));
}
void BaselineCompiler::VisitSubSmi() {
- CallBuiltin<Builtin::kSubtract_Baseline>(kInterpreterAccumulatorRegister,
- IntAsSmi(0), Index(1));
+ CallBuiltin<Builtin::kSubtractSmi_Baseline>(kInterpreterAccumulatorRegister,
+ IntAsSmi(0), Index(1));
}
void BaselineCompiler::VisitMulSmi() {
- CallBuiltin<Builtin::kMultiply_Baseline>(kInterpreterAccumulatorRegister,
- IntAsSmi(0), Index(1));
+ CallBuiltin<Builtin::kMultiplySmi_Baseline>(kInterpreterAccumulatorRegister,
+ IntAsSmi(0), Index(1));
}
void BaselineCompiler::VisitDivSmi() {
- CallBuiltin<Builtin::kDivide_Baseline>(kInterpreterAccumulatorRegister,
- IntAsSmi(0), Index(1));
+ CallBuiltin<Builtin::kDivideSmi_Baseline>(kInterpreterAccumulatorRegister,
+ IntAsSmi(0), Index(1));
}
void BaselineCompiler::VisitModSmi() {
- CallBuiltin<Builtin::kModulus_Baseline>(kInterpreterAccumulatorRegister,
- IntAsSmi(0), Index(1));
+ CallBuiltin<Builtin::kModulusSmi_Baseline>(kInterpreterAccumulatorRegister,
+ IntAsSmi(0), Index(1));
}
void BaselineCompiler::VisitExpSmi() {
- CallBuiltin<Builtin::kExponentiate_Baseline>(kInterpreterAccumulatorRegister,
- IntAsSmi(0), Index(1));
+ CallBuiltin<Builtin::kExponentiateSmi_Baseline>(
+ kInterpreterAccumulatorRegister, IntAsSmi(0), Index(1));
}
void BaselineCompiler::VisitBitwiseOrSmi() {
- CallBuiltin<Builtin::kBitwiseOr_Baseline>(kInterpreterAccumulatorRegister,
- IntAsSmi(0), Index(1));
+ CallBuiltin<Builtin::kBitwiseOrSmi_Baseline>(kInterpreterAccumulatorRegister,
+ IntAsSmi(0), Index(1));
}
void BaselineCompiler::VisitBitwiseXorSmi() {
- CallBuiltin<Builtin::kBitwiseXor_Baseline>(kInterpreterAccumulatorRegister,
- IntAsSmi(0), Index(1));
+ CallBuiltin<Builtin::kBitwiseXorSmi_Baseline>(kInterpreterAccumulatorRegister,
+ IntAsSmi(0), Index(1));
}
void BaselineCompiler::VisitBitwiseAndSmi() {
- CallBuiltin<Builtin::kBitwiseAnd_Baseline>(kInterpreterAccumulatorRegister,
- IntAsSmi(0), Index(1));
+ CallBuiltin<Builtin::kBitwiseAndSmi_Baseline>(kInterpreterAccumulatorRegister,
+ IntAsSmi(0), Index(1));
}
void BaselineCompiler::VisitShiftLeftSmi() {
- CallBuiltin<Builtin::kShiftLeft_Baseline>(kInterpreterAccumulatorRegister,
- IntAsSmi(0), Index(1));
+ CallBuiltin<Builtin::kShiftLeftSmi_Baseline>(kInterpreterAccumulatorRegister,
+ IntAsSmi(0), Index(1));
}
void BaselineCompiler::VisitShiftRightSmi() {
- CallBuiltin<Builtin::kShiftRight_Baseline>(kInterpreterAccumulatorRegister,
- IntAsSmi(0), Index(1));
+ CallBuiltin<Builtin::kShiftRightSmi_Baseline>(kInterpreterAccumulatorRegister,
+ IntAsSmi(0), Index(1));
}
void BaselineCompiler::VisitShiftRightLogicalSmi() {
- CallBuiltin<Builtin::kShiftRightLogical_Baseline>(
+ CallBuiltin<Builtin::kShiftRightLogicalSmi_Baseline>(
kInterpreterAccumulatorRegister, IntAsSmi(0), Index(1));
}
diff --git a/deps/v8/src/baseline/loong64/baseline-assembler-loong64-inl.h b/deps/v8/src/baseline/loong64/baseline-assembler-loong64-inl.h
index 33f792fce8..185bb349c2 100644
--- a/deps/v8/src/baseline/loong64/baseline-assembler-loong64-inl.h
+++ b/deps/v8/src/baseline/loong64/baseline-assembler-loong64-inl.h
@@ -414,26 +414,17 @@ void BaselineAssembler::Switch(Register reg, int case_value_base,
Label** labels, int num_labels) {
ASM_CODE_COMMENT(masm_);
Label fallthrough;
- if (case_value_base > 0) {
+ if (case_value_base != 0) {
__ Sub_d(reg, reg, Operand(case_value_base));
}
- ScratchRegisterScope scope(this);
- Register scratch = scope.AcquireScratch();
__ Branch(&fallthrough, AsMasmCondition(Condition::kUnsignedGreaterThanEqual),
reg, Operand(num_labels));
- int entry_size_log2 = 2;
- __ pcaddi(scratch, 3);
- __ Alsl_d(scratch, reg, scratch, entry_size_log2);
- __ Jump(scratch);
- {
- TurboAssembler::BlockTrampolinePoolScope(masm());
- __ BlockTrampolinePoolFor(num_labels * kInstrSize);
- for (int i = 0; i < num_labels; ++i) {
- __ Branch(labels[i]);
- }
- __ bind(&fallthrough);
- }
+
+ __ GenerateSwitchTable(reg, num_labels,
+ [labels](size_t i) { return labels[i]; });
+
+ __ bind(&fallthrough);
}
#undef __
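
The loong64 (and, below, mips and mips64) Switch lowering now delegates to the shared GenerateSwitchTable helper, with the lambda supplying the i-th jump target, and the guard changes from case_value_base > 0 to != 0 so negative bases are subtracted as well. A conceptual model of the emitted dispatch, hedged and with illustrative names only (SwitchModel is not a V8 function):

// Conceptual model only: after subtracting case_value_base, an out-of-range
// index (checked unsigned, matching kUnsignedGreaterThanEqual) falls through;
// otherwise control transfers through a table of per-case branches.
void SwitchModel(int value, int case_value_base, void (*const* labels)(),
                 int num_labels, void (*fallthrough)()) {
  unsigned index = static_cast<unsigned>(value - case_value_base);
  if (index >= static_cast<unsigned>(num_labels)) {
    fallthrough();
    return;
  }
  labels[index]();  // the real code emits an indexed, PC-relative jump table
}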
diff --git a/deps/v8/src/baseline/mips/baseline-assembler-mips-inl.h b/deps/v8/src/baseline/mips/baseline-assembler-mips-inl.h
index 996b4ba831..9cc0e749bd 100644
--- a/deps/v8/src/baseline/mips/baseline-assembler-mips-inl.h
+++ b/deps/v8/src/baseline/mips/baseline-assembler-mips-inl.h
@@ -426,29 +426,17 @@ void BaselineAssembler::Switch(Register reg, int case_value_base,
Label** labels, int num_labels) {
ASM_CODE_COMMENT(masm_);
Label fallthrough;
- if (case_value_base > 0) {
+ if (case_value_base != 0) {
__ Subu(reg, reg, Operand(case_value_base));
}
- ScratchRegisterScope scope(this);
- Register temp = scope.AcquireScratch();
__ Branch(&fallthrough, AsMasmCondition(Condition::kUnsignedGreaterThanEqual),
reg, Operand(num_labels));
- __ push(ra);
- int entry_size_log2 = 3;
- __ nal();
- __ addiu(reg, reg, 3);
- __ Lsa(temp, ra, reg, entry_size_log2);
- __ pop(ra);
- __ Jump(temp);
- {
- TurboAssembler::BlockTrampolinePoolScope(masm());
- __ BlockTrampolinePoolFor(num_labels * kInstrSize * 2);
- for (int i = 0; i < num_labels; ++i) {
- __ Branch(labels[i]);
- }
- __ bind(&fallthrough);
- }
+
+ __ GenerateSwitchTable(reg, num_labels,
+ [labels](size_t i) { return labels[i]; });
+
+ __ bind(&fallthrough);
}
#undef __
diff --git a/deps/v8/src/baseline/mips64/baseline-assembler-mips64-inl.h b/deps/v8/src/baseline/mips64/baseline-assembler-mips64-inl.h
index 18e0c3445d..3f4dd6d455 100644
--- a/deps/v8/src/baseline/mips64/baseline-assembler-mips64-inl.h
+++ b/deps/v8/src/baseline/mips64/baseline-assembler-mips64-inl.h
@@ -424,29 +424,17 @@ void BaselineAssembler::Switch(Register reg, int case_value_base,
Label** labels, int num_labels) {
ASM_CODE_COMMENT(masm_);
Label fallthrough;
- if (case_value_base > 0) {
+ if (case_value_base != 0) {
__ Dsubu(reg, reg, Operand(case_value_base));
}
- ScratchRegisterScope scope(this);
- Register temp = scope.AcquireScratch();
__ Branch(&fallthrough, AsMasmCondition(Condition::kUnsignedGreaterThanEqual),
reg, Operand(num_labels));
- __ push(ra);
- int entry_size_log2 = 3;
- __ nal();
- __ daddiu(reg, reg, 3);
- __ Dlsa(temp, ra, reg, entry_size_log2);
- __ pop(ra);
- __ Jump(temp);
- {
- TurboAssembler::BlockTrampolinePoolScope(masm());
- __ BlockTrampolinePoolFor(num_labels * kInstrSize * 2);
- for (int i = 0; i < num_labels; ++i) {
- __ Branch(labels[i]);
- }
- __ bind(&fallthrough);
- }
+
+ __ GenerateSwitchTable(reg, num_labels,
+ [labels](size_t i) { return labels[i]; });
+
+ __ bind(&fallthrough);
}
#undef __
diff --git a/deps/v8/src/baseline/riscv64/baseline-assembler-riscv64-inl.h b/deps/v8/src/baseline/riscv64/baseline-assembler-riscv64-inl.h
index 85ada600f1..96420093d1 100644
--- a/deps/v8/src/baseline/riscv64/baseline-assembler-riscv64-inl.h
+++ b/deps/v8/src/baseline/riscv64/baseline-assembler-riscv64-inl.h
@@ -437,6 +437,7 @@ void BaselineAssembler::Switch(Register reg, int case_value_base,
CHECK(is_int32(imm64 + 0x800));
int32_t Hi20 = (((int32_t)imm64 + 0x800) >> 12);
int32_t Lo12 = (int32_t)imm64 << 20 >> 20;
+ __ BlockTrampolinePoolFor(2);
__ auipc(t6, Hi20); // Read PC + Hi20 into t6
__ addi(t6, t6, Lo12); // jump PC + Hi20 + Lo12
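
The riscv64 change above adds BlockTrampolinePoolFor(2) so no trampoline pool can be emitted across the auipc/addi pair; presumably the precomputed Hi20/Lo12 offset would otherwise point at code that has been shifted. A hedged sketch of that decomposition (SplitOffset is an illustrative helper, not a V8 function):

// Hedged sketch: a PC-relative offset is rebuilt as (Hi20 << 12) + Lo12 with
// Lo12 sign-extended, so 0x800 is added before taking the upper bits to
// compensate for a negative Lo12. Assumes the offset fits in 32 bits.
#include <cassert>
#include <cstdint>

void SplitOffset(std::int64_t imm, std::int32_t* hi20, std::int32_t* lo12) {
  *lo12 = static_cast<std::int32_t>(imm & 0xfff);
  if (*lo12 >= 0x800) *lo12 -= 0x1000;  // sign-extend the low 12 bits
  *hi20 = static_cast<std::int32_t>((imm + 0x800) >> 12);
  // The two halves reassemble the original offset.
  assert(static_cast<std::int64_t>(*hi20) * 4096 + *lo12 == imm);
}

int main() {
  std::int32_t hi, lo;
  SplitOffset(0x1234f, &hi, &lo);  // Lo12 = 0x34f, Hi20 = 0x12
  SplitOffset(-0x801, &hi, &lo);   // negative offsets work the same way
}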
diff --git a/deps/v8/src/baseline/s390/baseline-assembler-s390-inl.h b/deps/v8/src/baseline/s390/baseline-assembler-s390-inl.h
index c73f080ecb..ce7afbf4ea 100644
--- a/deps/v8/src/baseline/s390/baseline-assembler-s390-inl.h
+++ b/deps/v8/src/baseline/s390/baseline-assembler-s390-inl.h
@@ -13,78 +13,148 @@ namespace v8 {
namespace internal {
namespace baseline {
+namespace detail {
+
+static constexpr Register kScratchRegisters[] = {r8, r9, ip, r1};
+static constexpr int kNumScratchRegisters = arraysize(kScratchRegisters);
+
+#ifdef DEBUG
+inline bool Clobbers(Register target, MemOperand op) {
+ return op.rb() == target || op.rx() == target;
+}
+#endif
+} // namespace detail
+
class BaselineAssembler::ScratchRegisterScope {
public:
explicit ScratchRegisterScope(BaselineAssembler* assembler)
: assembler_(assembler),
prev_scope_(assembler->scratch_register_scope_),
- wrapped_scope_(assembler->masm()) {
- if (!assembler_->scratch_register_scope_) {
- // If we haven't opened a scratch scope yet, for the first one add a
- // couple of extra registers.
- DCHECK(wrapped_scope_.CanAcquire());
- wrapped_scope_.Include(r8, r9);
- wrapped_scope_.Include(kInterpreterBytecodeOffsetRegister);
- }
+ registers_used_(prev_scope_ == nullptr ? 0
+ : prev_scope_->registers_used_) {
assembler_->scratch_register_scope_ = this;
}
~ScratchRegisterScope() { assembler_->scratch_register_scope_ = prev_scope_; }
- Register AcquireScratch() { return wrapped_scope_.Acquire(); }
+ Register AcquireScratch() {
+ DCHECK_LT(registers_used_, detail::kNumScratchRegisters);
+ return detail::kScratchRegisters[registers_used_++];
+ }
private:
BaselineAssembler* assembler_;
ScratchRegisterScope* prev_scope_;
- UseScratchRegisterScope wrapped_scope_;
+ int registers_used_;
};
// TODO(v8:11429,leszeks): Unify condition names in the MacroAssembler.
enum class Condition : uint32_t {
- kEqual = static_cast<uint32_t>(eq),
- kNotEqual = static_cast<uint32_t>(ne),
+ kEqual,
+ kNotEqual,
- kLessThan = static_cast<uint32_t>(lt),
- kGreaterThan = static_cast<uint32_t>(gt),
- kLessThanEqual = static_cast<uint32_t>(le),
- kGreaterThanEqual = static_cast<uint32_t>(ge),
+ kLessThan,
+ kGreaterThan,
+ kLessThanEqual,
+ kGreaterThanEqual,
- kUnsignedLessThan = static_cast<uint32_t>(lo),
- kUnsignedGreaterThan = static_cast<uint32_t>(hi),
- kUnsignedLessThanEqual = static_cast<uint32_t>(ls),
- kUnsignedGreaterThanEqual = static_cast<uint32_t>(hs),
+ kUnsignedLessThan,
+ kUnsignedGreaterThan,
+ kUnsignedLessThanEqual,
+ kUnsignedGreaterThanEqual,
- kOverflow = static_cast<uint32_t>(vs),
- kNoOverflow = static_cast<uint32_t>(vc),
+ kOverflow,
+ kNoOverflow,
- kZero = static_cast<uint32_t>(eq),
- kNotZero = static_cast<uint32_t>(ne),
+ kZero,
+ kNotZero
};
inline internal::Condition AsMasmCondition(Condition cond) {
- UNIMPLEMENTED();
- return static_cast<internal::Condition>(cond);
+ STATIC_ASSERT(sizeof(internal::Condition) == sizeof(Condition));
+ switch (cond) {
+ case Condition::kEqual:
+ return eq;
+ case Condition::kNotEqual:
+ return ne;
+
+ case Condition::kLessThan:
+ return lt;
+ case Condition::kGreaterThan:
+ return gt;
+ case Condition::kLessThanEqual:
+ return le;
+ case Condition::kGreaterThanEqual:
+ return ge;
+
+ case Condition::kUnsignedLessThan:
+ return lt;
+ case Condition::kUnsignedGreaterThan:
+ return gt;
+ case Condition::kUnsignedLessThanEqual:
+ return le;
+ case Condition::kUnsignedGreaterThanEqual:
+ return ge;
+
+ case Condition::kOverflow:
+ return overflow;
+ case Condition::kNoOverflow:
+ return nooverflow;
+
+ case Condition::kZero:
+ return eq;
+ case Condition::kNotZero:
+ return ne;
+ default:
+ UNREACHABLE();
+ }
}
-namespace detail {
+inline bool IsSignedCondition(Condition cond) {
+ switch (cond) {
+ case Condition::kEqual:
+ case Condition::kNotEqual:
+ case Condition::kLessThan:
+ case Condition::kGreaterThan:
+ case Condition::kLessThanEqual:
+ case Condition::kGreaterThanEqual:
+ case Condition::kOverflow:
+ case Condition::kNoOverflow:
+ case Condition::kZero:
+ case Condition::kNotZero:
+ return true;
+
+ case Condition::kUnsignedLessThan:
+ case Condition::kUnsignedGreaterThan:
+ case Condition::kUnsignedLessThanEqual:
+ case Condition::kUnsignedGreaterThanEqual:
+ return false;
+
+ default:
+ UNREACHABLE();
+ }
+}
-#ifdef DEBUG
-inline bool Clobbers(Register target, MemOperand op) {
- UNIMPLEMENTED();
- return false;
+#define __ assm->masm()->
+// s390x helper
+void JumpIfHelper(BaselineAssembler* assm, Condition cc, Register lhs,
+ Register rhs, Label* target) {
+ if (IsSignedCondition(cc)) {
+ __ CmpS64(lhs, rhs);
+ } else {
+ __ CmpU64(lhs, rhs);
+ }
+ __ b(AsMasmCondition(cc), target);
}
-#endif
-} // namespace detail
+#undef __
#define __ masm_->
MemOperand BaselineAssembler::RegisterFrameOperand(
interpreter::Register interpreter_register) {
- UNIMPLEMENTED();
return MemOperand(fp, interpreter_register.ToOperand() * kSystemPointerSize);
}
MemOperand BaselineAssembler::FeedbackVectorOperand() {
- UNIMPLEMENTED();
return MemOperand(fp, BaselineFrameConstants::kFeedbackVectorFromFp);
}
@@ -93,83 +163,129 @@ void BaselineAssembler::BindWithoutJumpTarget(Label* label) { __ bind(label); }
void BaselineAssembler::JumpTarget() {
// NOP on arm.
- UNIMPLEMENTED();
}
void BaselineAssembler::Jump(Label* target, Label::Distance distance) {
- UNIMPLEMENTED();
+ __ b(target);
}
void BaselineAssembler::JumpIfRoot(Register value, RootIndex index,
Label* target, Label::Distance) {
- UNIMPLEMENTED();
+ __ JumpIfRoot(value, index, target);
}
void BaselineAssembler::JumpIfNotRoot(Register value, RootIndex index,
Label* target, Label::Distance) {
- UNIMPLEMENTED();
+ __ JumpIfNotRoot(value, index, target);
}
void BaselineAssembler::JumpIfSmi(Register value, Label* target,
Label::Distance) {
- UNIMPLEMENTED();
+ __ JumpIfSmi(value, target);
}
void BaselineAssembler::JumpIfNotSmi(Register value, Label* target,
Label::Distance) {
- UNIMPLEMENTED();
+ __ JumpIfNotSmi(value, target);
+}
+
+void BaselineAssembler::CallBuiltin(Builtin builtin) {
+ if (masm()->options().short_builtin_calls) {
+ // Generate pc-relative call.
+ __ CallBuiltin(builtin);
+ } else {
+ ScratchRegisterScope temps(this);
+ Register temp = temps.AcquireScratch();
+ __ LoadEntryFromBuiltin(builtin, temp);
+ __ Call(temp);
+ }
}
-void BaselineAssembler::CallBuiltin(Builtin builtin) { UNIMPLEMENTED(); }
-
void BaselineAssembler::TailCallBuiltin(Builtin builtin) {
ASM_CODE_COMMENT_STRING(masm_,
__ CommentForOffHeapTrampoline("tail call", builtin));
- UNIMPLEMENTED();
+ if (masm()->options().short_builtin_calls) {
+ // Generate pc-relative call.
+ __ TailCallBuiltin(builtin);
+ } else {
+ ScratchRegisterScope temps(this);
+ Register temp = temps.AcquireScratch();
+ __ LoadEntryFromBuiltin(builtin, temp);
+ __ Jump(temp);
+ }
}
void BaselineAssembler::TestAndBranch(Register value, int mask, Condition cc,
Label* target, Label::Distance) {
- UNIMPLEMENTED();
+ __ AndP(r0, value, Operand(mask));
+ __ b(AsMasmCondition(cc), target);
}
void BaselineAssembler::JumpIf(Condition cc, Register lhs, const Operand& rhs,
Label* target, Label::Distance) {
- UNIMPLEMENTED();
+ if (IsSignedCondition(cc)) {
+ __ CmpS64(lhs, rhs);
+ } else {
+ __ CmpU64(lhs, rhs);
+ }
+ __ b(AsMasmCondition(cc), target);
}
+
void BaselineAssembler::JumpIfObjectType(Condition cc, Register object,
InstanceType instance_type,
Register map, Label* target,
Label::Distance) {
- UNIMPLEMENTED();
+ ScratchRegisterScope temps(this);
+ Register type = temps.AcquireScratch();
+ __ LoadMap(map, object);
+ __ LoadU16(type, FieldMemOperand(map, Map::kInstanceTypeOffset));
+ JumpIf(cc, type, Operand(instance_type), target);
}
void BaselineAssembler::JumpIfInstanceType(Condition cc, Register map,
InstanceType instance_type,
Label* target, Label::Distance) {
- UNIMPLEMENTED();
+ ScratchRegisterScope temps(this);
+ Register type = temps.AcquireScratch();
+ if (FLAG_debug_code) {
+ __ AssertNotSmi(map);
+ __ CompareObjectType(map, type, type, MAP_TYPE);
+ __ Assert(eq, AbortReason::kUnexpectedValue);
+ }
+ __ LoadU16(type, FieldMemOperand(map, Map::kInstanceTypeOffset));
+ JumpIf(cc, type, Operand(instance_type), target);
}
void BaselineAssembler::JumpIfPointer(Condition cc, Register value,
MemOperand operand, Label* target,
Label::Distance) {
- UNIMPLEMENTED();
+ ScratchRegisterScope temps(this);
+ Register tmp = temps.AcquireScratch();
+ __ LoadU64(tmp, operand);
+ JumpIfHelper(this, cc, value, tmp, target);
}
+
void BaselineAssembler::JumpIfSmi(Condition cc, Register value, Smi smi,
Label* target, Label::Distance) {
- UNIMPLEMENTED();
+ __ AssertSmi(value);
+ __ LoadSmiLiteral(r0, smi);
+ JumpIfHelper(this, cc, value, r0, target);
}
void BaselineAssembler::JumpIfSmi(Condition cc, Register lhs, Register rhs,
Label* target, Label::Distance) {
- UNIMPLEMENTED();
+ __ AssertSmi(lhs);
+ __ AssertSmi(rhs);
+ JumpIfHelper(this, cc, lhs, rhs, target);
}
void BaselineAssembler::JumpIfTagged(Condition cc, Register value,
MemOperand operand, Label* target,
Label::Distance) {
- UNIMPLEMENTED();
+ __ LoadU64(r0, operand);
+ JumpIfHelper(this, cc, value, r0, target);
}
void BaselineAssembler::JumpIfTagged(Condition cc, MemOperand operand,
Register value, Label* target,
Label::Distance) {
- UNIMPLEMENTED();
+ __ LoadU64(r0, operand);
+ JumpIfHelper(this, cc, r0, value, target);
}
void BaselineAssembler::JumpIfByte(Condition cc, Register value, int32_t byte,
Label* target, Label::Distance) {
- UNIMPLEMENTED();
+ JumpIf(cc, value, Operand(byte), target);
}
void BaselineAssembler::Move(interpreter::Register output, Register source) {
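
The s390 port above replaces the wrapped UseScratchRegisterScope with a fixed pool of scratch registers and fills in the previously UNIMPLEMENTED() jump helpers; JumpIfHelper picks CmpS64 or CmpU64 depending on IsSignedCondition. A hedged model of the new scratch-register bookkeeping, with simplified stand-in types (ScratchScope is not the V8 class):

// Hedged model: scopes form a chain, an inner scope starts from the outer
// scope's high-water mark, so a register handed out by an outer scope is never
// handed out again by an inner one. Indices map into the fixed pool
// {r8, r9, ip, r1} in the real code.
#include <cassert>

class ScratchScope {
 public:
  explicit ScratchScope(ScratchScope** current)
      : current_(current), prev_(*current), used_(prev_ ? prev_->used_ : 0) {
    *current_ = this;
  }
  ~ScratchScope() { *current_ = prev_; }

  int Acquire() {
    assert(used_ < kNumScratch);
    return used_++;  // index into the fixed pool
  }

 private:
  static constexpr int kNumScratch = 4;
  ScratchScope** current_;
  ScratchScope* prev_;
  int used_;
};

int main() {
  ScratchScope* current = nullptr;
  ScratchScope outer(&current);
  int a = outer.Acquire();      // 0
  {
    ScratchScope inner(&current);
    int b = inner.Acquire();    // 1, never clashes with the outer scope's 0
    (void)b;
  }
  (void)a;
}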
diff --git a/deps/v8/src/bigint/CPPLINT.cfg b/deps/v8/src/bigint/CPPLINT.cfg
new file mode 100644
index 0000000000..663e4f7d12
--- /dev/null
+++ b/deps/v8/src/bigint/CPPLINT.cfg
@@ -0,0 +1 @@
+filter=-readability/check
\ No newline at end of file
diff --git a/deps/v8/src/bigint/bigint-internal.cc b/deps/v8/src/bigint/bigint-internal.cc
index 2d74f3572c..35a9e5b3f2 100644
--- a/deps/v8/src/bigint/bigint-internal.cc
+++ b/deps/v8/src/bigint/bigint-internal.cc
@@ -52,7 +52,7 @@ void ProcessorImpl::Multiply(RWDigits Z, Digits X, Digits Y) {
void ProcessorImpl::Divide(RWDigits Q, Digits A, Digits B) {
A.Normalize();
B.Normalize();
- DCHECK(B.len() > 0); // NOLINT(readability/check)
+ DCHECK(B.len() > 0);
int cmp = Compare(A, B);
if (cmp < 0) return Q.Clear();
if (cmp == 0) {
@@ -82,7 +82,7 @@ void ProcessorImpl::Divide(RWDigits Q, Digits A, Digits B) {
void ProcessorImpl::Modulo(RWDigits R, Digits A, Digits B) {
A.Normalize();
B.Normalize();
- DCHECK(B.len() > 0); // NOLINT(readability/check)
+ DCHECK(B.len() > 0);
int cmp = Compare(A, B);
if (cmp < 0) {
for (int i = 0; i < B.len(); i++) R[i] = B[i];
diff --git a/deps/v8/src/bigint/bigint.h b/deps/v8/src/bigint/bigint.h
index 28df2936ac..300229c97d 100644
--- a/deps/v8/src/bigint/bigint.h
+++ b/deps/v8/src/bigint/bigint.h
@@ -253,6 +253,14 @@ void BitwiseOr_PosNeg(RWDigits Z, Digits X, Digits Y);
void BitwiseXor_PosPos(RWDigits Z, Digits X, Digits Y);
void BitwiseXor_NegNeg(RWDigits Z, Digits X, Digits Y);
void BitwiseXor_PosNeg(RWDigits Z, Digits X, Digits Y);
+void LeftShift(RWDigits Z, Digits X, digit_t shift);
+// RightShiftState is provided by RightShift_ResultLength and used by the actual
+// RightShift to avoid some recomputation.
+struct RightShiftState {
+ bool must_round_down = false;
+};
+void RightShift(RWDigits Z, Digits X, digit_t shift,
+ const RightShiftState& state);
// Z := (least significant n bits of X, interpreted as a signed n-bit integer).
// Returns true if the result is negative; Z will hold the absolute value.
@@ -352,6 +360,17 @@ inline int BitwiseXor_PosNeg_ResultLength(int x_length, int y_length) {
// Result length growth example: 3 ^ -1 == -4 (2-bit inputs, 3-bit result).
return std::max(x_length, y_length) + 1;
}
+inline int LeftShift_ResultLength(int x_length,
+ digit_t x_most_significant_digit,
+ digit_t shift) {
+ int digit_shift = static_cast<int>(shift / kDigitBits);
+ int bits_shift = static_cast<int>(shift % kDigitBits);
+ bool grow = bits_shift != 0 &&
+ (x_most_significant_digit >> (kDigitBits - bits_shift)) != 0;
+ return x_length + digit_shift + grow;
+}
+int RightShift_ResultLength(Digits X, bool x_sign, digit_t shift,
+ RightShiftState* state);
// Returns -1 if this "asIntN" operation would be a no-op.
int AsIntNResultLength(Digits X, bool x_negative, int n);
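
A worked example of LeftShift_ResultLength above, restated with concrete 64-bit-digit numbers (hedged; LeftShiftResultLength below is a standalone copy for illustration, not the V8 symbol): shifting by 65 means one whole-digit shift plus one bit, and the result grows by an extra digit only if the top bit of the most significant digit would be shifted out.

#include <cassert>
#include <cstdint>

int LeftShiftResultLength(int x_length, std::uint64_t msd, std::uint64_t shift) {
  int digit_shift = static_cast<int>(shift / 64);
  int bits_shift = static_cast<int>(shift % 64);
  bool grow = bits_shift != 0 && (msd >> (64 - bits_shift)) != 0;
  return x_length + digit_shift + grow;
}

int main() {
  assert(LeftShiftResultLength(1, std::uint64_t{1} << 63, 65) == 3);  // msd overflows
  assert(LeftShiftResultLength(1, 1, 65) == 2);                       // it does not
}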
diff --git a/deps/v8/src/bigint/bitwise.cc b/deps/v8/src/bigint/bitwise.cc
index 087847c118..c4cec22b53 100644
--- a/deps/v8/src/bigint/bitwise.cc
+++ b/deps/v8/src/bigint/bitwise.cc
@@ -33,8 +33,8 @@ void BitwiseAnd_NegNeg(RWDigits Z, Digits X, Digits Y) {
// (At least) one of the next two loops will perform zero iterations:
for (; i < X.len(); i++) Z[i] = digit_sub(X[i], x_borrow, &x_borrow);
for (; i < Y.len(); i++) Z[i] = digit_sub(Y[i], y_borrow, &y_borrow);
- DCHECK(x_borrow == 0); // NOLINT(readability/check)
- DCHECK(y_borrow == 0); // NOLINT(readability/check)
+ DCHECK(x_borrow == 0);
+ DCHECK(y_borrow == 0);
for (; i < Z.len(); i++) Z[i] = 0;
Add(Z, 1);
}
@@ -83,7 +83,7 @@ void BitwiseOr_PosNeg(RWDigits Z, Digits X, Digits Y) {
int i = 0;
for (; i < pairs; i++) Z[i] = digit_sub(Y[i], borrow, &borrow) & ~X[i];
for (; i < Y.len(); i++) Z[i] = digit_sub(Y[i], borrow, &borrow);
- DCHECK(borrow == 0); // NOLINT(readability/check)
+ DCHECK(borrow == 0);
for (; i < Z.len(); i++) Z[i] = 0;
Add(Z, 1);
}
@@ -114,8 +114,8 @@ void BitwiseXor_NegNeg(RWDigits Z, Digits X, Digits Y) {
// (At least) one of the next two loops will perform zero iterations:
for (; i < X.len(); i++) Z[i] = digit_sub(X[i], x_borrow, &x_borrow);
for (; i < Y.len(); i++) Z[i] = digit_sub(Y[i], y_borrow, &y_borrow);
- DCHECK(x_borrow == 0); // NOLINT(readability/check)
- DCHECK(y_borrow == 0); // NOLINT(readability/check)
+ DCHECK(x_borrow == 0);
+ DCHECK(y_borrow == 0);
for (; i < Z.len(); i++) Z[i] = 0;
}
@@ -128,11 +128,96 @@ void BitwiseXor_PosNeg(RWDigits Z, Digits X, Digits Y) {
// (At least) one of the next two loops will perform zero iterations:
for (; i < X.len(); i++) Z[i] = X[i];
for (; i < Y.len(); i++) Z[i] = digit_sub(Y[i], borrow, &borrow);
- DCHECK(borrow == 0); // NOLINT(readability/check)
+ DCHECK(borrow == 0);
for (; i < Z.len(); i++) Z[i] = 0;
Add(Z, 1);
}
+void LeftShift(RWDigits Z, Digits X, digit_t shift) {
+ int digit_shift = static_cast<int>(shift / kDigitBits);
+ int bits_shift = static_cast<int>(shift % kDigitBits);
+
+ int i = 0;
+ for (; i < digit_shift; ++i) Z[i] = 0;
+ if (bits_shift == 0) {
+ for (; i < X.len() + digit_shift; ++i) Z[i] = X[i - digit_shift];
+ for (; i < Z.len(); ++i) Z[i] = 0;
+ } else {
+ digit_t carry = 0;
+ for (; i < X.len() + digit_shift; ++i) {
+ digit_t d = X[i - digit_shift];
+ Z[i] = (d << bits_shift) | carry;
+ carry = d >> (kDigitBits - bits_shift);
+ }
+ if (carry != 0) Z[i++] = carry;
+ for (; i < Z.len(); ++i) Z[i] = 0;
+ }
+}
+
+int RightShift_ResultLength(Digits X, bool x_sign, digit_t shift,
+ RightShiftState* state) {
+ int digit_shift = static_cast<int>(shift / kDigitBits);
+ int bits_shift = static_cast<int>(shift % kDigitBits);
+ int result_length = X.len() - digit_shift;
+ if (result_length <= 0) return 0;
+
+ // For negative numbers, round down if any bit was shifted out (so that e.g.
+ // -5n >> 1n == -3n and not -2n). Check now whether this will happen and
+ // whether it can cause overflow into a new digit.
+ bool must_round_down = false;
+ if (x_sign) {
+ const digit_t mask = (static_cast<digit_t>(1) << bits_shift) - 1;
+ if ((X[digit_shift] & mask) != 0) {
+ must_round_down = true;
+ } else {
+ for (int i = 0; i < digit_shift; i++) {
+ if (X[i] != 0) {
+ must_round_down = true;
+ break;
+ }
+ }
+ }
+ }
+ // If bits_shift is non-zero, it frees up bits, preventing overflow.
+ if (must_round_down && bits_shift == 0) {
+ // Overflow cannot happen if the most significant digit has unset bits.
+ const bool rounding_can_overflow = digit_ismax(X.msd());
+ if (rounding_can_overflow) ++result_length;
+ }
+
+ if (state) {
+ DCHECK(!must_round_down || x_sign);
+ state->must_round_down = must_round_down;
+ }
+ return result_length;
+}
+
+void RightShift(RWDigits Z, Digits X, digit_t shift,
+ const RightShiftState& state) {
+ int digit_shift = static_cast<int>(shift / kDigitBits);
+ int bits_shift = static_cast<int>(shift % kDigitBits);
+
+ int i = 0;
+ if (bits_shift == 0) {
+ for (; i < X.len() - digit_shift; ++i) Z[i] = X[i + digit_shift];
+ } else {
+ digit_t carry = X[digit_shift] >> bits_shift;
+ for (; i < X.len() - digit_shift - 1; ++i) {
+ digit_t d = X[i + digit_shift + 1];
+ Z[i] = (d << (kDigitBits - bits_shift)) | carry;
+ carry = d >> bits_shift;
+ }
+ Z[i++] = carry;
+ }
+ for (; i < Z.len(); ++i) Z[i] = 0;
+
+ if (state.must_round_down) {
+ // Rounding down (a negative value) means adding one to
+ // its absolute value. This cannot overflow.
+ Add(Z, 1);
+ }
+}
+
namespace {
// Z := (least significant n bits of X).
@@ -175,7 +260,7 @@ void TruncateAndSubFromPowerOfTwo(RWDigits Z, Digits X, int n) {
msd = (msd << drop) >> drop;
digit_t minuend_msd = static_cast<digit_t>(1) << bits;
digit_t result_msd = digit_sub2(minuend_msd, msd, borrow, &borrow);
- DCHECK(borrow == 0); // result < 2^n. NOLINT(readability/check)
+ DCHECK(borrow == 0); // result < 2^n.
// If all subtracted bits were zero, we have to get rid of the
// materialized minuend_msd again.
Z[last] = result_msd & (minuend_msd - 1);
@@ -203,9 +288,8 @@ int AsIntNResultLength(Digits X, bool x_negative, int n) {
}
bool AsIntN(RWDigits Z, Digits X, bool x_negative, int n) {
- DCHECK(X.len() > 0); // NOLINT(readability/check)
- DCHECK(n > 0); // NOLINT(readability/check)
- // NOLINTNEXTLINE(readability/check)
+ DCHECK(X.len() > 0);
+ DCHECK(n > 0);
DCHECK(AsIntNResultLength(X, x_negative, n) > 0);
int needed_digits = DIV_CEIL(n, kDigitBits);
digit_t top_digit = X[needed_digits - 1];
@@ -250,7 +334,7 @@ int AsUintN_Pos_ResultLength(Digits X, int n) {
}
void AsUintN_Pos(RWDigits Z, Digits X, int n) {
- DCHECK(AsUintN_Pos_ResultLength(X, n) > 0); // NOLINT(readability/check)
+ DCHECK(AsUintN_Pos_ResultLength(X, n) > 0);
TruncateToNBits(Z, X, n);
}
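
The rounding rule above (a negative BigInt right-shifted with any bit falling off rounds toward minus infinity, e.g. -5n >> 1n == -3n) can be modelled on a plain sign/magnitude 64-bit value; this sketch is illustrative only (SignMagnitudeShiftRight is not a V8 function, and it assumes shift < 64 and small magnitudes).

#include <cassert>
#include <cstdint>

// Arithmetic shift of a sign/magnitude value, rounding toward minus infinity.
std::int64_t SignMagnitudeShiftRight(bool negative, std::uint64_t magnitude,
                                     unsigned shift) {
  std::uint64_t result = magnitude >> shift;
  bool bits_lost = (magnitude & ((std::uint64_t{1} << shift) - 1)) != 0;
  if (negative && bits_lost) ++result;  // the "must_round_down" case
  return negative ? -static_cast<std::int64_t>(result)
                  : static_cast<std::int64_t>(result);
}

int main() {
  assert(SignMagnitudeShiftRight(true, 5, 1) == -3);   // -5n >> 1n == -3n
  assert(SignMagnitudeShiftRight(false, 5, 1) == 2);   //  5n >> 1n ==  2n
}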
diff --git a/deps/v8/src/bigint/digit-arithmetic.h b/deps/v8/src/bigint/digit-arithmetic.h
index 96ac949eb7..d9113efc91 100644
--- a/deps/v8/src/bigint/digit-arithmetic.h
+++ b/deps/v8/src/bigint/digit-arithmetic.h
@@ -17,6 +17,8 @@ static constexpr int kHalfDigitBits = kDigitBits / 2;
static constexpr digit_t kHalfDigitBase = digit_t{1} << kHalfDigitBits;
static constexpr digit_t kHalfDigitMask = kHalfDigitBase - 1;
+constexpr bool digit_ismax(digit_t x) { return static_cast<digit_t>(~x) == 0; }
+
// {carry} will be set to 0 or 1.
inline digit_t digit_add2(digit_t a, digit_t b, digit_t* carry) {
#if HAVE_TWODIGIT_T
@@ -118,7 +120,7 @@ static inline digit_t digit_div(digit_t high, digit_t low, digit_t divisor,
digit_t* remainder) {
#if defined(DCHECK)
DCHECK(high < divisor);
- DCHECK(divisor != 0); // NOLINT(readability/check)
+ DCHECK(divisor != 0);
#endif
#if __x86_64__ && (__GNUC__ || __clang__)
digit_t quotient;
diff --git a/deps/v8/src/bigint/div-barrett.cc b/deps/v8/src/bigint/div-barrett.cc
index 39f09d0ac1..306dec8b25 100644
--- a/deps/v8/src/bigint/div-barrett.cc
+++ b/deps/v8/src/bigint/div-barrett.cc
@@ -41,7 +41,7 @@ void DcheckIntegerPartRange(Digits X, digit_t min, digit_t max) {
// See comments at {Invert} and {InvertNewton} below for details.
void ProcessorImpl::InvertBasecase(RWDigits Z, Digits V, RWDigits scratch) {
DCHECK(Z.len() > V.len());
- DCHECK(V.len() > 0); // NOLINT(readability/check)
+ DCHECK(V.len() > 0);
DCHECK(scratch.len() >= 2 * V.len());
int n = V.len();
RWDigits X(scratch, 0, 2 * n);
@@ -49,7 +49,7 @@ void ProcessorImpl::InvertBasecase(RWDigits Z, Digits V, RWDigits scratch) {
int i = 0;
for (; i < n; i++) X[i] = 0;
for (; i < 2 * n; i++) X[i] = digit_sub2(0, V[i - n], borrow, &borrow);
- DCHECK(borrow == 1); // NOLINT(readability/check)
+ DCHECK(borrow == 1);
RWDigits R(nullptr, 0); // We don't need the remainder.
if (n < kBurnikelThreshold) {
DivideSchoolbook(Z, R, X, V);
@@ -76,7 +76,7 @@ void ProcessorImpl::InvertNewton(RWDigits Z, Digits V, RWDigits scratch) {
const int kUOffset = vn + kInvertNewtonExtraSpace;
// The base case won't work otherwise.
- DCHECK(V.len() >= 3); // NOLINT(readability/check)
+ DCHECK(V.len() >= 3);
constexpr int kBasecasePrecision = kNewtonInversionThreshold - 1;
// V must have more digits than the basecase.
@@ -147,17 +147,17 @@ void ProcessorImpl::InvertNewton(RWDigits Z, Digits V, RWDigits scratch) {
if (U.len() <= vn) {
// Normal subtraction.
// This is not the last iteration.
- DCHECK(iteration > 0); // NOLINT(readability/check)
+ DCHECK(iteration > 0);
Z.set_len(U.len());
digit_t borrow = SubtractAndReturnBorrow(Z, W, U);
- DCHECK(borrow == 0); // NOLINT(readability/check)
+ DCHECK(borrow == 0);
USE(borrow);
DcheckIntegerPartRange(Z, 1, 2);
} else {
// Truncate some least significant digits so that we get vn
// fraction digits, and compute the integer digit separately.
// This is the last iteration.
- DCHECK(iteration == 0); // NOLINT(readability/check)
+ DCHECK(iteration == 0);
Z.set_len(vn);
Digits W_part(W, W.len() - vn - 1, vn);
Digits U_part(U, U.len() - vn - 1, vn);
@@ -186,7 +186,7 @@ void ProcessorImpl::InvertNewton(RWDigits Z, Digits V, RWDigits scratch) {
// Needs InvertScratchSpace(V.len) digits of scratch space.
void ProcessorImpl::Invert(RWDigits Z, Digits V, RWDigits scratch) {
DCHECK(Z.len() > V.len());
- DCHECK(V.len() >= 1); // NOLINT(readability/check)
+ DCHECK(V.len() >= 1);
DCHECK(IsBitNormalized(V));
DCHECK(scratch.len() >= InvertScratchSpace(V.len()));
@@ -218,7 +218,7 @@ void ProcessorImpl::DivideBarrett(RWDigits Q, RWDigits R, Digits A, Digits B,
DCHECK(R.len() >= B.len());
DCHECK(A.len() > B.len()); // Careful: This is *not* '>=' !
DCHECK(A.len() <= 2 * B.len());
- DCHECK(B.len() > 0); // NOLINT(readability/check)
+ DCHECK(B.len() > 0);
DCHECK(IsBitNormalized(B));
DCHECK(I.len() == A.len() - B.len());
DCHECK(scratch.len() >= DivideBarrettScratchSpace(A.len()));
@@ -257,7 +257,7 @@ void ProcessorImpl::DivideBarrett(RWDigits Q, RWDigits R, Digits A, Digits B,
do {
r_high += AddAndReturnCarry(R, R, B);
q_sub++;
- DCHECK(q_sub <= 5); // NOLINT(readability/check)
+ DCHECK(q_sub <= 5);
} while (r_high != 0);
Subtract(Q, q_sub);
} else {
@@ -266,7 +266,7 @@ void ProcessorImpl::DivideBarrett(RWDigits Q, RWDigits R, Digits A, Digits B,
// (5c): R >= B, so R -= B
r_high -= SubtractAndReturnBorrow(R, R, B);
q_add++;
- DCHECK(q_add <= 5); // NOLINT(readability/check)
+ DCHECK(q_add <= 5);
}
Add(Q, q_add);
}
@@ -281,7 +281,7 @@ void ProcessorImpl::DivideBarrett(RWDigits Q, RWDigits R, Digits A, Digits B) {
DCHECK(Q.len() > A.len() - B.len());
DCHECK(R.len() >= B.len());
DCHECK(A.len() > B.len()); // Careful: This is *not* '>=' !
- DCHECK(B.len() > 0); // NOLINT(readability/check)
+ DCHECK(B.len() > 0);
// Normalize B, and shift A by the same amount.
ShiftedDigits b_normalized(B);
@@ -312,7 +312,7 @@ void ProcessorImpl::DivideBarrett(RWDigits Q, RWDigits R, Digits A, Digits B) {
int n = B.len(); // Chunk length.
// (5): {t} is the number of B-sized chunks of A.
int t = DIV_CEIL(A.len(), n);
- DCHECK(t >= 3); // NOLINT(readability/check)
+ DCHECK(t >= 3);
// (6)/(7): Z is used for the current 2-chunk block to be divided by B,
// initialized to the two topmost chunks of A.
int z_len = n * 2;
@@ -334,7 +334,7 @@ void ProcessorImpl::DivideBarrett(RWDigits Q, RWDigits R, Digits A, Digits B) {
for (int j = to_copy; j < target.len(); j++) target[j] = 0;
#if DEBUG
for (int j = to_copy; j < Qi.len(); j++) {
- DCHECK(Qi[j] == 0); // NOLINT(readability/check)
+ DCHECK(Qi[j] == 0);
}
#endif
}
@@ -346,7 +346,7 @@ void ProcessorImpl::DivideBarrett(RWDigits Q, RWDigits R, Digits A, Digits B) {
PutAt(Z, A + n * i, n);
// (8a): Compute Qi, Ri such that Zi = B*Qi + Ri.
DivideBarrett(Qi, Ri, Z, B, I, scratch);
- DCHECK(Qi[qi_len - 1] == 0); // NOLINT(readability/check)
+ DCHECK(Qi[qi_len - 1] == 0);
if (should_terminate()) return;
// (9): Return Q = [Q_(t-2), ..., Q_0]...
PutAt(Q + n * i, Qi, n);
diff --git a/deps/v8/src/bigint/div-burnikel.cc b/deps/v8/src/bigint/div-burnikel.cc
index 0caedb1cc1..264bc784a8 100644
--- a/deps/v8/src/bigint/div-burnikel.cc
+++ b/deps/v8/src/bigint/div-burnikel.cc
@@ -70,7 +70,7 @@ class BZ {
void BZ::DivideBasecase(RWDigits Q, RWDigits R, Digits A, Digits B) {
A.Normalize();
B.Normalize();
- DCHECK(B.len() > 0); // NOLINT(readability/check)
+ DCHECK(B.len() > 0);
int cmp = Compare(A, B);
if (cmp <= 0) {
Q.Clear();
@@ -94,11 +94,11 @@ void BZ::DivideBasecase(RWDigits Q, RWDigits R, Digits A, Digits B) {
// Returns Q(uotient) and R(emainder) for A/B, with B having two thirds
// the size of A = [A1, A2, A3].
void BZ::D3n2n(RWDigits Q, RWDigits R, Digits A1A2, Digits A3, Digits B) {
- DCHECK((B.len() & 1) == 0); // NOLINT(readability/check)
+ DCHECK((B.len() & 1) == 0);
int n = B.len() / 2;
DCHECK(A1A2.len() == 2 * n);
// Actual condition is stricter than length: A < B * 2^(kDigitBits * n)
- DCHECK(Compare(A1A2, B) < 0); // NOLINT(readability/check)
+ DCHECK(Compare(A1A2, B) < 0);
DCHECK(A3.len() == n);
DCHECK(Q.len() == n);
DCHECK(R.len() == 2 * n);
@@ -126,7 +126,7 @@ void BZ::D3n2n(RWDigits Q, RWDigits R, Digits A1A2, Digits A3, Digits B) {
RWDigits temp = R1;
Subtract(temp, A1, B1);
temp.Normalize();
- DCHECK(temp.len() <= 1); // NOLINT(readability/check)
+ DCHECK(temp.len() <= 1);
if (temp.len() > 0) r1_high = temp[0];
// Step 2: compute A2 + B1.
Digits A2(A1A2, 0, n);
@@ -149,7 +149,7 @@ void BZ::D3n2n(RWDigits Q, RWDigits R, Digits A1A2, Digits A3, Digits B) {
// 5. Compute Rhat = R1*2^(kDigitBits * n) + A3 - D = [R1, A3] - D.
digit_t borrow = SubtractAndReturnBorrow(R, R, D);
DCHECK(borrow == r1_high);
- DCHECK(Compare(R, B) < 0); // NOLINT(readability/check)
+ DCHECK(Compare(R, B) < 0);
(void)borrow;
// 7. Return R = Rhat, Q = Qhat.
}
@@ -160,7 +160,7 @@ void BZ::D2n1n(RWDigits Q, RWDigits R, Digits A, Digits B) {
int n = B.len();
DCHECK(A.len() <= 2 * n);
// A < B * 2^(kDigitsBits * n)
- DCHECK(Compare(Digits(A, n, n), B) < 0); // NOLINT(readability/check)
+ DCHECK(Compare(Digits(A, n, n), B) < 0);
DCHECK(Q.len() == n);
DCHECK(R.len() == n);
// 1. If n is odd or smaller than some convenient constant, compute Q and R
@@ -264,7 +264,7 @@ void ProcessorImpl::DivideBurnikelZiegler(RWDigits Q, RWDigits R, Digits A,
// 9. Return Q = [Q_(t-2), ..., Q_0] and R = R_0 * 2^(-sigma).
#if DEBUG
for (int i = 0; i < digit_shift; i++) {
- DCHECK(Ri[i] == 0); // NOLINT(readability/check)
+ DCHECK(Ri[i] == 0);
}
#endif
if (R.len() != 0) {
diff --git a/deps/v8/src/bigint/div-helpers.cc b/deps/v8/src/bigint/div-helpers.cc
index 0dfca0b02c..39beb48675 100644
--- a/deps/v8/src/bigint/div-helpers.cc
+++ b/deps/v8/src/bigint/div-helpers.cc
@@ -23,7 +23,7 @@ void Copy(RWDigits Z, Digits X) {
// Z := X << shift
// Z and X may alias for an in-place shift.
void LeftShift(RWDigits Z, Digits X, int shift) {
- DCHECK(shift >= 0); // NOLINT(readability/check)
+ DCHECK(shift >= 0);
DCHECK(shift < kDigitBits);
DCHECK(Z.len() >= X.len());
if (shift == 0) return Copy(Z, X);
@@ -37,7 +37,7 @@ void LeftShift(RWDigits Z, Digits X, int shift) {
if (i < Z.len()) {
Z[i++] = carry;
} else {
- DCHECK(carry == 0); // NOLINT(readability/check)
+ DCHECK(carry == 0);
}
for (; i < Z.len(); i++) Z[i] = 0;
}
@@ -45,7 +45,7 @@ void LeftShift(RWDigits Z, Digits X, int shift) {
// Z := X >> shift
// Z and X may alias for an in-place shift.
void RightShift(RWDigits Z, Digits X, int shift) {
- DCHECK(shift >= 0); // NOLINT(readability/check)
+ DCHECK(shift >= 0);
DCHECK(shift < kDigitBits);
X.Normalize();
DCHECK(Z.len() >= X.len());
diff --git a/deps/v8/src/bigint/div-schoolbook.cc b/deps/v8/src/bigint/div-schoolbook.cc
index a6295c573c..d8245a77ad 100644
--- a/deps/v8/src/bigint/div-schoolbook.cc
+++ b/deps/v8/src/bigint/div-schoolbook.cc
@@ -28,8 +28,8 @@ namespace bigint {
// Q may be the same as A for an in-place division.
void ProcessorImpl::DivideSingle(RWDigits Q, digit_t* remainder, Digits A,
digit_t b) {
- DCHECK(b != 0); // NOLINT(readability/check)
- DCHECK(A.len() > 0); // NOLINT(readability/check)
+ DCHECK(b != 0);
+ DCHECK(A.len() > 0);
*remainder = 0;
int length = A.len();
if (Q.len() != 0) {
@@ -93,7 +93,6 @@ bool QLengthOK(Digits Q, Digits A, Digits B) {
// See Knuth, Volume 2, section 4.3.1, Algorithm D.
void ProcessorImpl::DivideSchoolbook(RWDigits Q, RWDigits R, Digits A,
Digits B) {
- // NOLINTNEXTLINE(readability/check)
DCHECK(B.len() >= 2); // Use DivideSingle otherwise.
DCHECK(A.len() >= B.len()); // No-op otherwise.
DCHECK(Q.len() == 0 || QLengthOK(Q, A, B));
@@ -173,7 +172,7 @@ void ProcessorImpl::DivideSchoolbook(RWDigits Q, RWDigits R, Digits A,
if (Q.len() != 0) {
if (j >= Q.len()) {
- DCHECK(qhat == 0); // NOLINT(readability/check)
+ DCHECK(qhat == 0);
} else {
Q[j] = qhat;
}
diff --git a/deps/v8/src/bigint/fromstring.cc b/deps/v8/src/bigint/fromstring.cc
index a4b34a1a02..456a6d2919 100644
--- a/deps/v8/src/bigint/fromstring.cc
+++ b/deps/v8/src/bigint/fromstring.cc
@@ -13,7 +13,7 @@ namespace bigint {
void ProcessorImpl::FromStringClassic(RWDigits Z,
FromStringAccumulator* accumulator) {
// We always have at least one part to process.
- DCHECK(accumulator->stack_parts_used_ > 0); // NOLINT(readability/check)
+ DCHECK(accumulator->stack_parts_used_ > 0);
Z[0] = accumulator->stack_parts_[0];
RWDigits already_set(Z, 0, 1);
for (int i = 1; i < Z.len(); i++) Z[i] = 0;
@@ -89,7 +89,7 @@ void ProcessorImpl::FromStringClassic(RWDigits Z,
void ProcessorImpl::FromStringLarge(RWDigits Z,
FromStringAccumulator* accumulator) {
int num_parts = static_cast<int>(accumulator->heap_parts_.size());
- DCHECK(num_parts >= 2); // NOLINT(readability/check)
+ DCHECK(num_parts >= 2);
DCHECK(Z.len() >= num_parts);
RWDigits parts(accumulator->heap_parts_.data(), num_parts);
Storage multipliers_storage(num_parts);
@@ -160,7 +160,7 @@ void ProcessorImpl::FromStringLarge(RWDigits Z,
Multiply(p_out, p_in, m_in2);
if (should_terminate()) return;
digit_t overflow = AddAndReturnOverflow(p_out, p_in2);
- DCHECK(overflow == 0); // NOLINT(readability/check)
+ DCHECK(overflow == 0);
USE(overflow);
// m[j] = m[i] * m[i+1]
if (i > 0) {
@@ -240,7 +240,7 @@ void ProcessorImpl::FromStringLarge(RWDigits Z,
void ProcessorImpl::FromStringBasePowerOfTwo(
RWDigits Z, FromStringAccumulator* accumulator) {
const int num_parts = accumulator->ResultLength();
- DCHECK(num_parts >= 1); // NOLINT(readability/check)
+ DCHECK(num_parts >= 1);
DCHECK(Z.len() >= num_parts);
Digits parts(accumulator->heap_parts_.size() > 0
? accumulator->heap_parts_.data()
@@ -259,7 +259,7 @@ void ProcessorImpl::FromStringBasePowerOfTwo(
// If the last part is fully populated, then all parts must be, and we can
// simply copy them (in reversed order).
if (unused_last_part_bits == 0) {
- DCHECK(kDigitBits % char_bits == 0); // NOLINT(readability/check)
+ DCHECK(kDigitBits % char_bits == 0);
while (part_index >= 0) {
Z[z_index++] = parts[part_index--];
}
diff --git a/deps/v8/src/bigint/mul-fft.cc b/deps/v8/src/bigint/mul-fft.cc
index 9c297c00df..3c255f48ad 100644
--- a/deps/v8/src/bigint/mul-fft.cc
+++ b/deps/v8/src/bigint/mul-fft.cc
@@ -183,7 +183,7 @@ void ShiftModFn(digit_t* result, const digit_t* input, int power_of_two, int K,
// The modulo-reduction amounts to a subtraction, which we combine
// with the shift as follows:
// input = [ iK ][iK-1] .... .... [ i1 ][ i0 ]
- // result = [iX-1] .... [ i0 ] <<<<<<<<<<< shift by {power_of_two}
+ // result = [iX-1] .... [ i0 ] <---------- shift by {power_of_two}
// - [ iK ] .... [ iX ]
// where "X" is the index "K - digit_shift".
int digit_shift = power_of_two / kDigitBits;
@@ -207,7 +207,7 @@ void ShiftModFn(digit_t* result, const digit_t* input, int power_of_two, int K,
}
// Any remaining work can hard-code the knowledge that input[i] == 0.
for (; i < K - digit_shift; i++) {
- DCHECK(input[i] == 0); // NOLINT(readability/check)
+ DCHECK(input[i] == 0);
result[i + digit_shift] = 0;
}
// Second phase: subtract input digits [iX] to [iK] from (virtually) zero-
@@ -219,7 +219,7 @@ void ShiftModFn(digit_t* result, const digit_t* input, int power_of_two, int K,
}
// Any remaining work can hard-code the knowledge that input[i] == 0.
for (; i < K; i++) {
- DCHECK(input[i] == 0); // NOLINT(readability/check)
+ DCHECK(input[i] == 0);
result[i - K + digit_shift] = digit_sub(0, borrow, &borrow);
}
// Last step: subtract [iK] from [i0] and store at result index digit_shift.
@@ -238,7 +238,7 @@ void ShiftModFn(digit_t* result, const digit_t* input, int power_of_two, int K,
}
// Any remaining work can hard-code the knowledge that input[i] == 0.
for (; i < K - digit_shift; i++) {
- DCHECK(input[i] == 0); // NOLINT(readability/check)
+ DCHECK(input[i] == 0);
result[i + digit_shift] = carry;
carry = 0;
}
@@ -252,13 +252,13 @@ void ShiftModFn(digit_t* result, const digit_t* input, int power_of_two, int K,
}
// Any remaining work can hard-code the knowledge that input[i] == 0.
if (i < K) {
- DCHECK(input[i] == 0); // NOLINT(readability/check)
+ DCHECK(input[i] == 0);
result[i - K + digit_shift] = digit_sub2(0, carry, borrow, &borrow);
carry = 0;
i++;
}
for (; i < K; i++) {
- DCHECK(input[i] == 0); // NOLINT(readability/check)
+ DCHECK(input[i] == 0);
result[i - K + digit_shift] = digit_sub(0, borrow, &borrow);
}
// Last step: compute result[digit_shift].
@@ -266,7 +266,7 @@ void ShiftModFn(digit_t* result, const digit_t* input, int power_of_two, int K,
result[digit_shift] = digit_sub2(
result[digit_shift], (d << bits_shift) | carry, borrow, &borrow);
// No carry left.
- DCHECK((d >> (kDigitBits - bits_shift)) == 0); // NOLINT(readability/check)
+ DCHECK((d >> (kDigitBits - bits_shift)) == 0);
}
result[K] = 0;
for (int i = digit_shift + 1; i <= K && borrow > 0; i++) {
@@ -324,8 +324,8 @@ void ComputeParameters(int N, int m, Parameters* params) {
K_tz = CountTrailingZeros(K);
}
- DCHECK(K % kDigitBits == 0); // NOLINT(readability/check)
- DCHECK(s % kDigitBits == 0); // NOLINT(readability/check)
+ DCHECK(K % kDigitBits == 0);
+ DCHECK(s % kDigitBits == 0);
params->K = K / kDigitBits;
params->s = s / kDigitBits;
params->n = n;
@@ -347,8 +347,8 @@ void ComputeParameters_Inner(int N, Parameters* params) {
K = RoundUp(K, n); // ...and a multiple of n and kDigitBits.
K = RoundUp(K, kDigitBits);
params->r = K >> m; // Which multiple?
- DCHECK(K % kDigitBits == 0); // NOLINT(readability/check)
- DCHECK(s % kDigitBits == 0); // NOLINT(readability/check)
+ DCHECK(K % kDigitBits == 0);
+ DCHECK(s % kDigitBits == 0);
params->K = K / kDigitBits;
params->s = s / kDigitBits;
params->n = n;
@@ -502,7 +502,7 @@ void FFTContainer::Start_Default(Digits X, int chunk_size, int theta,
// corner case where X[n_ * chunk_size] == 1. Detect that case, and handle
// the extra bit as part of the last chunk; we always have the space.
if (i == n_ - 1 && len == chunk_size + 1) {
- DCHECK(X[n_ * chunk_size] <= 1); // NOLINT(readability/check)
+ DCHECK(X[n_ * chunk_size] <= 1);
DCHECK(length_ >= chunk_size + 1);
chunk_size++;
}
@@ -517,7 +517,7 @@ void FFTContainer::Start_Default(Digits X, int chunk_size, int theta,
pointer += chunk_size;
len -= chunk_size;
}
- DCHECK(len == 0); // NOLINT(readability/check)
+ DCHECK(len == 0);
for (; i < n_; i++) {
memset(part_[i], 0, part_length_in_bytes);
}
@@ -531,7 +531,7 @@ void FFTContainer::Start(Digits X, int chunk_size, int theta, int omega) {
if (len > n_ * chunk_size / 2) {
return Start_Default(X, chunk_size, theta, omega);
}
- DCHECK(theta == 0); // NOLINT(readability/check)
+ DCHECK(theta == 0);
const digit_t* pointer = X.digits();
const size_t part_length_in_bytes = length_ * sizeof(digit_t);
int nhalf = n_ / 2;
@@ -562,7 +562,7 @@ void FFTContainer::Start(Digits X, int chunk_size, int theta, int omega) {
// need as input for the "DIT" aka "decimation in time" backwards transform.
void FFTContainer::FFT_ReturnShuffledThreadsafe(int start, int len, int omega,
digit_t* temp) {
- DCHECK((len & 1) == 0); // {len} must be even. NOLINT(readability/check)
+ DCHECK((len & 1) == 0); // {len} must be even.
int half = len / 2;
SumDiff(part_[start], part_[start + half], part_[start], part_[start + half],
length_);
@@ -592,7 +592,7 @@ void FFTContainer::BackwardFFT(int start, int len, int omega) {
void FFTContainer::BackwardFFT_Threadsafe(int start, int len, int omega,
digit_t* temp) {
- DCHECK((len & 1) == 0); // {len} must be even. NOLINT(readability/check)
+ DCHECK((len & 1) == 0); // {len} must be even.
int half = len / 2;
// Don't recurse for half == 2, as PointwiseMultiply already performed
// the first level of the backwards FFT.
@@ -626,7 +626,7 @@ void FFTContainer::NormalizeAndRecombine(int omega, int m, RWDigits Z,
Z[zi] = digit_add3(Z[zi], temp_[j], carry, &carry);
}
for (; j < length_; j++) {
- DCHECK(temp_[j] == 0); // NOLINT(readability/check)
+ DCHECK(temp_[j] == 0);
}
if (carry != 0) {
DCHECK(zi < Z.len());
@@ -654,7 +654,7 @@ void FFTContainer::CounterWeightAndRecombine(int theta, int m, RWDigits Z,
for (int k = 0; k < n_; k++, z_index += s) {
int shift = -theta * k - m;
if (shift < 0) shift += 2 * n_ * theta;
- DCHECK(shift >= 0); // NOLINT(readability/check)
+ DCHECK(shift >= 0);
digit_t* input = part_[k];
ShiftModFn(temp_, input, shift, K_);
int remaining_z = Z.len() - z_index;
@@ -679,7 +679,7 @@ void FFTContainer::CounterWeightAndRecombine(int theta, int m, RWDigits Z,
digit_t d = digit_sub2(1, temp_[i], borrow_Fn, &borrow_Fn);
Z[z_index + i] = digit_sub2(Z[z_index + i], d, borrow_z, &borrow_z);
}
- DCHECK(borrow_Fn == 0); // NOLINT(readability/check)
+ DCHECK(borrow_Fn == 0);
for (; borrow_z > 0 && i < remaining_z; i++) {
Z[z_index + i] = digit_sub(Z[z_index + i], borrow_z, &borrow_z);
}
@@ -690,7 +690,7 @@ void FFTContainer::CounterWeightAndRecombine(int theta, int m, RWDigits Z,
Z[z_index + i] = digit_add3(Z[z_index + i], temp_[i], carry, &carry);
}
for (; i < length_; i++) {
- DCHECK(temp_[i] == 0); // NOLINT(readability/check)
+ DCHECK(temp_[i] == 0);
}
for (; carry > 0 && i < remaining_z; i++) {
Z[z_index + i] = digit_add2(Z[z_index + i], carry, &carry);
diff --git a/deps/v8/src/bigint/mul-karatsuba.cc b/deps/v8/src/bigint/mul-karatsuba.cc
index d4b5a58383..3fdda20aeb 100644
--- a/deps/v8/src/bigint/mul-karatsuba.cc
+++ b/deps/v8/src/bigint/mul-karatsuba.cc
@@ -82,7 +82,7 @@ void KaratsubaSubtractionHelper(RWDigits result, Digits X, Digits Y,
for (; i < X.len(); i++) {
result[i] = digit_sub(X[i], borrow, &borrow);
}
- DCHECK(borrow == 0); // NOLINT(readability/check)
+ DCHECK(borrow == 0);
for (; i < result.len(); i++) result[i] = 0;
}
@@ -160,7 +160,7 @@ void ProcessorImpl::KaratsubaMain(RWDigits Z, Digits X, Digits Y,
}
}
DCHECK(scratch.len() >= 4 * n);
- DCHECK((n & 1) == 0); // NOLINT(readability/check)
+ DCHECK((n & 1) == 0);
int n2 = n >> 1;
Digits X0(X, 0, n2);
Digits X1(X, n2, n2);
@@ -178,7 +178,7 @@ void ProcessorImpl::KaratsubaMain(RWDigits Z, Digits X, Digits Y,
int end = std::min(Z2.len(), P2.len());
for (int i = 0; i < end; i++) Z2[i] = P2[i];
for (int i = end; i < n; i++) {
- DCHECK(P2[i] == 0); // NOLINT(readability/check)
+ DCHECK(P2[i] == 0);
}
// The intermediate result can be one digit too large; the subtraction
// below will fix this.
@@ -197,7 +197,7 @@ void ProcessorImpl::KaratsubaMain(RWDigits Z, Digits X, Digits Y,
overflow -= SubAndReturnBorrow(Z + n2, P1);
}
// The intermediate result may have been bigger, but the final result fits.
- DCHECK(overflow == 0); // NOLINT(readability/check)
+ DCHECK(overflow == 0);
USE(overflow);
}
diff --git a/deps/v8/src/bigint/mul-schoolbook.cc b/deps/v8/src/bigint/mul-schoolbook.cc
index 9222e1e675..27a3a24311 100644
--- a/deps/v8/src/bigint/mul-schoolbook.cc
+++ b/deps/v8/src/bigint/mul-schoolbook.cc
@@ -11,7 +11,7 @@ namespace bigint {
// Z := X * y, where y is a single digit.
void ProcessorImpl::MultiplySingle(RWDigits Z, Digits X, digit_t y) {
- DCHECK(y != 0); // NOLINT(readability/check)
+ DCHECK(y != 0);
digit_t carry = 0;
digit_t high = 0;
for (int i = 0; i < X.len(); i++) {
@@ -87,7 +87,7 @@ void ProcessorImpl::MultiplySchoolbook(RWDigits Z, Digits X, Digits Y) {
}
// Write the last digit, and zero out any extra space in Z.
Z[i++] = digit_add2(next, carry, &carry);
- DCHECK(carry == 0); // NOLINT(readability/check)
+ DCHECK(carry == 0);
for (; i < Z.len(); i++) Z[i] = 0;
}
diff --git a/deps/v8/src/bigint/tostring.cc b/deps/v8/src/bigint/tostring.cc
index 51fb75957a..0447ce0c22 100644
--- a/deps/v8/src/bigint/tostring.cc
+++ b/deps/v8/src/bigint/tostring.cc
@@ -56,7 +56,7 @@ constexpr digit_t digit_pow_rec(digit_t base, digit_t exponent) {
template <int radix>
char* BasecaseFixedLast(digit_t chunk, char* out) {
while (chunk != 0) {
- DCHECK(*(out - 1) == kStringZapValue); // NOLINT(readability/check)
+ DCHECK(*(out - 1) == kStringZapValue);
if (radix <= 10) {
*(--out) = '0' + (chunk % radix);
} else {
@@ -94,7 +94,7 @@ char* DivideByMagic(RWDigits rest, Digits input, char* output) {
}
// {remainder} is now the current chunk to be written out.
for (int i = 0; i < chunk_chars; i++) {
- DCHECK(*(output - 1) == kStringZapValue); // NOLINT(readability/check)
+ DCHECK(*(output - 1) == kStringZapValue);
if (radix <= 10) {
*(--output) = '0' + (remainder % radix);
} else {
@@ -102,7 +102,7 @@ char* DivideByMagic(RWDigits rest, Digits input, char* output) {
}
remainder /= radix;
}
- DCHECK(remainder == 0); // NOLINT(readability/check)
+ DCHECK(remainder == 0);
return output;
}
@@ -182,7 +182,7 @@ class ToStringFormatter {
char* BasecaseLast(digit_t digit, char* out) {
if (radix_ == 10) return BasecaseFixedLast<10>(digit, out);
do {
- DCHECK(*(out - 1) == kStringZapValue); // NOLINT(readability/check)
+ DCHECK(*(out - 1) == kStringZapValue);
*(--out) = kConversionChars[digit % radix_];
digit /= radix_;
} while (digit > 0);
@@ -193,11 +193,11 @@ class ToStringFormatter {
// same number of characters (as many '0' as necessary).
char* BasecaseMiddle(digit_t digit, char* out) {
for (int i = 0; i < chunk_chars_; i++) {
- DCHECK(*(out - 1) == kStringZapValue); // NOLINT(readability/check)
+ DCHECK(*(out - 1) == kStringZapValue);
*(--out) = kConversionChars[digit % radix_];
digit /= radix_;
}
- DCHECK(digit == 0); // NOLINT(readability/check)
+ DCHECK(digit == 0);
return out;
}
@@ -221,7 +221,7 @@ void ToStringFormatter::Start() {
chunk_chars_ = kDigitBits * kBitsPerCharTableMultiplier / max_bits_per_char_;
chunk_divisor_ = digit_pow(radix_, chunk_chars_);
// By construction of chunk_chars_, there can't have been overflow.
- DCHECK(chunk_divisor_ != 0); // NOLINT(readability/check)
+ DCHECK(chunk_divisor_ != 0);
}
int ToStringFormatter::Finish() {
@@ -411,7 +411,7 @@ void RecursionLevel::ComputeInverse(ProcessorImpl* processor,
}
Digits RecursionLevel::GetInverse(int dividend_length) {
- DCHECK(inverse_.len() != 0); // NOLINT(readability/check)
+ DCHECK(inverse_.len() != 0);
int inverse_len = dividend_length - divisor_.len();
DCHECK(inverse_len <= inverse_.len());
return inverse_ + (inverse_.len() - inverse_len);
@@ -484,7 +484,7 @@ char* ToStringFormatter::ProcessLevel(RecursionLevel* level, Digits chunk,
chunk = original_chunk;
out = ProcessLevel(level->next_, chunk, out, is_last_on_level);
} else {
- DCHECK(comparison == 0); // NOLINT(readability/check)
+ DCHECK(comparison == 0);
// If the chunk is equal to the divisor, we know that the right half
// is all '0', and the left half is '...0001'.
// Handling this case specially is an optimization; we could also
diff --git a/deps/v8/src/bigint/vector-arithmetic.cc b/deps/v8/src/bigint/vector-arithmetic.cc
index 9bbea3873e..0cd65589c8 100644
--- a/deps/v8/src/bigint/vector-arithmetic.cc
+++ b/deps/v8/src/bigint/vector-arithmetic.cc
@@ -68,7 +68,7 @@ void Subtract(RWDigits Z, Digits X, Digits Y) {
for (; i < X.len(); i++) {
Z[i] = digit_sub(X[i], borrow, &borrow);
}
- DCHECK(borrow == 0); // NOLINT(readability/check)
+ DCHECK(borrow == 0);
for (; i < Z.len(); i++) Z[i] = 0;
}
diff --git a/deps/v8/src/builtins/arm/builtins-arm.cc b/deps/v8/src/builtins/arm/builtins-arm.cc
index a3a2209f9f..00f1009610 100644
--- a/deps/v8/src/builtins/arm/builtins-arm.cc
+++ b/deps/v8/src/builtins/arm/builtins-arm.cc
@@ -209,8 +209,10 @@ void Builtins::Generate_JSConstructStubGeneric(MacroAssembler* masm) {
__ ldr(r4, FieldMemOperand(r1, JSFunction::kSharedFunctionInfoOffset));
__ ldr(r4, FieldMemOperand(r4, SharedFunctionInfo::kFlagsOffset));
__ DecodeField<SharedFunctionInfo::FunctionKindBits>(r4);
- __ JumpIfIsInRange(r4, kDefaultDerivedConstructor, kDerivedConstructor,
- &not_create_implicit_receiver);
+ __ JumpIfIsInRange(
+ r4, static_cast<uint32_t>(FunctionKind::kDefaultDerivedConstructor),
+ static_cast<uint32_t>(FunctionKind::kDerivedConstructor),
+ &not_create_implicit_receiver);
// If not derived class constructor: Allocate the new receiver object.
__ IncrementCounter(masm->isolate()->counters()->constructed_objects(), 1, r4,
@@ -892,7 +894,7 @@ static void TailCallRuntimeIfMarkerEquals(MacroAssembler* masm,
Runtime::FunctionId function_id) {
ASM_CODE_COMMENT(masm);
Label no_match;
- __ cmp_raw_immediate(actual_marker, expected_marker);
+ __ cmp_raw_immediate(actual_marker, static_cast<int>(expected_marker));
__ b(ne, &no_match);
GenerateTailCallToReturnedCode(masm, function_id);
__ bind(&no_match);
diff --git a/deps/v8/src/builtins/arm64/builtins-arm64.cc b/deps/v8/src/builtins/arm64/builtins-arm64.cc
index 0cb79c1f04..b75ffcc065 100644
--- a/deps/v8/src/builtins/arm64/builtins-arm64.cc
+++ b/deps/v8/src/builtins/arm64/builtins-arm64.cc
@@ -75,7 +75,7 @@ static void GenerateTailCallToReturnedCode(MacroAssembler* masm,
}
static_assert(kJavaScriptCallCodeStartRegister == x2, "ABI mismatch");
- __ JumpCodeObject(x2);
+ __ JumpCodeTObject(x2);
}
namespace {
@@ -253,8 +253,10 @@ void Builtins::Generate_JSConstructStubGeneric(MacroAssembler* masm) {
x4, FieldMemOperand(x1, JSFunction::kSharedFunctionInfoOffset));
__ Ldr(w4, FieldMemOperand(x4, SharedFunctionInfo::kFlagsOffset));
__ DecodeField<SharedFunctionInfo::FunctionKindBits>(w4);
- __ JumpIfIsInRange(w4, kDefaultDerivedConstructor, kDerivedConstructor,
- &not_create_implicit_receiver);
+ __ JumpIfIsInRange(
+ w4, static_cast<uint32_t>(FunctionKind::kDefaultDerivedConstructor),
+ static_cast<uint32_t>(FunctionKind::kDerivedConstructor),
+ &not_create_implicit_receiver);
// If not derived class constructor: Allocate the new receiver object.
__ IncrementCounter(masm->isolate()->counters()->constructed_objects(), 1, x4,
@@ -1083,7 +1085,8 @@ static void TailCallRuntimeIfMarkerEquals(MacroAssembler* masm,
Runtime::FunctionId function_id) {
ASM_CODE_COMMENT(masm);
Label no_match;
- __ CompareAndBranch(actual_marker, Operand(expected_marker), ne, &no_match);
+ __ CompareAndBranch(actual_marker, Operand(static_cast<int>(expected_marker)),
+ ne, &no_match);
GenerateTailCallToReturnedCode(masm, function_id);
__ bind(&no_match);
}
@@ -1891,10 +1894,12 @@ static void Generate_InterpreterEnterBytecode(MacroAssembler* masm) {
__ Ldr(kJavaScriptCallCodeStartRegister,
MemOperand(kInterpreterDispatchTableRegister, x1));
- UseScratchRegisterScope temps(masm);
- temps.Exclude(x17);
- __ Mov(x17, kJavaScriptCallCodeStartRegister);
- __ Jump(x17);
+ {
+ UseScratchRegisterScope temps(masm);
+ temps.Exclude(x17);
+ __ Mov(x17, kJavaScriptCallCodeStartRegister);
+ __ Jump(x17);
+ }
__ Bind(&return_from_bytecode_dispatch);
@@ -1932,8 +1937,12 @@ static void Generate_InterpreterEnterBytecode(MacroAssembler* masm) {
__ Bind(&trampoline_loaded);
- __ Add(x17, x1, Operand(interpreter_entry_return_pc_offset.value()));
- __ Br(x17);
+ {
+ UseScratchRegisterScope temps(masm);
+ temps.Exclude(x17);
+ __ Add(x17, x1, Operand(interpreter_entry_return_pc_offset.value()));
+ __ Br(x17);
+ }
}
void Builtins::Generate_InterpreterEnterAtNextBytecode(MacroAssembler* masm) {
diff --git a/deps/v8/src/builtins/array-from.tq b/deps/v8/src/builtins/array-from.tq
index 5fcdefccc3..f1783b58f6 100644
--- a/deps/v8/src/builtins/array-from.tq
+++ b/deps/v8/src/builtins/array-from.tq
@@ -110,9 +110,9 @@ ArrayFrom(js-implicit context: NativeContext, receiver: JSAny)(...arguments):
try {
mappedValue =
Call(context, UnsafeCast<Callable>(mapfn), thisArg, nextValue, k);
- } catch (e) {
+ } catch (e, message) {
iterator::IteratorCloseOnException(iteratorRecord);
- ReThrow(context, e);
+ ReThrowWithMessage(context, e, message);
}
} else {
mappedValue = nextValue;
@@ -123,9 +123,9 @@ ArrayFrom(js-implicit context: NativeContext, receiver: JSAny)(...arguments):
// return ? IteratorClose(iteratorRecord, defineStatus).
try {
FastCreateDataProperty(a, k, mappedValue);
- } catch (e) deferred {
+ } catch (e, message) deferred {
iterator::IteratorCloseOnException(iteratorRecord);
- ReThrow(context, e);
+ ReThrowWithMessage(context, e, message);
}
// x. Set k to k + 1.
k += 1;
diff --git a/deps/v8/src/builtins/array-join.tq b/deps/v8/src/builtins/array-join.tq
index 12988af2a2..a4bf6f002d 100644
--- a/deps/v8/src/builtins/array-join.tq
+++ b/deps/v8/src/builtins/array-join.tq
@@ -537,9 +537,9 @@ transitioning macro CycleProtectedArrayJoin<T: type>(
ArrayJoin<T>(useToLocaleString, o, sep, len, locales, options);
JoinStackPopInline(o);
return result;
- } catch (e) deferred {
+ } catch (e, message) deferred {
JoinStackPopInline(o);
- ReThrow(context, e);
+ ReThrowWithMessage(context, e, message);
}
} else {
return kEmptyString;
diff --git a/deps/v8/src/builtins/base.tq b/deps/v8/src/builtins/base.tq
index 3726207e1d..69e9faef53 100644
--- a/deps/v8/src/builtins/base.tq
+++ b/deps/v8/src/builtins/base.tq
@@ -231,8 +231,6 @@ extern class ObjectBoilerplateDescription extends FixedArray;
extern class ClosureFeedbackCellArray extends FixedArray;
extern class ScriptContextTable extends FixedArray;
-type LayoutDescriptor extends ByteArray
- generates 'TNode<LayoutDescriptor>';
extern class TransitionArray extends WeakFixedArray;
extern operator '.length_intptr' macro LoadAndUntagWeakFixedArrayLength(
@@ -777,7 +775,9 @@ macro Equal(implicit context: Context)(left: JSAny, right: JSAny): Boolean {
extern macro StrictEqual(JSAny, JSAny): Boolean;
extern macro SmiLexicographicCompare(Smi, Smi): Smi;
-extern runtime ReThrow(Context, JSAny): never;
+
+extern runtime ReThrowWithMessage(
+ Context, JSAny, TheHole | JSMessageObject): never;
extern runtime Throw(implicit context: Context)(JSAny): never;
extern runtime ThrowInvalidStringLength(Context): never;
@@ -952,7 +952,6 @@ extern operator '+' macro ConstexprInt32Add(
extern operator '*' macro ConstexprInt31Mul(
constexpr int31, constexpr int31): constexpr int31;
extern operator '-' macro Int32Sub(int16, int16): int32;
-extern operator '-' macro Int32Sub(uint16, uint16): int32;
extern operator '-' macro Int32Sub(int32, int32): int32;
extern operator '-' macro Uint32Sub(uint32, uint32): uint32;
extern operator '*' macro Int32Mul(int32, int32): int32;
@@ -1871,6 +1870,8 @@ extern macro FeedbackIteratorHandlerOffset(): intptr;
extern operator '[]' macro LoadWeakFixedArrayElement(
WeakFixedArray, intptr): MaybeObject;
+extern operator '[]' macro LoadUint8Ptr(RawPtr<uint8>, intptr): uint8;
+
const kNoHashSentinel:
constexpr int32 generates 'PropertyArray::kNoHashSentinel';
extern macro LoadNameHash(Name): uint32;
diff --git a/deps/v8/src/builtins/builtins-async-function-gen.cc b/deps/v8/src/builtins/builtins-async-function-gen.cc
index 039f4ade69..1373e66397 100644
--- a/deps/v8/src/builtins/builtins-async-function-gen.cc
+++ b/deps/v8/src/builtins/builtins-async-function-gen.cc
@@ -265,6 +265,7 @@ void AsyncFunctionBuiltinsAssembler::AsyncFunctionAwait(
Label after_debug_hook(this), call_debug_hook(this, Label::kDeferred);
GotoIf(HasAsyncEventDelegate(), &call_debug_hook);
+ GotoIf(IsDebugActive(), &call_debug_hook);
Goto(&after_debug_hook);
BIND(&after_debug_hook);
diff --git a/deps/v8/src/builtins/builtins-async-gen.cc b/deps/v8/src/builtins/builtins-async-gen.cc
index 0adb95ad43..08cea2e74e 100644
--- a/deps/v8/src/builtins/builtins-async-gen.cc
+++ b/deps/v8/src/builtins/builtins-async-gen.cc
@@ -283,9 +283,8 @@ void AsyncBuiltinsAssembler::InitializeNativeClosure(
// which almost doubles the size of `await` builtins (unnecessarily).
TNode<Smi> builtin_id = LoadObjectField<Smi>(
shared_info, SharedFunctionInfo::kFunctionDataOffset);
- TNode<Code> code = LoadBuiltin(builtin_id);
- StoreObjectFieldNoWriteBarrier(function, JSFunction::kCodeOffset,
- ToCodeT(code));
+ TNode<CodeT> code = LoadBuiltin(builtin_id);
+ StoreObjectFieldNoWriteBarrier(function, JSFunction::kCodeOffset, code);
}
TNode<JSFunction> AsyncBuiltinsAssembler::CreateUnwrapClosure(
diff --git a/deps/v8/src/builtins/builtins-async-generator-gen.cc b/deps/v8/src/builtins/builtins-async-generator-gen.cc
index 87c1d443a6..384fba3375 100644
--- a/deps/v8/src/builtins/builtins-async-generator-gen.cc
+++ b/deps/v8/src/builtins/builtins-async-generator-gen.cc
@@ -381,7 +381,6 @@ TF_BUILTIN(AsyncGeneratorAwaitCaught, AsyncGeneratorBuiltinsAssembler) {
}
TF_BUILTIN(AsyncGeneratorResumeNext, AsyncGeneratorBuiltinsAssembler) {
- using Descriptor = AsyncGeneratorResumeNextDescriptor;
const auto generator =
Parameter<JSAsyncGeneratorObject>(Descriptor::kGenerator);
const auto context = Parameter<Context>(Descriptor::kContext);
@@ -542,7 +541,6 @@ TF_BUILTIN(AsyncGeneratorResolve, AsyncGeneratorBuiltinsAssembler) {
}
TF_BUILTIN(AsyncGeneratorReject, AsyncGeneratorBuiltinsAssembler) {
- using Descriptor = AsyncGeneratorRejectDescriptor;
const auto generator =
Parameter<JSAsyncGeneratorObject>(Descriptor::kGenerator);
const auto value = Parameter<Object>(Descriptor::kValue);
diff --git a/deps/v8/src/builtins/builtins-collections-gen.cc b/deps/v8/src/builtins/builtins-collections-gen.cc
index f4885efed8..818b8373de 100644
--- a/deps/v8/src/builtins/builtins-collections-gen.cc
+++ b/deps/v8/src/builtins/builtins-collections-gen.cc
@@ -333,8 +333,11 @@ void BaseCollectionsAssembler::AddConstructorEntriesFromIterable(
}
BIND(&if_exception);
{
+ TNode<HeapObject> message = GetPendingMessage();
+ SetPendingMessage(TheHoleConstant());
IteratorCloseOnException(context, iterator);
- CallRuntime(Runtime::kReThrow, context, var_exception.value());
+ CallRuntime(Runtime::kReThrowWithMessage, context, var_exception.value(),
+ message);
Unreachable();
}
BIND(&exit);
diff --git a/deps/v8/src/builtins/builtins-constructor-gen.cc b/deps/v8/src/builtins/builtins-constructor-gen.cc
index 28af8bfabc..9fff2f4911 100644
--- a/deps/v8/src/builtins/builtins-constructor-gen.cc
+++ b/deps/v8/src/builtins/builtins-constructor-gen.cc
@@ -254,11 +254,9 @@ TF_BUILTIN(FastNewClosure, ConstructorBuiltinsAssembler) {
StoreObjectFieldNoWriteBarrier(result, JSFunction::kSharedFunctionInfoOffset,
shared_function_info);
StoreObjectFieldNoWriteBarrier(result, JSFunction::kContextOffset, context);
- Handle<Code> lazy_builtin_handle = BUILTIN_CODE(isolate(), CompileLazy);
- // TODO(v8:11880): support embedding of CodeDataContainers.
- TNode<Code> lazy_builtin = HeapConstant(lazy_builtin_handle);
- StoreObjectFieldNoWriteBarrier(result, JSFunction::kCodeOffset,
- ToCodeT(lazy_builtin));
+ TNode<CodeT> lazy_builtin =
+ HeapConstant(BUILTIN_CODET(isolate(), CompileLazy));
+ StoreObjectFieldNoWriteBarrier(result, JSFunction::kCodeOffset, lazy_builtin);
Return(result);
}
diff --git a/deps/v8/src/builtins/builtins-dataview.cc b/deps/v8/src/builtins/builtins-dataview.cc
index 7bd277beaf..bab7ba4eeb 100644
--- a/deps/v8/src/builtins/builtins-dataview.cc
+++ b/deps/v8/src/builtins/builtins-dataview.cc
@@ -21,6 +21,7 @@ namespace internal {
BUILTIN(DataViewConstructor) {
const char* const kMethodName = "DataView constructor";
HandleScope scope(isolate);
+ // 1. If NewTarget is undefined, throw a TypeError exception.
if (args.new_target()->IsUndefined(isolate)) { // [[Call]]
THROW_NEW_ERROR_RETURN_FAILURE(
isolate, NewTypeError(MessageTemplate::kConstructorNotFunction,
@@ -55,8 +56,8 @@ BUILTIN(DataViewConstructor) {
kMethodName)));
}
- // 5. Let bufferByteLength be buffer.[[ArrayBufferByteLength]].
- size_t const buffer_byte_length = array_buffer->byte_length();
+ // 5. Let bufferByteLength be ArrayBufferByteLength(buffer, SeqCst).
+ size_t buffer_byte_length = array_buffer->GetByteLength();
// 6. If offset > bufferByteLength, throw a RangeError exception.
if (view_byte_offset > buffer_byte_length) {
@@ -64,15 +65,22 @@ BUILTIN(DataViewConstructor) {
isolate, NewRangeError(MessageTemplate::kInvalidOffset, byte_offset));
}
+ // 7. Let bufferIsResizable be IsResizableArrayBuffer(buffer).
+ // 8. Let byteLengthChecked be empty.
+ // 9. If bufferIsResizable is true and byteLength is undefined, then
+ // a. Let viewByteLength be auto.
+ // 10. Else if byteLength is undefined, then
+ // a. Let viewByteLength be bufferByteLength - offset.
size_t view_byte_length;
+ bool length_tracking = false;
if (byte_length->IsUndefined(isolate)) {
- // 7. If byteLength is undefined, then
- // a. Let viewByteLength be bufferByteLength - offset.
view_byte_length = buffer_byte_length - view_byte_offset;
+ length_tracking = array_buffer->is_resizable();
} else {
- // 8. Else,
- // a. Let viewByteLength be ? ToIndex(byteLength).
- // b. If offset+viewByteLength > bufferByteLength, throw a
+ // 11. Else,
+ // a. Set byteLengthChecked be ? ToIndex(byteLength).
+ // b. Let viewByteLength be byteLengthChecked.
+ // c. If offset + viewByteLength > bufferByteLength, throw a
// RangeError exception.
ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
isolate, byte_length,
@@ -85,9 +93,9 @@ BUILTIN(DataViewConstructor) {
view_byte_length = byte_length->Number();
}
- // 9. Let O be ? OrdinaryCreateFromConstructor(NewTarget,
- // "%DataViewPrototype%", «[[DataView]], [[ViewedArrayBuffer]],
- // [[ByteLength]], [[ByteOffset]]»).
+ // 12. Let O be ? OrdinaryCreateFromConstructor(NewTarget,
+ // "%DataViewPrototype%", «[[DataView]], [[ViewedArrayBuffer]],
+ // [[ByteLength]], [[ByteOffset]]»).
Handle<JSObject> result;
ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
isolate, result,
@@ -97,26 +105,30 @@ BUILTIN(DataViewConstructor) {
// TODO(v8:10391, saelo): Handle external pointers in EmbedderDataSlot
data_view->SetEmbedderField(i, Smi::zero());
}
+ data_view->set_bit_field(0);
+ data_view->set_is_backed_by_rab(array_buffer->is_resizable() &&
+ !array_buffer->is_shared());
+ data_view->set_is_length_tracking(length_tracking);
- // We have to set the internal slots before the detached check on step 10 or
+ // We have to set the internal slots before the checks on steps 13 - 17 or
  // the TorqueGeneratedClassVerifier ends up complaining that the slot is
// empty or invalid on heap teardown.
- // The result object is not observable from JavaScript when step 10 early
- // aborts so it is fine to set internal slots here.
+ // The result object is not observable from JavaScript when steps 13 - 17
+ // early abort so it is fine to set internal slots here.
- // 11. Set O.[[ViewedArrayBuffer]] to buffer.
+ // 18. Set O.[[ViewedArrayBuffer]] to buffer.
data_view->set_buffer(*array_buffer);
- // 12. Set O.[[ByteLength]] to viewByteLength.
- data_view->set_byte_length(view_byte_length);
+ // 19. Set O.[[ByteLength]] to viewByteLength.
+ data_view->set_byte_length(length_tracking ? 0 : view_byte_length);
- // 13. Set O.[[ByteOffset]] to offset.
+ // 20. Set O.[[ByteOffset]] to offset.
data_view->set_byte_offset(view_byte_offset);
data_view->set_data_pointer(
isolate,
static_cast<uint8_t*>(array_buffer->backing_store()) + view_byte_offset);
- // 10. If IsDetachedBuffer(buffer) is true, throw a TypeError exception.
+ // 13. If IsDetachedBuffer(buffer) is true, throw a TypeError exception.
if (array_buffer->was_detached()) {
THROW_NEW_ERROR_RETURN_FAILURE(
isolate, NewTypeError(MessageTemplate::kDetachedOperation,
@@ -124,7 +136,27 @@ BUILTIN(DataViewConstructor) {
kMethodName)));
}
- // 14. Return O.
+ // 14. Let getBufferByteLength be
+ // MakeIdempotentArrayBufferByteLengthGetter(SeqCst).
+ // 15. Set bufferByteLength be getBufferByteLength(buffer).
+ buffer_byte_length = array_buffer->GetByteLength();
+
+ // 16. If offset > bufferByteLength, throw a RangeError exception.
+ if (view_byte_offset > buffer_byte_length) {
+ THROW_NEW_ERROR_RETURN_FAILURE(
+ isolate, NewRangeError(MessageTemplate::kInvalidOffset, byte_offset));
+ }
+
+ // 17. If byteLengthChecked is not empty, then
+ // a. If offset + viewByteLength > bufferByteLength, throw a RangeError
+ // exception.
+ if (!length_tracking &&
+ view_byte_offset + view_byte_length > buffer_byte_length) {
+ THROW_NEW_ERROR_RETURN_FAILURE(
+ isolate, NewRangeError(MessageTemplate::kInvalidDataViewLength));
+ }
+
+ // 21. Return O.
return *result;
}
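
With resizable ArrayBuffers, the prototype lookup inside OrdinaryCreateFromConstructor (step 12) can run user code that shrinks or detaches the buffer, which is why the constructor above re-reads the byte length and repeats the bounds checks after allocating the result, and why the internal slots are written before those checks. A standalone sketch of just the re-validation arithmetic, with a hypothetical Buffer type standing in for JSArrayBuffer (this is not V8's API):

#include <cstddef>
#include <stdexcept>

struct Buffer {
  std::size_t byte_length;
  bool detached;
  // Models the SeqCst ArrayBufferByteLength read; detached buffers report 0.
  std::size_t GetByteLength() const { return detached ? 0 : byte_length; }
};

// Steps 13-17: user code may have detached or shrunk the buffer since step 5.
void RevalidateDataView(const Buffer& buffer, std::size_t view_byte_offset,
                        std::size_t view_byte_length, bool length_tracking) {
  if (buffer.detached)
    throw std::runtime_error("TypeError: detached ArrayBuffer");     // step 13
  const std::size_t buffer_byte_length = buffer.GetByteLength();     // step 15
  if (view_byte_offset > buffer_byte_length)
    throw std::out_of_range("RangeError: invalid offset");           // step 16
  if (!length_tracking &&
      view_byte_offset + view_byte_length > buffer_byte_length)
    throw std::out_of_range("RangeError: invalid DataView length");  // step 17
}

int main() {
  Buffer buffer{16, false};
  RevalidateDataView(buffer, 4, 8, false);  // ok: 4 + 8 <= 16
  return 0;
}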
diff --git a/deps/v8/src/builtins/builtins-date.cc b/deps/v8/src/builtins/builtins-date.cc
index cb264279d5..c1264891f6 100644
--- a/deps/v8/src/builtins/builtins-date.cc
+++ b/deps/v8/src/builtins/builtins-date.cc
@@ -445,12 +445,12 @@ BUILTIN(DatePrototypeSetMinutes) {
// ES6 section 20.3.4.25 Date.prototype.setMonth ( month, date )
BUILTIN(DatePrototypeSetMonth) {
HandleScope scope(isolate);
- CHECK_RECEIVER(JSDate, date, "Date.prototype.setMonth");
+ CHECK_RECEIVER(JSDate, this_date, "Date.prototype.setMonth");
int const argc = args.length() - 1;
Handle<Object> month = args.atOrUndefined(isolate, 1);
ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, month,
Object::ToNumber(isolate, month));
- double time_val = date->value().Number();
+ double time_val = this_date->value().Number();
if (!std::isnan(time_val)) {
int64_t const time_ms = static_cast<int64_t>(time_val);
int64_t local_time_ms = isolate->date_cache()->ToLocal(time_ms);
@@ -468,7 +468,7 @@ BUILTIN(DatePrototypeSetMonth) {
}
time_val = MakeDate(MakeDay(year, m, dt), time_within_day);
}
- return SetLocalDateValue(isolate, date, time_val);
+ return SetLocalDateValue(isolate, this_date, time_val);
}
// ES6 section 20.3.4.26 Date.prototype.setSeconds ( sec, ms )
@@ -662,12 +662,12 @@ BUILTIN(DatePrototypeSetUTCMinutes) {
// ES6 section 20.3.4.31 Date.prototype.setUTCMonth ( month, date )
BUILTIN(DatePrototypeSetUTCMonth) {
HandleScope scope(isolate);
- CHECK_RECEIVER(JSDate, date, "Date.prototype.setUTCMonth");
+ CHECK_RECEIVER(JSDate, this_date, "Date.prototype.setUTCMonth");
int const argc = args.length() - 1;
Handle<Object> month = args.atOrUndefined(isolate, 1);
ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, month,
Object::ToNumber(isolate, month));
- double time_val = date->value().Number();
+ double time_val = this_date->value().Number();
if (!std::isnan(time_val)) {
int64_t const time_ms = static_cast<int64_t>(time_val);
int days = isolate->date_cache()->DaysFromTime(time_ms);
@@ -684,7 +684,7 @@ BUILTIN(DatePrototypeSetUTCMonth) {
}
time_val = MakeDate(MakeDay(year, m, dt), time_within_day);
}
- return *JSDate::SetValue(date, DateCache::TimeClip(time_val));
+ return *JSDate::SetValue(this_date, DateCache::TimeClip(time_val));
}
// ES6 section 20.3.4.34 Date.prototype.setUTCSeconds ( sec, ms )
diff --git a/deps/v8/src/builtins/builtins-definitions.h b/deps/v8/src/builtins/builtins-definitions.h
index db4fc38189..538a3970d4 100644
--- a/deps/v8/src/builtins/builtins-definitions.h
+++ b/deps/v8/src/builtins/builtins-definitions.h
@@ -704,17 +704,29 @@ namespace internal {
\
/* Binary ops with feedback collection */ \
TFC(Add_Baseline, BinaryOp_Baseline) \
+ TFC(AddSmi_Baseline, BinaryOp_Baseline) \
TFC(Subtract_Baseline, BinaryOp_Baseline) \
+ TFC(SubtractSmi_Baseline, BinaryOp_Baseline) \
TFC(Multiply_Baseline, BinaryOp_Baseline) \
+ TFC(MultiplySmi_Baseline, BinaryOp_Baseline) \
TFC(Divide_Baseline, BinaryOp_Baseline) \
+ TFC(DivideSmi_Baseline, BinaryOp_Baseline) \
TFC(Modulus_Baseline, BinaryOp_Baseline) \
+ TFC(ModulusSmi_Baseline, BinaryOp_Baseline) \
TFC(Exponentiate_Baseline, BinaryOp_Baseline) \
+ TFC(ExponentiateSmi_Baseline, BinaryOp_Baseline) \
TFC(BitwiseAnd_Baseline, BinaryOp_Baseline) \
+ TFC(BitwiseAndSmi_Baseline, BinaryOp_Baseline) \
TFC(BitwiseOr_Baseline, BinaryOp_Baseline) \
+ TFC(BitwiseOrSmi_Baseline, BinaryOp_Baseline) \
TFC(BitwiseXor_Baseline, BinaryOp_Baseline) \
+ TFC(BitwiseXorSmi_Baseline, BinaryOp_Baseline) \
TFC(ShiftLeft_Baseline, BinaryOp_Baseline) \
+ TFC(ShiftLeftSmi_Baseline, BinaryOp_Baseline) \
TFC(ShiftRight_Baseline, BinaryOp_Baseline) \
+ TFC(ShiftRightSmi_Baseline, BinaryOp_Baseline) \
TFC(ShiftRightLogical_Baseline, BinaryOp_Baseline) \
+ TFC(ShiftRightLogicalSmi_Baseline, BinaryOp_Baseline) \
\
TFC(Add_WithFeedback, BinaryOp_WithFeedback) \
TFC(Subtract_WithFeedback, BinaryOp_WithFeedback) \
diff --git a/deps/v8/src/builtins/builtins-function.cc b/deps/v8/src/builtins/builtins-function.cc
index b12f1ec6ea..23350c3860 100644
--- a/deps/v8/src/builtins/builtins-function.cc
+++ b/deps/v8/src/builtins/builtins-function.cc
@@ -47,7 +47,7 @@ MaybeHandle<Object> CreateDynamicFunction(Isolate* isolate,
IncrementalStringBuilder builder(isolate);
builder.AppendCharacter('(');
builder.AppendCString(token);
- builder.AppendCString(" anonymous(");
+ builder.AppendCStringLiteral(" anonymous(");
if (argc > 1) {
for (int i = 1; i < argc; ++i) {
if (i > 1) builder.AppendCharacter(',');
@@ -60,14 +60,14 @@ MaybeHandle<Object> CreateDynamicFunction(Isolate* isolate,
}
builder.AppendCharacter('\n');
parameters_end_pos = builder.Length();
- builder.AppendCString(") {\n");
+ builder.AppendCStringLiteral(") {\n");
if (argc > 0) {
Handle<String> body;
ASSIGN_RETURN_ON_EXCEPTION(
isolate, body, Object::ToString(isolate, args.at(argc)), Object);
builder.AppendString(body);
}
- builder.AppendCString("\n})");
+ builder.AppendCStringLiteral("\n})");
ASSIGN_RETURN_ON_EXCEPTION(isolate, source, builder.Finish(), Object);
}
diff --git a/deps/v8/src/builtins/builtins-internal-gen.cc b/deps/v8/src/builtins/builtins-internal-gen.cc
index dc5a49640e..4777983a4e 100644
--- a/deps/v8/src/builtins/builtins-internal-gen.cc
+++ b/deps/v8/src/builtins/builtins-internal-gen.cc
@@ -110,8 +110,9 @@ TF_BUILTIN(DebugBreakTrampoline, CodeStubAssembler) {
BIND(&tailcall_to_shared);
// Tail call into code object on the SharedFunctionInfo.
- TNode<Code> code = GetSharedFunctionInfoCode(shared);
- TailCallJSCode(code, context, function, new_target, arg_count);
+ TNode<CodeT> code = GetSharedFunctionInfoCode(shared);
+ // TODO(v8:11880): call CodeT directly.
+ TailCallJSCode(FromCodeT(code), context, function, new_target, arg_count);
}
class WriteBarrierCodeStubAssembler : public CodeStubAssembler {
@@ -247,7 +248,8 @@ class WriteBarrierCodeStubAssembler : public CodeStubAssembler {
void GenerationalWriteBarrier(SaveFPRegsMode fp_mode) {
Label incremental_wb(this), test_old_to_young_flags(this),
- store_buffer_exit(this), store_buffer_incremental_wb(this), next(this);
+ remembered_set_only(this), remembered_set_and_incremental_wb(this),
+ next(this);
// When incremental marking is not on, we skip cross generation pointer
// checking here, because there are checks for
@@ -257,7 +259,7 @@ class WriteBarrierCodeStubAssembler : public CodeStubAssembler {
// stub, which serves as the cross generation checking.
auto slot =
UncheckedParameter<IntPtrT>(WriteBarrierDescriptor::kSlotAddress);
- Branch(IsMarking(), &test_old_to_young_flags, &store_buffer_exit);
+ Branch(IsMarking(), &test_old_to_young_flags, &remembered_set_only);
BIND(&test_old_to_young_flags);
{
@@ -274,10 +276,11 @@ class WriteBarrierCodeStubAssembler : public CodeStubAssembler {
UncheckedParameter<Object>(WriteBarrierDescriptor::kObject));
TNode<BoolT> object_is_young =
IsPageFlagSet(object, MemoryChunk::kIsInYoungGenerationMask);
- Branch(object_is_young, &incremental_wb, &store_buffer_incremental_wb);
+ Branch(object_is_young, &incremental_wb,
+ &remembered_set_and_incremental_wb);
}
- BIND(&store_buffer_exit);
+ BIND(&remembered_set_only);
{
TNode<IntPtrT> object = BitcastTaggedToWord(
UncheckedParameter<Object>(WriteBarrierDescriptor::kObject));
@@ -285,7 +288,7 @@ class WriteBarrierCodeStubAssembler : public CodeStubAssembler {
Goto(&next);
}
- BIND(&store_buffer_incremental_wb);
+ BIND(&remembered_set_and_incremental_wb);
{
TNode<IntPtrT> object = BitcastTaggedToWord(
UncheckedParameter<Object>(WriteBarrierDescriptor::kObject));
@@ -1325,7 +1328,7 @@ TF_BUILTIN(InstantiateAsmJs, CodeStubAssembler) {
// On failure, tail call back to regular JavaScript by re-calling the given
// function which has been reset to the compile lazy builtin.
- // TODO(v8:11880): call CodeT instead.
+ // TODO(v8:11880): call CodeT directly.
TNode<Code> code = FromCodeT(LoadJSFunctionCode(function));
TailCallJSCode(code, context, function, new_target, arg_count);
}
diff --git a/deps/v8/src/builtins/builtins-intl.cc b/deps/v8/src/builtins/builtins-intl.cc
index 6fd36dd8e0..1d72a3ae32 100644
--- a/deps/v8/src/builtins/builtins-intl.cc
+++ b/deps/v8/src/builtins/builtins-intl.cc
@@ -238,7 +238,8 @@ Handle<JSFunction> CreateBoundFunction(Isolate* isolate,
Handle<SharedFunctionInfo> info =
isolate->factory()->NewSharedFunctionInfoForBuiltin(
- isolate->factory()->empty_string(), builtin, kNormalFunction);
+ isolate->factory()->empty_string(), builtin,
+ FunctionKind::kNormalFunction);
info->set_internal_formal_parameter_count(JSParameterCount(len));
info->set_length(len);
diff --git a/deps/v8/src/builtins/builtins-iterator-gen.cc b/deps/v8/src/builtins/builtins-iterator-gen.cc
index 11c11b00b0..6656a37a1c 100644
--- a/deps/v8/src/builtins/builtins-iterator-gen.cc
+++ b/deps/v8/src/builtins/builtins-iterator-gen.cc
@@ -268,8 +268,11 @@ TNode<JSArray> IteratorBuiltinsAssembler::StringListFromIterable(
// 2. Return ? IteratorClose(iteratorRecord, error).
BIND(&if_exception);
+ TNode<HeapObject> message = GetPendingMessage();
+ SetPendingMessage(TheHoleConstant());
IteratorCloseOnException(context, iterator_record);
- CallRuntime(Runtime::kReThrow, context, var_exception.value());
+ CallRuntime(Runtime::kReThrowWithMessage, context, var_exception.value(),
+ message);
Unreachable();
}
}
diff --git a/deps/v8/src/builtins/builtins-lazy-gen.cc b/deps/v8/src/builtins/builtins-lazy-gen.cc
index 2ef9aa0734..5e888ba563 100644
--- a/deps/v8/src/builtins/builtins-lazy-gen.cc
+++ b/deps/v8/src/builtins/builtins-lazy-gen.cc
@@ -15,18 +15,18 @@ namespace v8 {
namespace internal {
void LazyBuiltinsAssembler::GenerateTailCallToJSCode(
- TNode<Code> code, TNode<JSFunction> function) {
+ TNode<CodeT> code, TNode<JSFunction> function) {
auto argc = UncheckedParameter<Int32T>(Descriptor::kActualArgumentsCount);
auto context = Parameter<Context>(Descriptor::kContext);
auto new_target = Parameter<Object>(Descriptor::kNewTarget);
-
- TailCallJSCode(code, context, function, new_target, argc);
+ // TODO(v8:11880): call CodeT directly.
+ TailCallJSCode(FromCodeT(code), context, function, new_target, argc);
}
void LazyBuiltinsAssembler::GenerateTailCallToReturnedCode(
Runtime::FunctionId function_id, TNode<JSFunction> function) {
auto context = Parameter<Context>(Descriptor::kContext);
- TNode<Code> code = CAST(CallRuntime(function_id, context, function));
+ TNode<CodeT> code = CAST(CallRuntime(function_id, context, function));
GenerateTailCallToJSCode(code, function);
}
@@ -34,7 +34,9 @@ void LazyBuiltinsAssembler::TailCallRuntimeIfMarkerEquals(
TNode<Uint32T> marker, OptimizationMarker expected_marker,
Runtime::FunctionId function_id, TNode<JSFunction> function) {
Label no_match(this);
- GotoIfNot(Word32Equal(marker, Uint32Constant(expected_marker)), &no_match);
+ GotoIfNot(Word32Equal(marker,
+ Uint32Constant(static_cast<uint32_t>(expected_marker))),
+ &no_match);
GenerateTailCallToReturnedCode(function_id, function);
BIND(&no_match);
}
@@ -78,14 +80,13 @@ void LazyBuiltinsAssembler::MaybeTailCallOptimizedCodeSlot(
feedback_vector, FeedbackVector::kMaybeOptimizedCodeOffset);
// Optimized code slot is a weak reference to CodeT object.
- TNode<CodeT> code_t = CAST(GetHeapObjectAssumeWeak(
+ TNode<CodeT> optimized_code = CAST(GetHeapObjectAssumeWeak(
maybe_optimized_code_entry, &heal_optimized_code_slot));
- TNode<Code> optimized_code = FromCodeT(code_t);
// Check if the optimized code is marked for deopt. If it is, call the
// runtime to clear it.
TNode<CodeDataContainer> code_data_container =
- CodeDataContainerFromCodeT(code_t);
+ CodeDataContainerFromCodeT(optimized_code);
TNode<Int32T> code_kind_specific_flags = LoadObjectField<Int32T>(
code_data_container, CodeDataContainer::kKindSpecificFlagsOffset);
GotoIf(IsSetWord32<Code::MarkedForDeoptimizationField>(
@@ -94,10 +95,8 @@ void LazyBuiltinsAssembler::MaybeTailCallOptimizedCodeSlot(
// Optimized code is good, get it into the closure and link the closure into
// the optimized functions list, then tail call the optimized code.
- StoreObjectField(function, JSFunction::kCodeOffset,
- ToCodeT(optimized_code, code_data_container));
+ StoreObjectField(function, JSFunction::kCodeOffset, optimized_code);
Comment("MaybeTailCallOptimizedCodeSlot:: GenerateTailCallToJSCode");
- // TODO(v8:11880): call CodeT directly.
GenerateTailCallToJSCode(optimized_code, function);
// Optimized code slot contains deoptimized code or code is cleared and
@@ -122,7 +121,7 @@ void LazyBuiltinsAssembler::CompileLazy(TNode<JSFunction> function) {
TNode<SharedFunctionInfo> shared =
CAST(LoadObjectField(function, JSFunction::kSharedFunctionInfoOffset));
TVARIABLE(Uint16T, sfi_data_type);
- TNode<Code> sfi_code =
+ TNode<CodeT> sfi_code =
GetSharedFunctionInfoCode(shared, &sfi_data_type, &compile_function);
TNode<HeapObject> feedback_cell_value = LoadFeedbackCellValue(function);
@@ -146,14 +145,14 @@ void LazyBuiltinsAssembler::CompileLazy(TNode<JSFunction> function) {
// optimized Code object (we'd have tail-called it above). A usual case would
// be the InterpreterEntryTrampoline to start executing existing bytecode.
BIND(&maybe_use_sfi_code);
- CSA_DCHECK(this, TaggedNotEqual(sfi_code, HeapConstant(BUILTIN_CODE(
+ CSA_DCHECK(this, TaggedNotEqual(sfi_code, HeapConstant(BUILTIN_CODET(
isolate(), CompileLazy))));
- StoreObjectField(function, JSFunction::kCodeOffset, ToCodeT(sfi_code));
+ StoreObjectField(function, JSFunction::kCodeOffset, sfi_code);
Label tailcall_code(this);
Label baseline(this);
- TVARIABLE(Code, code);
+ TVARIABLE(CodeT, code);
// Check if we have baseline code.
GotoIf(InstanceTypeEqual(sfi_data_type.value(), CODET_TYPE), &baseline);
@@ -163,7 +162,7 @@ void LazyBuiltinsAssembler::CompileLazy(TNode<JSFunction> function) {
BIND(&baseline);
// Ensure we have a feedback vector.
- code = Select<Code>(
+ code = Select<CodeT>(
IsFeedbackVector(feedback_cell_value), [=]() { return sfi_code; },
[=]() {
return CAST(CallRuntime(Runtime::kInstallBaselineCode,
@@ -188,12 +187,9 @@ TF_BUILTIN(CompileLazy, LazyBuiltinsAssembler) {
TF_BUILTIN(CompileLazyDeoptimizedCode, LazyBuiltinsAssembler) {
auto function = Parameter<JSFunction>(Descriptor::kTarget);
- Handle<Code> compile_lazy = BUILTIN_CODE(isolate(), CompileLazy);
- TNode<Code> code = HeapConstant(compile_lazy);
+ TNode<CodeT> code = HeapConstant(BUILTIN_CODET(isolate(), CompileLazy));
// Set the code slot inside the JSFunction to CompileLazy.
- // TODO(v8:11880): support embedding of CodeDataContainer constants.
- StoreObjectField(function, JSFunction::kCodeOffset, ToCodeT(code));
- // TODO(v8:11880): call CodeT directly.
+ StoreObjectField(function, JSFunction::kCodeOffset, code);
GenerateTailCallToJSCode(code, function);
}
diff --git a/deps/v8/src/builtins/builtins-lazy-gen.h b/deps/v8/src/builtins/builtins-lazy-gen.h
index b51dcb58d4..623811663e 100644
--- a/deps/v8/src/builtins/builtins-lazy-gen.h
+++ b/deps/v8/src/builtins/builtins-lazy-gen.h
@@ -17,7 +17,7 @@ class LazyBuiltinsAssembler : public CodeStubAssembler {
explicit LazyBuiltinsAssembler(compiler::CodeAssemblerState* state)
: CodeStubAssembler(state) {}
- void GenerateTailCallToJSCode(TNode<Code> code, TNode<JSFunction> function);
+ void GenerateTailCallToJSCode(TNode<CodeT> code, TNode<JSFunction> function);
void GenerateTailCallToReturnedCode(Runtime::FunctionId function_id,
TNode<JSFunction> function);
diff --git a/deps/v8/src/builtins/builtins-microtask-queue-gen.cc b/deps/v8/src/builtins/builtins-microtask-queue-gen.cc
index ab7dcf832f..9edc8ce00c 100644
--- a/deps/v8/src/builtins/builtins-microtask-queue-gen.cc
+++ b/deps/v8/src/builtins/builtins-microtask-queue-gen.cc
@@ -413,14 +413,23 @@ void MicrotaskQueueBuiltinsAssembler::EnterMicrotaskContext(
TNode<IntPtrT> flag_data_offset =
IntPtrConstant(HandleScopeImplementer::kIsMicrotaskContextOffset +
FlagStack::kDataOffset);
+ TNode<IntPtrT> flag_capacity_offset =
+ IntPtrConstant(HandleScopeImplementer::kIsMicrotaskContextOffset +
+ FlagStack::kCapacityOffset);
+ TNode<IntPtrT> flag_size_offset =
+ IntPtrConstant(HandleScopeImplementer::kIsMicrotaskContextOffset +
+ FlagStack::kSizeOffset);
+ // Ensure both stacks are in sync.
+ USE(flag_capacity_offset);
+ CSA_DCHECK(this,
+ WordEqual(capacity, Load<IntPtrT>(hsi, flag_capacity_offset)));
+ CSA_DCHECK(this, WordEqual(size, Load<IntPtrT>(hsi, flag_size_offset)));
+
TNode<RawPtrT> flag_data = Load<RawPtrT>(hsi, flag_data_offset);
StoreNoWriteBarrier(MachineRepresentation::kWord8, flag_data, size,
BoolConstant(true));
- StoreNoWriteBarrier(
- MachineType::PointerRepresentation(), hsi,
- IntPtrConstant(HandleScopeImplementer::kIsMicrotaskContextOffset +
- FlagStack::kSizeOffset),
- new_size);
+ StoreNoWriteBarrier(MachineType::PointerRepresentation(), hsi,
+ flag_size_offset, new_size);
Goto(&done);
}
@@ -449,13 +458,11 @@ void MicrotaskQueueBuiltinsAssembler::RewindEnteredContext(
IntPtrConstant(HandleScopeImplementer::kEnteredContextsOffset +
ContextStack::kSizeOffset);
-#ifdef ENABLE_VERIFY_CSA
- {
+ if (DEBUG_BOOL) {
TNode<IntPtrT> size = Load<IntPtrT>(hsi, size_offset);
CSA_CHECK(this, IntPtrLessThan(IntPtrConstant(0), size));
CSA_CHECK(this, IntPtrLessThanOrEqual(saved_entered_context_count, size));
}
-#endif
StoreNoWriteBarrier(MachineType::PointerRepresentation(), hsi, size_offset,
saved_entered_context_count);
diff --git a/deps/v8/src/builtins/builtins-number-gen.cc b/deps/v8/src/builtins/builtins-number-gen.cc
index 390552836d..ef89a1badd 100644
--- a/deps/v8/src/builtins/builtins-number-gen.cc
+++ b/deps/v8/src/builtins/builtins-number-gen.cc
@@ -74,6 +74,35 @@ DEF_BINOP(ShiftRight_Baseline, Generate_ShiftRightWithFeedback)
DEF_BINOP(ShiftRightLogical_Baseline, Generate_ShiftRightLogicalWithFeedback)
#undef DEF_BINOP
+#define DEF_BINOP_RHS_SMI(Name, Generator) \
+ TF_BUILTIN(Name, CodeStubAssembler) { \
+ auto lhs = Parameter<Object>(Descriptor::kLeft); \
+ auto rhs = Parameter<Object>(Descriptor::kRight); \
+ auto slot = UncheckedParameter<UintPtrT>(Descriptor::kSlot); \
+ \
+ BinaryOpAssembler binop_asm(state()); \
+ TNode<Object> result = binop_asm.Generator( \
+ [&]() { return LoadContextFromBaseline(); }, lhs, rhs, slot, \
+ [&]() { return LoadFeedbackVectorFromBaseline(); }, \
+ UpdateFeedbackMode::kGuaranteedFeedback, true); \
+ \
+ Return(result); \
+ }
+DEF_BINOP_RHS_SMI(AddSmi_Baseline, Generate_AddWithFeedback)
+DEF_BINOP_RHS_SMI(SubtractSmi_Baseline, Generate_SubtractWithFeedback)
+DEF_BINOP_RHS_SMI(MultiplySmi_Baseline, Generate_MultiplyWithFeedback)
+DEF_BINOP_RHS_SMI(DivideSmi_Baseline, Generate_DivideWithFeedback)
+DEF_BINOP_RHS_SMI(ModulusSmi_Baseline, Generate_ModulusWithFeedback)
+DEF_BINOP_RHS_SMI(ExponentiateSmi_Baseline, Generate_ExponentiateWithFeedback)
+DEF_BINOP_RHS_SMI(BitwiseOrSmi_Baseline, Generate_BitwiseOrWithFeedback)
+DEF_BINOP_RHS_SMI(BitwiseXorSmi_Baseline, Generate_BitwiseXorWithFeedback)
+DEF_BINOP_RHS_SMI(BitwiseAndSmi_Baseline, Generate_BitwiseAndWithFeedback)
+DEF_BINOP_RHS_SMI(ShiftLeftSmi_Baseline, Generate_ShiftLeftWithFeedback)
+DEF_BINOP_RHS_SMI(ShiftRightSmi_Baseline, Generate_ShiftRightWithFeedback)
+DEF_BINOP_RHS_SMI(ShiftRightLogicalSmi_Baseline,
+ Generate_ShiftRightLogicalWithFeedback)
+#undef DEF_BINOP_RHS_SMI
+
#define DEF_UNOP(Name, Generator) \
TF_BUILTIN(Name, CodeStubAssembler) { \
auto value = Parameter<Object>(Descriptor::kValue); \
diff --git a/deps/v8/src/builtins/builtins-object-gen.cc b/deps/v8/src/builtins/builtins-object-gen.cc
index 3e56df803a..e8a8805453 100644
--- a/deps/v8/src/builtins/builtins-object-gen.cc
+++ b/deps/v8/src/builtins/builtins-object-gen.cc
@@ -152,12 +152,14 @@ TNode<BoolT> ObjectEntriesValuesBuiltinsAssembler::IsPropertyEnumerable(
TNode<BoolT> ObjectEntriesValuesBuiltinsAssembler::IsPropertyKindAccessor(
TNode<Uint32T> kind) {
- return Word32Equal(kind, Int32Constant(PropertyKind::kAccessor));
+ return Word32Equal(kind,
+ Int32Constant(static_cast<int>(PropertyKind::kAccessor)));
}
TNode<BoolT> ObjectEntriesValuesBuiltinsAssembler::IsPropertyKindData(
TNode<Uint32T> kind) {
- return Word32Equal(kind, Int32Constant(PropertyKind::kData));
+ return Word32Equal(kind,
+ Int32Constant(static_cast<int>(PropertyKind::kData)));
}
void ObjectEntriesValuesBuiltinsAssembler::GetOwnValuesOrEntries(
diff --git a/deps/v8/src/builtins/builtins-regexp-gen.cc b/deps/v8/src/builtins/builtins-regexp-gen.cc
index 0a75e1bebd..38dc47a122 100644
--- a/deps/v8/src/builtins/builtins-regexp-gen.cc
+++ b/deps/v8/src/builtins/builtins-regexp-gen.cc
@@ -47,7 +47,13 @@ TNode<IntPtrT> RegExpBuiltinsAssembler::IntPtrZero() {
// If code is a builtin, return the address to the (possibly embedded) builtin
// code entry, otherwise return the entry of the code object itself.
-TNode<RawPtrT> RegExpBuiltinsAssembler::LoadCodeObjectEntry(TNode<Code> code) {
+TNode<RawPtrT> RegExpBuiltinsAssembler::LoadCodeObjectEntry(TNode<CodeT> code) {
+ if (V8_EXTERNAL_CODE_SPACE_BOOL) {
+ // When external code space is enabled we can load the entry point directly
+ // from the CodeT object.
+ return GetCodeEntry(code);
+ }
+
TVARIABLE(RawPtrT, var_result);
Label if_code_is_off_heap(this), out(this);
@@ -553,8 +559,7 @@ TNode<HeapObject> RegExpBuiltinsAssembler::RegExpExecInternal(
#endif
GotoIf(TaggedIsSmi(var_code.value()), &runtime);
- // TODO(v8:11880): avoid roundtrips between cdc and code.
- TNode<Code> code = FromCodeT(CAST(var_code.value()));
+ TNode<CodeT> code = CAST(var_code.value());
Label if_success(this), if_exception(this, Label::kDeferred);
{
@@ -618,7 +623,6 @@ TNode<HeapObject> RegExpBuiltinsAssembler::RegExpExecInternal(
MachineType arg8_type = type_tagged;
TNode<JSRegExp> arg8 = regexp;
- // TODO(v8:11880): avoid roundtrips between cdc and code.
TNode<RawPtrT> code_entry = LoadCodeObjectEntry(code);
// AIX uses function descriptors on CFunction calls. code_entry in this case
diff --git a/deps/v8/src/builtins/builtins-regexp-gen.h b/deps/v8/src/builtins/builtins-regexp-gen.h
index e55af65f81..ef60646314 100644
--- a/deps/v8/src/builtins/builtins-regexp-gen.h
+++ b/deps/v8/src/builtins/builtins-regexp-gen.h
@@ -21,7 +21,7 @@ class RegExpBuiltinsAssembler : public CodeStubAssembler {
TNode<Smi> SmiZero();
TNode<IntPtrT> IntPtrZero();
- TNode<RawPtrT> LoadCodeObjectEntry(TNode<Code> code);
+ TNode<RawPtrT> LoadCodeObjectEntry(TNode<CodeT> code);
// Allocate either a JSRegExpResult or a JSRegExpResultWithIndices (depending
// on has_indices) with the given length (the number of captures, including
diff --git a/deps/v8/src/builtins/builtins-string.tq b/deps/v8/src/builtins/builtins-string.tq
index ab2cf2696d..769b3223bc 100644
--- a/deps/v8/src/builtins/builtins-string.tq
+++ b/deps/v8/src/builtins/builtins-string.tq
@@ -253,3 +253,28 @@ builtin StringCharAt(implicit context: Context)(
return StringFromSingleCharCode(code);
}
}
+
+// Check two slices for equal content.
+// Checking from both ends simultaneously allows us to detect differences
+// quickly even when the slices share a prefix or a suffix.
+macro EqualContent<T1: type, T2: type>(
+ a: ConstSlice<T1>, b: ConstSlice<T2>): bool {
+ const length = a.length;
+ if (length != b.length) return false;
+ if (a.GCUnsafeStartPointer() == b.GCUnsafeStartPointer()) return true;
+ // This creates references to the first and last characters of the slices,
+ // which can be out-of-bounds if the slices are empty. But in this case,
+ // the references will never be accessed.
+ let aFirst = a.UncheckedAtIndex(0);
+ let bFirst = b.UncheckedAtIndex(0);
+ let aLast = a.UncheckedAtIndex(length - 1);
+ let bLast = b.UncheckedAtIndex(length - 1);
+ while (aFirst.offset <= aLast.offset) {
+ if (*aFirst != *bFirst || *aLast != *bLast) return false;
+ aFirst = unsafe::AddOffset(aFirst, 1);
+ aLast = unsafe::AddOffset(aLast, -1);
+ bFirst = unsafe::AddOffset(bFirst, 1);
+ bLast = unsafe::AddOffset(bLast, -1);
+ }
+ return true;
+}
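
A plain C++ rendering of the two-ended scan above, to make the termination and wrap-around handling explicit; this is a sketch over raw spans rather than the Torque slice types, and it replaces the empty-slice reference trick with an early return:

#include <cstddef>

// Compare two spans from both ends at once; T1 and T2 may differ, e.g. one- and
// two-byte string data, mirroring the macro's type parameters.
template <typename T1, typename T2>
bool EqualContent(const T1* a, std::size_t a_length, const T2* b,
                  std::size_t b_length) {
  if (a_length != b_length) return false;
  if (a_length == 0) return true;
  if (static_cast<const void*>(a) == static_cast<const void*>(b)) return true;
  std::size_t first = 0;
  std::size_t last = a_length - 1;
  while (first <= last) {
    if (a[first] != b[first] || a[last] != b[last]) return false;
    ++first;
    if (last == 0) break;  // avoid unsigned wrap-around once the ends meet
    --last;
  }
  return true;
}

int main() {
  const char a[] = {'a', 'b', 'c'};
  const unsigned char b[] = {'a', 'b', 'c'};
  return EqualContent(a, 3, b, 3) ? 0 : 1;
}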
diff --git a/deps/v8/src/builtins/builtins-temporal.cc b/deps/v8/src/builtins/builtins-temporal.cc
index bbffa68a1d..d25c769ea4 100644
--- a/deps/v8/src/builtins/builtins-temporal.cc
+++ b/deps/v8/src/builtins/builtins-temporal.cc
@@ -11,7 +11,7 @@ namespace v8 {
namespace internal {
#define TO_BE_IMPLEMENTED(id) \
- BUILTIN(id) { \
+ BUILTIN_NO_RCS(id) { \
HandleScope scope(isolate); \
UNIMPLEMENTED(); \
}
diff --git a/deps/v8/src/builtins/builtins-trace.cc b/deps/v8/src/builtins/builtins-trace.cc
index 0fd7d57e36..5403110e87 100644
--- a/deps/v8/src/builtins/builtins-trace.cc
+++ b/deps/v8/src/builtins/builtins-trace.cc
@@ -61,7 +61,7 @@ class MaybeUtf8 {
private:
void AllocateSufficientSpace(int len) {
if (len + 1 > MAX_STACK_LENGTH) {
- allocated_.reset(new uint8_t[len + 1]);
+ allocated_ = std::make_unique<uint8_t[]>(len + 1);
buf_ = allocated_.get();
}
}
@@ -72,7 +72,7 @@ class MaybeUtf8 {
// the MAX_STACK_LENGTH should be more than enough.
uint8_t* buf_;
uint8_t data_[MAX_STACK_LENGTH];
- std::unique_ptr<uint8_t> allocated_;
+ std::unique_ptr<uint8_t[]> allocated_;
};
#if !defined(V8_USE_PERFETTO)
diff --git a/deps/v8/src/builtins/builtins-typed-array-gen.cc b/deps/v8/src/builtins/builtins-typed-array-gen.cc
index 60f26c63dc..00b040f03f 100644
--- a/deps/v8/src/builtins/builtins-typed-array-gen.cc
+++ b/deps/v8/src/builtins/builtins-typed-array-gen.cc
@@ -33,7 +33,7 @@ void TypedArrayBuiltinsAssembler::SetupTypedArrayEmbedderFields(
// elements.
// TODO(bmeurer,v8:4153): Rename this and maybe fix up the implementation a bit.
TNode<JSArrayBuffer> TypedArrayBuiltinsAssembler::AllocateEmptyOnHeapBuffer(
- TNode<Context> context, TNode<UintPtrT> byte_length) {
+ TNode<Context> context) {
TNode<NativeContext> native_context = LoadNativeContext(context);
TNode<Map> map =
CAST(LoadContextElement(native_context, Context::ARRAY_BUFFER_MAP_INDEX));
@@ -49,7 +49,7 @@ TNode<JSArrayBuffer> TypedArrayBuiltinsAssembler::AllocateEmptyOnHeapBuffer(
// Setup the ArrayBuffer.
// - Set BitField to 0.
// - Set IsExternal and IsDetachable bits of BitFieldSlot.
- // - Set the byte_length field to byte_length.
+ // - Set the byte_length field to zero.
// - Set backing_store to null/Smi(0).
// - Set extension to null.
// - Set all embedder fields to Smi(0).
@@ -64,9 +64,9 @@ TNode<JSArrayBuffer> TypedArrayBuiltinsAssembler::AllocateEmptyOnHeapBuffer(
Int32Constant(bitfield_value));
StoreObjectFieldNoWriteBarrier(buffer, JSArrayBuffer::kByteLengthOffset,
- byte_length);
- StoreObjectFieldNoWriteBarrier(buffer, JSArrayBuffer::kBackingStoreOffset,
- PointerConstant(nullptr));
+ UintPtrConstant(0));
+ StoreCagedPointerToObject(buffer, JSArrayBuffer::kBackingStoreOffset,
+ EmptyBackingStoreBufferConstant());
StoreObjectFieldNoWriteBarrier(buffer, JSArrayBuffer::kExtensionOffset,
IntPtrConstant(0));
for (int offset = JSArrayBuffer::kHeaderSize;
@@ -127,7 +127,8 @@ TF_BUILTIN(TypedArrayPrototypeByteLength, TypedArrayBuiltinsAssembler) {
LoadJSArrayBufferViewBuffer(receiver_array);
Label variable_length(this), normal(this);
- Branch(IsVariableLengthTypedArray(receiver_array), &variable_length, &normal);
+ Branch(IsVariableLengthJSArrayBufferView(receiver_array), &variable_length,
+ &normal);
BIND(&variable_length);
{
Return(ChangeUintPtrToTagged(LoadVariableLengthJSTypedArrayByteLength(
@@ -155,8 +156,8 @@ TF_BUILTIN(TypedArrayPrototypeByteOffset, TypedArrayBuiltinsAssembler) {
// Default to zero if the {receiver}s buffer was detached / out of bounds.
Label detached_or_oob(this), not_detached_nor_oob(this);
- IsJSTypedArrayDetachedOrOutOfBounds(CAST(receiver), &detached_or_oob,
- &not_detached_nor_oob);
+ IsJSArrayBufferViewDetachedOrOutOfBounds(CAST(receiver), &detached_or_oob,
+ &not_detached_nor_oob);
BIND(&detached_or_oob);
Return(ChangeUintPtrToTagged(UintPtrConstant(0)));
@@ -436,10 +437,10 @@ void TypedArrayBuiltinsAssembler::SetJSTypedArrayOnHeapDataPtr(
TNode<IntPtrT> ptr_compr_cage_base =
IntPtrSub(full_base, Signed(ChangeUint32ToWord(compressed_base)));
// Add JSTypedArray::ExternalPointerCompensationForOnHeapArray() to offset.
+ // See JSTypedArray::AddExternalPointerCompensationForDeserialization().
DCHECK_EQ(
isolate()->cage_base(),
JSTypedArray::ExternalPointerCompensationForOnHeapArray(isolate()));
- // See JSTypedArray::SetOnHeapDataPtr() for details.
offset = Unsigned(IntPtrAdd(offset, ptr_compr_cage_base));
}
diff --git a/deps/v8/src/builtins/builtins-typed-array-gen.h b/deps/v8/src/builtins/builtins-typed-array-gen.h
index 2807745ecb..2df46e499b 100644
--- a/deps/v8/src/builtins/builtins-typed-array-gen.h
+++ b/deps/v8/src/builtins/builtins-typed-array-gen.h
@@ -21,8 +21,7 @@ class TypedArrayBuiltinsAssembler : public CodeStubAssembler {
TNode<Map> map, TNode<Smi> length,
TNode<UintPtrT> byte_offset);
- TNode<JSArrayBuffer> AllocateEmptyOnHeapBuffer(TNode<Context> context,
- TNode<UintPtrT> byte_length);
+ TNode<JSArrayBuffer> AllocateEmptyOnHeapBuffer(TNode<Context> context);
TNode<Map> LoadMapForType(TNode<JSTypedArray> array);
TNode<BoolT> IsMockArrayBufferAllocatorFlag();
diff --git a/deps/v8/src/builtins/builtins-utils.h b/deps/v8/src/builtins/builtins-utils.h
index e219aec65d..0fdca8a089 100644
--- a/deps/v8/src/builtins/builtins-utils.h
+++ b/deps/v8/src/builtins/builtins-utils.h
@@ -79,8 +79,7 @@ class BuiltinArguments : public JavaScriptArguments {
// through the BuiltinArguments object args.
// TODO(cbruni): add global flag to check whether any tracing events have been
// enabled.
-#ifdef V8_RUNTIME_CALL_STATS
-#define BUILTIN(name) \
+#define BUILTIN_RCS(name) \
V8_WARN_UNUSED_RESULT static Object Builtin_Impl_##name( \
BuiltinArguments args, Isolate* isolate); \
\
@@ -106,8 +105,7 @@ class BuiltinArguments : public JavaScriptArguments {
V8_WARN_UNUSED_RESULT static Object Builtin_Impl_##name( \
BuiltinArguments args, Isolate* isolate)
-#else // V8_RUNTIME_CALL_STATS
-#define BUILTIN(name) \
+#define BUILTIN_NO_RCS(name) \
V8_WARN_UNUSED_RESULT static Object Builtin_Impl_##name( \
BuiltinArguments args, Isolate* isolate); \
\
@@ -120,6 +118,11 @@ class BuiltinArguments : public JavaScriptArguments {
\
V8_WARN_UNUSED_RESULT static Object Builtin_Impl_##name( \
BuiltinArguments args, Isolate* isolate)
+
+#ifdef V8_RUNTIME_CALL_STATS
+#define BUILTIN(name) BUILTIN_RCS(name)
+#else // V8_RUNTIME_CALL_STATS
+#define BUILTIN(name) BUILTIN_NO_RCS(name)
#endif // V8_RUNTIME_CALL_STATS
// ----------------------------------------------------------------------------
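
The refactor above gives both expansions a name and turns BUILTIN itself into a conditional alias, which is what lets TO_BE_IMPLEMENTED in builtins-temporal.cc opt out of runtime-call-stats bookkeeping by targeting BUILTIN_NO_RCS directly. A reduced sketch of that pattern with demo macro names (the real expansions are far larger):

#include <cstdio>

#define DEMO_BUILTIN_RCS(name) \
  void Builtin_##name() { std::puts("RCS-instrumented: " #name); }
#define DEMO_BUILTIN_NO_RCS(name) \
  void Builtin_##name() { std::puts("plain: " #name); }

#ifdef V8_RUNTIME_CALL_STATS
#define DEMO_BUILTIN(name) DEMO_BUILTIN_RCS(name)
#else  // V8_RUNTIME_CALL_STATS
#define DEMO_BUILTIN(name) DEMO_BUILTIN_NO_RCS(name)
#endif  // V8_RUNTIME_CALL_STATS

DEMO_BUILTIN(DataViewConstructor)  // expands to exactly one definition

int main() {
  Builtin_DataViewConstructor();
  return 0;
}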
diff --git a/deps/v8/src/builtins/builtins-wasm-gen.cc b/deps/v8/src/builtins/builtins-wasm-gen.cc
index eb9311d0c6..66746c3b94 100644
--- a/deps/v8/src/builtins/builtins-wasm-gen.cc
+++ b/deps/v8/src/builtins/builtins-wasm-gen.cc
@@ -30,10 +30,10 @@ TNode<FixedArray> WasmBuiltinsAssembler::LoadTablesFromInstance(
WasmInstanceObject::kTablesOffset);
}
-TNode<FixedArray> WasmBuiltinsAssembler::LoadExternalFunctionsFromInstance(
+TNode<FixedArray> WasmBuiltinsAssembler::LoadInternalFunctionsFromInstance(
TNode<WasmInstanceObject> instance) {
return LoadObjectField<FixedArray>(
- instance, WasmInstanceObject::kWasmExternalFunctionsOffset);
+ instance, WasmInstanceObject::kWasmInternalFunctionsOffset);
}
TNode<FixedArray> WasmBuiltinsAssembler::LoadManagedObjectMapsFromInstance(
diff --git a/deps/v8/src/builtins/builtins-wasm-gen.h b/deps/v8/src/builtins/builtins-wasm-gen.h
index ccf5bae7a1..1804957ef1 100644
--- a/deps/v8/src/builtins/builtins-wasm-gen.h
+++ b/deps/v8/src/builtins/builtins-wasm-gen.h
@@ -22,7 +22,7 @@ class WasmBuiltinsAssembler : public CodeStubAssembler {
TNode<FixedArray> LoadTablesFromInstance(TNode<WasmInstanceObject> instance);
- TNode<FixedArray> LoadExternalFunctionsFromInstance(
+ TNode<FixedArray> LoadInternalFunctionsFromInstance(
TNode<WasmInstanceObject> instance);
TNode<FixedArray> LoadManagedObjectMapsFromInstance(
diff --git a/deps/v8/src/builtins/builtins.cc b/deps/v8/src/builtins/builtins.cc
index af1e7490b0..561bca4307 100644
--- a/deps/v8/src/builtins/builtins.cc
+++ b/deps/v8/src/builtins/builtins.cc
@@ -107,7 +107,7 @@ void Builtins::TearDown() { initialized_ = false; }
const char* Builtins::Lookup(Address pc) {
// Off-heap pc's can be looked up through binary search.
- Builtin builtin = InstructionStream::TryLookupCode(isolate_, pc);
+ Builtin builtin = OffHeapInstructionStream::TryLookupCode(isolate_, pc);
if (Builtins::IsBuiltinId(builtin)) return name(builtin);
// May be called during initialization (disassembler).
@@ -194,6 +194,39 @@ Handle<Code> Builtins::code_handle(Builtin builtin) {
return Handle<Code>(location);
}
+FullObjectSlot Builtins::builtin_code_data_container_slot(Builtin builtin) {
+ CHECK(V8_EXTERNAL_CODE_SPACE_BOOL);
+ Address* location =
+ &isolate_->builtin_code_data_container_table()[Builtins::ToInt(builtin)];
+ return FullObjectSlot(location);
+}
+
+void Builtins::set_codet(Builtin builtin, CodeT code) {
+ CHECK(V8_EXTERNAL_CODE_SPACE_BOOL);
+ // TODO(v8:11880): add DCHECK_EQ(builtin, code.builtin_id()); once CodeT
+ // has respective field.
+ DCHECK(Internals::HasHeapObjectTag(code.ptr()));
+ // The given builtin may be uninitialized thus we cannot check its type here.
+ isolate_->builtin_code_data_container_table()[Builtins::ToInt(builtin)] =
+ code.ptr();
+}
+
+CodeT Builtins::codet(Builtin builtin) {
+ Address* table = V8_EXTERNAL_CODE_SPACE_BOOL
+ ? isolate_->builtin_code_data_container_table()
+ : isolate_->builtin_table();
+ Address ptr = table[Builtins::ToInt(builtin)];
+ return CodeT::cast(Object(ptr));
+}
+
+Handle<CodeT> Builtins::codet_handle(Builtin builtin) {
+ Address* table = V8_EXTERNAL_CODE_SPACE_BOOL
+ ? isolate_->builtin_code_data_container_table()
+ : isolate_->builtin_table();
+ Address* location = &table[Builtins::ToInt(builtin)];
+ return Handle<CodeT>(location);
+}
+
// static
int Builtins::GetStackParameterCount(Builtin builtin) {
DCHECK(Builtins::KindOf(builtin) == TFJ);
@@ -296,6 +329,17 @@ bool Builtins::IsBuiltinHandle(Handle<HeapObject> maybe_code,
return true;
}
+bool Builtins::IsBuiltinCodeDataContainerHandle(Handle<HeapObject> maybe_code,
+ Builtin* builtin) const {
+ Address* handle_location = maybe_code.location();
+ Address* builtins_table = isolate_->builtin_code_data_container_table();
+ if (handle_location < builtins_table) return false;
+ Address* builtins_table_end = &builtins_table[Builtins::kBuiltinCount];
+ if (handle_location >= builtins_table_end) return false;
+ *builtin = FromInt(static_cast<int>(handle_location - builtins_table));
+ return true;
+}
+
// static
bool Builtins::IsIsolateIndependentBuiltin(const Code code) {
const Builtin builtin = code.builtin_id();
@@ -373,7 +417,7 @@ class OffHeapTrampolineGenerator {
FrameScope scope(&masm_, StackFrame::NO_FRAME_TYPE);
if (type == TrampolineType::kJump) {
masm_.CodeEntry();
- masm_.JumpToInstructionStream(off_heap_entry);
+ masm_.JumpToOffHeapInstructionStream(off_heap_entry);
} else {
DCHECK_EQ(type, TrampolineType::kAbort);
masm_.Trap();
diff --git a/deps/v8/src/builtins/builtins.h b/deps/v8/src/builtins/builtins.h
index 79e4da840c..ddb50d3230 100644
--- a/deps/v8/src/builtins/builtins.h
+++ b/deps/v8/src/builtins/builtins.h
@@ -36,6 +36,13 @@ static constexpr T FirstFromVarArgs(T x, ...) noexcept {
#define BUILTIN_CODE(isolate, name) \
(isolate)->builtins()->code_handle(i::Builtin::k##name)
+#ifdef V8_EXTERNAL_CODE_SPACE
+#define BUILTIN_CODET(isolate, name) \
+ (isolate)->builtins()->codet_handle(i::Builtin::k##name)
+#else
+#define BUILTIN_CODET(isolate, name) BUILTIN_CODE(isolate, name)
+#endif // V8_EXTERNAL_CODE_SPACE
+
enum class Builtin : int32_t {
kNoBuiltinId = -1,
#define DEF_ENUM(Name, ...) k##Name,
@@ -158,10 +165,14 @@ class Builtins {
// Used by CreateOffHeapTrampolines in isolate.cc.
void set_code(Builtin builtin, Code code);
+ void set_codet(Builtin builtin, CodeT code);
V8_EXPORT_PRIVATE Code code(Builtin builtin);
V8_EXPORT_PRIVATE Handle<Code> code_handle(Builtin builtin);
+ V8_EXPORT_PRIVATE CodeT codet(Builtin builtin);
+ V8_EXPORT_PRIVATE Handle<CodeT> codet_handle(Builtin builtin);
+
static CallInterfaceDescriptor CallInterfaceDescriptorFor(Builtin builtin);
V8_EXPORT_PRIVATE static Callable CallableFor(Isolate* isolate,
Builtin builtin);
@@ -192,6 +203,11 @@ class Builtins {
// by handle location. Similar to Heap::IsRootHandle.
bool IsBuiltinHandle(Handle<HeapObject> maybe_code, Builtin* index) const;
+ // Similar to IsBuiltinHandle but for respective CodeDataContainer handle.
+ // Can be used only when external code space is enabled.
+ bool IsBuiltinCodeDataContainerHandle(Handle<HeapObject> maybe_code,
+ Builtin* index) const;
+
// True, iff the given code object is a builtin with off-heap embedded code.
static bool IsIsolateIndependentBuiltin(const Code code);
@@ -280,6 +296,8 @@ class Builtins {
FullObjectSlot builtin_slot(Builtin builtin);
// Returns given builtin's slot in the tier0 builtin table.
FullObjectSlot builtin_tier0_slot(Builtin builtin);
+ // Returns given builtin's slot in the builtin code data container table.
+ FullObjectSlot builtin_code_data_container_slot(Builtin builtin);
private:
static void Generate_CallFunction(MacroAssembler* masm,
diff --git a/deps/v8/src/builtins/collections.tq b/deps/v8/src/builtins/collections.tq
index c0d311a825..30444ddadc 100644
--- a/deps/v8/src/builtins/collections.tq
+++ b/deps/v8/src/builtins/collections.tq
@@ -33,12 +33,9 @@ macro LoadKeyValuePairNoSideEffects(implicit context: Context)(o: JSAny):
}
}
}
- case (JSReceiver): {
+ case (JSAny): {
goto MayHaveSideEffects;
}
- case (o: JSAny): deferred {
- ThrowTypeError(MessageTemplate::kIteratorValueNotAnObject, o);
- }
}
}
@@ -48,6 +45,8 @@ transitioning macro LoadKeyValuePair(implicit context: Context)(o: JSAny):
try {
return LoadKeyValuePairNoSideEffects(o) otherwise Generic;
} label Generic {
+ const o = Cast<JSReceiver>(o)
+ otherwise ThrowTypeError(MessageTemplate::kIteratorValueNotAnObject, o);
return KeyValuePair{
key: GetProperty(o, Convert<Smi>(0)),
value: GetProperty(o, Convert<Smi>(1))
diff --git a/deps/v8/src/builtins/convert.tq b/deps/v8/src/builtins/convert.tq
index 2a36badfb7..64c81ca572 100644
--- a/deps/v8/src/builtins/convert.tq
+++ b/deps/v8/src/builtins/convert.tq
@@ -195,6 +195,12 @@ Convert<int32, uint16>(i: uint16): int32 {
Convert<int32, char16|char8>(i: char16|char8): int32 {
return Signed(Convert<uint32>(i));
}
+Convert<intptr, char16>(i: char16): intptr {
+ return Convert<intptr, uint32>(i);
+}
+Convert<intptr, char8>(i: char8): intptr {
+ return Convert<intptr, uint32>(i);
+}
Convert<int32, uint31>(i: uint31): int32 {
return Signed(Convert<uint32>(i));
}
diff --git a/deps/v8/src/builtins/data-view.tq b/deps/v8/src/builtins/data-view.tq
index 4acc13b223..9bc4bd5f2e 100644
--- a/deps/v8/src/builtins/data-view.tq
+++ b/deps/v8/src/builtins/data-view.tq
@@ -84,15 +84,32 @@ javascript builtin DataViewPrototypeGetBuffer(
return dataView.buffer;
}
+extern macro IsJSArrayBufferViewDetachedOrOutOfBounds(JSArrayBufferView):
+ never labels DetachedOrOutOfBounds, NotDetachedNorOutOfBounds;
+extern macro LoadVariableLengthJSArrayBufferViewByteLength(
+ JSArrayBufferView, JSArrayBuffer): uintptr labels DetachedOrOutOfBounds;
+
// ES6 section 24.2.4.2 get DataView.prototype.byteLength
javascript builtin DataViewPrototypeGetByteLength(
js-implicit context: NativeContext, receiver: JSAny)(...arguments): Number {
const dataView: JSDataView =
ValidateDataView(context, receiver, 'get DataView.prototype.byte_length');
- if (WasDetached(dataView)) {
- ThrowTypeError(MessageTemplate::kDetachedOperation, kBuiltinNameByteLength);
+ if (IsVariableLengthJSArrayBufferView(dataView)) {
+ try {
+ const byteLength = LoadVariableLengthJSArrayBufferViewByteLength(
+ dataView, dataView.buffer) otherwise DetachedOrOutOfBounds;
+ return Convert<Number>(byteLength);
+ } label DetachedOrOutOfBounds {
+ ThrowTypeError(
+ MessageTemplate::kDetachedOperation, kBuiltinNameByteLength);
+ }
+ } else {
+ if (WasDetached(dataView)) {
+ ThrowTypeError(
+ MessageTemplate::kDetachedOperation, kBuiltinNameByteLength);
+ }
+ return Convert<Number>(dataView.byte_length);
}
- return Convert<Number>(dataView.byte_length);
}
// ES6 section 24.2.4.3 get DataView.prototype.byteOffset
@@ -100,10 +117,14 @@ javascript builtin DataViewPrototypeGetByteOffset(
js-implicit context: NativeContext, receiver: JSAny)(...arguments): Number {
const dataView: JSDataView =
ValidateDataView(context, receiver, 'get DataView.prototype.byte_offset');
- if (WasDetached(dataView)) {
+ try {
+ IsJSArrayBufferViewDetachedOrOutOfBounds(dataView)
+ otherwise DetachedOrOutOfBounds, NotDetachedNorOutOfBounds;
+ } label DetachedOrOutOfBounds {
ThrowTypeError(MessageTemplate::kDetachedOperation, kBuiltinNameByteOffset);
+ } label NotDetachedNorOutOfBounds {
+ return Convert<Number>(dataView.byte_offset);
}
- return Convert<Number>(dataView.byte_offset);
}
extern macro BitcastInt32ToFloat32(uint32): float32;
@@ -373,28 +394,40 @@ transitioning macro DataViewGet(
// 5. Let buffer be view.[[ViewedArrayBuffer]].
const buffer: JSArrayBuffer = dataView.buffer;
- // 6. If IsDetachedBuffer(buffer) is true, throw a TypeError exception.
- if (IsDetachedBuffer(buffer)) {
+ // 6. Let getBufferByteLength be
+ // MakeIdempotentArrayBufferByteLengthGetter(Unordered).
+ // 7. If IsViewOutOfBounds(view, getBufferByteLength) is true, throw a
+ // TypeError exception.
+ try {
+ IsJSArrayBufferViewDetachedOrOutOfBounds(dataView)
+ otherwise DetachedOrOutOfBounds, NotDetachedNorOutOfBounds;
+ } label DetachedOrOutOfBounds {
ThrowTypeError(
MessageTemplate::kDetachedOperation,
MakeDataViewGetterNameString(kind));
- }
+ } label NotDetachedNorOutOfBounds {}
- // 7. Let viewOffset be view.[[ByteOffset]].
+ // 8. Let viewOffset be view.[[ByteOffset]].
const viewOffset: uintptr = dataView.byte_offset;
- // 8. Let viewSize be view.[[ByteLength]].
- const viewSize: uintptr = dataView.byte_length;
+ // 9. Let viewSize be GetViewByteLength(view, getBufferByteLength).
+ let viewSize: uintptr;
+ if (dataView.bit_field.is_length_tracking) {
+ viewSize = LoadVariableLengthJSArrayBufferViewByteLength(
+ dataView, dataView.buffer) otherwise unreachable;
+ } else {
+ viewSize = dataView.byte_length;
+ }
- // 9. Let elementSize be the Element Size value specified in Table 62
+ // 10. Let elementSize be the Element Size value specified in Table 62
// for Element Type type.
const elementSize: uintptr = DataViewElementSize(kind);
- // 10. If getIndex + elementSize > viewSize, throw a RangeError exception.
+ // 11. If getIndex + elementSize > viewSize, throw a RangeError exception.
CheckIntegerIndexAdditionOverflow(getIndex, elementSize, viewSize)
otherwise RangeError;
- // 11. Let bufferIndex be getIndex + viewOffset.
+ // 12. Let bufferIndex be getIndex + viewOffset.
const bufferIndex: uintptr = getIndex + viewOffset;
if constexpr (kind == ElementsKind::UINT8_ELEMENTS) {
@@ -654,9 +687,6 @@ transitioning macro DataViewSet(
// 3. Let getIndex be ? ToIndex(requestIndex).
const getIndex: uintptr = ToIndex(requestIndex) otherwise RangeError;
- const littleEndian: bool = ToBoolean(requestedLittleEndian);
- const buffer: JSArrayBuffer = dataView.buffer;
-
let numberValue: Numeric;
if constexpr (
kind == ElementsKind::BIGUINT64_ELEMENTS ||
@@ -669,28 +699,54 @@ transitioning macro DataViewSet(
numberValue = ToNumber(context, value);
}
+ // 6. Set isLittleEndian to !ToBoolean(isLittleEndian).
+ const littleEndian: bool = ToBoolean(requestedLittleEndian);
+
+ // 7. Let buffer be view.[[ViewedArrayBuffer]].
+ const buffer: JSArrayBuffer = dataView.buffer;
+
// 6. If IsDetachedBuffer(buffer) is true, throw a TypeError exception.
if (IsDetachedBuffer(buffer)) {
ThrowTypeError(
MessageTemplate::kDetachedOperation,
MakeDataViewSetterNameString(kind));
}
+ // 8. Let getBufferByteLength be
+ // MakeIdempotentArrayBufferByteLengthGetter(Unordered).
+ // 9. NOTE: Bounds checking is not a synchronizing operation when view's
+ // backing buffer is a growable SharedArrayBuffer.
+ // 10. If IsViewOutOfBounds(view, getBufferByteLength) is true, throw a
+ // TypeError exception.
+ try {
+ IsJSArrayBufferViewDetachedOrOutOfBounds(dataView)
+ otherwise DetachedOrOutOfBounds, NotDetachedNorOutOfBounds;
+ } label DetachedOrOutOfBounds {
+ ThrowTypeError(
+ MessageTemplate::kDetachedOperation,
+ MakeDataViewGetterNameString(kind));
+ } label NotDetachedNorOutOfBounds {}
- // 9. Let viewOffset be view.[[ByteOffset]].
+ // 11. Let viewOffset be view.[[ByteOffset]].
const viewOffset: uintptr = dataView.byte_offset;
- // 10. Let viewSize be view.[[ByteLength]].
- const viewSize: uintptr = dataView.byte_length;
+ // 12. Let viewSize be GetViewByteLength(view, getBufferByteLength).
+ let viewSize: uintptr;
+ if (dataView.bit_field.is_length_tracking) {
+ viewSize = LoadVariableLengthJSArrayBufferViewByteLength(
+ dataView, dataView.buffer) otherwise unreachable;
+ } else {
+ viewSize = dataView.byte_length;
+ }
- // 11. Let elementSize be the Element Size value specified in Table 62
+ // 13. Let elementSize be the Element Size value specified in Table 62
// for Element Type type.
const elementSize: uintptr = DataViewElementSize(kind);
- // 12. If getIndex + elementSize > viewSize, throw a RangeError exception.
+ // 14. If getIndex + elementSize > viewSize, throw a RangeError exception.
CheckIntegerIndexAdditionOverflow(getIndex, elementSize, viewSize)
otherwise RangeError;
- // 13. Let bufferIndex be getIndex + viewOffset.
+ // 15. Let bufferIndex be getIndex + viewOffset.
const bufferIndex: uintptr = getIndex + viewOffset;
if constexpr (
diff --git a/deps/v8/src/builtins/finalization-registry.tq b/deps/v8/src/builtins/finalization-registry.tq
index 72db154a6f..38cae7ed20 100644
--- a/deps/v8/src/builtins/finalization-registry.tq
+++ b/deps/v8/src/builtins/finalization-registry.tq
@@ -79,10 +79,10 @@ FinalizationRegistryCleanupLoop(implicit context: Context)(
case (weakCell: WeakCell): {
try {
Call(context, callback, Undefined, weakCell.holdings);
- } catch (e) {
+ } catch (e, message) {
runtime::ShrinkFinalizationRegistryUnregisterTokenMap(
context, finalizationRegistry);
- ReThrow(context, e);
+ ReThrowWithMessage(context, e, message);
}
}
}
diff --git a/deps/v8/src/builtins/ia32/builtins-ia32.cc b/deps/v8/src/builtins/ia32/builtins-ia32.cc
index aed3333c71..3beff0d53f 100644
--- a/deps/v8/src/builtins/ia32/builtins-ia32.cc
+++ b/deps/v8/src/builtins/ia32/builtins-ia32.cc
@@ -214,8 +214,10 @@ void Builtins::Generate_JSConstructStubGeneric(MacroAssembler* masm) {
__ mov(eax, FieldOperand(edi, JSFunction::kSharedFunctionInfoOffset));
__ mov(eax, FieldOperand(eax, SharedFunctionInfo::kFlagsOffset));
__ DecodeField<SharedFunctionInfo::FunctionKindBits>(eax);
- __ JumpIfIsInRange(eax, kDefaultDerivedConstructor, kDerivedConstructor, ecx,
- &not_create_implicit_receiver, Label::kNear);
+ __ JumpIfIsInRange(
+ eax, static_cast<uint32_t>(FunctionKind::kDefaultDerivedConstructor),
+ static_cast<uint32_t>(FunctionKind::kDerivedConstructor), ecx,
+ &not_create_implicit_receiver, Label::kNear);
// If not derived class constructor: Allocate the new receiver object.
__ IncrementCounter(masm->isolate()->counters()->constructed_objects(), 1,
@@ -837,7 +839,7 @@ static void TailCallRuntimeIfMarkerEquals(MacroAssembler* masm,
Runtime::FunctionId function_id) {
ASM_CODE_COMMENT(masm);
Label no_match;
- __ cmp(actual_marker, expected_marker);
+ __ cmp(actual_marker, static_cast<int>(expected_marker));
__ j(not_equal, &no_match, Label::kNear);
GenerateTailCallToReturnedCode(masm, function_id);
__ bind(&no_match);
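
The extra static_cast here (and in the other ports below) follows from OptimizationMarker and FunctionKind being scoped enums, which no longer convert implicitly to the integer immediates the assembler helpers expect. A small illustrative snippet, using a placeholder enum rather than V8's:

#include <cstdint>

enum class Marker : int32_t { kNone = 0, kCompileOptimized = 2 };

// A scoped enum has no implicit integer conversion, so call sites that feed
// an integer-operand overload must cast explicitly.
int32_t AsImmediate(Marker m) { return static_cast<int32_t>(m); }
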
diff --git a/deps/v8/src/builtins/iterator.tq b/deps/v8/src/builtins/iterator.tq
index 0511c0aa69..ad030a1e9c 100644
--- a/deps/v8/src/builtins/iterator.tq
+++ b/deps/v8/src/builtins/iterator.tq
@@ -130,7 +130,7 @@ transitioning macro IteratorCloseOnException(implicit context: Context)(
// c. Set innerResult to Call(return, iterator).
// If an exception occurs, the original exception remains bound
Call(context, method, iterator.object);
- } catch (_e) {
+ } catch (_e, _message) {
// Swallow the exception.
}
diff --git a/deps/v8/src/builtins/loong64/builtins-loong64.cc b/deps/v8/src/builtins/loong64/builtins-loong64.cc
index 3063223227..2e533f6afd 100644
--- a/deps/v8/src/builtins/loong64/builtins-loong64.cc
+++ b/deps/v8/src/builtins/loong64/builtins-loong64.cc
@@ -186,8 +186,10 @@ void Builtins::Generate_JSConstructStubGeneric(MacroAssembler* masm) {
__ Ld_d(t2, FieldMemOperand(a1, JSFunction::kSharedFunctionInfoOffset));
__ Ld_wu(t2, FieldMemOperand(t2, SharedFunctionInfo::kFlagsOffset));
__ DecodeField<SharedFunctionInfo::FunctionKindBits>(t2);
- __ JumpIfIsInRange(t2, kDefaultDerivedConstructor, kDerivedConstructor,
- &not_create_implicit_receiver);
+ __ JumpIfIsInRange(
+ t2, static_cast<uint32_t>(FunctionKind::kDefaultDerivedConstructor),
+ static_cast<uint32_t>(FunctionKind::kDerivedConstructor),
+ &not_create_implicit_receiver);
// If not derived class constructor: Allocate the new receiver object.
__ IncrementCounter(masm->isolate()->counters()->constructed_objects(), 1, t2,
@@ -871,7 +873,8 @@ static void TailCallRuntimeIfMarkerEquals(MacroAssembler* masm,
OptimizationMarker expected_marker,
Runtime::FunctionId function_id) {
Label no_match;
- __ Branch(&no_match, ne, actual_marker, Operand(expected_marker));
+ __ Branch(&no_match, ne, actual_marker,
+ Operand(static_cast<int>(expected_marker)));
GenerateTailCallToReturnedCode(masm, function_id);
__ bind(&no_match);
}
@@ -2298,7 +2301,7 @@ void Builtins::Generate_CallFunction(MacroAssembler* masm,
// -- a0 : the number of arguments
// -- a1 : the function to call (checked to be a JSFunction)
// -----------------------------------
- __ AssertFunction(a1);
+ __ AssertCallableFunction(a1);
Label class_constructor;
__ Ld_d(a2, FieldMemOperand(a1, JSFunction::kSharedFunctionInfoOffset));
diff --git a/deps/v8/src/builtins/mips/builtins-mips.cc b/deps/v8/src/builtins/mips/builtins-mips.cc
index 74493abad3..c1b1b4711d 100644
--- a/deps/v8/src/builtins/mips/builtins-mips.cc
+++ b/deps/v8/src/builtins/mips/builtins-mips.cc
@@ -185,8 +185,10 @@ void Builtins::Generate_JSConstructStubGeneric(MacroAssembler* masm) {
__ lw(t2, FieldMemOperand(a1, JSFunction::kSharedFunctionInfoOffset));
__ lw(t2, FieldMemOperand(t2, SharedFunctionInfo::kFlagsOffset));
__ DecodeField<SharedFunctionInfo::FunctionKindBits>(t2);
- __ JumpIfIsInRange(t2, kDefaultDerivedConstructor, kDerivedConstructor,
- &not_create_implicit_receiver);
+ __ JumpIfIsInRange(
+ t2, static_cast<uint32_t>(FunctionKind::kDefaultDerivedConstructor),
+ static_cast<uint32_t>(FunctionKind::kDerivedConstructor),
+ &not_create_implicit_receiver);
// If not derived class constructor: Allocate the new receiver object.
__ IncrementCounter(masm->isolate()->counters()->constructed_objects(), 1,
@@ -865,7 +867,8 @@ static void TailCallRuntimeIfMarkerEquals(MacroAssembler* masm,
Runtime::FunctionId function_id) {
ASM_CODE_COMMENT(masm);
Label no_match;
- __ Branch(&no_match, ne, actual_marker, Operand(expected_marker));
+ __ Branch(&no_match, ne, actual_marker,
+ Operand(static_cast<int>(expected_marker)));
GenerateTailCallToReturnedCode(masm, function_id);
__ bind(&no_match);
}
@@ -2241,7 +2244,7 @@ void Builtins::Generate_CallFunction(MacroAssembler* masm,
// -- a0 : the number of arguments
// -- a1 : the function to call (checked to be a JSFunction)
// -----------------------------------
- __ AssertFunction(a1);
+ __ AssertCallableFunction(a1);
Label class_constructor;
__ lw(a2, FieldMemOperand(a1, JSFunction::kSharedFunctionInfoOffset));
diff --git a/deps/v8/src/builtins/mips64/builtins-mips64.cc b/deps/v8/src/builtins/mips64/builtins-mips64.cc
index a357877acf..2ad2fae5db 100644
--- a/deps/v8/src/builtins/mips64/builtins-mips64.cc
+++ b/deps/v8/src/builtins/mips64/builtins-mips64.cc
@@ -186,8 +186,10 @@ void Builtins::Generate_JSConstructStubGeneric(MacroAssembler* masm) {
__ Ld(t2, FieldMemOperand(a1, JSFunction::kSharedFunctionInfoOffset));
__ lwu(t2, FieldMemOperand(t2, SharedFunctionInfo::kFlagsOffset));
__ DecodeField<SharedFunctionInfo::FunctionKindBits>(t2);
- __ JumpIfIsInRange(t2, kDefaultDerivedConstructor, kDerivedConstructor,
- &not_create_implicit_receiver);
+ __ JumpIfIsInRange(
+ t2, static_cast<uint32_t>(FunctionKind::kDefaultDerivedConstructor),
+ static_cast<uint32_t>(FunctionKind::kDerivedConstructor),
+ &not_create_implicit_receiver);
// If not derived class constructor: Allocate the new receiver object.
__ IncrementCounter(masm->isolate()->counters()->constructed_objects(), 1,
@@ -876,7 +878,8 @@ static void TailCallRuntimeIfMarkerEquals(MacroAssembler* masm,
OptimizationMarker expected_marker,
Runtime::FunctionId function_id) {
Label no_match;
- __ Branch(&no_match, ne, actual_marker, Operand(expected_marker));
+ __ Branch(&no_match, ne, actual_marker,
+ Operand(static_cast<int>(expected_marker)));
GenerateTailCallToReturnedCode(masm, function_id);
__ bind(&no_match);
}
@@ -2294,7 +2297,7 @@ void Builtins::Generate_CallFunction(MacroAssembler* masm,
// -- a0 : the number of arguments
// -- a1 : the function to call (checked to be a JSFunction)
// -----------------------------------
- __ AssertFunction(a1);
+ __ AssertCallableFunction(a1);
Label class_constructor;
__ Ld(a2, FieldMemOperand(a1, JSFunction::kSharedFunctionInfoOffset));
diff --git a/deps/v8/src/builtins/object-fromentries.tq b/deps/v8/src/builtins/object-fromentries.tq
index 34ab73148f..cb43a1ea2a 100644
--- a/deps/v8/src/builtins/object-fromentries.tq
+++ b/deps/v8/src/builtins/object-fromentries.tq
@@ -69,9 +69,9 @@ ObjectFromEntries(
CreateDataProperty(result, pair.key, pair.value);
}
return result;
- } catch (e) deferred {
+ } catch (e, message) deferred {
iterator::IteratorCloseOnException(i);
- ReThrow(context, e);
+ ReThrowWithMessage(context, e, message);
}
} label Throw deferred {
ThrowTypeError(MessageTemplate::kNotIterable);
diff --git a/deps/v8/src/builtins/ppc/builtins-ppc.cc b/deps/v8/src/builtins/ppc/builtins-ppc.cc
index 56dfcfa262..1c4f571e83 100644
--- a/deps/v8/src/builtins/ppc/builtins-ppc.cc
+++ b/deps/v8/src/builtins/ppc/builtins-ppc.cc
@@ -79,11 +79,16 @@ void Generate_PushArguments(MacroAssembler* masm, Register array, Register argc,
ArgumentsElementType element_type) {
DCHECK(!AreAliased(array, argc, scratch));
Label loop, done;
- __ cmpi(argc, Operand::Zero());
+ if (kJSArgcIncludesReceiver) {
+ __ subi(scratch, argc, Operand(kJSArgcReceiverSlots));
+ } else {
+ __ mr(scratch, argc);
+ }
+ __ cmpi(scratch, Operand::Zero());
__ beq(&done);
- __ ShiftLeftU64(scratch, argc, Operand(kSystemPointerSizeLog2));
+ __ mtctr(scratch);
+ __ ShiftLeftU64(scratch, scratch, Operand(kSystemPointerSizeLog2));
__ add(scratch, array, scratch);
- __ mtctr(argc);
__ bind(&loop);
__ LoadU64WithUpdate(ip, MemOperand(scratch, -kSystemPointerSize));
@@ -155,7 +160,9 @@ void Generate_JSBuiltinsConstructStubHelper(MacroAssembler* masm) {
}
// Remove caller arguments from the stack and return.
__ DropArguments(scratch, TurboAssembler::kCountIsSmi,
- TurboAssembler::kCountExcludesReceiver);
+ kJSArgcIncludesReceiver
+ ? TurboAssembler::kCountIncludesReceiver
+ : TurboAssembler::kCountExcludesReceiver);
__ blr();
__ bind(&stack_overflow);
@@ -202,8 +209,10 @@ void Builtins::Generate_JSConstructStubGeneric(MacroAssembler* masm) {
r7, FieldMemOperand(r4, JSFunction::kSharedFunctionInfoOffset), r0);
__ lwz(r7, FieldMemOperand(r7, SharedFunctionInfo::kFlagsOffset));
__ DecodeField<SharedFunctionInfo::FunctionKindBits>(r7);
- __ JumpIfIsInRange(r7, kDefaultDerivedConstructor, kDerivedConstructor,
- &not_create_implicit_receiver);
+ __ JumpIfIsInRange(
+ r7, static_cast<uint32_t>(FunctionKind::kDefaultDerivedConstructor),
+ static_cast<uint32_t>(FunctionKind::kDerivedConstructor),
+ &not_create_implicit_receiver);
// If not derived class constructor: Allocate the new receiver object.
__ IncrementCounter(masm->isolate()->counters()->constructed_objects(), 1, r7,
@@ -315,7 +324,9 @@ void Builtins::Generate_JSConstructStubGeneric(MacroAssembler* masm) {
// Remove caller arguments from the stack and return.
__ DropArguments(r4, TurboAssembler::kCountIsSmi,
- TurboAssembler::kCountExcludesReceiver);
+ kJSArgcIncludesReceiver
+ ? TurboAssembler::kCountIncludesReceiver
+ : TurboAssembler::kCountExcludesReceiver);
__ blr();
__ bind(&check_receiver);
@@ -423,6 +434,9 @@ void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) {
r6, FieldMemOperand(r7, JSFunction::kSharedFunctionInfoOffset), r0);
__ LoadU16(
r6, FieldMemOperand(r6, SharedFunctionInfo::kFormalParameterCountOffset));
+ if (kJSArgcIncludesReceiver) {
+ __ subi(r6, r6, Operand(kJSArgcReceiverSlots));
+ }
__ LoadTaggedPointerField(
r5, FieldMemOperand(r4, JSGeneratorObject::kParametersAndRegistersOffset),
r0);
@@ -732,7 +746,11 @@ static void Generate_JSEntryTrampolineHelper(MacroAssembler* masm,
// Check if we have enough stack space to push all arguments.
Label enough_stack_space, stack_overflow;
- __ addi(r3, r7, Operand(1));
+ if (kJSArgcIncludesReceiver) {
+ __ mr(r3, r7);
+ } else {
+ __ addi(r3, r7, Operand(1));
+ }
__ StackOverflowCheck(r3, r9, &stack_overflow);
__ b(&enough_stack_space);
__ bind(&stack_overflow);
@@ -834,7 +852,10 @@ static void LeaveInterpreterFrame(MacroAssembler* masm, Register scratch1,
MemOperand(fp, StandardFrameConstants::kArgCOffset));
__ ShiftLeftU64(actual_params_size, actual_params_size,
Operand(kSystemPointerSizeLog2));
- __ addi(actual_params_size, actual_params_size, Operand(kSystemPointerSize));
+ if (!kJSArgcIncludesReceiver) {
+ __ addi(actual_params_size, actual_params_size,
+ Operand(kSystemPointerSize));
+ }
// If actual is bigger than formal, then we should use it to free up the stack
// arguments.
@@ -856,7 +877,7 @@ static void TailCallRuntimeIfMarkerEquals(MacroAssembler* masm,
OptimizationMarker expected_marker,
Runtime::FunctionId function_id) {
Label no_match;
- __ cmpi(actual_marker, Operand(expected_marker));
+ __ cmpi(actual_marker, Operand(static_cast<int>(expected_marker)));
__ bne(&no_match);
GenerateTailCallToReturnedCode(masm, function_id);
__ bind(&no_match);
@@ -1051,7 +1072,7 @@ static void MaybeOptimizeCodeOrTailCallOptimizedCodeSlot(
// stack left to right.
//
// The live registers are:
-// o r3: actual argument count (not including the receiver)
+// o r3: actual argument count
// o r4: the JS function object being called.
// o r6: the incoming new target or generator object
// o cp: our context
@@ -1302,7 +1323,7 @@ void Builtins::Generate_InterpreterPushArgsThenCallImpl(
InterpreterPushArgsMode mode) {
DCHECK(mode != InterpreterPushArgsMode::kArrayFunction);
// ----------- S t a t e -------------
- // -- r3 : the number of arguments (not including the receiver)
+ // -- r3 : the number of arguments
// -- r5 : the address of the first argument to be pushed. Subsequent
// arguments should be consecutive above this, in the same order as
// they are to be pushed onto the stack.
@@ -1315,15 +1336,18 @@ void Builtins::Generate_InterpreterPushArgsThenCallImpl(
__ subi(r3, r3, Operand(1));
}
- // Calculate number of arguments (add one for receiver).
- __ addi(r6, r3, Operand(1));
- __ StackOverflowCheck(r6, ip, &stack_overflow);
-
- if (receiver_mode == ConvertReceiverMode::kNullOrUndefined) {
- // Don't copy receiver. Argument count is correct.
+ const bool skip_receiver =
+ receiver_mode == ConvertReceiverMode::kNullOrUndefined;
+ if (kJSArgcIncludesReceiver && skip_receiver) {
+ __ subi(r6, r3, Operand(kJSArgcReceiverSlots));
+ } else if (!kJSArgcIncludesReceiver && !skip_receiver) {
+ __ addi(r6, r3, Operand(1));
+ } else {
__ mr(r6, r3);
}
+ __ StackOverflowCheck(r6, ip, &stack_overflow);
+
// Push the arguments.
GenerateInterpreterPushArgs(masm, r6, r5, r7);
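
The three-way branch above collapses the combinations of kJSArgcIncludesReceiver and the null/undefined receiver mode into one adjusted count. A compact C++ sketch of the same decision, with illustrative parameter names:

#include <cstdint>

int32_t ArgsToPush(int32_t argc, bool argc_includes_receiver,
                   bool receiver_is_implicit) {
  // Receiver already counted but not pushed explicitly: drop one slot.
  if (argc_includes_receiver && receiver_is_implicit) return argc - 1;
  // Receiver pushed explicitly but not counted yet: add one slot.
  if (!argc_includes_receiver && !receiver_is_implicit) return argc + 1;
  // Otherwise the count is already correct.
  return argc;
}
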
@@ -1359,23 +1383,28 @@ void Builtins::Generate_InterpreterPushArgsThenCallImpl(
void Builtins::Generate_InterpreterPushArgsThenConstructImpl(
MacroAssembler* masm, InterpreterPushArgsMode mode) {
// ----------- S t a t e -------------
- // -- r3 : argument count (not including receiver)
+ // -- r3 : argument count
// -- r6 : new target
// -- r4 : constructor to call
// -- r5 : allocation site feedback if available, undefined otherwise.
// -- r7 : address of the first argument
// -----------------------------------
Label stack_overflow;
- __ addi(r8, r3, Operand(1));
- __ StackOverflowCheck(r8, ip, &stack_overflow);
+ __ StackOverflowCheck(r3, ip, &stack_overflow);
if (mode == InterpreterPushArgsMode::kWithFinalSpread) {
// The spread argument should not be pushed.
__ subi(r3, r3, Operand(1));
}
+ Register argc_without_receiver = r3;
+ if (kJSArgcIncludesReceiver) {
+ argc_without_receiver = ip;
+ __ subi(argc_without_receiver, r3, Operand(kJSArgcReceiverSlots));
+ }
+
// Push the arguments.
- GenerateInterpreterPushArgs(masm, r3, r7, r8);
+ GenerateInterpreterPushArgs(masm, argc_without_receiver, r7, r8);
// Push a slot for the receiver to be constructed.
__ li(r0, Operand::Zero());
@@ -1582,13 +1611,14 @@ void Generate_ContinueToBuiltinHelper(MacroAssembler* masm,
// Overwrite the hole inserted by the deoptimizer with the return value from
// the LAZY deopt point. r0 contains the arguments count, the return value
// from LAZY is always the last argument.
- __ addi(r3, r3,
- Operand(BuiltinContinuationFrameConstants::kFixedSlotCount));
+ constexpr int return_value_offset =
+ BuiltinContinuationFrameConstants::kFixedSlotCount -
+ kJSArgcReceiverSlots;
+ __ addi(r3, r3, Operand(return_value_offset));
__ ShiftLeftU64(r0, r3, Operand(kSystemPointerSizeLog2));
__ StoreU64(scratch, MemOperand(sp, r0));
// Recover arguments count.
- __ subi(r3, r3,
- Operand(BuiltinContinuationFrameConstants::kFixedSlotCount));
+ __ subi(r3, r3, Operand(return_value_offset));
}
__ LoadU64(
fp,
@@ -1703,16 +1733,18 @@ void Builtins::Generate_FunctionPrototypeApply(MacroAssembler* masm) {
Label done;
__ LoadU64(r4, MemOperand(sp)); // receiver
- __ cmpi(r3, Operand(1));
+ __ CmpS64(r3, Operand(JSParameterCount(1)), r0);
__ blt(&done);
__ LoadU64(r8, MemOperand(sp, kSystemPointerSize)); // thisArg
- __ cmpi(r3, Operand(2));
+ __ CmpS64(r3, Operand(JSParameterCount(2)), r0);
__ blt(&done);
__ LoadU64(r5, MemOperand(sp, 2 * kSystemPointerSize)); // argArray
__ bind(&done);
- __ DropArgumentsAndPushNewReceiver(r3, r8, TurboAssembler::kCountIsInteger,
- TurboAssembler::kCountExcludesReceiver);
+ __ DropArgumentsAndPushNewReceiver(
+ r3, r8, TurboAssembler::kCountIsInteger,
+ kJSArgcIncludesReceiver ? TurboAssembler::kCountIncludesReceiver
+ : TurboAssembler::kCountExcludesReceiver);
}
// ----------- S t a t e -------------
@@ -1738,7 +1770,7 @@ void Builtins::Generate_FunctionPrototypeApply(MacroAssembler* masm) {
// arguments to the receiver.
__ bind(&no_arguments);
{
- __ li(r3, Operand::Zero());
+ __ mov(r3, Operand(JSParameterCount(0)));
__ Jump(masm->isolate()->builtins()->Call(), RelocInfo::CODE_TARGET);
}
}
@@ -1752,7 +1784,7 @@ void Builtins::Generate_FunctionPrototypeCall(MacroAssembler* masm) {
// r3: actual number of arguments
{
Label done;
- __ cmpi(r3, Operand::Zero());
+ __ CmpS64(r3, Operand(JSParameterCount(0)), r0);
__ bne(&done);
__ PushRoot(RootIndex::kUndefinedValue);
__ addi(r3, r3, Operand(1));
@@ -1784,19 +1816,21 @@ void Builtins::Generate_ReflectApply(MacroAssembler* masm) {
__ mr(r5, r4);
Label done;
- __ cmpi(r3, Operand(1));
+ __ CmpS64(r3, Operand(JSParameterCount(1)), r0);
__ blt(&done);
__ LoadU64(r4, MemOperand(sp, kSystemPointerSize)); // thisArg
- __ cmpi(r3, Operand(2));
+ __ CmpS64(r3, Operand(JSParameterCount(2)), r0);
__ blt(&done);
__ LoadU64(r8, MemOperand(sp, 2 * kSystemPointerSize)); // argArray
- __ cmpi(r3, Operand(3));
+ __ CmpS64(r3, Operand(JSParameterCount(3)), r0);
__ blt(&done);
__ LoadU64(r5, MemOperand(sp, 3 * kSystemPointerSize)); // argArray
__ bind(&done);
- __ DropArgumentsAndPushNewReceiver(r3, r8, TurboAssembler::kCountIsInteger,
- TurboAssembler::kCountExcludesReceiver);
+ __ DropArgumentsAndPushNewReceiver(
+ r3, r8, TurboAssembler::kCountIsInteger,
+ kJSArgcIncludesReceiver ? TurboAssembler::kCountIncludesReceiver
+ : TurboAssembler::kCountExcludesReceiver);
}
// ----------- S t a t e -------------
@@ -1833,19 +1867,21 @@ void Builtins::Generate_ReflectConstruct(MacroAssembler* masm) {
Label done;
__ mr(r7, r4);
- __ cmpi(r3, Operand(1));
+ __ CmpS64(r3, Operand(JSParameterCount(1)), r0);
__ blt(&done);
__ LoadU64(r4, MemOperand(sp, kSystemPointerSize)); // thisArg
__ mr(r6, r4);
- __ cmpi(r3, Operand(2));
+ __ CmpS64(r3, Operand(JSParameterCount(2)), r0);
__ blt(&done);
__ LoadU64(r5, MemOperand(sp, 2 * kSystemPointerSize)); // argArray
- __ cmpi(r3, Operand(3));
+ __ CmpS64(r3, Operand(JSParameterCount(3)), r0);
__ blt(&done);
__ LoadU64(r6, MemOperand(sp, 3 * kSystemPointerSize)); // argArray
__ bind(&done);
- __ DropArgumentsAndPushNewReceiver(r3, r7, TurboAssembler::kCountIsInteger,
- TurboAssembler::kCountExcludesReceiver);
+ __ DropArgumentsAndPushNewReceiver(
+ r3, r7, TurboAssembler::kCountIsInteger,
+ kJSArgcIncludesReceiver ? TurboAssembler::kCountIncludesReceiver
+ : TurboAssembler::kCountExcludesReceiver);
}
// ----------- S t a t e -------------
@@ -1887,14 +1923,21 @@ void Generate_AllocateSpaceAndShiftExistingArguments(
Register dest = pointer_to_new_space_out;
__ addi(dest, sp, Operand(-kSystemPointerSize));
- __ addi(r0, argc_in_out, Operand(1));
+ Label loop, skip;
+ if (!kJSArgcIncludesReceiver) {
+ __ addi(r0, argc_in_out, Operand(1));
+ } else {
+ __ mr(r0, argc_in_out);
+ __ cmpi(r0, Operand::Zero());
+ __ ble(&skip);
+ }
__ mtctr(r0);
- Label loop;
__ bind(&loop);
__ LoadU64WithUpdate(r0, MemOperand(old_sp, kSystemPointerSize));
__ StoreU64WithUpdate(r0, MemOperand(dest, kSystemPointerSize));
__ bdnz(&loop);
+ __ bind(&skip);
// Update total number of arguments, restore dest.
__ add(argc_in_out, argc_in_out, count);
__ addi(dest, dest, Operand(kSystemPointerSize));
@@ -1908,7 +1951,7 @@ void Builtins::Generate_CallOrConstructVarargs(MacroAssembler* masm,
Handle<Code> code) {
// ----------- S t a t e -------------
// -- r4 : target
- // -- r3 : number of parameters on the stack (not including the receiver)
+ // -- r3 : number of parameters on the stack
// -- r5 : arguments list (a FixedArray)
// -- r7 : len (number of elements to push from args)
// -- r6 : new.target (for [[Construct]])
@@ -1980,7 +2023,7 @@ void Builtins::Generate_CallOrConstructForwardVarargs(MacroAssembler* masm,
CallOrConstructMode mode,
Handle<Code> code) {
// ----------- S t a t e -------------
- // -- r3 : the number of arguments (not including the receiver)
+ // -- r3 : the number of arguments
// -- r6 : the new.target (for [[Construct]] calls)
// -- r4 : the target to call (can be any Object)
// -- r5 : start index (to support rest parameters)
@@ -2008,12 +2051,14 @@ void Builtins::Generate_CallOrConstructForwardVarargs(MacroAssembler* masm,
Label stack_done, stack_overflow;
__ LoadU64(r8, MemOperand(fp, StandardFrameConstants::kArgCOffset));
+ if (kJSArgcIncludesReceiver) {
+ __ subi(r8, r8, Operand(kJSArgcReceiverSlots));
+ }
__ sub(r8, r8, r5, LeaveOE, SetRC);
__ ble(&stack_done, cr0);
{
// ----------- S t a t e -------------
- // -- r3 : the number of arguments already in the stack (not including the
- // receiver)
+ // -- r3 : the number of arguments already in the stack
// -- r4 : the target to call (can be any Object)
// -- r5 : start index (to support rest parameters)
// -- r6 : the new.target (for [[Construct]] calls)
@@ -2069,7 +2114,7 @@ void Builtins::Generate_CallOrConstructForwardVarargs(MacroAssembler* masm,
void Builtins::Generate_CallFunction(MacroAssembler* masm,
ConvertReceiverMode mode) {
// ----------- S t a t e -------------
- // -- r3 : the number of arguments (not including the receiver)
+ // -- r3 : the number of arguments
// -- r4 : the function to call (checked to be a JSFunction)
// -----------------------------------
__ AssertFunction(r4);
@@ -2095,7 +2140,7 @@ void Builtins::Generate_CallFunction(MacroAssembler* masm,
__ bne(&done_convert, cr0);
{
// ----------- S t a t e -------------
- // -- r3 : the number of arguments (not including the receiver)
+ // -- r3 : the number of arguments
// -- r4 : the function to call (checked to be a JSFunction)
// -- r5 : the shared function info.
// -- cp : the function context.
@@ -2148,7 +2193,7 @@ void Builtins::Generate_CallFunction(MacroAssembler* masm,
__ bind(&done_convert);
// ----------- S t a t e -------------
- // -- r3 : the number of arguments (not including the receiver)
+ // -- r3 : the number of arguments
// -- r4 : the function to call (checked to be a JSFunction)
// -- r5 : the shared function info.
// -- cp : the function context.
@@ -2171,7 +2216,7 @@ namespace {
void Generate_PushBoundArguments(MacroAssembler* masm) {
// ----------- S t a t e -------------
- // -- r3 : the number of arguments (not including the receiver)
+ // -- r3 : the number of arguments
// -- r4 : target (checked to be a JSBoundFunction)
// -- r6 : new.target (only in case of [[Construct]])
// -----------------------------------
@@ -2184,7 +2229,7 @@ void Generate_PushBoundArguments(MacroAssembler* masm) {
__ beq(&no_bound_arguments, cr0);
{
// ----------- S t a t e -------------
- // -- r3 : the number of arguments (not including the receiver)
+ // -- r3 : the number of arguments
// -- r4 : target (checked to be a JSBoundFunction)
// -- r5 : the [[BoundArguments]] (implemented as FixedArray)
// -- r6 : new.target (only in case of [[Construct]])
@@ -2244,7 +2289,7 @@ void Generate_PushBoundArguments(MacroAssembler* masm) {
// static
void Builtins::Generate_CallBoundFunctionImpl(MacroAssembler* masm) {
// ----------- S t a t e -------------
- // -- r3 : the number of arguments (not including the receiver)
+ // -- r3 : the number of arguments
// -- r4 : the function to call (checked to be a JSBoundFunction)
// -----------------------------------
__ AssertBoundFunction(r4);
@@ -2267,7 +2312,7 @@ void Builtins::Generate_CallBoundFunctionImpl(MacroAssembler* masm) {
// static
void Builtins::Generate_Call(MacroAssembler* masm, ConvertReceiverMode mode) {
// ----------- S t a t e -------------
- // -- r3 : the number of arguments (not including the receiver)
+ // -- r3 : the number of arguments
// -- r4 : the target to call (can be any Object).
// -----------------------------------
Register argc = r3;
@@ -2337,7 +2382,7 @@ void Builtins::Generate_Call(MacroAssembler* masm, ConvertReceiverMode mode) {
// static
void Builtins::Generate_ConstructFunction(MacroAssembler* masm) {
// ----------- S t a t e -------------
- // -- r3 : the number of arguments (not including the receiver)
+ // -- r3 : the number of arguments
// -- r4 : the constructor to call (checked to be a JSFunction)
// -- r6 : the new target (checked to be a constructor)
// -----------------------------------
@@ -2369,7 +2414,7 @@ void Builtins::Generate_ConstructFunction(MacroAssembler* masm) {
// static
void Builtins::Generate_ConstructBoundFunction(MacroAssembler* masm) {
// ----------- S t a t e -------------
- // -- r3 : the number of arguments (not including the receiver)
+ // -- r3 : the number of arguments
// -- r4 : the function to call (checked to be a JSBoundFunction)
// -- r6 : the new target (checked to be a constructor)
// -----------------------------------
@@ -2396,7 +2441,7 @@ void Builtins::Generate_ConstructBoundFunction(MacroAssembler* masm) {
// static
void Builtins::Generate_Construct(MacroAssembler* masm) {
// ----------- S t a t e -------------
- // -- r3 : the number of arguments (not including the receiver)
+ // -- r3 : the number of arguments
// -- r4 : the constructor to call (can be any Object)
// -- r6 : the new target (either the same as the constructor or
// the JSFunction on which new was invoked initially)
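
Throughout this port the comparisons switch from raw argument counts to JSParameterCount(n), reflecting that argc now (optionally) includes the receiver slot. A minimal sketch of the convention, using placeholder constants rather than V8's definitions:

#include <cstdint>

constexpr bool kArgcIncludesReceiverSketch = true;  // the new convention
constexpr int32_t kReceiverSlotsSketch = kArgcIncludesReceiverSketch ? 1 : 0;

// Machine-level argc for a call with |params| JS-visible parameters.
constexpr int32_t JSParameterCountSketch(int32_t params) {
  return params + kReceiverSlotsSketch;
}

static_assert(JSParameterCountSketch(0) == 1,
              "the receiver occupies one slot when it is included in argc");
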
diff --git a/deps/v8/src/builtins/promise-abstract-operations.tq b/deps/v8/src/builtins/promise-abstract-operations.tq
index 5c871d3ff0..2e2dd0e1ef 100644
--- a/deps/v8/src/builtins/promise-abstract-operations.tq
+++ b/deps/v8/src/builtins/promise-abstract-operations.tq
@@ -113,7 +113,7 @@ transitioning macro MorphAndEnqueuePromiseReaction(implicit context: Context)(
// Morph {current} from a PromiseReaction into a PromiseReactionJobTask
// and schedule that on the microtask queue. We try to minimize the number
- // of stores here to avoid screwing up the store buffer.
+ // of stores here to avoid write barrier overhead.
static_assert(
kPromiseReactionSize ==
kPromiseReactionJobTaskSizeOfAllPromiseReactionJobTasks);
diff --git a/deps/v8/src/builtins/promise-all.tq b/deps/v8/src/builtins/promise-all.tq
index 602908d7f6..cd55ec9f3b 100644
--- a/deps/v8/src/builtins/promise-all.tq
+++ b/deps/v8/src/builtins/promise-all.tq
@@ -139,7 +139,7 @@ transitioning macro PerformPromiseAll<F1: type, F2: type>(
constructor: Constructor, capability: PromiseCapability,
promiseResolveFunction: JSAny, createResolveElementFunctor: F1,
createRejectElementFunctor: F2): JSAny labels
-Reject(Object) {
+Reject(JSAny) {
const promise = capability.promise;
const resolve = capability.resolve;
const reject = capability.reject;
@@ -172,7 +172,7 @@ Reject(Object) {
// to true.
// ReturnIfAbrupt(nextValue).
nextValue = iterator::IteratorValue(next, fastIteratorResultMap);
- } catch (e) {
+ } catch (e, _message) {
goto Reject(e);
}
@@ -262,7 +262,7 @@ Reject(Object) {
// Set index to index + 1.
index += 1;
}
- } catch (e) deferred {
+ } catch (e, _message) deferred {
iterator::IteratorCloseOnException(iter);
goto Reject(e);
} label Done {}
@@ -354,11 +354,9 @@ transitioning macro GeneratePromiseAll<F1: type, F2: type>(
nativeContext, i, constructor, capability, promiseResolveFunction,
createResolveElementFunctor, createRejectElementFunctor)
otherwise Reject;
- } catch (e) deferred {
+ } catch (e, _message) deferred {
goto Reject(e);
- } label Reject(e: Object) deferred {
- // Exception must be bound to a JS value.
- const e = UnsafeCast<JSAny>(e);
+ } label Reject(e: JSAny) deferred {
const reject = UnsafeCast<JSAny>(capability.reject);
Call(context, reject, Undefined, e);
return capability.promise;
diff --git a/deps/v8/src/builtins/promise-any.tq b/deps/v8/src/builtins/promise-any.tq
index 1555511eda..d50b8b5574 100644
--- a/deps/v8/src/builtins/promise-any.tq
+++ b/deps/v8/src/builtins/promise-any.tq
@@ -159,7 +159,7 @@ transitioning macro PerformPromiseAny(implicit context: Context)(
nativeContext: NativeContext, iteratorRecord: iterator::IteratorRecord,
constructor: Constructor, resultCapability: PromiseCapability,
promiseResolveFunction: JSAny): JSAny labels
-Reject(Object) {
+Reject(JSAny) {
// 1. Assert: ! IsConstructor(constructor) is true.
// 2. Assert: resultCapability is a PromiseCapability Record.
@@ -198,7 +198,7 @@ Reject(Object) {
// g. ReturnIfAbrupt(nextValue).
nextValue = iterator::IteratorValue(next, fastIteratorResultMap);
- } catch (e) {
+ } catch (e, _message) {
goto Reject(e);
}
@@ -280,7 +280,7 @@ Reject(Object) {
context, rejectElement, kPromiseForwardingHandlerSymbol, True);
}
}
- } catch (e) deferred {
+ } catch (e, _message) deferred {
iterator::IteratorCloseOnException(iteratorRecord);
goto Reject(e);
} label Done {}
@@ -361,9 +361,9 @@ PromiseAny(
nativeContext, iteratorRecord, constructor, capability,
promiseResolveFunction)
otherwise Reject;
- } catch (e) deferred {
+ } catch (e, _message) deferred {
goto Reject(e);
- } label Reject(e: Object) deferred {
+ } label Reject(e: JSAny) deferred {
// Exception must be bound to a JS value.
dcheck(e != TheHole);
Call(
diff --git a/deps/v8/src/builtins/promise-constructor.tq b/deps/v8/src/builtins/promise-constructor.tq
index b5f7292a77..eec333f4ce 100644
--- a/deps/v8/src/builtins/promise-constructor.tq
+++ b/deps/v8/src/builtins/promise-constructor.tq
@@ -85,7 +85,7 @@ PromiseConstructor(
const reject = funcs.reject;
try {
Call(context, UnsafeCast<Callable>(executor), Undefined, resolve, reject);
- } catch (e) {
+ } catch (e, _message) {
Call(context, reject, Undefined, e);
}
diff --git a/deps/v8/src/builtins/promise-jobs.tq b/deps/v8/src/builtins/promise-jobs.tq
index 77d2e7cf9c..9a9d22af94 100644
--- a/deps/v8/src/builtins/promise-jobs.tq
+++ b/deps/v8/src/builtins/promise-jobs.tq
@@ -66,7 +66,7 @@ PromiseResolveThenableJob(implicit context: Context)(
try {
return Call(
context, UnsafeCast<Callable>(then), thenable, resolve, reject);
- } catch (e) {
+ } catch (e, _message) {
return Call(context, UnsafeCast<Callable>(reject), Undefined, e);
}
}
diff --git a/deps/v8/src/builtins/promise-misc.tq b/deps/v8/src/builtins/promise-misc.tq
index e8b4842dd5..99c4006da2 100644
--- a/deps/v8/src/builtins/promise-misc.tq
+++ b/deps/v8/src/builtins/promise-misc.tq
@@ -112,7 +112,7 @@ transitioning macro RunContextPromiseHookInit(implicit context: Context)(
try {
Call(context, hook, Undefined, promise, parentObject);
- } catch (e) {
+ } catch (e, _message) {
runtime::ReportMessageFromMicrotask(e);
}
}
@@ -189,7 +189,7 @@ transitioning macro RunContextPromiseHook(implicit context: Context)(
try {
Call(context, hook, Undefined, promise);
- } catch (e) {
+ } catch (e, _message) {
runtime::ReportMessageFromMicrotask(e);
}
}
diff --git a/deps/v8/src/builtins/promise-race.tq b/deps/v8/src/builtins/promise-race.tq
index eed1fae389..1d15dde666 100644
--- a/deps/v8/src/builtins/promise-race.tq
+++ b/deps/v8/src/builtins/promise-race.tq
@@ -47,7 +47,7 @@ PromiseRace(
// Let iterator be GetIterator(iterable).
// IfAbruptRejectPromise(iterator, promiseCapability).
i = iterator::GetIterator(iterable);
- } catch (e) deferred {
+ } catch (e, _message) deferred {
goto Reject(e);
}
@@ -69,7 +69,7 @@ PromiseRace(
// to true.
// ReturnIfAbrupt(nextValue).
nextValue = iterator::IteratorValue(next, fastIteratorResultMap);
- } catch (e) {
+ } catch (e, _message) {
goto Reject(e);
}
// Let nextPromise be ? Call(constructor, _promiseResolve_, «
@@ -91,14 +91,12 @@ PromiseRace(
context, thenResult, kPromiseHandledBySymbol, promise);
}
}
- } catch (e) deferred {
+ } catch (e, _message) deferred {
iterator::IteratorCloseOnException(i);
goto Reject(e);
}
- } label Reject(exception: Object) deferred {
- Call(
- context, UnsafeCast<JSAny>(reject), Undefined,
- UnsafeCast<JSAny>(exception));
+ } label Reject(exception: JSAny) deferred {
+ Call(context, UnsafeCast<JSAny>(reject), Undefined, exception);
return promise;
}
unreachable;
diff --git a/deps/v8/src/builtins/promise-reaction-job.tq b/deps/v8/src/builtins/promise-reaction-job.tq
index 0374b2a3fe..3028359107 100644
--- a/deps/v8/src/builtins/promise-reaction-job.tq
+++ b/deps/v8/src/builtins/promise-reaction-job.tq
@@ -60,7 +60,7 @@ macro FuflfillPromiseReactionJob(
const resolve = UnsafeCast<Callable>(capability.resolve);
try {
return Call(context, resolve, Undefined, result);
- } catch (e) {
+ } catch (e, _message) {
return RejectPromiseReactionJob(
context, promiseOrCapability, e, reactionType);
}
@@ -98,7 +98,7 @@ macro PromiseReactionJob(
return FuflfillPromiseReactionJob(
context, promiseOrCapability, result, reactionType);
}
- } catch (e) {
+ } catch (e, _message) {
return RejectPromiseReactionJob(
context, promiseOrCapability, e, reactionType);
}
diff --git a/deps/v8/src/builtins/promise-resolve.tq b/deps/v8/src/builtins/promise-resolve.tq
index 5b0a82ca3d..114b1e922b 100644
--- a/deps/v8/src/builtins/promise-resolve.tq
+++ b/deps/v8/src/builtins/promise-resolve.tq
@@ -165,7 +165,7 @@ ResolvePromise(implicit context: Context)(
// 10. If then is an abrupt completion, then
try {
then = GetProperty(resolution, kThenString);
- } catch (e) {
+ } catch (e, _message) {
// a. Return RejectPromise(promise, then.[[Value]]).
return RejectPromise(promise, e, False);
}
diff --git a/deps/v8/src/builtins/riscv64/builtins-riscv64.cc b/deps/v8/src/builtins/riscv64/builtins-riscv64.cc
index 51a08c1296..f5c3600850 100644
--- a/deps/v8/src/builtins/riscv64/builtins-riscv64.cc
+++ b/deps/v8/src/builtins/riscv64/builtins-riscv64.cc
@@ -194,8 +194,11 @@ void Builtins::Generate_JSConstructStubGeneric(MacroAssembler* masm) {
__ Lwu(func_info,
FieldMemOperand(func_info, SharedFunctionInfo::kFlagsOffset));
__ DecodeField<SharedFunctionInfo::FunctionKindBits>(func_info);
- __ JumpIfIsInRange(func_info, kDefaultDerivedConstructor,
- kDerivedConstructor, &not_create_implicit_receiver);
+ __ JumpIfIsInRange(
+ func_info,
+ static_cast<uint32_t>(FunctionKind::kDefaultDerivedConstructor),
+ static_cast<uint32_t>(FunctionKind::kDerivedConstructor),
+ &not_create_implicit_receiver);
Register scratch = func_info;
Register scratch2 = temps.Acquire();
// If not derived class constructor: Allocate the new receiver object.
@@ -921,8 +924,8 @@ static void TailCallRuntimeIfMarkerEquals(MacroAssembler* masm,
Runtime::FunctionId function_id) {
ASM_CODE_COMMENT(masm);
Label no_match;
- __ Branch(&no_match, ne, actual_marker, Operand(expected_marker),
- Label::Distance::kNear);
+ __ Branch(&no_match, ne, actual_marker,
+ Operand(static_cast<int>(expected_marker)), Label::Distance::kNear);
GenerateTailCallToReturnedCode(masm, function_id);
__ bind(&no_match);
}
diff --git a/deps/v8/src/builtins/s390/builtins-s390.cc b/deps/v8/src/builtins/s390/builtins-s390.cc
index 3b51a086ec..3fe9ebc683 100644
--- a/deps/v8/src/builtins/s390/builtins-s390.cc
+++ b/deps/v8/src/builtins/s390/builtins-s390.cc
@@ -33,6 +33,254 @@ namespace internal {
#define __ ACCESS_MASM(masm)
+namespace {
+
+static void AssertCodeIsBaseline(MacroAssembler* masm, Register code,
+ Register scratch) {
+ DCHECK(!AreAliased(code, scratch));
+ // Verify that the code kind is baseline code via the CodeKind.
+ __ LoadU64(scratch, FieldMemOperand(code, Code::kFlagsOffset));
+ __ DecodeField<Code::KindField>(scratch);
+ __ CmpS64(scratch, Operand(static_cast<int>(CodeKind::BASELINE)));
+ __ Assert(eq, AbortReason::kExpectedBaselineData);
+}
+
+static void GetSharedFunctionInfoBytecodeOrBaseline(MacroAssembler* masm,
+ Register sfi_data,
+ Register scratch1,
+ Label* is_baseline) {
+ USE(GetSharedFunctionInfoBytecodeOrBaseline);
+ ASM_CODE_COMMENT(masm);
+ Label done;
+ __ CompareObjectType(sfi_data, scratch1, scratch1, CODET_TYPE);
+ if (FLAG_debug_code) {
+ Label not_baseline;
+ __ b(ne, &not_baseline);
+ AssertCodeIsBaseline(masm, sfi_data, scratch1);
+ __ beq(is_baseline);
+ __ bind(&not_baseline);
+ } else {
+ __ beq(is_baseline);
+ }
+ __ CmpS32(scratch1, Operand(INTERPRETER_DATA_TYPE));
+ __ bne(&done);
+ __ LoadTaggedPointerField(
+ sfi_data,
+ FieldMemOperand(sfi_data, InterpreterData::kBytecodeArrayOffset));
+
+ __ bind(&done);
+}
+
+void Generate_OSREntry(MacroAssembler* masm, Register entry_address,
+ intptr_t offset) {
+ if (is_int20(offset)) {
+ __ lay(r14, MemOperand(entry_address, offset));
+ } else {
+ __ AddS64(r14, entry_address, Operand(offset));
+ }
+
+ // "return" to the OSR entry point of the function.
+ __ Ret();
+}
+
+// Restarts execution either at the current or next (in execution order)
+// bytecode. If there is baseline code on the shared function info, converts an
+// interpreter frame into a baseline frame and continues execution in baseline
+// code. Otherwise execution continues with bytecode.
+void Generate_BaselineOrInterpreterEntry(MacroAssembler* masm,
+ bool next_bytecode,
+ bool is_osr = false) {
+ Label start;
+ __ bind(&start);
+
+ // Get function from the frame.
+ Register closure = r3;
+ __ LoadU64(closure, MemOperand(fp, StandardFrameConstants::kFunctionOffset));
+
+ // Get the Code object from the shared function info.
+ Register code_obj = r8;
+ __ LoadTaggedPointerField(
+ code_obj,
+ FieldMemOperand(closure, JSFunction::kSharedFunctionInfoOffset));
+ __ LoadTaggedPointerField(
+ code_obj,
+ FieldMemOperand(code_obj, SharedFunctionInfo::kFunctionDataOffset));
+
+ // Check if we have baseline code. For OSR entry it is safe to assume we
+ // always have baseline code.
+ if (!is_osr) {
+ Label start_with_baseline;
+ __ CompareObjectType(code_obj, r5, r5, CODET_TYPE);
+ __ b(eq, &start_with_baseline);
+
+ // Start with bytecode as there is no baseline code.
+ Builtin builtin_id = next_bytecode
+ ? Builtin::kInterpreterEnterAtNextBytecode
+ : Builtin::kInterpreterEnterAtBytecode;
+ __ Jump(masm->isolate()->builtins()->code_handle(builtin_id),
+ RelocInfo::CODE_TARGET);
+
+ // Start with baseline code.
+ __ bind(&start_with_baseline);
+ } else if (FLAG_debug_code) {
+ __ CompareObjectType(code_obj, r5, r5, CODET_TYPE);
+ __ Assert(eq, AbortReason::kExpectedBaselineData);
+ }
+
+ if (FLAG_debug_code) {
+ AssertCodeIsBaseline(masm, code_obj, r5);
+ }
+
+ // Load the feedback vector.
+ Register feedback_vector = r4;
+ __ LoadTaggedPointerField(
+ feedback_vector,
+ FieldMemOperand(closure, JSFunction::kFeedbackCellOffset));
+ __ LoadTaggedPointerField(
+ feedback_vector, FieldMemOperand(feedback_vector, Cell::kValueOffset));
+
+ Label install_baseline_code;
+ // Check if feedback vector is valid. If not, call prepare for baseline to
+ // allocate it.
+ __ CompareObjectType(feedback_vector, r5, r5, FEEDBACK_VECTOR_TYPE);
+ __ b(ne, &install_baseline_code);
+
+ // Save BytecodeOffset from the stack frame.
+ __ LoadU64(kInterpreterBytecodeOffsetRegister,
+ MemOperand(fp, InterpreterFrameConstants::kBytecodeOffsetFromFp));
+ __ SmiUntag(kInterpreterBytecodeOffsetRegister);
+ // Replace BytecodeOffset with the feedback vector.
+ __ StoreU64(feedback_vector,
+ MemOperand(fp, InterpreterFrameConstants::kBytecodeOffsetFromFp));
+ feedback_vector = no_reg;
+
+ // Compute baseline pc for bytecode offset.
+ ExternalReference get_baseline_pc_extref;
+ if (next_bytecode || is_osr) {
+ get_baseline_pc_extref =
+ ExternalReference::baseline_pc_for_next_executed_bytecode();
+ } else {
+ get_baseline_pc_extref =
+ ExternalReference::baseline_pc_for_bytecode_offset();
+ }
+ Register get_baseline_pc = r5;
+ __ Move(get_baseline_pc, get_baseline_pc_extref);
+
+ // If the code deoptimizes during the implicit function entry stack interrupt
+ // check, it will have a bailout ID of kFunctionEntryBytecodeOffset, which is
+ // not a valid bytecode offset.
+ // TODO(pthier): Investigate if it is feasible to handle this special case
+ // in TurboFan instead of here.
+ Label valid_bytecode_offset, function_entry_bytecode;
+ if (!is_osr) {
+ __ CmpS64(kInterpreterBytecodeOffsetRegister,
+ Operand(BytecodeArray::kHeaderSize - kHeapObjectTag +
+ kFunctionEntryBytecodeOffset));
+ __ b(eq, &function_entry_bytecode);
+ }
+
+ __ SubS64(kInterpreterBytecodeOffsetRegister,
+ kInterpreterBytecodeOffsetRegister,
+ Operand(BytecodeArray::kHeaderSize - kHeapObjectTag));
+
+ __ bind(&valid_bytecode_offset);
+ // Get bytecode array from the stack frame.
+ __ LoadU64(kInterpreterBytecodeArrayRegister,
+ MemOperand(fp, InterpreterFrameConstants::kBytecodeArrayFromFp));
+ // Save the accumulator register, since it's clobbered by the below call.
+ __ Push(kInterpreterAccumulatorRegister);
+ {
+ Register arg_reg_1 = r2;
+ Register arg_reg_2 = r3;
+ Register arg_reg_3 = r4;
+ __ mov(arg_reg_1, code_obj);
+ __ mov(arg_reg_2, kInterpreterBytecodeOffsetRegister);
+ __ mov(arg_reg_3, kInterpreterBytecodeArrayRegister);
+ FrameScope scope(masm, StackFrame::INTERNAL);
+ __ PrepareCallCFunction(3, 0, r1);
+ __ CallCFunction(get_baseline_pc, 3, 0);
+ }
+ __ AddS64(code_obj, code_obj, kReturnRegister0);
+ __ Pop(kInterpreterAccumulatorRegister);
+
+ if (is_osr) {
+ Register scratch = r1;
+ __ mov(scratch, Operand(0));
+ __ StoreU16(scratch,
+ FieldMemOperand(kInterpreterBytecodeArrayRegister,
+ BytecodeArray::kOsrLoopNestingLevelOffset));
+ Generate_OSREntry(masm, code_obj, Code::kHeaderSize - kHeapObjectTag);
+ } else {
+ __ AddS64(code_obj, code_obj, Operand(Code::kHeaderSize - kHeapObjectTag));
+ __ Jump(code_obj);
+ }
+ __ Trap(); // Unreachable.
+
+ if (!is_osr) {
+ __ bind(&function_entry_bytecode);
+    // If the bytecode offset is kFunctionEntryBytecodeOffset, get the start
+    // address of the first bytecode.
+ __ mov(kInterpreterBytecodeOffsetRegister, Operand(0));
+ if (next_bytecode) {
+ __ Move(get_baseline_pc,
+ ExternalReference::baseline_pc_for_bytecode_offset());
+ }
+ __ b(&valid_bytecode_offset);
+ }
+
+ __ bind(&install_baseline_code);
+ {
+ FrameScope scope(masm, StackFrame::INTERNAL);
+ __ Push(kInterpreterAccumulatorRegister);
+ __ Push(closure);
+ __ CallRuntime(Runtime::kInstallBaselineCode, 1);
+ __ Pop(kInterpreterAccumulatorRegister);
+ }
+ // Retry from the start after installing baseline code.
+ __ b(&start);
+}
+
+void OnStackReplacement(MacroAssembler* masm, bool is_interpreter) {
+ ASM_CODE_COMMENT(masm);
+ {
+ FrameScope scope(masm, StackFrame::INTERNAL);
+ __ CallRuntime(Runtime::kCompileForOnStackReplacement);
+ }
+
+ // If the code object is null, just return to the caller.
+ Label skip;
+ __ CmpSmiLiteral(r2, Smi::zero(), r0);
+ __ bne(&skip);
+ __ Ret();
+
+ __ bind(&skip);
+
+ if (is_interpreter) {
+    // Drop the handler frame that is sitting on top of the actual
+    // JavaScript frame. This is the case when OSR is triggered from bytecode.
+ __ LeaveFrame(StackFrame::STUB);
+ }
+
+ // Load deoptimization data from the code object.
+ // <deopt_data> = <code>[#deoptimization_data_offset]
+ __ LoadTaggedPointerField(
+ r3,
+ FieldMemOperand(r2, Code::kDeoptimizationDataOrInterpreterDataOffset));
+
+ // Load the OSR entrypoint offset from the deoptimization data.
+ // <osr_offset> = <deopt_data>[#header_size + #osr_pc_offset]
+ __ SmiUntagField(
+ r3, FieldMemOperand(r3, FixedArray::OffsetOfElementAt(
+ DeoptimizationData::kOsrPcOffsetIndex)));
+
+ // Compute the target address = code_obj + header_size + osr_offset
+ // <entry_addr> = <code_obj> + #header_size + <osr_offset>
+ __ AddS64(r2, r3);
+ Generate_OSREntry(masm, r2, Code::kHeaderSize - kHeapObjectTag);
+}
+
+} // namespace
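
Generate_BaselineOrInterpreterEntry above converts an interpreter frame into a baseline frame: it swaps the feedback vector into the slot that held the bytecode offset and resumes at the baseline pc computed for that offset. A conceptual C++ sketch of those two steps, with a stub standing in for the baseline-pc lookup:

#include <cstdint>

struct FrameSlots {
  uintptr_t bytecode_offset_or_feedback_vector;
  uintptr_t bytecode_array;
};

// Stub for the baseline_pc_for_bytecode_offset helper the builtin calls out
// to; the real lookup consults the baseline code's offset table.
uintptr_t BaselinePcForBytecodeOffset(uintptr_t, uintptr_t, uintptr_t) {
  return 0;
}

uintptr_t EnterBaseline(FrameSlots& frame, uintptr_t baseline_code,
                        uintptr_t feedback_vector) {
  const uintptr_t bytecode_offset = frame.bytecode_offset_or_feedback_vector;
  // Baseline frames keep the feedback vector where interpreter frames keep
  // the bytecode offset, so that slot is overwritten in place.
  frame.bytecode_offset_or_feedback_vector = feedback_vector;
  // Resume at the baseline pc matching the interpreted position.
  return baseline_code + BaselinePcForBytecodeOffset(baseline_code,
                                                     bytecode_offset,
                                                     frame.bytecode_array);
}
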
+
void Builtins::Generate_Adaptor(MacroAssembler* masm, Address address) {
__ Move(kJavaScriptCallExtraArg1Register, ExternalReference::Create(address));
__ Jump(BUILTIN_CODE(masm->isolate(), AdaptorWithBuiltinExitFrame),
@@ -81,7 +329,11 @@ void Generate_PushArguments(MacroAssembler* masm, Register array, Register argc,
Register counter = scratch;
Register value = ip;
Label loop, entry;
- __ mov(counter, argc);
+ if (kJSArgcIncludesReceiver) {
+ __ SubS64(counter, argc, Operand(kJSArgcReceiverSlots));
+ } else {
+ __ mov(counter, argc);
+ }
__ b(&entry);
__ bind(&loop);
__ ShiftLeftU64(value, counter, Operand(kSystemPointerSizeLog2));
@@ -151,7 +403,9 @@ void Generate_JSBuiltinsConstructStubHelper(MacroAssembler* masm) {
}
// Remove caller arguments from the stack and return.
__ DropArguments(scratch, TurboAssembler::kCountIsSmi,
- TurboAssembler::kCountExcludesReceiver);
+ kJSArgcIncludesReceiver
+ ? TurboAssembler::kCountIncludesReceiver
+ : TurboAssembler::kCountExcludesReceiver);
__ Ret();
__ bind(&stack_overflow);
@@ -198,8 +452,10 @@ void Builtins::Generate_JSConstructStubGeneric(MacroAssembler* masm) {
r6, FieldMemOperand(r3, JSFunction::kSharedFunctionInfoOffset));
__ LoadU32(r6, FieldMemOperand(r6, SharedFunctionInfo::kFlagsOffset));
__ DecodeField<SharedFunctionInfo::FunctionKindBits>(r6);
- __ JumpIfIsInRange(r6, kDefaultDerivedConstructor, kDerivedConstructor,
- &not_create_implicit_receiver);
+ __ JumpIfIsInRange(
+ r6, static_cast<uint8_t>(FunctionKind::kDefaultDerivedConstructor),
+ static_cast<uint8_t>(FunctionKind::kDerivedConstructor),
+ &not_create_implicit_receiver);
// If not derived class constructor: Allocate the new receiver object.
__ IncrementCounter(masm->isolate()->counters()->constructed_objects(), 1, r6,
@@ -307,7 +563,9 @@ void Builtins::Generate_JSConstructStubGeneric(MacroAssembler* masm) {
// Remove caller arguments from the stack and return.
__ DropArguments(r3, TurboAssembler::kCountIsSmi,
- TurboAssembler::kCountExcludesReceiver);
+ kJSArgcIncludesReceiver
+ ? TurboAssembler::kCountIncludesReceiver
+ : TurboAssembler::kCountExcludesReceiver);
__ Ret();
__ bind(&check_receiver);
@@ -339,19 +597,6 @@ void Builtins::Generate_JSBuiltinsConstructStub(MacroAssembler* masm) {
Generate_JSBuiltinsConstructStubHelper(masm);
}
-static void GetSharedFunctionInfoBytecode(MacroAssembler* masm,
- Register sfi_data,
- Register scratch1) {
- Label done;
-
- __ CompareObjectType(sfi_data, scratch1, scratch1, INTERPRETER_DATA_TYPE);
- __ bne(&done, Label::kNear);
- __ LoadTaggedPointerField(
- sfi_data,
- FieldMemOperand(sfi_data, InterpreterData::kBytecodeArrayOffset));
- __ bind(&done);
-}
-
// static
void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) {
// ----------- S t a t e -------------
@@ -416,6 +661,9 @@ void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) {
r5, FieldMemOperand(r6, JSFunction::kSharedFunctionInfoOffset));
__ LoadU16(
r5, FieldMemOperand(r5, SharedFunctionInfo::kFormalParameterCountOffset));
+ if (kJSArgcIncludesReceiver) {
+ __ SubS64(r5, r5, Operand(kJSArgcReceiverSlots));
+ }
__ LoadTaggedPointerField(
r4,
FieldMemOperand(r3, JSGeneratorObject::kParametersAndRegistersOffset));
@@ -440,13 +688,15 @@ void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) {
// Underlying function needs to have bytecode available.
if (FLAG_debug_code) {
+ Label is_baseline;
__ LoadTaggedPointerField(
r5, FieldMemOperand(r6, JSFunction::kSharedFunctionInfoOffset));
__ LoadTaggedPointerField(
r5, FieldMemOperand(r5, SharedFunctionInfo::kFunctionDataOffset));
- GetSharedFunctionInfoBytecode(masm, r5, ip);
+ GetSharedFunctionInfoBytecodeOrBaseline(masm, r5, ip, &is_baseline);
__ CompareObjectType(r5, r5, r5, BYTECODE_ARRAY_TYPE);
__ Assert(eq, AbortReason::kMissingBytecodeArray);
+ __ bind(&is_baseline);
}
// Resume (Ignition/TurboFan) generator object.
@@ -780,7 +1030,11 @@ static void Generate_JSEntryTrampolineHelper(MacroAssembler* masm,
// Check if we have enough stack space to push all arguments.
Label enough_stack_space, stack_overflow;
- __ AddS64(r7, r2, Operand(1));
+ if (kJSArgcIncludesReceiver) {
+ __ mov(r7, r2);
+ } else {
+ __ AddS64(r7, r2, Operand(1));
+ }
__ StackOverflowCheck(r7, r1, &stack_overflow);
__ b(&enough_stack_space);
__ bind(&stack_overflow);
@@ -887,8 +1141,10 @@ static void LeaveInterpreterFrame(MacroAssembler* masm, Register scratch1,
MemOperand(fp, StandardFrameConstants::kArgCOffset));
__ ShiftLeftU64(actual_params_size, actual_params_size,
Operand(kSystemPointerSizeLog2));
- __ AddS64(actual_params_size, actual_params_size,
- Operand(kSystemPointerSize));
+ if (!kJSArgcIncludesReceiver) {
+ __ AddS64(actual_params_size, actual_params_size,
+ Operand(kSystemPointerSize));
+ }
// If actual is bigger than formal, then we should use it to free up the stack
// arguments.
@@ -911,7 +1167,7 @@ static void TailCallRuntimeIfMarkerEquals(MacroAssembler* masm,
OptimizationMarker expected_marker,
Runtime::FunctionId function_id) {
Label no_match;
- __ CmpS64(actual_marker, Operand(expected_marker));
+ __ CmpS64(actual_marker, Operand(static_cast<int>(expected_marker)));
__ bne(&no_match);
GenerateTailCallToReturnedCode(masm, function_id);
__ bind(&no_match);
@@ -1097,12 +1353,177 @@ static void MaybeOptimizeCodeOrTailCallOptimizedCodeSlot(
TailCallOptimizedCodeSlot(masm, optimized_code_entry, r8);
}
+// Read off the optimization state in the feedback vector and check if there
+// is optimized code or an optimization marker that needs to be processed.
+static void LoadOptimizationStateAndJumpIfNeedsProcessing(
+ MacroAssembler* masm, Register optimization_state, Register feedback_vector,
+ Label* has_optimized_code_or_marker) {
+ ASM_CODE_COMMENT(masm);
+ USE(LoadOptimizationStateAndJumpIfNeedsProcessing);
+ DCHECK(!AreAliased(optimization_state, feedback_vector));
+ __ LoadU32(optimization_state,
+ FieldMemOperand(feedback_vector, FeedbackVector::kFlagsOffset));
+ CHECK(
+ is_uint16(FeedbackVector::kHasOptimizedCodeOrCompileOptimizedMarkerMask));
+ __ tmll(
+ optimization_state,
+ Operand(FeedbackVector::kHasOptimizedCodeOrCompileOptimizedMarkerMask));
+ __ b(Condition(7), has_optimized_code_or_marker);
+}
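
LoadOptimizationStateAndJumpIfNeedsProcessing boils down to a test-under-mask on the feedback vector's flags word. A tiny C++ sketch of that predicate, with a placeholder mask value rather than V8's real constant:

#include <cstdint>

constexpr uint32_t kHasOptimizedCodeOrMarkerMaskSketch = 0x00ffu;  // placeholder

bool NeedsProcessing(uint32_t optimization_state) {
  // The branch is taken when any masked bit is set, i.e. there is optimized
  // code or a pending optimization marker to handle.
  return (optimization_state & kHasOptimizedCodeOrMarkerMaskSketch) != 0;
}
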
+
+#if ENABLE_SPARKPLUG
+// static
+void Builtins::Generate_BaselineOutOfLinePrologue(MacroAssembler* masm) {
+ // UseScratchRegisterScope temps(masm);
+ // Need a few extra registers
+ // temps.Include(r8, r9);
+
+ auto descriptor =
+ Builtins::CallInterfaceDescriptorFor(Builtin::kBaselineOutOfLinePrologue);
+ Register closure = descriptor.GetRegisterParameter(
+ BaselineOutOfLinePrologueDescriptor::kClosure);
+ // Load the feedback vector from the closure.
+ Register feedback_vector = ip;
+ __ LoadTaggedPointerField(
+ feedback_vector,
+ FieldMemOperand(closure, JSFunction::kFeedbackCellOffset));
+ __ LoadTaggedPointerField(
+ feedback_vector, FieldMemOperand(feedback_vector, Cell::kValueOffset));
+
+ if (FLAG_debug_code) {
+ Register scratch = r1;
+ __ CompareObjectType(feedback_vector, scratch, scratch,
+ FEEDBACK_VECTOR_TYPE);
+ __ Assert(eq, AbortReason::kExpectedFeedbackVector);
+ }
+
+ // Check for an optimization marker.
+ Label has_optimized_code_or_marker;
+ Register optimization_state = r9;
+ {
+ LoadOptimizationStateAndJumpIfNeedsProcessing(
+ masm, optimization_state, feedback_vector,
+ &has_optimized_code_or_marker);
+ }
+
+ // Increment invocation count for the function.
+ {
+ Register invocation_count = r1;
+ __ LoadU64(invocation_count,
+ FieldMemOperand(feedback_vector,
+ FeedbackVector::kInvocationCountOffset));
+ __ AddU64(invocation_count, Operand(1));
+ __ StoreU64(invocation_count,
+ FieldMemOperand(feedback_vector,
+ FeedbackVector::kInvocationCountOffset));
+ }
+
+ FrameScope frame_scope(masm, StackFrame::MANUAL);
+ {
+ ASM_CODE_COMMENT_STRING(masm, "Frame Setup");
+ // Normally the first thing we'd do here is Push(lr, fp), but we already
+ // entered the frame in BaselineCompiler::Prologue, as we had to use the
+    // value of lr before the call to this BaselineOutOfLinePrologue builtin.
+
+ Register callee_context = descriptor.GetRegisterParameter(
+ BaselineOutOfLinePrologueDescriptor::kCalleeContext);
+ Register callee_js_function = descriptor.GetRegisterParameter(
+ BaselineOutOfLinePrologueDescriptor::kClosure);
+ __ Push(callee_context, callee_js_function);
+ DCHECK_EQ(callee_js_function, kJavaScriptCallTargetRegister);
+ DCHECK_EQ(callee_js_function, kJSFunctionRegister);
+
+ Register argc = descriptor.GetRegisterParameter(
+ BaselineOutOfLinePrologueDescriptor::kJavaScriptCallArgCount);
+ // We'll use the bytecode for both code age/OSR resetting, and pushing onto
+ // the frame, so load it into a register.
+ Register bytecodeArray = descriptor.GetRegisterParameter(
+ BaselineOutOfLinePrologueDescriptor::kInterpreterBytecodeArray);
+
+ // Reset code age and the OSR arming. The OSR field and BytecodeAgeOffset
+    // are 8-bit fields next to each other, so we can optimize by writing a
+    // single 16-bit value. These static asserts guard that this assumption holds.
+ STATIC_ASSERT(BytecodeArray::kBytecodeAgeOffset ==
+ BytecodeArray::kOsrLoopNestingLevelOffset + kCharSize);
+ STATIC_ASSERT(BytecodeArray::kNoAgeBytecodeAge == 0);
+ {
+ Register scratch = r0;
+ __ mov(scratch, Operand(0));
+ __ StoreU16(scratch,
+ FieldMemOperand(bytecodeArray,
+ BytecodeArray::kOsrLoopNestingLevelOffset));
+ }
+
+ __ Push(argc, bytecodeArray);
+
+    // Baseline code frames store the feedback vector where the interpreter
+    // would store the bytecode offset.
+ if (FLAG_debug_code) {
+ Register scratch = r1;
+ __ CompareObjectType(feedback_vector, scratch, scratch,
+ FEEDBACK_VECTOR_TYPE);
+ __ Assert(eq, AbortReason::kExpectedFeedbackVector);
+ }
+ __ Push(feedback_vector);
+ }
+
+ Label call_stack_guard;
+ Register frame_size = descriptor.GetRegisterParameter(
+ BaselineOutOfLinePrologueDescriptor::kStackFrameSize);
+ {
+ ASM_CODE_COMMENT_STRING(masm, "Stack/interrupt check");
+    // Stack check. This folds the interrupt stack limit check and the real
+    // stack limit check into one by just checking for the
+ // interrupt limit. The interrupt limit is either equal to the real stack
+ // limit or tighter. By ensuring we have space until that limit after
+ // building the frame we can quickly precheck both at once.
+
+ Register sp_minus_frame_size = r1;
+ Register interrupt_limit = r0;
+ __ SubS64(sp_minus_frame_size, sp, frame_size);
+ __ LoadStackLimit(interrupt_limit, StackLimitKind::kInterruptStackLimit);
+ __ CmpU64(sp_minus_frame_size, interrupt_limit);
+ __ blt(&call_stack_guard);
+ }
+
+ // Do "fast" return to the caller pc in lr.
+ __ LoadRoot(kInterpreterAccumulatorRegister, RootIndex::kUndefinedValue);
+ __ Ret();
+
+ __ bind(&has_optimized_code_or_marker);
+ {
+ ASM_CODE_COMMENT_STRING(masm, "Optimized marker check");
+
+ // Drop the frame created by the baseline call.
+ __ Pop(r14, fp);
+ MaybeOptimizeCodeOrTailCallOptimizedCodeSlot(masm, optimization_state,
+ feedback_vector);
+ __ Trap();
+ }
+
+ __ bind(&call_stack_guard);
+ {
+ ASM_CODE_COMMENT_STRING(masm, "Stack/interrupt call");
+ FrameScope frame_scope(masm, StackFrame::INTERNAL);
+ // Save incoming new target or generator
+ __ Push(kJavaScriptCallNewTargetRegister);
+ __ SmiTag(frame_size);
+ __ Push(frame_size);
+ __ CallRuntime(Runtime::kStackGuardWithGap);
+ __ Pop(kJavaScriptCallNewTargetRegister);
+ }
+
+ __ LoadRoot(kInterpreterAccumulatorRegister, RootIndex::kUndefinedValue);
+ __ Ret();
+}
+#endif
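
The prologue's folded stack/interrupt check works because the interrupt limit is never above the real stack limit, so one comparison covers both. A small sketch of the predicate that decides whether to take the slow stack-guard path:

#include <cstdint>

bool NeedsStackGuard(uintptr_t sp, uintptr_t frame_size,
                     uintptr_t interrupt_limit) {
  // Slow path if sp - frame_size would underflow or would dip below the
  // (tighter) interrupt limit.
  return frame_size > sp || (sp - frame_size) < interrupt_limit;
}
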
+
// Generate code for entering a JS function with the interpreter.
// On entry to the function the receiver and arguments have been pushed on the
// stack left to right.
//
// The live registers are:
-// o r2: actual argument count (not including the receiver)
+// o r2: actual argument count
// o r3: the JS function object being called.
// o r5: the incoming new target or generator object
// o cp: our context
@@ -1125,7 +1546,10 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
__ LoadTaggedPointerField(
kInterpreterBytecodeArrayRegister,
FieldMemOperand(r6, SharedFunctionInfo::kFunctionDataOffset));
- GetSharedFunctionInfoBytecode(masm, kInterpreterBytecodeArrayRegister, ip);
+
+ Label is_baseline;
+ GetSharedFunctionInfoBytecodeOrBaseline(
+ masm, kInterpreterBytecodeArrayRegister, ip, &is_baseline);
// The bytecode array could have been flushed from the shared function info,
// if so, call into CompileLazy.
@@ -1320,6 +1744,39 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
MaybeOptimizeCodeOrTailCallOptimizedCodeSlot(masm, optimization_state,
feedback_vector);
+ __ bind(&is_baseline);
+ {
+ // Load the feedback vector from the closure.
+ __ LoadTaggedPointerField(
+ feedback_vector,
+ FieldMemOperand(closure, JSFunction::kFeedbackCellOffset));
+ __ LoadTaggedPointerField(
+ feedback_vector, FieldMemOperand(feedback_vector, Cell::kValueOffset));
+
+ Label install_baseline_code;
+ // Check if feedback vector is valid. If not, call prepare for baseline to
+ // allocate it.
+ __ LoadTaggedPointerField(
+ ip, FieldMemOperand(feedback_vector, HeapObject::kMapOffset));
+ __ LoadU16(ip, FieldMemOperand(ip, Map::kInstanceTypeOffset));
+ __ CmpS32(ip, Operand(FEEDBACK_VECTOR_TYPE));
+ __ b(ne, &install_baseline_code);
+
+ // Check for an optimization marker.
+ LoadOptimizationStateAndJumpIfNeedsProcessing(
+ masm, optimization_state, feedback_vector,
+ &has_optimized_code_or_marker);
+
+ // Load the baseline code into the closure.
+ __ mov(r4, kInterpreterBytecodeArrayRegister);
+ static_assert(kJavaScriptCallCodeStartRegister == r4, "ABI mismatch");
+ ReplaceClosureCodeWithOptimizedCode(masm, r4, closure, ip, r1);
+ __ JumpCodeObject(r4);
+
+ __ bind(&install_baseline_code);
+ GenerateTailCallToReturnedCode(masm, Runtime::kInstallBaselineCode);
+ }
+
__ bind(&compile_lazy);
GenerateTailCallToReturnedCode(masm, Runtime::kCompileLazy);
@@ -1346,7 +1803,7 @@ void Builtins::Generate_InterpreterPushArgsThenCallImpl(
InterpreterPushArgsMode mode) {
DCHECK(mode != InterpreterPushArgsMode::kArrayFunction);
// ----------- S t a t e -------------
- // -- r2 : the number of arguments (not including the receiver)
+ // -- r2 : the number of arguments
// -- r4 : the address of the first argument to be pushed. Subsequent
// arguments should be consecutive above this, in the same order as
// they are to be pushed onto the stack.
@@ -1358,15 +1815,18 @@ void Builtins::Generate_InterpreterPushArgsThenCallImpl(
__ SubS64(r2, r2, Operand(1));
}
- // Calculate number of arguments (AddS64 one for receiver).
- __ AddS64(r5, r2, Operand(1));
- __ StackOverflowCheck(r5, ip, &stack_overflow);
-
- if (receiver_mode == ConvertReceiverMode::kNullOrUndefined) {
- // Don't copy receiver. Argument count is correct.
+ const bool skip_receiver =
+ receiver_mode == ConvertReceiverMode::kNullOrUndefined;
+ if (kJSArgcIncludesReceiver && skip_receiver) {
+ __ SubS64(r5, r2, Operand(kJSArgcReceiverSlots));
+ } else if (!kJSArgcIncludesReceiver && !skip_receiver) {
+ __ AddS64(r5, r2, Operand(1));
+ } else {
__ mov(r5, r2);
}
+ __ StackOverflowCheck(r5, ip, &stack_overflow);
+
// Push the arguments.
GenerateInterpreterPushArgs(masm, r5, r4, r6);
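The r5 computation above normalizes how many slots get pushed under the two kJSArgcIncludesReceiver configurations. A rough reading of the four cases as standalone C++ (the constant and helper are illustrative, not V8's definitions):

constexpr int kJSArgcReceiverSlots = 1;

// Number of argument slots to copy onto the stack before the call.
int SlotsToPush(int argc, bool argc_includes_receiver,
                bool receiver_is_null_or_undefined) {
  if (argc_includes_receiver && receiver_is_null_or_undefined) {
    return argc - kJSArgcReceiverSlots;  // receiver slot is not in the buffer
  }
  if (!argc_includes_receiver && !receiver_is_null_or_undefined) {
    return argc + 1;  // add a slot for the explicit receiver
  }
  return argc;  // count already matches the slots to push
}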
@@ -1402,23 +1862,27 @@ void Builtins::Generate_InterpreterPushArgsThenCallImpl(
void Builtins::Generate_InterpreterPushArgsThenConstructImpl(
MacroAssembler* masm, InterpreterPushArgsMode mode) {
// ----------- S t a t e -------------
- // -- r2 : argument count (not including receiver)
+ // -- r2 : argument count
// -- r5 : new target
// -- r3 : constructor to call
// -- r4 : allocation site feedback if available, undefined otherwise.
// -- r6 : address of the first argument
// -----------------------------------
Label stack_overflow;
- __ AddS64(r7, r2, Operand(1));
- __ StackOverflowCheck(r7, ip, &stack_overflow);
+ __ StackOverflowCheck(r2, ip, &stack_overflow);
if (mode == InterpreterPushArgsMode::kWithFinalSpread) {
// The spread argument should not be pushed.
__ SubS64(r2, r2, Operand(1));
}
+ Register argc_without_receiver = r2;
+ if (kJSArgcIncludesReceiver) {
+ argc_without_receiver = ip;
+ __ SubS64(argc_without_receiver, r2, Operand(kJSArgcReceiverSlots));
+ }
// Push the arguments. r4 and r5 will be modified.
- GenerateInterpreterPushArgs(masm, r2, r6, r7);
+ GenerateInterpreterPushArgs(masm, argc_without_receiver, r6, r7);
// Push a slot for the receiver to be constructed.
__ mov(r0, Operand::Zero());
@@ -1621,13 +2085,14 @@ void Generate_ContinueToBuiltinHelper(MacroAssembler* masm,
// Overwrite the hole inserted by the deoptimizer with the return value from
// the LAZY deopt point. r0 contains the arguments count, the return value
// from LAZY is always the last argument.
- __ AddS64(r2, r2,
- Operand(BuiltinContinuationFrameConstants::kFixedSlotCount));
+ constexpr int return_value_offset =
+ BuiltinContinuationFrameConstants::kFixedSlotCount -
+ kJSArgcReceiverSlots;
+ __ AddS64(r2, r2, Operand(return_value_offset));
__ ShiftLeftU64(r1, r2, Operand(kSystemPointerSizeLog2));
__ StoreU64(scratch, MemOperand(sp, r1));
// Recover arguments count.
- __ SubS64(r2, r2,
- Operand(BuiltinContinuationFrameConstants::kFixedSlotCount));
+ __ SubS64(r2, r2, Operand(return_value_offset));
}
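The rewritten offset above places the LAZY-deopt return value into the last argument slot. A small arithmetic sketch of the slot address; the constants are placeholders, not V8's actual frame layout values:

#include <cstdint>

constexpr int kFixedSlotCount = 4;       // placeholder
constexpr int kJSArgcReceiverSlots = 1;  // placeholder
constexpr int kSystemPointerSize = 8;

// Address written by the StoreU64 above:
//   sp + (argc + kFixedSlotCount - kJSArgcReceiverSlots) * pointer size
uintptr_t ReturnValueSlot(uintptr_t sp, int argc) {
  int return_value_offset = kFixedSlotCount - kJSArgcReceiverSlots;
  return sp + static_cast<uintptr_t>(argc + return_value_offset) *
                  kSystemPointerSize;
}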
__ LoadU64(
fp,
@@ -1675,46 +2140,6 @@ void Builtins::Generate_NotifyDeoptimized(MacroAssembler* masm) {
__ Ret();
}
-void Builtins::Generate_InterpreterOnStackReplacement(MacroAssembler* masm) {
- {
- FrameScope scope(masm, StackFrame::INTERNAL);
- __ CallRuntime(Runtime::kCompileForOnStackReplacement);
- }
-
- // If the code object is null, just return to the caller.
- Label skip;
- __ CmpSmiLiteral(r2, Smi::zero(), r0);
- __ bne(&skip);
- __ Ret();
-
- __ bind(&skip);
-
- // Drop the handler frame that is be sitting on top of the actual
- // JavaScript frame. This is the case then OSR is triggered from bytecode.
- __ LeaveFrame(StackFrame::STUB);
-
- // Load deoptimization data from the code object.
- // <deopt_data> = <code>[#deoptimization_data_offset]
- __ LoadTaggedPointerField(
- r3,
- FieldMemOperand(r2, Code::kDeoptimizationDataOrInterpreterDataOffset));
-
- // Load the OSR entrypoint offset from the deoptimization data.
- // <osr_offset> = <deopt_data>[#header_size + #osr_pc_offset]
- __ SmiUntagField(
- r3, FieldMemOperand(r3, FixedArray::OffsetOfElementAt(
- DeoptimizationData::kOsrPcOffsetIndex)));
-
- // Compute the target address = code_obj + header_size + osr_offset
- // <entry_addr> = <code_obj> + #header_size + <osr_offset>
- __ AddS64(r2, r3);
- __ AddS64(r0, r2, Operand(Code::kHeaderSize - kHeapObjectTag));
- __ mov(r14, r0);
-
- // And "return" to the OSR entry point of the function.
- __ Ret();
-}
-
// static
void Builtins::Generate_FunctionPrototypeApply(MacroAssembler* masm) {
// ----------- S t a t e -------------
@@ -1733,16 +2158,18 @@ void Builtins::Generate_FunctionPrototypeApply(MacroAssembler* masm) {
Label done;
__ LoadU64(r3, MemOperand(sp)); // receiver
- __ cghi(r2, Operand(1));
+ __ CmpS64(r2, Operand(JSParameterCount(1)));
__ blt(&done);
__ LoadU64(r7, MemOperand(sp, kSystemPointerSize)); // thisArg
- __ cghi(r2, Operand(2));
+ __ CmpS64(r2, Operand(JSParameterCount(2)));
__ blt(&done);
__ LoadU64(r4, MemOperand(sp, 2 * kSystemPointerSize)); // argArray
__ bind(&done);
- __ DropArgumentsAndPushNewReceiver(r2, r7, TurboAssembler::kCountIsInteger,
- TurboAssembler::kCountExcludesReceiver);
+ __ DropArgumentsAndPushNewReceiver(
+ r2, r7, TurboAssembler::kCountIsInteger,
+ kJSArgcIncludesReceiver ? TurboAssembler::kCountIncludesReceiver
+ : TurboAssembler::kCountExcludesReceiver);
}
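The comparisons above now go through JSParameterCount(n), and the drop helper is told whether the count register already includes the receiver slot. A hedged sketch of that relationship; the bodies are illustrative, not copied from V8:

constexpr bool kJSArgcIncludesReceiver = true;  // build-time configuration
constexpr int kJSArgcReceiverSlots = kJSArgcIncludesReceiver ? 1 : 0;

// "n user-visible parameters" expressed in the units of the argc register.
constexpr int JSParameterCount(int param_count_without_receiver) {
  return param_count_without_receiver + kJSArgcReceiverSlots;
}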
// ----------- S t a t e -------------
@@ -1768,7 +2195,7 @@ void Builtins::Generate_FunctionPrototypeApply(MacroAssembler* masm) {
// arguments to the receiver.
__ bind(&no_arguments);
{
- __ mov(r2, Operand::Zero());
+ __ mov(r2, Operand(JSParameterCount(0)));
__ Jump(masm->isolate()->builtins()->Call(), RelocInfo::CODE_TARGET);
}
}
@@ -1782,7 +2209,7 @@ void Builtins::Generate_FunctionPrototypeCall(MacroAssembler* masm) {
// r2: actual number of arguments
{
Label done;
- __ cghi(r2, Operand::Zero());
+ __ CmpS64(r2, Operand(JSParameterCount(0)));
__ b(ne, &done);
__ PushRoot(RootIndex::kUndefinedValue);
__ AddS64(r2, r2, Operand(1));
@@ -1815,19 +2242,21 @@ void Builtins::Generate_ReflectApply(MacroAssembler* masm) {
Label done;
- __ cghi(r2, Operand(1));
+ __ CmpS64(r2, Operand(JSParameterCount(1)));
__ blt(&done);
__ LoadU64(r3, MemOperand(sp, kSystemPointerSize)); // thisArg
- __ cghi(r2, Operand(2));
+ __ CmpS64(r2, Operand(JSParameterCount(2)));
__ blt(&done);
__ LoadU64(r7, MemOperand(sp, 2 * kSystemPointerSize)); // argArray
- __ cghi(r2, Operand(3));
+ __ CmpS64(r2, Operand(JSParameterCount(3)));
__ blt(&done);
__ LoadU64(r4, MemOperand(sp, 3 * kSystemPointerSize)); // argArray
__ bind(&done);
- __ DropArgumentsAndPushNewReceiver(r2, r7, TurboAssembler::kCountIsInteger,
- TurboAssembler::kCountExcludesReceiver);
+ __ DropArgumentsAndPushNewReceiver(
+ r2, r7, TurboAssembler::kCountIsInteger,
+ kJSArgcIncludesReceiver ? TurboAssembler::kCountIncludesReceiver
+ : TurboAssembler::kCountExcludesReceiver);
}
// ----------- S t a t e -------------
@@ -1865,19 +2294,21 @@ void Builtins::Generate_ReflectConstruct(MacroAssembler* masm) {
Label done;
__ mov(r6, r3);
- __ cghi(r2, Operand(1));
+ __ CmpS64(r2, Operand(JSParameterCount(1)));
__ blt(&done);
__ LoadU64(r3, MemOperand(sp, kSystemPointerSize)); // thisArg
__ mov(r5, r3);
- __ cghi(r2, Operand(2));
+ __ CmpS64(r2, Operand(JSParameterCount(2)));
__ blt(&done);
__ LoadU64(r4, MemOperand(sp, 2 * kSystemPointerSize)); // argArray
- __ cghi(r2, Operand(3));
+ __ CmpS64(r2, Operand(JSParameterCount(3)));
__ blt(&done);
__ LoadU64(r5, MemOperand(sp, 3 * kSystemPointerSize)); // argArray
__ bind(&done);
- __ DropArgumentsAndPushNewReceiver(r2, r6, TurboAssembler::kCountIsInteger,
- TurboAssembler::kCountExcludesReceiver);
+ __ DropArgumentsAndPushNewReceiver(
+ r2, r6, TurboAssembler::kCountIsInteger,
+ kJSArgcIncludesReceiver ? TurboAssembler::kCountIncludesReceiver
+ : TurboAssembler::kCountExcludesReceiver);
}
// ----------- S t a t e -------------
@@ -1926,7 +2357,11 @@ void Generate_AllocateSpaceAndShiftExistingArguments(
Label loop, done;
__ bind(&loop);
__ CmpS64(old_sp, end);
- __ bgt(&done);
+ if (kJSArgcIncludesReceiver) {
+ __ bge(&done);
+ } else {
+ __ bgt(&done);
+ }
__ LoadU64(value, MemOperand(old_sp));
__ lay(old_sp, MemOperand(old_sp, kSystemPointerSize));
__ StoreU64(value, MemOperand(dest));
@@ -1946,7 +2381,7 @@ void Builtins::Generate_CallOrConstructVarargs(MacroAssembler* masm,
Handle<Code> code) {
// ----------- S t a t e -------------
// -- r3 : target
- // -- r2 : number of parameters on the stack (not including the receiver)
+ // -- r2 : number of parameters on the stack
// -- r4 : arguments list (a FixedArray)
// -- r6 : len (number of elements to push from args)
// -- r5 : new.target (for [[Construct]])
@@ -2019,7 +2454,7 @@ void Builtins::Generate_CallOrConstructForwardVarargs(MacroAssembler* masm,
CallOrConstructMode mode,
Handle<Code> code) {
// ----------- S t a t e -------------
- // -- r2 : the number of arguments (not including the receiver)
+ // -- r2 : the number of arguments
// -- r5 : the new.target (for [[Construct]] calls)
// -- r3 : the target to call (can be any Object)
// -- r4 : start index (to support rest parameters)
@@ -2047,12 +2482,14 @@ void Builtins::Generate_CallOrConstructForwardVarargs(MacroAssembler* masm,
Label stack_done, stack_overflow;
__ LoadU64(r7, MemOperand(fp, StandardFrameConstants::kArgCOffset));
+ if (kJSArgcIncludesReceiver) {
+ __ SubS64(r7, r7, Operand(kJSArgcReceiverSlots));
+ }
__ SubS64(r7, r7, r4);
__ ble(&stack_done);
{
// ----------- S t a t e -------------
- // -- r2 : the number of arguments already in the stack (not including the
- // receiver)
+ // -- r2 : the number of arguments already in the stack
// -- r3 : the target to call (can be any Object)
// -- r4 : start index (to support rest parameters)
// -- r5 : the new.target (for [[Construct]] calls)
@@ -2109,7 +2546,7 @@ void Builtins::Generate_CallOrConstructForwardVarargs(MacroAssembler* masm,
void Builtins::Generate_CallFunction(MacroAssembler* masm,
ConvertReceiverMode mode) {
// ----------- S t a t e -------------
- // -- r2 : the number of arguments (not including the receiver)
+ // -- r2 : the number of arguments
// -- r3 : the function to call (checked to be a JSFunction)
// -----------------------------------
__ AssertFunction(r3);
@@ -2135,7 +2572,7 @@ void Builtins::Generate_CallFunction(MacroAssembler* masm,
__ bne(&done_convert);
{
// ----------- S t a t e -------------
- // -- r2 : the number of arguments (not including the receiver)
+ // -- r2 : the number of arguments
// -- r3 : the function to call (checked to be a JSFunction)
// -- r4 : the shared function info.
// -- cp : the function context.
@@ -2188,7 +2625,7 @@ void Builtins::Generate_CallFunction(MacroAssembler* masm,
__ bind(&done_convert);
// ----------- S t a t e -------------
- // -- r2 : the number of arguments (not including the receiver)
+ // -- r2 : the number of arguments
// -- r3 : the function to call (checked to be a JSFunction)
// -- r4 : the shared function info.
// -- cp : the function context.
@@ -2211,7 +2648,7 @@ namespace {
void Generate_PushBoundArguments(MacroAssembler* masm) {
// ----------- S t a t e -------------
- // -- r2 : the number of arguments (not including the receiver)
+ // -- r2 : the number of arguments
// -- r3 : target (checked to be a JSBoundFunction)
// -- r5 : new.target (only in case of [[Construct]])
// -----------------------------------
@@ -2225,7 +2662,7 @@ void Generate_PushBoundArguments(MacroAssembler* masm) {
__ beq(&no_bound_arguments);
{
// ----------- S t a t e -------------
- // -- r2 : the number of arguments (not including the receiver)
+ // -- r2 : the number of arguments
// -- r3 : target (checked to be a JSBoundFunction)
// -- r4 : the [[BoundArguments]] (implemented as FixedArray)
// -- r5 : new.target (only in case of [[Construct]])
@@ -2282,7 +2719,7 @@ void Generate_PushBoundArguments(MacroAssembler* masm) {
// static
void Builtins::Generate_CallBoundFunctionImpl(MacroAssembler* masm) {
// ----------- S t a t e -------------
- // -- r2 : the number of arguments (not including the receiver)
+ // -- r2 : the number of arguments
// -- r3 : the function to call (checked to be a JSBoundFunction)
// -----------------------------------
__ AssertBoundFunction(r3);
@@ -2305,7 +2742,7 @@ void Builtins::Generate_CallBoundFunctionImpl(MacroAssembler* masm) {
// static
void Builtins::Generate_Call(MacroAssembler* masm, ConvertReceiverMode mode) {
// ----------- S t a t e -------------
- // -- r2 : the number of arguments (not including the receiver)
+ // -- r2 : the number of arguments
// -- r3 : the target to call (can be any Object).
// -----------------------------------
Register argc = r2;
@@ -2376,7 +2813,7 @@ void Builtins::Generate_Call(MacroAssembler* masm, ConvertReceiverMode mode) {
// static
void Builtins::Generate_ConstructFunction(MacroAssembler* masm) {
// ----------- S t a t e -------------
- // -- r2 : the number of arguments (not including the receiver)
+ // -- r2 : the number of arguments
// -- r3 : the constructor to call (checked to be a JSFunction)
// -- r5 : the new target (checked to be a constructor)
// -----------------------------------
@@ -2407,7 +2844,7 @@ void Builtins::Generate_ConstructFunction(MacroAssembler* masm) {
// static
void Builtins::Generate_ConstructBoundFunction(MacroAssembler* masm) {
// ----------- S t a t e -------------
- // -- r2 : the number of arguments (not including the receiver)
+ // -- r2 : the number of arguments
// -- r3 : the function to call (checked to be a JSBoundFunction)
// -- r5 : the new target (checked to be a constructor)
// -----------------------------------
@@ -2434,7 +2871,7 @@ void Builtins::Generate_ConstructBoundFunction(MacroAssembler* masm) {
// static
void Builtins::Generate_Construct(MacroAssembler* masm) {
// ----------- S t a t e -------------
- // -- r2 : the number of arguments (not including the receiver)
+ // -- r2 : the number of arguments
// -- r3 : the constructor to call (can be any Object)
// -- r5 : the new target (either the same as the constructor or
// the JSFunction on which new was invoked initially)
@@ -3460,22 +3897,31 @@ void Builtins::Generate_DeoptimizationEntry_Lazy(MacroAssembler* masm) {
Generate_DeoptimizationEntry(masm, DeoptimizeKind::kLazy);
}
+void Builtins::Generate_InterpreterOnStackReplacement(MacroAssembler* masm) {
+ return OnStackReplacement(masm, true);
+}
+
+#if ENABLE_SPARKPLUG
+void Builtins::Generate_BaselineOnStackReplacement(MacroAssembler* masm) {
+ __ LoadU64(kContextRegister,
+ MemOperand(fp, BaselineFrameConstants::kContextOffset));
+ return OnStackReplacement(masm, false);
+}
+#endif
+
void Builtins::Generate_BaselineOrInterpreterEnterAtBytecode(
MacroAssembler* masm) {
- // Implement on this platform, https://crrev.com/c/2695591.
- __ bkpt(0);
+ Generate_BaselineOrInterpreterEntry(masm, false);
}
void Builtins::Generate_BaselineOrInterpreterEnterAtNextBytecode(
MacroAssembler* masm) {
- // Implement on this platform, https://crrev.com/c/2695591.
- __ bkpt(0);
+ Generate_BaselineOrInterpreterEntry(masm, true);
}
void Builtins::Generate_InterpreterOnStackReplacement_ToBaseline(
MacroAssembler* masm) {
- // Implement on this platform, https://crrev.com/c/2800112.
- __ bkpt(0);
+ Generate_BaselineOrInterpreterEntry(masm, false, true);
}
void Builtins::Generate_DynamicCheckMapsTrampoline(MacroAssembler* masm) {
diff --git a/deps/v8/src/builtins/setup-builtins-internal.cc b/deps/v8/src/builtins/setup-builtins-internal.cc
index 9dcecdab33..3153799793 100644
--- a/deps/v8/src/builtins/setup-builtins-internal.cc
+++ b/deps/v8/src/builtins/setup-builtins-internal.cc
@@ -199,6 +199,9 @@ void SetupIsolateDelegate::AddBuiltin(Builtins* builtins, Builtin builtin,
Code code) {
DCHECK_EQ(builtin, code.builtin_id());
builtins->set_code(builtin, code);
+ if (V8_EXTERNAL_CODE_SPACE_BOOL) {
+ builtins->set_codet(builtin, ToCodeT(code));
+ }
}
// static
@@ -220,7 +223,7 @@ void SetupIsolateDelegate::ReplacePlaceholders(Isolate* isolate) {
// Replace references from all builtin code objects to placeholders.
Builtins* builtins = isolate->builtins();
DisallowGarbageCollection no_gc;
- CodeSpaceMemoryModificationScope modification_scope(isolate->heap());
+ CodePageCollectionMemoryModificationScope modification_scope(isolate->heap());
static const int kRelocMask =
RelocInfo::ModeMask(RelocInfo::CODE_TARGET) |
RelocInfo::ModeMask(RelocInfo::FULL_EMBEDDED_OBJECT) |
@@ -230,6 +233,8 @@ void SetupIsolateDelegate::ReplacePlaceholders(Isolate* isolate) {
for (Builtin builtin = Builtins::kFirst; builtin <= Builtins::kLast;
++builtin) {
Code code = builtins->code(builtin);
+ isolate->heap()->UnprotectAndRegisterMemoryChunk(
+ code, UnprotectMemoryOrigin::kMainThread);
bool flush_icache = false;
for (RelocIterator it(code, kRelocMask); !it.done(); it.next()) {
RelocInfo* rinfo = it.rinfo();
diff --git a/deps/v8/src/builtins/torque-internal.tq b/deps/v8/src/builtins/torque-internal.tq
index 9fe503f5f5..8765a7b8ac 100644
--- a/deps/v8/src/builtins/torque-internal.tq
+++ b/deps/v8/src/builtins/torque-internal.tq
@@ -24,6 +24,20 @@ macro Subslice<T: type>(slice: MutableSlice<T>, start: intptr, length: intptr):
slice.object, offset, length);
}
+namespace unsafe {
+
+macro AddOffset<T: type>(ref: &T, offset: intptr): &T {
+ return torque_internal::unsafe::NewReference<T>(
+ ref.object, ref.offset + torque_internal::TimesSizeOf<T>(offset));
+}
+
+macro AddOffset<T: type>(ref: const &T, offset: intptr): const &T {
+ return torque_internal::unsafe::NewReference<T>(
+ ref.object, ref.offset + torque_internal::TimesSizeOf<T>(offset));
+}
+
+} // namespace unsafe
+
namespace torque_internal {
// Unsafe is a marker that we require to be passed when calling internal APIs
// that might lead to unsoundness when used incorrectly. Unsafe markers should
@@ -73,12 +87,15 @@ extern macro GCUnsafeReferenceToRawPtr(
struct Slice<T: type, Reference: type> {
macro TryAtIndex(index: intptr): Reference labels OutOfBounds {
if (Convert<uintptr>(index) < Convert<uintptr>(this.length)) {
- return unsafe::NewReference<T>(
- this.object, this.offset + TimesSizeOf<T>(index));
+ return this.UncheckedAtIndex(index);
} else {
goto OutOfBounds;
}
}
+ macro UncheckedAtIndex(index: intptr): Reference {
+ return unsafe::NewReference<T>(
+ this.object, this.offset + TimesSizeOf<T>(index));
+ }
macro AtIndex(index: intptr): Reference {
return this.TryAtIndex(index) otherwise unreachable;
@@ -317,6 +334,16 @@ intrinsic %IndexedFieldLength<T: type>(o: T, f: constexpr string): intptr;
intrinsic %FieldSlice<T: type, TSlice: type>(
o: T, f: constexpr string): TSlice;
+extern macro GetPendingMessage(): TheHole|JSMessageObject;
+extern macro SetPendingMessage(TheHole | JSMessageObject): void;
+
+// This is implicitly performed at the beginning of Torque catch-blocks.
+macro GetAndResetPendingMessage(): TheHole|JSMessageObject {
+ const message = GetPendingMessage();
+ SetPendingMessage(TheHole);
+ return message;
+}
+
} // namespace torque_internal
// Indicates that an array-field should not be initialized.
diff --git a/deps/v8/src/builtins/typed-array-createtypedarray.tq b/deps/v8/src/builtins/typed-array-createtypedarray.tq
index 45a396afe6..9004b32ef7 100644
--- a/deps/v8/src/builtins/typed-array-createtypedarray.tq
+++ b/deps/v8/src/builtins/typed-array-createtypedarray.tq
@@ -9,7 +9,7 @@ extern builtin IterableToListMayPreserveHoles(
Context, Object, Callable): JSArray;
extern macro TypedArrayBuiltinsAssembler::AllocateEmptyOnHeapBuffer(
- implicit context: Context)(uintptr): JSArrayBuffer;
+ implicit context: Context)(): JSArrayBuffer;
extern macro CodeStubAssembler::AllocateByteArray(uintptr): ByteArray;
extern macro TypedArrayBuiltinsAssembler::GetDefaultConstructor(
implicit context: Context)(JSTypedArray): JSFunction;
@@ -93,7 +93,7 @@ transitioning macro TypedArrayInitialize(implicit context: Context)(
if (byteLength > kMaxTypedArrayInHeap) goto AllocateOffHeap;
- const buffer = AllocateEmptyOnHeapBuffer(byteLength);
+ const buffer = AllocateEmptyOnHeapBuffer();
const isOnHeap: constexpr bool = true;
const isLengthTracking: constexpr bool = false;
@@ -292,7 +292,7 @@ transitioning macro ConstructByArrayBuffer(implicit context: Context)(
// in the step 12 branch.
newByteLength = bufferByteLength - offset;
newLength = elementsInfo.CalculateLength(newByteLength)
- otherwise IfInvalidLength;
+ otherwise IfInvalidOffset;
// 12. Else,
} else {
@@ -335,6 +335,7 @@ transitioning macro TypedArrayCreateByLength(implicit context: Context)(
// ValidateTypedArray currently returns the array, not the ViewBuffer.
const newTypedArray: JSTypedArray =
ValidateTypedArray(context, newTypedArrayObj, methodName);
+ // TODO(v8:11111): bit_field should be initialized to 0.
newTypedArray.bit_field.is_length_tracking = false;
newTypedArray.bit_field.is_backed_by_rab = false;
diff --git a/deps/v8/src/builtins/typed-array.tq b/deps/v8/src/builtins/typed-array.tq
index 5ddb1072ae..c242851de2 100644
--- a/deps/v8/src/builtins/typed-array.tq
+++ b/deps/v8/src/builtins/typed-array.tq
@@ -180,17 +180,18 @@ extern macro TypedArrayBuiltinsAssembler::SetJSTypedArrayOnHeapDataPtr(
JSTypedArray, ByteArray, uintptr): void;
extern macro TypedArrayBuiltinsAssembler::SetJSTypedArrayOffHeapDataPtr(
JSTypedArray, RawPtr, uintptr): void;
-extern macro IsJSTypedArrayDetachedOrOutOfBounds(JSTypedArray):
- never labels Detached, NotDetached;
+extern macro IsJSArrayBufferViewDetachedOrOutOfBounds(JSArrayBufferView):
+ never labels DetachedOrOutOfBounds, NotDetachedNorOutOfBounds;
// AttachedJSTypedArray guards that the array's buffer is not detached.
transient type AttachedJSTypedArray extends JSTypedArray;
macro EnsureAttached(array: JSTypedArray): AttachedJSTypedArray
- labels Detached {
+ labels DetachedOrOutOfBounds {
try {
- IsJSTypedArrayDetachedOrOutOfBounds(array) otherwise Detached, NotDetached;
- } label NotDetached {
+ IsJSArrayBufferViewDetachedOrOutOfBounds(array)
+ otherwise DetachedOrOutOfBounds, NotDetachedNorOutOfBounds;
+ } label NotDetachedNorOutOfBounds {
return %RawDownCast<AttachedJSTypedArray>(array);
}
}
diff --git a/deps/v8/src/builtins/wasm.tq b/deps/v8/src/builtins/wasm.tq
index aadb17c3a0..cfe17018ba 100644
--- a/deps/v8/src/builtins/wasm.tq
+++ b/deps/v8/src/builtins/wasm.tq
@@ -64,7 +64,7 @@ extern macro WasmBuiltinsAssembler::LoadContextFromInstance(WasmInstanceObject):
NativeContext;
extern macro WasmBuiltinsAssembler::LoadTablesFromInstance(WasmInstanceObject):
FixedArray;
-extern macro WasmBuiltinsAssembler::LoadExternalFunctionsFromInstance(
+extern macro WasmBuiltinsAssembler::LoadInternalFunctionsFromInstance(
WasmInstanceObject): FixedArray;
extern macro WasmBuiltinsAssembler::LoadManagedObjectMapsFromInstance(
WasmInstanceObject): FixedArray;
@@ -227,7 +227,7 @@ builtin WasmTableSet(tableIndex: intptr, index: int32, value: Object): Object {
builtin WasmRefFunc(index: uint32): Object {
const instance: WasmInstanceObject = LoadInstanceFromFrame();
try {
- const table: FixedArray = LoadExternalFunctionsFromInstance(instance);
+ const table: FixedArray = LoadInternalFunctionsFromInstance(instance);
if (table == Undefined) goto CallRuntime;
const functionIndex: intptr = Signed(ChangeUint32ToWord(index));
const result: Object = LoadFixedArrayElement(table, functionIndex);
@@ -475,16 +475,11 @@ struct TargetAndInstance {
instance: HeapObject; // WasmInstanceObject or WasmApiFunctionRef
}
-macro GetTargetAndInstance(funcref: JSFunction): TargetAndInstance {
- const sfi = funcref.shared_function_info;
- dcheck(Is<WasmFunctionData>(sfi.function_data));
- const funcData = UnsafeCast<WasmFunctionData>(sfi.function_data);
- const ref = funcData.ref;
- let target = funcData.foreign_address_ptr;
+macro GetTargetAndInstance(funcref: WasmInternalFunction): TargetAndInstance {
+ const ref = funcref.ref;
+ let target = funcref.foreign_address_ptr;
if (Signed(target) == IntPtrConstant(0)) {
- const wrapper =
- UnsafeCast<WasmJSFunctionData>(funcData).wasm_to_js_wrapper_code;
- target = GetCodeEntry(wrapper);
+ target = GetCodeEntry(funcref.code);
}
return TargetAndInstance{target: target, instance: ref};
}
@@ -493,19 +488,23 @@ macro GetTargetAndInstance(funcref: JSFunction): TargetAndInstance {
// Two slots per call_ref instruction. These slots' values can be:
// - uninitialized: (undefined, <unused>). Note: we use {undefined} as the
// sentinel as an optimization, as it's the default value for FixedArrays.
-// - monomorphic: (funcref, call_ref_data)
+// - monomorphic: (funcref, count (smi)). The second slot is a counter for how
+// often the funcref in the first slot has been seen.
// - polymorphic: (fixed_array, <unused>). In this case, the array
-// contains 2..4 pairs (funcref, call_ref_data) (like monomorphic data).
+// contains 2..4 pairs (funcref, count (smi)) (like monomorphic data).
// - megamorphic: ("megamorphic" sentinel, <unused>)
-
+//
+// TODO(rstz): The counter might overflow if it exceeds the range of a Smi.
+// This can lead to incorrect inlining decisions.
builtin CallRefIC(
- vector: FixedArray, index: intptr, funcref: JSFunction): TargetAndInstance {
+ vector: FixedArray, index: intptr,
+ funcref: WasmInternalFunction): TargetAndInstance {
const value = vector.objects[index];
if (value == funcref) {
// Monomorphic hit. Check for this case first to maximize its performance.
- const data = UnsafeCast<CallRefData>(vector.objects[index + 1]);
- data.count = data.count + 1;
- return TargetAndInstance{target: data.target, instance: data.instance};
+ const count = UnsafeCast<Smi>(vector.objects[index + 1]) + SmiConstant(1);
+ vector.objects[index + 1] = count;
+ return GetTargetAndInstance(funcref);
}
// Check for polymorphic hit; its performance is second-most-important.
if (Is<FixedArray>(value)) {
@@ -513,9 +512,9 @@ builtin CallRefIC(
for (let i: intptr = 0; i < entries.length_intptr; i += 2) {
if (entries.objects[i] == funcref) {
// Polymorphic hit.
- const data = UnsafeCast<CallRefData>(entries.objects[i + 1]);
- data.count = data.count + 1;
- return TargetAndInstance{target: data.target, instance: data.instance};
+ const count = UnsafeCast<Smi>(entries.objects[i + 1]) + SmiConstant(1);
+ entries.objects[i + 1] = count;
+ return GetTargetAndInstance(funcref);
}
}
}
@@ -523,10 +522,8 @@ builtin CallRefIC(
// instance. They all fall through to returning the computed data.
const result = GetTargetAndInstance(funcref);
if (TaggedEqual(value, Undefined)) {
- const data = new
- CallRefData{instance: result.instance, target: result.target, count: 1};
vector.objects[index] = funcref;
- vector.objects[index + 1] = data;
+ vector.objects[index + 1] = SmiConstant(1);
} else if (Is<FixedArray>(value)) {
// Polymorphic miss.
const entries = UnsafeCast<FixedArray>(value);
@@ -534,8 +531,6 @@ builtin CallRefIC(
vector.objects[index] = ic::kMegamorphicSymbol;
vector.objects[index + 1] = ic::kMegamorphicSymbol;
} else {
- const data = new
- CallRefData{instance: result.instance, target: result.target, count: 1};
const newEntries = UnsafeCast<FixedArray>(AllocateFixedArray(
ElementsKind::PACKED_ELEMENTS, entries.length_intptr + 2,
AllocationFlag::kNone));
@@ -544,22 +539,20 @@ builtin CallRefIC(
}
const newIndex = entries.length_intptr;
newEntries.objects[newIndex] = funcref;
- newEntries.objects[newIndex + 1] = data;
+ newEntries.objects[newIndex + 1] = SmiConstant(1);
vector.objects[index] = newEntries;
}
- } else if (Is<JSFunction>(value)) {
+ } else if (Is<WasmInternalFunction>(value)) {
// Monomorphic miss.
- const data = new
- CallRefData{instance: result.instance, target: result.target, count: 1};
const newEntries = UnsafeCast<FixedArray>(AllocateFixedArray(
ElementsKind::PACKED_ELEMENTS, 4, AllocationFlag::kNone));
newEntries.objects[0] = value;
newEntries.objects[1] = vector.objects[index + 1];
newEntries.objects[2] = funcref;
- newEntries.objects[3] = data;
+ newEntries.objects[3] = SmiConstant(1);
vector.objects[index] = newEntries;
- // Clear the old pointer to the first entry's data object; the specific
- // value we write doesn't matter.
+ // Clear the first entry's counter; the specific value we write doesn't
+ // matter.
vector.objects[index + 1] = Undefined;
}
// The "ic::IsMegamorphic(value)" case doesn't need to do anything.
diff --git a/deps/v8/src/builtins/x64/builtins-x64.cc b/deps/v8/src/builtins/x64/builtins-x64.cc
index 7beedbc3fd..125614fa3d 100644
--- a/deps/v8/src/builtins/x64/builtins-x64.cc
+++ b/deps/v8/src/builtins/x64/builtins-x64.cc
@@ -78,7 +78,7 @@ static void GenerateTailCallToReturnedCode(
__ Pop(kJavaScriptCallTargetRegister);
}
static_assert(kJavaScriptCallCodeStartRegister == rcx, "ABI mismatch");
- __ JumpCodeObject(rcx, jump_mode);
+ __ JumpCodeTObject(rcx, jump_mode);
}
namespace {
@@ -212,8 +212,10 @@ void Builtins::Generate_JSConstructStubGeneric(MacroAssembler* masm) {
rbx, FieldOperand(rdi, JSFunction::kSharedFunctionInfoOffset));
__ movl(rbx, FieldOperand(rbx, SharedFunctionInfo::kFlagsOffset));
__ DecodeField<SharedFunctionInfo::FunctionKindBits>(rbx);
- __ JumpIfIsInRange(rbx, kDefaultDerivedConstructor, kDerivedConstructor,
- &not_create_implicit_receiver, Label::kNear);
+ __ JumpIfIsInRange(
+ rbx, static_cast<uint32_t>(FunctionKind::kDefaultDerivedConstructor),
+ static_cast<uint32_t>(FunctionKind::kDerivedConstructor),
+ &not_create_implicit_receiver, Label::kNear);
// If not derived class constructor: Allocate the new receiver object.
__ IncrementCounter(masm->isolate()->counters()->constructed_objects(), 1);
@@ -948,7 +950,7 @@ static void TailCallRuntimeIfMarkerEquals(MacroAssembler* masm,
Runtime::FunctionId function_id) {
ASM_CODE_COMMENT(masm);
Label no_match;
- __ Cmp(actual_marker, expected_marker);
+ __ Cmp(actual_marker, static_cast<int>(expected_marker));
__ j(not_equal, &no_match);
GenerateTailCallToReturnedCode(masm, function_id);
__ bind(&no_match);
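The static_cast added above reflects expected_marker being a scoped enum, which no longer converts implicitly for the Cmp. A minimal illustration with a stand-in enum (not V8's definition):

#include <cstdint>

enum class Marker : int32_t { kNone = 0, kCompileOptimized = 1 };  // stand-in

// Scoped enums need an explicit cast before comparing against a raw value.
bool MarkerEquals(int32_t actual_marker, Marker expected_marker) {
  return actual_marker == static_cast<int32_t>(expected_marker);
}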
@@ -2974,15 +2976,9 @@ void Builtins::Generate_GenericJSToWasmWrapper(MacroAssembler* masm) {
// -------------------------------------------
// Compute offsets and prepare for GC.
// -------------------------------------------
- // We will have to save a value indicating the GC the number
- // of values on the top of the stack that have to be scanned before calling
- // the Wasm function.
- constexpr int kFrameMarkerOffset = -kSystemPointerSize;
- constexpr int kGCScanSlotCountOffset =
- kFrameMarkerOffset - kSystemPointerSize;
// The number of parameters passed to this function.
constexpr int kInParamCountOffset =
- kGCScanSlotCountOffset - kSystemPointerSize;
+ BuiltinWasmWrapperConstants::kGCScanSlotCountOffset - kSystemPointerSize;
// The number of parameters according to the signature.
constexpr int kParamCountOffset = kInParamCountOffset - kSystemPointerSize;
constexpr int kReturnCountOffset = kParamCountOffset - kSystemPointerSize;
@@ -3389,17 +3385,20 @@ void Builtins::Generate_GenericJSToWasmWrapper(MacroAssembler* masm) {
Register function_entry = function_data;
Register scratch = r12;
+ __ LoadAnyTaggedField(
+ function_entry,
+ FieldOperand(function_data, WasmExportedFunctionData::kInternalOffset));
__ LoadExternalPointerField(
function_entry,
- FieldOperand(function_data,
- WasmExportedFunctionData::kForeignAddressOffset),
+ FieldOperand(function_entry, WasmInternalFunction::kForeignAddressOffset),
kForeignForeignAddressTag, scratch);
function_data = no_reg;
scratch = no_reg;
// We set the indicating value for the GC to the proper one for Wasm call.
constexpr int kWasmCallGCScanSlotCount = 0;
- __ Move(MemOperand(rbp, kGCScanSlotCountOffset), kWasmCallGCScanSlotCount);
+ __ Move(MemOperand(rbp, BuiltinWasmWrapperConstants::kGCScanSlotCountOffset),
+ kWasmCallGCScanSlotCount);
// -------------------------------------------
// Call the Wasm function.
@@ -3482,10 +3481,12 @@ void Builtins::Generate_GenericJSToWasmWrapper(MacroAssembler* masm) {
// The builtin expects the parameter to be in register param = rax.
constexpr int kBuiltinCallGCScanSlotCount = 2;
- PrepareForBuiltinCall(masm, MemOperand(rbp, kGCScanSlotCountOffset),
- kBuiltinCallGCScanSlotCount, current_param, param_limit,
- current_int_param_slot, current_float_param_slot,
- valuetypes_array_ptr, wasm_instance, function_data);
+ PrepareForBuiltinCall(
+ masm,
+ MemOperand(rbp, BuiltinWasmWrapperConstants::kGCScanSlotCountOffset),
+ kBuiltinCallGCScanSlotCount, current_param, param_limit,
+ current_int_param_slot, current_float_param_slot, valuetypes_array_ptr,
+ wasm_instance, function_data);
Label param_kWasmI32_not_smi;
Label param_kWasmI64;
@@ -3632,7 +3633,8 @@ void Builtins::Generate_GenericJSToWasmWrapper(MacroAssembler* masm) {
// -------------------------------------------
__ bind(&compile_wrapper);
// Enable GC.
- MemOperand GCScanSlotPlace = MemOperand(rbp, kGCScanSlotCountOffset);
+ MemOperand GCScanSlotPlace =
+ MemOperand(rbp, BuiltinWasmWrapperConstants::kGCScanSlotCountOffset);
__ Move(GCScanSlotPlace, 4);
// Save registers to the stack.
__ pushq(wasm_instance);
@@ -3656,6 +3658,7 @@ namespace {
// Helper function for WasmReturnPromiseOnSuspend.
void LoadJumpBuffer(MacroAssembler* masm, Register jmpbuf) {
__ movq(rsp, MemOperand(jmpbuf, wasm::kJmpBufSpOffset));
+ __ movq(rbp, MemOperand(jmpbuf, wasm::kJmpBufFpOffset));
// The stack limit is set separately under the ExecutionAccess lock.
// TODO(thibaudm): Reload live registers.
}
@@ -3663,7 +3666,7 @@ void LoadJumpBuffer(MacroAssembler* masm, Register jmpbuf) {
void Builtins::Generate_WasmReturnPromiseOnSuspend(MacroAssembler* masm) {
// Set up the stackframe.
- __ EnterFrame(StackFrame::JS_TO_WASM);
+ __ EnterFrame(StackFrame::RETURN_PROMISE_ON_SUSPEND);
// Parameters.
Register closure = kJSFunctionRegister; // rdi
@@ -3672,14 +3675,11 @@ void Builtins::Generate_WasmReturnPromiseOnSuspend(MacroAssembler* masm) {
__ decq(param_count);
}
- constexpr int kFrameMarkerOffset = -kSystemPointerSize;
- constexpr int kParamCountOffset = kFrameMarkerOffset - kSystemPointerSize;
- // The frame marker is not included in the slot count.
- constexpr int kNumSpillSlots =
- -(kParamCountOffset - kFrameMarkerOffset) / kSystemPointerSize;
- __ subq(rsp, Immediate(kNumSpillSlots * kSystemPointerSize));
+ __ subq(rsp, Immediate(ReturnPromiseOnSuspendFrameConstants::kSpillAreaSize));
- __ movq(MemOperand(rbp, kParamCountOffset), param_count);
+ __ movq(
+ MemOperand(rbp, ReturnPromiseOnSuspendFrameConstants::kParamCountOffset),
+ param_count);
// -------------------------------------------
// Get the instance and wasm call target.
@@ -3707,10 +3707,7 @@ void Builtins::Generate_WasmReturnPromiseOnSuspend(MacroAssembler* masm) {
// -------------------------------------------
Register active_continuation = rax;
Register foreign_jmpbuf = rbx;
- __ LoadAnyTaggedField(
- active_continuation,
- FieldOperand(wasm_instance,
- WasmInstanceObject::kActiveContinuationOffset));
+ __ LoadRoot(active_continuation, RootIndex::kActiveContinuation);
__ LoadAnyTaggedField(
foreign_jmpbuf,
FieldOperand(active_continuation, WasmContinuationObject::kJmpbufOffset));
@@ -3719,6 +3716,7 @@ void Builtins::Generate_WasmReturnPromiseOnSuspend(MacroAssembler* masm) {
jmpbuf, FieldOperand(foreign_jmpbuf, Foreign::kForeignAddressOffset),
kForeignForeignAddressTag, r8);
__ movq(MemOperand(jmpbuf, wasm::kJmpBufSpOffset), rsp);
+ __ movq(MemOperand(jmpbuf, wasm::kJmpBufFpOffset), rbp);
Register stack_limit_address = rcx;
__ movq(stack_limit_address,
FieldOperand(wasm_instance,
@@ -3735,11 +3733,12 @@ void Builtins::Generate_WasmReturnPromiseOnSuspend(MacroAssembler* masm) {
// -------------------------------------------
// Allocate a new continuation.
// -------------------------------------------
+ MemOperand GCScanSlotPlace =
+ MemOperand(rbp, BuiltinWasmWrapperConstants::kGCScanSlotCountOffset);
+ __ Move(GCScanSlotPlace, 2);
__ Push(wasm_instance);
__ Push(function_data);
- __ Push(wasm_instance);
__ Move(kContextRegister, Smi::zero());
- // TODO(thibaudm): Handle GC.
__ CallRuntime(Runtime::kWasmAllocateContinuation);
__ Pop(function_data);
__ Pop(wasm_instance);
@@ -3759,9 +3758,9 @@ void Builtins::Generate_WasmReturnPromiseOnSuspend(MacroAssembler* masm) {
target_jmpbuf,
FieldOperand(foreign_jmpbuf, Foreign::kForeignAddressOffset),
kForeignForeignAddressTag, r8);
+ __ Move(GCScanSlotPlace, 0);
// Switch stack!
LoadJumpBuffer(masm, target_jmpbuf);
- __ movq(rbp, rsp); // New stack, there is no frame yet.
foreign_jmpbuf = no_reg;
target_jmpbuf = no_reg;
// live: [rsi, rdi]
@@ -3778,10 +3777,12 @@ void Builtins::Generate_WasmReturnPromiseOnSuspend(MacroAssembler* masm) {
MemOperand(kRootRegister, Isolate::thread_in_wasm_flag_address_offset()));
__ movl(MemOperand(thread_in_wasm_flag_addr, 0), Immediate(1));
Register function_entry = function_data;
+ __ LoadAnyTaggedField(
+ function_entry,
+ FieldOperand(function_entry, WasmExportedFunctionData::kInternalOffset));
__ LoadExternalPointerField(
function_entry,
- FieldOperand(function_data,
- WasmExportedFunctionData::kForeignAddressOffset),
+ FieldOperand(function_data, WasmInternalFunction::kForeignAddressOffset),
kForeignForeignAddressTag, r8);
__ Push(wasm_instance);
__ call(function_entry);
@@ -3800,10 +3801,7 @@ void Builtins::Generate_WasmReturnPromiseOnSuspend(MacroAssembler* masm) {
// Reload parent continuation.
// -------------------------------------------
active_continuation = rbx;
- __ LoadAnyTaggedField(
- active_continuation,
- FieldOperand(wasm_instance,
- WasmInstanceObject::kActiveContinuationOffset));
+ __ LoadRoot(active_continuation, RootIndex::kActiveContinuation);
Register parent = rdx;
__ LoadAnyTaggedField(
parent,
@@ -3814,20 +3812,7 @@ void Builtins::Generate_WasmReturnPromiseOnSuspend(MacroAssembler* masm) {
// -------------------------------------------
// Update instance active continuation.
// -------------------------------------------
- Register object = WriteBarrierDescriptor::ObjectRegister();
- Register slot_address = WriteBarrierDescriptor::SlotAddressRegister();
- DCHECK_EQ(object, rdi);
- DCHECK((slot_address == rbx || slot_address == r8));
- // Save reg clobbered by the write barrier.
- __ movq(rax, parent);
- __ movq(object, wasm_instance);
- __ StoreTaggedField(
- FieldOperand(object, WasmInstanceObject::kActiveContinuationOffset),
- parent);
- __ RecordWriteField(object, WasmInstanceObject::kActiveContinuationOffset,
- parent, slot_address, SaveFPRegsMode::kIgnore);
- // Restore reg clobbered by the write barrier.
- __ movq(parent, rax);
+ __ movq(masm->RootAsOperand(RootIndex::kActiveContinuation), parent);
foreign_jmpbuf = rax;
__ LoadAnyTaggedField(
foreign_jmpbuf,
@@ -3838,9 +3823,8 @@ void Builtins::Generate_WasmReturnPromiseOnSuspend(MacroAssembler* masm) {
kForeignForeignAddressTag, r8);
// Switch stack!
LoadJumpBuffer(masm, jmpbuf);
- __ leaq(rbp, Operand(rsp, (kNumSpillSlots + 1) * kSystemPointerSize));
+ __ Move(GCScanSlotPlace, 1);
__ Push(wasm_instance); // Spill.
- __ Push(wasm_instance); // First arg.
__ Move(kContextRegister, Smi::zero());
__ CallRuntime(Runtime::kWasmSyncStackLimit);
__ Pop(wasm_instance);
@@ -3852,8 +3836,10 @@ void Builtins::Generate_WasmReturnPromiseOnSuspend(MacroAssembler* masm) {
// -------------------------------------------
// Epilogue.
// -------------------------------------------
- __ movq(param_count, MemOperand(rbp, kParamCountOffset));
- __ LeaveFrame(StackFrame::JS_TO_WASM);
+ __ movq(
+ param_count,
+ MemOperand(rbp, ReturnPromiseOnSuspendFrameConstants::kParamCountOffset));
+ __ LeaveFrame(StackFrame::RETURN_PROMISE_ON_SUSPEND);
__ DropArguments(param_count, r8, TurboAssembler::kCountIsInteger,
TurboAssembler::kCountExcludesReceiver);
__ ret(0);
diff --git a/deps/v8/src/codegen/OWNERS b/deps/v8/src/codegen/OWNERS
index a3c3ffdba6..bf654f6789 100644
--- a/deps/v8/src/codegen/OWNERS
+++ b/deps/v8/src/codegen/OWNERS
@@ -10,3 +10,5 @@ mslekova@chromium.org
mvstanton@chromium.org
nicohartmann@chromium.org
zhin@chromium.org
+
+per-file compiler.*=marja@chromium.org
diff --git a/deps/v8/src/codegen/arm/assembler-arm-inl.h b/deps/v8/src/codegen/arm/assembler-arm-inl.h
index 0ee81b2f94..9080b3e0b3 100644
--- a/deps/v8/src/codegen/arm/assembler-arm-inl.h
+++ b/deps/v8/src/codegen/arm/assembler-arm-inl.h
@@ -195,7 +195,7 @@ Operand::Operand(const ExternalReference& f)
value_.immediate = static_cast<int32_t>(f.address());
}
-Operand::Operand(Smi value) : rmode_(RelocInfo::NONE) {
+Operand::Operand(Smi value) : rmode_(RelocInfo::NO_INFO) {
value_.immediate = static_cast<intptr_t>(value.ptr());
}
diff --git a/deps/v8/src/codegen/arm/assembler-arm.cc b/deps/v8/src/codegen/arm/assembler-arm.cc
index 38d691007f..e434cac32d 100644
--- a/deps/v8/src/codegen/arm/assembler-arm.cc
+++ b/deps/v8/src/codegen/arm/assembler-arm.cc
@@ -1132,7 +1132,7 @@ bool MustOutputRelocInfo(RelocInfo::Mode rmode, const Assembler* assembler) {
if (RelocInfo::IsOnlyForSerializer(rmode)) {
if (assembler->predictable_code_size()) return true;
return assembler->options().record_reloc_info_for_serialization;
- } else if (RelocInfo::IsNone(rmode)) {
+ } else if (RelocInfo::IsNoInfo(rmode)) {
return false;
}
return true;
@@ -1464,7 +1464,7 @@ int Assembler::branch_offset(Label* L) {
// Branch instructions.
void Assembler::b(int branch_offset, Condition cond, RelocInfo::Mode rmode) {
- if (!RelocInfo::IsNone(rmode)) RecordRelocInfo(rmode);
+ if (!RelocInfo::IsNoInfo(rmode)) RecordRelocInfo(rmode);
DCHECK_EQ(branch_offset & 3, 0);
int imm24 = branch_offset >> 2;
const bool b_imm_check = is_int24(imm24);
@@ -1478,7 +1478,7 @@ void Assembler::b(int branch_offset, Condition cond, RelocInfo::Mode rmode) {
}
void Assembler::bl(int branch_offset, Condition cond, RelocInfo::Mode rmode) {
- if (!RelocInfo::IsNone(rmode)) RecordRelocInfo(rmode);
+ if (!RelocInfo::IsNoInfo(rmode)) RecordRelocInfo(rmode);
DCHECK_EQ(branch_offset & 3, 0);
int imm24 = branch_offset >> 2;
const bool bl_imm_check = is_int24(imm24);
@@ -5226,7 +5226,7 @@ void Assembler::dd(uint32_t data, RelocInfo::Mode rmode) {
// blocked before using dd.
DCHECK(is_const_pool_blocked() || pending_32_bit_constants_.empty());
CheckBuffer();
- if (!RelocInfo::IsNone(rmode)) {
+ if (!RelocInfo::IsNoInfo(rmode)) {
DCHECK(RelocInfo::IsDataEmbeddedObject(rmode) ||
RelocInfo::IsLiteralConstant(rmode));
RecordRelocInfo(rmode);
@@ -5240,7 +5240,7 @@ void Assembler::dq(uint64_t value, RelocInfo::Mode rmode) {
// blocked before using dq.
DCHECK(is_const_pool_blocked() || pending_32_bit_constants_.empty());
CheckBuffer();
- if (!RelocInfo::IsNone(rmode)) {
+ if (!RelocInfo::IsNoInfo(rmode)) {
DCHECK(RelocInfo::IsDataEmbeddedObject(rmode) ||
RelocInfo::IsLiteralConstant(rmode));
RecordRelocInfo(rmode);
diff --git a/deps/v8/src/codegen/arm/assembler-arm.h b/deps/v8/src/codegen/arm/assembler-arm.h
index a7d224a094..4cce50f795 100644
--- a/deps/v8/src/codegen/arm/assembler-arm.h
+++ b/deps/v8/src/codegen/arm/assembler-arm.h
@@ -87,7 +87,7 @@ class V8_EXPORT_PRIVATE Operand {
public:
// immediate
V8_INLINE explicit Operand(int32_t immediate,
- RelocInfo::Mode rmode = RelocInfo::NONE)
+ RelocInfo::Mode rmode = RelocInfo::NO_INFO)
: rmode_(rmode) {
value_.immediate = immediate;
}
@@ -405,9 +405,9 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
// Branch instructions
void b(int branch_offset, Condition cond = al,
- RelocInfo::Mode rmode = RelocInfo::NONE);
+ RelocInfo::Mode rmode = RelocInfo::NO_INFO);
void bl(int branch_offset, Condition cond = al,
- RelocInfo::Mode rmode = RelocInfo::NONE);
+ RelocInfo::Mode rmode = RelocInfo::NO_INFO);
void blx(int branch_offset); // v5 and above
void blx(Register target, Condition cond = al); // v5 and above
void bx(Register target, Condition cond = al); // v5 and above, plus v4t
@@ -1095,9 +1095,9 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
// called before any use of db/dd/dq/dp to ensure that constant pools
// are not emitted as part of the tables generated.
void db(uint8_t data);
- void dd(uint32_t data, RelocInfo::Mode rmode = RelocInfo::NONE);
- void dq(uint64_t data, RelocInfo::Mode rmode = RelocInfo::NONE);
- void dp(uintptr_t data, RelocInfo::Mode rmode = RelocInfo::NONE) {
+ void dd(uint32_t data, RelocInfo::Mode rmode = RelocInfo::NO_INFO);
+ void dq(uint64_t data, RelocInfo::Mode rmode = RelocInfo::NO_INFO);
+ void dp(uintptr_t data, RelocInfo::Mode rmode = RelocInfo::NO_INFO) {
dd(data, rmode);
}
diff --git a/deps/v8/src/codegen/arm/macro-assembler-arm.cc b/deps/v8/src/codegen/arm/macro-assembler-arm.cc
index 5c46c64b3e..95eb8795e9 100644
--- a/deps/v8/src/codegen/arm/macro-assembler-arm.cc
+++ b/deps/v8/src/codegen/arm/macro-assembler-arm.cc
@@ -2022,7 +2022,7 @@ void MacroAssembler::JumpToExternalReference(const ExternalReference& builtin,
Jump(code, RelocInfo::CODE_TARGET);
}
-void MacroAssembler::JumpToInstructionStream(Address entry) {
+void MacroAssembler::JumpToOffHeapInstructionStream(Address entry) {
mov(kOffHeapTrampolineRegister, Operand(entry, RelocInfo::OFF_HEAP_TARGET));
Jump(kOffHeapTrampolineRegister);
}
diff --git a/deps/v8/src/codegen/arm/macro-assembler-arm.h b/deps/v8/src/codegen/arm/macro-assembler-arm.h
index 73efa12002..e43aec485f 100644
--- a/deps/v8/src/codegen/arm/macro-assembler-arm.h
+++ b/deps/v8/src/codegen/arm/macro-assembler-arm.h
@@ -800,7 +800,7 @@ class V8_EXPORT_PRIVATE MacroAssembler : public TurboAssembler {
bool builtin_exit_frame = false);
// Generates a trampoline to jump to the off-heap instruction stream.
- void JumpToInstructionStream(Address entry);
+ void JumpToOffHeapInstructionStream(Address entry);
// ---------------------------------------------------------------------------
// In-place weak references.
diff --git a/deps/v8/src/codegen/arm64/assembler-arm64-inl.h b/deps/v8/src/codegen/arm64/assembler-arm64-inl.h
index c5a1d4fd8a..40b9a94dd8 100644
--- a/deps/v8/src/codegen/arm64/assembler-arm64-inl.h
+++ b/deps/v8/src/codegen/arm64/assembler-arm64-inl.h
@@ -192,7 +192,7 @@ inline VRegister CPURegister::Q() const {
// Default initializer is for int types
template <typename T>
struct ImmediateInitializer {
- static inline RelocInfo::Mode rmode_for(T) { return RelocInfo::NONE; }
+ static inline RelocInfo::Mode rmode_for(T) { return RelocInfo::NO_INFO; }
static inline int64_t immediate_for(T t) {
STATIC_ASSERT(sizeof(T) <= 8);
STATIC_ASSERT(std::is_integral<T>::value || std::is_enum<T>::value);
@@ -202,7 +202,7 @@ struct ImmediateInitializer {
template <>
struct ImmediateInitializer<Smi> {
- static inline RelocInfo::Mode rmode_for(Smi t) { return RelocInfo::NONE; }
+ static inline RelocInfo::Mode rmode_for(Smi t) { return RelocInfo::NO_INFO; }
static inline int64_t immediate_for(Smi t) {
return static_cast<int64_t>(t.ptr());
}
diff --git a/deps/v8/src/codegen/arm64/assembler-arm64.cc b/deps/v8/src/codegen/arm64/assembler-arm64.cc
index 627c7ae021..fd5cd326ec 100644
--- a/deps/v8/src/codegen/arm64/assembler-arm64.cc
+++ b/deps/v8/src/codegen/arm64/assembler-arm64.cc
@@ -314,7 +314,7 @@ bool Operand::NeedsRelocation(const Assembler* assembler) const {
return assembler->options().record_reloc_info_for_serialization;
}
- return !RelocInfo::IsNone(rmode);
+ return !RelocInfo::IsNoInfo(rmode);
}
// Assembler
@@ -4375,13 +4375,15 @@ void Assembler::RecordRelocInfo(RelocInfo::Mode rmode, intptr_t data,
void Assembler::near_jump(int offset, RelocInfo::Mode rmode) {
BlockPoolsScope no_pool_before_b_instr(this);
- if (!RelocInfo::IsNone(rmode)) RecordRelocInfo(rmode, offset, NO_POOL_ENTRY);
+ if (!RelocInfo::IsNoInfo(rmode))
+ RecordRelocInfo(rmode, offset, NO_POOL_ENTRY);
b(offset);
}
void Assembler::near_call(int offset, RelocInfo::Mode rmode) {
BlockPoolsScope no_pool_before_bl_instr(this);
- if (!RelocInfo::IsNone(rmode)) RecordRelocInfo(rmode, offset, NO_POOL_ENTRY);
+ if (!RelocInfo::IsNoInfo(rmode))
+ RecordRelocInfo(rmode, offset, NO_POOL_ENTRY);
bl(offset);
}
diff --git a/deps/v8/src/codegen/arm64/assembler-arm64.h b/deps/v8/src/codegen/arm64/assembler-arm64.h
index dac90f8058..df8fadf1f1 100644
--- a/deps/v8/src/codegen/arm64/assembler-arm64.h
+++ b/deps/v8/src/codegen/arm64/assembler-arm64.h
@@ -2065,27 +2065,27 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
// Required by V8.
void db(uint8_t data) { dc8(data); }
- void dd(uint32_t data, RelocInfo::Mode rmode = RelocInfo::NONE) {
+ void dd(uint32_t data, RelocInfo::Mode rmode = RelocInfo::NO_INFO) {
BlockPoolsScope no_pool_scope(this);
- if (!RelocInfo::IsNone(rmode)) {
+ if (!RelocInfo::IsNoInfo(rmode)) {
DCHECK(RelocInfo::IsDataEmbeddedObject(rmode) ||
RelocInfo::IsLiteralConstant(rmode));
RecordRelocInfo(rmode);
}
dc32(data);
}
- void dq(uint64_t data, RelocInfo::Mode rmode = RelocInfo::NONE) {
+ void dq(uint64_t data, RelocInfo::Mode rmode = RelocInfo::NO_INFO) {
BlockPoolsScope no_pool_scope(this);
- if (!RelocInfo::IsNone(rmode)) {
+ if (!RelocInfo::IsNoInfo(rmode)) {
DCHECK(RelocInfo::IsDataEmbeddedObject(rmode) ||
RelocInfo::IsLiteralConstant(rmode));
RecordRelocInfo(rmode);
}
dc64(data);
}
- void dp(uintptr_t data, RelocInfo::Mode rmode = RelocInfo::NONE) {
+ void dp(uintptr_t data, RelocInfo::Mode rmode = RelocInfo::NO_INFO) {
BlockPoolsScope no_pool_scope(this);
- if (!RelocInfo::IsNone(rmode)) {
+ if (!RelocInfo::IsNoInfo(rmode)) {
DCHECK(RelocInfo::IsDataEmbeddedObject(rmode) ||
RelocInfo::IsLiteralConstant(rmode));
RecordRelocInfo(rmode);
diff --git a/deps/v8/src/codegen/arm64/macro-assembler-arm64.cc b/deps/v8/src/codegen/arm64/macro-assembler-arm64.cc
index bcf2e4574a..58920c343a 100644
--- a/deps/v8/src/codegen/arm64/macro-assembler-arm64.cc
+++ b/deps/v8/src/codegen/arm64/macro-assembler-arm64.cc
@@ -1655,7 +1655,7 @@ void MacroAssembler::JumpToExternalReference(const ExternalReference& builtin,
Jump(code, RelocInfo::CODE_TARGET);
}
-void MacroAssembler::JumpToInstructionStream(Address entry) {
+void MacroAssembler::JumpToOffHeapInstructionStream(Address entry) {
Ldr(kOffHeapTrampolineRegister, Operand(entry, RelocInfo::OFF_HEAP_TARGET));
Br(kOffHeapTrampolineRegister);
}
diff --git a/deps/v8/src/codegen/arm64/macro-assembler-arm64.h b/deps/v8/src/codegen/arm64/macro-assembler-arm64.h
index 165d702c31..7c972bd307 100644
--- a/deps/v8/src/codegen/arm64/macro-assembler-arm64.h
+++ b/deps/v8/src/codegen/arm64/macro-assembler-arm64.h
@@ -1911,7 +1911,7 @@ class V8_EXPORT_PRIVATE MacroAssembler : public TurboAssembler {
bool builtin_exit_frame = false);
// Generates a trampoline to jump to the off-heap instruction stream.
- void JumpToInstructionStream(Address entry);
+ void JumpToOffHeapInstructionStream(Address entry);
// Registers used through the invocation chain are hard-coded.
// We force passing the parameters to ensure the contracts are correctly
diff --git a/deps/v8/src/codegen/arm64/register-arm64.h b/deps/v8/src/codegen/arm64/register-arm64.h
index ae6c4c9200..29a4212aac 100644
--- a/deps/v8/src/codegen/arm64/register-arm64.h
+++ b/deps/v8/src/codegen/arm64/register-arm64.h
@@ -547,8 +547,6 @@ using Simd128Register = VRegister;
// Lists of registers.
class V8_EXPORT_PRIVATE CPURegList {
public:
- CPURegList() = default;
-
template <typename... CPURegisters>
explicit CPURegList(CPURegister reg0, CPURegisters... regs)
: list_(CPURegister::ListOf(reg0, regs...)),
diff --git a/deps/v8/src/codegen/assembler.h b/deps/v8/src/codegen/assembler.h
index 50711046e6..6519520278 100644
--- a/deps/v8/src/codegen/assembler.h
+++ b/deps/v8/src/codegen/assembler.h
@@ -39,6 +39,7 @@
#include <memory>
#include <unordered_map>
+#include "src/base/macros.h"
#include "src/base/memory.h"
#include "src/codegen/code-comments.h"
#include "src/codegen/cpu-features.h"
@@ -64,7 +65,7 @@ using base::WriteUnalignedValue;
// Forward declarations.
class EmbeddedData;
-class InstructionStream;
+class OffHeapInstructionStream;
class Isolate;
class SCTableReference;
class SourcePosition;
@@ -387,7 +388,7 @@ class V8_EXPORT_PRIVATE AssemblerBase : public Malloced {
void RequestHeapObject(HeapObjectRequest request);
bool ShouldRecordRelocInfo(RelocInfo::Mode rmode) const {
- DCHECK(!RelocInfo::IsNone(rmode));
+ DCHECK(!RelocInfo::IsNoInfo(rmode));
if (options().disable_reloc_info_for_patching) return false;
if (RelocInfo::IsOnlyForSerializer(rmode) &&
!options().record_reloc_info_for_serialization && !FLAG_debug_code) {
@@ -470,7 +471,7 @@ class V8_EXPORT_PRIVATE V8_NODISCARD CpuFeatureScope {
#ifdef V8_CODE_COMMENTS
#define ASM_CODE_COMMENT(asm) ASM_CODE_COMMENT_STRING(asm, __func__)
#define ASM_CODE_COMMENT_STRING(asm, comment) \
- AssemblerBase::CodeComment asm_code_comment(asm, comment)
+ AssemblerBase::CodeComment UNIQUE_IDENTIFIER(asm_code_comment)(asm, comment)
#else
#define ASM_CODE_COMMENT(asm)
#define ASM_CODE_COMMENT_STRING(asm, ...)
diff --git a/deps/v8/src/codegen/code-reference.cc b/deps/v8/src/codegen/code-reference.cc
index 0c550fa0d3..27ff425a2f 100644
--- a/deps/v8/src/codegen/code-reference.cc
+++ b/deps/v8/src/codegen/code-reference.cc
@@ -86,26 +86,26 @@ struct CodeDescOps {
ret CodeReference::method() const { \
DCHECK(!is_null()); \
switch (kind_) { \
- case JS: \
+ case Kind::JS: \
return JSOps{js_code_}.method(); \
- case WASM: \
+ case Kind::WASM: \
return WasmOps{wasm_code_}.method(); \
- case CODE_DESC: \
+ case Kind::CODE_DESC: \
return CodeDescOps{code_desc_}.method(); \
default: \
UNREACHABLE(); \
} \
}
#else
-#define DISPATCH(ret, method) \
- ret CodeReference::method() const { \
- DCHECK(!is_null()); \
- DCHECK(kind_ == JS || kind_ == CODE_DESC); \
- if (kind_ == JS) { \
- return JSOps{js_code_}.method(); \
- } else { \
- return CodeDescOps{code_desc_}.method(); \
- } \
+#define DISPATCH(ret, method) \
+ ret CodeReference::method() const { \
+ DCHECK(!is_null()); \
+ DCHECK(kind_ == Kind::JS || kind_ == Kind::CODE_DESC); \
+ if (kind_ == Kind::JS) { \
+ return JSOps{js_code_}.method(); \
+ } else { \
+ return CodeDescOps{code_desc_}.method(); \
+ } \
}
#endif // V8_ENABLE_WEBASSEMBLY
diff --git a/deps/v8/src/codegen/code-reference.h b/deps/v8/src/codegen/code-reference.h
index 8ff3581689..9b54b6074e 100644
--- a/deps/v8/src/codegen/code-reference.h
+++ b/deps/v8/src/codegen/code-reference.h
@@ -20,12 +20,13 @@ class WasmCode;
class CodeReference {
public:
- CodeReference() : kind_(NONE), null_(nullptr) {}
+ CodeReference() : kind_(Kind::NONE), null_(nullptr) {}
explicit CodeReference(const wasm::WasmCode* wasm_code)
- : kind_(WASM), wasm_code_(wasm_code) {}
+ : kind_(Kind::WASM), wasm_code_(wasm_code) {}
explicit CodeReference(const CodeDesc* code_desc)
- : kind_(CODE_DESC), code_desc_(code_desc) {}
- explicit CodeReference(Handle<Code> js_code) : kind_(JS), js_code_(js_code) {}
+ : kind_(Kind::CODE_DESC), code_desc_(code_desc) {}
+ explicit CodeReference(Handle<Code> js_code)
+ : kind_(Kind::JS), js_code_(js_code) {}
Address constant_pool() const;
Address instruction_start() const;
@@ -37,22 +38,22 @@ class CodeReference {
Address code_comments() const;
int code_comments_size() const;
- bool is_null() const { return kind_ == NONE; }
- bool is_js() const { return kind_ == JS; }
- bool is_wasm_code() const { return kind_ == WASM; }
+ bool is_null() const { return kind_ == Kind::NONE; }
+ bool is_js() const { return kind_ == Kind::JS; }
+ bool is_wasm_code() const { return kind_ == Kind::WASM; }
Handle<Code> as_js_code() const {
- DCHECK_EQ(JS, kind_);
+ DCHECK_EQ(Kind::JS, kind_);
return js_code_;
}
const wasm::WasmCode* as_wasm_code() const {
- DCHECK_EQ(WASM, kind_);
+ DCHECK_EQ(Kind::WASM, kind_);
return wasm_code_;
}
private:
- enum { NONE, JS, WASM, CODE_DESC } kind_;
+ enum class Kind { NONE, JS, WASM, CODE_DESC } kind_;
union {
std::nullptr_t null_;
const wasm::WasmCode* wasm_code_;
diff --git a/deps/v8/src/codegen/code-stub-assembler.cc b/deps/v8/src/codegen/code-stub-assembler.cc
index 4a9c06bdd8..db50f7d3e4 100644
--- a/deps/v8/src/codegen/code-stub-assembler.cc
+++ b/deps/v8/src/codegen/code-stub-assembler.cc
@@ -22,6 +22,7 @@
#include "src/objects/descriptor-array.h"
#include "src/objects/function-kind.h"
#include "src/objects/heap-number.h"
+#include "src/objects/instance-type.h"
#include "src/objects/js-generator.h"
#include "src/objects/oddball.h"
#include "src/objects/ordered-hash-table-inl.h"
@@ -1539,16 +1540,21 @@ void CodeStubAssembler::BranchIfToBooleanIsTrue(TNode<Object> value,
}
}
-#ifdef V8_CAGED_POINTERS
-
-TNode<CagedPtrT> CodeStubAssembler::LoadCagedPointerFromObject(
+TNode<RawPtrT> CodeStubAssembler::LoadCagedPointerFromObject(
TNode<HeapObject> object, TNode<IntPtrT> field_offset) {
- return LoadObjectField<CagedPtrT>(object, field_offset);
+#ifdef V8_CAGED_POINTERS
+ return ReinterpretCast<RawPtrT>(
+ LoadObjectField<CagedPtrT>(object, field_offset));
+#else
+ return LoadObjectField<RawPtrT>(object, field_offset);
+#endif // V8_CAGED_POINTERS
}
void CodeStubAssembler::StoreCagedPointerToObject(TNode<HeapObject> object,
TNode<IntPtrT> offset,
- TNode<CagedPtrT> pointer) {
+ TNode<RawPtrT> pointer) {
+#ifdef V8_CAGED_POINTERS
+ TNode<CagedPtrT> caged_pointer = ReinterpretCast<CagedPtrT>(pointer);
#ifdef DEBUG
// Verify pointer points into the cage.
TNode<ExternalReference> cage_base_address =
@@ -1557,13 +1563,26 @@ void CodeStubAssembler::StoreCagedPointerToObject(TNode<HeapObject> object,
ExternalConstant(ExternalReference::virtual_memory_cage_end_address());
TNode<UintPtrT> cage_base = Load<UintPtrT>(cage_base_address);
TNode<UintPtrT> cage_end = Load<UintPtrT>(cage_end_address);
- CSA_CHECK(this, UintPtrGreaterThanOrEqual(pointer, cage_base));
- CSA_CHECK(this, UintPtrLessThan(pointer, cage_end));
-#endif
- StoreObjectFieldNoWriteBarrier<CagedPtrT>(object, offset, pointer);
+ CSA_DCHECK(this, UintPtrGreaterThanOrEqual(caged_pointer, cage_base));
+ CSA_DCHECK(this, UintPtrLessThan(caged_pointer, cage_end));
+#endif // DEBUG
+ StoreObjectFieldNoWriteBarrier<CagedPtrT>(object, offset, caged_pointer);
+#else
+ StoreObjectFieldNoWriteBarrier<RawPtrT>(object, offset, pointer);
+#endif // V8_CAGED_POINTERS
}
+TNode<RawPtrT> CodeStubAssembler::EmptyBackingStoreBufferConstant() {
+#ifdef V8_CAGED_POINTERS
+ // TODO(chromium:1218005) consider creating a LoadCagedPointerConstant() if
+ // more of these constants are required later on.
+ TNode<ExternalReference> empty_backing_store_buffer =
+ ExternalConstant(ExternalReference::empty_backing_store_buffer());
+ return Load<RawPtrT>(empty_backing_store_buffer);
+#else
+ return ReinterpretCast<RawPtrT>(IntPtrConstant(0));
#endif // V8_CAGED_POINTERS
+}
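These hunks change the caged-pointer accessors to take and return RawPtrT unconditionally, and only switch to the CagedPtrT representation (plus a debug range check and a non-null "empty backing store" sentinel) when V8_CAGED_POINTERS is compiled in. A hedged plain-C++ sketch of that pattern, with illustrative constants standing in for the real cage bounds:

    #include <cassert>
    #include <cstdint>

    // Stand-ins: in V8 the bounds come from the virtual memory cage.
    constexpr uintptr_t kCageBase = 0x10000000;
    constexpr uintptr_t kCageEnd  = 0x20000000;

    // Mirrors StoreCagedPointerToObject: the caller-facing type is a raw pointer
    // either way; with the cage enabled, debug builds check that the value
    // actually lies inside the cage before storing it.
    void StoreBackingStorePointer(uintptr_t* field, uintptr_t pointer) {
    #ifdef V8_CAGED_POINTERS
      assert(pointer >= kCageBase && pointer < kCageEnd);
    #endif
      *field = pointer;
    }

    // Mirrors EmptyBackingStoreBufferConstant: with the cage, "empty" is a real
    // cage-internal sentinel buffer rather than a null pointer.
    uintptr_t EmptyBackingStore() {
    #ifdef V8_CAGED_POINTERS
      return kCageBase;  // assumption: stand-in for the sentinel buffer's address
    #else
      return 0;
    #endif
    }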
TNode<ExternalPointerT> CodeStubAssembler::ChangeUint32ToExternalPointer(
TNode<Uint32T> value) {
@@ -1679,6 +1698,11 @@ TNode<Object> CodeStubAssembler::LoadFromParentFrame(int offset) {
return LoadFullTagged(frame_pointer, IntPtrConstant(offset));
}
+TNode<Uint8T> CodeStubAssembler::LoadUint8Ptr(TNode<RawPtrT> ptr,
+ TNode<IntPtrT> offset) {
+ return Load<Uint8T>(IntPtrAdd(ReinterpretCast<IntPtrT>(ptr), offset));
+}
+
TNode<IntPtrT> CodeStubAssembler::LoadAndUntagObjectField(
TNode<HeapObject> object, int offset) {
// Please use LoadMap(object) instead.
@@ -2892,8 +2916,10 @@ TNode<BoolT> CodeStubAssembler::IsGeneratorFunction(
SharedFunctionInfo::kFlagsOffset));
// See IsGeneratorFunction(FunctionKind kind).
- return IsInRange(function_kind, FunctionKind::kAsyncConciseGeneratorMethod,
- FunctionKind::kConciseGeneratorMethod);
+ return IsInRange(
+ function_kind,
+ static_cast<uint32_t>(FunctionKind::kAsyncConciseGeneratorMethod),
+ static_cast<uint32_t>(FunctionKind::kConciseGeneratorMethod));
}
TNode<BoolT> CodeStubAssembler::IsJSFunctionWithPrototypeSlot(
@@ -6142,6 +6168,20 @@ void CodeStubAssembler::ThrowTypeError(TNode<Context> context,
Unreachable();
}
+TNode<HeapObject> CodeStubAssembler::GetPendingMessage() {
+ TNode<ExternalReference> pending_message = ExternalConstant(
+ ExternalReference::address_of_pending_message(isolate()));
+ return UncheckedCast<HeapObject>(LoadFullTagged(pending_message));
+}
+void CodeStubAssembler::SetPendingMessage(TNode<HeapObject> message) {
+ CSA_DCHECK(this, Word32Or(IsTheHole(message),
+ InstanceTypeEqual(LoadInstanceType(message),
+ JS_MESSAGE_OBJECT_TYPE)));
+ TNode<ExternalReference> pending_message = ExternalConstant(
+ ExternalReference::address_of_pending_message(isolate()));
+ StoreFullTaggedNoWriteBarrier(pending_message, message);
+}
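GetPendingMessage/SetPendingMessage above read and write the isolate's pending-message slot through an external reference, with a debug check that only the hole or a JSMessageObject is ever stored. A minimal sketch (stand-in types and sentinel, not the real heap layout):

    #include <cassert>
    #include <cstdint>

    using Tagged = uintptr_t;

    namespace {
    Tagged g_pending_message_slot = 0;  // stand-in for the isolate-owned address
    constexpr Tagged kTheHole = 0;      // stand-in sentinel
    bool IsJSMessageObject(Tagged value) { return value != kTheHole; }  // stub
    }  // namespace

    Tagged GetPendingMessage() { return g_pending_message_slot; }

    void SetPendingMessage(Tagged message) {
      // Only the hole or a message object may be stored; the store needs no
      // write barrier because the slot lives off the managed heap.
      assert(message == kTheHole || IsJSMessageObject(message));
      g_pending_message_slot = message;
    }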
+
TNode<BoolT> CodeStubAssembler::InstanceTypeEqual(TNode<Int32T> instance_type,
int type) {
return Word32Equal(instance_type, Int32Constant(type));
@@ -6362,8 +6402,8 @@ TNode<BoolT> CodeStubAssembler::IsSeqOneByteStringInstanceType(
CSA_DCHECK(this, IsStringInstanceType(instance_type));
return Word32Equal(
Word32And(instance_type,
- Int32Constant(kStringRepresentationMask | kStringEncodingMask)),
- Int32Constant(kSeqStringTag | kOneByteStringTag));
+ Int32Constant(kStringRepresentationAndEncodingMask)),
+ Int32Constant(kSeqOneByteStringTag));
}
TNode<BoolT> CodeStubAssembler::IsConsStringInstanceType(
@@ -8089,6 +8129,25 @@ TNode<RawPtr<Uint16T>> CodeStubAssembler::ExternalTwoByteStringGetChars(
std::make_pair(MachineType::AnyTagged(), string)));
}
+TNode<RawPtr<Uint8T>> CodeStubAssembler::IntlAsciiCollationWeightsL1() {
+#ifdef V8_INTL_SUPPORT
+ TNode<RawPtrT> ptr =
+ ExternalConstant(ExternalReference::intl_ascii_collation_weights_l1());
+ return ReinterpretCast<RawPtr<Uint8T>>(ptr);
+#else
+ UNREACHABLE();
+#endif
+}
+TNode<RawPtr<Uint8T>> CodeStubAssembler::IntlAsciiCollationWeightsL3() {
+#ifdef V8_INTL_SUPPORT
+ TNode<RawPtrT> ptr =
+ ExternalConstant(ExternalReference::intl_ascii_collation_weights_l3());
+ return ReinterpretCast<RawPtr<Uint8T>>(ptr);
+#else
+ UNREACHABLE();
+#endif
+}
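The two accessors above hand CSA code raw pointers to the ICU ASCII collation weight tables (only when V8_INTL_SUPPORT is enabled). A hedged sketch of how such per-character weight tables could be consulted; the L1/L3 tie-breaking shown here is an assumption, not the exact fast path:

    #include <cstdint>

    // Compares two ASCII characters by primary (L1) collation weight, falling
    // back to the tertiary (L3) weight on a tie.
    int CompareAsciiByWeights(const uint8_t* l1, const uint8_t* l3,
                              uint8_t a, uint8_t b) {
      if (l1[a] != l1[b]) return l1[a] < l1[b] ? -1 : 1;
      if (l3[a] != l3[b]) return l3[a] < l3[b] ? -1 : 1;
      return 0;
    }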
+
void CodeStubAssembler::TryInternalizeString(
TNode<String> string, Label* if_index, TVariable<IntPtrT>* var_index,
Label* if_internalized, TVariable<Name>* var_internalized,
@@ -8561,7 +8620,9 @@ TNode<Object> CodeStubAssembler::BasicLoadNumberDictionaryElement(
TNode<Uint32T> details = LoadDetailsByKeyIndex(dictionary, index);
TNode<Uint32T> kind = DecodeWord32<PropertyDetails::KindField>(details);
// TODO(jkummerow): Support accessors without missing?
- GotoIfNot(Word32Equal(kind, Int32Constant(kData)), not_data);
+ GotoIfNot(
+ Word32Equal(kind, Int32Constant(static_cast<int>(PropertyKind::kData))),
+ not_data);
// Finally, load the value.
return LoadValueByKeyIndex(dictionary, index);
}
@@ -8607,7 +8668,7 @@ void CodeStubAssembler::InsertEntry<NameDictionary>(
StoreValueByKeyIndex<NameDictionary>(dictionary, index, value);
// Prepare details of the new property.
- PropertyDetails d(kData, NONE,
+ PropertyDetails d(PropertyKind::kData, NONE,
PropertyDetails::kConstIfDictConstnessTracking);
enum_index =
@@ -8677,10 +8738,10 @@ template <>
void CodeStubAssembler::Add(TNode<SwissNameDictionary> dictionary,
TNode<Name> key, TNode<Object> value,
Label* bailout) {
- PropertyDetails d(kData, NONE,
+ PropertyDetails d(PropertyKind::kData, NONE,
PropertyDetails::kConstIfDictConstnessTracking);
- PropertyDetails d_dont_enum(kData, DONT_ENUM,
+ PropertyDetails d_dont_enum(PropertyKind::kData, DONT_ENUM,
PropertyDetails::kConstIfDictConstnessTracking);
TNode<Uint8T> details_byte_enum =
UncheckedCast<Uint8T>(Uint32Constant(d.ToByte()));
@@ -9517,7 +9578,9 @@ TNode<Object> CodeStubAssembler::CallGetterIfAccessor(
Label done(this), if_accessor_info(this, Label::kDeferred);
TNode<Uint32T> kind = DecodeWord32<PropertyDetails::KindField>(details);
- GotoIf(Word32Equal(kind, Int32Constant(kData)), &done);
+ GotoIf(
+ Word32Equal(kind, Int32Constant(static_cast<int>(PropertyKind::kData))),
+ &done);
// Accessor case.
GotoIfNot(IsAccessorPair(CAST(value)), &if_accessor_info);
@@ -11399,7 +11462,7 @@ TNode<AllocationSite> CodeStubAssembler::CreateAllocationSiteInFeedbackVector(
// Store an empty fixed array for the code dependency.
StoreObjectFieldRoot(site, AllocationSite::kDependentCodeOffset,
- RootIndex::kEmptyWeakFixedArray);
+ DependentCode::kEmptyDependentCode);
// Link the object to the allocation site list
TNode<ExternalReference> site_list = ExternalConstant(
@@ -13830,8 +13893,8 @@ void CodeStubAssembler::ThrowIfArrayBufferViewBufferIsDetached(
TNode<RawPtrT> CodeStubAssembler::LoadJSArrayBufferBackingStorePtr(
TNode<JSArrayBuffer> array_buffer) {
- return LoadObjectField<RawPtrT>(array_buffer,
- JSArrayBuffer::kBackingStoreOffset);
+ return LoadCagedPointerFromObject(array_buffer,
+ JSArrayBuffer::kBackingStoreOffset);
}
TNode<JSArrayBuffer> CodeStubAssembler::LoadJSArrayBufferViewBuffer(
@@ -13858,7 +13921,7 @@ TNode<UintPtrT> CodeStubAssembler::LoadJSTypedArrayLengthAndCheckDetached(
TNode<JSArrayBuffer> buffer = LoadJSArrayBufferViewBuffer(typed_array);
Label variable_length(this), fixed_length(this), end(this);
- Branch(IsVariableLengthTypedArray(typed_array), &variable_length,
+ Branch(IsVariableLengthJSArrayBufferView(typed_array), &variable_length,
&fixed_length);
BIND(&variable_length);
{
@@ -13881,36 +13944,55 @@ TNode<UintPtrT> CodeStubAssembler::LoadJSTypedArrayLengthAndCheckDetached(
// ES #sec-integerindexedobjectlength
TNode<UintPtrT> CodeStubAssembler::LoadVariableLengthJSTypedArrayLength(
- TNode<JSTypedArray> array, TNode<JSArrayBuffer> buffer, Label* miss) {
+ TNode<JSTypedArray> array, TNode<JSArrayBuffer> buffer,
+ Label* detached_or_out_of_bounds) {
+ // byte_length already takes array's offset into account.
+ TNode<UintPtrT> byte_length = LoadVariableLengthJSArrayBufferViewByteLength(
+ array, buffer, detached_or_out_of_bounds);
+ TNode<IntPtrT> element_size =
+ RabGsabElementsKindToElementByteSize(LoadElementsKind(array));
+ return Unsigned(IntPtrDiv(Signed(byte_length), element_size));
+}
+
+TNode<UintPtrT>
+CodeStubAssembler::LoadVariableLengthJSArrayBufferViewByteLength(
+ TNode<JSArrayBufferView> array, TNode<JSArrayBuffer> buffer,
+ Label* detached_or_out_of_bounds) {
Label is_gsab(this), is_rab(this), end(this);
TVARIABLE(UintPtrT, result);
+ TNode<UintPtrT> array_byte_offset = LoadJSArrayBufferViewByteOffset(array);
Branch(IsSharedArrayBuffer(buffer), &is_gsab, &is_rab);
BIND(&is_gsab);
{
- // Non-length-tracking GSAB-backed TypedArrays shouldn't end up here.
- CSA_DCHECK(this, IsLengthTrackingTypedArray(array));
+ // Non-length-tracking GSAB-backed ArrayBufferViews shouldn't end up here.
+ CSA_DCHECK(this, IsLengthTrackingJSArrayBufferView(array));
// Read the byte length from the BackingStore.
- const TNode<ExternalReference> length_function = ExternalConstant(
- ExternalReference::length_tracking_gsab_backed_typed_array_length());
+ const TNode<ExternalReference> byte_length_function =
+ ExternalConstant(ExternalReference::gsab_byte_length());
TNode<ExternalReference> isolate_ptr =
ExternalConstant(ExternalReference::isolate_address(isolate()));
- result = UncheckedCast<UintPtrT>(
- CallCFunction(length_function, MachineType::UintPtr(),
+ TNode<UintPtrT> buffer_byte_length = UncheckedCast<UintPtrT>(
+ CallCFunction(byte_length_function, MachineType::UintPtr(),
std::make_pair(MachineType::Pointer(), isolate_ptr),
- std::make_pair(MachineType::AnyTagged(), array)));
+ std::make_pair(MachineType::AnyTagged(), buffer)));
+ // Since the SharedArrayBuffer can't shrink, and we've managed to create
+ // this JSArrayBufferDataView without throwing an exception, we know that
+ // buffer_byte_length >= array_byte_offset.
+ CSA_CHECK(this,
+ UintPtrGreaterThanOrEqual(buffer_byte_length, array_byte_offset));
+ result = UintPtrSub(buffer_byte_length, array_byte_offset);
Goto(&end);
}
BIND(&is_rab);
{
- GotoIf(IsDetachedBuffer(buffer), miss);
+ GotoIf(IsDetachedBuffer(buffer), detached_or_out_of_bounds);
TNode<UintPtrT> buffer_byte_length = LoadJSArrayBufferByteLength(buffer);
- TNode<UintPtrT> array_byte_offset = LoadJSArrayBufferViewByteOffset(array);
Label is_length_tracking(this), not_length_tracking(this);
- Branch(IsLengthTrackingTypedArray(array), &is_length_tracking,
+ Branch(IsLengthTrackingJSArrayBufferView(array), &is_length_tracking,
&not_length_tracking);
BIND(&is_length_tracking);
@@ -13918,16 +14000,8 @@ TNode<UintPtrT> CodeStubAssembler::LoadVariableLengthJSTypedArrayLength(
// The backing RAB might have been shrunk so that the start of the
// TypedArray is already out of bounds.
GotoIfNot(UintPtrLessThanOrEqual(array_byte_offset, buffer_byte_length),
- miss);
- // length = (buffer_byte_length - byte_offset) / element_size
- // Conversion to signed is OK since buffer_byte_length <
- // JSArrayBuffer::kMaxByteLength.
- TNode<IntPtrT> element_size =
- RabGsabElementsKindToElementByteSize(LoadElementsKind(array));
- TNode<IntPtrT> length =
- IntPtrDiv(Signed(UintPtrSub(buffer_byte_length, array_byte_offset)),
- element_size);
- result = Unsigned(length);
+ detached_or_out_of_bounds);
+ result = UintPtrSub(buffer_byte_length, array_byte_offset);
Goto(&end);
}
@@ -13940,8 +14014,8 @@ TNode<UintPtrT> CodeStubAssembler::LoadVariableLengthJSTypedArrayLength(
GotoIfNot(UintPtrGreaterThanOrEqual(
buffer_byte_length,
UintPtrAdd(array_byte_offset, array_byte_length)),
- miss);
- result = LoadJSTypedArrayLength(array);
+ detached_or_out_of_bounds);
+ result = array_byte_length;
Goto(&end);
}
}
@@ -13949,13 +14023,13 @@ TNode<UintPtrT> CodeStubAssembler::LoadVariableLengthJSTypedArrayLength(
return result.value();
}
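The refactor above splits the old length computation in two: LoadVariableLengthJSArrayBufferViewByteLength yields the view's byte length (GSAB or RAB path, already adjusted for the view's byte offset), and the typed-array element count is then just a division by the element size. In plain arithmetic, with stand-in types:

    #include <cstddef>

    // Length-tracking view: everything from the view's offset to the end of the
    // buffer. The caller has already ensured offset <= buffer_byte_length (the
    // GSAB path CSA_CHECKs it, the RAB path branches to detached_or_out_of_bounds).
    std::size_t ViewByteLength(std::size_t buffer_byte_length,
                               std::size_t view_byte_offset) {
      return buffer_byte_length - view_byte_offset;
    }

    std::size_t TypedArrayLength(std::size_t view_byte_length,
                                 std::size_t element_size) {
      return view_byte_length / element_size;
    }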
-void CodeStubAssembler::IsJSTypedArrayDetachedOrOutOfBounds(
- TNode<JSTypedArray> array, Label* detached_or_oob,
+void CodeStubAssembler::IsJSArrayBufferViewDetachedOrOutOfBounds(
+ TNode<JSArrayBufferView> array, Label* detached_or_oob,
Label* not_detached_nor_oob) {
TNode<JSArrayBuffer> buffer = LoadJSArrayBufferViewBuffer(array);
GotoIf(IsDetachedBuffer(buffer), detached_or_oob);
- GotoIfNot(IsVariableLengthTypedArray(array), not_detached_nor_oob);
+ GotoIfNot(IsVariableLengthJSArrayBufferView(array), not_detached_nor_oob);
GotoIf(IsSharedArrayBuffer(buffer), not_detached_nor_oob);
{
@@ -13963,7 +14037,7 @@ void CodeStubAssembler::IsJSTypedArrayDetachedOrOutOfBounds(
TNode<UintPtrT> array_byte_offset = LoadJSArrayBufferViewByteOffset(array);
Label length_tracking(this), not_length_tracking(this);
- Branch(IsLengthTrackingTypedArray(array), &length_tracking,
+ Branch(IsLengthTrackingJSArrayBufferView(array), &length_tracking,
&not_length_tracking);
BIND(&length_tracking);
@@ -14066,10 +14140,10 @@ TNode<JSArrayBuffer> CodeStubAssembler::GetTypedArrayBuffer(
Label call_runtime(this), done(this);
TVARIABLE(Object, var_result);
+ GotoIf(IsOnHeapTypedArray(array), &call_runtime);
+
TNode<JSArrayBuffer> buffer = LoadJSArrayBufferViewBuffer(array);
GotoIf(IsDetachedBuffer(buffer), &call_runtime);
- TNode<RawPtrT> backing_store = LoadJSArrayBufferBackingStorePtr(buffer);
- GotoIf(WordEqual(backing_store, IntPtrConstant(0)), &call_runtime);
var_result = buffer;
Goto(&done);
@@ -14332,24 +14406,30 @@ TNode<BoolT> CodeStubAssembler::NeedsAnyPromiseHooks(TNode<Uint32T> flags) {
return Word32NotEqual(flags, Int32Constant(0));
}
-TNode<Code> CodeStubAssembler::LoadBuiltin(TNode<Smi> builtin_id) {
+TNode<CodeT> CodeStubAssembler::LoadBuiltin(TNode<Smi> builtin_id) {
CSA_DCHECK(this, SmiBelow(builtin_id, SmiConstant(Builtins::kBuiltinCount)));
TNode<IntPtrT> offset =
ElementOffsetFromIndex(SmiToBInt(builtin_id), SYSTEM_POINTER_ELEMENTS);
- return CAST(BitcastWordToTagged(Load<RawPtrT>(
- ExternalConstant(ExternalReference::builtins_address(isolate())),
- offset)));
+ TNode<ExternalReference> table = ExternalConstant(
+#ifdef V8_EXTERNAL_CODE_SPACE
+ ExternalReference::builtins_code_data_container_table(isolate())
+#else
+ ExternalReference::builtins_table(isolate())
+#endif // V8_EXTERNAL_CODE_SPACE
+ ); // NOLINT(whitespace/parens)
+
+ return CAST(BitcastWordToTagged(Load<RawPtrT>(table, offset)));
}
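LoadBuiltin above now returns a CodeT and reads from either the builtins table or, with V8_EXTERNAL_CODE_SPACE, the table of code data containers; the lookup itself stays a pointer-sized indexed load. A stand-in sketch of that selection (dummy tables, not V8's):

    #include <cstdint>

    namespace {
    uintptr_t dummy_table[2] = {0x1000, 0x2000};  // stand-in builtin entries
    const uintptr_t* builtins_table() { return dummy_table; }
    const uintptr_t* builtins_code_data_container_table() { return dummy_table; }
    }  // namespace

    uintptr_t LoadBuiltinEntry(int builtin_id) {
    #ifdef V8_EXTERNAL_CODE_SPACE
      const uintptr_t* table = builtins_code_data_container_table();
    #else
      const uintptr_t* table = builtins_table();
    #endif
      // ElementOffsetFromIndex(builtin_id, SYSTEM_POINTER_ELEMENTS) boils down
      // to an index scaled by the system pointer size.
      return table[builtin_id];
    }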
-TNode<Code> CodeStubAssembler::GetSharedFunctionInfoCode(
+TNode<CodeT> CodeStubAssembler::GetSharedFunctionInfoCode(
TNode<SharedFunctionInfo> shared_info, TVariable<Uint16T>* data_type_out,
Label* if_compile_lazy) {
TNode<Object> sfi_data =
LoadObjectField(shared_info, SharedFunctionInfo::kFunctionDataOffset);
- TVARIABLE(Code, sfi_code);
+ TVARIABLE(CodeT, sfi_code);
Label done(this);
Label check_instance_type(this);
@@ -14378,6 +14458,8 @@ TNode<Code> CodeStubAssembler::GetSharedFunctionInfoCode(
CODET_TYPE,
UNCOMPILED_DATA_WITHOUT_PREPARSE_DATA_TYPE,
UNCOMPILED_DATA_WITH_PREPARSE_DATA_TYPE,
+ UNCOMPILED_DATA_WITHOUT_PREPARSE_DATA_WITH_JOB_TYPE,
+ UNCOMPILED_DATA_WITH_PREPARSE_DATA_AND_JOB_TYPE,
FUNCTION_TEMPLATE_INFO_TYPE,
#if V8_ENABLE_WEBASSEMBLY
WASM_CAPI_FUNCTION_DATA_TYPE,
@@ -14389,16 +14471,17 @@ TNode<Code> CodeStubAssembler::GetSharedFunctionInfoCode(
Label check_is_bytecode_array(this);
Label check_is_baseline_data(this);
Label check_is_asm_wasm_data(this);
- Label check_is_uncompiled_data_without_preparse_data(this);
- Label check_is_uncompiled_data_with_preparse_data(this);
+ Label check_is_uncompiled_data(this);
Label check_is_function_template_info(this);
Label check_is_interpreter_data(this);
Label check_is_wasm_function_data(this);
Label* case_labels[] = {
&check_is_bytecode_array,
&check_is_baseline_data,
- &check_is_uncompiled_data_without_preparse_data,
- &check_is_uncompiled_data_with_preparse_data,
+ &check_is_uncompiled_data,
+ &check_is_uncompiled_data,
+ &check_is_uncompiled_data,
+ &check_is_uncompiled_data,
&check_is_function_template_info,
#if V8_ENABLE_WEBASSEMBLY
&check_is_wasm_function_data,
@@ -14413,28 +14496,26 @@ TNode<Code> CodeStubAssembler::GetSharedFunctionInfoCode(
// IsBytecodeArray: Interpret bytecode
BIND(&check_is_bytecode_array);
- sfi_code = HeapConstant(BUILTIN_CODE(isolate(), InterpreterEntryTrampoline));
+ sfi_code = HeapConstant(BUILTIN_CODET(isolate(), InterpreterEntryTrampoline));
Goto(&done);
// IsBaselineData: Execute baseline code
BIND(&check_is_baseline_data);
{
TNode<CodeT> baseline_code = CAST(sfi_data);
- sfi_code = FromCodeT(baseline_code);
+ sfi_code = baseline_code;
Goto(&done);
}
// IsUncompiledDataWithPreparseData | IsUncompiledDataWithoutPreparseData:
// Compile lazy
- BIND(&check_is_uncompiled_data_with_preparse_data);
- Goto(&check_is_uncompiled_data_without_preparse_data);
- BIND(&check_is_uncompiled_data_without_preparse_data);
- sfi_code = HeapConstant(BUILTIN_CODE(isolate(), CompileLazy));
+ BIND(&check_is_uncompiled_data);
+ sfi_code = HeapConstant(BUILTIN_CODET(isolate(), CompileLazy));
Goto(if_compile_lazy ? if_compile_lazy : &done);
// IsFunctionTemplateInfo: API call
BIND(&check_is_function_template_info);
- sfi_code = HeapConstant(BUILTIN_CODE(isolate(), HandleApiCall));
+ sfi_code = HeapConstant(BUILTIN_CODET(isolate(), HandleApiCall));
Goto(&done);
// IsInterpreterData: Interpret bytecode
@@ -14445,7 +14526,7 @@ TNode<Code> CodeStubAssembler::GetSharedFunctionInfoCode(
{
TNode<CodeT> trampoline =
LoadInterpreterDataInterpreterTrampoline(CAST(sfi_data));
- sfi_code = FromCodeT(trampoline);
+ sfi_code = trampoline;
}
Goto(&done);
@@ -14458,7 +14539,7 @@ TNode<Code> CodeStubAssembler::GetSharedFunctionInfoCode(
// IsAsmWasmData: Instantiate using AsmWasmData
BIND(&check_is_asm_wasm_data);
- sfi_code = HeapConstant(BUILTIN_CODE(isolate(), InstantiateAsmJs));
+ sfi_code = HeapConstant(BUILTIN_CODET(isolate(), InstantiateAsmJs));
Goto(&done);
#endif // V8_ENABLE_WEBASSEMBLY
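GetSharedFunctionInfoCode above dispatches on the instance type of the SFI's function-data field: bytecode runs through the interpreter trampoline, all four uncompiled-data types now share a single CompileLazy label, template info goes to HandleApiCall, and baseline/interpreter data carry their code directly. A condensed sketch of that mapping (stand-in enums, not the real instance types):

    enum class DataKind {
      kBytecodeArray, kBaselineData, kUncompiledData,  // all 4 uncompiled kinds
      kFunctionTemplateInfo, kInterpreterData
    };

    enum class Builtin { kInterpreterEntryTrampoline, kCompileLazy, kHandleApiCall };

    struct Dispatch {
      bool use_builtin;  // false: the code object is read from the data itself
      Builtin builtin;
    };

    Dispatch CodeForDataKind(DataKind kind) {
      switch (kind) {
        case DataKind::kBytecodeArray:
          return {true, Builtin::kInterpreterEntryTrampoline};
        case DataKind::kUncompiledData:
          return {true, Builtin::kCompileLazy};
        case DataKind::kFunctionTemplateInfo:
          return {true, Builtin::kHandleApiCall};
        case DataKind::kBaselineData:
        case DataKind::kInterpreterData:
          return {false, Builtin::kCompileLazy};  // builtin field unused here
      }
      return {true, Builtin::kCompileLazy};
    }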
@@ -14482,8 +14563,7 @@ TNode<RawPtrT> CodeStubAssembler::GetCodeEntry(TNode<CodeT> code) {
TNode<JSFunction> CodeStubAssembler::AllocateFunctionWithMapAndContext(
TNode<Map> map, TNode<SharedFunctionInfo> shared_info,
TNode<Context> context) {
- // TODO(v8:11880): avoid roundtrips between cdc and code.
- const TNode<Code> code = GetSharedFunctionInfoCode(shared_info);
+ const TNode<CodeT> code = GetSharedFunctionInfoCode(shared_info);
// TODO(ishell): All the callers of this function pass map loaded from
// Context::STRICT_FUNCTION_WITHOUT_PROTOTYPE_MAP_INDEX. So we can remove
@@ -14502,7 +14582,7 @@ TNode<JSFunction> CodeStubAssembler::AllocateFunctionWithMapAndContext(
StoreObjectFieldNoWriteBarrier(fun, JSFunction::kSharedFunctionInfoOffset,
shared_info);
StoreObjectFieldNoWriteBarrier(fun, JSFunction::kContextOffset, context);
- StoreObjectField(fun, JSFunction::kCodeOffset, ToCodeT(code));
+ StoreObjectField(fun, JSFunction::kCodeOffset, code);
return CAST(fun);
}
diff --git a/deps/v8/src/codegen/code-stub-assembler.h b/deps/v8/src/codegen/code-stub-assembler.h
index 4d16af8a3d..109bd9cfa4 100644
--- a/deps/v8/src/codegen/code-stub-assembler.h
+++ b/deps/v8/src/codegen/code-stub-assembler.h
@@ -9,6 +9,7 @@
#include "src/base/macros.h"
#include "src/codegen/bailout-reason.h"
+#include "src/codegen/tnode.h"
#include "src/common/globals.h"
#include "src/common/message-template.h"
#include "src/compiler/code-assembler.h"
@@ -1042,32 +1043,29 @@ class V8_EXPORT_PRIVATE CodeStubAssembler
// Works only with V8_ENABLE_FORCE_SLOW_PATH compile time flag. Nop otherwise.
void GotoIfForceSlowPath(Label* if_true);
-#ifdef V8_CAGED_POINTERS
-
//
// Caged pointer related functionality.
//
// Load a caged pointer value from an object.
- TNode<CagedPtrT> LoadCagedPointerFromObject(TNode<HeapObject> object,
- int offset) {
+ TNode<RawPtrT> LoadCagedPointerFromObject(TNode<HeapObject> object,
+ int offset) {
return LoadCagedPointerFromObject(object, IntPtrConstant(offset));
}
- TNode<CagedPtrT> LoadCagedPointerFromObject(TNode<HeapObject> object,
- TNode<IntPtrT> offset);
+ TNode<RawPtrT> LoadCagedPointerFromObject(TNode<HeapObject> object,
+ TNode<IntPtrT> offset);
// Stored a caged pointer value to an object.
void StoreCagedPointerToObject(TNode<HeapObject> object, int offset,
- TNode<CagedPtrT> pointer) {
+ TNode<RawPtrT> pointer) {
StoreCagedPointerToObject(object, IntPtrConstant(offset), pointer);
}
void StoreCagedPointerToObject(TNode<HeapObject> object,
- TNode<IntPtrT> offset,
- TNode<CagedPtrT> pointer);
+ TNode<IntPtrT> offset, TNode<RawPtrT> pointer);
-#endif // V8_CAGED_POINTERS
+ TNode<RawPtrT> EmptyBackingStoreBufferConstant();
//
// ExternalPointerT-related functionality.
@@ -1147,14 +1145,14 @@ class V8_EXPORT_PRIVATE CodeStubAssembler
TNode<RawPtrT> LoadJSTypedArrayExternalPointerPtr(
TNode<JSTypedArray> holder) {
- return LoadObjectField<RawPtrT>(holder,
- JSTypedArray::kExternalPointerOffset);
+ return LoadCagedPointerFromObject(holder,
+ JSTypedArray::kExternalPointerOffset);
}
void StoreJSTypedArrayExternalPointerPtr(TNode<JSTypedArray> holder,
TNode<RawPtrT> value) {
- StoreObjectFieldNoWriteBarrier<RawPtrT>(
- holder, JSTypedArray::kExternalPointerOffset, value);
+ StoreCagedPointerToObject(holder, JSTypedArray::kExternalPointerOffset,
+ value);
}
// Load value from current parent frame by given offset in bytes.
@@ -1178,6 +1176,8 @@ class V8_EXPORT_PRIVATE CodeStubAssembler
TNode<IntPtrT> LoadBufferIntptr(TNode<RawPtrT> buffer, int offset) {
return LoadBufferData<IntPtrT>(buffer, offset);
}
+ TNode<Uint8T> LoadUint8Ptr(TNode<RawPtrT> ptr, TNode<IntPtrT> offset);
+
// Load a field from an object on the heap.
template <class T, typename std::enable_if<
std::is_convertible<TNode<T>, TNode<Object>>::value &&
@@ -2461,6 +2461,9 @@ class V8_EXPORT_PRIVATE CodeStubAssembler
base::Optional<TNode<Object>> arg1 = base::nullopt,
base::Optional<TNode<Object>> arg2 = base::nullopt);
+ TNode<HeapObject> GetPendingMessage();
+ void SetPendingMessage(TNode<HeapObject> message);
+
// Type checks.
// Check whether the map is for an object with special properties, such as a
// JSProxy or an object with interceptors.
@@ -2937,6 +2940,9 @@ class V8_EXPORT_PRIVATE CodeStubAssembler
TNode<RawPtr<Uint16T>> ExternalTwoByteStringGetChars(
TNode<ExternalTwoByteString> string);
+ TNode<RawPtr<Uint8T>> IntlAsciiCollationWeightsL1();
+ TNode<RawPtr<Uint8T>> IntlAsciiCollationWeightsL3();
+
// Performs a hash computation and string table lookup for the given string,
// and jumps to:
// - |if_index| if the string is an array index like "123"; |var_index|
@@ -3603,15 +3609,20 @@ class V8_EXPORT_PRIVATE CodeStubAssembler
// Helper for length tracking JSTypedArrays and JSTypedArrays backed by
// ResizableArrayBuffer.
TNode<UintPtrT> LoadVariableLengthJSTypedArrayLength(
- TNode<JSTypedArray> array, TNode<JSArrayBuffer> buffer, Label* miss);
+ TNode<JSTypedArray> array, TNode<JSArrayBuffer> buffer,
+ Label* detached_or_out_of_bounds);
// Helper for length tracking JSTypedArrays and JSTypedArrays backed by
// ResizableArrayBuffer.
TNode<UintPtrT> LoadVariableLengthJSTypedArrayByteLength(
TNode<Context> context, TNode<JSTypedArray> array,
TNode<JSArrayBuffer> buffer);
- void IsJSTypedArrayDetachedOrOutOfBounds(TNode<JSTypedArray> array,
- Label* detached_or_oob,
- Label* not_detached_nor_oob);
+ TNode<UintPtrT> LoadVariableLengthJSArrayBufferViewByteLength(
+ TNode<JSArrayBufferView> array, TNode<JSArrayBuffer> buffer,
+ Label* detached_or_out_of_bounds);
+
+ void IsJSArrayBufferViewDetachedOrOutOfBounds(TNode<JSArrayBufferView> array,
+ Label* detached_or_oob,
+ Label* not_detached_nor_oob);
TNode<IntPtrT> RabGsabElementsKindToElementByteSize(
TNode<Int32T> elementsKind);
@@ -3629,7 +3640,7 @@ class V8_EXPORT_PRIVATE CodeStubAssembler
ElementsKind kind = HOLEY_ELEMENTS);
// Load a builtin's code from the builtin array in the isolate.
- TNode<Code> LoadBuiltin(TNode<Smi> builtin_id);
+ TNode<CodeT> LoadBuiltin(TNode<Smi> builtin_id);
// Figure out the SFI's code object using its data field.
// If |data_type_out| is provided, the instance type of the function data will
@@ -3637,7 +3648,7 @@ class V8_EXPORT_PRIVATE CodeStubAssembler
// data_type_out will be set to 0.
// If |if_compile_lazy| is provided then the execution will go to the given
// label in case of an CompileLazy code object.
- TNode<Code> GetSharedFunctionInfoCode(
+ TNode<CodeT> GetSharedFunctionInfoCode(
TNode<SharedFunctionInfo> shared_info,
TVariable<Uint16T>* data_type_out = nullptr,
Label* if_compile_lazy = nullptr);
diff --git a/deps/v8/src/codegen/compilation-cache.cc b/deps/v8/src/codegen/compilation-cache.cc
index 861bd2904f..725f054c4e 100644
--- a/deps/v8/src/codegen/compilation-cache.cc
+++ b/deps/v8/src/codegen/compilation-cache.cc
@@ -136,12 +136,16 @@ bool HasOrigin(Isolate* isolate, Handle<SharedFunctionInfo> function_info,
return false;
}
- Handle<FixedArray> host_defined_options;
- if (!script_details.host_defined_options.ToHandle(&host_defined_options)) {
- host_defined_options = isolate->factory()->empty_fixed_array();
+ // TODO(cbruni, chromium:1244145): Remove once migrated to the context
+ Handle<Object> maybe_host_defined_options;
+ if (!script_details.host_defined_options.ToHandle(
+ &maybe_host_defined_options)) {
+ maybe_host_defined_options = isolate->factory()->empty_fixed_array();
}
-
- Handle<FixedArray> script_options(script->host_defined_options(), isolate);
+ Handle<FixedArray> host_defined_options =
+ Handle<FixedArray>::cast(maybe_host_defined_options);
+ Handle<FixedArray> script_options(
+ FixedArray::cast(script->host_defined_options()), isolate);
int length = host_defined_options->length();
if (length != script_options->length()) return false;
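The hunk above relaxes host_defined_options to a generic Object handle (pending the context migration noted in the TODO) and casts back to FixedArray before the origin comparison, which starts by rejecting a length mismatch. A stand-in sketch of that kind of comparison, assuming the remaining (elided) code compares the options element by element:

    #include <cstddef>
    #include <vector>

    bool OptionsMatch(const std::vector<int>& host_defined_options,
                      const std::vector<int>& script_options) {
      if (host_defined_options.size() != script_options.size()) return false;
      for (std::size_t i = 0; i < host_defined_options.size(); ++i) {
        if (host_defined_options[i] != script_options[i]) return false;
      }
      return true;
    }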
diff --git a/deps/v8/src/codegen/compiler.cc b/deps/v8/src/codegen/compiler.cc
index b7eafaf0d9..d603298897 100644
--- a/deps/v8/src/codegen/compiler.cc
+++ b/deps/v8/src/codegen/compiler.cc
@@ -36,7 +36,9 @@
#include "src/execution/local-isolate.h"
#include "src/execution/runtime-profiler.h"
#include "src/execution/vm-state-inl.h"
+#include "src/handles/handles.h"
#include "src/handles/maybe-handles.h"
+#include "src/handles/persistent-handles.h"
#include "src/heap/heap-inl.h"
#include "src/heap/local-factory-inl.h"
#include "src/heap/local-heap-inl.h"
@@ -551,7 +553,7 @@ void InstallInterpreterTrampolineCopy(
INTERPRETER_DATA_TYPE, AllocationType::kOld));
interpreter_data->set_bytecode_array(*bytecode_array);
- interpreter_data->set_interpreter_trampoline(*code);
+ interpreter_data->set_interpreter_trampoline(ToCodeT(*code));
shared_info->set_interpreter_data(*interpreter_data);
@@ -637,16 +639,18 @@ void UpdateSharedFunctionFlagsAfterCompilation(FunctionLiteral* literal,
SharedFunctionInfo shared_info) {
DCHECK_EQ(shared_info.language_mode(), literal->language_mode());
+ // These fields are all initialised in ParseInfo from the SharedFunctionInfo,
+ // and then set back on the literal after parse. Hence, they should already
+ // match.
+ DCHECK_EQ(shared_info.requires_instance_members_initializer(),
+ literal->requires_instance_members_initializer());
+ DCHECK_EQ(shared_info.class_scope_has_private_brand(),
+ literal->class_scope_has_private_brand());
+ DCHECK_EQ(shared_info.has_static_private_methods_or_accessors(),
+ literal->has_static_private_methods_or_accessors());
+
shared_info.set_has_duplicate_parameters(literal->has_duplicate_parameters());
shared_info.UpdateAndFinalizeExpectedNofPropertiesFromEstimate(literal);
- if (literal->dont_optimize_reason() != BailoutReason::kNoReason) {
- shared_info.DisableOptimization(literal->dont_optimize_reason());
- }
-
- shared_info.set_class_scope_has_private_brand(
- literal->class_scope_has_private_brand());
- shared_info.set_has_static_private_methods_or_accessors(
- literal->has_static_private_methods_or_accessors());
shared_info.SetScopeInfo(*literal->scope()->scope_info());
}
@@ -683,7 +687,7 @@ CompilationJob::Status FinalizeSingleUnoptimizedCompilationJob(
std::unique_ptr<UnoptimizedCompilationJob>
ExecuteSingleUnoptimizedCompilationJob(
- ParseInfo* parse_info, FunctionLiteral* literal,
+ ParseInfo* parse_info, FunctionLiteral* literal, Handle<Script> script,
AccountingAllocator* allocator,
std::vector<FunctionLiteral*>* eager_inner_literals,
LocalIsolate* local_isolate) {
@@ -703,7 +707,8 @@ ExecuteSingleUnoptimizedCompilationJob(
#endif
std::unique_ptr<UnoptimizedCompilationJob> job(
interpreter::Interpreter::NewCompilationJob(
- parse_info, literal, allocator, eager_inner_literals, local_isolate));
+ parse_info, literal, script, allocator, eager_inner_literals,
+ local_isolate));
if (job->ExecuteJob() != CompilationJob::SUCCEEDED) {
// Compilation failed, return null.
@@ -713,33 +718,6 @@ ExecuteSingleUnoptimizedCompilationJob(
return job;
}
-bool RecursivelyExecuteUnoptimizedCompilationJobs(
- ParseInfo* parse_info, FunctionLiteral* literal,
- AccountingAllocator* allocator,
- UnoptimizedCompilationJobList* function_jobs) {
- std::vector<FunctionLiteral*> eager_inner_literals;
-
- // We need to pass nullptr here because we are on the background
- // thread but don't have a LocalIsolate.
- DCHECK_NULL(LocalHeap::Current());
- std::unique_ptr<UnoptimizedCompilationJob> job =
- ExecuteSingleUnoptimizedCompilationJob(parse_info, literal, allocator,
- &eager_inner_literals, nullptr);
-
- if (!job) return false;
-
- // Recursively compile eager inner literals.
- for (FunctionLiteral* inner_literal : eager_inner_literals) {
- if (!RecursivelyExecuteUnoptimizedCompilationJobs(
- parse_info, inner_literal, allocator, function_jobs)) {
- return false;
- }
- }
-
- function_jobs->emplace_front(std::move(job));
- return true;
-}
-
template <typename IsolateT>
bool IterativelyExecuteAndFinalizeUnoptimizedCompilationJobs(
IsolateT* isolate, Handle<SharedFunctionInfo> outer_shared_info,
@@ -754,16 +732,28 @@ bool IterativelyExecuteAndFinalizeUnoptimizedCompilationJobs(
std::vector<FunctionLiteral*> functions_to_compile;
functions_to_compile.push_back(parse_info->literal());
+ bool is_first = true;
while (!functions_to_compile.empty()) {
FunctionLiteral* literal = functions_to_compile.back();
functions_to_compile.pop_back();
- Handle<SharedFunctionInfo> shared_info =
- Compiler::GetSharedFunctionInfo(literal, script, isolate);
+ Handle<SharedFunctionInfo> shared_info;
+ if (is_first) {
+ // We get the first SharedFunctionInfo directly as outer_shared_info
+ // rather than with Compiler::GetSharedFunctionInfo, to support
+ // placeholder SharedFunctionInfos that aren't on the script's SFI list.
+ DCHECK_EQ(literal->function_literal_id(),
+ outer_shared_info->function_literal_id());
+ shared_info = outer_shared_info;
+ is_first = false;
+ } else {
+ shared_info = Compiler::GetSharedFunctionInfo(literal, script, isolate);
+ }
+
if (shared_info->is_compiled()) continue;
std::unique_ptr<UnoptimizedCompilationJob> job =
- ExecuteSingleUnoptimizedCompilationJob(parse_info, literal, allocator,
- &functions_to_compile,
+ ExecuteSingleUnoptimizedCompilationJob(parse_info, literal, script,
+ allocator, &functions_to_compile,
isolate->AsLocalIsolate());
if (!job) return false;
@@ -809,44 +799,6 @@ bool IterativelyExecuteAndFinalizeUnoptimizedCompilationJobs(
return true;
}
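This is the pattern that replaces the recursive helper deleted above: instead of recursing into eager inner literals, the compiler keeps an explicit worklist that each executed job appends to, and skips literals whose SFI is already compiled. A self-contained sketch of the worklist shape, with stand-in types:

    #include <vector>

    struct Literal {
      std::vector<Literal*> eager_inner;  // inner literals that compile eagerly
      bool compiled = false;
    };

    // Stand-in for ExecuteSingleUnoptimizedCompilationJob: "compiles" one literal
    // and reports the eager inner literals it discovered.
    bool ExecuteJob(Literal* literal, std::vector<Literal*>* worklist) {
      literal->compiled = true;
      for (Literal* inner : literal->eager_inner) worklist->push_back(inner);
      return true;
    }

    bool CompileAll(Literal* outermost) {
      std::vector<Literal*> worklist{outermost};
      while (!worklist.empty()) {
        Literal* literal = worklist.back();
        worklist.pop_back();
        if (literal->compiled) continue;  // mirrors the is_compiled() early-out
        if (!ExecuteJob(literal, &worklist)) return false;
      }
      return true;
    }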
-bool FinalizeAllUnoptimizedCompilationJobs(
- ParseInfo* parse_info, Isolate* isolate, Handle<Script> script,
- UnoptimizedCompilationJobList* compilation_jobs,
- FinalizeUnoptimizedCompilationDataList*
- finalize_unoptimized_compilation_data_list) {
- DCHECK(AllowCompilation::IsAllowed(isolate));
- DCHECK(!compilation_jobs->empty());
-
- // TODO(rmcilroy): Clear native context in debug once AsmJS generates doesn't
- // rely on accessing native context during finalization.
-
- // Allocate scope infos for the literal.
- DeclarationScope::AllocateScopeInfos(parse_info, isolate);
-
- // Finalize the functions' compilation jobs.
- for (auto&& job : *compilation_jobs) {
- FunctionLiteral* literal = job->compilation_info()->literal();
- Handle<SharedFunctionInfo> shared_info =
- Compiler::GetSharedFunctionInfo(literal, script, isolate);
- // The inner function might be compiled already if compiling for debug.
- if (shared_info->is_compiled()) continue;
- UpdateSharedFunctionFlagsAfterCompilation(literal, *shared_info);
- if (FinalizeSingleUnoptimizedCompilationJob(
- job.get(), shared_info, isolate,
- finalize_unoptimized_compilation_data_list) !=
- CompilationJob::SUCCEEDED) {
- return false;
- }
- }
-
- // Report any warnings generated during compilation.
- if (parse_info->pending_error_handler()->has_pending_warnings()) {
- parse_info->pending_error_handler()->PrepareWarnings(isolate);
- }
-
- return true;
-}
-
bool FinalizeDeferredUnoptimizedCompilationJobs(
Isolate* isolate, Handle<Script> script,
DeferredFinalizationJobDataList* deferred_jobs,
@@ -1072,9 +1024,9 @@ Handle<Code> ContinuationForConcurrentOptimization(
}
return handle(function->code(), isolate);
} else if (function->shared().HasBaselineCode()) {
- Code baseline_code = function->shared().baseline_code(kAcquireLoad);
+ CodeT baseline_code = function->shared().baseline_code(kAcquireLoad);
function->set_code(baseline_code);
- return handle(baseline_code, isolate);
+ return handle(FromCodeT(baseline_code), isolate);
}
DCHECK(function->ActiveTierIsIgnition());
return BUILTIN_CODE(isolate, InterpreterEntryTrampoline);
@@ -1102,7 +1054,7 @@ MaybeHandle<Code> GetOptimizedCode(
if (function->HasOptimizationMarker()) function->ClearOptimizationMarker();
if (shared->optimization_disabled() &&
- shared->disable_optimization_reason() == BailoutReason::kNeverOptimize) {
+ shared->disabled_optimization_reason() == BailoutReason::kNeverOptimize) {
return {};
}
@@ -1211,7 +1163,12 @@ bool PreparePendingException(IsolateT* isolate, ParseInfo* parse_info) {
bool FailWithPreparedPendingException(
Isolate* isolate, Handle<Script> script,
- const PendingCompilationErrorHandler* pending_error_handler) {
+ const PendingCompilationErrorHandler* pending_error_handler,
+ Compiler::ClearExceptionFlag flag = Compiler::KEEP_EXCEPTION) {
+ if (flag == Compiler::CLEAR_EXCEPTION) {
+ return FailAndClearPendingException(isolate);
+ }
+
if (!isolate->has_pending_exception()) {
if (pending_error_handler->has_pending_error()) {
pending_error_handler->ReportErrors(isolate, script);
@@ -1225,13 +1182,9 @@ bool FailWithPreparedPendingException(
bool FailWithPendingException(Isolate* isolate, Handle<Script> script,
ParseInfo* parse_info,
Compiler::ClearExceptionFlag flag) {
- if (flag == Compiler::CLEAR_EXCEPTION) {
- return FailAndClearPendingException(isolate);
- }
-
PreparePendingException(isolate, parse_info);
- return FailWithPreparedPendingException(isolate, script,
- parse_info->pending_error_handler());
+ return FailWithPreparedPendingException(
+ isolate, script, parse_info->pending_error_handler(), flag);
}
void FinalizeUnoptimizedCompilation(
@@ -1293,24 +1246,6 @@ void FinalizeUnoptimizedScriptCompilation(
script->set_compilation_state(Script::COMPILATION_STATE_COMPILED);
- UnoptimizedCompileState::ParallelTasks* parallel_tasks =
- compile_state->parallel_tasks();
- if (parallel_tasks) {
- LazyCompileDispatcher* dispatcher = parallel_tasks->dispatcher();
- for (auto& it : *parallel_tasks) {
- FunctionLiteral* literal = it.first;
- LazyCompileDispatcher::JobId job_id = it.second;
- MaybeHandle<SharedFunctionInfo> maybe_shared_for_task =
- Script::FindSharedFunctionInfo(script, isolate, literal);
- Handle<SharedFunctionInfo> shared_for_task;
- if (maybe_shared_for_task.ToHandle(&shared_for_task)) {
- dispatcher->RegisterSharedFunctionInfo(job_id, *shared_for_task);
- } else {
- dispatcher->AbortJob(job_id);
- }
- }
- }
-
if (isolate->NeedsSourcePositionsForProfiling()) {
Script::InitLineEnds(isolate, script);
}
@@ -1373,8 +1308,6 @@ MaybeHandle<SharedFunctionInfo> CompileToplevel(
TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.compile"),
parse_info->flags().is_eval() ? "V8.CompileEval" : "V8.Compile");
- // Prepare and execute compilation of the outer-most function.
-
// Create the SharedFunctionInfo and add it to the script's list.
Handle<SharedFunctionInfo> shared_info =
CreateTopLevelSharedFunctionInfo(parse_info, script, isolate);
@@ -1382,6 +1315,7 @@ MaybeHandle<SharedFunctionInfo> CompileToplevel(
FinalizeUnoptimizedCompilationDataList
finalize_unoptimized_compilation_data_list;
+ // Prepare and execute compilation of the outer-most function.
if (!IterativelyExecuteAndFinalizeUnoptimizedCompilationJobs(
isolate, shared_info, script, parse_info, isolate->allocator(),
is_compiled_scope, &finalize_unoptimized_compilation_data_list,
@@ -1418,57 +1352,6 @@ RuntimeCallCounterId RuntimeCallCounterIdForCompileBackground(
}
#endif // V8_RUNTIME_CALL_STATS
-MaybeHandle<SharedFunctionInfo> CompileAndFinalizeOnBackgroundThread(
- ParseInfo* parse_info, AccountingAllocator* allocator,
- Handle<Script> script, LocalIsolate* isolate,
- FinalizeUnoptimizedCompilationDataList*
- finalize_unoptimized_compilation_data_list,
- DeferredFinalizationJobDataList* jobs_to_retry_finalization_on_main_thread,
- IsCompiledScope* is_compiled_scope) {
- TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.compile"),
- "V8.CompileCodeBackground");
- RCS_SCOPE(parse_info->runtime_call_stats(),
- RuntimeCallCounterIdForCompileBackground(parse_info));
-
- Handle<SharedFunctionInfo> shared_info =
- CreateTopLevelSharedFunctionInfo(parse_info, script, isolate);
-
- if (!IterativelyExecuteAndFinalizeUnoptimizedCompilationJobs(
- isolate, shared_info, script, parse_info, allocator,
- is_compiled_scope, finalize_unoptimized_compilation_data_list,
- jobs_to_retry_finalization_on_main_thread)) {
- return kNullMaybeHandle;
- }
-
- // Character stream shouldn't be used again.
- parse_info->ResetCharacterStream();
-
- return shared_info;
-}
-
-// TODO(leszeks): Remove this once off-thread finalization is always on.
-void CompileOnBackgroundThread(ParseInfo* parse_info,
- AccountingAllocator* allocator,
- UnoptimizedCompilationJobList* jobs) {
- DisallowHeapAccess no_heap_access;
- TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.compile"),
- "V8.CompileCodeBackground");
- RCS_SCOPE(parse_info->runtime_call_stats(),
- RuntimeCallCounterIdForCompileBackground(parse_info));
-
- // Generate the unoptimized bytecode or asm-js data.
- DCHECK(jobs->empty());
-
- bool success = RecursivelyExecuteUnoptimizedCompilationJobs(
- parse_info, parse_info->literal(), allocator, jobs);
-
- USE(success);
- DCHECK_EQ(success, !jobs->empty());
-
- // Character stream shouldn't be used again.
- parse_info->ResetCharacterStream();
-}
-
} // namespace
CompilationHandleScope::~CompilationHandleScope() {
@@ -1494,204 +1377,327 @@ DeferredFinalizationJobData::DeferredFinalizationJobData(
BackgroundCompileTask::BackgroundCompileTask(ScriptStreamingData* streamed_data,
Isolate* isolate, ScriptType type)
- : flags_(UnoptimizedCompileFlags::ForToplevelCompile(
+ : isolate_for_local_isolate_(isolate),
+ flags_(UnoptimizedCompileFlags::ForToplevelCompile(
isolate, true, construct_language_mode(FLAG_use_strict),
REPLMode::kNo, type, FLAG_lazy_streaming)),
- compile_state_(isolate),
- info_(std::make_unique<ParseInfo>(isolate, flags_, &compile_state_)),
- isolate_for_local_isolate_(isolate),
- start_position_(0),
- end_position_(0),
- function_literal_id_(kFunctionLiteralIdTopLevel),
+ character_stream_(ScannerStream::For(streamed_data->source_stream.get(),
+ streamed_data->encoding)),
stack_size_(i::FLAG_stack_size),
worker_thread_runtime_call_stats_(
isolate->counters()->worker_thread_runtime_call_stats()),
timer_(isolate->counters()->compile_script_on_background()),
- language_mode_(info_->language_mode()) {
+ start_position_(0),
+ end_position_(0),
+ function_literal_id_(kFunctionLiteralIdTopLevel),
+ language_mode_(flags_.outer_language_mode()) {
VMState<PARSER> state(isolate);
- // Prepare the data for the internalization phase and compilation phase, which
- // will happen in the main thread after parsing.
-
LOG(isolate, ScriptEvent(Logger::ScriptEventType::kStreamingCompile,
- info_->flags().script_id()));
-
- std::unique_ptr<Utf16CharacterStream> stream(ScannerStream::For(
- streamed_data->source_stream.get(), streamed_data->encoding));
- info_->set_character_stream(std::move(stream));
+ flags_.script_id()));
}
BackgroundCompileTask::BackgroundCompileTask(
- const ParseInfo* outer_parse_info, const AstRawString* function_name,
- const FunctionLiteral* function_literal,
+ Isolate* isolate, Handle<SharedFunctionInfo> shared_info,
+ std::unique_ptr<Utf16CharacterStream> character_stream,
WorkerThreadRuntimeCallStats* worker_thread_runtime_stats,
TimedHistogram* timer, int max_stack_size)
- : flags_(UnoptimizedCompileFlags::ForToplevelFunction(
- outer_parse_info->flags(), function_literal)),
- compile_state_(*outer_parse_info->state()),
- info_(ParseInfo::ForToplevelFunction(flags_, &compile_state_,
- function_literal, function_name)),
- isolate_for_local_isolate_(nullptr),
- start_position_(function_literal->start_position()),
- end_position_(function_literal->end_position()),
- function_literal_id_(function_literal->function_literal_id()),
+ : isolate_for_local_isolate_(isolate),
+ // TODO(leszeks): Create this from parent compile flags, to avoid
+ // accessing the Isolate.
+ flags_(
+ UnoptimizedCompileFlags::ForFunctionCompile(isolate, *shared_info)),
+ character_stream_(std::move(character_stream)),
stack_size_(max_stack_size),
worker_thread_runtime_call_stats_(worker_thread_runtime_stats),
timer_(timer),
- language_mode_(info_->language_mode()) {
- DCHECK_EQ(outer_parse_info->parameters_end_pos(), kNoSourcePosition);
- DCHECK_NULL(outer_parse_info->extension());
+ input_shared_info_(shared_info),
+ start_position_(shared_info->StartPosition()),
+ end_position_(shared_info->EndPosition()),
+ function_literal_id_(shared_info->function_literal_id()),
+ language_mode_(flags_.outer_language_mode()) {
+ DCHECK(!shared_info->is_toplevel());
- DCHECK(!function_literal->is_toplevel());
+ character_stream_->Seek(start_position_);
- // Clone the character stream so both can be accessed independently.
- std::unique_ptr<Utf16CharacterStream> character_stream =
- outer_parse_info->character_stream()->Clone();
- character_stream->Seek(start_position_);
- info_->set_character_stream(std::move(character_stream));
-
- // Get preparsed scope data from the function literal.
- if (function_literal->produced_preparse_data()) {
- ZonePreparseData* serialized_data =
- function_literal->produced_preparse_data()->Serialize(info_->zone());
- info_->set_consumed_preparse_data(
- ConsumedPreparseData::For(info_->zone(), serialized_data));
- }
+ // Get the script out of the outer ParseInfo and turn it into a persistent
+ // handle we can transfer to the background thread.
+ persistent_handles_ = std::make_unique<PersistentHandles>(isolate);
+ input_shared_info_ = persistent_handles_->NewHandle(shared_info);
}
BackgroundCompileTask::~BackgroundCompileTask() = default;
namespace {
-// A scope object that ensures a parse info's runtime call stats and stack limit
-// are set correctly during worker-thread compile, and restores it after going
-// out of scope.
-class V8_NODISCARD OffThreadParseInfoScope {
- public:
- OffThreadParseInfoScope(
- ParseInfo* parse_info,
- WorkerThreadRuntimeCallStats* worker_thread_runtime_stats, int stack_size)
- : parse_info_(parse_info),
- original_stack_limit_(parse_info_->stack_limit()),
- original_runtime_call_stats_(parse_info_->runtime_call_stats()),
- worker_thread_scope_(worker_thread_runtime_stats) {
- parse_info_->SetPerThreadState(GetCurrentStackPosition() - stack_size * KB,
- worker_thread_scope_.Get());
+void SetScriptFieldsFromDetails(Isolate* isolate, Script script,
+ ScriptDetails script_details,
+ DisallowGarbageCollection* no_gc) {
+ Handle<Object> script_name;
+ if (script_details.name_obj.ToHandle(&script_name)) {
+ script.set_name(*script_name);
+ script.set_line_offset(script_details.line_offset);
+ script.set_column_offset(script_details.column_offset);
}
-
- OffThreadParseInfoScope(const OffThreadParseInfoScope&) = delete;
- OffThreadParseInfoScope& operator=(const OffThreadParseInfoScope&) = delete;
-
- ~OffThreadParseInfoScope() {
- DCHECK_NOT_NULL(parse_info_);
- parse_info_->SetPerThreadState(original_stack_limit_,
- original_runtime_call_stats_);
+ // The API can provide a source map URL, but a source map URL could also have
+  // been inferred by the parser from a magic comment. The latter takes
+  // precedence over the former, so we don't want to override the source mapping
+ // URL if it already exists.
+ Handle<Object> source_map_url;
+ if (script_details.source_map_url.ToHandle(&source_map_url) &&
+ script.source_mapping_url(isolate).IsUndefined(isolate)) {
+ script.set_source_mapping_url(*source_map_url);
}
-
- private:
- ParseInfo* parse_info_;
- uintptr_t original_stack_limit_;
- RuntimeCallStats* original_runtime_call_stats_;
- WorkerThreadRuntimeCallStatsScope worker_thread_scope_;
-};
+ Handle<Object> host_defined_options;
+ if (script_details.host_defined_options.ToHandle(&host_defined_options)) {
+ // TODO(cbruni, chromium:1244145): Remove once migrated to the context.
+ if (host_defined_options->IsFixedArray()) {
+ script.set_host_defined_options(FixedArray::cast(*host_defined_options));
+ }
+ }
+}
} // namespace
void BackgroundCompileTask::Run() {
+ WorkerThreadRuntimeCallStatsScope worker_thread_scope(
+ worker_thread_runtime_call_stats_);
+
+ LocalIsolate isolate(isolate_for_local_isolate_, ThreadKind::kBackground,
+ worker_thread_scope.Get());
+ UnparkedScope unparked_scope(&isolate);
+ LocalHandleScope handle_scope(&isolate);
+
+ ReusableUnoptimizedCompileState reusable_state(&isolate);
+
+ Run(&isolate, &reusable_state);
+}
+
+void BackgroundCompileTask::Run(
+ LocalIsolate* isolate, ReusableUnoptimizedCompileState* reusable_state) {
TimedHistogramScope timer(timer_);
- base::Optional<OffThreadParseInfoScope> off_thread_scope(
- base::in_place, info_.get(), worker_thread_runtime_call_stats_,
- stack_size_);
+
TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.compile"),
"BackgroundCompileTask::Run");
- RCS_SCOPE(info_->runtime_call_stats(),
- RuntimeCallCounterId::kCompileBackgroundCompileTask);
+ RCS_SCOPE(isolate, RuntimeCallCounterId::kCompileBackgroundCompileTask);
+
+ bool toplevel_script_compilation = flags_.is_toplevel();
+
+ ParseInfo info(isolate, flags_, &compile_state_, reusable_state,
+ GetCurrentStackPosition() - stack_size_ * KB);
+ info.set_character_stream(std::move(character_stream_));
+
+ if (toplevel_script_compilation) {
+ DCHECK_NULL(persistent_handles_);
+ DCHECK(input_shared_info_.is_null());
+
+ // We don't have the script source, origin, or details yet, so use default
+ // values for them. These will be fixed up during the main-thread merge.
+ Handle<Script> script = info.CreateScript(
+ isolate, isolate->factory()->empty_string(), kNullMaybeHandle,
+ ScriptOriginOptions(false, false, false, info.flags().is_module()));
+ script_ = isolate->heap()->NewPersistentHandle(script);
+ } else {
+ DCHECK_NOT_NULL(persistent_handles_);
+ isolate->heap()->AttachPersistentHandles(std::move(persistent_handles_));
+ Handle<SharedFunctionInfo> shared_info =
+ input_shared_info_.ToHandleChecked();
+ script_ = isolate->heap()->NewPersistentHandle(
+ Script::cast(shared_info->script()));
+ info.CheckFlagsForFunctionFromScript(*script_);
+
+ {
+ SharedStringAccessGuardIfNeeded access_guard(isolate);
+ info.set_function_name(info.ast_value_factory()->GetString(
+ shared_info->Name(), access_guard));
+ }
+
+ // Get preparsed scope data from the function literal.
+ if (shared_info->HasUncompiledDataWithPreparseData()) {
+ info.set_consumed_preparse_data(ConsumedPreparseData::For(
+ isolate, handle(shared_info->uncompiled_data_with_preparse_data()
+ .preparse_data(isolate),
+ isolate)));
+ }
+ }
// Update the character stream's runtime call stats.
- info_->character_stream()->set_runtime_call_stats(
- info_->runtime_call_stats());
+ info.character_stream()->set_runtime_call_stats(info.runtime_call_stats());
// Parser needs to stay alive for finalizing the parsing on the main
// thread.
- parser_.reset(new Parser(info_.get()));
- parser_->InitializeEmptyScopeChain(info_.get());
+ Parser parser(isolate, &info, script_);
+ if (flags().is_toplevel()) {
+ parser.InitializeEmptyScopeChain(&info);
+ } else {
+ // TODO(leszeks): Consider keeping Scope zones alive between compile tasks
+ // and passing the Scope for the FunctionLiteral through here directly
+ // without copying/deserializing.
+ Handle<SharedFunctionInfo> shared_info =
+ input_shared_info_.ToHandleChecked();
+ MaybeHandle<ScopeInfo> maybe_outer_scope_info;
+ if (shared_info->HasOuterScopeInfo()) {
+ maybe_outer_scope_info =
+ handle(shared_info->GetOuterScopeInfo(), isolate);
+ }
+ parser.DeserializeScopeChain(
+ isolate, &info, maybe_outer_scope_info,
+ Scope::DeserializationMode::kIncludingVariables);
+ }
- parser_->ParseOnBackground(info_.get(), start_position_, end_position_,
- function_literal_id_);
+ parser.ParseOnBackground(isolate, &info, start_position_, end_position_,
+ function_literal_id_);
+ parser.UpdateStatistics(script_, &use_counts_, &total_preparse_skipped_);
// Save the language mode.
- language_mode_ = info_->language_mode();
+ language_mode_ = info.language_mode();
- if (!FLAG_finalize_streaming_on_background) {
- if (info_->literal() != nullptr) {
- CompileOnBackgroundThread(info_.get(), compile_state_.allocator(),
- &compilation_jobs_);
+ TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.compile"),
+ "V8.CompileCodeBackground");
+ RCS_SCOPE(info.runtime_call_stats(),
+ RuntimeCallCounterIdForCompileBackground(&info));
+
+ MaybeHandle<SharedFunctionInfo> maybe_result;
+ if (info.literal() != nullptr) {
+ Handle<SharedFunctionInfo> shared_info;
+ if (toplevel_script_compilation) {
+ shared_info = CreateTopLevelSharedFunctionInfo(&info, script_, isolate);
+ } else {
+ // Clone into a placeholder SFI for storing the results.
+ shared_info = isolate->factory()->CloneSharedFunctionInfo(
+ input_shared_info_.ToHandleChecked());
}
- } else {
- DCHECK(info_->flags().is_toplevel());
- {
- LocalIsolate isolate(isolate_for_local_isolate_, ThreadKind::kBackground);
- UnparkedScope unparked_scope(&isolate);
- LocalHandleScope handle_scope(&isolate);
-
- info_->ast_value_factory()->Internalize(&isolate);
-
- // We don't have the script source, origin, or details yet, so use default
- // values for them. These will be fixed up during the main-thread merge.
- Handle<Script> script = info_->CreateScript(
- &isolate, isolate.factory()->empty_string(), kNullMaybeHandle,
- ScriptOriginOptions(false, false, false, info_->flags().is_module()));
-
- parser_->UpdateStatistics(script, use_counts_, &total_preparse_skipped_);
- parser_->HandleSourceURLComments(&isolate, script);
-
- MaybeHandle<SharedFunctionInfo> maybe_result;
- if (info_->literal() != nullptr) {
- maybe_result = CompileAndFinalizeOnBackgroundThread(
- info_.get(), compile_state_.allocator(), script, &isolate,
- &finalize_unoptimized_compilation_data_,
- &jobs_to_retry_finalization_on_main_thread_, &is_compiled_scope_);
- } else {
- DCHECK(compile_state_.pending_error_handler()->has_pending_error());
- PreparePendingException(&isolate, info_.get());
- }
+ if (IterativelyExecuteAndFinalizeUnoptimizedCompilationJobs(
+ isolate, shared_info, script_, &info, reusable_state->allocator(),
+ &is_compiled_scope_, &finalize_unoptimized_compilation_data_,
+ &jobs_to_retry_finalization_on_main_thread_)) {
+ maybe_result = shared_info;
+ }
+ }
- outer_function_sfi_ =
- isolate.heap()->NewPersistentMaybeHandle(maybe_result);
- script_ = isolate.heap()->NewPersistentHandle(script);
+ if (maybe_result.is_null()) {
+ PreparePendingException(isolate, &info);
+ }
- persistent_handles_ = isolate.heap()->DetachPersistentHandles();
- }
+ outer_function_sfi_ = isolate->heap()->NewPersistentMaybeHandle(maybe_result);
+ DCHECK(isolate->heap()->ContainsPersistentHandle(script_.location()));
+ persistent_handles_ = isolate->heap()->DetachPersistentHandles();
- {
- TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.compile"),
- "V8.FinalizeCodeBackground.ReleaseParser");
- DCHECK_EQ(language_mode_, info_->language_mode());
- off_thread_scope.reset();
- parser_.reset();
- info_.reset();
- }
+ // Make sure the language mode didn't change.
+ DCHECK_EQ(language_mode_, info.language_mode());
+}
+
+MaybeHandle<SharedFunctionInfo> BackgroundCompileTask::FinalizeScript(
+ Isolate* isolate, Handle<String> source,
+ const ScriptDetails& script_details) {
+ ScriptOriginOptions origin_options = script_details.origin_options;
+
+ DCHECK(flags_.is_toplevel());
+ DCHECK_EQ(flags_.is_module(), origin_options.IsModule());
+
+ MaybeHandle<SharedFunctionInfo> maybe_result;
+
+ // We might not have been able to finalize all jobs on the background
+ // thread (e.g. asm.js jobs), so finalize those deferred jobs now.
+ if (FinalizeDeferredUnoptimizedCompilationJobs(
+ isolate, script_, &jobs_to_retry_finalization_on_main_thread_,
+ compile_state_.pending_error_handler(),
+ &finalize_unoptimized_compilation_data_)) {
+ maybe_result = outer_function_sfi_;
}
+
+ script_->set_source(*source);
+ script_->set_origin_options(origin_options);
+
+ // The one post-hoc fix-up: Add the script to the script list.
+ Handle<WeakArrayList> scripts = isolate->factory()->script_list();
+ scripts =
+ WeakArrayList::Append(isolate, scripts, MaybeObjectHandle::Weak(script_));
+ isolate->heap()->SetRootScriptList(*scripts);
+
+ // Set the script fields after finalization, to keep this path the same
+ // between main-thread and off-thread finalization.
+ {
+ DisallowGarbageCollection no_gc;
+ SetScriptFieldsFromDetails(isolate, *script_, script_details, &no_gc);
+ LOG(isolate, ScriptDetails(*script_));
+ }
+
+ ReportStatistics(isolate);
+
+ Handle<SharedFunctionInfo> result;
+ if (!maybe_result.ToHandle(&result)) {
+ FailWithPreparedPendingException(isolate, script_,
+ compile_state_.pending_error_handler());
+ return kNullMaybeHandle;
+ }
+
+ FinalizeUnoptimizedScriptCompilation(isolate, script_, flags_,
+ &compile_state_,
+ finalize_unoptimized_compilation_data_);
+
+ return handle(*result, isolate);
}
-MaybeHandle<SharedFunctionInfo> BackgroundCompileTask::GetOuterFunctionSfi(
- Isolate* isolate) {
- // outer_function_sfi_ is a persistent Handle, tied to the lifetime of the
- // persistent_handles_ member, so create a new Handle to let it outlive
- // the BackgroundCompileTask.
+bool BackgroundCompileTask::FinalizeFunction(
+ Isolate* isolate, Compiler::ClearExceptionFlag flag) {
+ DCHECK(!flags_.is_toplevel());
+
+ MaybeHandle<SharedFunctionInfo> maybe_result;
+ Handle<SharedFunctionInfo> input_shared_info =
+ input_shared_info_.ToHandleChecked();
+
+ // The UncompiledData on the input SharedFunctionInfo will have a pointer to
+ // the LazyCompileDispatcher Job that launched this task, which will now be
+ // considered complete, so clear that regardless of whether the finalize
+ // succeeds or not.
+ input_shared_info->ClearUncompiledDataJobPointer();
+
+ // We might not have been able to finalize all jobs on the background
+ // thread (e.g. asm.js jobs), so finalize those deferred jobs now.
+ if (FinalizeDeferredUnoptimizedCompilationJobs(
+ isolate, script_, &jobs_to_retry_finalization_on_main_thread_,
+ compile_state_.pending_error_handler(),
+ &finalize_unoptimized_compilation_data_)) {
+ maybe_result = outer_function_sfi_;
+ }
+
+ ReportStatistics(isolate);
+
Handle<SharedFunctionInfo> result;
- if (outer_function_sfi_.ToHandle(&result)) {
- return handle(*result, isolate);
+ if (!maybe_result.ToHandle(&result)) {
+ FailWithPreparedPendingException(
+ isolate, script_, compile_state_.pending_error_handler(), flag);
+ return false;
}
- return kNullMaybeHandle;
+
+ FinalizeUnoptimizedCompilation(isolate, script_, flags_, &compile_state_,
+ finalize_unoptimized_compilation_data_);
+
+ // Move the compiled data from the placeholder SFI back to the real SFI.
+ input_shared_info->CopyFrom(*result);
+
+ return true;
}
-Handle<Script> BackgroundCompileTask::GetScript(Isolate* isolate) {
- // script_ is a persistent Handle, tied to the lifetime of the
- // persistent_handles_ member, so create a new Handle to let it outlive
- // the BackgroundCompileTask.
- return handle(*script_, isolate);
+void BackgroundCompileTask::AbortFunction() {
+ // The UncompiledData on the input SharedFunctionInfo will have a pointer to
+ // the LazyCompileDispatcher Job that launched this task, which is about to be
+ // deleted, so clear that pointer to prevent the SharedFunctionInfo from
+ // pointing to deallocated memory.
+ input_shared_info_.ToHandleChecked()->ClearUncompiledDataJobPointer();
+}
+
+void BackgroundCompileTask::ReportStatistics(Isolate* isolate) {
+ // Update use-counts.
+ for (auto feature : use_counts_) {
+ isolate->CountUsage(feature);
+ }
+ if (total_preparse_skipped_ > 0) {
+ isolate->counters()->total_preparse_skipped()->Increment(
+ total_preparse_skipped_);
+ }
}
BackgroundDeserializeTask::BackgroundDeserializeTask(
@@ -1777,9 +1783,13 @@ bool Compiler::CollectSourcePositions(Isolate* isolate,
UnoptimizedCompileFlags flags =
UnoptimizedCompileFlags::ForFunctionCompile(isolate, *shared_info);
flags.set_collect_source_positions(true);
+ // Prevent parallel tasks from being spawned by this job.
+ flags.set_post_parallel_compile_tasks_for_eager_toplevel(false);
+ flags.set_post_parallel_compile_tasks_for_lazy(false);
- UnoptimizedCompileState compile_state(isolate);
- ParseInfo parse_info(isolate, flags, &compile_state);
+ UnoptimizedCompileState compile_state;
+ ReusableUnoptimizedCompileState reusable_state(isolate);
+ ParseInfo parse_info(isolate, flags, &compile_state, &reusable_state);
// Parse and update ParseInfo with the results. Don't update parsing
// statistics since we've already parsed the code before.
@@ -1830,7 +1840,8 @@ bool Compiler::CollectSourcePositions(Isolate* isolate,
// static
bool Compiler::Compile(Isolate* isolate, Handle<SharedFunctionInfo> shared_info,
ClearExceptionFlag flag,
- IsCompiledScope* is_compiled_scope) {
+ IsCompiledScope* is_compiled_scope,
+ CreateSourcePositions create_source_positions_flag) {
// We should never reach here if the function is already compiled.
DCHECK(!shared_info->is_compiled());
DCHECK(!is_compiled_scope->is_compiled());
@@ -1851,9 +1862,13 @@ bool Compiler::Compile(Isolate* isolate, Handle<SharedFunctionInfo> shared_info,
// Set up parse info.
UnoptimizedCompileFlags flags =
UnoptimizedCompileFlags::ForFunctionCompile(isolate, *shared_info);
+ if (create_source_positions_flag == CreateSourcePositions::kYes) {
+ flags.set_collect_source_positions(true);
+ }
- UnoptimizedCompileState compile_state(isolate);
- ParseInfo parse_info(isolate, flags, &compile_state);
+ UnoptimizedCompileState compile_state;
+ ReusableUnoptimizedCompileState reusable_state(isolate);
+ ParseInfo parse_info(isolate, flags, &compile_state, &reusable_state);
// Check if the compiler dispatcher has shared_info enqueued for compile.
LazyCompileDispatcher* dispatcher = isolate->lazy_compile_dispatcher();
@@ -1927,7 +1942,7 @@ bool Compiler::Compile(Isolate* isolate, Handle<JSFunction> function,
}
DCHECK(is_compiled_scope->is_compiled());
- Handle<Code> code = handle(shared_info->GetCode(), isolate);
+ Handle<Code> code = handle(FromCodeT(shared_info->GetCode()), isolate);
// Initialize the feedback cell for this JSFunction and reset the interrupt
// budget for feedback vector allocation even if there is a closure feedback
@@ -2011,7 +2026,7 @@ bool Compiler::CompileSharedWithBaseline(Isolate* isolate,
// report these somehow, or silently ignore them?
return false;
}
- shared->set_baseline_code(*code, kReleaseStore);
+ shared->set_baseline_code(ToCodeT(*code), kReleaseStore);
if (V8_LIKELY(FLAG_use_osr)) {
// Arm back edges for OSR
@@ -2045,7 +2060,7 @@ bool Compiler::CompileBaseline(Isolate* isolate, Handle<JSFunction> function,
// Baseline code needs a feedback vector.
JSFunction::EnsureFeedbackVector(function, is_compiled_scope);
- Code baseline_code = shared->baseline_code(kAcquireLoad);
+ CodeT baseline_code = shared->baseline_code(kAcquireLoad);
DCHECK_EQ(baseline_code.kind(), CodeKind::BASELINE);
function->set_code(baseline_code);
@@ -2061,45 +2076,19 @@ MaybeHandle<SharedFunctionInfo> Compiler::CompileToplevel(
}
// static
-bool Compiler::FinalizeBackgroundCompileTask(
- BackgroundCompileTask* task, Handle<SharedFunctionInfo> shared_info,
- Isolate* isolate, ClearExceptionFlag flag) {
- DCHECK(!FLAG_finalize_streaming_on_background);
-
+bool Compiler::FinalizeBackgroundCompileTask(BackgroundCompileTask* task,
+ Isolate* isolate,
+ ClearExceptionFlag flag) {
TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.compile"),
"V8.FinalizeBackgroundCompileTask");
RCS_SCOPE(isolate,
RuntimeCallCounterId::kCompileFinalizeBackgroundCompileTask);
- HandleScope scope(isolate);
- ParseInfo* parse_info = task->info();
- DCHECK(!parse_info->flags().is_toplevel());
- DCHECK(!shared_info->is_compiled());
-
- Handle<Script> script(Script::cast(shared_info->script()), isolate);
- parse_info->CheckFlagsForFunctionFromScript(*script);
-
- task->parser()->UpdateStatistics(isolate, script);
- task->parser()->HandleSourceURLComments(isolate, script);
- if (task->compilation_jobs()->empty()) {
- // Parsing or compile failed on background thread - report error messages.
- return FailWithPendingException(isolate, script, parse_info, flag);
- }
+ HandleScope scope(isolate);
- // Parsing has succeeded - finalize compilation.
- parse_info->ast_value_factory()->Internalize(isolate);
- if (!FinalizeAllUnoptimizedCompilationJobs(
- parse_info, isolate, script, task->compilation_jobs(),
- task->finalize_unoptimized_compilation_data())) {
- // Finalization failed - throw an exception.
- return FailWithPendingException(isolate, script, parse_info, flag);
- }
- FinalizeUnoptimizedCompilation(
- isolate, script, parse_info->flags(), parse_info->state(),
- *task->finalize_unoptimized_compilation_data());
+ if (!task->FinalizeFunction(isolate, flag)) return false;
DCHECK(!isolate->has_pending_exception());
- DCHECK(shared_info->is_compiled());
return true;
}
@@ -2204,8 +2193,9 @@ MaybeHandle<JSFunction> Compiler::GetFunctionFromEval(
DCHECK(!flags.is_module());
flags.set_parse_restriction(restriction);
- UnoptimizedCompileState compile_state(isolate);
- ParseInfo parse_info(isolate, flags, &compile_state);
+ UnoptimizedCompileState compile_state;
+ ReusableUnoptimizedCompileState reusable_state(isolate);
+ ParseInfo parse_info(isolate, flags, &compile_state, &reusable_state);
parse_info.set_parameters_end_pos(parameters_end_pos);
MaybeHandle<ScopeInfo> maybe_outer_scope_info;
@@ -2650,30 +2640,6 @@ struct ScriptCompileTimerScope {
}
};
-void SetScriptFieldsFromDetails(Isolate* isolate, Script script,
- ScriptDetails script_details,
- DisallowGarbageCollection* no_gc) {
- Handle<Object> script_name;
- if (script_details.name_obj.ToHandle(&script_name)) {
- script.set_name(*script_name);
- script.set_line_offset(script_details.line_offset);
- script.set_column_offset(script_details.column_offset);
- }
- // The API can provide a source map URL, but a source map URL could also have
- // been inferred by the parser from a magic comment. The latter takes
- // preference over the former, so we don't want to override the source mapping
- // URL if it already exists.
- Handle<Object> source_map_url;
- if (script_details.source_map_url.ToHandle(&source_map_url) &&
- script.source_mapping_url(isolate).IsUndefined(isolate)) {
- script.set_source_mapping_url(*source_map_url);
- }
- Handle<FixedArray> host_defined_options;
- if (script_details.host_defined_options.ToHandle(&host_defined_options)) {
- script.set_host_defined_options(*host_defined_options);
- }
-}
-
Handle<Script> NewScript(
Isolate* isolate, ParseInfo* parse_info, Handle<String> source,
ScriptDetails script_details, NativesFlag natives,
@@ -2693,8 +2659,9 @@ MaybeHandle<SharedFunctionInfo> CompileScriptOnMainThread(
const ScriptDetails& script_details, NativesFlag natives,
v8::Extension* extension, Isolate* isolate,
IsCompiledScope* is_compiled_scope) {
- UnoptimizedCompileState compile_state(isolate);
- ParseInfo parse_info(isolate, flags, &compile_state);
+ UnoptimizedCompileState compile_state;
+ ReusableUnoptimizedCompileState reusable_state(isolate);
+ ParseInfo parse_info(isolate, flags, &compile_state, &reusable_state);
parse_info.set_extension(extension);
Handle<Script> script =
@@ -2867,7 +2834,8 @@ MaybeHandle<SharedFunctionInfo> GetSharedFunctionInfoForScriptImpl(
if (V8_UNLIKELY(
i::FLAG_experimental_web_snapshots &&
- (source->IsExternalOneByteString() || source->IsSeqOneByteString()) &&
+ (source->IsExternalOneByteString() || source->IsSeqOneByteString() ||
+ source->IsExternalTwoByteString() || source->IsSeqTwoByteString()) &&
source_length > 4)) {
// Experimental: Treat the script as a web snapshot if it starts with the
// magic byte sequence. TODO(v8:11525): Remove this once proper embedder
@@ -3080,8 +3048,9 @@ MaybeHandle<JSFunction> Compiler::GetWrappedFunction(
flags.set_collect_source_positions(true);
// flags.set_eager(compile_options == ScriptCompiler::kEagerCompile);
- UnoptimizedCompileState compile_state(isolate);
- ParseInfo parse_info(isolate, flags, &compile_state);
+ UnoptimizedCompileState compile_state;
+ ReusableUnoptimizedCompileState reusable_state(isolate);
+ ParseInfo parse_info(isolate, flags, &compile_state, &reusable_state);
MaybeHandle<ScopeInfo> maybe_outer_scope_info;
if (!context->IsNativeContext()) {
@@ -3123,8 +3092,7 @@ MaybeHandle<SharedFunctionInfo>
Compiler::GetSharedFunctionInfoForStreamedScript(
Isolate* isolate, Handle<String> source,
const ScriptDetails& script_details, ScriptStreamingData* streaming_data) {
- ScriptOriginOptions origin_options = script_details.origin_options;
- DCHECK(!origin_options.IsWasm());
+ DCHECK(!script_details.origin_options.IsWasm());
ScriptCompileTimerScope compile_timer(
isolate, ScriptCompiler::kNoCacheBecauseStreamingSource);
@@ -3153,94 +3121,15 @@ Compiler::GetSharedFunctionInfoForStreamedScript(
if (maybe_result.is_null()) {
// No cache entry found, finalize compilation of the script and add it to
// the isolate cache.
- DCHECK_EQ(task->flags().is_module(), origin_options.IsModule());
-
- Handle<Script> script;
- if (FLAG_finalize_streaming_on_background) {
- RCS_SCOPE(isolate,
- RuntimeCallCounterId::kCompilePublishBackgroundFinalization);
- TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.compile"),
- "V8.OffThreadFinalization.Publish");
-
- script = task->GetScript(isolate);
-
- // We might not have been able to finalize all jobs on the background
- // thread (e.g. asm.js jobs), so finalize those deferred jobs now.
- if (FinalizeDeferredUnoptimizedCompilationJobs(
- isolate, script,
- task->jobs_to_retry_finalization_on_main_thread(),
- task->compile_state()->pending_error_handler(),
- task->finalize_unoptimized_compilation_data())) {
- maybe_result = task->GetOuterFunctionSfi(isolate);
- }
-
- script->set_source(*source);
- script->set_origin_options(origin_options);
-
- // The one post-hoc fix-up: Add the script to the script list.
- Handle<WeakArrayList> scripts = isolate->factory()->script_list();
- scripts = WeakArrayList::Append(isolate, scripts,
- MaybeObjectHandle::Weak(script));
- isolate->heap()->SetRootScriptList(*scripts);
-
- for (int i = 0;
- i < static_cast<int>(v8::Isolate::kUseCounterFeatureCount); ++i) {
- v8::Isolate::UseCounterFeature feature =
- static_cast<v8::Isolate::UseCounterFeature>(i);
- isolate->CountUsage(feature, task->use_count(feature));
- }
- isolate->counters()->total_preparse_skipped()->Increment(
- task->total_preparse_skipped());
- } else {
- ParseInfo* parse_info = task->info();
- DCHECK_EQ(parse_info->flags().is_module(), origin_options.IsModule());
- DCHECK(parse_info->flags().is_toplevel());
-
- script = parse_info->CreateScript(isolate, source, kNullMaybeHandle,
- origin_options);
-
- task->parser()->UpdateStatistics(isolate, script);
- task->parser()->HandleSourceURLComments(isolate, script);
-
- if (!task->compilation_jobs()->empty()) {
- // Off-thread parse & compile has succeeded - finalize compilation.
- DCHECK_NOT_NULL(parse_info->literal());
-
- parse_info->ast_value_factory()->Internalize(isolate);
-
- Handle<SharedFunctionInfo> shared_info =
- CreateTopLevelSharedFunctionInfo(parse_info, script, isolate);
- if (FinalizeAllUnoptimizedCompilationJobs(
- parse_info, isolate, script, task->compilation_jobs(),
- task->finalize_unoptimized_compilation_data())) {
- maybe_result = shared_info;
- }
- }
-
- if (maybe_result.is_null()) {
- // Compilation failed - prepare to throw an exception after script
- // fields have been set.
- PreparePendingException(isolate, parse_info);
- }
- }
+ RCS_SCOPE(isolate,
+ RuntimeCallCounterId::kCompilePublishBackgroundFinalization);
+ TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.compile"),
+ "V8.OffThreadFinalization.Publish");
- // Set the script fields after finalization, to keep this path the same
- // between main-thread and off-thread finalization.
- {
- DisallowGarbageCollection no_gc;
- SetScriptFieldsFromDetails(isolate, *script, script_details, &no_gc);
- LOG(isolate, ScriptDetails(*script));
- }
+ maybe_result = task->FinalizeScript(isolate, source, script_details);
Handle<SharedFunctionInfo> result;
- if (!maybe_result.ToHandle(&result)) {
- FailWithPreparedPendingException(
- isolate, script, task->compile_state()->pending_error_handler());
- } else {
- FinalizeUnoptimizedScriptCompilation(
- isolate, script, task->flags(), task->compile_state(),
- *task->finalize_unoptimized_compilation_data());
-
+ if (maybe_result.ToHandle(&result)) {
// Add compiled code to the isolate cache.
TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.compile"),
"V8.StreamingFinalization.AddToCache");
@@ -3252,7 +3141,7 @@ Compiler::GetSharedFunctionInfoForStreamedScript(
"V8.StreamingFinalization.Release");
streaming_data->Release();
return maybe_result;
-}
+} // namespace internal
// static
Handle<SharedFunctionInfo> Compiler::GetSharedFunctionInfoForWebSnapshot(
diff --git a/deps/v8/src/codegen/compiler.h b/deps/v8/src/codegen/compiler.h
index 5298d139ff..f49bd727bc 100644
--- a/deps/v8/src/codegen/compiler.h
+++ b/deps/v8/src/codegen/compiler.h
@@ -8,7 +8,9 @@
#include <forward_list>
#include <memory>
+#include "src/ast/ast-value-factory.h"
#include "src/base/platform/elapsed-timer.h"
+#include "src/base/small-vector.h"
#include "src/codegen/bailout-reason.h"
#include "src/common/globals.h"
#include "src/execution/isolate.h"
@@ -69,7 +71,9 @@ class V8_EXPORT_PRIVATE Compiler : public AllStatic {
static bool Compile(Isolate* isolate, Handle<SharedFunctionInfo> shared,
ClearExceptionFlag flag,
- IsCompiledScope* is_compiled_scope);
+ IsCompiledScope* is_compiled_scope,
+ CreateSourcePositions create_source_positions_flag =
+ CreateSourcePositions::kNo);
static bool Compile(Isolate* isolate, Handle<JSFunction> function,
ClearExceptionFlag flag,
IsCompiledScope* is_compiled_scope);
@@ -104,9 +108,9 @@ class V8_EXPORT_PRIVATE Compiler : public AllStatic {
Isolate* isolate);
// Finalize and install code from previously run background compile task.
- static bool FinalizeBackgroundCompileTask(
- BackgroundCompileTask* task, Handle<SharedFunctionInfo> shared_info,
- Isolate* isolate, ClearExceptionFlag flag);
+ static bool FinalizeBackgroundCompileTask(BackgroundCompileTask* task,
+ Isolate* isolate,
+ ClearExceptionFlag flag);
// Finalize and install optimized code from previously run job.
static bool FinalizeOptimizedCompilationJob(OptimizedCompilationJob* job,
@@ -495,8 +499,7 @@ using DeferredFinalizationJobDataList =
class V8_EXPORT_PRIVATE BackgroundCompileTask {
public:
// Creates a new task that when run will parse and compile the streamed
- // script associated with |data| and can be finalized with
- // Compiler::GetSharedFunctionInfoForStreamedScript.
+ // script associated with |data| and can be finalized with FinalizeScript.
// Note: does not take ownership of |data|.
BackgroundCompileTask(ScriptStreamingData* data, Isolate* isolate,
v8::ScriptType type);
@@ -504,83 +507,61 @@ class V8_EXPORT_PRIVATE BackgroundCompileTask {
BackgroundCompileTask& operator=(const BackgroundCompileTask&) = delete;
~BackgroundCompileTask();
- // Creates a new task that when run will parse and compile the
- // |function_literal| and can be finalized with
+ // Creates a new task that when run will parse and compile the non-toplevel
+ // |shared_info| and can be finalized with FinalizeFunction in
// Compiler::FinalizeBackgroundCompileTask.
BackgroundCompileTask(
- const ParseInfo* outer_parse_info, const AstRawString* function_name,
- const FunctionLiteral* function_literal,
+ Isolate* isolate, Handle<SharedFunctionInfo> shared_info,
+ std::unique_ptr<Utf16CharacterStream> character_stream,
WorkerThreadRuntimeCallStats* worker_thread_runtime_stats,
TimedHistogram* timer, int max_stack_size);
void Run();
+ void Run(LocalIsolate* isolate,
+ ReusableUnoptimizedCompileState* reusable_state);
- ParseInfo* info() {
- DCHECK_NOT_NULL(info_);
- return info_.get();
- }
- Parser* parser() { return parser_.get(); }
- UnoptimizedCompilationJobList* compilation_jobs() {
- return &compilation_jobs_;
- }
- UnoptimizedCompileFlags flags() const { return flags_; }
- UnoptimizedCompileState* compile_state() { return &compile_state_; }
- LanguageMode language_mode() { return language_mode_; }
- FinalizeUnoptimizedCompilationDataList*
- finalize_unoptimized_compilation_data() {
- return &finalize_unoptimized_compilation_data_;
- }
+ MaybeHandle<SharedFunctionInfo> FinalizeScript(
+ Isolate* isolate, Handle<String> source,
+ const ScriptDetails& script_details);
- int use_count(v8::Isolate::UseCounterFeature feature) const {
- return use_counts_[static_cast<int>(feature)];
- }
- int total_preparse_skipped() const { return total_preparse_skipped_; }
+ bool FinalizeFunction(Isolate* isolate, Compiler::ClearExceptionFlag flag);
- // Jobs which could not be finalized in the background task, and need to be
- // finalized on the main thread.
- DeferredFinalizationJobDataList* jobs_to_retry_finalization_on_main_thread() {
- return &jobs_to_retry_finalization_on_main_thread_;
- }
+ void AbortFunction();
- // Getters for the off-thread finalization results, that create main-thread
- // handles to the objects.
- MaybeHandle<SharedFunctionInfo> GetOuterFunctionSfi(Isolate* isolate);
- Handle<Script> GetScript(Isolate* isolate);
+ UnoptimizedCompileFlags flags() const { return flags_; }
+ LanguageMode language_mode() const { return language_mode_; }
private:
- // Data needed for parsing, and data needed to to be passed between thread
- // between parsing and compilation. These need to be initialized before the
- // compilation starts.
+ void ReportStatistics(Isolate* isolate);
+
+ void ClearFunctionJobPointer();
+
+ // Data needed for parsing and compilation. These need to be initialized
+ // before the compilation starts.
+ Isolate* isolate_for_local_isolate_;
UnoptimizedCompileFlags flags_;
UnoptimizedCompileState compile_state_;
- std::unique_ptr<ParseInfo> info_;
- std::unique_ptr<Parser> parser_;
-
- // Data needed for finalizing compilation after background compilation.
- UnoptimizedCompilationJobList compilation_jobs_;
+ std::unique_ptr<Utf16CharacterStream> character_stream_;
+ int stack_size_;
+ WorkerThreadRuntimeCallStats* worker_thread_runtime_call_stats_;
+ TimedHistogram* timer_;
// Data needed for merging onto the main thread after background finalization.
- // TODO(leszeks): When these are available, the above fields are not. We
- // should add some stricter type-safety or DCHECKs to ensure that the user of
- // the task knows this.
- Isolate* isolate_for_local_isolate_;
std::unique_ptr<PersistentHandles> persistent_handles_;
MaybeHandle<SharedFunctionInfo> outer_function_sfi_;
Handle<Script> script_;
IsCompiledScope is_compiled_scope_;
FinalizeUnoptimizedCompilationDataList finalize_unoptimized_compilation_data_;
DeferredFinalizationJobDataList jobs_to_retry_finalization_on_main_thread_;
- int use_counts_[v8::Isolate::kUseCounterFeatureCount] = {0};
+ base::SmallVector<v8::Isolate::UseCounterFeature, 8> use_counts_;
int total_preparse_skipped_ = 0;
// Single function data for top-level function compilation.
+ MaybeHandle<SharedFunctionInfo> input_shared_info_;
int start_position_;
int end_position_;
int function_literal_id_;
- int stack_size_;
- WorkerThreadRuntimeCallStats* worker_thread_runtime_call_stats_;
- TimedHistogram* timer_;
LanguageMode language_mode_;
};
diff --git a/deps/v8/src/codegen/constant-pool.h b/deps/v8/src/codegen/constant-pool.h
index b2d890c6f4..76956381a2 100644
--- a/deps/v8/src/codegen/constant-pool.h
+++ b/deps/v8/src/codegen/constant-pool.h
@@ -24,13 +24,13 @@ class ConstantPoolEntry {
public:
ConstantPoolEntry() = default;
ConstantPoolEntry(int position, intptr_t value, bool sharing_ok,
- RelocInfo::Mode rmode = RelocInfo::NONE)
+ RelocInfo::Mode rmode = RelocInfo::NO_INFO)
: position_(position),
merged_index_(sharing_ok ? SHARING_ALLOWED : SHARING_PROHIBITED),
value_(value),
rmode_(rmode) {}
ConstantPoolEntry(int position, base::Double value,
- RelocInfo::Mode rmode = RelocInfo::NONE)
+ RelocInfo::Mode rmode = RelocInfo::NO_INFO)
: position_(position),
merged_index_(SHARING_ALLOWED),
value64_(value.AsUint64()),
@@ -168,11 +168,11 @@ class ConstantPoolBuilder {
class ConstantPoolKey {
public:
explicit ConstantPoolKey(uint64_t value,
- RelocInfo::Mode rmode = RelocInfo::NONE)
+ RelocInfo::Mode rmode = RelocInfo::NO_INFO)
: is_value32_(false), value64_(value), rmode_(rmode) {}
explicit ConstantPoolKey(uint32_t value,
- RelocInfo::Mode rmode = RelocInfo::NONE)
+ RelocInfo::Mode rmode = RelocInfo::NO_INFO)
: is_value32_(true), value32_(value), rmode_(rmode) {}
uint64_t value64() const {
diff --git a/deps/v8/src/codegen/cpu-features.h b/deps/v8/src/codegen/cpu-features.h
index 3cdae6d4c8..e80d560fd1 100644
--- a/deps/v8/src/codegen/cpu-features.h
+++ b/deps/v8/src/codegen/cpu-features.h
@@ -26,7 +26,7 @@ enum CpuFeature {
BMI2,
LZCNT,
POPCNT,
- ATOM,
+ INTEL_ATOM,
#elif V8_TARGET_ARCH_ARM
// - Standard configurations. The baseline is ARMv6+VFPv2.
diff --git a/deps/v8/src/codegen/external-reference-table.cc b/deps/v8/src/codegen/external-reference-table.cc
index 0a22fbdd75..d07f021a8b 100644
--- a/deps/v8/src/codegen/external-reference-table.cc
+++ b/deps/v8/src/codegen/external-reference-table.cc
@@ -290,9 +290,11 @@ void ExternalReferenceTable::AddStubCache(Isolate* isolate, int* index) {
}
Address ExternalReferenceTable::GetStatsCounterAddress(StatsCounter* counter) {
- int* address = counter->Enabled()
- ? counter->GetInternalPointer()
- : reinterpret_cast<int*>(&dummy_stats_counter_);
+ if (!counter->Enabled()) {
+ return reinterpret_cast<Address>(&dummy_stats_counter_);
+ }
+ std::atomic<int>* address = counter->GetInternalPointer();
+ STATIC_ASSERT(sizeof(address) == sizeof(Address));
return reinterpret_cast<Address>(address);
}
diff --git a/deps/v8/src/codegen/external-reference.cc b/deps/v8/src/codegen/external-reference.cc
index 1981e29911..075eaf8c09 100644
--- a/deps/v8/src/codegen/external-reference.cc
+++ b/deps/v8/src/codegen/external-reference.cc
@@ -4,6 +4,7 @@
#include "src/codegen/external-reference.h"
+#include "include/v8-fast-api-calls.h"
#include "src/api/api.h"
#include "src/base/ieee754.h"
#include "src/codegen/cpu-features.h"
@@ -11,10 +12,11 @@
#include "src/date/date.h"
#include "src/debug/debug.h"
#include "src/deoptimizer/deoptimizer.h"
+#include "src/execution/encoded-c-signature.h"
#include "src/execution/isolate-utils.h"
#include "src/execution/isolate.h"
#include "src/execution/microtask-queue.h"
-#include "src/execution/simulator-base.h"
+#include "src/execution/simulator.h"
#include "src/heap/heap-inl.h"
#include "src/heap/heap.h"
#include "src/ic/stub-cache.h"
@@ -173,8 +175,18 @@ static ExternalReference::Type BuiltinCallTypeForResultSize(int result_size) {
}
// static
+ExternalReference ExternalReference::Create(ApiFunction* fun, Type type) {
+ return ExternalReference(Redirect(fun->address(), type));
+}
+
+// static
ExternalReference ExternalReference::Create(
- ApiFunction* fun, Type type = ExternalReference::BUILTIN_CALL) {
+ Isolate* isolate, ApiFunction* fun, Type type, Address* c_functions,
+ const CFunctionInfo* const* c_signatures, unsigned num_functions) {
+#ifdef V8_USE_SIMULATOR_WITH_GENERIC_C_CALLS
+ isolate->simulator_data()->RegisterFunctionsAndSignatures(
+ c_functions, c_signatures, num_functions);
+#endif // V8_USE_SIMULATOR_WITH_GENERIC_C_CALLS
return ExternalReference(Redirect(fun->address(), type));
}
@@ -198,16 +210,23 @@ ExternalReference ExternalReference::isolate_address(Isolate* isolate) {
return ExternalReference(isolate);
}
-ExternalReference ExternalReference::builtins_address(Isolate* isolate) {
+ExternalReference ExternalReference::builtins_table(Isolate* isolate) {
return ExternalReference(isolate->builtin_table());
}
+#ifdef V8_EXTERNAL_CODE_SPACE
+ExternalReference ExternalReference::builtins_code_data_container_table(
+ Isolate* isolate) {
+ return ExternalReference(isolate->builtin_code_data_container_table());
+}
+#endif // V8_EXTERNAL_CODE_SPACE
+
ExternalReference ExternalReference::handle_scope_implementer_address(
Isolate* isolate) {
return ExternalReference(isolate->handle_scope_implementer_address());
}
-#ifdef V8_VIRTUAL_MEMORY_CAGE
+#ifdef V8_CAGED_POINTERS
ExternalReference ExternalReference::virtual_memory_cage_base_address() {
return ExternalReference(GetProcessWideVirtualMemoryCage()->base_address());
}
@@ -215,7 +234,13 @@ ExternalReference ExternalReference::virtual_memory_cage_base_address() {
ExternalReference ExternalReference::virtual_memory_cage_end_address() {
return ExternalReference(GetProcessWideVirtualMemoryCage()->end_address());
}
-#endif
+
+ExternalReference ExternalReference::empty_backing_store_buffer() {
+ return ExternalReference(GetProcessWideVirtualMemoryCage()
+ ->constants()
+ .empty_backing_store_buffer_address());
+}
+#endif // V8_CAGED_POINTERS
#ifdef V8_HEAP_SANDBOX
ExternalReference ExternalReference::external_pointer_table_address(
@@ -871,8 +896,7 @@ ExternalReference ExternalReference::search_string_raw() {
FUNCTION_REFERENCE(jsarray_array_join_concat_to_sequential_string,
JSArray::ArrayJoinConcatToSequentialString)
-FUNCTION_REFERENCE(length_tracking_gsab_backed_typed_array_length,
- JSTypedArray::LengthTrackingGsabBackedTypedArrayLength)
+FUNCTION_REFERENCE(gsab_byte_length, JSArrayBuffer::GsabByteLength)
ExternalReference ExternalReference::search_string_raw_one_one() {
return search_string_raw<const uint8_t, const uint8_t>();
@@ -1001,6 +1025,17 @@ ExternalReference ExternalReference::intl_to_latin1_lower_table() {
uint8_t* ptr = const_cast<uint8_t*>(Intl::ToLatin1LowerTable());
return ExternalReference(reinterpret_cast<Address>(ptr));
}
+
+ExternalReference ExternalReference::intl_ascii_collation_weights_l1() {
+ uint8_t* ptr = const_cast<uint8_t*>(Intl::AsciiCollationWeightsL1());
+ return ExternalReference(reinterpret_cast<Address>(ptr));
+}
+
+ExternalReference ExternalReference::intl_ascii_collation_weights_l3() {
+ uint8_t* ptr = const_cast<uint8_t*>(Intl::AsciiCollationWeightsL3());
+ return ExternalReference(reinterpret_cast<Address>(ptr));
+}
+
#endif // V8_INTL_SUPPORT
// Explicit instantiations for all combinations of 1- and 2-byte strings.
diff --git a/deps/v8/src/codegen/external-reference.h b/deps/v8/src/codegen/external-reference.h
index d7cffa966b..a0c27d207e 100644
--- a/deps/v8/src/codegen/external-reference.h
+++ b/deps/v8/src/codegen/external-reference.h
@@ -11,6 +11,7 @@
namespace v8 {
class ApiFunction;
+class CFunctionInfo;
namespace internal {
@@ -24,7 +25,7 @@ class StatsCounter;
#define EXTERNAL_REFERENCE_LIST_WITH_ISOLATE(V) \
V(isolate_address, "isolate") \
- V(builtins_address, "builtins") \
+ V(builtins_table, "builtins_table") \
V(handle_scope_implementer_address, \
"Isolate::handle_scope_implementer_address") \
V(address_of_interpreter_entry_trampoline_instruction_start, \
@@ -78,8 +79,16 @@ class StatsCounter;
V(thread_in_wasm_flag_address_address, \
"Isolate::thread_in_wasm_flag_address_address") \
V(javascript_execution_assert, "javascript_execution_assert") \
+ EXTERNAL_REFERENCE_LIST_WITH_ISOLATE_EXTERNAL_CODE_SPACE(V) \
EXTERNAL_REFERENCE_LIST_WITH_ISOLATE_HEAP_SANDBOX(V)
+#ifdef V8_EXTERNAL_CODE_SPACE
+#define EXTERNAL_REFERENCE_LIST_WITH_ISOLATE_EXTERNAL_CODE_SPACE(V) \
+ V(builtins_code_data_container_table, "builtins_code_data_container_table")
+#else
+#define EXTERNAL_REFERENCE_LIST_WITH_ISOLATE_EXTERNAL_CODE_SPACE(V)
+#endif // V8_EXTERNAL_CODE_SPACE
+
#ifdef V8_HEAP_SANDBOX
#define EXTERNAL_REFERENCE_LIST_WITH_ISOLATE_HEAP_SANDBOX(V) \
V(external_pointer_table_address, \
@@ -126,6 +135,7 @@ class StatsCounter;
V(f64_mod_wrapper_function, "f64_mod_wrapper") \
V(get_date_field_function, "JSDate::GetField") \
V(get_or_create_hash_raw, "get_or_create_hash_raw") \
+ V(gsab_byte_length, "GsabByteLength") \
V(ieee754_acos_function, "base::ieee754::acos") \
V(ieee754_acosh_function, "base::ieee754::acosh") \
V(ieee754_asin_function, "base::ieee754::asin") \
@@ -155,8 +165,6 @@ class StatsCounter;
V(jsarray_array_join_concat_to_sequential_string, \
"jsarray_array_join_concat_to_sequential_string") \
V(jsreceiver_create_identity_hash, "jsreceiver_create_identity_hash") \
- V(length_tracking_gsab_backed_typed_array_length, \
- "LengthTrackingGsabBackedTypedArrayLength") \
V(libc_memchr_function, "libc_memchr") \
V(libc_memcpy_function, "libc_memcpy") \
V(libc_memmove_function, "libc_memmove") \
@@ -303,18 +311,21 @@ class StatsCounter;
#ifdef V8_INTL_SUPPORT
#define EXTERNAL_REFERENCE_LIST_INTL(V) \
V(intl_convert_one_byte_to_lower, "intl_convert_one_byte_to_lower") \
- V(intl_to_latin1_lower_table, "intl_to_latin1_lower_table")
+ V(intl_to_latin1_lower_table, "intl_to_latin1_lower_table") \
+ V(intl_ascii_collation_weights_l1, "Intl::AsciiCollationWeightsL1") \
+ V(intl_ascii_collation_weights_l3, "Intl::AsciiCollationWeightsL3")
#else
#define EXTERNAL_REFERENCE_LIST_INTL(V)
#endif // V8_INTL_SUPPORT
-#ifdef V8_VIRTUAL_MEMORY_CAGE
+#ifdef V8_CAGED_POINTERS
#define EXTERNAL_REFERENCE_LIST_VIRTUAL_MEMORY_CAGE(V) \
V(virtual_memory_cage_base_address, "V8VirtualMemoryCage::base()") \
- V(virtual_memory_cage_end_address, "V8VirtualMemoryCage::end()")
+ V(virtual_memory_cage_end_address, "V8VirtualMemoryCage::end()") \
+ V(empty_backing_store_buffer, "EmptyBackingStoreBuffer()")
#else
#define EXTERNAL_REFERENCE_LIST_VIRTUAL_MEMORY_CAGE(V)
-#endif // V8_VIRTUAL_MEMORY_CAGE
+#endif // V8_CAGED_POINTERS
#ifdef V8_HEAP_SANDBOX
#define EXTERNAL_REFERENCE_LIST_HEAP_SANDBOX(V) \
@@ -398,6 +409,15 @@ class ExternalReference {
static ExternalReference Create(StatsCounter* counter);
static V8_EXPORT_PRIVATE ExternalReference Create(ApiFunction* ptr,
Type type);
+ // The following version is used by JSCallReducer in the compiler
+ // to create a reference for a fast API call, with one or more
+ // overloads. In simulator builds, it additionally "registers"
+ // the overloads with the simulator to ensure it maintains a
+ // mapping of callable Addresses to a function signature, encoding
+ // GP and FP arguments.
+ static V8_EXPORT_PRIVATE ExternalReference
+ Create(Isolate* isolate, ApiFunction* ptr, Type type, Address* c_functions,
+ const CFunctionInfo* const* c_signatures, unsigned num_functions);
static ExternalReference Create(const Runtime::Function* f);
static ExternalReference Create(IsolateAddressId id, Isolate* isolate);
static ExternalReference Create(Runtime::FunctionId id);
diff --git a/deps/v8/src/codegen/ia32/assembler-ia32-inl.h b/deps/v8/src/codegen/ia32/assembler-ia32-inl.h
index 2d2b368c7b..607c0aca6e 100644
--- a/deps/v8/src/codegen/ia32/assembler-ia32-inl.h
+++ b/deps/v8/src/codegen/ia32/assembler-ia32-inl.h
@@ -179,7 +179,7 @@ void Assembler::emit(Handle<HeapObject> handle) {
}
void Assembler::emit(uint32_t x, RelocInfo::Mode rmode) {
- if (!RelocInfo::IsNone(rmode)) {
+ if (!RelocInfo::IsNoInfo(rmode)) {
RecordRelocInfo(rmode);
}
emit(x);
@@ -195,7 +195,7 @@ void Assembler::emit(const Immediate& x) {
emit_code_relative_offset(label);
return;
}
- if (!RelocInfo::IsNone(x.rmode_)) RecordRelocInfo(x.rmode_);
+ if (!RelocInfo::IsNoInfo(x.rmode_)) RecordRelocInfo(x.rmode_);
if (x.is_heap_object_request()) {
RequestHeapObject(x.heap_object_request());
emit(0);
@@ -221,7 +221,7 @@ void Assembler::emit_b(Immediate x) {
}
void Assembler::emit_w(const Immediate& x) {
- DCHECK(RelocInfo::IsNone(x.rmode_));
+ DCHECK(RelocInfo::IsNoInfo(x.rmode_));
uint16_t value = static_cast<uint16_t>(x.immediate());
WriteUnalignedValue(reinterpret_cast<Address>(pc_), value);
pc_ += sizeof(uint16_t);
diff --git a/deps/v8/src/codegen/ia32/assembler-ia32.cc b/deps/v8/src/codegen/ia32/assembler-ia32.cc
index e14d16c00a..389640e89a 100644
--- a/deps/v8/src/codegen/ia32/assembler-ia32.cc
+++ b/deps/v8/src/codegen/ia32/assembler-ia32.cc
@@ -153,9 +153,9 @@ void CpuFeatures::ProbeImpl(bool cross_compile) {
if (cpu.has_lzcnt() && FLAG_enable_lzcnt) SetSupported(LZCNT);
if (cpu.has_popcnt() && FLAG_enable_popcnt) SetSupported(POPCNT);
if (strcmp(FLAG_mcpu, "auto") == 0) {
- if (cpu.is_atom()) SetSupported(ATOM);
+ if (cpu.is_atom()) SetSupported(INTEL_ATOM);
} else if (strcmp(FLAG_mcpu, "atom") == 0) {
- SetSupported(ATOM);
+ SetSupported(INTEL_ATOM);
}
// Ensure that supported cpu features make sense. E.g. it is wrong to support
@@ -188,7 +188,7 @@ void CpuFeatures::PrintFeatures() {
CpuFeatures::IsSupported(AVX2), CpuFeatures::IsSupported(FMA3),
CpuFeatures::IsSupported(BMI1), CpuFeatures::IsSupported(BMI2),
CpuFeatures::IsSupported(LZCNT), CpuFeatures::IsSupported(POPCNT),
- CpuFeatures::IsSupported(ATOM));
+ CpuFeatures::IsSupported(INTEL_ATOM));
}
// -----------------------------------------------------------------------------
@@ -235,11 +235,11 @@ uint32_t RelocInfo::wasm_call_tag() const {
Operand::Operand(Register base, int32_t disp, RelocInfo::Mode rmode) {
// [base + disp/r]
- if (disp == 0 && RelocInfo::IsNone(rmode) && base != ebp) {
+ if (disp == 0 && RelocInfo::IsNoInfo(rmode) && base != ebp) {
// [base]
set_modrm(0, base);
if (base == esp) set_sib(times_1, esp, base);
- } else if (is_int8(disp) && RelocInfo::IsNone(rmode)) {
+ } else if (is_int8(disp) && RelocInfo::IsNoInfo(rmode)) {
// [base + disp8]
set_modrm(1, base);
if (base == esp) set_sib(times_1, esp, base);
@@ -256,11 +256,11 @@ Operand::Operand(Register base, Register index, ScaleFactor scale, int32_t disp,
RelocInfo::Mode rmode) {
DCHECK(index != esp); // illegal addressing mode
// [base + index*scale + disp/r]
- if (disp == 0 && RelocInfo::IsNone(rmode) && base != ebp) {
+ if (disp == 0 && RelocInfo::IsNoInfo(rmode) && base != ebp) {
// [base + index*scale]
set_modrm(0, esp);
set_sib(scale, index, base);
- } else if (is_int8(disp) && RelocInfo::IsNone(rmode)) {
+ } else if (is_int8(disp) && RelocInfo::IsNoInfo(rmode)) {
// [base + index*scale + disp8]
set_modrm(1, esp);
set_sib(scale, index, base);
@@ -2861,23 +2861,6 @@ void Assembler::pd(byte opcode, XMMRegister dst, Operand src) {
}
// AVX instructions
-void Assembler::vfmasd(byte op, XMMRegister dst, XMMRegister src1,
- Operand src2) {
- DCHECK(IsEnabled(FMA3));
- EnsureSpace ensure_space(this);
- emit_vex_prefix(src1, kLIG, k66, k0F38, kW1);
- EMIT(op);
- emit_sse_operand(dst, src2);
-}
-
-void Assembler::vfmass(byte op, XMMRegister dst, XMMRegister src1,
- Operand src2) {
- DCHECK(IsEnabled(FMA3));
- EnsureSpace ensure_space(this);
- emit_vex_prefix(src1, kLIG, k66, k0F38, kW0);
- EMIT(op);
- emit_sse_operand(dst, src2);
-}
void Assembler::vss(byte op, XMMRegister dst, XMMRegister src1, Operand src2) {
vinstr(op, dst, src1, src2, kF3, k0F, kWIG);
@@ -3222,19 +3205,31 @@ void Assembler::sse4_instr(XMMRegister dst, Operand src, byte prefix,
void Assembler::vinstr(byte op, XMMRegister dst, XMMRegister src1,
XMMRegister src2, SIMDPrefix pp, LeadingOpcode m, VexW w,
CpuFeature feature) {
+ vinstr(op, dst, src1, src2, kL128, pp, m, w, feature);
+}
+
+void Assembler::vinstr(byte op, XMMRegister dst, XMMRegister src1, Operand src2,
+ SIMDPrefix pp, LeadingOpcode m, VexW w,
+ CpuFeature feature) {
+ vinstr(op, dst, src1, src2, kL128, pp, m, w, feature);
+}
+
+void Assembler::vinstr(byte op, XMMRegister dst, XMMRegister src1,
+ XMMRegister src2, VectorLength l, SIMDPrefix pp,
+ LeadingOpcode m, VexW w, CpuFeature feature) {
DCHECK(IsEnabled(feature));
EnsureSpace ensure_space(this);
- emit_vex_prefix(src1, kL128, pp, m, w);
+ emit_vex_prefix(src1, l, pp, m, w);
EMIT(op);
emit_sse_operand(dst, src2);
}
void Assembler::vinstr(byte op, XMMRegister dst, XMMRegister src1, Operand src2,
- SIMDPrefix pp, LeadingOpcode m, VexW w,
+ VectorLength l, SIMDPrefix pp, LeadingOpcode m, VexW w,
CpuFeature feature) {
DCHECK(IsEnabled(feature));
EnsureSpace ensure_space(this);
- emit_vex_prefix(src1, kL128, pp, m, w);
+ emit_vex_prefix(src1, l, pp, m, w);
EMIT(op);
emit_sse_operand(dst, src2);
}
@@ -3383,7 +3378,7 @@ void Assembler::emit_operand(int code, Operand adr) {
for (unsigned i = 1; i < length; i++) EMIT(adr.encoded_bytes()[i]);
// Emit relocation information if necessary.
- if (length >= sizeof(int32_t) && !RelocInfo::IsNone(adr.rmode())) {
+ if (length >= sizeof(int32_t) && !RelocInfo::IsNoInfo(adr.rmode())) {
pc_ -= sizeof(int32_t); // pc_ must be *at* disp32
RecordRelocInfo(adr.rmode());
if (adr.rmode() == RelocInfo::INTERNAL_REFERENCE) { // Fixup for labels
@@ -3417,7 +3412,7 @@ void Assembler::db(uint8_t data) {
void Assembler::dd(uint32_t data, RelocInfo::Mode rmode) {
EnsureSpace ensure_space(this);
- if (!RelocInfo::IsNone(rmode)) {
+ if (!RelocInfo::IsNoInfo(rmode)) {
DCHECK(RelocInfo::IsDataEmbeddedObject(rmode) ||
RelocInfo::IsLiteralConstant(rmode));
RecordRelocInfo(rmode);
@@ -3427,7 +3422,7 @@ void Assembler::dd(uint32_t data, RelocInfo::Mode rmode) {
void Assembler::dq(uint64_t data, RelocInfo::Mode rmode) {
EnsureSpace ensure_space(this);
- if (!RelocInfo::IsNone(rmode)) {
+ if (!RelocInfo::IsNoInfo(rmode)) {
DCHECK(RelocInfo::IsDataEmbeddedObject(rmode));
RecordRelocInfo(rmode);
}
diff --git a/deps/v8/src/codegen/ia32/assembler-ia32.h b/deps/v8/src/codegen/ia32/assembler-ia32.h
index bdf2007485..8c5f20a112 100644
--- a/deps/v8/src/codegen/ia32/assembler-ia32.h
+++ b/deps/v8/src/codegen/ia32/assembler-ia32.h
@@ -42,6 +42,7 @@
#include "src/codegen/assembler.h"
#include "src/codegen/ia32/constants-ia32.h"
+#include "src/codegen/ia32/fma-instr.h"
#include "src/codegen/ia32/register-ia32.h"
#include "src/codegen/ia32/sse-instr.h"
#include "src/codegen/label.h"
@@ -105,7 +106,7 @@ enum RoundingMode {
class Immediate {
public:
// Calls where x is an Address (uintptr_t) resolve to this overload.
- inline explicit Immediate(int x, RelocInfo::Mode rmode = RelocInfo::NONE) {
+ inline explicit Immediate(int x, RelocInfo::Mode rmode = RelocInfo::NO_INFO) {
value_.immediate = x;
rmode_ = rmode;
}
@@ -156,19 +157,21 @@ class Immediate {
return bit_cast<ExternalReference>(immediate());
}
- bool is_zero() const { return RelocInfo::IsNone(rmode_) && immediate() == 0; }
+ bool is_zero() const {
+ return RelocInfo::IsNoInfo(rmode_) && immediate() == 0;
+ }
bool is_int8() const {
- return RelocInfo::IsNone(rmode_) && i::is_int8(immediate());
+ return RelocInfo::IsNoInfo(rmode_) && i::is_int8(immediate());
}
bool is_uint8() const {
- return RelocInfo::IsNone(rmode_) && i::is_uint8(immediate());
+ return RelocInfo::IsNoInfo(rmode_) && i::is_uint8(immediate());
}
bool is_int16() const {
- return RelocInfo::IsNone(rmode_) && i::is_int16(immediate());
+ return RelocInfo::IsNoInfo(rmode_) && i::is_int16(immediate());
}
bool is_uint16() const {
- return RelocInfo::IsNone(rmode_) && i::is_uint16(immediate());
+ return RelocInfo::IsNoInfo(rmode_) && i::is_uint16(immediate());
}
RelocInfo::Mode rmode() const { return rmode_; }
@@ -233,7 +236,7 @@ class V8_EXPORT_PRIVATE Operand {
// [base + disp/r]
explicit Operand(Register base, int32_t disp,
- RelocInfo::Mode rmode = RelocInfo::NONE);
+ RelocInfo::Mode rmode = RelocInfo::NO_INFO);
// [rip + disp/r]
explicit Operand(Label* label) {
@@ -243,11 +246,11 @@ class V8_EXPORT_PRIVATE Operand {
// [base + index*scale + disp/r]
explicit Operand(Register base, Register index, ScaleFactor scale,
- int32_t disp, RelocInfo::Mode rmode = RelocInfo::NONE);
+ int32_t disp, RelocInfo::Mode rmode = RelocInfo::NO_INFO);
// [index*scale + disp/r]
explicit Operand(Register index, ScaleFactor scale, int32_t disp,
- RelocInfo::Mode rmode = RelocInfo::NONE);
+ RelocInfo::Mode rmode = RelocInfo::NO_INFO);
static Operand JumpTable(Register index, ScaleFactor scale, Label* table) {
return Operand(index, scale, reinterpret_cast<int32_t>(table),
@@ -300,7 +303,7 @@ class V8_EXPORT_PRIVATE Operand {
// The number of bytes in buf_.
uint8_t len_ = 0;
// Only valid if len_ > 4.
- RelocInfo::Mode rmode_ = RelocInfo::NONE;
+ RelocInfo::Mode rmode_ = RelocInfo::NO_INFO;
};
ASSERT_TRIVIALLY_COPYABLE(Operand);
static_assert(sizeof(Operand) <= 2 * kSystemPointerSize,
@@ -1071,154 +1074,6 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
void roundpd(XMMRegister dst, XMMRegister src, RoundingMode mode);
// AVX instructions
- void vfmadd132sd(XMMRegister dst, XMMRegister src1, XMMRegister src2) {
- vfmadd132sd(dst, src1, Operand(src2));
- }
- void vfmadd213sd(XMMRegister dst, XMMRegister src1, XMMRegister src2) {
- vfmadd213sd(dst, src1, Operand(src2));
- }
- void vfmadd231sd(XMMRegister dst, XMMRegister src1, XMMRegister src2) {
- vfmadd231sd(dst, src1, Operand(src2));
- }
- void vfmadd132sd(XMMRegister dst, XMMRegister src1, Operand src2) {
- vfmasd(0x99, dst, src1, src2);
- }
- void vfmadd213sd(XMMRegister dst, XMMRegister src1, Operand src2) {
- vfmasd(0xa9, dst, src1, src2);
- }
- void vfmadd231sd(XMMRegister dst, XMMRegister src1, Operand src2) {
- vfmasd(0xb9, dst, src1, src2);
- }
- void vfmsub132sd(XMMRegister dst, XMMRegister src1, XMMRegister src2) {
- vfmsub132sd(dst, src1, Operand(src2));
- }
- void vfmsub213sd(XMMRegister dst, XMMRegister src1, XMMRegister src2) {
- vfmsub213sd(dst, src1, Operand(src2));
- }
- void vfmsub231sd(XMMRegister dst, XMMRegister src1, XMMRegister src2) {
- vfmsub231sd(dst, src1, Operand(src2));
- }
- void vfmsub132sd(XMMRegister dst, XMMRegister src1, Operand src2) {
- vfmasd(0x9b, dst, src1, src2);
- }
- void vfmsub213sd(XMMRegister dst, XMMRegister src1, Operand src2) {
- vfmasd(0xab, dst, src1, src2);
- }
- void vfmsub231sd(XMMRegister dst, XMMRegister src1, Operand src2) {
- vfmasd(0xbb, dst, src1, src2);
- }
- void vfnmadd132sd(XMMRegister dst, XMMRegister src1, XMMRegister src2) {
- vfnmadd132sd(dst, src1, Operand(src2));
- }
- void vfnmadd213sd(XMMRegister dst, XMMRegister src1, XMMRegister src2) {
- vfnmadd213sd(dst, src1, Operand(src2));
- }
- void vfnmadd231sd(XMMRegister dst, XMMRegister src1, XMMRegister src2) {
- vfnmadd231sd(dst, src1, Operand(src2));
- }
- void vfnmadd132sd(XMMRegister dst, XMMRegister src1, Operand src2) {
- vfmasd(0x9d, dst, src1, src2);
- }
- void vfnmadd213sd(XMMRegister dst, XMMRegister src1, Operand src2) {
- vfmasd(0xad, dst, src1, src2);
- }
- void vfnmadd231sd(XMMRegister dst, XMMRegister src1, Operand src2) {
- vfmasd(0xbd, dst, src1, src2);
- }
- void vfnmsub132sd(XMMRegister dst, XMMRegister src1, XMMRegister src2) {
- vfnmsub132sd(dst, src1, Operand(src2));
- }
- void vfnmsub213sd(XMMRegister dst, XMMRegister src1, XMMRegister src2) {
- vfnmsub213sd(dst, src1, Operand(src2));
- }
- void vfnmsub231sd(XMMRegister dst, XMMRegister src1, XMMRegister src2) {
- vfnmsub231sd(dst, src1, Operand(src2));
- }
- void vfnmsub132sd(XMMRegister dst, XMMRegister src1, Operand src2) {
- vfmasd(0x9f, dst, src1, src2);
- }
- void vfnmsub213sd(XMMRegister dst, XMMRegister src1, Operand src2) {
- vfmasd(0xaf, dst, src1, src2);
- }
- void vfnmsub231sd(XMMRegister dst, XMMRegister src1, Operand src2) {
- vfmasd(0xbf, dst, src1, src2);
- }
- void vfmasd(byte op, XMMRegister dst, XMMRegister src1, Operand src2);
-
- void vfmadd132ss(XMMRegister dst, XMMRegister src1, XMMRegister src2) {
- vfmadd132ss(dst, src1, Operand(src2));
- }
- void vfmadd213ss(XMMRegister dst, XMMRegister src1, XMMRegister src2) {
- vfmadd213ss(dst, src1, Operand(src2));
- }
- void vfmadd231ss(XMMRegister dst, XMMRegister src1, XMMRegister src2) {
- vfmadd231ss(dst, src1, Operand(src2));
- }
- void vfmadd132ss(XMMRegister dst, XMMRegister src1, Operand src2) {
- vfmass(0x99, dst, src1, src2);
- }
- void vfmadd213ss(XMMRegister dst, XMMRegister src1, Operand src2) {
- vfmass(0xa9, dst, src1, src2);
- }
- void vfmadd231ss(XMMRegister dst, XMMRegister src1, Operand src2) {
- vfmass(0xb9, dst, src1, src2);
- }
- void vfmsub132ss(XMMRegister dst, XMMRegister src1, XMMRegister src2) {
- vfmsub132ss(dst, src1, Operand(src2));
- }
- void vfmsub213ss(XMMRegister dst, XMMRegister src1, XMMRegister src2) {
- vfmsub213ss(dst, src1, Operand(src2));
- }
- void vfmsub231ss(XMMRegister dst, XMMRegister src1, XMMRegister src2) {
- vfmsub231ss(dst, src1, Operand(src2));
- }
- void vfmsub132ss(XMMRegister dst, XMMRegister src1, Operand src2) {
- vfmass(0x9b, dst, src1, src2);
- }
- void vfmsub213ss(XMMRegister dst, XMMRegister src1, Operand src2) {
- vfmass(0xab, dst, src1, src2);
- }
- void vfmsub231ss(XMMRegister dst, XMMRegister src1, Operand src2) {
- vfmass(0xbb, dst, src1, src2);
- }
- void vfnmadd132ss(XMMRegister dst, XMMRegister src1, XMMRegister src2) {
- vfnmadd132ss(dst, src1, Operand(src2));
- }
- void vfnmadd213ss(XMMRegister dst, XMMRegister src1, XMMRegister src2) {
- vfnmadd213ss(dst, src1, Operand(src2));
- }
- void vfnmadd231ss(XMMRegister dst, XMMRegister src1, XMMRegister src2) {
- vfnmadd231ss(dst, src1, Operand(src2));
- }
- void vfnmadd132ss(XMMRegister dst, XMMRegister src1, Operand src2) {
- vfmass(0x9d, dst, src1, src2);
- }
- void vfnmadd213ss(XMMRegister dst, XMMRegister src1, Operand src2) {
- vfmass(0xad, dst, src1, src2);
- }
- void vfnmadd231ss(XMMRegister dst, XMMRegister src1, Operand src2) {
- vfmass(0xbd, dst, src1, src2);
- }
- void vfnmsub132ss(XMMRegister dst, XMMRegister src1, XMMRegister src2) {
- vfnmsub132ss(dst, src1, Operand(src2));
- }
- void vfnmsub213ss(XMMRegister dst, XMMRegister src1, XMMRegister src2) {
- vfnmsub213ss(dst, src1, Operand(src2));
- }
- void vfnmsub231ss(XMMRegister dst, XMMRegister src1, XMMRegister src2) {
- vfnmsub231ss(dst, src1, Operand(src2));
- }
- void vfnmsub132ss(XMMRegister dst, XMMRegister src1, Operand src2) {
- vfmass(0x9f, dst, src1, src2);
- }
- void vfnmsub213ss(XMMRegister dst, XMMRegister src1, Operand src2) {
- vfmass(0xaf, dst, src1, src2);
- }
- void vfnmsub231ss(XMMRegister dst, XMMRegister src1, Operand src2) {
- vfmass(0xbf, dst, src1, src2);
- }
- void vfmass(byte op, XMMRegister dst, XMMRegister src1, Operand src2);
-
void vaddss(XMMRegister dst, XMMRegister src1, XMMRegister src2) {
vaddss(dst, src1, Operand(src2));
}
@@ -1755,6 +1610,18 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
AVX2_BROADCAST_LIST(AVX2_INSTRUCTION)
#undef AVX2_INSTRUCTION
+#define FMA(instr, length, prefix, escape1, escape2, extension, opcode) \
+ void instr(XMMRegister dst, XMMRegister src1, XMMRegister src2) { \
+ vinstr(0x##opcode, dst, src1, src2, k##length, k##prefix, \
+ k##escape1##escape2, k##extension, FMA3); \
+ } \
+ void instr(XMMRegister dst, XMMRegister src1, Operand src2) { \
+ vinstr(0x##opcode, dst, src1, src2, k##length, k##prefix, \
+ k##escape1##escape2, k##extension, FMA3); \
+ }
+ FMA_INSTRUCTION_LIST(FMA)
+#undef FMA
+
// Prefetch src position into cache level.
// Level 1, 2 or 3 specifies CPU cache level. Level 0 specifies a
// non-temporal
@@ -1774,9 +1641,9 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
// Writes a single byte or word of data in the code stream. Used for
// inline tables, e.g., jump-tables.
void db(uint8_t data);
- void dd(uint32_t data, RelocInfo::Mode rmode = RelocInfo::NONE);
- void dq(uint64_t data, RelocInfo::Mode rmode = RelocInfo::NONE);
- void dp(uintptr_t data, RelocInfo::Mode rmode = RelocInfo::NONE) {
+ void dd(uint32_t data, RelocInfo::Mode rmode = RelocInfo::NO_INFO);
+ void dq(uint64_t data, RelocInfo::Mode rmode = RelocInfo::NO_INFO);
+ void dp(uintptr_t data, RelocInfo::Mode rmode = RelocInfo::NO_INFO) {
dd(data, rmode);
}
void dd(Label* label);
@@ -1883,9 +1750,19 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
SIMDPrefix pp, LeadingOpcode m, VexW w, CpuFeature = AVX);
void vinstr(byte op, XMMRegister dst, XMMRegister src1, Operand src2,
SIMDPrefix pp, LeadingOpcode m, VexW w, CpuFeature = AVX);
+ void vinstr(byte op, XMMRegister dst, XMMRegister src1, XMMRegister src2,
+ VectorLength l, SIMDPrefix pp, LeadingOpcode m, VexW w,
+ CpuFeature = AVX);
+ void vinstr(byte op, XMMRegister dst, XMMRegister src1, Operand src2,
+ VectorLength l, SIMDPrefix pp, LeadingOpcode m, VexW w,
+ CpuFeature = AVX);
// Most BMI instructions are similar.
void bmi1(byte op, Register reg, Register vreg, Operand rm);
void bmi2(SIMDPrefix pp, byte op, Register reg, Register vreg, Operand rm);
+ void fma_instr(byte op, XMMRegister dst, XMMRegister src1, XMMRegister src2,
+ VectorLength l, SIMDPrefix pp, LeadingOpcode m, VexW w);
+ void fma_instr(byte op, XMMRegister dst, XMMRegister src1, Operand src2,
+ VectorLength l, SIMDPrefix pp, LeadingOpcode m, VexW w);
// record reloc info for current pc_
void RecordRelocInfo(RelocInfo::Mode rmode, intptr_t data = 0);
diff --git a/deps/v8/src/codegen/ia32/fma-instr.h b/deps/v8/src/codegen/ia32/fma-instr.h
new file mode 100644
index 0000000000..ab8746aec8
--- /dev/null
+++ b/deps/v8/src/codegen/ia32/fma-instr.h
@@ -0,0 +1,58 @@
+// Copyright 2021 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+//
+#ifndef V8_CODEGEN_IA32_FMA_INSTR_H_
+#define V8_CODEGEN_IA32_FMA_INSTR_H_
+
+#define FMA_SD_INSTRUCTION_LIST(V) \
+ V(vfmadd132sd, L128, 66, 0F, 38, W1, 99) \
+ V(vfmadd213sd, L128, 66, 0F, 38, W1, a9) \
+ V(vfmadd231sd, L128, 66, 0F, 38, W1, b9) \
+ V(vfmsub132sd, L128, 66, 0F, 38, W1, 9b) \
+ V(vfmsub213sd, L128, 66, 0F, 38, W1, ab) \
+ V(vfmsub231sd, L128, 66, 0F, 38, W1, bb) \
+ V(vfnmadd132sd, L128, 66, 0F, 38, W1, 9d) \
+ V(vfnmadd213sd, L128, 66, 0F, 38, W1, ad) \
+ V(vfnmadd231sd, L128, 66, 0F, 38, W1, bd) \
+ V(vfnmsub132sd, L128, 66, 0F, 38, W1, 9f) \
+ V(vfnmsub213sd, L128, 66, 0F, 38, W1, af) \
+ V(vfnmsub231sd, L128, 66, 0F, 38, W1, bf)
+
+#define FMA_SS_INSTRUCTION_LIST(V) \
+ V(vfmadd132ss, LIG, 66, 0F, 38, W0, 99) \
+ V(vfmadd213ss, LIG, 66, 0F, 38, W0, a9) \
+ V(vfmadd231ss, LIG, 66, 0F, 38, W0, b9) \
+ V(vfmsub132ss, LIG, 66, 0F, 38, W0, 9b) \
+ V(vfmsub213ss, LIG, 66, 0F, 38, W0, ab) \
+ V(vfmsub231ss, LIG, 66, 0F, 38, W0, bb) \
+ V(vfnmadd132ss, LIG, 66, 0F, 38, W0, 9d) \
+ V(vfnmadd213ss, LIG, 66, 0F, 38, W0, ad) \
+ V(vfnmadd231ss, LIG, 66, 0F, 38, W0, bd) \
+ V(vfnmsub132ss, LIG, 66, 0F, 38, W0, 9f) \
+ V(vfnmsub213ss, LIG, 66, 0F, 38, W0, af) \
+ V(vfnmsub231ss, LIG, 66, 0F, 38, W0, bf)
+
+#define FMA_PS_INSTRUCTION_LIST(V) \
+ V(vfmadd132ps, L128, 66, 0F, 38, W0, 98) \
+ V(vfmadd213ps, L128, 66, 0F, 38, W0, a8) \
+ V(vfmadd231ps, L128, 66, 0F, 38, W0, b8) \
+ V(vfnmadd132ps, L128, 66, 0F, 38, W0, 9c) \
+ V(vfnmadd213ps, L128, 66, 0F, 38, W0, ac) \
+ V(vfnmadd231ps, L128, 66, 0F, 38, W0, bc)
+
+#define FMA_PD_INSTRUCTION_LIST(V) \
+ V(vfmadd132pd, L128, 66, 0F, 38, W1, 98) \
+ V(vfmadd213pd, L128, 66, 0F, 38, W1, a8) \
+ V(vfmadd231pd, L128, 66, 0F, 38, W1, b8) \
+ V(vfnmadd132pd, L128, 66, 0F, 38, W1, 9c) \
+ V(vfnmadd213pd, L128, 66, 0F, 38, W1, ac) \
+ V(vfnmadd231pd, L128, 66, 0F, 38, W1, bc)
+
+#define FMA_INSTRUCTION_LIST(V) \
+ FMA_SD_INSTRUCTION_LIST(V) \
+ FMA_SS_INSTRUCTION_LIST(V) \
+ FMA_PS_INSTRUCTION_LIST(V) \
+ FMA_PD_INSTRUCTION_LIST(V)
+
+#endif // V8_CODEGEN_IA32_FMA_INSTR_H_
diff --git a/deps/v8/src/codegen/ia32/macro-assembler-ia32.cc b/deps/v8/src/codegen/ia32/macro-assembler-ia32.cc
index 5a60679853..b4824736b9 100644
--- a/deps/v8/src/codegen/ia32/macro-assembler-ia32.cc
+++ b/deps/v8/src/codegen/ia32/macro-assembler-ia32.cc
@@ -326,7 +326,7 @@ int TurboAssembler::RequiredStackSizeForCallerSaved(SaveFPRegsMode fp_mode,
int TurboAssembler::PushCallerSaved(SaveFPRegsMode fp_mode, Register exclusion1,
Register exclusion2, Register exclusion3) {
ASM_CODE_COMMENT(this);
- // We don't allow a GC during a store buffer overflow so there is no need to
+ // We don't allow a GC in a write barrier slow path so there is no need to
// store the registers in any particular way, but we do have to store and
// restore them.
int bytes = 0;
@@ -1212,7 +1212,7 @@ void MacroAssembler::JumpToExternalReference(const ExternalReference& ext,
Jump(code, RelocInfo::CODE_TARGET);
}
-void MacroAssembler::JumpToInstructionStream(Address entry) {
+void MacroAssembler::JumpToOffHeapInstructionStream(Address entry) {
jmp(entry, RelocInfo::OFF_HEAP_TARGET);
}
diff --git a/deps/v8/src/codegen/ia32/macro-assembler-ia32.h b/deps/v8/src/codegen/ia32/macro-assembler-ia32.h
index 89dd2dbcfd..e1b7e15363 100644
--- a/deps/v8/src/codegen/ia32/macro-assembler-ia32.h
+++ b/deps/v8/src/codegen/ia32/macro-assembler-ia32.h
@@ -622,7 +622,7 @@ class V8_EXPORT_PRIVATE MacroAssembler : public TurboAssembler {
bool builtin_exit_frame = false);
// Generates a trampoline to jump to the off-heap instruction stream.
- void JumpToInstructionStream(Address entry);
+ void JumpToOffHeapInstructionStream(Address entry);
// ---------------------------------------------------------------------------
// Utilities
diff --git a/deps/v8/src/codegen/loong64/assembler-loong64.cc b/deps/v8/src/codegen/loong64/assembler-loong64.cc
index 131fff9a6a..d212bec035 100644
--- a/deps/v8/src/codegen/loong64/assembler-loong64.cc
+++ b/deps/v8/src/codegen/loong64/assembler-loong64.cc
@@ -672,7 +672,7 @@ int Assembler::BranchOffset(Instr instr) {
// instruction space. There is no guarantee that the relocated location can be
// similarly encoded.
bool Assembler::MustUseReg(RelocInfo::Mode rmode) {
- return !RelocInfo::IsNone(rmode);
+ return !RelocInfo::IsNoInfo(rmode);
}
void Assembler::GenB(Opcode opcode, Register rj, int32_t si21) {
@@ -2168,7 +2168,7 @@ void Assembler::dd(uint32_t data, RelocInfo::Mode rmode) {
if (!is_buffer_growth_blocked()) {
CheckBuffer();
}
- if (!RelocInfo::IsNone(rmode)) {
+ if (!RelocInfo::IsNoInfo(rmode)) {
DCHECK(RelocInfo::IsDataEmbeddedObject(rmode) ||
RelocInfo::IsLiteralConstant(rmode));
RecordRelocInfo(rmode);
@@ -2181,7 +2181,7 @@ void Assembler::dq(uint64_t data, RelocInfo::Mode rmode) {
if (!is_buffer_growth_blocked()) {
CheckBuffer();
}
- if (!RelocInfo::IsNone(rmode)) {
+ if (!RelocInfo::IsNoInfo(rmode)) {
DCHECK(RelocInfo::IsDataEmbeddedObject(rmode) ||
RelocInfo::IsLiteralConstant(rmode));
RecordRelocInfo(rmode);
diff --git a/deps/v8/src/codegen/loong64/assembler-loong64.h b/deps/v8/src/codegen/loong64/assembler-loong64.h
index 5264258d93..63fe001d22 100644
--- a/deps/v8/src/codegen/loong64/assembler-loong64.h
+++ b/deps/v8/src/codegen/loong64/assembler-loong64.h
@@ -33,7 +33,7 @@ class Operand {
public:
// Immediate.
V8_INLINE explicit Operand(int64_t immediate,
- RelocInfo::Mode rmode = RelocInfo::NONE)
+ RelocInfo::Mode rmode = RelocInfo::NO_INFO)
: rm_(no_reg), rmode_(rmode) {
value_.immediate = immediate;
}
@@ -43,7 +43,8 @@ class Operand {
}
V8_INLINE explicit Operand(const char* s);
explicit Operand(Handle<HeapObject> handle);
- V8_INLINE explicit Operand(Smi value) : rm_(no_reg), rmode_(RelocInfo::NONE) {
+ V8_INLINE explicit Operand(Smi value)
+ : rm_(no_reg), rmode_(RelocInfo::NO_INFO) {
value_.immediate = static_cast<intptr_t>(value.ptr());
}
@@ -738,9 +739,9 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
// Writes a single byte or word of data in the code stream. Used for
// inline tables, e.g., jump-tables.
void db(uint8_t data);
- void dd(uint32_t data, RelocInfo::Mode rmode = RelocInfo::NONE);
- void dq(uint64_t data, RelocInfo::Mode rmode = RelocInfo::NONE);
- void dp(uintptr_t data, RelocInfo::Mode rmode = RelocInfo::NONE) {
+ void dd(uint32_t data, RelocInfo::Mode rmode = RelocInfo::NO_INFO);
+ void dq(uint64_t data, RelocInfo::Mode rmode = RelocInfo::NO_INFO);
+ void dp(uintptr_t data, RelocInfo::Mode rmode = RelocInfo::NO_INFO) {
dq(data, rmode);
}
void dd(Label* label);
diff --git a/deps/v8/src/codegen/loong64/macro-assembler-loong64.cc b/deps/v8/src/codegen/loong64/macro-assembler-loong64.cc
index 6577b194c4..cccfa6294c 100644
--- a/deps/v8/src/codegen/loong64/macro-assembler-loong64.cc
+++ b/deps/v8/src/codegen/loong64/macro-assembler-loong64.cc
@@ -865,7 +865,7 @@ void TurboAssembler::Alsl_w(Register rd, Register rj, Register rk, uint8_t sa,
void TurboAssembler::Alsl_d(Register rd, Register rj, Register rk, uint8_t sa,
Register scratch) {
- DCHECK(sa >= 1 && sa <= 31);
+ DCHECK(sa >= 1 && sa <= 63);
if (sa <= 4) {
alsl_d(rd, rj, rk, sa);
} else {
@@ -2677,9 +2677,9 @@ void TurboAssembler::Call(Address target, RelocInfo::Mode rmode, Condition cond,
BranchShort(&skip, NegateCondition(cond), rj, rk);
}
intptr_t offset_diff = target - pc_offset();
- if (RelocInfo::IsNone(rmode) && is_int28(offset_diff)) {
+ if (RelocInfo::IsNoInfo(rmode) && is_int28(offset_diff)) {
bl(offset_diff >> 2);
- } else if (RelocInfo::IsNone(rmode) && is_int38(offset_diff)) {
+ } else if (RelocInfo::IsNoInfo(rmode) && is_int38(offset_diff)) {
pcaddu18i(t7, static_cast<int32_t>(offset_diff) >> 18);
jirl(ra, t7, (offset_diff & 0x3ffff) >> 2);
} else {
@@ -3348,7 +3348,7 @@ void MacroAssembler::JumpToExternalReference(const ExternalReference& builtin,
Jump(code, RelocInfo::CODE_TARGET, al, zero_reg, Operand(zero_reg));
}
-void MacroAssembler::JumpToInstructionStream(Address entry) {
+void MacroAssembler::JumpToOffHeapInstructionStream(Address entry) {
li(kOffHeapTrampolineRegister, Operand(entry, RelocInfo::OFF_HEAP_TARGET));
Jump(kOffHeapTrampolineRegister);
}
@@ -3745,6 +3745,23 @@ void MacroAssembler::AssertFunction(Register object) {
}
}
+void MacroAssembler::AssertCallableFunction(Register object) {
+ if (FLAG_debug_code) {
+ BlockTrampolinePoolScope block_trampoline_pool(this);
+ STATIC_ASSERT(kSmiTag == 0);
+ SmiTst(object, t8);
+ Check(ne, AbortReason::kOperandIsASmiAndNotAFunction, t8,
+ Operand(zero_reg));
+ Push(object);
+ LoadMap(object, object);
+ GetInstanceTypeRange(object, object, FIRST_CALLABLE_JS_FUNCTION_TYPE, t8);
+ Check(ls, AbortReason::kOperandIsNotACallableFunction, t8,
+ Operand(LAST_CALLABLE_JS_FUNCTION_TYPE -
+ FIRST_CALLABLE_JS_FUNCTION_TYPE));
+ Pop(object);
+ }
+}
+
void MacroAssembler::AssertBoundFunction(Register object) {
if (FLAG_debug_code) {
BlockTrampolinePoolScope block_trampoline_pool(this);
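
Editor's note: a minimal standalone sketch (plain C++, placeholder constants) of the check the new AssertCallableFunction emits above. GetInstanceTypeRange biases the instance type by FIRST_CALLABLE_JS_FUNCTION_TYPE, so the single unsigned lower-or-same compare (Check(ls, ...)) covers the whole callable range.

#include <cstdint>

// Placeholder values; the real constants live in V8's instance-type.h and are
// not part of this diff.
constexpr uint16_t kFirstCallableJSFunctionType = 2000;
constexpr uint16_t kLastCallableJSFunctionType = 2003;

bool IsCallableJSFunction(uint16_t instance_type) {
  // GetInstanceTypeRange: bias so the callable range starts at zero.
  uint16_t biased =
      static_cast<uint16_t>(instance_type - kFirstCallableJSFunctionType);
  // Check(ls, ...): one unsigned compare handles both bounds at once.
  return biased <= (kLastCallableJSFunctionType - kFirstCallableJSFunctionType);
}
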
diff --git a/deps/v8/src/codegen/loong64/macro-assembler-loong64.h b/deps/v8/src/codegen/loong64/macro-assembler-loong64.h
index 866a74f81c..3d82b87a47 100644
--- a/deps/v8/src/codegen/loong64/macro-assembler-loong64.h
+++ b/deps/v8/src/codegen/loong64/macro-assembler-loong64.h
@@ -963,7 +963,7 @@ class V8_EXPORT_PRIVATE MacroAssembler : public TurboAssembler {
bool builtin_exit_frame = false);
// Generates a trampoline to jump to the off-heap instruction stream.
- void JumpToInstructionStream(Address entry);
+ void JumpToOffHeapInstructionStream(Address entry);
// ---------------------------------------------------------------------------
// In-place weak references.
@@ -1024,6 +1024,10 @@ class V8_EXPORT_PRIVATE MacroAssembler : public TurboAssembler {
// Abort execution if argument is not a JSFunction, enabled via --debug-code.
void AssertFunction(Register object);
+ // Abort execution if argument is not a callable JSFunction, enabled via
+ // --debug-code.
+ void AssertCallableFunction(Register object);
+
// Abort execution if argument is not a JSBoundFunction,
// enabled via --debug-code.
void AssertBoundFunction(Register object);
@@ -1062,7 +1066,7 @@ void TurboAssembler::GenerateSwitchTable(Register index, size_t case_count,
Func GetLabelFunction) {
UseScratchRegisterScope scope(this);
Register scratch = scope.Acquire();
- BlockTrampolinePoolFor((3 + case_count) * kInstrSize);
+ BlockTrampolinePoolFor(3 + case_count);
pcaddi(scratch, 3);
alsl_d(scratch, index, scratch, kInstrSizeLog2);
diff --git a/deps/v8/src/codegen/machine-type.h b/deps/v8/src/codegen/machine-type.h
index b3f8ef56b5..981ac9783f 100644
--- a/deps/v8/src/codegen/machine-type.h
+++ b/deps/v8/src/codegen/machine-type.h
@@ -40,7 +40,9 @@ enum class MachineRepresentation : uint8_t {
kTagged, // (uncompressed) Object (Smi or HeapObject)
kCompressedPointer, // (compressed) HeapObject
kCompressed, // (compressed) Object (Smi or HeapObject)
- kCagedPointer, // Guaranteed to point into the virtual memory cage.
+ // A 64-bit pointer encoded in a way (e.g. as offset) that guarantees it will
+ // point into the virtual memory cage.
+ kCagedPointer,
// FP and SIMD representations must be last, and in order of increasing size.
kFloat32,
kFloat64,
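
Editor's note: the reworded comment describes kCagedPointer as an offset-style encoding. A standalone sketch of that idea follows (assumed cage size and masking; this is not V8's actual scheme): storing the pointer as an offset lets the decoder force every result back inside the cage.

#include <cstdint>

constexpr uint64_t kCageSizeLog2 = 40;  // assumed cage size for illustration
constexpr uint64_t kCageSize = uint64_t{1} << kCageSizeLog2;
constexpr uint64_t kOffsetMask = kCageSize - 1;

// Encode: keep only the offset of the pointer within the cage.
uint64_t EncodeCagedPointer(uint64_t cage_base, uint64_t raw_ptr) {
  return raw_ptr - cage_base;
}

// Decode: masking the offset guarantees the result points into the cage,
// even if the stored value was corrupted.
uint64_t DecodeCagedPointer(uint64_t cage_base, uint64_t offset) {
  return cage_base + (offset & kOffsetMask);
}
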
diff --git a/deps/v8/src/codegen/mips/assembler-mips.cc b/deps/v8/src/codegen/mips/assembler-mips.cc
index e1ba6e511f..267281396a 100644
--- a/deps/v8/src/codegen/mips/assembler-mips.cc
+++ b/deps/v8/src/codegen/mips/assembler-mips.cc
@@ -1133,7 +1133,7 @@ int Assembler::BranchOffset(Instr instr) {
// space. There is no guarantee that the relocated location can be similarly
// encoded.
bool Assembler::MustUseReg(RelocInfo::Mode rmode) {
- return !RelocInfo::IsNone(rmode);
+ return !RelocInfo::IsNoInfo(rmode);
}
void Assembler::GenInstrRegister(Opcode opcode, Register rs, Register rt,
@@ -3591,7 +3591,7 @@ void Assembler::db(uint8_t data) {
void Assembler::dd(uint32_t data, RelocInfo::Mode rmode) {
CheckForEmitInForbiddenSlot();
- if (!RelocInfo::IsNone(rmode)) {
+ if (!RelocInfo::IsNoInfo(rmode)) {
DCHECK(RelocInfo::IsDataEmbeddedObject(rmode) ||
RelocInfo::IsLiteralConstant(rmode));
RecordRelocInfo(rmode);
@@ -3602,7 +3602,7 @@ void Assembler::dd(uint32_t data, RelocInfo::Mode rmode) {
void Assembler::dq(uint64_t data, RelocInfo::Mode rmode) {
CheckForEmitInForbiddenSlot();
- if (!RelocInfo::IsNone(rmode)) {
+ if (!RelocInfo::IsNoInfo(rmode)) {
DCHECK(RelocInfo::IsDataEmbeddedObject(rmode) ||
RelocInfo::IsLiteralConstant(rmode));
RecordRelocInfo(rmode);
@@ -3677,7 +3677,7 @@ void Assembler::CheckTrampolinePool() {
bc(&after_pool);
nop();
} else {
- GenPCRelativeJump(t8, t9, 0, RelocInfo::NONE,
+ GenPCRelativeJump(t8, t9, 0, RelocInfo::NO_INFO,
BranchDelaySlot::PROTECT);
}
}
@@ -3799,7 +3799,7 @@ void Assembler::GenPCRelativeJump(Register tf, Register ts, int32_t imm32,
// or when changing imm32 that lui/ori pair loads.
or_(tf, ra, zero_reg);
nal(); // Relative place of nal instruction determines kLongBranchPCOffset.
- if (!RelocInfo::IsNone(rmode)) {
+ if (!RelocInfo::IsNoInfo(rmode)) {
RecordRelocInfo(rmode);
}
lui(ts, (imm32 & kHiMask) >> kLuiShift);
@@ -3817,7 +3817,7 @@ void Assembler::GenPCRelativeJump(Register tf, Register ts, int32_t imm32,
void Assembler::GenPCRelativeJumpAndLink(Register t, int32_t imm32,
RelocInfo::Mode rmode,
BranchDelaySlot bdslot) {
- if (!RelocInfo::IsNone(rmode)) {
+ if (!RelocInfo::IsNoInfo(rmode)) {
RecordRelocInfo(rmode);
}
// Order of these instructions is relied upon when patching them
diff --git a/deps/v8/src/codegen/mips/assembler-mips.h b/deps/v8/src/codegen/mips/assembler-mips.h
index 2ca7e9b363..0acee5e39d 100644
--- a/deps/v8/src/codegen/mips/assembler-mips.h
+++ b/deps/v8/src/codegen/mips/assembler-mips.h
@@ -63,7 +63,7 @@ class Operand {
public:
// Immediate.
V8_INLINE explicit Operand(int32_t immediate,
- RelocInfo::Mode rmode = RelocInfo::NONE)
+ RelocInfo::Mode rmode = RelocInfo::NO_INFO)
: rm_(no_reg), rmode_(rmode) {
value_.immediate = immediate;
}
@@ -73,7 +73,8 @@ class Operand {
}
V8_INLINE explicit Operand(const char* s);
explicit Operand(Handle<HeapObject> handle);
- V8_INLINE explicit Operand(Smi value) : rm_(no_reg), rmode_(RelocInfo::NONE) {
+ V8_INLINE explicit Operand(Smi value)
+ : rm_(no_reg), rmode_(RelocInfo::NO_INFO) {
value_.immediate = static_cast<intptr_t>(value.ptr());
}
@@ -1399,9 +1400,9 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
// Writes a single byte or word of data in the code stream. Used for
// inline tables, e.g., jump-tables.
void db(uint8_t data);
- void dd(uint32_t data, RelocInfo::Mode rmode = RelocInfo::NONE);
- void dq(uint64_t data, RelocInfo::Mode rmode = RelocInfo::NONE);
- void dp(uintptr_t data, RelocInfo::Mode rmode = RelocInfo::NONE) {
+ void dd(uint32_t data, RelocInfo::Mode rmode = RelocInfo::NO_INFO);
+ void dq(uint64_t data, RelocInfo::Mode rmode = RelocInfo::NO_INFO);
+ void dp(uintptr_t data, RelocInfo::Mode rmode = RelocInfo::NO_INFO) {
dd(data, rmode);
}
void dd(Label* label);
@@ -1515,6 +1516,8 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
inline int UnboundLabelsCount() { return unbound_labels_count_; }
+ bool is_trampoline_emitted() const { return trampoline_emitted_; }
+
protected:
// Load Scaled Address instruction.
void lsa(Register rd, Register rt, Register rs, uint8_t sa);
@@ -1570,8 +1573,6 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
bool has_exception() const { return internal_trampoline_exception_; }
- bool is_trampoline_emitted() const { return trampoline_emitted_; }
-
// Temporarily block automatic assembly buffer growth.
void StartBlockGrowBuffer() {
DCHECK(!block_buffer_growth_);
diff --git a/deps/v8/src/codegen/mips/macro-assembler-mips.cc b/deps/v8/src/codegen/mips/macro-assembler-mips.cc
index 4c76a1c1ec..ea4639c37c 100644
--- a/deps/v8/src/codegen/mips/macro-assembler-mips.cc
+++ b/deps/v8/src/codegen/mips/macro-assembler-mips.cc
@@ -4047,7 +4047,7 @@ void TurboAssembler::BranchLong(Label* L, BranchDelaySlot bdslot) {
BlockTrampolinePoolScope block_trampoline_pool(this);
int32_t imm32;
imm32 = branch_long_offset(L);
- GenPCRelativeJump(t8, t9, imm32, RelocInfo::NONE, bdslot);
+ GenPCRelativeJump(t8, t9, imm32, RelocInfo::NO_INFO, bdslot);
}
}
@@ -4057,7 +4057,7 @@ void TurboAssembler::BranchLong(int32_t offset, BranchDelaySlot bdslot) {
} else {
// Generate position independent long branch.
BlockTrampolinePoolScope block_trampoline_pool(this);
- GenPCRelativeJump(t8, t9, offset, RelocInfo::NONE, bdslot);
+ GenPCRelativeJump(t8, t9, offset, RelocInfo::NO_INFO, bdslot);
}
}
@@ -4070,7 +4070,7 @@ void TurboAssembler::BranchAndLinkLong(Label* L, BranchDelaySlot bdslot) {
BlockTrampolinePoolScope block_trampoline_pool(this);
int32_t imm32;
imm32 = branch_long_offset(L);
- GenPCRelativeJumpAndLink(t8, imm32, RelocInfo::NONE, bdslot);
+ GenPCRelativeJumpAndLink(t8, imm32, RelocInfo::NO_INFO, bdslot);
}
}
@@ -4704,7 +4704,7 @@ void MacroAssembler::JumpToExternalReference(const ExternalReference& builtin,
Jump(code, RelocInfo::CODE_TARGET, al, zero_reg, Operand(zero_reg), bd);
}
-void MacroAssembler::JumpToInstructionStream(Address entry) {
+void MacroAssembler::JumpToOffHeapInstructionStream(Address entry) {
li(kOffHeapTrampolineRegister, Operand(entry, RelocInfo::OFF_HEAP_TARGET));
Jump(kOffHeapTrampolineRegister);
}
@@ -5084,6 +5084,23 @@ void MacroAssembler::AssertFunction(Register object) {
}
}
+void MacroAssembler::AssertCallableFunction(Register object) {
+ if (FLAG_debug_code) {
+ BlockTrampolinePoolScope block_trampoline_pool(this);
+ STATIC_ASSERT(kSmiTag == 0);
+ SmiTst(object, t8);
+ Check(ne, AbortReason::kOperandIsASmiAndNotAFunction, t8,
+ Operand(zero_reg));
+ push(object);
+ LoadMap(object, object);
+ GetInstanceTypeRange(object, object, FIRST_CALLABLE_JS_FUNCTION_TYPE, t8);
+ Check(ls, AbortReason::kOperandIsNotACallableFunction, t8,
+ Operand(LAST_CALLABLE_JS_FUNCTION_TYPE -
+ FIRST_CALLABLE_JS_FUNCTION_TYPE));
+ pop(object);
+ }
+}
+
void MacroAssembler::AssertBoundFunction(Register object) {
if (FLAG_debug_code) {
BlockTrampolinePoolScope block_trampoline_pool(this);
diff --git a/deps/v8/src/codegen/mips/macro-assembler-mips.h b/deps/v8/src/codegen/mips/macro-assembler-mips.h
index eaed98dfe6..f2491fcf19 100644
--- a/deps/v8/src/codegen/mips/macro-assembler-mips.h
+++ b/deps/v8/src/codegen/mips/macro-assembler-mips.h
@@ -1074,7 +1074,7 @@ class V8_EXPORT_PRIVATE MacroAssembler : public TurboAssembler {
bool builtin_exit_frame = false);
// Generates a trampoline to jump to the off-heap instruction stream.
- void JumpToInstructionStream(Address entry);
+ void JumpToOffHeapInstructionStream(Address entry);
// ---------------------------------------------------------------------------
// In-place weak references.
@@ -1132,6 +1132,10 @@ class V8_EXPORT_PRIVATE MacroAssembler : public TurboAssembler {
// Abort execution if argument is not a JSFunction, enabled via --debug-code.
void AssertFunction(Register object);
+ // Abort execution if argument is not a callable JSFunction, enabled via
+ // --debug-code.
+ void AssertCallableFunction(Register object);
+
// Abort execution if argument is not a JSBoundFunction,
// enabled via --debug-code.
void AssertBoundFunction(Register object);
diff --git a/deps/v8/src/codegen/mips64/assembler-mips64.cc b/deps/v8/src/codegen/mips64/assembler-mips64.cc
index a82bd5511e..9f5b34e956 100644
--- a/deps/v8/src/codegen/mips64/assembler-mips64.cc
+++ b/deps/v8/src/codegen/mips64/assembler-mips64.cc
@@ -265,7 +265,7 @@ const Instr kLwSwOffsetMask = kImm16Mask;
Assembler::Assembler(const AssemblerOptions& options,
std::unique_ptr<AssemblerBuffer> buffer)
: AssemblerBase(options, std::move(buffer)),
- scratch_register_list_(at.bit()) {
+ scratch_register_list_(at.bit() | s0.bit()) {
if (CpuFeatures::IsSupported(MIPS_SIMD)) {
EnableCpuFeature(MIPS_SIMD);
}
@@ -1061,7 +1061,7 @@ int Assembler::BranchOffset(Instr instr) {
// space. There is no guarantee that the relocated location can be similarly
// encoded.
bool Assembler::MustUseReg(RelocInfo::Mode rmode) {
- return !RelocInfo::IsNone(rmode);
+ return !RelocInfo::IsNoInfo(rmode);
}
void Assembler::GenInstrRegister(Opcode opcode, Register rs, Register rt,
@@ -3790,7 +3790,7 @@ void Assembler::db(uint8_t data) {
void Assembler::dd(uint32_t data, RelocInfo::Mode rmode) {
CheckForEmitInForbiddenSlot();
- if (!RelocInfo::IsNone(rmode)) {
+ if (!RelocInfo::IsNoInfo(rmode)) {
DCHECK(RelocInfo::IsDataEmbeddedObject(rmode) ||
RelocInfo::IsLiteralConstant(rmode));
RecordRelocInfo(rmode);
@@ -3801,7 +3801,7 @@ void Assembler::dd(uint32_t data, RelocInfo::Mode rmode) {
void Assembler::dq(uint64_t data, RelocInfo::Mode rmode) {
CheckForEmitInForbiddenSlot();
- if (!RelocInfo::IsNone(rmode)) {
+ if (!RelocInfo::IsNoInfo(rmode)) {
DCHECK(RelocInfo::IsDataEmbeddedObject(rmode) ||
RelocInfo::IsLiteralConstant(rmode));
RecordRelocInfo(rmode);
diff --git a/deps/v8/src/codegen/mips64/assembler-mips64.h b/deps/v8/src/codegen/mips64/assembler-mips64.h
index 80f282c696..f17d47e990 100644
--- a/deps/v8/src/codegen/mips64/assembler-mips64.h
+++ b/deps/v8/src/codegen/mips64/assembler-mips64.h
@@ -63,7 +63,7 @@ class Operand {
public:
// Immediate.
V8_INLINE explicit Operand(int64_t immediate,
- RelocInfo::Mode rmode = RelocInfo::NONE)
+ RelocInfo::Mode rmode = RelocInfo::NO_INFO)
: rm_(no_reg), rmode_(rmode) {
value_.immediate = immediate;
}
@@ -73,7 +73,8 @@ class Operand {
}
V8_INLINE explicit Operand(const char* s);
explicit Operand(Handle<HeapObject> handle);
- V8_INLINE explicit Operand(Smi value) : rm_(no_reg), rmode_(RelocInfo::NONE) {
+ V8_INLINE explicit Operand(Smi value)
+ : rm_(no_reg), rmode_(RelocInfo::NO_INFO) {
value_.immediate = static_cast<intptr_t>(value.ptr());
}
@@ -1459,9 +1460,9 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
// Writes a single byte or word of data in the code stream. Used for
// inline tables, e.g., jump-tables.
void db(uint8_t data);
- void dd(uint32_t data, RelocInfo::Mode rmode = RelocInfo::NONE);
- void dq(uint64_t data, RelocInfo::Mode rmode = RelocInfo::NONE);
- void dp(uintptr_t data, RelocInfo::Mode rmode = RelocInfo::NONE) {
+ void dd(uint32_t data, RelocInfo::Mode rmode = RelocInfo::NO_INFO);
+ void dq(uint64_t data, RelocInfo::Mode rmode = RelocInfo::NO_INFO);
+ void dp(uintptr_t data, RelocInfo::Mode rmode = RelocInfo::NO_INFO) {
dq(data, rmode);
}
void dd(Label* label);
@@ -1562,6 +1563,8 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
inline int UnboundLabelsCount() { return unbound_labels_count_; }
+ bool is_trampoline_emitted() const { return trampoline_emitted_; }
+
protected:
// Load Scaled Address instructions.
void lsa(Register rd, Register rt, Register rs, uint8_t sa);
@@ -1618,8 +1621,6 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
bool has_exception() const { return internal_trampoline_exception_; }
- bool is_trampoline_emitted() const { return trampoline_emitted_; }
-
// Temporarily block automatic assembly buffer growth.
void StartBlockGrowBuffer() {
DCHECK(!block_buffer_growth_);
diff --git a/deps/v8/src/codegen/mips64/macro-assembler-mips64.cc b/deps/v8/src/codegen/mips64/macro-assembler-mips64.cc
index f580aed7c8..291d6d5b6a 100644
--- a/deps/v8/src/codegen/mips64/macro-assembler-mips64.cc
+++ b/deps/v8/src/codegen/mips64/macro-assembler-mips64.cc
@@ -1097,13 +1097,16 @@ void TurboAssembler::Lsa(Register rd, Register rt, Register rs, uint8_t sa,
void TurboAssembler::Dlsa(Register rd, Register rt, Register rs, uint8_t sa,
Register scratch) {
- DCHECK(sa >= 1 && sa <= 31);
+ DCHECK(sa >= 1 && sa <= 63);
if (kArchVariant == kMips64r6 && sa <= 4) {
dlsa(rd, rt, rs, sa - 1);
} else {
Register tmp = rd == rt ? scratch : rd;
DCHECK(tmp != rt);
- dsll(tmp, rs, sa);
+ if (sa <= 31)
+ dsll(tmp, rs, sa);
+ else
+ dsll32(tmp, rs, sa - 32);
Daddu(rd, rt, tmp);
}
}
@@ -5230,7 +5233,7 @@ void MacroAssembler::JumpToExternalReference(const ExternalReference& builtin,
Jump(code, RelocInfo::CODE_TARGET, al, zero_reg, Operand(zero_reg), bd);
}
-void MacroAssembler::JumpToInstructionStream(Address entry) {
+void MacroAssembler::JumpToOffHeapInstructionStream(Address entry) {
li(kOffHeapTrampolineRegister, Operand(entry, RelocInfo::OFF_HEAP_TARGET));
Jump(kOffHeapTrampolineRegister);
}
@@ -5630,6 +5633,23 @@ void MacroAssembler::AssertFunction(Register object) {
}
}
+void MacroAssembler::AssertCallableFunction(Register object) {
+ if (FLAG_debug_code) {
+ BlockTrampolinePoolScope block_trampoline_pool(this);
+ STATIC_ASSERT(kSmiTag == 0);
+ SmiTst(object, t8);
+ Check(ne, AbortReason::kOperandIsASmiAndNotAFunction, t8,
+ Operand(zero_reg));
+ push(object);
+ LoadMap(object, object);
+ GetInstanceTypeRange(object, object, FIRST_CALLABLE_JS_FUNCTION_TYPE, t8);
+ Check(ls, AbortReason::kOperandIsNotACallableFunction, t8,
+ Operand(LAST_CALLABLE_JS_FUNCTION_TYPE -
+ FIRST_CALLABLE_JS_FUNCTION_TYPE));
+ pop(object);
+ }
+}
+
void MacroAssembler::AssertBoundFunction(Register object) {
if (FLAG_debug_code) {
BlockTrampolinePoolScope block_trampoline_pool(this);
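
Editor's note: Dlsa now accepts shift amounts up to 63. A plain C++ sketch of the fallback path above, under the assumption that dsll encodes shifts 0-31 while dsll32 shifts by 32 plus its 5-bit amount:

#include <cstdint>

// rd = rt + (rs << sa), for sa in [1, 63].
uint64_t Dlsa(uint64_t rt, uint64_t rs, unsigned sa) {
  uint64_t shifted = (sa <= 31)
                         ? rs << sa                  // dsll(tmp, rs, sa)
                         : (rs << 32) << (sa - 32);  // dsll32(tmp, rs, sa - 32)
  return rt + shifted;                               // Daddu(rd, rt, tmp)
}
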
diff --git a/deps/v8/src/codegen/mips64/macro-assembler-mips64.h b/deps/v8/src/codegen/mips64/macro-assembler-mips64.h
index b1956867b4..bcb11adf69 100644
--- a/deps/v8/src/codegen/mips64/macro-assembler-mips64.h
+++ b/deps/v8/src/codegen/mips64/macro-assembler-mips64.h
@@ -1149,7 +1149,7 @@ class V8_EXPORT_PRIVATE MacroAssembler : public TurboAssembler {
bool builtin_exit_frame = false);
// Generates a trampoline to jump to the off-heap instruction stream.
- void JumpToInstructionStream(Address entry);
+ void JumpToOffHeapInstructionStream(Address entry);
// ---------------------------------------------------------------------------
// In-place weak references.
@@ -1211,6 +1211,10 @@ class V8_EXPORT_PRIVATE MacroAssembler : public TurboAssembler {
// Abort execution if argument is not a JSFunction, enabled via --debug-code.
void AssertFunction(Register object);
+ // Abort execution if argument is not a callable JSFunction, enabled via
+ // --debug-code.
+ void AssertCallableFunction(Register object);
+
// Abort execution if argument is not a JSBoundFunction,
// enabled via --debug-code.
void AssertBoundFunction(Register object);
diff --git a/deps/v8/src/codegen/pending-optimization-table.cc b/deps/v8/src/codegen/pending-optimization-table.cc
index 6f51cc43af..5e88b1e456 100644
--- a/deps/v8/src/codegen/pending-optimization-table.cc
+++ b/deps/v8/src/codegen/pending-optimization-table.cc
@@ -30,6 +30,11 @@ void PendingOptimizationTable::PreparedForOptimization(
if (allow_heuristic_optimization) {
status |= FunctionStatus::kAllowHeuristicOptimization;
}
+ Handle<SharedFunctionInfo> shared_info(function->shared(), isolate);
+
+ IsCompiledScope is_compiled_scope;
+ SharedFunctionInfo::EnsureBytecodeArrayAvailable(isolate, shared_info,
+ &is_compiled_scope);
Handle<ObjectHashTable> table =
isolate->heap()->pending_optimize_for_test_bytecode().IsUndefined()
@@ -38,7 +43,7 @@ void PendingOptimizationTable::PreparedForOptimization(
isolate->heap()->pending_optimize_for_test_bytecode()),
isolate);
Handle<Tuple2> tuple = isolate->factory()->NewTuple2(
- handle(function->shared().GetBytecodeArray(isolate), isolate),
+ handle(shared_info->GetBytecodeArray(isolate), isolate),
handle(Smi::FromInt(status), isolate), AllocationType::kYoung);
table =
ObjectHashTable::Put(table, handle(function->shared(), isolate), tuple);
diff --git a/deps/v8/src/codegen/ppc/assembler-ppc-inl.h b/deps/v8/src/codegen/ppc/assembler-ppc-inl.h
index 9274c502a8..364b20e596 100644
--- a/deps/v8/src/codegen/ppc/assembler-ppc-inl.h
+++ b/deps/v8/src/codegen/ppc/assembler-ppc-inl.h
@@ -252,7 +252,7 @@ void RelocInfo::WipeOut() {
}
}
-Operand::Operand(Register rm) : rm_(rm), rmode_(RelocInfo::NONE) {}
+Operand::Operand(Register rm) : rm_(rm), rmode_(RelocInfo::NO_INFO) {}
void Assembler::UntrackBranch() {
DCHECK(!trampoline_emitted_);
diff --git a/deps/v8/src/codegen/ppc/assembler-ppc.cc b/deps/v8/src/codegen/ppc/assembler-ppc.cc
index ccb144dc61..b65fe2e729 100644
--- a/deps/v8/src/codegen/ppc/assembler-ppc.cc
+++ b/deps/v8/src/codegen/ppc/assembler-ppc.cc
@@ -1185,7 +1185,7 @@ bool Operand::must_output_reloc_info(const Assembler* assembler) const {
if (rmode_ == RelocInfo::EXTERNAL_REFERENCE) {
if (assembler != nullptr && assembler->predictable_code_size()) return true;
return assembler->options().record_reloc_info_for_serialization;
- } else if (RelocInfo::IsNone(rmode_)) {
+ } else if (RelocInfo::IsNoInfo(rmode_)) {
return false;
}
return true;
@@ -1322,6 +1322,15 @@ void Assembler::bitwise_add32(Register dst, Register src, int32_t value) {
}
}
+void Assembler::patch_wasm_cpi_return_address(Register dst, int pc_offset,
+ int return_address_offset) {
+ DCHECK(is_int16(return_address_offset));
+ Assembler patching_assembler(
+ AssemblerOptions{},
+ ExternalAssemblerBuffer(buffer_start_ + pc_offset, kInstrSize + kGap));
+ patching_assembler.addi(dst, dst, Operand(return_address_offset));
+}
+
void Assembler::mov_label_offset(Register dst, Label* label) {
int position = link(label);
if (label->is_bound()) {
@@ -1978,7 +1987,7 @@ void Assembler::db(uint8_t data) {
void Assembler::dd(uint32_t data, RelocInfo::Mode rmode) {
CheckBuffer();
- if (!RelocInfo::IsNone(rmode)) {
+ if (!RelocInfo::IsNoInfo(rmode)) {
DCHECK(RelocInfo::IsDataEmbeddedObject(rmode) ||
RelocInfo::IsLiteralConstant(rmode));
RecordRelocInfo(rmode);
@@ -1989,7 +1998,7 @@ void Assembler::dd(uint32_t data, RelocInfo::Mode rmode) {
void Assembler::dq(uint64_t value, RelocInfo::Mode rmode) {
CheckBuffer();
- if (!RelocInfo::IsNone(rmode)) {
+ if (!RelocInfo::IsNoInfo(rmode)) {
DCHECK(RelocInfo::IsDataEmbeddedObject(rmode) ||
RelocInfo::IsLiteralConstant(rmode));
RecordRelocInfo(rmode);
@@ -2000,7 +2009,7 @@ void Assembler::dq(uint64_t value, RelocInfo::Mode rmode) {
void Assembler::dp(uintptr_t data, RelocInfo::Mode rmode) {
CheckBuffer();
- if (!RelocInfo::IsNone(rmode)) {
+ if (!RelocInfo::IsNoInfo(rmode)) {
DCHECK(RelocInfo::IsDataEmbeddedObject(rmode) ||
RelocInfo::IsLiteralConstant(rmode));
RecordRelocInfo(rmode);
diff --git a/deps/v8/src/codegen/ppc/assembler-ppc.h b/deps/v8/src/codegen/ppc/assembler-ppc.h
index 654c856d7d..ea82539afb 100644
--- a/deps/v8/src/codegen/ppc/assembler-ppc.h
+++ b/deps/v8/src/codegen/ppc/assembler-ppc.h
@@ -67,7 +67,7 @@ class V8_EXPORT_PRIVATE Operand {
public:
// immediate
V8_INLINE explicit Operand(intptr_t immediate,
- RelocInfo::Mode rmode = RelocInfo::NONE)
+ RelocInfo::Mode rmode = RelocInfo::NO_INFO)
: rmode_(rmode) {
value_.immediate = immediate;
}
@@ -77,7 +77,7 @@ class V8_EXPORT_PRIVATE Operand {
value_.immediate = static_cast<intptr_t>(f.address());
}
explicit Operand(Handle<HeapObject> handle);
- V8_INLINE explicit Operand(Smi value) : rmode_(RelocInfo::NONE) {
+ V8_INLINE explicit Operand(Smi value) : rmode_(RelocInfo::NO_INFO) {
value_.immediate = static_cast<intptr_t>(value.ptr());
}
// rm
@@ -968,6 +968,10 @@ class Assembler : public AssemblerBase {
void bitwise_mov32(Register dst, int32_t value);
void bitwise_add32(Register dst, Register src, int32_t value);
+ // Patch the offset to the return address after CallCFunction.
+ void patch_wasm_cpi_return_address(Register dst, int pc_offset,
+ int return_address_offset);
+
// Load the position of the label relative to the generated code object
// pointer in a register.
void mov_label_offset(Register dst, Label* label);
@@ -1206,9 +1210,9 @@ class Assembler : public AssemblerBase {
// Writes a single byte or word of data in the code stream. Used
// for inline tables, e.g., jump-tables.
void db(uint8_t data);
- void dd(uint32_t data, RelocInfo::Mode rmode = RelocInfo::NONE);
- void dq(uint64_t data, RelocInfo::Mode rmode = RelocInfo::NONE);
- void dp(uintptr_t data, RelocInfo::Mode rmode = RelocInfo::NONE);
+ void dd(uint32_t data, RelocInfo::Mode rmode = RelocInfo::NO_INFO);
+ void dq(uint64_t data, RelocInfo::Mode rmode = RelocInfo::NO_INFO);
+ void dp(uintptr_t data, RelocInfo::Mode rmode = RelocInfo::NO_INFO);
// Read/patch instructions
Instr instr_at(int pos) {
@@ -1303,7 +1307,7 @@ class Assembler : public AssemblerBase {
ConstantPoolEntry::Access ConstantPoolAddEntry(RelocInfo::Mode rmode,
intptr_t value) {
bool sharing_ok =
- RelocInfo::IsNone(rmode) ||
+ RelocInfo::IsNoInfo(rmode) ||
(!options().record_reloc_info_for_serialization &&
RelocInfo::IsShareableRelocMode(rmode) &&
!is_constant_pool_entry_sharing_blocked() &&
diff --git a/deps/v8/src/codegen/ppc/interface-descriptors-ppc-inl.h b/deps/v8/src/codegen/ppc/interface-descriptors-ppc-inl.h
index adc36e2407..f3359d3ca8 100644
--- a/deps/v8/src/codegen/ppc/interface-descriptors-ppc-inl.h
+++ b/deps/v8/src/codegen/ppc/interface-descriptors-ppc-inl.h
@@ -121,7 +121,7 @@ constexpr auto CallTrampolineDescriptor::registers() {
// static
constexpr auto CallVarargsDescriptor::registers() {
- // r3 : number of arguments (on the stack, not including receiver)
+ // r3 : number of arguments (on the stack)
// r4 : the target to call
// r7 : arguments list length (untagged)
// r5 : arguments list (FixedArray)
@@ -139,13 +139,13 @@ constexpr auto CallForwardVarargsDescriptor::registers() {
// static
constexpr auto CallFunctionTemplateDescriptor::registers() {
// r4 : function template info
- // r5 : number of arguments (on the stack, not including receiver)
+ // r5 : number of arguments (on the stack)
return RegisterArray(r4, r5);
}
// static
constexpr auto CallWithSpreadDescriptor::registers() {
- // r3 : number of arguments (on the stack, not including receiver)
+ // r3 : number of arguments (on the stack)
// r4 : the target to call
// r5 : the object to spread
return RegisterArray(r4, r3, r5);
@@ -160,7 +160,7 @@ constexpr auto CallWithArrayLikeDescriptor::registers() {
// static
constexpr auto ConstructVarargsDescriptor::registers() {
- // r3 : number of arguments (on the stack, not including receiver)
+ // r3 : number of arguments (on the stack)
// r4 : the target to call
// r6 : the new target
// r7 : arguments list length (untagged)
@@ -179,7 +179,7 @@ constexpr auto ConstructForwardVarargsDescriptor::registers() {
// static
constexpr auto ConstructWithSpreadDescriptor::registers() {
- // r3 : number of arguments (on the stack, not including receiver)
+ // r3 : number of arguments (on the stack)
// r4 : the target to call
// r6 : the new target
// r5 : the object to spread
@@ -241,7 +241,7 @@ constexpr auto InterpreterDispatchDescriptor::registers() {
// static
constexpr auto InterpreterPushArgsThenCallDescriptor::registers() {
- return RegisterArray(r3, // argument count (not including receiver)
+ return RegisterArray(r3, // argument count
r5, // address of first argument
r4); // the target callable to be called
}
@@ -249,7 +249,7 @@ constexpr auto InterpreterPushArgsThenCallDescriptor::registers() {
// static
constexpr auto InterpreterPushArgsThenConstructDescriptor::registers() {
return RegisterArray(
- r3, // argument count (not including receiver)
+ r3, // argument count
r7, // address of the first argument
r4, // constructor to call
r6, // new target
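
Editor's note: the descriptor comments above drop "(not including receiver)" because, in this patch series, whether the argument count covers the receiver appears to be governed by the kJSArgcIncludesReceiver constant (see the InvokePrologue change in macro-assembler-ppc.cc below). A standalone sketch of the resulting loop count, under that assumption:

#include <cstdint>

constexpr bool kJSArgcIncludesReceiver = true;  // assumed build configuration

// Loop count used by the under-application copy (the value loaded into r0 and
// mtctr below): the receiver is either already counted in argc or added here.
uint32_t SlotsToCopy(uint32_t actual_parameter_count) {
  return kJSArgcIncludesReceiver ? actual_parameter_count
                                 : actual_parameter_count + 1;
}
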
diff --git a/deps/v8/src/codegen/ppc/macro-assembler-ppc.cc b/deps/v8/src/codegen/ppc/macro-assembler-ppc.cc
index bc3cea67f1..724cedc1c2 100644
--- a/deps/v8/src/codegen/ppc/macro-assembler-ppc.cc
+++ b/deps/v8/src/codegen/ppc/macro-assembler-ppc.cc
@@ -1493,20 +1493,27 @@ void MacroAssembler::InvokePrologue(Register expected_parameter_count,
// Underapplication. Move the arguments already in the stack, including the
// receiver and the return address.
{
- Label copy;
+ Label copy, skip;
Register src = r9, dest = r8;
addi(src, sp, Operand(-kSystemPointerSize));
ShiftLeftU64(r0, expected_parameter_count, Operand(kSystemPointerSizeLog2));
sub(sp, sp, r0);
// Update stack pointer.
addi(dest, sp, Operand(-kSystemPointerSize));
- addi(r0, actual_parameter_count, Operand(1));
+ if (!kJSArgcIncludesReceiver) {
+ addi(r0, actual_parameter_count, Operand(1));
+ } else {
+ mr(r0, actual_parameter_count);
+ cmpi(r0, Operand::Zero());
+ ble(&skip);
+ }
mtctr(r0);
bind(&copy);
LoadU64WithUpdate(r0, MemOperand(src, kSystemPointerSize));
StoreU64WithUpdate(r0, MemOperand(dest, kSystemPointerSize));
bdnz(&copy);
+ bind(&skip);
}
// Fill remaining expected arguments with undefined values.
@@ -2013,7 +2020,7 @@ void MacroAssembler::JumpToExternalReference(const ExternalReference& builtin,
Jump(code, RelocInfo::CODE_TARGET);
}
-void MacroAssembler::JumpToInstructionStream(Address entry) {
+void MacroAssembler::JumpToOffHeapInstructionStream(Address entry) {
mov(kOffHeapTrampolineRegister, Operand(entry, RelocInfo::OFF_HEAP_TARGET));
Jump(kOffHeapTrampolineRegister);
}
@@ -3572,21 +3579,37 @@ void TurboAssembler::SwapSimd128(MemOperand src, MemOperand dst,
addi(sp, sp, Operand(2 * kSimd128Size));
}
-void TurboAssembler::ByteReverseU16(Register dst, Register val) {
- subi(sp, sp, Operand(kSystemPointerSize));
- sth(val, MemOperand(sp));
- lhbrx(dst, MemOperand(r0, sp));
- addi(sp, sp, Operand(kSystemPointerSize));
+void TurboAssembler::ByteReverseU16(Register dst, Register val,
+ Register scratch) {
+ if (CpuFeatures::IsSupported(PPC_10_PLUS)) {
+ brh(dst, val);
+ ZeroExtHalfWord(dst, dst);
+ return;
+ }
+ rlwinm(scratch, val, 8, 16, 23);
+ rlwinm(dst, val, 24, 24, 31);
+ orx(dst, scratch, dst);
+ ZeroExtHalfWord(dst, dst);
}
-void TurboAssembler::ByteReverseU32(Register dst, Register val) {
- subi(sp, sp, Operand(kSystemPointerSize));
- stw(val, MemOperand(sp));
- lwbrx(dst, MemOperand(r0, sp));
- addi(sp, sp, Operand(kSystemPointerSize));
+void TurboAssembler::ByteReverseU32(Register dst, Register val,
+ Register scratch) {
+ if (CpuFeatures::IsSupported(PPC_10_PLUS)) {
+ brw(dst, val);
+ ZeroExtWord32(dst, dst);
+ return;
+ }
+ rotlwi(scratch, val, 8);
+ rlwimi(scratch, val, 24, 0, 7);
+ rlwimi(scratch, val, 24, 16, 23);
+ ZeroExtWord32(dst, scratch);
}
void TurboAssembler::ByteReverseU64(Register dst, Register val) {
+ if (CpuFeatures::IsSupported(PPC_10_PLUS)) {
+ brd(dst, val);
+ return;
+ }
subi(sp, sp, Operand(kSystemPointerSize));
std(val, MemOperand(sp));
ldbrx(dst, MemOperand(r0, sp));
@@ -3819,7 +3842,7 @@ void TurboAssembler::ReverseBitsU64(Register dst, Register src,
void TurboAssembler::ReverseBitsU32(Register dst, Register src,
Register scratch1, Register scratch2) {
- ByteReverseU32(dst, src);
+ ByteReverseU32(dst, src, scratch1);
for (int i = 4; i < 8; i++) {
ReverseBitsInSingleByteU64(dst, dst, scratch1, scratch2, i);
}
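
Editor's note: the new register-only ByteReverseU16/U32 sequences (rlwinm/rlwimi pairs, or brh/brw on Power10) replace the old store-and-reload trick. For reference, this is what they compute, written as plain C++; both results are zero-extended, matching ZeroExtHalfWord/ZeroExtWord32 above.

#include <cstdint>

uint64_t ByteReverseU16(uint16_t v) {
  // rlwinm pair: swap the two bytes, zero-extend the 16-bit result.
  return static_cast<uint16_t>((v << 8) | (v >> 8));
}

uint64_t ByteReverseU32(uint32_t v) {
  // rotlwi/rlwimi sequence: full 32-bit byte swap, zero-extended to 64 bits.
  uint32_t swapped = (v << 24) | ((v << 8) & 0x00FF0000u) |
                     ((v >> 8) & 0x0000FF00u) | (v >> 24);
  return swapped;
}
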
diff --git a/deps/v8/src/codegen/ppc/macro-assembler-ppc.h b/deps/v8/src/codegen/ppc/macro-assembler-ppc.h
index febedfe3ba..200015bd85 100644
--- a/deps/v8/src/codegen/ppc/macro-assembler-ppc.h
+++ b/deps/v8/src/codegen/ppc/macro-assembler-ppc.h
@@ -612,8 +612,8 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
Simd128Register scratch);
void SwapSimd128(MemOperand src, MemOperand dst, Simd128Register scratch);
- void ByteReverseU16(Register dst, Register val);
- void ByteReverseU32(Register dst, Register val);
+ void ByteReverseU16(Register dst, Register val, Register scratch);
+ void ByteReverseU32(Register dst, Register val, Register scratch);
void ByteReverseU64(Register dst, Register val);
// Before calling a C-function from generated code, align arguments on stack.
@@ -1261,7 +1261,7 @@ class V8_EXPORT_PRIVATE MacroAssembler : public TurboAssembler {
bool builtin_exit_frame = false);
// Generates a trampoline to jump to the off-heap instruction stream.
- void JumpToInstructionStream(Address entry);
+ void JumpToOffHeapInstructionStream(Address entry);
// ---------------------------------------------------------------------------
// In-place weak references.
diff --git a/deps/v8/src/codegen/reloc-info.cc b/deps/v8/src/codegen/reloc-info.cc
index 1985ff28bc..6057eca4a1 100644
--- a/deps/v8/src/codegen/reloc-info.cc
+++ b/deps/v8/src/codegen/reloc-info.cc
@@ -394,7 +394,7 @@ bool RelocInfo::RequiresRelocation(Code code) {
#ifdef ENABLE_DISASSEMBLER
const char* RelocInfo::RelocModeName(RelocInfo::Mode rmode) {
switch (rmode) {
- case NONE:
+ case NO_INFO:
return "no reloc";
case COMPRESSED_EMBEDDED_OBJECT:
return "compressed embedded object";
@@ -522,7 +522,7 @@ void RelocInfo::Verify(Isolate* isolate) {
Address addr = target_off_heap_target();
CHECK_NE(addr, kNullAddress);
CHECK(Builtins::IsBuiltinId(
- InstructionStream::TryLookupCode(isolate, addr)));
+ OffHeapInstructionStream::TryLookupCode(isolate, addr)));
break;
}
case RUNTIME_ENTRY:
@@ -537,7 +537,7 @@ void RelocInfo::Verify(Isolate* isolate) {
case VENEER_POOL:
case WASM_CALL:
case WASM_STUB_CALL:
- case NONE:
+ case NO_INFO:
break;
case NUMBER_OF_MODES:
case PC_JUMP:
diff --git a/deps/v8/src/codegen/reloc-info.h b/deps/v8/src/codegen/reloc-info.h
index cb1a04860d..b92907fbf0 100644
--- a/deps/v8/src/codegen/reloc-info.h
+++ b/deps/v8/src/codegen/reloc-info.h
@@ -54,7 +54,7 @@ class RelocInfo {
// Please note the order is important (see IsRealRelocMode, IsGCRelocMode,
// and IsShareableRelocMode predicates below).
- NONE, // Never recorded value. Most common one, hence value 0.
+ NO_INFO, // Never recorded value. Most common one, hence value 0.
CODE_TARGET,
RELATIVE_CODE_TARGET, // LAST_CODE_TARGET_MODE
@@ -132,7 +132,7 @@ class RelocInfo {
return mode <= LAST_GCED_ENUM;
}
static constexpr bool IsShareableRelocMode(Mode mode) {
- return mode == RelocInfo::NONE ||
+ return mode == RelocInfo::NO_INFO ||
mode >= RelocInfo::FIRST_SHAREABLE_RELOC_MODE;
}
static constexpr bool IsCodeTarget(Mode mode) { return mode == CODE_TARGET; }
@@ -191,7 +191,7 @@ class RelocInfo {
static constexpr bool IsOffHeapTarget(Mode mode) {
return mode == OFF_HEAP_TARGET;
}
- static constexpr bool IsNone(Mode mode) { return mode == NONE; }
+ static constexpr bool IsNoInfo(Mode mode) { return mode == NO_INFO; }
static bool IsOnlyForSerializer(Mode mode) {
#ifdef V8_TARGET_ARCH_IA32
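
Editor's note: the rename keeps NO_INFO as enum value 0 and preserves the ordering the predicates depend on, so nothing behavioural changes. A condensed standalone mirror of those predicates (simplified enum; the real mode list is elided):

#include <cstdint>

enum Mode : uint8_t {
  NO_INFO,  // value 0: the common "no relocation recorded" case
  CODE_TARGET,
  // ... other recorded modes elided ...
  FIRST_SHAREABLE_RELOC_MODE,
  NUMBER_OF_MODES
};

constexpr bool IsNoInfo(Mode mode) { return mode == NO_INFO; }
constexpr bool IsShareableRelocMode(Mode mode) {
  return mode == NO_INFO || mode >= FIRST_SHAREABLE_RELOC_MODE;
}
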
diff --git a/deps/v8/src/codegen/riscv64/assembler-riscv64.cc b/deps/v8/src/codegen/riscv64/assembler-riscv64.cc
index dce8f468ce..9304f012d0 100644
--- a/deps/v8/src/codegen/riscv64/assembler-riscv64.cc
+++ b/deps/v8/src/codegen/riscv64/assembler-riscv64.cc
@@ -798,7 +798,7 @@ int Assembler::AuipcOffset(Instr instr) {
// space. There is no guarantee that the relocated location can be similarly
// encoded.
bool Assembler::MustUseReg(RelocInfo::Mode rmode) {
- return !RelocInfo::IsNone(rmode);
+ return !RelocInfo::IsNoInfo(rmode);
}
void Assembler::disassembleInstr(Instr instr) {
@@ -2461,6 +2461,27 @@ void Assembler::EBREAK() {
}
// RVV
+
+void Assembler::vredmaxu_vs(VRegister vd, VRegister vs2, VRegister vs1,
+ MaskType mask) {
+ GenInstrV(VREDMAXU_FUNCT6, OP_MVV, vd, vs1, vs2, mask);
+}
+
+void Assembler::vredmax_vs(VRegister vd, VRegister vs2, VRegister vs1,
+ MaskType mask) {
+ GenInstrV(VREDMAX_FUNCT6, OP_MVV, vd, vs1, vs2, mask);
+}
+
+void Assembler::vredmin_vs(VRegister vd, VRegister vs2, VRegister vs1,
+ MaskType mask) {
+ GenInstrV(VREDMIN_FUNCT6, OP_MVV, vd, vs1, vs2, mask);
+}
+
+void Assembler::vredminu_vs(VRegister vd, VRegister vs2, VRegister vs1,
+ MaskType mask) {
+ GenInstrV(VREDMINU_FUNCT6, OP_MVV, vd, vs1, vs2, mask);
+}
+
void Assembler::vmv_vv(VRegister vd, VRegister vs1) {
GenInstrV(VMV_FUNCT6, OP_IVV, vd, vs1, v0, NoMask);
}
@@ -2536,6 +2557,15 @@ void Assembler::vrgather_vx(VRegister vd, VRegister vs2, Register rs1,
GenInstrV(VRGATHER_FUNCT6, OP_IVX, vd, rs1, vs2, mask);
}
+void Assembler::vwaddu_wx(VRegister vd, VRegister vs2, Register rs1,
+ MaskType mask) {
+ GenInstrV(VWADDUW_FUNCT6, OP_MVX, vd, rs1, vs2, mask);
+}
+
+void Assembler::vid_v(VRegister vd, MaskType mask) {
+ GenInstrV(VMUNARY0_FUNCT6, OP_MVV, vd, VID_V, v0, mask);
+}
+
#define DEFINE_OPIVV(name, funct6) \
void Assembler::name##_vv(VRegister vd, VRegister vs2, VRegister vs1, \
MaskType mask) { \
@@ -2548,6 +2578,12 @@ void Assembler::vrgather_vx(VRegister vd, VRegister vs2, Register rs1,
GenInstrV(funct6, OP_FVV, vd, vs1, vs2, mask); \
}
+#define DEFINE_OPFRED(name, funct6) \
+ void Assembler::name##_vs(VRegister vd, VRegister vs2, VRegister vs1, \
+ MaskType mask) { \
+ GenInstrV(funct6, OP_FVV, vd, vs1, vs2, mask); \
+ }
+
#define DEFINE_OPIVX(name, funct6) \
void Assembler::name##_vx(VRegister vd, VRegister vs2, Register rs1, \
MaskType mask) { \
@@ -2561,11 +2597,19 @@ void Assembler::vrgather_vx(VRegister vd, VRegister vs2, Register rs1,
}
#define DEFINE_OPMVV(name, funct6) \
- void Assembler::name##_vs(VRegister vd, VRegister vs2, VRegister vs1, \
+ void Assembler::name##_vv(VRegister vd, VRegister vs2, VRegister vs1, \
MaskType mask) { \
GenInstrV(funct6, OP_MVV, vd, vs1, vs2, mask); \
}
+// void GenInstrV(uint8_t funct6, Opcode opcode, VRegister vd, Register rs1,
+// VRegister vs2, MaskType mask = NoMask);
+#define DEFINE_OPMVX(name, funct6) \
+ void Assembler::name##_vx(VRegister vd, VRegister vs2, Register rs1, \
+ MaskType mask) { \
+ GenInstrV(funct6, OP_MVX, vd, rs1, vs2, mask); \
+ }
+
#define DEFINE_OPFVF(name, funct6) \
void Assembler::name##_vf(VRegister vd, VRegister vs2, FPURegister fs1, \
MaskType mask) { \
@@ -2584,12 +2628,22 @@ void Assembler::vrgather_vx(VRegister vd, VRegister vs2, Register rs1,
GenInstrV(funct6, OP_FVF, vd, fs1, vs2, mask); \
}
+// vector integer extension
+#define DEFINE_OPMVV_VIE(name, vs1) \
+ void Assembler::name(VRegister vd, VRegister vs2, MaskType mask) { \
+ GenInstrV(VXUNARY0_FUNCT6, OP_MVV, vd, vs1, vs2, mask); \
+ }
+
void Assembler::vfmv_vf(VRegister vd, FPURegister fs1, MaskType mask) {
GenInstrV(VMV_FUNCT6, OP_FVF, vd, fs1, v0, mask);
}
-void Assembler::vfmv_fs(FPURegister fd, VRegister vs2, MaskType mask) {
- GenInstrV(VWFUNARY0_FUNCT6, OP_FVV, fd, v0, vs2, mask);
+void Assembler::vfmv_fs(FPURegister fd, VRegister vs2) {
+ GenInstrV(VWFUNARY0_FUNCT6, OP_FVV, fd, v0, vs2, NoMask);
+}
+
+void Assembler::vfmv_sf(VRegister vd, FPURegister fs) {
+ GenInstrV(VRFUNARY0_FUNCT6, OP_FVF, vd, fs, v0, NoMask);
}
DEFINE_OPIVV(vadd, VADD_FUNCT6)
@@ -2597,6 +2651,23 @@ DEFINE_OPIVX(vadd, VADD_FUNCT6)
DEFINE_OPIVI(vadd, VADD_FUNCT6)
DEFINE_OPIVV(vsub, VSUB_FUNCT6)
DEFINE_OPIVX(vsub, VSUB_FUNCT6)
+DEFINE_OPMVX(vdiv, VDIV_FUNCT6)
+DEFINE_OPMVX(vdivu, VDIVU_FUNCT6)
+DEFINE_OPMVX(vmul, VMUL_FUNCT6)
+DEFINE_OPMVX(vmulhu, VMULHU_FUNCT6)
+DEFINE_OPMVX(vmulhsu, VMULHSU_FUNCT6)
+DEFINE_OPMVX(vmulh, VMULH_FUNCT6)
+DEFINE_OPMVV(vdiv, VDIV_FUNCT6)
+DEFINE_OPMVV(vdivu, VDIVU_FUNCT6)
+DEFINE_OPMVV(vmul, VMUL_FUNCT6)
+DEFINE_OPMVV(vmulhu, VMULHU_FUNCT6)
+DEFINE_OPMVV(vmulhsu, VMULHSU_FUNCT6)
+DEFINE_OPMVV(vwmul, VWMUL_FUNCT6)
+DEFINE_OPMVV(vwmulu, VWMULU_FUNCT6)
+DEFINE_OPMVV(vmulh, VMULH_FUNCT6)
+DEFINE_OPMVV(vwadd, VWADD_FUNCT6)
+DEFINE_OPMVV(vwaddu, VWADDU_FUNCT6)
+DEFINE_OPMVV(vcompress, VCOMPRESS_FUNCT6)
DEFINE_OPIVX(vsadd, VSADD_FUNCT6)
DEFINE_OPIVV(vsadd, VSADD_FUNCT6)
DEFINE_OPIVI(vsadd, VSADD_FUNCT6)
@@ -2664,14 +2735,16 @@ DEFINE_OPIVV(vsrl, VSRL_FUNCT6)
DEFINE_OPIVX(vsrl, VSRL_FUNCT6)
DEFINE_OPIVI(vsrl, VSRL_FUNCT6)
+DEFINE_OPIVV(vsra, VSRA_FUNCT6)
+DEFINE_OPIVX(vsra, VSRA_FUNCT6)
+DEFINE_OPIVI(vsra, VSRA_FUNCT6)
+
DEFINE_OPIVV(vsll, VSLL_FUNCT6)
DEFINE_OPIVX(vsll, VSLL_FUNCT6)
DEFINE_OPIVI(vsll, VSLL_FUNCT6)
-DEFINE_OPMVV(vredmaxu, VREDMAXU_FUNCT6)
-DEFINE_OPMVV(vredmax, VREDMAX_FUNCT6)
-DEFINE_OPMVV(vredmin, VREDMIN_FUNCT6)
-DEFINE_OPMVV(vredminu, VREDMINU_FUNCT6)
+DEFINE_OPIVV(vsmul, VSMUL_FUNCT6)
+DEFINE_OPIVX(vsmul, VSMUL_FUNCT6)
DEFINE_OPFVV(vfadd, VFADD_FUNCT6)
DEFINE_OPFVF(vfadd, VFADD_FUNCT6)
@@ -2688,6 +2761,8 @@ DEFINE_OPFVV(vmfle, VMFLE_FUNCT6)
DEFINE_OPFVV(vfmax, VFMAX_FUNCT6)
DEFINE_OPFVV(vfmin, VFMIN_FUNCT6)
+DEFINE_OPFRED(vfredmax, VFREDMAX_FUNCT6)
+
DEFINE_OPFVV(vfsngj, VFSGNJ_FUNCT6)
DEFINE_OPFVF(vfsngj, VFSGNJ_FUNCT6)
DEFINE_OPFVV(vfsngjn, VFSGNJN_FUNCT6)
@@ -2721,6 +2796,14 @@ DEFINE_OPIVV(vnclipu, VNCLIPU_FUNCT6)
DEFINE_OPIVX(vnclipu, VNCLIPU_FUNCT6)
DEFINE_OPIVI(vnclipu, VNCLIPU_FUNCT6)
+// Vector Integer Extension
+DEFINE_OPMVV_VIE(vzext_vf8, 0b00010)
+DEFINE_OPMVV_VIE(vsext_vf8, 0b00011)
+DEFINE_OPMVV_VIE(vzext_vf4, 0b00100)
+DEFINE_OPMVV_VIE(vsext_vf4, 0b00101)
+DEFINE_OPMVV_VIE(vzext_vf2, 0b00110)
+DEFINE_OPMVV_VIE(vsext_vf2, 0b00111)
+
#undef DEFINE_OPIVI
#undef DEFINE_OPIVV
#undef DEFINE_OPIVX
@@ -2728,6 +2811,7 @@ DEFINE_OPIVI(vnclipu, VNCLIPU_FUNCT6)
#undef DEFINE_OPFVF
#undef DEFINE_OPFVV_FMA
#undef DEFINE_OPFVF_FMA
+#undef DEFINE_OPMVV_VIE
void Assembler::vsetvli(Register rd, Register rs1, VSew vsew, Vlmul vlmul,
TailAgnosticType tail, MaskAgnosticType mask) {
@@ -2767,19 +2851,7 @@ uint8_t vsew_switch(VSew vsew) {
case E32:
width = 0b110;
break;
- case E64:
- width = 0b111;
- break;
- case E128:
- width = 0b000;
- break;
- case E256:
- width = 0b101;
- break;
- case E512:
- width = 0b110;
- break;
- case E1024:
+ default:
width = 0b111;
break;
}
@@ -2788,308 +2860,259 @@ uint8_t vsew_switch(VSew vsew) {
void Assembler::vl(VRegister vd, Register rs1, uint8_t lumop, VSew vsew,
MaskType mask) {
- bool IsMew = vsew >= E128 ? true : false;
uint8_t width = vsew_switch(vsew);
- GenInstrV(LOAD_FP, width, vd, rs1, lumop, mask, 0b00, IsMew, 0b000);
+ GenInstrV(LOAD_FP, width, vd, rs1, lumop, mask, 0b00, 0, 0b000);
}
void Assembler::vls(VRegister vd, Register rs1, Register rs2, VSew vsew,
MaskType mask) {
- bool IsMew = vsew >= E128 ? true : false;
uint8_t width = vsew_switch(vsew);
- GenInstrV(LOAD_FP, width, vd, rs1, rs2, mask, 0b10, IsMew, 0b000);
+ GenInstrV(LOAD_FP, width, vd, rs1, rs2, mask, 0b10, 0, 0b000);
}
void Assembler::vlx(VRegister vd, Register rs1, VRegister vs2, VSew vsew,
MaskType mask) {
- bool IsMew = vsew >= E128 ? true : false;
uint8_t width = vsew_switch(vsew);
- GenInstrV(LOAD_FP, width, vd, rs1, vs2, mask, 0b11, IsMew, 0);
+ GenInstrV(LOAD_FP, width, vd, rs1, vs2, mask, 0b11, 0, 0);
}
void Assembler::vs(VRegister vd, Register rs1, uint8_t sumop, VSew vsew,
MaskType mask) {
- bool IsMew = vsew >= E128 ? true : false;
uint8_t width = vsew_switch(vsew);
- GenInstrV(STORE_FP, width, vd, rs1, sumop, mask, 0b00, IsMew, 0b000);
+ GenInstrV(STORE_FP, width, vd, rs1, sumop, mask, 0b00, 0, 0b000);
}
void Assembler::vss(VRegister vs3, Register rs1, Register rs2, VSew vsew,
MaskType mask) {
- bool IsMew = vsew >= E128 ? true : false;
uint8_t width = vsew_switch(vsew);
- GenInstrV(STORE_FP, width, vs3, rs1, rs2, mask, 0b10, IsMew, 0b000);
+ GenInstrV(STORE_FP, width, vs3, rs1, rs2, mask, 0b10, 0, 0b000);
}
void Assembler::vsx(VRegister vd, Register rs1, VRegister vs2, VSew vsew,
MaskType mask) {
- bool IsMew = vsew >= E128 ? true : false;
uint8_t width = vsew_switch(vsew);
- GenInstrV(STORE_FP, width, vd, rs1, vs2, mask, 0b11, IsMew, 0b000);
+ GenInstrV(STORE_FP, width, vd, rs1, vs2, mask, 0b11, 0, 0b000);
}
void Assembler::vsu(VRegister vd, Register rs1, VRegister vs2, VSew vsew,
MaskType mask) {
- bool IsMew = vsew >= E128 ? true : false;
uint8_t width = vsew_switch(vsew);
- GenInstrV(STORE_FP, width, vd, rs1, vs2, mask, 0b01, IsMew, 0b000);
+ GenInstrV(STORE_FP, width, vd, rs1, vs2, mask, 0b01, 0, 0b000);
}
void Assembler::vlseg2(VRegister vd, Register rs1, uint8_t lumop, VSew vsew,
MaskType mask) {
- bool IsMew = vsew >= E128 ? true : false;
uint8_t width = vsew_switch(vsew);
- GenInstrV(LOAD_FP, width, vd, rs1, lumop, mask, 0b00, IsMew, 0b001);
+ GenInstrV(LOAD_FP, width, vd, rs1, lumop, mask, 0b00, 0, 0b001);
}
void Assembler::vlseg3(VRegister vd, Register rs1, uint8_t lumop, VSew vsew,
MaskType mask) {
- bool IsMew = vsew >= E128 ? true : false;
uint8_t width = vsew_switch(vsew);
- GenInstrV(LOAD_FP, width, vd, rs1, lumop, mask, 0b00, IsMew, 0b010);
+ GenInstrV(LOAD_FP, width, vd, rs1, lumop, mask, 0b00, 0, 0b010);
}
void Assembler::vlseg4(VRegister vd, Register rs1, uint8_t lumop, VSew vsew,
MaskType mask) {
- bool IsMew = vsew >= E128 ? true : false;
uint8_t width = vsew_switch(vsew);
- GenInstrV(LOAD_FP, width, vd, rs1, lumop, mask, 0b00, IsMew, 0b011);
+ GenInstrV(LOAD_FP, width, vd, rs1, lumop, mask, 0b00, 0, 0b011);
}
void Assembler::vlseg5(VRegister vd, Register rs1, uint8_t lumop, VSew vsew,
MaskType mask) {
- bool IsMew = vsew >= E128 ? true : false;
uint8_t width = vsew_switch(vsew);
- GenInstrV(LOAD_FP, width, vd, rs1, lumop, mask, 0b00, IsMew, 0b100);
+ GenInstrV(LOAD_FP, width, vd, rs1, lumop, mask, 0b00, 0, 0b100);
}
void Assembler::vlseg6(VRegister vd, Register rs1, uint8_t lumop, VSew vsew,
MaskType mask) {
- bool IsMew = vsew >= E128 ? true : false;
uint8_t width = vsew_switch(vsew);
- GenInstrV(LOAD_FP, width, vd, rs1, lumop, mask, 0b00, IsMew, 0b101);
+ GenInstrV(LOAD_FP, width, vd, rs1, lumop, mask, 0b00, 0, 0b101);
}
void Assembler::vlseg7(VRegister vd, Register rs1, uint8_t lumop, VSew vsew,
MaskType mask) {
- bool IsMew = vsew >= E128 ? true : false;
uint8_t width = vsew_switch(vsew);
- GenInstrV(LOAD_FP, width, vd, rs1, lumop, mask, 0b00, IsMew, 0b110);
+ GenInstrV(LOAD_FP, width, vd, rs1, lumop, mask, 0b00, 0, 0b110);
}
void Assembler::vlseg8(VRegister vd, Register rs1, uint8_t lumop, VSew vsew,
MaskType mask) {
- bool IsMew = vsew >= E128 ? true : false;
uint8_t width = vsew_switch(vsew);
- GenInstrV(LOAD_FP, width, vd, rs1, lumop, mask, 0b00, IsMew, 0b111);
+ GenInstrV(LOAD_FP, width, vd, rs1, lumop, mask, 0b00, 0, 0b111);
}
void Assembler::vsseg2(VRegister vd, Register rs1, uint8_t sumop, VSew vsew,
MaskType mask) {
- bool IsMew = vsew >= E128 ? true : false;
uint8_t width = vsew_switch(vsew);
- GenInstrV(STORE_FP, width, vd, rs1, sumop, mask, 0b00, IsMew, 0b001);
+ GenInstrV(STORE_FP, width, vd, rs1, sumop, mask, 0b00, 0, 0b001);
}
void Assembler::vsseg3(VRegister vd, Register rs1, uint8_t sumop, VSew vsew,
MaskType mask) {
- bool IsMew = vsew >= E128 ? true : false;
uint8_t width = vsew_switch(vsew);
- GenInstrV(STORE_FP, width, vd, rs1, sumop, mask, 0b00, IsMew, 0b010);
+ GenInstrV(STORE_FP, width, vd, rs1, sumop, mask, 0b00, 0, 0b010);
}
void Assembler::vsseg4(VRegister vd, Register rs1, uint8_t sumop, VSew vsew,
MaskType mask) {
- bool IsMew = vsew >= E128 ? true : false;
uint8_t width = vsew_switch(vsew);
- GenInstrV(STORE_FP, width, vd, rs1, sumop, mask, 0b00, IsMew, 0b011);
+ GenInstrV(STORE_FP, width, vd, rs1, sumop, mask, 0b00, 0, 0b011);
}
void Assembler::vsseg5(VRegister vd, Register rs1, uint8_t sumop, VSew vsew,
MaskType mask) {
- bool IsMew = vsew >= E128 ? true : false;
uint8_t width = vsew_switch(vsew);
- GenInstrV(STORE_FP, width, vd, rs1, sumop, mask, 0b00, IsMew, 0b100);
+ GenInstrV(STORE_FP, width, vd, rs1, sumop, mask, 0b00, 0, 0b100);
}
void Assembler::vsseg6(VRegister vd, Register rs1, uint8_t sumop, VSew vsew,
MaskType mask) {
- bool IsMew = vsew >= E128 ? true : false;
uint8_t width = vsew_switch(vsew);
- GenInstrV(STORE_FP, width, vd, rs1, sumop, mask, 0b00, IsMew, 0b101);
+ GenInstrV(STORE_FP, width, vd, rs1, sumop, mask, 0b00, 0, 0b101);
}
void Assembler::vsseg7(VRegister vd, Register rs1, uint8_t sumop, VSew vsew,
MaskType mask) {
- bool IsMew = vsew >= E128 ? true : false;
uint8_t width = vsew_switch(vsew);
- GenInstrV(STORE_FP, width, vd, rs1, sumop, mask, 0b00, IsMew, 0b110);
+ GenInstrV(STORE_FP, width, vd, rs1, sumop, mask, 0b00, 0, 0b110);
}
void Assembler::vsseg8(VRegister vd, Register rs1, uint8_t sumop, VSew vsew,
MaskType mask) {
- bool IsMew = vsew >= E128 ? true : false;
uint8_t width = vsew_switch(vsew);
- GenInstrV(STORE_FP, width, vd, rs1, sumop, mask, 0b00, IsMew, 0b111);
+ GenInstrV(STORE_FP, width, vd, rs1, sumop, mask, 0b00, 0, 0b111);
}
void Assembler::vlsseg2(VRegister vd, Register rs1, Register rs2, VSew vsew,
MaskType mask) {
- bool IsMew = vsew >= E128 ? true : false;
uint8_t width = vsew_switch(vsew);
- GenInstrV(LOAD_FP, width, vd, rs1, rs2, mask, 0b10, IsMew, 0b001);
+ GenInstrV(LOAD_FP, width, vd, rs1, rs2, mask, 0b10, 0, 0b001);
}
void Assembler::vlsseg3(VRegister vd, Register rs1, Register rs2, VSew vsew,
MaskType mask) {
- bool IsMew = vsew >= E128 ? true : false;
uint8_t width = vsew_switch(vsew);
- GenInstrV(LOAD_FP, width, vd, rs1, rs2, mask, 0b10, IsMew, 0b010);
+ GenInstrV(LOAD_FP, width, vd, rs1, rs2, mask, 0b10, 0, 0b010);
}
void Assembler::vlsseg4(VRegister vd, Register rs1, Register rs2, VSew vsew,
MaskType mask) {
- bool IsMew = vsew >= E128 ? true : false;
uint8_t width = vsew_switch(vsew);
- GenInstrV(LOAD_FP, width, vd, rs1, rs2, mask, 0b10, IsMew, 0b011);
+ GenInstrV(LOAD_FP, width, vd, rs1, rs2, mask, 0b10, 0, 0b011);
}
void Assembler::vlsseg5(VRegister vd, Register rs1, Register rs2, VSew vsew,
MaskType mask) {
- bool IsMew = vsew >= E128 ? true : false;
uint8_t width = vsew_switch(vsew);
- GenInstrV(LOAD_FP, width, vd, rs1, rs2, mask, 0b10, IsMew, 0b100);
+ GenInstrV(LOAD_FP, width, vd, rs1, rs2, mask, 0b10, 0, 0b100);
}
void Assembler::vlsseg6(VRegister vd, Register rs1, Register rs2, VSew vsew,
MaskType mask) {
- bool IsMew = vsew >= E128 ? true : false;
uint8_t width = vsew_switch(vsew);
- GenInstrV(LOAD_FP, width, vd, rs1, rs2, mask, 0b10, IsMew, 0b101);
+ GenInstrV(LOAD_FP, width, vd, rs1, rs2, mask, 0b10, 0, 0b101);
}
void Assembler::vlsseg7(VRegister vd, Register rs1, Register rs2, VSew vsew,
MaskType mask) {
- bool IsMew = vsew >= E128 ? true : false;
uint8_t width = vsew_switch(vsew);
- GenInstrV(LOAD_FP, width, vd, rs1, rs2, mask, 0b10, IsMew, 0b110);
+ GenInstrV(LOAD_FP, width, vd, rs1, rs2, mask, 0b10, 0, 0b110);
}
void Assembler::vlsseg8(VRegister vd, Register rs1, Register rs2, VSew vsew,
MaskType mask) {
- bool IsMew = vsew >= E128 ? true : false;
uint8_t width = vsew_switch(vsew);
- GenInstrV(LOAD_FP, width, vd, rs1, rs2, mask, 0b10, IsMew, 0b111);
+ GenInstrV(LOAD_FP, width, vd, rs1, rs2, mask, 0b10, 0, 0b111);
}
void Assembler::vssseg2(VRegister vd, Register rs1, Register rs2, VSew vsew,
MaskType mask) {
- bool IsMew = vsew >= E128 ? true : false;
uint8_t width = vsew_switch(vsew);
- GenInstrV(STORE_FP, width, vd, rs1, rs2, mask, 0b10, IsMew, 0b001);
+ GenInstrV(STORE_FP, width, vd, rs1, rs2, mask, 0b10, 0, 0b001);
}
void Assembler::vssseg3(VRegister vd, Register rs1, Register rs2, VSew vsew,
MaskType mask) {
- bool IsMew = vsew >= E128 ? true : false;
uint8_t width = vsew_switch(vsew);
- GenInstrV(STORE_FP, width, vd, rs1, rs2, mask, 0b10, IsMew, 0b010);
+ GenInstrV(STORE_FP, width, vd, rs1, rs2, mask, 0b10, 0, 0b010);
}
void Assembler::vssseg4(VRegister vd, Register rs1, Register rs2, VSew vsew,
MaskType mask) {
- bool IsMew = vsew >= E128 ? true : false;
uint8_t width = vsew_switch(vsew);
- GenInstrV(STORE_FP, width, vd, rs1, rs2, mask, 0b10, IsMew, 0b011);
+ GenInstrV(STORE_FP, width, vd, rs1, rs2, mask, 0b10, 0, 0b011);
}
void Assembler::vssseg5(VRegister vd, Register rs1, Register rs2, VSew vsew,
MaskType mask) {
- bool IsMew = vsew >= E128 ? true : false;
uint8_t width = vsew_switch(vsew);
- GenInstrV(STORE_FP, width, vd, rs1, rs2, mask, 0b10, IsMew, 0b100);
+ GenInstrV(STORE_FP, width, vd, rs1, rs2, mask, 0b10, 0, 0b100);
}
void Assembler::vssseg6(VRegister vd, Register rs1, Register rs2, VSew vsew,
MaskType mask) {
- bool IsMew = vsew >= E128 ? true : false;
uint8_t width = vsew_switch(vsew);
- GenInstrV(STORE_FP, width, vd, rs1, rs2, mask, 0b10, IsMew, 0b101);
+ GenInstrV(STORE_FP, width, vd, rs1, rs2, mask, 0b10, 0, 0b101);
}
void Assembler::vssseg7(VRegister vd, Register rs1, Register rs2, VSew vsew,
MaskType mask) {
- bool IsMew = vsew >= E128 ? true : false;
uint8_t width = vsew_switch(vsew);
- GenInstrV(STORE_FP, width, vd, rs1, rs2, mask, 0b10, IsMew, 0b110);
+ GenInstrV(STORE_FP, width, vd, rs1, rs2, mask, 0b10, 0, 0b110);
}
void Assembler::vssseg8(VRegister vd, Register rs1, Register rs2, VSew vsew,
MaskType mask) {
- bool IsMew = vsew >= E128 ? true : false;
uint8_t width = vsew_switch(vsew);
- GenInstrV(STORE_FP, width, vd, rs1, rs2, mask, 0b10, IsMew, 0b111);
+ GenInstrV(STORE_FP, width, vd, rs1, rs2, mask, 0b10, 0, 0b111);
}
void Assembler::vlxseg2(VRegister vd, Register rs1, VRegister rs2, VSew vsew,
MaskType mask) {
- bool IsMew = vsew >= E128 ? true : false;
uint8_t width = vsew_switch(vsew);
- GenInstrV(LOAD_FP, width, vd, rs1, rs2, mask, 0b11, IsMew, 0b001);
+ GenInstrV(LOAD_FP, width, vd, rs1, rs2, mask, 0b11, 0, 0b001);
}
void Assembler::vlxseg3(VRegister vd, Register rs1, VRegister rs2, VSew vsew,
MaskType mask) {
- bool IsMew = vsew >= E128 ? true : false;
uint8_t width = vsew_switch(vsew);
- GenInstrV(LOAD_FP, width, vd, rs1, rs2, mask, 0b11, IsMew, 0b010);
+ GenInstrV(LOAD_FP, width, vd, rs1, rs2, mask, 0b11, 0, 0b010);
}
void Assembler::vlxseg4(VRegister vd, Register rs1, VRegister rs2, VSew vsew,
MaskType mask) {
- bool IsMew = vsew >= E128 ? true : false;
uint8_t width = vsew_switch(vsew);
- GenInstrV(LOAD_FP, width, vd, rs1, rs2, mask, 0b11, IsMew, 0b011);
+ GenInstrV(LOAD_FP, width, vd, rs1, rs2, mask, 0b11, 0, 0b011);
}
void Assembler::vlxseg5(VRegister vd, Register rs1, VRegister rs2, VSew vsew,
MaskType mask) {
- bool IsMew = vsew >= E128 ? true : false;
uint8_t width = vsew_switch(vsew);
- GenInstrV(LOAD_FP, width, vd, rs1, rs2, mask, 0b11, IsMew, 0b100);
+ GenInstrV(LOAD_FP, width, vd, rs1, rs2, mask, 0b11, 0, 0b100);
}
void Assembler::vlxseg6(VRegister vd, Register rs1, VRegister rs2, VSew vsew,
MaskType mask) {
- bool IsMew = vsew >= E128 ? true : false;
uint8_t width = vsew_switch(vsew);
- GenInstrV(LOAD_FP, width, vd, rs1, rs2, mask, 0b11, IsMew, 0b101);
+ GenInstrV(LOAD_FP, width, vd, rs1, rs2, mask, 0b11, 0, 0b101);
}
void Assembler::vlxseg7(VRegister vd, Register rs1, VRegister rs2, VSew vsew,
MaskType mask) {
- bool IsMew = vsew >= E128 ? true : false;
uint8_t width = vsew_switch(vsew);
- GenInstrV(LOAD_FP, width, vd, rs1, rs2, mask, 0b11, IsMew, 0b110);
+ GenInstrV(LOAD_FP, width, vd, rs1, rs2, mask, 0b11, 0, 0b110);
}
void Assembler::vlxseg8(VRegister vd, Register rs1, VRegister rs2, VSew vsew,
MaskType mask) {
- bool IsMew = vsew >= E128 ? true : false;
uint8_t width = vsew_switch(vsew);
- GenInstrV(LOAD_FP, width, vd, rs1, rs2, mask, 0b11, IsMew, 0b111);
+ GenInstrV(LOAD_FP, width, vd, rs1, rs2, mask, 0b11, 0, 0b111);
}
void Assembler::vsxseg2(VRegister vd, Register rs1, VRegister rs2, VSew vsew,
MaskType mask) {
- bool IsMew = vsew >= E128 ? true : false;
uint8_t width = vsew_switch(vsew);
- GenInstrV(STORE_FP, width, vd, rs1, rs2, mask, 0b11, IsMew, 0b001);
+ GenInstrV(STORE_FP, width, vd, rs1, rs2, mask, 0b11, 0, 0b001);
}
void Assembler::vsxseg3(VRegister vd, Register rs1, VRegister rs2, VSew vsew,
MaskType mask) {
- bool IsMew = vsew >= E128 ? true : false;
uint8_t width = vsew_switch(vsew);
- GenInstrV(STORE_FP, width, vd, rs1, rs2, mask, 0b11, IsMew, 0b010);
+ GenInstrV(STORE_FP, width, vd, rs1, rs2, mask, 0b11, 0, 0b010);
}
void Assembler::vsxseg4(VRegister vd, Register rs1, VRegister rs2, VSew vsew,
MaskType mask) {
- bool IsMew = vsew >= E128 ? true : false;
uint8_t width = vsew_switch(vsew);
- GenInstrV(STORE_FP, width, vd, rs1, rs2, mask, 0b11, IsMew, 0b011);
+ GenInstrV(STORE_FP, width, vd, rs1, rs2, mask, 0b11, 0, 0b011);
}
void Assembler::vsxseg5(VRegister vd, Register rs1, VRegister rs2, VSew vsew,
MaskType mask) {
- bool IsMew = vsew >= E128 ? true : false;
uint8_t width = vsew_switch(vsew);
- GenInstrV(STORE_FP, width, vd, rs1, rs2, mask, 0b11, IsMew, 0b100);
+ GenInstrV(STORE_FP, width, vd, rs1, rs2, mask, 0b11, 0, 0b100);
}
void Assembler::vsxseg6(VRegister vd, Register rs1, VRegister rs2, VSew vsew,
MaskType mask) {
- bool IsMew = vsew >= E128 ? true : false;
uint8_t width = vsew_switch(vsew);
- GenInstrV(STORE_FP, width, vd, rs1, rs2, mask, 0b11, IsMew, 0b101);
+ GenInstrV(STORE_FP, width, vd, rs1, rs2, mask, 0b11, 0, 0b101);
}
void Assembler::vsxseg7(VRegister vd, Register rs1, VRegister rs2, VSew vsew,
MaskType mask) {
- bool IsMew = vsew >= E128 ? true : false;
uint8_t width = vsew_switch(vsew);
- GenInstrV(STORE_FP, width, vd, rs1, rs2, mask, 0b11, IsMew, 0b110);
+ GenInstrV(STORE_FP, width, vd, rs1, rs2, mask, 0b11, 0, 0b110);
}
void Assembler::vsxseg8(VRegister vd, Register rs1, VRegister rs2, VSew vsew,
MaskType mask) {
- bool IsMew = vsew >= E128 ? true : false;
uint8_t width = vsew_switch(vsew);
- GenInstrV(STORE_FP, width, vd, rs1, rs2, mask, 0b11, IsMew, 0b111);
+ GenInstrV(STORE_FP, width, vd, rs1, rs2, mask, 0b11, 0, 0b111);
}
// Privileged
@@ -3594,7 +3617,7 @@ void Assembler::db(uint8_t data) {
}
void Assembler::dd(uint32_t data, RelocInfo::Mode rmode) {
- if (!RelocInfo::IsNone(rmode)) {
+ if (!RelocInfo::IsNoInfo(rmode)) {
DCHECK(RelocInfo::IsDataEmbeddedObject(rmode) ||
RelocInfo::IsLiteralConstant(rmode));
RecordRelocInfo(rmode);
@@ -3605,7 +3628,7 @@ void Assembler::dd(uint32_t data, RelocInfo::Mode rmode) {
}
void Assembler::dq(uint64_t data, RelocInfo::Mode rmode) {
- if (!RelocInfo::IsNone(rmode)) {
+ if (!RelocInfo::IsNoInfo(rmode)) {
DCHECK(RelocInfo::IsDataEmbeddedObject(rmode) ||
RelocInfo::IsLiteralConstant(rmode));
RecordRelocInfo(rmode);
@@ -3969,6 +3992,26 @@ void ConstantPool::Check(Emission force_emit, Jump require_jump,
SetNextCheckIn(ConstantPool::kCheckInterval);
}
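+// Maps a MachineRepresentation to the element size (in bits) used for SIMD
+// lane loads/stores; the lane index is wrapped modulo the lane count derived
+// from kRvvVLEN (see LoadStoreLaneParams in assembler-riscv64.h).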
+LoadStoreLaneParams::LoadStoreLaneParams(MachineRepresentation rep,
+ uint8_t laneidx) {
+ switch (rep) {
+ case MachineRepresentation::kWord8:
+ *this = LoadStoreLaneParams(laneidx, 8, kRvvVLEN / 16);
+ break;
+ case MachineRepresentation::kWord16:
+ *this = LoadStoreLaneParams(laneidx, 16, kRvvVLEN / 8);
+ break;
+ case MachineRepresentation::kWord32:
+ *this = LoadStoreLaneParams(laneidx, 32, kRvvVLEN / 4);
+ break;
+ case MachineRepresentation::kWord64:
+ *this = LoadStoreLaneParams(laneidx, 64, kRvvVLEN / 2);
+ break;
+ default:
+ UNREACHABLE();
+ }
+}
+
 // Pool entries are accessed with a pc-relative load, therefore this cannot be
 // more than 1 * MB. Since constant pool emission checks are interval-based,
 // and we want to keep entries close to the code, we try to emit them every 64KB.
diff --git a/deps/v8/src/codegen/riscv64/assembler-riscv64.h b/deps/v8/src/codegen/riscv64/assembler-riscv64.h
index 42bb92fd87..63e5dde19e 100644
--- a/deps/v8/src/codegen/riscv64/assembler-riscv64.h
+++ b/deps/v8/src/codegen/riscv64/assembler-riscv64.h
@@ -68,7 +68,7 @@ class Operand {
public:
// Immediate.
V8_INLINE explicit Operand(int64_t immediate,
- RelocInfo::Mode rmode = RelocInfo::NONE)
+ RelocInfo::Mode rmode = RelocInfo::NO_INFO)
: rm_(no_reg), rmode_(rmode) {
value_.immediate = immediate;
}
@@ -78,7 +78,8 @@ class Operand {
}
V8_INLINE explicit Operand(const char* s);
explicit Operand(Handle<HeapObject> handle);
- V8_INLINE explicit Operand(Smi value) : rm_(no_reg), rmode_(RelocInfo::NONE) {
+ V8_INLINE explicit Operand(Smi value)
+ : rm_(no_reg), rmode_(RelocInfo::NO_INFO) {
value_.immediate = static_cast<intptr_t>(value.ptr());
}
@@ -738,6 +739,15 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
void vmerge_vx(VRegister vd, Register rs1, VRegister vs2);
void vmerge_vi(VRegister vd, uint8_t imm5, VRegister vs2);
+ void vredmaxu_vs(VRegister vd, VRegister vs2, VRegister vs1,
+ MaskType mask = NoMask);
+ void vredmax_vs(VRegister vd, VRegister vs2, VRegister vs1,
+ MaskType mask = NoMask);
+ void vredmin_vs(VRegister vd, VRegister vs2, VRegister vs1,
+ MaskType mask = NoMask);
+ void vredminu_vs(VRegister vd, VRegister vs2, VRegister vs1,
+ MaskType mask = NoMask);
+
void vadc_vv(VRegister vd, VRegister vs1, VRegister vs2);
void vadc_vx(VRegister vd, Register rs1, VRegister vs2);
void vadc_vi(VRegister vd, uint8_t imm5, VRegister vs2);
@@ -747,7 +757,12 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
void vmadc_vi(VRegister vd, uint8_t imm5, VRegister vs2);
void vfmv_vf(VRegister vd, FPURegister fs1, MaskType mask = NoMask);
- void vfmv_fs(FPURegister fd, VRegister vs2, MaskType mask = NoMask);
+ void vfmv_fs(FPURegister fd, VRegister vs2);
+ void vfmv_sf(VRegister vd, FPURegister fs);
+
+ void vwaddu_wx(VRegister vd, VRegister vs2, Register rs1,
+ MaskType mask = NoMask);
+ void vid_v(VRegister vd, MaskType mask = Mask);
#define DEFINE_OPIVV(name, funct6) \
void name##_vv(VRegister vd, VRegister vs2, VRegister vs1, \
@@ -762,7 +777,7 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
MaskType mask = NoMask);
#define DEFINE_OPMVV(name, funct6) \
- void name##_vs(VRegister vd, VRegister vs2, VRegister vs1, \
+ void name##_vv(VRegister vd, VRegister vs2, VRegister vs1, \
MaskType mask = NoMask);
#define DEFINE_OPMVX(name, funct6) \
@@ -773,6 +788,10 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
void name##_vv(VRegister vd, VRegister vs2, VRegister vs1, \
MaskType mask = NoMask);
+#define DEFINE_OPFRED(name, funct6) \
+ void name##_vs(VRegister vd, VRegister vs2, VRegister vs1, \
+ MaskType mask = NoMask);
+
#define DEFINE_OPFVF(name, funct6) \
void name##_vf(VRegister vd, VRegister vs2, FPURegister fs1, \
MaskType mask = NoMask);
@@ -785,11 +804,31 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
void name##_vf(VRegister vd, FPURegister fs1, VRegister vs2, \
MaskType mask = NoMask);
+#define DEFINE_OPMVV_VIE(name) \
+ void name(VRegister vd, VRegister vs2, MaskType mask = NoMask);
+
DEFINE_OPIVV(vadd, VADD_FUNCT6)
DEFINE_OPIVX(vadd, VADD_FUNCT6)
DEFINE_OPIVI(vadd, VADD_FUNCT6)
DEFINE_OPIVV(vsub, VSUB_FUNCT6)
DEFINE_OPIVX(vsub, VSUB_FUNCT6)
+ DEFINE_OPMVX(vdiv, VDIV_FUNCT6)
+ DEFINE_OPMVX(vdivu, VDIVU_FUNCT6)
+ DEFINE_OPMVX(vmul, VMUL_FUNCT6)
+ DEFINE_OPMVX(vmulhu, VMULHU_FUNCT6)
+ DEFINE_OPMVX(vmulhsu, VMULHSU_FUNCT6)
+ DEFINE_OPMVX(vmulh, VMULH_FUNCT6)
+ DEFINE_OPMVV(vdiv, VDIV_FUNCT6)
+ DEFINE_OPMVV(vdivu, VDIVU_FUNCT6)
+ DEFINE_OPMVV(vmul, VMUL_FUNCT6)
+ DEFINE_OPMVV(vmulhu, VMULHU_FUNCT6)
+ DEFINE_OPMVV(vmulhsu, VMULHSU_FUNCT6)
+ DEFINE_OPMVV(vmulh, VMULH_FUNCT6)
+ DEFINE_OPMVV(vwmul, VWMUL_FUNCT6)
+ DEFINE_OPMVV(vwmulu, VWMULU_FUNCT6)
+ DEFINE_OPMVV(vwaddu, VWADDU_FUNCT6)
+ DEFINE_OPMVV(vwadd, VWADD_FUNCT6)
+ DEFINE_OPMVV(vcompress, VCOMPRESS_FUNCT6)
DEFINE_OPIVX(vsadd, VSADD_FUNCT6)
DEFINE_OPIVV(vsadd, VSADD_FUNCT6)
DEFINE_OPIVI(vsadd, VSADD_FUNCT6)
@@ -860,14 +899,16 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
DEFINE_OPIVX(vsrl, VSRL_FUNCT6)
DEFINE_OPIVI(vsrl, VSRL_FUNCT6)
+ DEFINE_OPIVV(vsra, VSRA_FUNCT6)
+ DEFINE_OPIVX(vsra, VSRA_FUNCT6)
+ DEFINE_OPIVI(vsra, VSRA_FUNCT6)
+
DEFINE_OPIVV(vsll, VSLL_FUNCT6)
DEFINE_OPIVX(vsll, VSLL_FUNCT6)
DEFINE_OPIVI(vsll, VSLL_FUNCT6)
- DEFINE_OPMVV(vredmaxu, VREDMAXU_FUNCT6)
- DEFINE_OPMVV(vredmax, VREDMAX_FUNCT6)
- DEFINE_OPMVV(vredmin, VREDMIN_FUNCT6)
- DEFINE_OPMVV(vredminu, VREDMINU_FUNCT6)
+ DEFINE_OPIVV(vsmul, VSMUL_FUNCT6)
+ DEFINE_OPIVX(vsmul, VSMUL_FUNCT6)
DEFINE_OPFVV(vfadd, VFADD_FUNCT6)
DEFINE_OPFVF(vfadd, VFADD_FUNCT6)
@@ -884,6 +925,7 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
DEFINE_OPFVV(vmfle, VMFLE_FUNCT6)
DEFINE_OPFVV(vfmax, VMFMAX_FUNCT6)
DEFINE_OPFVV(vfmin, VMFMIN_FUNCT6)
+ DEFINE_OPFRED(vfredmax, VFREDMAX_FUNCT6)
DEFINE_OPFVV(vfsngj, VFSGNJ_FUNCT6)
DEFINE_OPFVF(vfsngj, VFSGNJ_FUNCT6)
@@ -918,6 +960,14 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
DEFINE_OPIVX(vnclipu, VNCLIPU_FUNCT6)
DEFINE_OPIVI(vnclipu, VNCLIPU_FUNCT6)
+ // Vector Integer Extension
+ DEFINE_OPMVV_VIE(vzext_vf8)
+ DEFINE_OPMVV_VIE(vsext_vf8)
+ DEFINE_OPMVV_VIE(vzext_vf4)
+ DEFINE_OPMVV_VIE(vsext_vf4)
+ DEFINE_OPMVV_VIE(vzext_vf2)
+ DEFINE_OPMVV_VIE(vsext_vf2)
+
#undef DEFINE_OPIVI
#undef DEFINE_OPIVV
#undef DEFINE_OPIVX
@@ -927,6 +977,8 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
#undef DEFINE_OPFVF
#undef DEFINE_OPFVV_FMA
#undef DEFINE_OPFVF_FMA
+#undef DEFINE_OPMVV_VIE
+#undef DEFINE_OPFRED
#define DEFINE_VFUNARY(name, funct6, vs1) \
void name(VRegister vd, VRegister vs2, MaskType mask = NoMask) { \
@@ -937,17 +989,34 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
DEFINE_VFUNARY(vfcvt_x_f_v, VFUNARY0_FUNCT6, VFCVT_X_F_V)
DEFINE_VFUNARY(vfcvt_f_x_v, VFUNARY0_FUNCT6, VFCVT_F_X_V)
DEFINE_VFUNARY(vfcvt_f_xu_v, VFUNARY0_FUNCT6, VFCVT_F_XU_V)
+ DEFINE_VFUNARY(vfwcvt_xu_f_v, VFUNARY0_FUNCT6, VFWCVT_XU_F_V)
+ DEFINE_VFUNARY(vfwcvt_x_f_v, VFUNARY0_FUNCT6, VFWCVT_X_F_V)
+ DEFINE_VFUNARY(vfwcvt_f_x_v, VFUNARY0_FUNCT6, VFWCVT_F_X_V)
+ DEFINE_VFUNARY(vfwcvt_f_xu_v, VFUNARY0_FUNCT6, VFWCVT_F_XU_V)
+ DEFINE_VFUNARY(vfwcvt_f_f_v, VFUNARY0_FUNCT6, VFWCVT_F_F_V)
+
DEFINE_VFUNARY(vfncvt_f_f_w, VFUNARY0_FUNCT6, VFNCVT_F_F_W)
+ DEFINE_VFUNARY(vfncvt_x_f_w, VFUNARY0_FUNCT6, VFNCVT_X_F_W)
+ DEFINE_VFUNARY(vfncvt_xu_f_w, VFUNARY0_FUNCT6, VFNCVT_XU_F_W)
DEFINE_VFUNARY(vfclass_v, VFUNARY1_FUNCT6, VFCLASS_V)
+ DEFINE_VFUNARY(vfsqrt_v, VFUNARY1_FUNCT6, VFSQRT_V)
#undef DEFINE_VFUNARY
- void vnot_vv(VRegister dst, VRegister src) { vxor_vi(dst, src, -1); }
+ void vnot_vv(VRegister dst, VRegister src, MaskType mask = NoMask) {
+ vxor_vi(dst, src, -1, mask);
+ }
- void vneg_vv(VRegister dst, VRegister src) { vrsub_vx(dst, src, zero_reg); }
+ void vneg_vv(VRegister dst, VRegister src, MaskType mask = NoMask) {
+ vrsub_vx(dst, src, zero_reg, mask);
+ }
- void vfneg_vv(VRegister dst, VRegister src) { vfsngjn_vv(dst, src, src); }
- void vfabs_vv(VRegister dst, VRegister src) { vfsngjx_vv(dst, src, src); }
+ void vfneg_vv(VRegister dst, VRegister src, MaskType mask = NoMask) {
+ vfsngjn_vv(dst, src, src, mask);
+ }
+ void vfabs_vv(VRegister dst, VRegister src, MaskType mask = NoMask) {
+ vfsngjx_vv(dst, src, src, mask);
+ }
// Privileged
void uret();
void sret();
@@ -1130,9 +1199,9 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
// Writes a single byte or word of data in the code stream. Used for
// inline tables, e.g., jump-tables.
void db(uint8_t data);
- void dd(uint32_t data, RelocInfo::Mode rmode = RelocInfo::NONE);
- void dq(uint64_t data, RelocInfo::Mode rmode = RelocInfo::NONE);
- void dp(uintptr_t data, RelocInfo::Mode rmode = RelocInfo::NONE) {
+ void dd(uint32_t data, RelocInfo::Mode rmode = RelocInfo::NO_INFO);
+ void dq(uint64_t data, RelocInfo::Mode rmode = RelocInfo::NO_INFO);
+ void dp(uintptr_t data, RelocInfo::Mode rmode = RelocInfo::NO_INFO) {
dq(data, rmode);
}
void dd(Label* label);
@@ -1247,6 +1316,14 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
}
}
+ void set(Register rd, int8_t sew, int8_t lmul) {
+ DCHECK_GE(sew, E8);
+ DCHECK_LE(sew, E64);
+ DCHECK_GE(lmul, m1);
+ DCHECK_LE(lmul, mf2);
+ set(rd, VSew(sew), Vlmul(lmul));
+ }
+
void set(RoundingMode mode) {
if (mode_ != mode) {
assm_->addi(kScratchReg, zero_reg, mode << kFcsrFrmShift);
@@ -1533,6 +1610,8 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
VRegister vs2, MaskType mask = NoMask);
void GenInstrV(uint8_t funct6, Opcode opcode, VRegister vd, int8_t vs1,
VRegister vs2, MaskType mask = NoMask);
+ void GenInstrV(uint8_t funct6, Opcode opcode, VRegister vd, VRegister vs2,
+ MaskType mask = NoMask);
// OPMVV OPFVV
void GenInstrV(uint8_t funct6, Opcode opcode, Register rd, VRegister vs1,
VRegister vs2, MaskType mask = NoMask);
@@ -1683,6 +1762,18 @@ class V8_EXPORT_PRIVATE UseScratchRegisterScope {
RegList old_available_;
};
+class LoadStoreLaneParams {
+ public:
+ int sz;
+ uint8_t laneidx;
+
+ LoadStoreLaneParams(MachineRepresentation rep, uint8_t laneidx);
+
+ private:
+ LoadStoreLaneParams(uint8_t laneidx, int sz, int lanes)
+ : sz(sz), laneidx(laneidx % lanes) {}
+};
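+// Illustrative use (not part of this change):
+//   LoadStoreLaneParams p(MachineRepresentation::kWord32, 5);
+//   // p.sz == 32; p.laneidx is 5 reduced modulo the lane count.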
+
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/codegen/riscv64/constants-riscv64.h b/deps/v8/src/codegen/riscv64/constants-riscv64.h
index 173a5d0457..b5afe9b1df 100644
--- a/deps/v8/src/codegen/riscv64/constants-riscv64.h
+++ b/deps/v8/src/codegen/riscv64/constants-riscv64.h
@@ -712,6 +712,61 @@ enum Opcode : uint32_t {
RO_V_VSUB_VX = OP_IVX | (VSUB_FUNCT6 << kRvvFunct6Shift),
RO_V_VSUB_VV = OP_IVV | (VSUB_FUNCT6 << kRvvFunct6Shift),
+ VDIVU_FUNCT6 = 0b100000,
+ RO_V_VDIVU_VX = OP_MVX | (VDIVU_FUNCT6 << kRvvFunct6Shift),
+ RO_V_VDIVU_VV = OP_MVV | (VDIVU_FUNCT6 << kRvvFunct6Shift),
+
+ VDIV_FUNCT6 = 0b100001,
+ RO_V_VDIV_VX = OP_MVX | (VDIV_FUNCT6 << kRvvFunct6Shift),
+ RO_V_VDIV_VV = OP_MVV | (VDIV_FUNCT6 << kRvvFunct6Shift),
+
+ VREMU_FUNCT6 = 0b100010,
+ RO_V_VREMU_VX = OP_MVX | (VREMU_FUNCT6 << kRvvFunct6Shift),
+ RO_V_VREMU_VV = OP_MVV | (VREMU_FUNCT6 << kRvvFunct6Shift),
+
+ VREM_FUNCT6 = 0b100011,
+ RO_V_VREM_VX = OP_MVX | (VREM_FUNCT6 << kRvvFunct6Shift),
+ RO_V_VREM_VV = OP_MVV | (VREM_FUNCT6 << kRvvFunct6Shift),
+
+ VMULHU_FUNCT6 = 0b100100,
+ RO_V_VMULHU_VX = OP_MVX | (VMULHU_FUNCT6 << kRvvFunct6Shift),
+ RO_V_VMULHU_VV = OP_MVV | (VMULHU_FUNCT6 << kRvvFunct6Shift),
+
+ VMUL_FUNCT6 = 0b100101,
+ RO_V_VMUL_VX = OP_MVX | (VMUL_FUNCT6 << kRvvFunct6Shift),
+ RO_V_VMUL_VV = OP_MVV | (VMUL_FUNCT6 << kRvvFunct6Shift),
+
+ VWMUL_FUNCT6 = 0b111011,
+ RO_V_VWMUL_VX = OP_MVX | (VWMUL_FUNCT6 << kRvvFunct6Shift),
+ RO_V_VWMUL_VV = OP_MVV | (VWMUL_FUNCT6 << kRvvFunct6Shift),
+
+ VWMULU_FUNCT6 = 0b111000,
+ RO_V_VWMULU_VX = OP_MVX | (VWMULU_FUNCT6 << kRvvFunct6Shift),
+ RO_V_VWMULU_VV = OP_MVV | (VWMULU_FUNCT6 << kRvvFunct6Shift),
+
+ VMULHSU_FUNCT6 = 0b100110,
+ RO_V_VMULHSU_VX = OP_MVX | (VMULHSU_FUNCT6 << kRvvFunct6Shift),
+ RO_V_VMULHSU_VV = OP_MVV | (VMULHSU_FUNCT6 << kRvvFunct6Shift),
+
+ VMULH_FUNCT6 = 0b100111,
+ RO_V_VMULH_VX = OP_MVX | (VMULH_FUNCT6 << kRvvFunct6Shift),
+ RO_V_VMULH_VV = OP_MVV | (VMULH_FUNCT6 << kRvvFunct6Shift),
+
+ VWADD_FUNCT6 = 0b110001,
+ RO_V_VWADD_VV = OP_MVV | (VWADD_FUNCT6 << kRvvFunct6Shift),
+ RO_V_VWADD_VX = OP_MVX | (VWADD_FUNCT6 << kRvvFunct6Shift),
+
+ VWADDU_FUNCT6 = 0b110000,
+ RO_V_VWADDU_VV = OP_MVV | (VWADDU_FUNCT6 << kRvvFunct6Shift),
+ RO_V_VWADDU_VX = OP_MVX | (VWADDU_FUNCT6 << kRvvFunct6Shift),
+
+ VWADDUW_FUNCT6 = 0b110101,
+ RO_V_VWADDUW_VX = OP_MVX | (VWADDUW_FUNCT6 << kRvvFunct6Shift),
+ RO_V_VWADDUW_VV = OP_MVV | (VWADDUW_FUNCT6 << kRvvFunct6Shift),
+
+ VCOMPRESS_FUNCT6 = 0b010111,
+ RO_V_VCOMPRESS_VV = OP_MVV | (VCOMPRESS_FUNCT6 << kRvvFunct6Shift),
+
VSADDU_FUNCT6 = 0b100000,
RO_V_VSADDU_VI = OP_IVI | (VSADDU_FUNCT6 << kRvvFunct6Shift),
RO_V_VSADDU_VV = OP_IVV | (VSADDU_FUNCT6 << kRvvFunct6Shift),
@@ -829,11 +884,20 @@ enum Opcode : uint32_t {
RO_V_VSRL_VV = OP_IVV | (VSRL_FUNCT6 << kRvvFunct6Shift),
RO_V_VSRL_VX = OP_IVX | (VSRL_FUNCT6 << kRvvFunct6Shift),
+ VSRA_FUNCT6 = 0b101001,
+ RO_V_VSRA_VI = OP_IVI | (VSRA_FUNCT6 << kRvvFunct6Shift),
+ RO_V_VSRA_VV = OP_IVV | (VSRA_FUNCT6 << kRvvFunct6Shift),
+ RO_V_VSRA_VX = OP_IVX | (VSRA_FUNCT6 << kRvvFunct6Shift),
+
VSLL_FUNCT6 = 0b100101,
RO_V_VSLL_VI = OP_IVI | (VSLL_FUNCT6 << kRvvFunct6Shift),
RO_V_VSLL_VV = OP_IVV | (VSLL_FUNCT6 << kRvvFunct6Shift),
RO_V_VSLL_VX = OP_IVX | (VSLL_FUNCT6 << kRvvFunct6Shift),
+ VSMUL_FUNCT6 = 0b100111,
+ RO_V_VSMUL_VV = OP_IVV | (VSMUL_FUNCT6 << kRvvFunct6Shift),
+ RO_V_VSMUL_VX = OP_IVX | (VSMUL_FUNCT6 << kRvvFunct6Shift),
+
VADC_FUNCT6 = 0b010000,
RO_V_VADC_VI = OP_IVI | (VADC_FUNCT6 << kRvvFunct6Shift),
RO_V_VADC_VV = OP_IVV | (VADC_FUNCT6 << kRvvFunct6Shift),
@@ -846,13 +910,23 @@ enum Opcode : uint32_t {
VWXUNARY0_FUNCT6 = 0b010000,
VRXUNARY0_FUNCT6 = 0b010000,
+ VMUNARY0_FUNCT6 = 0b010100,
RO_V_VWXUNARY0 = OP_MVV | (VWXUNARY0_FUNCT6 << kRvvFunct6Shift),
RO_V_VRXUNARY0 = OP_MVX | (VRXUNARY0_FUNCT6 << kRvvFunct6Shift),
+ RO_V_VMUNARY0 = OP_MVV | (VMUNARY0_FUNCT6 << kRvvFunct6Shift),
+
+ VID_V = 0b10001,
+
+ VXUNARY0_FUNCT6 = 0b010010,
+ RO_V_VXUNARY0 = OP_MVV | (VXUNARY0_FUNCT6 << kRvvFunct6Shift),
VWFUNARY0_FUNCT6 = 0b010000,
RO_V_VFMV_FS = OP_FVV | (VWFUNARY0_FUNCT6 << kRvvFunct6Shift),
+ VRFUNARY0_FUNCT6 = 0b010000,
+ RO_V_VFMV_SF = OP_FVF | (VRFUNARY0_FUNCT6 << kRvvFunct6Shift),
+
VREDMAXU_FUNCT6 = 0b000110,
RO_V_VREDMAXU = OP_MVV | (VREDMAXU_FUNCT6 << kRvvFunct6Shift),
VREDMAX_FUNCT6 = 0b000111,
@@ -872,9 +946,19 @@ enum Opcode : uint32_t {
VFCVT_X_F_V = 0b00001,
VFCVT_F_XU_V = 0b00010,
VFCVT_F_X_V = 0b00011,
+ VFWCVT_XU_F_V = 0b01000,
+ VFWCVT_X_F_V = 0b01001,
+ VFWCVT_F_XU_V = 0b01010,
+ VFWCVT_F_X_V = 0b01011,
+ VFWCVT_F_F_V = 0b01100,
VFNCVT_F_F_W = 0b10100,
+ VFNCVT_X_F_W = 0b10001,
+ VFNCVT_XU_F_W = 0b10000,
VFCLASS_V = 0b10000,
+ VFSQRT_V = 0b00000,
+ VFSQRT7_V = 0b00100,
+ VFREC7_V = 0b00101,
VFADD_FUNCT6 = 0b000000,
RO_V_VFADD_VV = OP_FVV | (VFADD_FUNCT6 << kRvvFunct6Shift),
@@ -918,6 +1002,9 @@ enum Opcode : uint32_t {
RO_V_VFMAX_VV = OP_FVV | (VFMAX_FUNCT6 << kRvvFunct6Shift),
RO_V_VFMAX_VF = OP_FVF | (VFMAX_FUNCT6 << kRvvFunct6Shift),
+ VFREDMAX_FUNCT6 = 0b0001111,
+ RO_V_VFREDMAX_VV = OP_FVV | (VFREDMAX_FUNCT6 << kRvvFunct6Shift),
+
VFMIN_FUNCT6 = 0b000100,
RO_V_VFMIN_VV = OP_FVV | (VFMIN_FUNCT6 << kRvvFunct6Shift),
RO_V_VFMIN_VF = OP_FVF | (VFMIN_FUNCT6 << kRvvFunct6Shift),
@@ -1132,14 +1219,10 @@ enum FClassFlag {
V(E8) \
V(E16) \
V(E32) \
- V(E64) \
- V(E128) \
- V(E256) \
- V(E512) \
- V(E1024)
+ V(E64)
-enum VSew {
#define DEFINE_FLAG(name) name,
+enum VSew {
RVV_SEW(DEFINE_FLAG)
#undef DEFINE_FLAG
};
@@ -1785,7 +1868,7 @@ class InstructionGetters : public T {
RVV_LMUL(CAST_VLMUL)
default:
return "unknown";
-#undef CAST_VSEW
+#undef CAST_VLMUL
}
}
diff --git a/deps/v8/src/codegen/riscv64/macro-assembler-riscv64.cc b/deps/v8/src/codegen/riscv64/macro-assembler-riscv64.cc
index 7f93187322..8b3b76da32 100644
--- a/deps/v8/src/codegen/riscv64/macro-assembler-riscv64.cc
+++ b/deps/v8/src/codegen/riscv64/macro-assembler-riscv64.cc
@@ -2159,11 +2159,25 @@ void TurboAssembler::RoundHelper(VRegister dst, VRegister src, Register scratch,
// they also satisfy (scratch2 - kFloatExponentBias >= kFloatMantissaBits),
// and JS round semantics specify that rounding of NaN (Infinity) returns NaN
  // (Infinity), so NaN and Infinity are considered rounded values too.
- li(scratch, 64 - kFloat32MantissaBits - kFloat32ExponentBits);
+ const int kFloatMantissaBits =
+ sizeof(F) == 4 ? kFloat32MantissaBits : kFloat64MantissaBits;
+ const int kFloatExponentBits =
+ sizeof(F) == 4 ? kFloat32ExponentBits : kFloat64ExponentBits;
+ const int kFloatExponentBias =
+ sizeof(F) == 4 ? kFloat32ExponentBias : kFloat64ExponentBias;
+
+ // slli(rt, rs, 64 - (pos + size));
+ // if (sign_extend) {
+ // srai(rt, rt, 64 - size);
+ // } else {
+ // srli(rt, rt, 64 - size);
+ // }
+
+ li(scratch, 64 - kFloatMantissaBits - kFloatExponentBits);
vsll_vx(v_scratch, src, scratch);
- li(scratch, 64 - kFloat32ExponentBits);
+ li(scratch, 64 - kFloatExponentBits);
vsrl_vx(v_scratch, v_scratch, scratch);
- li(scratch, kFloat32ExponentBias + kFloat32MantissaBits);
+ li(scratch, kFloatExponentBias + kFloatMantissaBits);
vmslt_vx(v0, v_scratch, scratch);
VU.set(frm);
@@ -2205,6 +2219,26 @@ void TurboAssembler::Floor_d(VRegister vdst, VRegister vsrc, Register scratch,
RoundHelper<double>(vdst, vsrc, scratch, v_scratch, RDN);
}
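+// Vector rounding helpers: RTZ rounds toward zero (truncation), RNE rounds to
+// nearest, ties to even.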
+void TurboAssembler::Trunc_d(VRegister vdst, VRegister vsrc, Register scratch,
+ VRegister v_scratch) {
+ RoundHelper<double>(vdst, vsrc, scratch, v_scratch, RTZ);
+}
+
+void TurboAssembler::Trunc_f(VRegister vdst, VRegister vsrc, Register scratch,
+ VRegister v_scratch) {
+ RoundHelper<float>(vdst, vsrc, scratch, v_scratch, RTZ);
+}
+
+void TurboAssembler::Round_f(VRegister vdst, VRegister vsrc, Register scratch,
+ VRegister v_scratch) {
+ RoundHelper<float>(vdst, vsrc, scratch, v_scratch, RNE);
+}
+
+void TurboAssembler::Round_d(VRegister vdst, VRegister vsrc, Register scratch,
+ VRegister v_scratch) {
+ RoundHelper<double>(vdst, vsrc, scratch, v_scratch, RNE);
+}
+
void TurboAssembler::Floor_d_d(FPURegister dst, FPURegister src,
FPURegister fpu_scratch) {
RoundHelper<double>(dst, src, fpu_scratch, RDN);
@@ -3543,6 +3577,7 @@ void TurboAssembler::LoadAddress(Register dst, Label* target,
CHECK(is_int32(offset + 0x800));
int32_t Hi20 = (((int32_t)offset + 0x800) >> 12);
int32_t Lo12 = (int32_t)offset << 20 >> 20;
+ BlockTrampolinePoolScope block_trampoline_pool(this);
auipc(dst, Hi20);
addi(dst, dst, Lo12);
} else {
@@ -3993,6 +4028,64 @@ void TurboAssembler::WasmRvvS128const(VRegister dst, const uint8_t imms[16]) {
vsll_vi(v0, v0, 1);
vmerge_vx(dst, kScratchReg, dst);
}
+
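+// Loads a ts-bit scalar from `src` and inserts it into lane `laneidx` of
+// `dst`: a one-hot mask (1 << laneidx) is moved into v0 and vmerge_vx merges
+// the loaded value into the destination under that mask.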
+void TurboAssembler::LoadLane(int ts, VRegister dst, uint8_t laneidx,
+ MemOperand src) {
+ if (ts == 8) {
+ Lbu(kScratchReg2, src);
+ VU.set(kScratchReg, E64, m1);
+ li(kScratchReg, 0x1 << laneidx);
+ vmv_sx(v0, kScratchReg);
+ VU.set(kScratchReg, E8, m1);
+ vmerge_vx(dst, kScratchReg2, dst);
+ } else if (ts == 16) {
+ Lhu(kScratchReg2, src);
+ VU.set(kScratchReg, E16, m1);
+ li(kScratchReg, 0x1 << laneidx);
+ vmv_sx(v0, kScratchReg);
+ vmerge_vx(dst, kScratchReg2, dst);
+ } else if (ts == 32) {
+ Lwu(kScratchReg2, src);
+ VU.set(kScratchReg, E32, m1);
+ li(kScratchReg, 0x1 << laneidx);
+ vmv_sx(v0, kScratchReg);
+ vmerge_vx(dst, kScratchReg2, dst);
+ } else if (ts == 64) {
+ Ld(kScratchReg2, src);
+ VU.set(kScratchReg, E64, m1);
+ li(kScratchReg, 0x1 << laneidx);
+ vmv_sx(v0, kScratchReg);
+ vmerge_vx(dst, kScratchReg2, dst);
+ } else {
+ UNREACHABLE();
+ }
+}
+
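+// Stores lane `laneidx` of `src` to memory: the lane is slid down to element
+// 0 of a scratch vector, moved to a scalar register with vmv_xs, and written
+// with a store of the matching width (Sb/Sh/Sw/Sd).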
+void TurboAssembler::StoreLane(int sz, VRegister src, uint8_t laneidx,
+ MemOperand dst) {
+ if (sz == 8) {
+ VU.set(kScratchReg, E8, m1);
+ vslidedown_vi(kSimd128ScratchReg, src, laneidx);
+ vmv_xs(kScratchReg, kSimd128ScratchReg);
+ Sb(kScratchReg, dst);
+ } else if (sz == 16) {
+ VU.set(kScratchReg, E16, m1);
+ vslidedown_vi(kSimd128ScratchReg, src, laneidx);
+ vmv_xs(kScratchReg, kSimd128ScratchReg);
+ Sh(kScratchReg, dst);
+ } else if (sz == 32) {
+ VU.set(kScratchReg, E32, m1);
+ vslidedown_vi(kSimd128ScratchReg, src, laneidx);
+ vmv_xs(kScratchReg, kSimd128ScratchReg);
+ Sw(kScratchReg, dst);
+ } else {
+ DCHECK_EQ(sz, 64);
+ VU.set(kScratchReg, E64, m1);
+ vslidedown_vi(kSimd128ScratchReg, src, laneidx);
+ vmv_xs(kScratchReg, kSimd128ScratchReg);
+ Sd(kScratchReg, dst);
+ }
+}
// -----------------------------------------------------------------------------
// Runtime calls.
@@ -4120,7 +4213,7 @@ void MacroAssembler::JumpToExternalReference(const ExternalReference& builtin,
Jump(code, RelocInfo::CODE_TARGET, al, zero_reg, Operand(zero_reg));
}
-void MacroAssembler::JumpToInstructionStream(Address entry) {
+void MacroAssembler::JumpToOffHeapInstructionStream(Address entry) {
  // Ld an Address from the constant pool.
  // Record a value into the constant pool.
if (!FLAG_riscv_constant_pool) {
diff --git a/deps/v8/src/codegen/riscv64/macro-assembler-riscv64.h b/deps/v8/src/codegen/riscv64/macro-assembler-riscv64.h
index 9e43eaf8aa..89d88f7af2 100644
--- a/deps/v8/src/codegen/riscv64/macro-assembler-riscv64.h
+++ b/deps/v8/src/codegen/riscv64/macro-assembler-riscv64.h
@@ -854,6 +854,14 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
VRegister v_scratch);
void Floor_d(VRegister dst, VRegister src, Register scratch,
VRegister v_scratch);
+ void Trunc_f(VRegister dst, VRegister src, Register scratch,
+ VRegister v_scratch);
+ void Trunc_d(VRegister dst, VRegister src, Register scratch,
+ VRegister v_scratch);
+ void Round_f(VRegister dst, VRegister src, Register scratch,
+ VRegister v_scratch);
+ void Round_d(VRegister dst, VRegister src, Register scratch,
+ VRegister v_scratch);
  // Jump if the register contains a smi.
void JumpIfSmi(Register value, Label* smi_label);
@@ -953,6 +961,9 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
Vlmul lmul);
void WasmRvvS128const(VRegister dst, const uint8_t imms[16]);
+ void LoadLane(int sz, VRegister dst, uint8_t laneidx, MemOperand src);
+ void StoreLane(int sz, VRegister src, uint8_t laneidx, MemOperand dst);
+
protected:
inline Register GetRtAsRegisterHelper(const Operand& rt, Register scratch);
inline int32_t GetOffset(int32_t offset, Label* L, OffsetSize bits);
@@ -1187,7 +1198,7 @@ class V8_EXPORT_PRIVATE MacroAssembler : public TurboAssembler {
bool builtin_exit_frame = false);
// Generates a trampoline to jump to the off-heap instruction stream.
- void JumpToInstructionStream(Address entry);
+ void JumpToOffHeapInstructionStream(Address entry);
// ---------------------------------------------------------------------------
// In-place weak references.
diff --git a/deps/v8/src/codegen/riscv64/register-riscv64.h b/deps/v8/src/codegen/riscv64/register-riscv64.h
index 14c993512f..2b1e4d3d65 100644
--- a/deps/v8/src/codegen/riscv64/register-riscv64.h
+++ b/deps/v8/src/codegen/riscv64/register-riscv64.h
@@ -55,8 +55,13 @@ namespace internal {
V(v16) V(v17) V(v18) V(v19) V(v20) V(v21) V(v22) V(v23) \
V(v24) V(v25) V(v26) V(v27) V(v28) V(v29) V(v30) V(v31)
-#define ALLOCATABLE_DOUBLE_REGISTERS(V) \
- V(ft1) V(ft2) V(ft3) V(ft4) V(ft5) V(ft6) V(ft7) V(ft8) \
+#define UNALLOACTABLE_VECTOR_REGISTERS(V) \
+ V(v9) V(v10) V(v11) V(v12) V(v13) V(v14) V(v15) \
+ V(v18) V(v19) V(v20) V(v21) V(v22) V(v23) \
+ V(v24) V(v25)
+
+#define ALLOCATABLE_DOUBLE_REGISTERS(V) \
+ V(ft1) V(ft2) V(ft3) V(ft4) V(ft5) V(ft6) V(ft7) V(ft8) \
V(ft9) V(ft10) V(ft11) V(fa0) V(fa1) V(fa2) V(fa3) V(fa4) V(fa5) \
V(fa6) V(fa7)
@@ -374,8 +379,9 @@ constexpr Register kWasmInstanceRegister = a0;
constexpr Register kWasmCompileLazyFuncIndexRegister = t0;
constexpr DoubleRegister kFPReturnRegister0 = fa0;
-constexpr VRegister kSimd128ScratchReg = v27;
-constexpr VRegister kSimd128ScratchReg2 = v26;
+constexpr VRegister kSimd128ScratchReg = v26;
+constexpr VRegister kSimd128ScratchReg2 = v27;
+constexpr VRegister kSimd128ScratchReg3 = v8;
constexpr VRegister kSimd128RegZero = v25;
#ifdef V8_COMPRESS_POINTERS_IN_SHARED_CAGE
diff --git a/deps/v8/src/codegen/s390/assembler-s390-inl.h b/deps/v8/src/codegen/s390/assembler-s390-inl.h
index b3d0ffa1da..8170c02204 100644
--- a/deps/v8/src/codegen/s390/assembler-s390-inl.h
+++ b/deps/v8/src/codegen/s390/assembler-s390-inl.h
@@ -247,7 +247,7 @@ void RelocInfo::WipeOut() {
}
// Operand constructors
-Operand::Operand(Register rm) : rm_(rm), rmode_(RelocInfo::NONE) {}
+Operand::Operand(Register rm) : rm_(rm), rmode_(RelocInfo::NO_INFO) {}
// Fetch the 32bit value from the FIXED_SEQUENCE IIHF / IILF
Address Assembler::target_address_at(Address pc, Address constant_pool) {
diff --git a/deps/v8/src/codegen/s390/assembler-s390.cc b/deps/v8/src/codegen/s390/assembler-s390.cc
index 8457e7c536..1283c87317 100644
--- a/deps/v8/src/codegen/s390/assembler-s390.cc
+++ b/deps/v8/src/codegen/s390/assembler-s390.cc
@@ -802,7 +802,7 @@ void Assembler::db(uint8_t data) {
void Assembler::dd(uint32_t data, RelocInfo::Mode rmode) {
CheckBuffer();
- if (!RelocInfo::IsNone(rmode)) {
+ if (!RelocInfo::IsNoInfo(rmode)) {
DCHECK(RelocInfo::IsDataEmbeddedObject(rmode) ||
RelocInfo::IsLiteralConstant(rmode));
RecordRelocInfo(rmode);
@@ -813,7 +813,7 @@ void Assembler::dd(uint32_t data, RelocInfo::Mode rmode) {
void Assembler::dq(uint64_t value, RelocInfo::Mode rmode) {
CheckBuffer();
- if (!RelocInfo::IsNone(rmode)) {
+ if (!RelocInfo::IsNoInfo(rmode)) {
DCHECK(RelocInfo::IsDataEmbeddedObject(rmode) ||
RelocInfo::IsLiteralConstant(rmode));
RecordRelocInfo(rmode);
@@ -824,7 +824,7 @@ void Assembler::dq(uint64_t value, RelocInfo::Mode rmode) {
void Assembler::dp(uintptr_t data, RelocInfo::Mode rmode) {
CheckBuffer();
- if (!RelocInfo::IsNone(rmode)) {
+ if (!RelocInfo::IsNoInfo(rmode)) {
DCHECK(RelocInfo::IsDataEmbeddedObject(rmode) ||
RelocInfo::IsLiteralConstant(rmode));
RecordRelocInfo(rmode);
diff --git a/deps/v8/src/codegen/s390/assembler-s390.h b/deps/v8/src/codegen/s390/assembler-s390.h
index 86fd0190b9..cfc65f70d4 100644
--- a/deps/v8/src/codegen/s390/assembler-s390.h
+++ b/deps/v8/src/codegen/s390/assembler-s390.h
@@ -93,7 +93,7 @@ class V8_EXPORT_PRIVATE Operand {
public:
// immediate
V8_INLINE explicit Operand(intptr_t immediate,
- RelocInfo::Mode rmode = RelocInfo::NONE)
+ RelocInfo::Mode rmode = RelocInfo::NO_INFO)
: rmode_(rmode) {
value_.immediate = immediate;
}
@@ -103,7 +103,7 @@ class V8_EXPORT_PRIVATE Operand {
value_.immediate = static_cast<intptr_t>(f.address());
}
explicit Operand(Handle<HeapObject> handle);
- V8_INLINE explicit Operand(Smi value) : rmode_(RelocInfo::NONE) {
+ V8_INLINE explicit Operand(Smi value) : rmode_(RelocInfo::NO_INFO) {
value_.immediate = static_cast<intptr_t>(value.ptr());
}
@@ -1312,9 +1312,9 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
// Writes a single byte or word of data in the code stream. Used
// for inline tables, e.g., jump-tables.
void db(uint8_t data);
- void dd(uint32_t data, RelocInfo::Mode rmode = RelocInfo::NONE);
- void dq(uint64_t data, RelocInfo::Mode rmode = RelocInfo::NONE);
- void dp(uintptr_t data, RelocInfo::Mode rmode = RelocInfo::NONE);
+ void dd(uint32_t data, RelocInfo::Mode rmode = RelocInfo::NO_INFO);
+ void dq(uint64_t data, RelocInfo::Mode rmode = RelocInfo::NO_INFO);
+ void dp(uintptr_t data, RelocInfo::Mode rmode = RelocInfo::NO_INFO);
// Read/patch instructions
SixByteInstr instr_at(int pos) {
diff --git a/deps/v8/src/codegen/s390/interface-descriptors-s390-inl.h b/deps/v8/src/codegen/s390/interface-descriptors-s390-inl.h
index a51909b936..398637c40a 100644
--- a/deps/v8/src/codegen/s390/interface-descriptors-s390-inl.h
+++ b/deps/v8/src/codegen/s390/interface-descriptors-s390-inl.h
@@ -121,7 +121,7 @@ constexpr auto CallTrampolineDescriptor::registers() {
// static
constexpr auto CallVarargsDescriptor::registers() {
- // r2 : number of arguments (on the stack, not including receiver)
+ // r2 : number of arguments (on the stack)
// r3 : the target to call
// r6 : arguments list length (untagged)
// r4 : arguments list (FixedArray)
@@ -139,13 +139,13 @@ constexpr auto CallForwardVarargsDescriptor::registers() {
// static
constexpr auto CallFunctionTemplateDescriptor::registers() {
// r3 : function template info
- // r4 : number of arguments (on the stack, not including receiver)
+ // r4 : number of arguments (on the stack)
return RegisterArray(r3, r4);
}
// static
constexpr auto CallWithSpreadDescriptor::registers() {
- // r2: number of arguments (on the stack, not including receiver)
+ // r2: number of arguments (on the stack)
// r3 : the target to call
// r4 : the object to spread
return RegisterArray(r3, r2, r4);
@@ -160,7 +160,7 @@ constexpr auto CallWithArrayLikeDescriptor::registers() {
// static
constexpr auto ConstructVarargsDescriptor::registers() {
- // r2 : number of arguments (on the stack, not including receiver)
+ // r2 : number of arguments (on the stack)
// r3 : the target to call
// r5 : the new target
// r6 : arguments list length (untagged)
@@ -179,7 +179,7 @@ constexpr auto ConstructForwardVarargsDescriptor::registers() {
// static
constexpr auto ConstructWithSpreadDescriptor::registers() {
- // r2 : number of arguments (on the stack, not including receiver)
+ // r2 : number of arguments (on the stack)
// r3 : the target to call
// r5 : the new target
// r4 : the object to spread
@@ -211,8 +211,7 @@ constexpr auto CompareDescriptor::registers() { return RegisterArray(r3, r2); }
// static
constexpr auto Compare_BaselineDescriptor::registers() {
- // TODO(v8:11421): Implement on this platform.
- return DefaultRegisterArray();
+ return RegisterArray(r3, r2, r4);
}
// static
@@ -220,8 +219,7 @@ constexpr auto BinaryOpDescriptor::registers() { return RegisterArray(r3, r2); }
// static
constexpr auto BinaryOp_BaselineDescriptor::registers() {
- // TODO(v8:11421): Implement on this platform.
- return DefaultRegisterArray();
+ return RegisterArray(r3, r2, r4);
}
// static
@@ -241,7 +239,7 @@ constexpr auto InterpreterDispatchDescriptor::registers() {
// static
constexpr auto InterpreterPushArgsThenCallDescriptor::registers() {
- return RegisterArray(r2, // argument count (not including receiver)
+ return RegisterArray(r2, // argument count
r4, // address of first argument
                        r3);  // the target callable to be called
}
@@ -249,7 +247,7 @@ constexpr auto InterpreterPushArgsThenCallDescriptor::registers() {
// static
constexpr auto InterpreterPushArgsThenConstructDescriptor::registers() {
return RegisterArray(
- r2, // argument count (not including receiver)
+ r2, // argument count
r6, // address of the first argument
r3, // constructor to call
r5, // new target
diff --git a/deps/v8/src/codegen/s390/macro-assembler-s390.cc b/deps/v8/src/codegen/s390/macro-assembler-s390.cc
index 06e98fe9d9..7080e89eec 100644
--- a/deps/v8/src/codegen/s390/macro-assembler-s390.cc
+++ b/deps/v8/src/codegen/s390/macro-assembler-s390.cc
@@ -496,6 +496,13 @@ void TurboAssembler::CallBuiltin(Builtin builtin) {
Call(ip);
}
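+// Like CallBuiltin, but ends in a branch (b) rather than a call, so the
+// builtin returns directly to the caller of the current function.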
+void TurboAssembler::TailCallBuiltin(Builtin builtin) {
+ ASM_CODE_COMMENT_STRING(this,
+ CommentForOffHeapTrampoline("tail call", builtin));
+ mov(ip, Operand(BuiltinEntry(builtin), RelocInfo::OFF_HEAP_TARGET));
+ b(ip);
+}
+
void TurboAssembler::Drop(int count) {
if (count > 0) {
int total = count * kSystemPointerSize;
@@ -1578,7 +1585,7 @@ void MacroAssembler::LeaveExitFrame(bool save_doubles, Register argument_count,
// Clear top frame.
Move(ip, ExternalReference::Create(IsolateAddressId::kCEntryFPAddress,
isolate()));
- StoreU64(MemOperand(ip), Operand(0, RelocInfo::NONE), r0);
+ StoreU64(MemOperand(ip), Operand(0, RelocInfo::NO_INFO), r0);
// Restore current context from top and clear it in debug mode.
Move(ip,
@@ -1691,7 +1698,11 @@ void MacroAssembler::InvokePrologue(Register expected_parameter_count,
lay(dest, MemOperand(dest, kSystemPointerSize));
SubS64(num, num, Operand(1));
bind(&check);
- b(ge, &copy);
+ if (kJSArgcIncludesReceiver) {
+ b(gt, &copy);
+ } else {
+ b(ge, &copy);
+ }
}
// Fill remaining expected arguments with undefined values.
@@ -2013,7 +2024,7 @@ void MacroAssembler::JumpToExternalReference(const ExternalReference& builtin,
Jump(code, RelocInfo::CODE_TARGET);
}
-void MacroAssembler::JumpToInstructionStream(Address entry) {
+void MacroAssembler::JumpToOffHeapInstructionStream(Address entry) {
mov(kOffHeapTrampolineRegister, Operand(entry, RelocInfo::OFF_HEAP_TARGET));
Jump(kOffHeapTrampolineRegister);
}
@@ -2456,7 +2467,7 @@ void TurboAssembler::mov(Register dst, const Operand& src) {
value = src.immediate();
}
- if (src.rmode() != RelocInfo::NONE) {
+ if (src.rmode() != RelocInfo::NO_INFO) {
// some form of relocation needed
RecordRelocInfo(src.rmode(), value);
}
@@ -2464,7 +2475,7 @@ void TurboAssembler::mov(Register dst, const Operand& src) {
int32_t hi_32 = static_cast<int32_t>(value >> 32);
int32_t lo_32 = static_cast<int32_t>(value);
- if (src.rmode() == RelocInfo::NONE) {
+ if (src.rmode() == RelocInfo::NO_INFO) {
if (hi_32 == 0) {
if (is_uint16(lo_32)) {
llill(dst, Operand(lo_32));
@@ -3431,7 +3442,7 @@ void TurboAssembler::CmpS64(Register src1, Register src2) { cgr(src1, src2); }
// Compare 32-bit Register vs Immediate
// This helper will set up proper relocation entries if required.
void TurboAssembler::CmpS32(Register dst, const Operand& opnd) {
- if (opnd.rmode() == RelocInfo::NONE) {
+ if (opnd.rmode() == RelocInfo::NO_INFO) {
intptr_t value = opnd.immediate();
if (is_int16(value))
chi(dst, opnd);
@@ -3447,7 +3458,7 @@ void TurboAssembler::CmpS32(Register dst, const Operand& opnd) {
// Compare Pointer Sized Register vs Immediate
// This helper will set up proper relocation entries if required.
void TurboAssembler::CmpS64(Register dst, const Operand& opnd) {
- if (opnd.rmode() == RelocInfo::NONE) {
+ if (opnd.rmode() == RelocInfo::NO_INFO) {
cgfi(dst, opnd);
} else {
mov(r0, opnd); // Need to generate 64-bit relocation
@@ -3619,7 +3630,7 @@ void TurboAssembler::StoreU64(Register src, const MemOperand& mem,
void TurboAssembler::StoreU64(const MemOperand& mem, const Operand& opnd,
Register scratch) {
// Relocations not supported
- DCHECK_EQ(opnd.rmode(), RelocInfo::NONE);
+ DCHECK_EQ(opnd.rmode(), RelocInfo::NO_INFO);
// Try to use MVGHI/MVHI
if (CpuFeatures::IsSupported(GENERAL_INSTR_EXT) && is_uint12(mem.offset()) &&
@@ -5553,6 +5564,22 @@ STORE_LANE_LIST(STORE_LANE)
#undef CAN_LOAD_STORE_REVERSE
#undef IS_BIG_ENDIAN
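+// Loads the stack limit selected by `kind` (the real limit or the interrupt
+// limit) for the current isolate; both are reachable as fixed offsets from
+// kRootRegister.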
+void MacroAssembler::LoadStackLimit(Register destination, StackLimitKind kind) {
+ ASM_CODE_COMMENT(this);
+ DCHECK(root_array_available());
+ Isolate* isolate = this->isolate();
+ ExternalReference limit =
+ kind == StackLimitKind::kRealStackLimit
+ ? ExternalReference::address_of_real_jslimit(isolate)
+ : ExternalReference::address_of_jslimit(isolate);
+ DCHECK(TurboAssembler::IsAddressableThroughRootRegister(isolate, limit));
+
+ intptr_t offset =
+ TurboAssembler::RootRegisterOffsetForExternalReference(isolate, limit);
+ CHECK(is_int32(offset));
+ LoadU64(destination, MemOperand(kRootRegister, offset));
+}
+
#undef kScratchDoubleReg
} // namespace internal
diff --git a/deps/v8/src/codegen/s390/macro-assembler-s390.h b/deps/v8/src/codegen/s390/macro-assembler-s390.h
index 2a799f80f8..aa2e0ef5b8 100644
--- a/deps/v8/src/codegen/s390/macro-assembler-s390.h
+++ b/deps/v8/src/codegen/s390/macro-assembler-s390.h
@@ -45,6 +45,7 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
using TurboAssemblerBase::TurboAssemblerBase;
void CallBuiltin(Builtin builtin);
+ void TailCallBuiltin(Builtin builtin);
void AtomicCmpExchangeHelper(Register addr, Register output,
Register old_value, Register new_value,
int start, int end, int shift_amount, int offset,
@@ -1267,6 +1268,7 @@ class V8_EXPORT_PRIVATE MacroAssembler : public TurboAssembler {
public:
using TurboAssembler::TurboAssembler;
+ void LoadStackLimit(Register destination, StackLimitKind kind);
// It assumes that the arguments are located below the stack pointer.
// argc is the number of arguments not including the receiver.
// TODO(victorgomes): Remove this function once we stick with the reversed
@@ -1351,7 +1353,7 @@ class V8_EXPORT_PRIVATE MacroAssembler : public TurboAssembler {
bool builtin_exit_frame = false);
// Generates a trampoline to jump to the off-heap instruction stream.
- void JumpToInstructionStream(Address entry);
+ void JumpToOffHeapInstructionStream(Address entry);
// Compare the object in a register to a value and jump if they are equal.
void JumpIfRoot(Register with, RootIndex index, Label* if_equal) {
diff --git a/deps/v8/src/codegen/safepoint-table.cc b/deps/v8/src/codegen/safepoint-table.cc
index 67a17d5f0e..1d08a3b4d7 100644
--- a/deps/v8/src/codegen/safepoint-table.cc
+++ b/deps/v8/src/codegen/safepoint-table.cc
@@ -4,6 +4,8 @@
#include "src/codegen/safepoint-table.h"
+#include <iomanip>
+
#include "src/codegen/assembler-inl.h"
#include "src/codegen/macro-assembler.h"
#include "src/deoptimizer/deoptimizer.h"
@@ -20,98 +22,136 @@ namespace internal {
SafepointTable::SafepointTable(Isolate* isolate, Address pc, Code code)
: SafepointTable(code.InstructionStart(isolate, pc),
- code.SafepointTableAddress(), true) {}
+ code.SafepointTableAddress()) {}
#if V8_ENABLE_WEBASSEMBLY
SafepointTable::SafepointTable(const wasm::WasmCode* code)
- : SafepointTable(code->instruction_start(),
- code->instruction_start() + code->safepoint_table_offset(),
- false) {}
+ : SafepointTable(
+ code->instruction_start(),
+ code->instruction_start() + code->safepoint_table_offset()) {}
#endif // V8_ENABLE_WEBASSEMBLY
SafepointTable::SafepointTable(Address instruction_start,
- Address safepoint_table_address, bool has_deopt)
+ Address safepoint_table_address)
: instruction_start_(instruction_start),
- has_deopt_(has_deopt),
safepoint_table_address_(safepoint_table_address),
- length_(ReadLength(safepoint_table_address)),
- entry_size_(ReadEntrySize(safepoint_table_address)) {}
-
-unsigned SafepointTable::find_return_pc(unsigned pc_offset) {
- for (unsigned i = 0; i < length(); i++) {
- if (GetTrampolinePcOffset(i) == static_cast<int>(pc_offset)) {
- return GetPcOffset(i);
- } else if (GetPcOffset(i) == pc_offset) {
- return pc_offset;
+ length_(base::Memory<int>(safepoint_table_address + kLengthOffset)),
+ entry_configuration_(base::Memory<uint32_t>(safepoint_table_address +
+ kEntryConfigurationOffset)) {}
+
+int SafepointTable::find_return_pc(int pc_offset) {
+ for (int i = 0; i < length(); i++) {
+ SafepointEntry entry = GetEntry(i);
+ if (entry.trampoline_pc() == pc_offset || entry.pc() == pc_offset) {
+ return entry.pc();
}
}
UNREACHABLE();
}
SafepointEntry SafepointTable::FindEntry(Address pc) const {
- unsigned pc_offset = static_cast<unsigned>(pc - instruction_start_);
- // We use kMaxUInt32 as sentinel value, so check that we don't hit that.
- DCHECK_NE(kMaxUInt32, pc_offset);
- unsigned len = length();
- CHECK_GT(len, 0);
- // If pc == kMaxUInt32, then this entry covers all call sites in the function.
- if (len == 1 && GetPcOffset(0) == kMaxUInt32) return GetEntry(0);
- for (unsigned i = 0; i < len; i++) {
- // TODO(kasperl): Replace the linear search with binary search.
- if (GetPcOffset(i) == pc_offset ||
- (has_deopt_ &&
- GetTrampolinePcOffset(i) == static_cast<int>(pc_offset))) {
- return GetEntry(i);
+ int pc_offset = static_cast<int>(pc - instruction_start_);
+
+ // Check if the PC is pointing at a trampoline.
+ if (has_deopt_data()) {
+ int candidate = -1;
+ for (int i = 0; i < length_; ++i) {
+ int trampoline_pc = GetEntry(i).trampoline_pc();
+ if (trampoline_pc != -1 && trampoline_pc <= pc_offset) candidate = i;
+ if (trampoline_pc > pc_offset) break;
+ }
+ if (candidate != -1) return GetEntry(candidate);
+ }
+
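+  // Otherwise return the covering entry, i.e. the last entry whose pc is not
+  // larger than pc_offset.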
+ for (int i = 0; i < length_; ++i) {
+ SafepointEntry entry = GetEntry(i);
+ if (i == length_ - 1 || GetEntry(i + 1).pc() > pc_offset) {
+ DCHECK_LE(entry.pc(), pc_offset);
+ return entry;
}
}
UNREACHABLE();
}
-void SafepointTable::PrintEntry(unsigned index, std::ostream& os) const {
- disasm::NameConverter converter;
- SafepointEntry entry = GetEntry(index);
- uint8_t* bits = entry.bits();
+void SafepointTable::Print(std::ostream& os) const {
+ os << "Safepoints (entries = " << length_ << ", byte size = " << byte_size()
+ << ")\n";
+
+ for (int index = 0; index < length_; index++) {
+ SafepointEntry entry = GetEntry(index);
+ os << reinterpret_cast<const void*>(instruction_start_ + entry.pc()) << " "
+ << std::setw(6) << std::hex << entry.pc() << std::dec;
+
+ if (!entry.tagged_slots().empty()) {
+ os << " slots (sp->fp): ";
+ for (uint8_t bits : entry.tagged_slots()) {
+ for (int bit = 0; bit < kBitsPerByte; ++bit) {
+ os << ((bits >> bit) & 1);
+ }
+ }
+ }
- // Print the stack slot bits.
- if (entry_size_ > 0) {
- for (uint32_t i = 0; i < entry_size_; ++i) {
- for (int bit = 0; bit < kBitsPerByte; ++bit) {
- os << ((bits[i] & (1 << bit)) ? "1" : "0");
+ if (entry.tagged_register_indexes() != 0) {
+ os << " registers: ";
+ uint32_t register_bits = entry.tagged_register_indexes();
+ int bits = 32 - base::bits::CountLeadingZeros32(register_bits);
+ for (int j = bits - 1; j >= 0; --j) {
+ os << ((register_bits >> j) & 1);
}
}
+
+ if (entry.has_deoptimization_index()) {
+ os << " deopt " << std::setw(6) << entry.deoptimization_index()
+ << " trampoline: " << std::setw(6) << std::hex
+ << entry.trampoline_pc();
+ }
+ os << "\n";
}
}
Safepoint SafepointTableBuilder::DefineSafepoint(Assembler* assembler) {
- deoptimization_info_.push_back(
- DeoptimizationInfo(zone_, assembler->pc_offset_for_safepoint()));
- DeoptimizationInfo& new_info = deoptimization_info_.back();
- return Safepoint(new_info.stack_indexes, &new_info.register_indexes);
-}
-
-unsigned SafepointTableBuilder::GetCodeOffset() const {
- DCHECK(emitted_);
- return offset_;
+ entries_.push_back(EntryBuilder(zone_, assembler->pc_offset_for_safepoint()));
+ EntryBuilder& new_entry = entries_.back();
+ return Safepoint(new_entry.stack_indexes, &new_entry.register_indexes);
}
int SafepointTableBuilder::UpdateDeoptimizationInfo(int pc, int trampoline,
int start,
- unsigned deopt_index) {
+ int deopt_index) {
+ DCHECK_NE(SafepointEntry::kNoTrampolinePC, trampoline);
+ DCHECK_NE(SafepointEntry::kNoDeoptIndex, deopt_index);
+ auto it = entries_.Find(start);
+ DCHECK(std::any_of(it, entries_.end(),
+ [pc](auto& entry) { return entry.pc == pc; }));
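+  // Linear scan from `start` for the entry at `pc`; the DCHECK above
+  // guarantees that it exists.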
int index = start;
- for (auto it = deoptimization_info_.Find(start);
- it != deoptimization_info_.end(); it++, index++) {
- if (static_cast<int>(it->pc) == pc) {
- it->trampoline = trampoline;
- it->deopt_index = deopt_index;
- return index;
+ while (it->pc != pc) ++it, ++index;
+ it->trampoline = trampoline;
+ it->deopt_index = deopt_index;
+ return index;
+}
+
+void SafepointTableBuilder::Emit(Assembler* assembler, int tagged_slots_size) {
+#ifdef DEBUG
+ int last_pc = -1;
+ int last_trampoline = -1;
+ for (const EntryBuilder& entry : entries_) {
+ // Entries are ordered by PC.
+ DCHECK_LT(last_pc, entry.pc);
+ last_pc = entry.pc;
+ // Trampoline PCs are increasing, and larger than regular PCs.
+ if (entry.trampoline != SafepointEntry::kNoTrampolinePC) {
+ DCHECK_LT(last_trampoline, entry.trampoline);
+ DCHECK_LT(entries_.back().pc, entry.trampoline);
+ last_trampoline = entry.trampoline;
}
+    // An entry either has both a trampoline and a deopt index, or neither.
+ DCHECK_EQ(entry.trampoline == SafepointEntry::kNoTrampolinePC,
+ entry.deopt_index == SafepointEntry::kNoDeoptIndex);
}
- UNREACHABLE();
-}
+#endif // DEBUG
-void SafepointTableBuilder::Emit(Assembler* assembler, int bits_per_entry) {
RemoveDuplicates();
- TrimEntries(&bits_per_entry);
+ TrimEntries(&tagged_slots_size);
#if V8_TARGET_ARCH_ARM || V8_TARGET_ARCH_ARM64
// We cannot emit a const pool within the safepoint table.
@@ -123,89 +163,139 @@ void SafepointTableBuilder::Emit(Assembler* assembler, int bits_per_entry) {
assembler->RecordComment(";;; Safepoint table.");
offset_ = assembler->pc_offset();
- // Compute the number of bytes per safepoint entry.
- int bytes_per_entry =
- RoundUp(bits_per_entry, kBitsPerByte) >> kBitsPerByteLog2;
+ // Compute the required sizes of the fields.
+ int used_register_indexes = 0;
+ STATIC_ASSERT(SafepointEntry::kNoTrampolinePC == -1);
+ int max_pc = -1;
+ STATIC_ASSERT(SafepointEntry::kNoDeoptIndex == -1);
+ int max_deopt_index = -1;
+ for (const EntryBuilder& entry : entries_) {
+ used_register_indexes |= entry.register_indexes;
+ max_pc = std::max(max_pc, std::max(entry.pc, entry.trampoline));
+ max_deopt_index = std::max(max_deopt_index, entry.deopt_index);
+ }
+
+ // Derive the bytes and bools for the entry configuration from the values.
+ auto value_to_bytes = [](int value) {
+ DCHECK_LE(0, value);
+ if (value == 0) return 0;
+ if (value <= 0xff) return 1;
+ if (value <= 0xffff) return 2;
+ if (value <= 0xffffff) return 3;
+ return 4;
+ };
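+  // E.g. value_to_bytes(0) == 0, value_to_bytes(0x12) == 1,
+  // value_to_bytes(0x1234) == 2 and value_to_bytes(0x123456) == 3.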
+ bool has_deopt_data = max_deopt_index != -1;
+ int register_indexes_size = value_to_bytes(used_register_indexes);
+ // Add 1 so all values are non-negative.
+ int pc_size = value_to_bytes(max_pc + 1);
+ int deopt_index_size = value_to_bytes(max_deopt_index + 1);
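+  // The per-entry bitmap of tagged stack slots is rounded up to whole bytes.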
+ int tagged_slots_bytes =
+ (tagged_slots_size + kBitsPerByte - 1) / kBitsPerByte;
+
+ // Add a CHECK to ensure we never overflow the space in the bitfield, even for
+ // huge functions which might not be covered by tests.
+ CHECK(SafepointTable::RegisterIndexesSizeField::is_valid(
+ register_indexes_size) &&
+ SafepointTable::PcSizeField::is_valid(pc_size) &&
+ SafepointTable::DeoptIndexSizeField::is_valid(deopt_index_size) &&
+ SafepointTable::TaggedSlotsBytesField::is_valid(tagged_slots_bytes));
+
+ uint32_t entry_configuration =
+ SafepointTable::HasDeoptDataField::encode(has_deopt_data) |
+ SafepointTable::RegisterIndexesSizeField::encode(register_indexes_size) |
+ SafepointTable::PcSizeField::encode(pc_size) |
+ SafepointTable::DeoptIndexSizeField::encode(deopt_index_size) |
+ SafepointTable::TaggedSlotsBytesField::encode(tagged_slots_bytes);
// Emit the table header.
STATIC_ASSERT(SafepointTable::kLengthOffset == 0 * kIntSize);
- STATIC_ASSERT(SafepointTable::kEntrySizeOffset == 1 * kIntSize);
+ STATIC_ASSERT(SafepointTable::kEntryConfigurationOffset == 1 * kIntSize);
STATIC_ASSERT(SafepointTable::kHeaderSize == 2 * kIntSize);
- int length = static_cast<int>(deoptimization_info_.size());
+ int length = static_cast<int>(entries_.size());
assembler->dd(length);
- assembler->dd(bytes_per_entry);
-
- // Emit sorted table of pc offsets together with additional info (i.e. the
- // deoptimization index or arguments count) and trampoline offsets.
- STATIC_ASSERT(SafepointTable::kPcOffset == 0 * kIntSize);
- STATIC_ASSERT(SafepointTable::kEncodedInfoOffset == 1 * kIntSize);
- STATIC_ASSERT(SafepointTable::kTrampolinePcOffset == 2 * kIntSize);
- STATIC_ASSERT(SafepointTable::kFixedEntrySize == 3 * kIntSize);
- for (const DeoptimizationInfo& info : deoptimization_info_) {
- assembler->dd(info.pc);
- if (info.register_indexes) {
- // We emit the register indexes in the same bits as the deopt_index.
- // Register indexes and deopt_index should not exist at the same time.
- DCHECK_EQ(info.deopt_index,
- static_cast<uint32_t>(Safepoint::kNoDeoptimizationIndex));
- assembler->dd(info.register_indexes);
- } else {
- assembler->dd(info.deopt_index);
+ assembler->dd(entry_configuration);
+
+ auto emit_bytes = [assembler](int value, int bytes) {
+ DCHECK_LE(0, value);
+ for (; bytes > 0; --bytes, value >>= 8) assembler->db(value);
+ DCHECK_EQ(0, value);
+ };
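+  // Bytes are emitted least-significant first; the final DCHECK verifies that
+  // `value` actually fits into the requested number of bytes.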
+ // Emit entries, sorted by pc offsets.
+ for (const EntryBuilder& entry : entries_) {
+ emit_bytes(entry.pc, pc_size);
+ if (has_deopt_data) {
+ // Add 1 so all values are non-negative.
+ emit_bytes(entry.deopt_index + 1, deopt_index_size);
+ emit_bytes(entry.trampoline + 1, pc_size);
}
- assembler->dd(info.trampoline);
+ emit_bytes(entry.register_indexes, register_indexes_size);
}
- // Emit table of bitmaps.
- ZoneVector<uint8_t> bits(bytes_per_entry, 0, zone_);
- for (const DeoptimizationInfo& info : deoptimization_info_) {
- ZoneChunkList<int>* indexes = info.stack_indexes;
+ // Emit bitmaps of tagged stack slots.
+ ZoneVector<uint8_t> bits(tagged_slots_bytes, 0, zone_);
+ for (const EntryBuilder& entry : entries_) {
std::fill(bits.begin(), bits.end(), 0);
// Run through the indexes and build a bitmap.
- for (int idx : *indexes) {
- DCHECK_GT(bits_per_entry, idx);
- int index = bits_per_entry - 1 - idx;
+ for (int idx : *entry.stack_indexes) {
+ DCHECK_GT(tagged_slots_size, idx);
+ int index = tagged_slots_size - 1 - idx;
int byte_index = index >> kBitsPerByteLog2;
int bit_index = index & (kBitsPerByte - 1);
- bits[byte_index] |= (1U << bit_index);
+ bits[byte_index] |= (1u << bit_index);
}
// Emit the bitmap for the current entry.
- for (int k = 0; k < bytes_per_entry; k++) {
- assembler->db(bits[k]);
- }
+ for (uint8_t byte : bits) assembler->db(byte);
}
- emitted_ = true;
}
void SafepointTableBuilder::RemoveDuplicates() {
- // If the table contains more than one entry, and all entries are identical
- // (except for the pc), replace the whole table by a single entry with pc =
- // kMaxUInt32. This especially compacts the table for wasm code without tagged
- // pointers and without deoptimization info.
-
- if (deoptimization_info_.size() < 2) return;
-
- // Check that all entries (1, size] are identical to entry 0.
- const DeoptimizationInfo& first_info = deoptimization_info_.front();
- for (auto it = deoptimization_info_.Find(1); it != deoptimization_info_.end();
- it++) {
- if (!IsIdenticalExceptForPc(first_info, *it)) return;
+  // Remove duplicate entries, i.e. consecutive entries that are identical
+  // except for the PC. During lookup we return the entry covering the PC at
+  // hand, so only the first entry of each such run needs to be kept.
+
+ if (entries_.size() < 2) return;
+
+ auto is_identical_except_for_pc = [](const EntryBuilder& entry1,
+ const EntryBuilder& entry2) {
+ if (entry1.deopt_index != entry2.deopt_index) return false;
+ DCHECK_EQ(entry1.trampoline, entry2.trampoline);
+
+ ZoneChunkList<int>* indexes1 = entry1.stack_indexes;
+ ZoneChunkList<int>* indexes2 = entry2.stack_indexes;
+ if (indexes1->size() != indexes2->size()) return false;
+ if (!std::equal(indexes1->begin(), indexes1->end(), indexes2->begin())) {
+ return false;
+ }
+
+ if (entry1.register_indexes != entry2.register_indexes) return false;
+
+ return true;
+ };
+
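+  // In-place compaction: keep only the first entry of each run of entries
+  // that are identical except for their pc.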
+ auto remaining_it = entries_.begin();
+ size_t remaining = 0;
+
+ for (auto it = entries_.begin(), end = entries_.end(); it != end;
+ ++remaining_it, ++remaining) {
+ if (remaining_it != it) *remaining_it = *it;
+ // Merge identical entries.
+ do {
+ ++it;
+ } while (it != end && is_identical_except_for_pc(*it, *remaining_it));
}
- // If we get here, all entries were identical. Rewind the list to just one
- // entry, and set the pc to kMaxUInt32.
- deoptimization_info_.Rewind(1);
- deoptimization_info_.front().pc = kMaxUInt32;
+ entries_.Rewind(remaining);
}
-void SafepointTableBuilder::TrimEntries(int* bits_per_entry) {
- int min_index = *bits_per_entry;
+void SafepointTableBuilder::TrimEntries(int* tagged_slots_size) {
+ int min_index = *tagged_slots_size;
if (min_index == 0) return; // Early exit: nothing to trim.
- for (auto& info : deoptimization_info_) {
- for (int idx : *info.stack_indexes) {
- DCHECK_GT(*bits_per_entry, idx); // Validity check.
+ for (auto& entry : entries_) {
+ for (int idx : *entry.stack_indexes) {
+ DCHECK_GT(*tagged_slots_size, idx); // Validity check.
if (idx >= min_index) continue;
if (idx == 0) return; // Early exit: nothing to trim.
min_index = idx;
@@ -213,29 +303,13 @@ void SafepointTableBuilder::TrimEntries(int* bits_per_entry) {
}
DCHECK_LT(0, min_index);
- *bits_per_entry -= min_index;
- for (auto& info : deoptimization_info_) {
- for (int& idx : *info.stack_indexes) {
+ *tagged_slots_size -= min_index;
+ for (auto& entry : entries_) {
+ for (int& idx : *entry.stack_indexes) {
idx -= min_index;
}
}
}
-bool SafepointTableBuilder::IsIdenticalExceptForPc(
- const DeoptimizationInfo& info1, const DeoptimizationInfo& info2) const {
- if (info1.deopt_index != info2.deopt_index) return false;
-
- ZoneChunkList<int>* indexes1 = info1.stack_indexes;
- ZoneChunkList<int>* indexes2 = info2.stack_indexes;
- if (indexes1->size() != indexes2->size()) return false;
- if (!std::equal(indexes1->begin(), indexes1->end(), indexes2->begin())) {
- return false;
- }
-
- if (info1.register_indexes != info2.register_indexes) return false;
-
- return true;
-}
-
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/codegen/safepoint-table.h b/deps/v8/src/codegen/safepoint-table.h
index 07bbcaf9a0..4201d5fc2f 100644
--- a/deps/v8/src/codegen/safepoint-table.h
+++ b/deps/v8/src/codegen/safepoint-table.h
@@ -5,6 +5,7 @@
#ifndef V8_CODEGEN_SAFEPOINT_TABLE_H_
#define V8_CODEGEN_SAFEPOINT_TABLE_H_
+#include "src/base/bit-field.h"
#include "src/base/iterator.h"
#include "src/base/memory.h"
#include "src/common/assert-scope.h"
@@ -22,72 +23,64 @@ class WasmCode;
class SafepointEntry {
public:
+ static constexpr int kNoDeoptIndex = -1;
+ static constexpr int kNoTrampolinePC = -1;
+
SafepointEntry() = default;
- SafepointEntry(unsigned deopt_index, uint8_t* bits, uint8_t* bits_end,
- int trampoline_pc)
- : deopt_index_(deopt_index),
- bits_(bits),
- bits_end_(bits_end),
+ SafepointEntry(int pc, int deopt_index, uint32_t tagged_register_indexes,
+ base::Vector<uint8_t> tagged_slots, int trampoline_pc)
+ : pc_(pc),
+ deopt_index_(deopt_index),
+ tagged_register_indexes_(tagged_register_indexes),
+ tagged_slots_(tagged_slots),
trampoline_pc_(trampoline_pc) {
DCHECK(is_valid());
}
- bool is_valid() const { return bits_ != nullptr; }
+ bool is_valid() const { return tagged_slots_.begin() != nullptr; }
- bool Equals(const SafepointEntry& other) const {
- return deopt_index_ == other.deopt_index_ && bits_ == other.bits_;
+ bool operator==(const SafepointEntry& other) const {
+ return pc_ == other.pc_ && deopt_index_ == other.deopt_index_ &&
+ tagged_register_indexes_ == other.tagged_register_indexes_ &&
+ tagged_slots_ == other.tagged_slots_ &&
+ trampoline_pc_ == other.trampoline_pc_;
}
void Reset() {
- deopt_index_ = 0;
- bits_ = nullptr;
- bits_end_ = nullptr;
+ *this = SafepointEntry{};
+ DCHECK(!is_valid());
}
- int trampoline_pc() { return trampoline_pc_; }
+ int pc() const { return pc_; }
- static const unsigned kNoDeoptIndex = kMaxUInt32;
- static constexpr int kNoTrampolinePC = -1;
+ int trampoline_pc() const { return trampoline_pc_; }
- int deoptimization_index() const {
- DCHECK(is_valid() && has_deoptimization_index());
- return deopt_index_;
- }
-
- uint32_t register_bits() const {
- // The register bits use the same field as the deopt_index_.
- DCHECK(is_valid());
- return deopt_index_;
- }
-
- bool has_register_bits() const {
- // The register bits use the same field as the deopt_index_.
+ bool has_deoptimization_index() const {
DCHECK(is_valid());
return deopt_index_ != kNoDeoptIndex;
}
- bool has_deoptimization_index() const {
- DCHECK(is_valid());
- return deopt_index_ != kNoDeoptIndex;
+ int deoptimization_index() const {
+ DCHECK(is_valid() && has_deoptimization_index());
+ return deopt_index_;
}
- uint8_t* bits() const {
+ uint32_t tagged_register_indexes() const {
DCHECK(is_valid());
- return bits_;
+ return tagged_register_indexes_;
}
- base::iterator_range<uint8_t*> iterate_bits() const {
- return base::make_iterator_range(bits_, bits_end_);
+ base::Vector<const uint8_t> tagged_slots() const {
+ DCHECK(is_valid());
+ return tagged_slots_;
}
- size_t entry_size() const { return bits_end_ - bits_; }
-
private:
- uint32_t deopt_index_ = 0;
- uint8_t* bits_ = nullptr;
- uint8_t* bits_end_ = nullptr;
- // It needs to be an integer as it is -1 for eager deoptimizations.
+ int pc_ = -1;
+ int deopt_index_ = kNoDeoptIndex;
+ uint32_t tagged_register_indexes_ = 0;
+ base::Vector<uint8_t> tagged_slots_;
int trampoline_pc_ = kNoTrampolinePC;
};
@@ -103,89 +96,101 @@ class SafepointTable {
SafepointTable(const SafepointTable&) = delete;
SafepointTable& operator=(const SafepointTable&) = delete;
- int size() const {
- return kHeaderSize + (length_ * (kFixedEntrySize + entry_size_));
- }
- unsigned length() const { return length_; }
- unsigned entry_size() const { return entry_size_; }
+ int length() const { return length_; }
- unsigned GetPcOffset(unsigned index) const {
- DCHECK(index < length_);
- return base::Memory<uint32_t>(GetPcOffsetLocation(index));
+ int byte_size() const {
+ return kHeaderSize + length_ * (entry_size() + tagged_slots_bytes());
}
- int GetTrampolinePcOffset(unsigned index) const {
- DCHECK(index < length_);
- return base::Memory<int>(GetTrampolineLocation(index));
- }
-
- unsigned find_return_pc(unsigned pc_offset);
-
- SafepointEntry GetEntry(unsigned index) const {
- DCHECK(index < length_);
- unsigned deopt_index =
- base::Memory<uint32_t>(GetEncodedInfoLocation(index));
- uint8_t* bits = &base::Memory<uint8_t>(entries() + (index * entry_size_));
- int trampoline_pc = has_deopt_
- ? base::Memory<int>(GetTrampolineLocation(index))
- : SafepointEntry::kNoTrampolinePC;
- return SafepointEntry(deopt_index, bits, bits + entry_size_, trampoline_pc);
+ int find_return_pc(int pc_offset);
+
+ SafepointEntry GetEntry(int index) const {
+ DCHECK_GT(length_, index);
+ Address entry_ptr =
+ safepoint_table_address_ + kHeaderSize + index * entry_size();
+
+ int pc = read_bytes(&entry_ptr, pc_size());
+ int deopt_index = SafepointEntry::kNoDeoptIndex;
+ int trampoline_pc = SafepointEntry::kNoTrampolinePC;
+ if (has_deopt_data()) {
+ deopt_index = read_bytes(&entry_ptr, deopt_index_size()) - 1;
+ trampoline_pc = read_bytes(&entry_ptr, pc_size()) - 1;
+ DCHECK(deopt_index >= 0 || deopt_index == SafepointEntry::kNoDeoptIndex);
+ DCHECK(trampoline_pc >= 0 ||
+ trampoline_pc == SafepointEntry::kNoTrampolinePC);
+ }
+ int tagged_register_indexes =
+ read_bytes(&entry_ptr, register_indexes_size());
+
+    // Entry bits start after the vector of entries (i.e. at the pc offset of
+    // the non-existing entry after the last one).
+ uint8_t* tagged_slots_start = reinterpret_cast<uint8_t*>(
+ safepoint_table_address_ + kHeaderSize + length_ * entry_size());
+ base::Vector<uint8_t> tagged_slots(
+ tagged_slots_start + index * tagged_slots_bytes(),
+ tagged_slots_bytes());
+
+ return SafepointEntry(pc, deopt_index, tagged_register_indexes,
+ tagged_slots, trampoline_pc);
}
// Returns the entry for the given pc.
SafepointEntry FindEntry(Address pc) const;
- void PrintEntry(unsigned index, std::ostream& os) const;
+ void Print(std::ostream&) const;
private:
- SafepointTable(Address instruction_start, Address safepoint_table_address,
- bool has_deopt);
-
- static const uint8_t kNoRegisters = 0xFF;
-
// Layout information.
- static const int kLengthOffset = 0;
- static const int kEntrySizeOffset = kLengthOffset + kIntSize;
- static const int kHeaderSize = kEntrySizeOffset + kIntSize;
- static const int kPcOffset = 0;
- static const int kEncodedInfoOffset = kPcOffset + kIntSize;
- static const int kTrampolinePcOffset = kEncodedInfoOffset + kIntSize;
- static const int kFixedEntrySize = kTrampolinePcOffset + kIntSize;
-
- static uint32_t ReadLength(Address table) {
- return base::Memory<uint32_t>(table + kLengthOffset);
+ static constexpr int kLengthOffset = 0;
+ static constexpr int kEntryConfigurationOffset = kLengthOffset + kIntSize;
+ static constexpr int kHeaderSize = kEntryConfigurationOffset + kUInt32Size;
+
+ using HasDeoptDataField = base::BitField<bool, 0, 1>;
+ using RegisterIndexesSizeField = HasDeoptDataField::Next<int, 3>;
+ using PcSizeField = RegisterIndexesSizeField::Next<int, 3>;
+ using DeoptIndexSizeField = PcSizeField::Next<int, 3>;
+ // In 22 bits, we can encode up to 4M bytes, corresponding to 32M frame slots,
+ // which is 128MB on 32-bit and 256MB on 64-bit systems. The stack size is
+ // limited to a bit below 1MB anyway (see FLAG_stack_size).
+ using TaggedSlotsBytesField = DeoptIndexSizeField::Next<int, 22>;
+
+ SafepointTable(Address instruction_start, Address safepoint_table_address);
+
+ int entry_size() const {
+ int deopt_data_size = has_deopt_data() ? pc_size() + deopt_index_size() : 0;
+ return pc_size() + deopt_data_size + register_indexes_size();
}
- static uint32_t ReadEntrySize(Address table) {
- return base::Memory<uint32_t>(table + kEntrySizeOffset);
- }
- Address pc_and_deoptimization_indexes() const {
- return safepoint_table_address_ + kHeaderSize;
+
+ int tagged_slots_bytes() const {
+ return TaggedSlotsBytesField::decode(entry_configuration_);
}
- Address entries() const {
- return safepoint_table_address_ + kHeaderSize + (length_ * kFixedEntrySize);
+ bool has_deopt_data() const {
+ return HasDeoptDataField::decode(entry_configuration_);
}
-
- Address GetPcOffsetLocation(unsigned index) const {
- return pc_and_deoptimization_indexes() + (index * kFixedEntrySize);
+ int pc_size() const { return PcSizeField::decode(entry_configuration_); }
+ int deopt_index_size() const {
+ return DeoptIndexSizeField::decode(entry_configuration_);
}
-
- Address GetEncodedInfoLocation(unsigned index) const {
- return GetPcOffsetLocation(index) + kEncodedInfoOffset;
+ int register_indexes_size() const {
+ return RegisterIndexesSizeField::decode(entry_configuration_);
}
- Address GetTrampolineLocation(unsigned index) const {
- return GetPcOffsetLocation(index) + kTrampolinePcOffset;
+ static int read_bytes(Address* ptr, int bytes) {
+ uint32_t result = 0;
+ for (int b = 0; b < bytes; ++b, ++*ptr) {
+ result |= uint32_t{*reinterpret_cast<uint8_t*>(*ptr)} << (8 * b);
+ }
+ return static_cast<int>(result);
}
DISALLOW_GARBAGE_COLLECTION(no_gc_)
const Address instruction_start_;
- const bool has_deopt_;
// Safepoint table layout.
const Address safepoint_table_address_;
- const uint32_t length_;
- const uint32_t entry_size_;
+ const int length_;
+ const uint32_t entry_configuration_;
friend class SafepointTableBuilder;
friend class SafepointEntry;
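
To make the variable-width entry encoding above easier to follow, here is a small self-contained C++ sketch of the same decoding idea: the field widths come from a packed configuration (the BitFields above), each field is read as a little-endian integer of that many bytes (as in read_bytes), and the stored deopt index and trampoline pc are biased by one so that zero can encode 'absent'. The concrete widths and byte values below are made up for illustration and are not the exact V8 layout.

#include <cstdint>
#include <cstdio>

// Read a little-endian unsigned integer of `bytes` bytes and advance the
// cursor, mirroring SafepointTable::read_bytes.
static uint32_t ReadBytes(const uint8_t** ptr, int bytes) {
  uint32_t result = 0;
  for (int b = 0; b < bytes; ++b, ++*ptr) {
    result |= uint32_t{**ptr} << (8 * b);
  }
  return result;
}

int main() {
  // Assume a configuration of: 2-byte pc, deopt data present (1-byte deopt
  // index, 2-byte trampoline pc), 1-byte register-indexes bitmask.
  const uint8_t entry[] = {0x34, 0x12,  // pc = 0x1234
                           0x05,        // deopt index + 1  -> index 4
                           0x10, 0x00,  // trampoline pc + 1 -> pc 15
                           0x03};       // tagged registers: codes 0 and 1
  const uint8_t* cursor = entry;
  int pc = static_cast<int>(ReadBytes(&cursor, 2));
  int deopt_index = static_cast<int>(ReadBytes(&cursor, 1)) - 1;
  int trampoline_pc = static_cast<int>(ReadBytes(&cursor, 2)) - 1;
  uint32_t registers = ReadBytes(&cursor, 1);
  std::printf("pc=%d deopt=%d trampoline=%d regs=0x%x\n", pc, deopt_index,
              trampoline_pc, registers);
  // The per-entry tagged-slot bitmaps follow the whole entry array. With the
  // 22-bit TaggedSlotsBytesField a bitmap can span up to 4M bytes, i.e. 32M
  // frame slots (128MB of frame on 32-bit, 256MB on 64-bit), far beyond the
  // ~1MB stack limit.
  return 0;
}
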
@@ -193,13 +198,11 @@ class SafepointTable {
class Safepoint {
public:
- static const int kNoDeoptimizationIndex = SafepointEntry::kNoDeoptIndex;
-
void DefinePointerSlot(int index) { stack_indexes_->push_back(index); }
void DefineRegister(int reg_code) {
// Make sure the recorded index is always less than 31, so that we don't
- // generate {kNoDeoptimizationIndex} by accident.
+ // generate {kNoDeoptIndex} by accident.
DCHECK_LT(reg_code, 31);
*register_indexes_ |= 1u << reg_code;
}
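
A minimal standalone sketch (not V8 code) of the recording side that the Safepoint helper above provides to the builder: tagged stack slots are collected as a list of indexes, while tagged registers are folded into one bitmask, which is why the code asserts reg_code < 31.

#include <cstdint>
#include <cstdio>
#include <vector>

int main() {
  std::vector<int> stack_indexes;  // tagged stack slots, by index
  uint32_t register_indexes = 0;   // tagged registers, as a bitmask

  auto define_pointer_slot = [&](int index) { stack_indexes.push_back(index); };
  auto define_register = [&](int reg_code) {
    register_indexes |= 1u << reg_code;
  };

  define_pointer_slot(3);
  define_pointer_slot(7);
  define_register(2);  // e.g. a tagged value currently held in register code 2
  define_register(5);

  std::printf("slots: %zu, register mask: 0x%x\n", stack_indexes.size(),
              register_indexes);  // slots: 2, register mask: 0x24
  return 0;
}
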
@@ -215,16 +218,18 @@ class Safepoint {
class SafepointTableBuilder {
public:
- explicit SafepointTableBuilder(Zone* zone)
- : deoptimization_info_(zone),
- emitted_(false),
- zone_(zone) {}
+ explicit SafepointTableBuilder(Zone* zone) : entries_(zone), zone_(zone) {}
SafepointTableBuilder(const SafepointTableBuilder&) = delete;
SafepointTableBuilder& operator=(const SafepointTableBuilder&) = delete;
+ bool emitted() const { return offset_ != -1; }
+
// Get the offset of the emitted safepoint table in the code.
- unsigned GetCodeOffset() const;
+ int GetCodeOffset() const {
+ DCHECK(emitted());
+ return offset_;
+ }
// Define a new safepoint for the current position in the body.
Safepoint DefineSafepoint(Assembler* assembler);
@@ -238,41 +243,36 @@ class SafepointTableBuilder {
// table contains the trampoline PC {trampoline} that replaced the
// return PC {pc} on the stack.
int UpdateDeoptimizationInfo(int pc, int trampoline, int start,
- unsigned deopt_index);
+ int deopt_index);
private:
- struct DeoptimizationInfo {
- unsigned pc;
- unsigned deopt_index;
+ struct EntryBuilder {
+ int pc;
+ int deopt_index;
int trampoline;
ZoneChunkList<int>* stack_indexes;
uint32_t register_indexes;
- DeoptimizationInfo(Zone* zone, unsigned pc)
+ EntryBuilder(Zone* zone, int pc)
: pc(pc),
- deopt_index(Safepoint::kNoDeoptimizationIndex),
- trampoline(-1),
+ deopt_index(SafepointEntry::kNoDeoptIndex),
+ trampoline(SafepointEntry::kNoTrampolinePC),
stack_indexes(zone->New<ZoneChunkList<int>>(
zone, ZoneChunkList<int>::StartMode::kSmall)),
register_indexes(0) {}
};
- // Compares all fields of a {DeoptimizationInfo} except {pc} and {trampoline}.
- bool IsIdenticalExceptForPc(const DeoptimizationInfo&,
- const DeoptimizationInfo&) const;
-
- // If all entries are identical, replace them by 1 entry with pc = kMaxUInt32.
+ // Remove consecutive identical entries.
void RemoveDuplicates();
// Try to trim entries by removing trailing zeros (and shrinking
// {bits_per_entry}).
void TrimEntries(int* bits_per_entry);
- ZoneChunkList<DeoptimizationInfo> deoptimization_info_;
+ ZoneChunkList<EntryBuilder> entries_;
- unsigned offset_;
- bool emitted_;
+ int offset_ = -1;
- Zone* zone_;
+ Zone* const zone_;
};
} // namespace internal
diff --git a/deps/v8/src/codegen/script-details.h b/deps/v8/src/codegen/script-details.h
index e342e132d7..5317ae989e 100644
--- a/deps/v8/src/codegen/script-details.h
+++ b/deps/v8/src/codegen/script-details.h
@@ -29,7 +29,7 @@ struct ScriptDetails {
int column_offset;
MaybeHandle<Object> name_obj;
MaybeHandle<Object> source_map_url;
- MaybeHandle<FixedArray> host_defined_options;
+ MaybeHandle<Object> host_defined_options;
REPLMode repl_mode;
const ScriptOriginOptions origin_options;
};
diff --git a/deps/v8/src/codegen/shared-ia32-x64/macro-assembler-shared-ia32-x64.cc b/deps/v8/src/codegen/shared-ia32-x64/macro-assembler-shared-ia32-x64.cc
index bbfe894443..4dd54fd6f0 100644
--- a/deps/v8/src/codegen/shared-ia32-x64/macro-assembler-shared-ia32-x64.cc
+++ b/deps/v8/src/codegen/shared-ia32-x64/macro-assembler-shared-ia32-x64.cc
@@ -1205,6 +1205,101 @@ void SharedTurboAssembler::S128Store64Lane(Operand dst, XMMRegister src,
}
}
+// Helper macro to define the qfma macro-assembler helpers. It handles every
+// possible case of register aliasing to minimize the number of instructions.
+#define QFMA(ps_or_pd) \
+ if (CpuFeatures::IsSupported(FMA3)) { \
+ CpuFeatureScope fma3_scope(this, FMA3); \
+ if (dst == src1) { \
+ vfmadd231##ps_or_pd(dst, src2, src3); \
+ } else if (dst == src2) { \
+ vfmadd132##ps_or_pd(dst, src1, src3); \
+ } else if (dst == src3) { \
+ vfmadd213##ps_or_pd(dst, src2, src1); \
+ } else { \
+ CpuFeatureScope avx_scope(this, AVX); \
+ vmovups(dst, src1); \
+ vfmadd231##ps_or_pd(dst, src2, src3); \
+ } \
+ } else if (CpuFeatures::IsSupported(AVX)) { \
+ CpuFeatureScope avx_scope(this, AVX); \
+ vmul##ps_or_pd(tmp, src2, src3); \
+ vadd##ps_or_pd(dst, src1, tmp); \
+ } else { \
+ if (dst == src1) { \
+ movaps(tmp, src2); \
+ mul##ps_or_pd(tmp, src3); \
+ add##ps_or_pd(dst, tmp); \
+ } else if (dst == src2) { \
+ DCHECK_NE(src2, src1); \
+ mul##ps_or_pd(src2, src3); \
+ add##ps_or_pd(src2, src1); \
+ } else if (dst == src3) { \
+ DCHECK_NE(src3, src1); \
+ mul##ps_or_pd(src3, src2); \
+ add##ps_or_pd(src3, src1); \
+ } else { \
+ movaps(dst, src2); \
+ mul##ps_or_pd(dst, src3); \
+ add##ps_or_pd(dst, src1); \
+ } \
+ }
+
+// Helper macro to define the qfms macro-assembler helpers. It handles every
+// possible case of register aliasing to minimize the number of instructions.
+#define QFMS(ps_or_pd) \
+ if (CpuFeatures::IsSupported(FMA3)) { \
+ CpuFeatureScope fma3_scope(this, FMA3); \
+ if (dst == src1) { \
+ vfnmadd231##ps_or_pd(dst, src2, src3); \
+ } else if (dst == src2) { \
+ vfnmadd132##ps_or_pd(dst, src1, src3); \
+ } else if (dst == src3) { \
+ vfnmadd213##ps_or_pd(dst, src2, src1); \
+ } else { \
+ CpuFeatureScope avx_scope(this, AVX); \
+ vmovups(dst, src1); \
+ vfnmadd231##ps_or_pd(dst, src2, src3); \
+ } \
+ } else if (CpuFeatures::IsSupported(AVX)) { \
+ CpuFeatureScope avx_scope(this, AVX); \
+ vmul##ps_or_pd(tmp, src2, src3); \
+ vsub##ps_or_pd(dst, src1, tmp); \
+ } else { \
+ movaps(tmp, src2); \
+ mul##ps_or_pd(tmp, src3); \
+ if (dst != src1) { \
+ movaps(dst, src1); \
+ } \
+ sub##ps_or_pd(dst, tmp); \
+ }
+
+void SharedTurboAssembler::F32x4Qfma(XMMRegister dst, XMMRegister src1,
+ XMMRegister src2, XMMRegister src3,
+ XMMRegister tmp) {
+ QFMA(ps)
+}
+
+void SharedTurboAssembler::F32x4Qfms(XMMRegister dst, XMMRegister src1,
+ XMMRegister src2, XMMRegister src3,
+ XMMRegister tmp) {
+ QFMS(ps)
+}
+
+void SharedTurboAssembler::F64x2Qfma(XMMRegister dst, XMMRegister src1,
+ XMMRegister src2, XMMRegister src3,
+ XMMRegister tmp) {
+ QFMA(pd);
+}
+
+void SharedTurboAssembler::F64x2Qfms(XMMRegister dst, XMMRegister src1,
+ XMMRegister src2, XMMRegister src3,
+ XMMRegister tmp) {
+ QFMS(pd);
+}
+
+#undef QFMA
+#undef QFMS
+
} // namespace internal
} // namespace v8
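
As a reading aid for the QFMA/QFMS helpers moved into the shared macro-assembler above: whichever aliasing case or ISA path is taken, the per-lane result is a multiply-add, dst = src1 + src2 * src3, or a multiply-subtract, dst = src1 - src2 * src3. A scalar C++ sketch of that semantics (not the macro-assembler itself); with FMA3 the hardware rounds once, while the AVX/SSE fallbacks in the macro round twice (after the multiply and after the add/sub).

#include <cmath>
#include <cstdio>

// Per-lane meaning of F32x4Qfma / F64x2Qfma: dst = src1 + src2 * src3
// (vfmadd231 computes dst = src2 * src3 + dst, with dst preloaded from src1).
static double QfmaLane(double src1, double src2, double src3) {
  return std::fma(src2, src3, src1);
}

// Per-lane meaning of F32x4Qfms / F64x2Qfms: dst = src1 - src2 * src3
// (vfnmadd231 computes dst = -(src2 * src3) + dst).
static double QfmsLane(double src1, double src2, double src3) {
  return std::fma(-src2, src3, src1);
}

int main() {
  std::printf("qfma: %.1f\n", QfmaLane(1.0, 2.0, 3.0));  // 1 + 2*3 = 7.0
  std::printf("qfms: %.1f\n", QfmsLane(1.0, 2.0, 3.0));  // 1 - 2*3 = -5.0
  return 0;
}
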
diff --git a/deps/v8/src/codegen/shared-ia32-x64/macro-assembler-shared-ia32-x64.h b/deps/v8/src/codegen/shared-ia32-x64/macro-assembler-shared-ia32-x64.h
index 325dfea7d0..abe1d6200a 100644
--- a/deps/v8/src/codegen/shared-ia32-x64/macro-assembler-shared-ia32-x64.h
+++ b/deps/v8/src/codegen/shared-ia32-x64/macro-assembler-shared-ia32-x64.h
@@ -476,6 +476,15 @@ class V8_EXPORT_PRIVATE SharedTurboAssembler : public TurboAssemblerBase {
void S128Load32Splat(XMMRegister dst, Operand src);
void S128Store64Lane(Operand dst, XMMRegister src, uint8_t laneidx);
+ void F64x2Qfma(XMMRegister dst, XMMRegister src1, XMMRegister src2,
+ XMMRegister src3, XMMRegister tmp);
+ void F64x2Qfms(XMMRegister dst, XMMRegister src1, XMMRegister src2,
+ XMMRegister src3, XMMRegister tmp);
+ void F32x4Qfma(XMMRegister dst, XMMRegister src1, XMMRegister src2,
+ XMMRegister src3, XMMRegister tmp);
+ void F32x4Qfms(XMMRegister dst, XMMRegister src1, XMMRegister src2,
+ XMMRegister src3, XMMRegister tmp);
+
protected:
template <typename Op>
using AvxFn = void (Assembler::*)(XMMRegister, XMMRegister, Op, uint8_t);
@@ -900,7 +909,7 @@ class V8_EXPORT_PRIVATE SharedTurboAssemblerBase : public SharedTurboAssembler {
vpshufb(dst, tmp1, dst);
vpshufb(tmp2, tmp1, tmp2);
vpaddb(dst, dst, tmp2);
- } else if (CpuFeatures::IsSupported(ATOM)) {
+ } else if (CpuFeatures::IsSupported(INTEL_ATOM)) {
// Pre-Goldmont low-power Intel microarchitectures have very slow
// PSHUFB instruction, thus use PSHUFB-free divide-and-conquer
// algorithm on these processors. ATOM CPU feature captures exactly
diff --git a/deps/v8/src/codegen/turbo-assembler.cc b/deps/v8/src/codegen/turbo-assembler.cc
index 09c4559813..e1546f71ca 100644
--- a/deps/v8/src/codegen/turbo-assembler.cc
+++ b/deps/v8/src/codegen/turbo-assembler.cc
@@ -50,6 +50,12 @@ void TurboAssemblerBase::IndirectLoadConstant(Register destination,
if (isolate()->roots_table().IsRootHandle(object, &root_index)) {
// Roots are loaded relative to the root register.
LoadRoot(destination, root_index);
+ } else if (V8_EXTERNAL_CODE_SPACE_BOOL &&
+ isolate()->builtins()->IsBuiltinCodeDataContainerHandle(
+ object, &builtin)) {
+ // Similar to roots, builtins may be loaded from the builtins table.
+ LoadRootRelative(destination,
+ RootRegisterOffsetForBuiltinCodeDataContainer(builtin));
} else if (isolate()->builtins()->IsBuiltinHandle(object, &builtin)) {
// Similar to roots, builtins may be loaded from the builtins table.
LoadRootRelative(destination, RootRegisterOffsetForBuiltin(builtin));
@@ -101,6 +107,12 @@ int32_t TurboAssemblerBase::RootRegisterOffsetForBuiltin(Builtin builtin) {
}
// static
+int32_t TurboAssemblerBase::RootRegisterOffsetForBuiltinCodeDataContainer(
+ Builtin builtin) {
+ return IsolateData::BuiltinCodeDataContainerSlotOffset(builtin);
+}
+
+// static
intptr_t TurboAssemblerBase::RootRegisterOffsetForExternalReference(
Isolate* isolate, const ExternalReference& reference) {
return static_cast<intptr_t>(reference.address() - isolate->isolate_root());
diff --git a/deps/v8/src/codegen/turbo-assembler.h b/deps/v8/src/codegen/turbo-assembler.h
index 2f2deadaac..7403aa1bfd 100644
--- a/deps/v8/src/codegen/turbo-assembler.h
+++ b/deps/v8/src/codegen/turbo-assembler.h
@@ -80,6 +80,7 @@ class V8_EXPORT_PRIVATE TurboAssemblerBase : public Assembler {
static int32_t RootRegisterOffsetForRootIndex(RootIndex root_index);
static int32_t RootRegisterOffsetForBuiltin(Builtin builtin);
+ static int32_t RootRegisterOffsetForBuiltinCodeDataContainer(Builtin builtin);
// Returns the root-relative offset to reference.address().
static intptr_t RootRegisterOffsetForExternalReference(
diff --git a/deps/v8/src/codegen/unoptimized-compilation-info.cc b/deps/v8/src/codegen/unoptimized-compilation-info.cc
index 08cd818188..d0bc2d159d 100644
--- a/deps/v8/src/codegen/unoptimized-compilation-info.cc
+++ b/deps/v8/src/codegen/unoptimized-compilation-info.cc
@@ -18,7 +18,10 @@ namespace internal {
UnoptimizedCompilationInfo::UnoptimizedCompilationInfo(Zone* zone,
ParseInfo* parse_info,
FunctionLiteral* literal)
- : flags_(parse_info->flags()), feedback_vector_spec_(zone) {
+ : flags_(parse_info->flags()),
+ dispatcher_(parse_info->dispatcher()),
+ character_stream_(parse_info->character_stream()),
+ feedback_vector_spec_(zone) {
// NOTE: The parse_info passed here represents the global information gathered
// during parsing, but does not represent specific details of the actual
// function literal being compiled for this OptimizedCompilationInfo. As such,
diff --git a/deps/v8/src/codegen/unoptimized-compilation-info.h b/deps/v8/src/codegen/unoptimized-compilation-info.h
index 3cdb94158b..b7fb1e8de6 100644
--- a/deps/v8/src/codegen/unoptimized-compilation-info.h
+++ b/deps/v8/src/codegen/unoptimized-compilation-info.h
@@ -35,6 +35,10 @@ class V8_EXPORT_PRIVATE UnoptimizedCompilationInfo final {
FunctionLiteral* literal);
const UnoptimizedCompileFlags& flags() const { return flags_; }
+ LazyCompileDispatcher* dispatcher() { return dispatcher_; }
+ const Utf16CharacterStream* character_stream() const {
+ return character_stream_;
+ }
// Accessors for the input data of the function being compiled.
@@ -86,6 +90,10 @@ class V8_EXPORT_PRIVATE UnoptimizedCompilationInfo final {
// Compilation flags.
const UnoptimizedCompileFlags flags_;
+ // For dispatching eager compilation of lazily compiled functions.
+ LazyCompileDispatcher* dispatcher_;
+ const Utf16CharacterStream* character_stream_;
+
// The root AST node of the function literal being compiled.
FunctionLiteral* literal_;
diff --git a/deps/v8/src/codegen/x64/assembler-x64-inl.h b/deps/v8/src/codegen/x64/assembler-x64-inl.h
index 8e451e641e..cb2f67850a 100644
--- a/deps/v8/src/codegen/x64/assembler-x64-inl.h
+++ b/deps/v8/src/codegen/x64/assembler-x64-inl.h
@@ -45,14 +45,14 @@ void Assembler::emit_runtime_entry(Address entry, RelocInfo::Mode rmode) {
}
void Assembler::emit(Immediate x) {
- if (!RelocInfo::IsNone(x.rmode_)) {
+ if (!RelocInfo::IsNoInfo(x.rmode_)) {
RecordRelocInfo(x.rmode_);
}
emitl(x.value_);
}
void Assembler::emit(Immediate64 x) {
- if (!RelocInfo::IsNone(x.rmode_)) {
+ if (!RelocInfo::IsNoInfo(x.rmode_)) {
RecordRelocInfo(x.rmode_);
}
emitq(static_cast<uint64_t>(x.value_));
diff --git a/deps/v8/src/codegen/x64/assembler-x64.cc b/deps/v8/src/codegen/x64/assembler-x64.cc
index fe0403b80e..0fdeee7685 100644
--- a/deps/v8/src/codegen/x64/assembler-x64.cc
+++ b/deps/v8/src/codegen/x64/assembler-x64.cc
@@ -103,9 +103,9 @@ void CpuFeatures::ProbeImpl(bool cross_compile) {
if (cpu.has_lzcnt() && FLAG_enable_lzcnt) SetSupported(LZCNT);
if (cpu.has_popcnt() && FLAG_enable_popcnt) SetSupported(POPCNT);
if (strcmp(FLAG_mcpu, "auto") == 0) {
- if (cpu.is_atom()) SetSupported(ATOM);
+ if (cpu.is_atom()) SetSupported(INTEL_ATOM);
} else if (strcmp(FLAG_mcpu, "atom") == 0) {
- SetSupported(ATOM);
+ SetSupported(INTEL_ATOM);
}
// Ensure that supported cpu features make sense. E.g. it is wrong to support
@@ -141,7 +141,7 @@ void CpuFeatures::PrintFeatures() {
CpuFeatures::IsSupported(AVX2), CpuFeatures::IsSupported(FMA3),
CpuFeatures::IsSupported(BMI1), CpuFeatures::IsSupported(BMI2),
CpuFeatures::IsSupported(LZCNT), CpuFeatures::IsSupported(POPCNT),
- CpuFeatures::IsSupported(ATOM));
+ CpuFeatures::IsSupported(INTEL_ATOM));
}
// -----------------------------------------------------------------------------
@@ -276,7 +276,7 @@ bool ConstPool::TryRecordEntry(intptr_t data, RelocInfo::Mode mode) {
// Currently, partial constant pool only handles the following kinds of
// RelocInfo.
- if (mode != RelocInfo::NONE && mode != RelocInfo::EXTERNAL_REFERENCE &&
+ if (mode != RelocInfo::NO_INFO && mode != RelocInfo::EXTERNAL_REFERENCE &&
mode != RelocInfo::OFF_HEAP_TARGET)
return false;
@@ -330,7 +330,8 @@ void Assembler::PatchConstPool() {
bool Assembler::UseConstPoolFor(RelocInfo::Mode rmode) {
if (!FLAG_partial_constant_pool) return false;
- return (rmode == RelocInfo::NONE || rmode == RelocInfo::EXTERNAL_REFERENCE ||
+ return (rmode == RelocInfo::NO_INFO ||
+ rmode == RelocInfo::EXTERNAL_REFERENCE ||
rmode == RelocInfo::OFF_HEAP_TARGET);
}
@@ -703,7 +704,7 @@ void Assembler::immediate_arithmetic_op(byte subcode, Register dst,
Immediate src, int size) {
EnsureSpace ensure_space(this);
emit_rex(dst, size);
- if (is_int8(src.value_) && RelocInfo::IsNone(src.rmode_)) {
+ if (is_int8(src.value_) && RelocInfo::IsNoInfo(src.rmode_)) {
emit(0x83);
emit_modrm(subcode, dst);
emit(src.value_);
@@ -721,7 +722,7 @@ void Assembler::immediate_arithmetic_op(byte subcode, Operand dst,
Immediate src, int size) {
EnsureSpace ensure_space(this);
emit_rex(dst, size);
- if (is_int8(src.value_) && RelocInfo::IsNone(src.rmode_)) {
+ if (is_int8(src.value_) && RelocInfo::IsNoInfo(src.rmode_)) {
emit(0x83);
emit_operand(subcode, dst);
emit(src.value_);
@@ -1020,7 +1021,7 @@ void Assembler::near_jmp(intptr_t disp, RelocInfo::Mode rmode) {
EnsureSpace ensure_space(this);
emit(0xE9);
DCHECK(is_int32(disp));
- if (!RelocInfo::IsNone(rmode)) RecordRelocInfo(rmode);
+ if (!RelocInfo::IsNoInfo(rmode)) RecordRelocInfo(rmode);
emitl(static_cast<int32_t>(disp));
}
@@ -3416,30 +3417,33 @@ void Assembler::pmovmskb(Register dst, XMMRegister src) {
}
// AVX instructions
-
-void Assembler::vmovddup(XMMRegister dst, XMMRegister src) {
- DCHECK(IsEnabled(AVX));
- EnsureSpace ensure_space(this);
- emit_vex_prefix(dst, xmm0, src, kL128, kF2, k0F, kWIG);
- emit(0x12);
- emit_sse_operand(dst, src);
-}
-
-void Assembler::vmovddup(XMMRegister dst, Operand src) {
- DCHECK(IsEnabled(AVX));
- EnsureSpace ensure_space(this);
- emit_vex_prefix(dst, xmm0, src, kL128, kF2, k0F, kWIG);
- emit(0x12);
- emit_sse_operand(dst, src);
-}
-
-void Assembler::vmovshdup(XMMRegister dst, XMMRegister src) {
- DCHECK(IsEnabled(AVX));
- EnsureSpace ensure_space(this);
- emit_vex_prefix(dst, xmm0, src, kL128, kF3, k0F, kWIG);
- emit(0x16);
- emit_sse_operand(dst, src);
-}
+#define VMOV_DUP(SIMDRegister, length) \
+ void Assembler::vmovddup(SIMDRegister dst, SIMDRegister src) { \
+ DCHECK(IsEnabled(AVX)); \
+ EnsureSpace ensure_space(this); \
+ emit_vex_prefix(dst, xmm0, src, k##length, kF2, k0F, kWIG); \
+ emit(0x12); \
+ emit_sse_operand(dst, src); \
+ } \
+ \
+ void Assembler::vmovddup(SIMDRegister dst, Operand src) { \
+ DCHECK(IsEnabled(AVX)); \
+ EnsureSpace ensure_space(this); \
+ emit_vex_prefix(dst, xmm0, src, k##length, kF2, k0F, kWIG); \
+ emit(0x12); \
+ emit_sse_operand(dst, src); \
+ } \
+ \
+ void Assembler::vmovshdup(SIMDRegister dst, SIMDRegister src) { \
+ DCHECK(IsEnabled(AVX)); \
+ EnsureSpace ensure_space(this); \
+ emit_vex_prefix(dst, xmm0, src, k##length, kF3, k0F, kWIG); \
+ emit(0x16); \
+ emit_sse_operand(dst, src); \
+ }
+VMOV_DUP(XMMRegister, L128)
+VMOV_DUP(YMMRegister, L256)
+#undef VMOV_DUP
#define BROADCASTSS(SIMDRegister, length) \
void Assembler::vbroadcastss(SIMDRegister dst, Operand src) { \
@@ -3737,22 +3741,27 @@ void Assembler::vps(byte op, XMMRegister dst, XMMRegister src1,
emit(imm8);
}
-void Assembler::vpd(byte op, XMMRegister dst, XMMRegister src1,
- XMMRegister src2) {
- DCHECK(IsEnabled(AVX));
- EnsureSpace ensure_space(this);
- emit_vex_prefix(dst, src1, src2, kL128, k66, k0F, kWIG);
- emit(op);
- emit_sse_operand(dst, src2);
-}
-
-void Assembler::vpd(byte op, XMMRegister dst, XMMRegister src1, Operand src2) {
- DCHECK(IsEnabled(AVX));
- EnsureSpace ensure_space(this);
- emit_vex_prefix(dst, src1, src2, kL128, k66, k0F, kWIG);
- emit(op);
- emit_sse_operand(dst, src2);
-}
+#define VPD(SIMDRegister, length) \
+ void Assembler::vpd(byte op, SIMDRegister dst, SIMDRegister src1, \
+ SIMDRegister src2) { \
+ DCHECK(IsEnabled(AVX)); \
+ EnsureSpace ensure_space(this); \
+ emit_vex_prefix(dst, src1, src2, k##length, k66, k0F, kWIG); \
+ emit(op); \
+ emit_sse_operand(dst, src2); \
+ } \
+ \
+ void Assembler::vpd(byte op, SIMDRegister dst, SIMDRegister src1, \
+ Operand src2) { \
+ DCHECK(IsEnabled(AVX)); \
+ EnsureSpace ensure_space(this); \
+ emit_vex_prefix(dst, src1, src2, k##length, k66, k0F, kWIG); \
+ emit(op); \
+ emit_sse_operand(dst, src2); \
+ }
+VPD(XMMRegister, L128)
+VPD(YMMRegister, L256)
+#undef VPD
void Assembler::vucomiss(XMMRegister dst, XMMRegister src) {
DCHECK(IsEnabled(AVX));
@@ -4362,7 +4371,7 @@ void Assembler::db(uint8_t data) {
void Assembler::dd(uint32_t data, RelocInfo::Mode rmode) {
EnsureSpace ensure_space(this);
- if (!RelocInfo::IsNone(rmode)) {
+ if (!RelocInfo::IsNoInfo(rmode)) {
DCHECK(RelocInfo::IsDataEmbeddedObject(rmode) ||
RelocInfo::IsLiteralConstant(rmode));
RecordRelocInfo(rmode);
@@ -4372,7 +4381,7 @@ void Assembler::dd(uint32_t data, RelocInfo::Mode rmode) {
void Assembler::dq(uint64_t data, RelocInfo::Mode rmode) {
EnsureSpace ensure_space(this);
- if (!RelocInfo::IsNone(rmode)) {
+ if (!RelocInfo::IsNoInfo(rmode)) {
DCHECK(RelocInfo::IsDataEmbeddedObject(rmode) ||
RelocInfo::IsLiteralConstant(rmode));
RecordRelocInfo(rmode);
diff --git a/deps/v8/src/codegen/x64/assembler-x64.h b/deps/v8/src/codegen/x64/assembler-x64.h
index 41ba5f4ac1..2c89157979 100644
--- a/deps/v8/src/codegen/x64/assembler-x64.h
+++ b/deps/v8/src/codegen/x64/assembler-x64.h
@@ -130,7 +130,7 @@ class Immediate {
private:
const int32_t value_;
- const RelocInfo::Mode rmode_ = RelocInfo::NONE;
+ const RelocInfo::Mode rmode_ = RelocInfo::NO_INFO;
friend class Assembler;
};
@@ -148,7 +148,7 @@ class Immediate64 {
private:
const int64_t value_;
- const RelocInfo::Mode rmode_ = RelocInfo::NONE;
+ const RelocInfo::Mode rmode_ = RelocInfo::NO_INFO;
friend class Assembler;
};
@@ -1156,6 +1156,12 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
} \
void v##instruction(XMMRegister dst, Operand src) { \
vinstr(0x##opcode, dst, xmm0, src, k##prefix, k##escape1##escape2, kW0); \
+ } \
+ void v##instruction(YMMRegister dst, YMMRegister src) { \
+ vinstr(0x##opcode, dst, ymm0, src, k##prefix, k##escape1##escape2, kW0); \
+ } \
+ void v##instruction(YMMRegister dst, Operand src) { \
+ vinstr(0x##opcode, dst, ymm0, src, k##prefix, k##escape1##escape2, kW0); \
}
SSSE3_UNOP_INSTRUCTION_LIST(DECLARE_SSSE3_UNOP_AVX_INSTRUCTION)
@@ -1167,6 +1173,12 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
// The mask operand is encoded in bits[7:4] of the immediate byte.
emit(mask.code() << 4);
}
+ void vpblendvb(YMMRegister dst, YMMRegister src1, YMMRegister src2,
+ YMMRegister mask) {
+ vinstr(0x4C, dst, src1, src2, k66, k0F3A, kW0, AVX2);
+ // The mask operand is encoded in bits[7:4] of the immediate byte.
+ emit(mask.code() << 4);
+ }
void vblendvps(XMMRegister dst, XMMRegister src1, XMMRegister src2,
XMMRegister mask) {
@@ -1174,6 +1186,12 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
// The mask operand is encoded in bits[7:4] of the immediate byte.
emit(mask.code() << 4);
}
+ void vblendvps(YMMRegister dst, YMMRegister src1, YMMRegister src2,
+ YMMRegister mask) {
+ vinstr(0x4A, dst, src1, src2, k66, k0F3A, kW0, AVX);
+ // The mask operand is encoded in bits[7:4] of the immediate byte.
+ emit(mask.code() << 4);
+ }
void vblendvpd(XMMRegister dst, XMMRegister src1, XMMRegister src2,
XMMRegister mask) {
@@ -1181,6 +1199,12 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
// The mask operand is encoded in bits[7:4] of the immediate byte.
emit(mask.code() << 4);
}
+ void vblendvpd(YMMRegister dst, YMMRegister src1, YMMRegister src2,
+ YMMRegister mask) {
+ vinstr(0x4B, dst, src1, src2, k66, k0F3A, kW0, AVX);
+ // The mask operand is encoded in bits[7:4] of the immediate byte.
+ emit(mask.code() << 4);
+ }
#define DECLARE_SSE4_PMOV_AVX_INSTRUCTION(instruction, prefix, escape1, \
escape2, opcode) \
@@ -1329,7 +1353,10 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
// AVX instruction
void vmovddup(XMMRegister dst, XMMRegister src);
void vmovddup(XMMRegister dst, Operand src);
+ void vmovddup(YMMRegister dst, YMMRegister src);
+ void vmovddup(YMMRegister dst, Operand src);
void vmovshdup(XMMRegister dst, XMMRegister src);
+ void vmovshdup(YMMRegister dst, YMMRegister src);
void vbroadcastss(XMMRegister dst, Operand src);
void vbroadcastss(XMMRegister dst, XMMRegister src);
void vbroadcastss(YMMRegister dst, Operand src);
@@ -1569,13 +1596,21 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
}
void vmovaps(XMMRegister dst, XMMRegister src) { vps(0x28, dst, xmm0, src); }
+ void vmovaps(YMMRegister dst, YMMRegister src) { vps(0x28, dst, ymm0, src); }
void vmovaps(XMMRegister dst, Operand src) { vps(0x28, dst, xmm0, src); }
+ void vmovaps(YMMRegister dst, Operand src) { vps(0x28, dst, ymm0, src); }
void vmovups(XMMRegister dst, XMMRegister src) { vps(0x10, dst, xmm0, src); }
+ void vmovups(YMMRegister dst, YMMRegister src) { vps(0x10, dst, ymm0, src); }
void vmovups(XMMRegister dst, Operand src) { vps(0x10, dst, xmm0, src); }
+ void vmovups(YMMRegister dst, Operand src) { vps(0x10, dst, ymm0, src); }
void vmovups(Operand dst, XMMRegister src) { vps(0x11, src, xmm0, dst); }
+ void vmovups(Operand dst, YMMRegister src) { vps(0x11, src, ymm0, dst); }
void vmovapd(XMMRegister dst, XMMRegister src) { vpd(0x28, dst, xmm0, src); }
+ void vmovapd(YMMRegister dst, YMMRegister src) { vpd(0x28, dst, ymm0, src); }
void vmovupd(XMMRegister dst, Operand src) { vpd(0x10, dst, xmm0, src); }
+ void vmovupd(YMMRegister dst, Operand src) { vpd(0x10, dst, ymm0, src); }
void vmovupd(Operand dst, XMMRegister src) { vpd(0x11, src, xmm0, dst); }
+ void vmovupd(Operand dst, YMMRegister src) { vpd(0x11, src, ymm0, dst); }
void vmovmskps(Register dst, XMMRegister src) {
XMMRegister idst = XMMRegister::from_code(dst.code());
vps(0x50, idst, xmm0, src);
@@ -1775,7 +1810,9 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
void vps(byte op, XMMRegister dst, XMMRegister src1, XMMRegister src2,
byte imm8);
void vpd(byte op, XMMRegister dst, XMMRegister src1, XMMRegister src2);
+ void vpd(byte op, YMMRegister dst, YMMRegister src1, YMMRegister src2);
void vpd(byte op, XMMRegister dst, XMMRegister src1, Operand src2);
+ void vpd(byte op, YMMRegister dst, YMMRegister src1, Operand src2);
// AVX2 instructions
#define AVX2_INSTRUCTION(instr, prefix, escape1, escape2, opcode) \
@@ -1945,9 +1982,9 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
// Writes a single word of data in the code stream.
// Used for inline tables, e.g., jump-tables.
void db(uint8_t data);
- void dd(uint32_t data, RelocInfo::Mode rmode = RelocInfo::NONE);
- void dq(uint64_t data, RelocInfo::Mode rmode = RelocInfo::NONE);
- void dp(uintptr_t data, RelocInfo::Mode rmode = RelocInfo::NONE) {
+ void dd(uint32_t data, RelocInfo::Mode rmode = RelocInfo::NO_INFO);
+ void dq(uint64_t data, RelocInfo::Mode rmode = RelocInfo::NO_INFO);
+ void dp(uintptr_t data, RelocInfo::Mode rmode = RelocInfo::NO_INFO) {
dq(data, rmode);
}
void dq(Label* label);
diff --git a/deps/v8/src/codegen/x64/fma-instr.h b/deps/v8/src/codegen/x64/fma-instr.h
index c607429e33..83165cc670 100644
--- a/deps/v8/src/codegen/x64/fma-instr.h
+++ b/deps/v8/src/codegen/x64/fma-instr.h
@@ -5,7 +5,7 @@
#ifndef V8_CODEGEN_X64_FMA_INSTR_H_
#define V8_CODEGEN_X64_FMA_INSTR_H_
-#define FMA_INSTRUCTION_LIST(V) \
+#define FMA_SD_INSTRUCTION_LIST(V) \
V(vfmadd132sd, L128, 66, 0F, 38, W1, 99) \
V(vfmadd213sd, L128, 66, 0F, 38, W1, a9) \
V(vfmadd231sd, L128, 66, 0F, 38, W1, b9) \
@@ -17,25 +17,31 @@
V(vfnmadd231sd, L128, 66, 0F, 38, W1, bd) \
V(vfnmsub132sd, L128, 66, 0F, 38, W1, 9f) \
V(vfnmsub213sd, L128, 66, 0F, 38, W1, af) \
- V(vfnmsub231sd, L128, 66, 0F, 38, W1, bf) \
- V(vfmadd132ss, LIG, 66, 0F, 38, W0, 99) \
- V(vfmadd213ss, LIG, 66, 0F, 38, W0, a9) \
- V(vfmadd231ss, LIG, 66, 0F, 38, W0, b9) \
- V(vfmsub132ss, LIG, 66, 0F, 38, W0, 9b) \
- V(vfmsub213ss, LIG, 66, 0F, 38, W0, ab) \
- V(vfmsub231ss, LIG, 66, 0F, 38, W0, bb) \
- V(vfnmadd132ss, LIG, 66, 0F, 38, W0, 9d) \
- V(vfnmadd213ss, LIG, 66, 0F, 38, W0, ad) \
- V(vfnmadd231ss, LIG, 66, 0F, 38, W0, bd) \
- V(vfnmsub132ss, LIG, 66, 0F, 38, W0, 9f) \
- V(vfnmsub213ss, LIG, 66, 0F, 38, W0, af) \
- V(vfnmsub231ss, LIG, 66, 0F, 38, W0, bf) \
+ V(vfnmsub231sd, L128, 66, 0F, 38, W1, bf)
+
+#define FMA_SS_INSTRUCTION_LIST(V) \
+ V(vfmadd132ss, LIG, 66, 0F, 38, W0, 99) \
+ V(vfmadd213ss, LIG, 66, 0F, 38, W0, a9) \
+ V(vfmadd231ss, LIG, 66, 0F, 38, W0, b9) \
+ V(vfmsub132ss, LIG, 66, 0F, 38, W0, 9b) \
+ V(vfmsub213ss, LIG, 66, 0F, 38, W0, ab) \
+ V(vfmsub231ss, LIG, 66, 0F, 38, W0, bb) \
+ V(vfnmadd132ss, LIG, 66, 0F, 38, W0, 9d) \
+ V(vfnmadd213ss, LIG, 66, 0F, 38, W0, ad) \
+ V(vfnmadd231ss, LIG, 66, 0F, 38, W0, bd) \
+ V(vfnmsub132ss, LIG, 66, 0F, 38, W0, 9f) \
+ V(vfnmsub213ss, LIG, 66, 0F, 38, W0, af) \
+ V(vfnmsub231ss, LIG, 66, 0F, 38, W0, bf)
+
+#define FMA_PS_INSTRUCTION_LIST(V) \
V(vfmadd132ps, L128, 66, 0F, 38, W0, 98) \
V(vfmadd213ps, L128, 66, 0F, 38, W0, a8) \
V(vfmadd231ps, L128, 66, 0F, 38, W0, b8) \
V(vfnmadd132ps, L128, 66, 0F, 38, W0, 9c) \
V(vfnmadd213ps, L128, 66, 0F, 38, W0, ac) \
- V(vfnmadd231ps, L128, 66, 0F, 38, W0, bc) \
+ V(vfnmadd231ps, L128, 66, 0F, 38, W0, bc)
+
+#define FMA_PD_INSTRUCTION_LIST(V) \
V(vfmadd132pd, L128, 66, 0F, 38, W1, 98) \
V(vfmadd213pd, L128, 66, 0F, 38, W1, a8) \
V(vfmadd231pd, L128, 66, 0F, 38, W1, b8) \
@@ -43,4 +49,10 @@
V(vfnmadd213pd, L128, 66, 0F, 38, W1, ac) \
V(vfnmadd231pd, L128, 66, 0F, 38, W1, bc)
+#define FMA_INSTRUCTION_LIST(V) \
+ FMA_SD_INSTRUCTION_LIST(V) \
+ FMA_SS_INSTRUCTION_LIST(V) \
+ FMA_PS_INSTRUCTION_LIST(V) \
+ FMA_PD_INSTRUCTION_LIST(V)
+
#endif // V8_CODEGEN_X64_FMA_INSTR_H_
diff --git a/deps/v8/src/codegen/x64/macro-assembler-x64.cc b/deps/v8/src/codegen/x64/macro-assembler-x64.cc
index c8c5903410..6ac8017ca8 100644
--- a/deps/v8/src/codegen/x64/macro-assembler-x64.cc
+++ b/deps/v8/src/codegen/x64/macro-assembler-x64.cc
@@ -371,8 +371,8 @@ void MacroAssembler::RecordWriteField(Register object, int offset,
// turned on to provoke errors.
if (FLAG_debug_code) {
ASM_CODE_COMMENT_STRING(this, "Zap scratch registers");
- Move(value, kZapValue, RelocInfo::NONE);
- Move(slot_address, kZapValue, RelocInfo::NONE);
+ Move(value, kZapValue, RelocInfo::NO_INFO);
+ Move(slot_address, kZapValue, RelocInfo::NO_INFO);
}
}
@@ -669,8 +669,8 @@ void MacroAssembler::RecordWrite(Register object, Register slot_address,
// turned on to provoke errors.
if (FLAG_debug_code) {
ASM_CODE_COMMENT_STRING(this, "Zap scratch registers");
- Move(slot_address, kZapValue, RelocInfo::NONE);
- Move(value, kZapValue, RelocInfo::NONE);
+ Move(slot_address, kZapValue, RelocInfo::NO_INFO);
+ Move(value, kZapValue, RelocInfo::NO_INFO);
}
}
@@ -819,7 +819,7 @@ int TurboAssembler::RequiredStackSizeForCallerSaved(SaveFPRegsMode fp_mode,
int TurboAssembler::PushCallerSaved(SaveFPRegsMode fp_mode, Register exclusion1,
Register exclusion2, Register exclusion3) {
ASM_CODE_COMMENT(this);
- // We don't allow a GC during a store buffer overflow so there is no need to
+ // We don't allow a GC in a write barrier slow path so there is no need to
// store the registers in any particular way, but we do have to store and
// restore them.
int bytes = 0;
@@ -906,99 +906,6 @@ void TurboAssembler::Pextrq(Register dst, XMMRegister src, int8_t imm8) {
}
}
-// Helper macro to define qfma macro-assembler. This takes care of every
-// possible case of register aliasing to minimize the number of instructions.
-#define QFMA(ps_or_pd) \
- if (CpuFeatures::IsSupported(FMA3)) { \
- CpuFeatureScope fma3_scope(this, FMA3); \
- if (dst == src1) { \
- vfmadd231##ps_or_pd(dst, src2, src3); \
- } else if (dst == src2) { \
- vfmadd132##ps_or_pd(dst, src1, src3); \
- } else if (dst == src3) { \
- vfmadd213##ps_or_pd(dst, src2, src1); \
- } else { \
- vmovups(dst, src1); \
- vfmadd231##ps_or_pd(dst, src2, src3); \
- } \
- } else if (CpuFeatures::IsSupported(AVX)) { \
- CpuFeatureScope avx_scope(this, AVX); \
- vmul##ps_or_pd(tmp, src2, src3); \
- vadd##ps_or_pd(dst, src1, tmp); \
- } else { \
- if (dst == src1) { \
- movaps(tmp, src2); \
- mul##ps_or_pd(tmp, src3); \
- add##ps_or_pd(dst, tmp); \
- } else if (dst == src2) { \
- DCHECK_NE(src2, src1); \
- mul##ps_or_pd(src2, src3); \
- add##ps_or_pd(src2, src1); \
- } else if (dst == src3) { \
- DCHECK_NE(src3, src1); \
- mul##ps_or_pd(src3, src2); \
- add##ps_or_pd(src3, src1); \
- } else { \
- movaps(dst, src2); \
- mul##ps_or_pd(dst, src3); \
- add##ps_or_pd(dst, src1); \
- } \
- }
-
-// Helper macro to define qfms macro-assembler. This takes care of every
-// possible case of register aliasing to minimize the number of instructions.
-#define QFMS(ps_or_pd) \
- if (CpuFeatures::IsSupported(FMA3)) { \
- CpuFeatureScope fma3_scope(this, FMA3); \
- if (dst == src1) { \
- vfnmadd231##ps_or_pd(dst, src2, src3); \
- } else if (dst == src2) { \
- vfnmadd132##ps_or_pd(dst, src1, src3); \
- } else if (dst == src3) { \
- vfnmadd213##ps_or_pd(dst, src2, src1); \
- } else { \
- vmovups(dst, src1); \
- vfnmadd231##ps_or_pd(dst, src2, src3); \
- } \
- } else if (CpuFeatures::IsSupported(AVX)) { \
- CpuFeatureScope avx_scope(this, AVX); \
- vmul##ps_or_pd(tmp, src2, src3); \
- vsub##ps_or_pd(dst, src1, tmp); \
- } else { \
- movaps(tmp, src2); \
- mul##ps_or_pd(tmp, src3); \
- if (dst != src1) { \
- movaps(dst, src1); \
- } \
- sub##ps_or_pd(dst, tmp); \
- }
-
-void TurboAssembler::F32x4Qfma(XMMRegister dst, XMMRegister src1,
- XMMRegister src2, XMMRegister src3,
- XMMRegister tmp) {
- QFMA(ps)
-}
-
-void TurboAssembler::F32x4Qfms(XMMRegister dst, XMMRegister src1,
- XMMRegister src2, XMMRegister src3,
- XMMRegister tmp) {
- QFMS(ps)
-}
-
-void TurboAssembler::F64x2Qfma(XMMRegister dst, XMMRegister src1,
- XMMRegister src2, XMMRegister src3,
- XMMRegister tmp) {
- QFMA(pd);
-}
-
-void TurboAssembler::F64x2Qfms(XMMRegister dst, XMMRegister src1,
- XMMRegister src2, XMMRegister src3,
- XMMRegister tmp) {
- QFMS(pd);
-}
-
-#undef QFMOP
-
void TurboAssembler::Cvtss2sd(XMMRegister dst, XMMRegister src) {
if (CpuFeatures::IsSupported(AVX)) {
CpuFeatureScope scope(this, AVX);
@@ -1561,7 +1468,7 @@ void TurboAssembler::Move(Register dst, Smi source) {
if (value == 0) {
xorl(dst, dst);
} else if (SmiValuesAre32Bits() || value < 0) {
- Move(dst, source.ptr(), RelocInfo::NONE);
+ Move(dst, source.ptr(), RelocInfo::NO_INFO);
} else {
uint32_t uvalue = static_cast<uint32_t>(source.ptr());
Move(dst, uvalue);
@@ -1596,7 +1503,7 @@ void TurboAssembler::Move(Register dst, Register src) {
void TurboAssembler::Move(Register dst, Operand src) { movq(dst, src); }
void TurboAssembler::Move(Register dst, Immediate src) {
- if (src.rmode() == RelocInfo::Mode::NONE) {
+ if (src.rmode() == RelocInfo::Mode::NO_INFO) {
Move(dst, src.value());
} else {
movl(dst, src);
@@ -1920,7 +1827,7 @@ void TurboAssembler::Jump(Handle<Code> code_object, RelocInfo::Mode rmode,
j(cc, code_object, rmode);
}
-void MacroAssembler::JumpToInstructionStream(Address entry) {
+void MacroAssembler::JumpToOffHeapInstructionStream(Address entry) {
Move(kOffHeapTrampolineRegister, entry, RelocInfo::OFF_HEAP_TARGET);
jmp(kOffHeapTrampolineRegister);
}
@@ -1931,7 +1838,7 @@ void TurboAssembler::Call(ExternalReference ext) {
}
void TurboAssembler::Call(Operand op) {
- if (!CpuFeatures::IsSupported(ATOM)) {
+ if (!CpuFeatures::IsSupported(INTEL_ATOM)) {
call(op);
} else {
movq(kScratchRegister, op);
diff --git a/deps/v8/src/codegen/x64/macro-assembler-x64.h b/deps/v8/src/codegen/x64/macro-assembler-x64.h
index da57d4629a..262162ded0 100644
--- a/deps/v8/src/codegen/x64/macro-assembler-x64.h
+++ b/deps/v8/src/codegen/x64/macro-assembler-x64.h
@@ -177,15 +177,6 @@ class V8_EXPORT_PRIVATE TurboAssembler
void Pinsrq(XMMRegister dst, XMMRegister src1, Operand src2, uint8_t imm8,
uint32_t* load_pc_offset = nullptr);
- void F64x2Qfma(XMMRegister dst, XMMRegister src1, XMMRegister src2,
- XMMRegister src3, XMMRegister tmp);
- void F64x2Qfms(XMMRegister dst, XMMRegister src1, XMMRegister src2,
- XMMRegister src3, XMMRegister tmp);
- void F32x4Qfma(XMMRegister dst, XMMRegister src1, XMMRegister src2,
- XMMRegister src3, XMMRegister tmp);
- void F32x4Qfms(XMMRegister dst, XMMRegister src1, XMMRegister src2,
- XMMRegister src3, XMMRegister tmp);
-
void Lzcntq(Register dst, Register src);
void Lzcntq(Register dst, Operand src);
void Lzcntl(Register dst, Register src);
@@ -335,7 +326,7 @@ class V8_EXPORT_PRIVATE TurboAssembler
void Move(Register dst, Address ptr, RelocInfo::Mode rmode) {
// This method must not be used with heap object references. The stored
// address is not GC safe. Use the handle version instead.
- DCHECK(rmode == RelocInfo::NONE || rmode > RelocInfo::LAST_GCED_ENUM);
+ DCHECK(rmode == RelocInfo::NO_INFO || rmode > RelocInfo::LAST_GCED_ENUM);
movq(dst, Immediate64(ptr, rmode));
}
@@ -784,7 +775,7 @@ class V8_EXPORT_PRIVATE MacroAssembler : public TurboAssembler {
void PopQuad(Operand dst);
// Generates a trampoline to jump to the off-heap instruction stream.
- void JumpToInstructionStream(Address entry);
+ void JumpToOffHeapInstructionStream(Address entry);
// Compare object type for heap object.
// Always use unsigned comparisons: above and below, not less and greater.
diff --git a/deps/v8/src/common/globals.h b/deps/v8/src/common/globals.h
index f8d3446126..4d17867542 100644
--- a/deps/v8/src/common/globals.h
+++ b/deps/v8/src/common/globals.h
@@ -90,6 +90,14 @@ constexpr int64_t TB = static_cast<int64_t>(GB) * 1024;
#define V8_DEFAULT_STACK_SIZE_KB 984
#endif
+// Helper macros to enable handling of direct C calls in the simulator.
+#if defined(USE_SIMULATOR) && defined(V8_TARGET_ARCH_ARM64)
+#define V8_USE_SIMULATOR_WITH_GENERIC_C_CALLS
+#define V8_IF_USE_SIMULATOR(V) , V
+#else
+#define V8_IF_USE_SIMULATOR(V)
+#endif // defined(USE_SIMULATOR) && defined(V8_TARGET_ARCH_ARM64)
+
// Minimum stack size in KB required by compilers.
constexpr int kStackSpaceRequiredForCompilation = 40;
@@ -104,8 +112,15 @@ STATIC_ASSERT(V8_DEFAULT_STACK_SIZE_KB* KB +
kStackLimitSlackForDeoptimizationInBytes <=
MB);
-#if defined(V8_SHORT_BUILTIN_CALLS) && \
- (!defined(V8_COMPRESS_POINTERS) || defined(V8_EXTERNAL_CODE_SPACE))
+// V8_ENABLE_NEAR_CODE_RANGE_BOOL enables logic that tries to allocate the
+// code range within pc-relative call/jump reach of the embedded builtins.
+// This machinery helps only when we can choose where to allocate the code
+// range and can benefit from doing so, which is the case for the following
+// configurations:
+// - external code space AND pointer compression are enabled,
+// - the short builtin calls feature is enabled while pointer compression is
+//   not.
+#if (defined(V8_SHORT_BUILTIN_CALLS) && !defined(V8_COMPRESS_POINTERS)) || \
+ defined(V8_EXTERNAL_CODE_SPACE)
#define V8_ENABLE_NEAR_CODE_RANGE_BOOL true
#else
#define V8_ENABLE_NEAR_CODE_RANGE_BOOL false
@@ -115,11 +130,6 @@ STATIC_ASSERT(V8_DEFAULT_STACK_SIZE_KB* KB +
// physical memory by checking the max old space size.
const size_t kShortBuiltinCallsOldSpaceSizeThreshold = size_t{2} * GB;
-// This constant is used for detecting whether code range could be
-// allocated within the +/- 2GB boundary to builtins' embedded blob
-// to use short builtin calls.
-const size_t kShortBuiltinCallsBoundary = size_t{2} * GB;
-
// Determine whether dict mode prototypes feature is enabled.
#ifdef V8_ENABLE_SWISS_NAME_DICTIONARY
#define V8_ENABLE_SWISS_NAME_DICTIONARY_BOOL true
@@ -866,8 +876,27 @@ inline constexpr bool IsSharedAllocationType(AllocationType kind) {
kind == AllocationType::kSharedMap;
}
-// TODO(ishell): review and rename kWordAligned to kTaggedAligned.
-enum AllocationAlignment { kWordAligned, kDoubleAligned, kDoubleUnaligned };
+enum AllocationAlignment {
+  // The allocated address is kTaggedSize aligned (this is the default for
+  // most allocations).
+ kTaggedAligned,
+ // The allocated address is kDoubleSize aligned.
+ kDoubleAligned,
+ // The (allocated address + kTaggedSize) is kDoubleSize aligned.
+ kDoubleUnaligned
+};
+
+#ifdef V8_HOST_ARCH_32_BIT
+#define USE_ALLOCATION_ALIGNMENT_BOOL true
+#else
+#ifdef V8_COMPRESS_POINTERS
+// TODO(ishell, v8:8875): Consider using aligned allocations once the
+// allocation alignment inconsistency is fixed. For now we keep using
+// unaligned access since both x64 and arm64 architectures (where pointer
+// compression is supported) allow unaligned access to doubles and full words.
+#endif // V8_COMPRESS_POINTERS
+#define USE_ALLOCATION_ALIGNMENT_BOOL false
+#endif // V8_HOST_ARCH_32_BIT
enum class AccessMode { ATOMIC, NON_ATOMIC };
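
A small C++ sketch of what the three AllocationAlignment values above require of an allocation address, using illustrative sizes (kTaggedSize = 4 and kDoubleSize = 8 here; the real constants depend on the build configuration):

#include <cstdint>
#include <cstdio>

constexpr uintptr_t kTaggedSizeExample = 4;  // e.g. with pointer compression
constexpr uintptr_t kDoubleSizeExample = 8;

bool IsTaggedAligned(uintptr_t addr) { return addr % kTaggedSizeExample == 0; }
bool IsDoubleAligned(uintptr_t addr) { return addr % kDoubleSizeExample == 0; }
// kDoubleUnaligned: the word one tagged slot past the start (e.g. a double
// payload behind a tagged header) ends up kDoubleSize aligned.
bool IsDoubleUnaligned(uintptr_t addr) {
  return (addr + kTaggedSizeExample) % kDoubleSizeExample == 0;
}

int main() {
  std::printf("%d %d %d\n", IsTaggedAligned(0x1004), IsDoubleAligned(0x1008),
              IsDoubleUnaligned(0x100c));  // prints: 1 1 1
  return 0;
}
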
@@ -938,7 +967,7 @@ enum ParseRestriction : bool {
};
// State for inline cache call sites. Aliased as IC::State.
-enum InlineCacheState {
+enum class InlineCacheState {
// No feedback will be collected.
NO_FEEDBACK,
// Has never been executed.
@@ -957,24 +986,26 @@ enum InlineCacheState {
GENERIC,
};
+inline size_t hash_value(InlineCacheState mode) { return bit_cast<int>(mode); }
+
// Printing support.
inline const char* InlineCacheState2String(InlineCacheState state) {
switch (state) {
- case NO_FEEDBACK:
+ case InlineCacheState::NO_FEEDBACK:
return "NOFEEDBACK";
- case UNINITIALIZED:
+ case InlineCacheState::UNINITIALIZED:
return "UNINITIALIZED";
- case MONOMORPHIC:
+ case InlineCacheState::MONOMORPHIC:
return "MONOMORPHIC";
- case RECOMPUTE_HANDLER:
+ case InlineCacheState::RECOMPUTE_HANDLER:
return "RECOMPUTE_HANDLER";
- case POLYMORPHIC:
+ case InlineCacheState::POLYMORPHIC:
return "POLYMORPHIC";
- case MEGAMORPHIC:
+ case InlineCacheState::MEGAMORPHIC:
return "MEGAMORPHIC";
- case MEGADOM:
+ case InlineCacheState::MEGADOM:
return "MEGADOM";
- case GENERIC:
+ case InlineCacheState::GENERIC:
return "GENERIC";
}
UNREACHABLE();
@@ -1613,7 +1644,7 @@ inline std::ostream& operator<<(std::ostream& os,
using FileAndLine = std::pair<const char*, int>;
-enum OptimizationMarker : int32_t {
+enum class OptimizationMarker : int32_t {
// These values are set so that it is easy to check if there is a marker where
// some processing needs to be done.
kNone = 0b000,
@@ -1626,8 +1657,11 @@ enum OptimizationMarker : int32_t {
// For kNone or kInOptimizationQueue we don't need any special processing.
// To check both cases using a single mask, we expect the kNone to be 0 and
// kInOptimizationQueue to be 1 so that we can mask off the lsb for checking.
-STATIC_ASSERT(kNone == 0b000 && kInOptimizationQueue == 0b001);
-STATIC_ASSERT(kLastOptimizationMarker <= 0b111);
+STATIC_ASSERT(static_cast<int>(OptimizationMarker::kNone) == 0b000 &&
+ static_cast<int>(OptimizationMarker::kInOptimizationQueue) ==
+ 0b001);
+STATIC_ASSERT(static_cast<int>(OptimizationMarker::kLastOptimizationMarker) <=
+ 0b111);
static constexpr uint32_t kNoneOrInOptimizationQueueMask = 0b110;
inline bool IsInOptimizationQueueMarker(OptimizationMarker marker) {
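
The mask asserted above works because clearing the least significant bit maps both kNone (0b000) and kInOptimizationQueue (0b001) to zero, so a single AND covers both 'no special processing' cases. A C++ sketch; the extra marker value is a hypothetical stand-in chosen only to match the asserted bit layout:

#include <cstdint>
#include <cstdio>

enum class Marker : int32_t {
  kNone = 0b000,
  kInOptimizationQueue = 0b001,
  kSomeOtherMarker = 0b010,  // hypothetical value that does need processing
};

constexpr uint32_t kNoneOrInQueueMask = 0b110;

static bool NeedsProcessing(Marker m) {
  return (static_cast<uint32_t>(m) & kNoneOrInQueueMask) != 0;
}

int main() {
  std::printf("%d %d %d\n", NeedsProcessing(Marker::kNone),   // 0
              NeedsProcessing(Marker::kInOptimizationQueue),  // 0
              NeedsProcessing(Marker::kSomeOtherMarker));     // 1
  return 0;
}
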
@@ -1760,7 +1794,7 @@ inline bool IsGrowStoreMode(KeyedAccessStoreMode store_mode) {
return store_mode == STORE_AND_GROW_HANDLE_COW;
}
-enum IcCheckType { ELEMENT, PROPERTY };
+enum class IcCheckType { kElement, kProperty };
// Helper stubs can be called in different ways depending on where the target
// code is located and how the call sequence is expected to look like:
@@ -1880,13 +1914,13 @@ enum PropertiesEnumerationMode {
kPropertyAdditionOrder,
};
-enum class StringInternalizationStrategy {
- // The string must be internalized by first copying.
+enum class StringTransitionStrategy {
+ // The string must be transitioned to a new representation by first copying.
kCopy,
- // The string can be internalized in-place by changing its map.
+ // The string can be transitioned in-place by changing its map.
kInPlace,
- // The string is already internalized.
- kAlreadyInternalized
+ // The string is already transitioned to the desired representation.
+ kAlreadyTransitioned
};
} // namespace internal
diff --git a/deps/v8/src/common/high-allocation-throughput-scope.h b/deps/v8/src/common/high-allocation-throughput-scope.h
new file mode 100644
index 0000000000..2f04e9327c
--- /dev/null
+++ b/deps/v8/src/common/high-allocation-throughput-scope.h
@@ -0,0 +1,37 @@
+// Copyright 2021 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_COMMON_HIGH_ALLOCATION_THROUGHPUT_SCOPE_H_
+#define V8_COMMON_HIGH_ALLOCATION_THROUGHPUT_SCOPE_H_
+
+#include "include/v8-platform.h"
+
+namespace v8 {
+namespace internal {
+
+/**
+ * Scope that notifies the embedder's observer about entering sections with a
+ * high throughput of malloc/free operations.
+ */
+class HighAllocationThroughputScope final {
+ public:
+ explicit HighAllocationThroughputScope(Platform* platform)
+ : observer_(platform->GetHighAllocationThroughputObserver()) {
+ observer_->LeaveSection();
+ }
+
+ HighAllocationThroughputScope(const HighAllocationThroughputScope&) = delete;
+ HighAllocationThroughputScope& operator=(
+ const HighAllocationThroughputScope&) = delete;
+
+ ~HighAllocationThroughputScope() { observer_->EnterSection(); }
+
+ private:
+ HighAllocationThroughputObserver* observer_;
+};
+
+} // namespace internal
+} // namespace v8
+
+#endif // V8_COMMON_HIGH_ALLOCATION_THROUGHPUT_SCOPE_H_
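
A usage sketch for the new scope above, with a stand-in observer type (the real one is the HighAllocationThroughputObserver obtained via Platform::GetHighAllocationThroughputObserver() from include/v8-platform.h): the scope tells the observer to leave its section for the duration of an allocation-heavy region and to re-enter it afterwards, mirroring the constructor/destructor pair in the header.

#include <cstdio>

// Stand-in for the observer interface from include/v8-platform.h, for
// illustration only.
class ObserverStandIn {
 public:
  void EnterSection() { std::puts("observer: back to normal allocation"); }
  void LeaveSection() { std::puts("observer: high-throughput region begins"); }
};

// Same RAII shape as HighAllocationThroughputScope, but taking the stand-in
// observer directly instead of fetching it from a v8::Platform.
class HighThroughputScopeSketch {
 public:
  explicit HighThroughputScopeSketch(ObserverStandIn* observer)
      : observer_(observer) {
    observer_->LeaveSection();
  }
  ~HighThroughputScopeSketch() { observer_->EnterSection(); }

  HighThroughputScopeSketch(const HighThroughputScopeSketch&) = delete;
  HighThroughputScopeSketch& operator=(const HighThroughputScopeSketch&) =
      delete;

 private:
  ObserverStandIn* observer_;
};

int main() {
  ObserverStandIn observer;
  {
    HighThroughputScopeSketch scope(&observer);
    // ... allocation-heavy work (e.g. building a large zone or snapshot) ...
  }
  return 0;
}
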
diff --git a/deps/v8/src/common/message-template.h b/deps/v8/src/common/message-template.h
index 2b69e1d6a9..de75463362 100644
--- a/deps/v8/src/common/message-template.h
+++ b/deps/v8/src/common/message-template.h
@@ -37,7 +37,6 @@ namespace internal {
T(AwaitNotInAsyncContext, \
"await is only valid in async functions and the top level bodies of " \
"modules") \
- T(AwaitNotInAsyncFunction, "await is only valid in async function") \
T(AwaitNotInDebugEvaluate, \
"await can not be used when evaluating code " \
"while paused in the debugger") \
@@ -321,6 +320,9 @@ namespace internal {
"Invalid property descriptor. Cannot both specify accessors and a value " \
"or writable attribute, %") \
T(VarRedeclaration, "Identifier '%' has already been declared") \
+ T(VarNotAllowedInEvalScope, \
+ "Identifier '%' cannot be declared with 'var' in current evaluation " \
+ "scope, consider trying 'let' instead") \
T(WrongArgs, "%: Arguments list has wrong type") \
/* ReferenceError */ \
T(NotDefined, "% is not defined") \
@@ -347,7 +349,7 @@ namespace internal {
T(InvalidCountValue, "Invalid count value") \
T(InvalidDataViewAccessorOffset, \
"Offset is outside the bounds of the DataView") \
- T(InvalidDataViewLength, "Invalid DataView length %") \
+ T(InvalidDataViewLength, "Invalid DataView length") \
T(InvalidOffset, "Start offset % is outside the bounds of the buffer") \
T(InvalidHint, "Invalid hint: %") \
T(InvalidIndex, "Invalid value: not (convertible to) a safe integer") \
@@ -442,7 +444,8 @@ namespace internal {
"Invalid module export name: contains unpaired surrogate") \
T(InvalidRegExpFlags, "Invalid flags supplied to RegExp constructor '%'") \
T(InvalidOrUnexpectedToken, "Invalid or unexpected token") \
- T(InvalidPrivateBrand, "Object must be an instance of class %") \
+ T(InvalidPrivateBrandInstance, "Receiver must be an instance of class %") \
+ T(InvalidPrivateBrandStatic, "Receiver must be class %") \
T(InvalidPrivateBrandReinitialization, \
"Cannot initialize private methods of class % twice on the same object") \
T(InvalidPrivateFieldReinitialization, \
@@ -632,7 +635,9 @@ namespace internal {
T(OptionalChainingNoSuper, "Invalid optional chain from super property") \
T(OptionalChainingNoTemplate, "Invalid tagged template on optional chain") \
/* AggregateError */ \
- T(AllPromisesRejected, "All promises were rejected")
+ T(AllPromisesRejected, "All promises were rejected") \
+ /* Web snapshots */ \
+ T(WebSnapshotError, "Web snapshot failed: %")
enum class MessageTemplate {
#define TEMPLATE(NAME, STRING) k##NAME,
diff --git a/deps/v8/src/common/ptr-compr-inl.h b/deps/v8/src/common/ptr-compr-inl.h
index 86ad167fe7..fc366b2b53 100644
--- a/deps/v8/src/common/ptr-compr-inl.h
+++ b/deps/v8/src/common/ptr-compr-inl.h
@@ -102,7 +102,7 @@ V8_INLINE Address GetPtrComprCageBaseAddress(Address on_heap_addr) {
#endif // V8_COMPRESS_POINTERS
-inline PtrComprCageBase GetPtrComprCageBase(HeapObject object) {
+V8_INLINE PtrComprCageBase GetPtrComprCageBase(HeapObject object) {
return GetPtrComprCageBaseFromOnHeapAddress(object.ptr());
}
diff --git a/deps/v8/src/compiler-dispatcher/lazy-compile-dispatcher.cc b/deps/v8/src/compiler-dispatcher/lazy-compile-dispatcher.cc
index 81c3c077df..bcfb1f6c00 100644
--- a/deps/v8/src/compiler-dispatcher/lazy-compile-dispatcher.cc
+++ b/deps/v8/src/compiler-dispatcher/lazy-compile-dispatcher.cc
@@ -11,13 +11,19 @@
#include "src/base/platform/mutex.h"
#include "src/base/platform/time.h"
#include "src/codegen/compiler.h"
+#include "src/common/globals.h"
#include "src/flags/flags.h"
#include "src/handles/global-handles-inl.h"
+#include "src/heap/parked-scope.h"
#include "src/logging/counters.h"
#include "src/logging/runtime-call-stats-scope.h"
+#include "src/numbers/hash-seed-inl.h"
+#include "src/objects/instance-type.h"
#include "src/objects/objects-inl.h"
#include "src/parsing/parse-info.h"
#include "src/parsing/parser.h"
+#include "src/roots/roots.h"
+#include "src/security/external-pointer.h"
#include "src/tasks/cancelable-task.h"
#include "src/tasks/task-utils.h"
#include "src/zone/zone-list-inl.h" // crbug.com/v8/8816
@@ -25,6 +31,10 @@
namespace v8 {
namespace internal {
+// The maximum amount of time we should allow a single function's FinishNow to
+// spend opportunistically finalizing other finalizable jobs.
+static constexpr int kMaxOpportunisticFinalizeTimeMs = 1;
+
class LazyCompileDispatcher::JobTask : public v8::JobTask {
public:
explicit JobTask(LazyCompileDispatcher* lazy_compile_dispatcher)
@@ -35,16 +45,19 @@ class LazyCompileDispatcher::JobTask : public v8::JobTask {
}
size_t GetMaxConcurrency(size_t worker_count) const final {
- return lazy_compile_dispatcher_->num_jobs_for_background_.load(
+ size_t n = lazy_compile_dispatcher_->num_jobs_for_background_.load(
std::memory_order_relaxed);
+ if (FLAG_lazy_compile_dispatcher_max_threads == 0) return n;
+ return std::min(
+ n, static_cast<size_t>(FLAG_lazy_compile_dispatcher_max_threads));
}
private:
LazyCompileDispatcher* lazy_compile_dispatcher_;
};
-LazyCompileDispatcher::Job::Job(BackgroundCompileTask* task_arg)
- : task(task_arg), has_run(false), aborted(false) {}
+LazyCompileDispatcher::Job::Job(std::unique_ptr<BackgroundCompileTask> task)
+ : task(std::move(task)), state(Job::State::kPending) {}
LazyCompileDispatcher::Job::~Job() = default;
@@ -62,8 +75,6 @@ LazyCompileDispatcher::LazyCompileDispatcher(Isolate* isolate,
max_stack_size_(max_stack_size),
trace_compiler_dispatcher_(FLAG_trace_compiler_dispatcher),
idle_task_manager_(new CancelableTaskManager()),
- next_job_id_(0),
- shared_to_unoptimized_job_id_(isolate->heap()),
idle_task_scheduled_(false),
num_jobs_for_background_(0),
main_thread_blocking_on_job_(nullptr),
@@ -78,86 +89,145 @@ LazyCompileDispatcher::~LazyCompileDispatcher() {
CHECK(!job_handle_->IsValid());
}
-base::Optional<LazyCompileDispatcher::JobId> LazyCompileDispatcher::Enqueue(
- const ParseInfo* outer_parse_info, const AstRawString* function_name,
- const FunctionLiteral* function_literal) {
+namespace {
+
+// If the SharedFunctionInfo's UncompiledData has a job slot, then write into
+// it. Otherwise, allocate a new UncompiledData with a job slot, and then write
+// into that. Since we have two optional slots (preparse data and job), this
+// gets a little messy.
+void SetUncompiledDataJobPointer(LocalIsolate* isolate,
+ Handle<SharedFunctionInfo> shared_info,
+ Address job_address) {
+ UncompiledData uncompiled_data = shared_info->uncompiled_data();
+ switch (uncompiled_data.map(isolate).instance_type()) {
+ // The easy cases -- we already have a job slot, so can write into it and
+ // return.
+ case UNCOMPILED_DATA_WITH_PREPARSE_DATA_AND_JOB_TYPE:
+ UncompiledDataWithPreparseDataAndJob::cast(uncompiled_data)
+ .set_job(job_address);
+ break;
+ case UNCOMPILED_DATA_WITHOUT_PREPARSE_DATA_WITH_JOB_TYPE:
+ UncompiledDataWithoutPreparseDataWithJob::cast(uncompiled_data)
+ .set_job(job_address);
+ break;
+
+ // Otherwise, we'll have to allocate a new UncompiledData (with or without
+ // preparse data as appropriate), set the job pointer on that, and update
+  // the SharedFunctionInfo to use the new UncompiledData.
+ case UNCOMPILED_DATA_WITH_PREPARSE_DATA_TYPE: {
+ Handle<String> inferred_name(uncompiled_data.inferred_name(), isolate);
+ Handle<PreparseData> preparse_data(
+ UncompiledDataWithPreparseData::cast(uncompiled_data).preparse_data(),
+ isolate);
+ Handle<UncompiledDataWithPreparseDataAndJob> new_uncompiled_data =
+ isolate->factory()->NewUncompiledDataWithPreparseDataAndJob(
+ inferred_name, uncompiled_data.start_position(),
+ uncompiled_data.end_position(), preparse_data);
+
+ new_uncompiled_data->set_job(job_address);
+ shared_info->set_uncompiled_data(*new_uncompiled_data);
+ break;
+ }
+ case UNCOMPILED_DATA_WITHOUT_PREPARSE_DATA_TYPE: {
+ DCHECK(uncompiled_data.IsUncompiledDataWithoutPreparseData());
+ Handle<String> inferred_name(uncompiled_data.inferred_name(), isolate);
+ Handle<UncompiledDataWithoutPreparseDataWithJob> new_uncompiled_data =
+ isolate->factory()->NewUncompiledDataWithoutPreparseDataWithJob(
+ inferred_name, uncompiled_data.start_position(),
+ uncompiled_data.end_position());
+
+ new_uncompiled_data->set_job(job_address);
+ shared_info->set_uncompiled_data(*new_uncompiled_data);
+ break;
+ }
+
+ default:
+ UNREACHABLE();
+ }
+}
+
+} // namespace
+
+void LazyCompileDispatcher::Enqueue(
+ LocalIsolate* isolate, Handle<SharedFunctionInfo> shared_info,
+ std::unique_ptr<Utf16CharacterStream> character_stream) {
TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.compile"),
"V8.LazyCompilerDispatcherEnqueue");
- RCS_SCOPE(isolate_, RuntimeCallCounterId::kCompileEnqueueOnDispatcher);
+ RCS_SCOPE(isolate, RuntimeCallCounterId::kCompileEnqueueOnDispatcher);
- std::unique_ptr<Job> job = std::make_unique<Job>(new BackgroundCompileTask(
- outer_parse_info, function_name, function_literal,
+ Job* job = new Job(std::make_unique<BackgroundCompileTask>(
+ isolate_, shared_info, std::move(character_stream),
worker_thread_runtime_call_stats_, background_compile_timer_,
static_cast<int>(max_stack_size_)));
- JobMap::const_iterator it = InsertJob(std::move(job));
- JobId id = it->first;
- if (trace_compiler_dispatcher_) {
- PrintF(
- "LazyCompileDispatcher: enqueued job %zu for function literal id %d\n",
- id, function_literal->function_literal_id());
- }
+
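+  // Store the raw Job pointer in the SharedFunctionInfo's UncompiledData so
+  // that IsEnqueued/GetJobFor can later recover the job without a lookup
+  // table.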
+ SetUncompiledDataJobPointer(isolate, shared_info,
+ reinterpret_cast<Address>(job));
  // Post a background worker task to perform the compilation on the worker
// thread.
{
base::MutexGuard lock(&mutex_);
- pending_background_jobs_.insert(it->second.get());
- num_jobs_for_background_ += 1;
- VerifyBackgroundTaskCount(lock);
+ if (trace_compiler_dispatcher_) {
+ PrintF("LazyCompileDispatcher: enqueued job for ");
+ shared_info->ShortPrint();
+ PrintF("\n");
+ }
+
+#ifdef DEBUG
+ all_jobs_.insert(job);
+#endif
+ pending_background_jobs_.push_back(job);
+ NotifyAddedBackgroundJob(lock);
}
+  // This is done outside NotifyAddedBackgroundJob so that the notification
+  // happens without the mutex held.
job_handle_->NotifyConcurrencyIncrease();
- return base::make_optional(id);
}
bool LazyCompileDispatcher::IsEnqueued(
Handle<SharedFunctionInfo> function) const {
- if (jobs_.empty()) return false;
- return GetJobFor(function) != jobs_.end();
-}
-
-bool LazyCompileDispatcher::IsEnqueued(JobId job_id) const {
- return jobs_.find(job_id) != jobs_.end();
-}
-
-void LazyCompileDispatcher::RegisterSharedFunctionInfo(
- JobId job_id, SharedFunctionInfo function) {
- DCHECK_NE(jobs_.find(job_id), jobs_.end());
-
- if (trace_compiler_dispatcher_) {
- PrintF("LazyCompileDispatcher: registering ");
- function.ShortPrint();
- PrintF(" with job id %zu\n", job_id);
- }
-
- // Make a global handle to the function.
- Handle<SharedFunctionInfo> function_handle = Handle<SharedFunctionInfo>::cast(
- isolate_->global_handles()->Create(function));
-
- // Register mapping.
- auto job_it = jobs_.find(job_id);
- DCHECK_NE(job_it, jobs_.end());
- Job* job = job_it->second.get();
- shared_to_unoptimized_job_id_.Insert(function_handle, job_id);
-
- {
- base::MutexGuard lock(&mutex_);
- job->function = function_handle;
- if (job->IsReadyToFinalize(lock)) {
- // Schedule an idle task to finalize job if it is ready.
- ScheduleIdleTaskFromAnyThread(lock);
- }
+ Job* job = nullptr;
+ Object function_data = function->function_data(kAcquireLoad);
+ if (function_data.IsUncompiledDataWithPreparseDataAndJob()) {
+ job = reinterpret_cast<Job*>(
+ UncompiledDataWithPreparseDataAndJob::cast(function_data).job());
+ } else if (function_data.IsUncompiledDataWithoutPreparseDataWithJob()) {
+ job = reinterpret_cast<Job*>(
+ UncompiledDataWithoutPreparseDataWithJob::cast(function_data).job());
}
+ return job != nullptr;
}
-void LazyCompileDispatcher::WaitForJobIfRunningOnBackground(Job* job) {
+void LazyCompileDispatcher::WaitForJobIfRunningOnBackground(
+ Job* job, const base::MutexGuard& lock) {
TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.compile"),
"V8.LazyCompilerDispatcherWaitForBackgroundJob");
RCS_SCOPE(isolate_, RuntimeCallCounterId::kCompileWaitForDispatcher);
- base::MutexGuard lock(&mutex_);
- if (running_background_jobs_.find(job) == running_background_jobs_.end()) {
- num_jobs_for_background_ -= pending_background_jobs_.erase(job);
- VerifyBackgroundTaskCount(lock);
+ if (!job->is_running_on_background()) {
+ if (job->state == Job::State::kPending) {
+ DCHECK_EQ(std::count(pending_background_jobs_.begin(),
+ pending_background_jobs_.end(), job),
+ 1);
+
+ // TODO(leszeks): Remove from pending jobs without walking the whole
+ // vector.
+ pending_background_jobs_.erase(
+ std::remove(pending_background_jobs_.begin(),
+ pending_background_jobs_.end(), job));
+ job->state = Job::State::kPendingToRunOnForeground;
+ NotifyRemovedBackgroundJob(lock);
+ } else {
+ DCHECK_EQ(job->state, Job::State::kReadyToFinalize);
+ DCHECK_EQ(
+ std::count(finalizable_jobs_.begin(), finalizable_jobs_.end(), job),
+ 1);
+
+ // TODO(leszeks): Remove from finalizable jobs without walking the whole
+ // vector.
+ finalizable_jobs_.erase(
+ std::remove(finalizable_jobs_.begin(), finalizable_jobs_.end(), job));
+ job->state = Job::State::kFinalizingNow;
+ }
return;
}
DCHECK_NULL(main_thread_blocking_on_job_);
@@ -165,8 +235,16 @@ void LazyCompileDispatcher::WaitForJobIfRunningOnBackground(Job* job) {
while (main_thread_blocking_on_job_ != nullptr) {
main_thread_blocking_signal_.Wait(&mutex_);
}
- DCHECK(pending_background_jobs_.find(job) == pending_background_jobs_.end());
- DCHECK(running_background_jobs_.find(job) == running_background_jobs_.end());
+
+ DCHECK_EQ(job->state, Job::State::kReadyToFinalize);
+ DCHECK_EQ(std::count(finalizable_jobs_.begin(), finalizable_jobs_.end(), job),
+ 1);
+
+ // TODO(leszeks): Remove from finalizable jobs without walking the whole
+ // vector.
+ finalizable_jobs_.erase(
+ std::remove(finalizable_jobs_.begin(), finalizable_jobs_.end(), job));
+ job->state = Job::State::kFinalizingNow;
}
bool LazyCompileDispatcher::FinishNow(Handle<SharedFunctionInfo> function) {
@@ -179,42 +257,85 @@ bool LazyCompileDispatcher::FinishNow(Handle<SharedFunctionInfo> function) {
PrintF(" now\n");
}
- JobMap::const_iterator it = GetJobFor(function);
- CHECK(it != jobs_.end());
- Job* job = it->second.get();
- WaitForJobIfRunningOnBackground(job);
+ Job* job;
- if (!job->has_run) {
+ {
+ base::MutexGuard lock(&mutex_);
+ job = GetJobFor(function, lock);
+ WaitForJobIfRunningOnBackground(job, lock);
+ }
+
+ if (job->state == Job::State::kPendingToRunOnForeground) {
job->task->Run();
- job->has_run = true;
+ job->state = Job::State::kFinalizingNow;
+ }
+
+ if (DEBUG_BOOL) {
+ base::MutexGuard lock(&mutex_);
+ DCHECK_EQ(std::count(pending_background_jobs_.begin(),
+ pending_background_jobs_.end(), job),
+ 0);
+ DCHECK_EQ(
+ std::count(finalizable_jobs_.begin(), finalizable_jobs_.end(), job), 0);
+ DCHECK_EQ(job->state, Job::State::kFinalizingNow);
}
- DCHECK(job->IsReadyToFinalize(&mutex_));
- DCHECK(!job->aborted);
bool success = Compiler::FinalizeBackgroundCompileTask(
- job->task.get(), function, isolate_, Compiler::KEEP_EXCEPTION);
+ job->task.get(), isolate_, Compiler::KEEP_EXCEPTION);
+ job->state = Job::State::kFinalized;
DCHECK_NE(success, isolate_->has_pending_exception());
- RemoveJob(it);
+ DeleteJob(job);
+
+ // Opportunistically finalize all other jobs for a maximum time of
+ // kMaxOpportunisticFinalizeTimeMs.
+ double deadline_in_seconds = platform_->MonotonicallyIncreasingTime() +
+ kMaxOpportunisticFinalizeTimeMs / 1000.0;
+ while (deadline_in_seconds > platform_->MonotonicallyIncreasingTime()) {
+ if (!FinalizeSingleJob()) break;
+ }
+
return success;
}
-void LazyCompileDispatcher::AbortJob(JobId job_id) {
+void LazyCompileDispatcher::AbortJob(Handle<SharedFunctionInfo> shared_info) {
if (trace_compiler_dispatcher_) {
- PrintF("LazyCompileDispatcher: aborted job %zu\n", job_id);
+ PrintF("LazyCompileDispatcher: aborting job for ");
+ shared_info->ShortPrint();
+ PrintF("\n");
}
- JobMap::const_iterator job_it = jobs_.find(job_id);
- Job* job = job_it->second.get();
-
base::LockGuard<base::Mutex> lock(&mutex_);
- num_jobs_for_background_ -= pending_background_jobs_.erase(job);
- VerifyBackgroundTaskCount(lock);
- if (running_background_jobs_.find(job) == running_background_jobs_.end()) {
- RemoveJob(job_it);
- } else {
+
+ Job* job = GetJobFor(shared_info, lock);
+ if (job->is_running_on_background()) {
// Job is currently running on the background thread, wait until it's done
// and remove job then.
- job->aborted = true;
+ job->state = Job::State::kAbortRequested;
+ } else {
+ if (job->state == Job::State::kPending) {
+ DCHECK_EQ(std::count(pending_background_jobs_.begin(),
+ pending_background_jobs_.end(), job),
+ 1);
+
+ pending_background_jobs_.erase(
+ std::remove(pending_background_jobs_.begin(),
+ pending_background_jobs_.end(), job));
+ job->state = Job::State::kAbortingNow;
+ NotifyRemovedBackgroundJob(lock);
+ } else if (job->state == Job::State::kReadyToFinalize) {
+ DCHECK_EQ(
+ std::count(finalizable_jobs_.begin(), finalizable_jobs_.end(), job),
+ 1);
+
+ finalizable_jobs_.erase(
+ std::remove(finalizable_jobs_.begin(), finalizable_jobs_.end(), job));
+ job->state = Job::State::kAbortingNow;
+ } else {
+ UNREACHABLE();
+ }
+ job->task->AbortFunction();
+ job->state = Job::State::kFinalized;
+ DeleteJob(job, lock);
}
}
@@ -224,23 +345,42 @@ void LazyCompileDispatcher::AbortAll() {
{
base::MutexGuard lock(&mutex_);
- DCHECK(running_background_jobs_.empty());
+ for (Job* job : pending_background_jobs_) {
+ job->task->AbortFunction();
+ job->state = Job::State::kFinalized;
+ DeleteJob(job, lock);
+ }
pending_background_jobs_.clear();
+ for (Job* job : finalizable_jobs_) {
+ job->task->AbortFunction();
+ job->state = Job::State::kFinalized;
+ DeleteJob(job, lock);
+ }
+ finalizable_jobs_.clear();
+ for (Job* job : jobs_to_dispose_) {
+ delete job;
+ }
+ jobs_to_dispose_.clear();
+
+ DCHECK_EQ(all_jobs_.size(), 0);
+ num_jobs_for_background_ = 0;
+ VerifyBackgroundTaskCount(lock);
}
- jobs_.clear();
- shared_to_unoptimized_job_id_.Clear();
idle_task_manager_->CancelAndWait();
}
-LazyCompileDispatcher::JobMap::const_iterator LazyCompileDispatcher::GetJobFor(
- Handle<SharedFunctionInfo> shared) const {
- JobId* job_id_ptr = shared_to_unoptimized_job_id_.Find(shared);
- JobMap::const_iterator job = jobs_.end();
- if (job_id_ptr) {
- job = jobs_.find(*job_id_ptr);
+LazyCompileDispatcher::Job* LazyCompileDispatcher::GetJobFor(
+ Handle<SharedFunctionInfo> shared, const base::MutexGuard&) const {
+ Object function_data = shared->function_data(kAcquireLoad);
+ if (function_data.IsUncompiledDataWithPreparseDataAndJob()) {
+ return reinterpret_cast<Job*>(
+ UncompiledDataWithPreparseDataAndJob::cast(function_data).job());
+ } else if (function_data.IsUncompiledDataWithoutPreparseDataWithJob()) {
+ return reinterpret_cast<Job*>(
+ UncompiledDataWithoutPreparseDataWithJob::cast(function_data).job());
}
- return job;
+ return nullptr;
}
void LazyCompileDispatcher::ScheduleIdleTaskFromAnyThread(
@@ -259,19 +399,29 @@ void LazyCompileDispatcher::ScheduleIdleTaskFromAnyThread(
void LazyCompileDispatcher::DoBackgroundWork(JobDelegate* delegate) {
TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.compile"),
"V8.LazyCompileDispatcherDoBackgroundWork");
+
+ WorkerThreadRuntimeCallStatsScope worker_thread_scope(
+ worker_thread_runtime_call_stats_);
+
+ LocalIsolate isolate(isolate_, ThreadKind::kBackground,
+ worker_thread_scope.Get());
+ UnparkedScope unparked_scope(&isolate);
+ LocalHandleScope handle_scope(&isolate);
+
+ ReusableUnoptimizedCompileState reusable_state(&isolate);
+
while (!delegate->ShouldYield()) {
Job* job = nullptr;
{
base::MutexGuard lock(&mutex_);
- if (!pending_background_jobs_.empty()) {
- auto it = pending_background_jobs_.begin();
- job = *it;
- pending_background_jobs_.erase(it);
- running_background_jobs_.insert(job);
- VerifyBackgroundTaskCount(lock);
- }
+
+ if (pending_background_jobs_.empty()) break;
+ job = pending_background_jobs_.back();
+ pending_background_jobs_.pop_back();
+ DCHECK_EQ(job->state, Job::State::kPending);
+
+ job->state = Job::State::kRunning;
}
- if (job == nullptr) break;
if (V8_UNLIKELY(block_for_testing_.Value())) {
block_for_testing_.SetValue(false);
@@ -282,31 +432,87 @@ void LazyCompileDispatcher::DoBackgroundWork(JobDelegate* delegate) {
PrintF("LazyCompileDispatcher: doing background work\n");
}
- job->task->Run();
+ job->task->Run(&isolate, &reusable_state);
{
base::MutexGuard lock(&mutex_);
- num_jobs_for_background_ -= running_background_jobs_.erase(job);
- VerifyBackgroundTaskCount(lock);
-
- job->has_run = true;
- if (job->IsReadyToFinalize(lock)) {
+ if (job->state == Job::State::kRunning) {
+ job->state = Job::State::kReadyToFinalize;
// Schedule an idle task to finalize the compilation on the main thread
// if the job has a shared function info registered.
- ScheduleIdleTaskFromAnyThread(lock);
+ } else {
+ DCHECK_EQ(job->state, Job::State::kAbortRequested);
+ job->state = Job::State::kAborted;
}
+ finalizable_jobs_.push_back(job);
+ NotifyRemovedBackgroundJob(lock);
if (main_thread_blocking_on_job_ == job) {
main_thread_blocking_on_job_ = nullptr;
main_thread_blocking_signal_.NotifyOne();
+ } else {
+ ScheduleIdleTaskFromAnyThread(lock);
+ }
+ }
+ }
+
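+  // Dispose of any jobs that DeleteJob queued up for deletion on a
+  // background thread.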
+ while (!delegate->ShouldYield()) {
+ Job* job = nullptr;
+ {
+ base::MutexGuard lock(&job_dispose_mutex_);
+ if (jobs_to_dispose_.empty()) break;
+ job = jobs_to_dispose_.back();
+ jobs_to_dispose_.pop_back();
+ if (jobs_to_dispose_.empty()) {
+ num_jobs_for_background_--;
}
}
+ delete job;
}
// Don't touch |this| anymore after this point, as it might have been
// deleted.
}
+LazyCompileDispatcher::Job* LazyCompileDispatcher::PopSingleFinalizeJob() {
+ base::MutexGuard lock(&mutex_);
+
+ if (finalizable_jobs_.empty()) return nullptr;
+
+ Job* job = finalizable_jobs_.back();
+ finalizable_jobs_.pop_back();
+ DCHECK(job->state == Job::State::kReadyToFinalize ||
+ job->state == Job::State::kAborted);
+ if (job->state == Job::State::kReadyToFinalize) {
+ job->state = Job::State::kFinalizingNow;
+ } else {
+ DCHECK_EQ(job->state, Job::State::kAborted);
+ job->state = Job::State::kAbortingNow;
+ }
+ return job;
+}
+
+bool LazyCompileDispatcher::FinalizeSingleJob() {
+ Job* job = PopSingleFinalizeJob();
+ if (job == nullptr) return false;
+
+ if (trace_compiler_dispatcher_) {
+ PrintF("LazyCompileDispatcher: idle finalizing job\n");
+ }
+
+ if (job->state == Job::State::kFinalizingNow) {
+ HandleScope scope(isolate_);
+ Compiler::FinalizeBackgroundCompileTask(job->task.get(), isolate_,
+ Compiler::CLEAR_EXCEPTION);
+ } else {
+ DCHECK_EQ(job->state, Job::State::kAbortingNow);
+ job->task->AbortFunction();
+ }
+ job->state = Job::State::kFinalized;
+ DeleteJob(job);
+ return true;
+}
+
void LazyCompileDispatcher::DoIdleWork(double deadline_in_seconds) {
TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.compile"),
"V8.LazyCompilerDispatcherDoIdleWork");
@@ -322,30 +528,8 @@ void LazyCompileDispatcher::DoIdleWork(double deadline_in_seconds) {
}
while (deadline_in_seconds > platform_->MonotonicallyIncreasingTime()) {
    // Find a job which is pending finalization, if any, and finalize it.
- LazyCompileDispatcher::JobMap::const_iterator it;
- {
- base::MutexGuard lock(&mutex_);
- for (it = jobs_.cbegin(); it != jobs_.cend(); ++it) {
- if (it->second->IsReadyToFinalize(lock)) break;
- }
- // Since we hold the lock here, we can be sure no jobs have become ready
- // for finalization while we looped through the list.
- if (it == jobs_.cend()) return;
-
- DCHECK(it->second->IsReadyToFinalize(lock));
- DCHECK_EQ(running_background_jobs_.find(it->second.get()),
- running_background_jobs_.end());
- DCHECK_EQ(pending_background_jobs_.find(it->second.get()),
- pending_background_jobs_.end());
- }
-
- Job* job = it->second.get();
- if (!job->aborted) {
- Compiler::FinalizeBackgroundCompileTask(
- job->task.get(), job->function.ToHandleChecked(), isolate_,
- Compiler::CLEAR_EXCEPTION);
- }
- RemoveJob(it);
+ auto there_was_a_job = FinalizeSingleJob();
+ if (!there_was_a_job) return;
}
// We didn't return above so there still might be jobs to finalize.
@@ -355,37 +539,61 @@ void LazyCompileDispatcher::DoIdleWork(double deadline_in_seconds) {
}
}
-LazyCompileDispatcher::JobMap::const_iterator LazyCompileDispatcher::InsertJob(
- std::unique_ptr<Job> job) {
- bool added;
- JobMap::const_iterator it;
- std::tie(it, added) =
- jobs_.insert(std::make_pair(next_job_id_++, std::move(job)));
- DCHECK(added);
- return it;
+void LazyCompileDispatcher::DeleteJob(Job* job) {
+ DCHECK(job->state == Job::State::kFinalized);
+#ifdef DEBUG
+ {
+ base::MutexGuard lock(&mutex_);
+ all_jobs_.erase(job);
+ }
+#endif
+ delete job;
}
-LazyCompileDispatcher::JobMap::const_iterator LazyCompileDispatcher::RemoveJob(
- LazyCompileDispatcher::JobMap::const_iterator it) {
- Job* job = it->second.get();
-
- DCHECK_EQ(running_background_jobs_.find(job), running_background_jobs_.end());
- DCHECK_EQ(pending_background_jobs_.find(job), pending_background_jobs_.end());
-
- // Delete SFI associated with job if its been registered.
- Handle<SharedFunctionInfo> function;
- if (job->function.ToHandle(&function)) {
- GlobalHandles::Destroy(function.location());
+void LazyCompileDispatcher::DeleteJob(Job* job, const base::MutexGuard&) {
+ DCHECK(job->state == Job::State::kFinalized);
+#ifdef DEBUG
+ all_jobs_.erase(job);
+#endif
+ base::MutexGuard lock(&job_dispose_mutex_);
+ jobs_to_dispose_.push_back(job);
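+  // The dispose queue counts as a single background job while it is
+  // non-empty, so that DoBackgroundWork keeps running to delete the queued
+  // jobs; the count is dropped again once the queue is drained there.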
+ if (jobs_to_dispose_.size() == 1) {
+ num_jobs_for_background_++;
}
-
- // Delete job.
- return jobs_.erase(it);
}
#ifdef DEBUG
void LazyCompileDispatcher::VerifyBackgroundTaskCount(const base::MutexGuard&) {
+ size_t pending_jobs = 0;
+ size_t running_jobs = 0;
+ size_t finalizable_jobs = 0;
+
+ for (Job* job : all_jobs_) {
+ switch (job->state) {
+ case Job::State::kPending:
+ pending_jobs++;
+ break;
+ case Job::State::kRunning:
+ case Job::State::kAbortRequested:
+ running_jobs++;
+ break;
+ case Job::State::kReadyToFinalize:
+ case Job::State::kAborted:
+ finalizable_jobs++;
+ break;
+ case Job::State::kPendingToRunOnForeground:
+ case Job::State::kFinalizingNow:
+ case Job::State::kAbortingNow:
+ case Job::State::kFinalized:
+ // Ignore.
+ break;
+ }
+ }
+
+ CHECK_EQ(pending_background_jobs_.size(), pending_jobs);
+ CHECK_EQ(finalizable_jobs_.size(), finalizable_jobs);
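+  // A non-empty dispose queue is counted as one extra background job; see
+  // DeleteJob(Job*, const base::MutexGuard&).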
CHECK_EQ(num_jobs_for_background_.load(),
- running_background_jobs_.size() + pending_background_jobs_.size());
+ pending_jobs + running_jobs + (jobs_to_dispose_.empty() ? 0 : 1));
}
#endif
diff --git a/deps/v8/src/compiler-dispatcher/lazy-compile-dispatcher.h b/deps/v8/src/compiler-dispatcher/lazy-compile-dispatcher.h
index 43c4f6c913..423e700e7f 100644
--- a/deps/v8/src/compiler-dispatcher/lazy-compile-dispatcher.h
+++ b/deps/v8/src/compiler-dispatcher/lazy-compile-dispatcher.h
@@ -20,6 +20,7 @@
#include "src/common/globals.h"
#include "src/handles/maybe-handles.h"
#include "src/utils/identity-map.h"
+#include "src/utils/locked-queue.h"
#include "testing/gtest/include/gtest/gtest_prod.h" // nogncheck
namespace v8 {
@@ -34,11 +35,14 @@ class AstValueFactory;
class BackgroundCompileTask;
class CancelableTaskManager;
class UnoptimizedCompileJob;
+class UnoptimizedCompileState;
class FunctionLiteral;
class Isolate;
class ParseInfo;
+class ProducedPreparseData;
class SharedFunctionInfo;
class TimedHistogram;
+class Utf16CharacterStream;
class WorkerThreadRuntimeCallStats;
class Zone;
@@ -81,15 +85,8 @@ class V8_EXPORT_PRIVATE LazyCompileDispatcher {
LazyCompileDispatcher& operator=(const LazyCompileDispatcher&) = delete;
~LazyCompileDispatcher();
- base::Optional<JobId> Enqueue(const ParseInfo* outer_parse_info,
- const AstRawString* function_name,
- const FunctionLiteral* function_literal);
-
- // Registers the given |function| with the compilation job |job_id|.
- void RegisterSharedFunctionInfo(JobId job_id, SharedFunctionInfo function);
-
- // Returns true if there is a pending job with the given id.
- bool IsEnqueued(JobId job_id) const;
+ void Enqueue(LocalIsolate* isolate, Handle<SharedFunctionInfo> shared_info,
+ std::unique_ptr<Utf16CharacterStream> character_stream);
// Returns true if there is a pending job registered for the given function.
bool IsEnqueued(Handle<SharedFunctionInfo> function) const;
@@ -98,56 +95,87 @@ class V8_EXPORT_PRIVATE LazyCompileDispatcher {
// possible). Returns true if the compile job was successful.
bool FinishNow(Handle<SharedFunctionInfo> function);
- // Aborts compilation job |job_id|.
- void AbortJob(JobId job_id);
+ // Aborts compilation job for the given function.
+ void AbortJob(Handle<SharedFunctionInfo> function);
// Aborts all jobs, blocking until all jobs are aborted.
void AbortAll();
private:
- FRIEND_TEST(LazyCompilerDispatcherTest, IdleTaskNoIdleTime);
- FRIEND_TEST(LazyCompilerDispatcherTest, IdleTaskSmallIdleTime);
- FRIEND_TEST(LazyCompilerDispatcherTest, FinishNowWithWorkerTask);
- FRIEND_TEST(LazyCompilerDispatcherTest, AbortJobNotStarted);
- FRIEND_TEST(LazyCompilerDispatcherTest, AbortJobAlreadyStarted);
- FRIEND_TEST(LazyCompilerDispatcherTest, AsyncAbortAllPendingWorkerTask);
- FRIEND_TEST(LazyCompilerDispatcherTest, AsyncAbortAllRunningWorkerTask);
- FRIEND_TEST(LazyCompilerDispatcherTest, CompileMultipleOnBackgroundThread);
+ FRIEND_TEST(LazyCompileDispatcherTest, IdleTaskNoIdleTime);
+ FRIEND_TEST(LazyCompileDispatcherTest, IdleTaskSmallIdleTime);
+ FRIEND_TEST(LazyCompileDispatcherTest, FinishNowWithWorkerTask);
+ FRIEND_TEST(LazyCompileDispatcherTest, AbortJobNotStarted);
+ FRIEND_TEST(LazyCompileDispatcherTest, AbortJobAlreadyStarted);
+ FRIEND_TEST(LazyCompileDispatcherTest, AsyncAbortAllPendingWorkerTask);
+ FRIEND_TEST(LazyCompileDispatcherTest, AsyncAbortAllRunningWorkerTask);
+ FRIEND_TEST(LazyCompileDispatcherTest, CompileMultipleOnBackgroundThread);
// JobTask for PostJob API.
class JobTask;
struct Job {
- explicit Job(BackgroundCompileTask* task_arg);
+ enum class State {
+ // Background thread states (Enqueue + DoBackgroundWork)
+ // ---
+
+ // In the pending task queue.
+ kPending,
+ // Currently running on a background thread.
+ kRunning,
+ kAbortRequested, // ... but we want to drop the result.
+ // In the finalizable task queue.
+ kReadyToFinalize,
+ kAborted,
+
+ // Main thread states (FinishNow and FinalizeSingleJob)
+ // ---
+
+ // Popped off the pending task queue.
+ kPendingToRunOnForeground,
+ // Popped off the finalizable task queue.
+ kFinalizingNow,
+      kAbortingNow,  // ... and we want to abort.
+
+ // Finished finalizing, ready for deletion.
+ kFinalized,
+ };
+
+ explicit Job(std::unique_ptr<BackgroundCompileTask> task);
~Job();
- bool IsReadyToFinalize(const base::MutexGuard&) {
- return has_run && (!function.is_null() || aborted);
- }
-
- bool IsReadyToFinalize(base::Mutex* mutex) {
- base::MutexGuard lock(mutex);
- return IsReadyToFinalize(lock);
+ bool is_running_on_background() const {
+ return state == State::kRunning || state == State::kAbortRequested;
}
std::unique_ptr<BackgroundCompileTask> task;
- MaybeHandle<SharedFunctionInfo> function;
- bool has_run;
- bool aborted;
+ State state = State::kPending;
};
- using JobMap = std::map<JobId, std::unique_ptr<Job>>;
- using SharedToJobIdMap = IdentityMap<JobId, FreeStoreAllocationPolicy>;
+ using SharedToJobMap = IdentityMap<Job*, FreeStoreAllocationPolicy>;
- void WaitForJobIfRunningOnBackground(Job* job);
- JobMap::const_iterator GetJobFor(Handle<SharedFunctionInfo> shared) const;
+ void WaitForJobIfRunningOnBackground(Job* job, const base::MutexGuard&);
+ Job* GetJobFor(Handle<SharedFunctionInfo> shared,
+ const base::MutexGuard&) const;
+ Job* PopSingleFinalizeJob();
void ScheduleIdleTaskFromAnyThread(const base::MutexGuard&);
+ bool FinalizeSingleJob();
void DoBackgroundWork(JobDelegate* delegate);
void DoIdleWork(double deadline_in_seconds);
- // Returns iterator to the inserted job.
- JobMap::const_iterator InsertJob(std::unique_ptr<Job> job);
- // Returns iterator following the removed job.
- JobMap::const_iterator RemoveJob(JobMap::const_iterator job);
+
+ // DeleteJob without the mutex held.
+ void DeleteJob(Job* job);
+ // DeleteJob with the mutex already held.
+ void DeleteJob(Job* job, const base::MutexGuard&);
+
+ void NotifyAddedBackgroundJob(const base::MutexGuard& lock) {
+ ++num_jobs_for_background_;
+ VerifyBackgroundTaskCount(lock);
+ }
+ void NotifyRemovedBackgroundJob(const base::MutexGuard& lock) {
+ --num_jobs_for_background_;
+ VerifyBackgroundTaskCount(lock);
+ }
#ifdef DEBUG
void VerifyBackgroundTaskCount(const base::MutexGuard&);
@@ -169,37 +197,37 @@ class V8_EXPORT_PRIVATE LazyCompileDispatcher {
std::unique_ptr<CancelableTaskManager> idle_task_manager_;
- // Id for next job to be added
- JobId next_job_id_;
-
- // Mapping from job_id to job.
- JobMap jobs_;
-
- // Mapping from SharedFunctionInfo to the corresponding unoptimized
- // compilation's JobId;
- SharedToJobIdMap shared_to_unoptimized_job_id_;
-
// The following members can be accessed from any thread. Methods need to hold
// the mutex |mutex_| while accessing them.
- base::Mutex mutex_;
+ mutable base::Mutex mutex_;
// True if an idle task is scheduled to be run.
bool idle_task_scheduled_;
// The set of jobs that can be run on a background thread.
- std::unordered_set<Job*> pending_background_jobs_;
+ std::vector<Job*> pending_background_jobs_;
- // The set of jobs currently being run on background threads.
- std::unordered_set<Job*> running_background_jobs_;
+ // The set of jobs that can be finalized on the main thread.
+ std::vector<Job*> finalizable_jobs_;
- // The total number of jobs, pending and running.
+  // The total number of jobs ready to execute in the background, both those
+  // pending and those currently running.
std::atomic<size_t> num_jobs_for_background_;
+#ifdef DEBUG
+ // The set of all allocated jobs, used for verification of the various queues
+ // and counts.
+ std::unordered_set<Job*> all_jobs_;
+#endif
+
// If not nullptr, then the main thread waits for the task processing
// this job, and blocks on the ConditionVariable main_thread_blocking_signal_.
Job* main_thread_blocking_on_job_;
base::ConditionVariable main_thread_blocking_signal_;
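+  // Jobs that have been finalized or aborted and are queued here for deletion
+  // on a background thread (see the dispose loop in DoBackgroundWork).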
+ mutable base::Mutex job_dispose_mutex_;
+ std::vector<Job*> jobs_to_dispose_;
+
// Test support.
base::AtomicValue<bool> block_for_testing_;
base::Semaphore semaphore_for_testing_;
diff --git a/deps/v8/src/compiler/access-builder.cc b/deps/v8/src/compiler/access-builder.cc
index fda0727dd1..f929b98b0c 100644
--- a/deps/v8/src/compiler/access-builder.cc
+++ b/deps/v8/src/compiler/access-builder.cc
@@ -421,8 +421,13 @@ FieldAccess AccessBuilder::ForJSTypedArrayExternalPointer() {
JSTypedArray::kExternalPointerOffset,
MaybeHandle<Name>(),
MaybeHandle<Map>(),
+#ifdef V8_CAGED_POINTERS
+ Type::CagedPointer(),
+ MachineType::CagedPointer(),
+#else
Type::ExternalPointer(),
MachineType::Pointer(),
+#endif
kNoWriteBarrier,
ConstFieldInfo::None(),
false,
@@ -437,8 +442,13 @@ FieldAccess AccessBuilder::ForJSDataViewDataPointer() {
JSDataView::kDataPointerOffset,
MaybeHandle<Name>(),
MaybeHandle<Map>(),
+#ifdef V8_CAGED_POINTERS
+ Type::CagedPointer(),
+ MachineType::CagedPointer(),
+#else
Type::ExternalPointer(),
MachineType::Pointer(),
+#endif
kNoWriteBarrier,
ConstFieldInfo::None(),
false,
diff --git a/deps/v8/src/compiler/access-info.cc b/deps/v8/src/compiler/access-info.cc
index 25474fb91a..35a7838004 100644
--- a/deps/v8/src/compiler/access-info.cc
+++ b/deps/v8/src/compiler/access-info.cc
@@ -431,8 +431,9 @@ bool AccessInfoFactory::ComputeElementAccessInfos(
}
PropertyAccessInfo AccessInfoFactory::ComputeDataFieldAccessInfo(
- MapRef receiver_map, MapRef map, base::Optional<JSObjectRef> holder,
- InternalIndex descriptor, AccessMode access_mode) const {
+ MapRef receiver_map, MapRef map, NameRef name,
+ base::Optional<JSObjectRef> holder, InternalIndex descriptor,
+ AccessMode access_mode) const {
DCHECK(descriptor.is_found());
// TODO(jgruber,v8:7790): Use DescriptorArrayRef instead.
Handle<DescriptorArray> descriptors = map.instance_descriptors().object();
@@ -449,7 +450,10 @@ PropertyAccessInfo AccessInfoFactory::ComputeDataFieldAccessInfo(
}
FieldIndex field_index = FieldIndex::ForPropertyIndex(*map.object(), index,
details_representation);
- Type field_type = Type::NonInternal();
+ // Private brands are used when loading private methods, which are stored in a
+ // BlockContext, an internal object.
+ Type field_type = name.object()->IsPrivateBrand() ? Type::OtherInternal()
+ : Type::NonInternal();
base::Optional<MapRef> field_map;
ZoneVector<CompilationDependency const*> unrecorded_dependencies(zone());
@@ -797,7 +801,7 @@ PropertyAccessInfo AccessInfoFactory::ComputePropertyAccessInfo(
// Don't bother optimizing stores to read-only properties.
if (details.IsReadOnly()) return Invalid();
- if (details.kind() == kData && holder.has_value()) {
+ if (details.kind() == PropertyKind::kData && holder.has_value()) {
// This is a store to a property not found on the receiver but on a
// prototype. According to ES6 section 9.1.9 [[Set]], we need to
// create a new data property on the receiver. We can still optimize
@@ -841,17 +845,17 @@ PropertyAccessInfo AccessInfoFactory::ComputePropertyAccessInfo(
return Invalid();
}
if (details.location() == PropertyLocation::kField) {
- if (details.kind() == kData) {
- return ComputeDataFieldAccessInfo(receiver_map, map, holder, index,
- access_mode);
+ if (details.kind() == PropertyKind::kData) {
+ return ComputeDataFieldAccessInfo(receiver_map, map, name, holder,
+ index, access_mode);
} else {
- DCHECK_EQ(kAccessor, details.kind());
+ DCHECK_EQ(PropertyKind::kAccessor, details.kind());
// TODO(turbofan): Add support for general accessors?
return Invalid();
}
} else {
DCHECK_EQ(PropertyLocation::kDescriptor, details.location());
- DCHECK_EQ(kAccessor, details.kind());
+ DCHECK_EQ(PropertyKind::kAccessor, details.kind());
return ComputeAccessorDescriptorAccessInfo(receiver_map, name, map,
holder, index, access_mode);
}
@@ -1124,9 +1128,10 @@ PropertyAccessInfo AccessInfoFactory::LookupTransition(
MapRef map, NameRef name, base::Optional<JSObjectRef> holder,
PropertyAttributes attrs) const {
// Check if the {map} has a data transition with the given {name}.
- Map transition = TransitionsAccessor(isolate(), map.object(),
- broker()->is_concurrent_inlining())
- .SearchTransition(*name.object(), kData, attrs);
+ Map transition =
+ TransitionsAccessor(isolate(), map.object(),
+ broker()->is_concurrent_inlining())
+ .SearchTransition(*name.object(), PropertyKind::kData, attrs);
if (transition.is_null()) return Invalid();
base::Optional<MapRef> maybe_transition_map =
diff --git a/deps/v8/src/compiler/access-info.h b/deps/v8/src/compiler/access-info.h
index e1865a96ed..827c253e1f 100644
--- a/deps/v8/src/compiler/access-info.h
+++ b/deps/v8/src/compiler/access-info.h
@@ -289,8 +289,9 @@ class AccessInfoFactory final {
base::Optional<JSObjectRef> holder,
PropertyAttributes attrs) const;
PropertyAccessInfo ComputeDataFieldAccessInfo(
- MapRef receiver_map, MapRef map, base::Optional<JSObjectRef> holder,
- InternalIndex descriptor, AccessMode access_mode) const;
+ MapRef receiver_map, MapRef map, NameRef name,
+ base::Optional<JSObjectRef> holder, InternalIndex descriptor,
+ AccessMode access_mode) const;
PropertyAccessInfo ComputeAccessorDescriptorAccessInfo(
MapRef receiver_map, NameRef name, MapRef map,
base::Optional<JSObjectRef> holder, InternalIndex descriptor,
diff --git a/deps/v8/src/compiler/backend/arm64/instruction-selector-arm64.cc b/deps/v8/src/compiler/backend/arm64/instruction-selector-arm64.cc
index 31aafbcb97..7f34b6594e 100644
--- a/deps/v8/src/compiler/backend/arm64/instruction-selector-arm64.cc
+++ b/deps/v8/src/compiler/backend/arm64/instruction-selector-arm64.cc
@@ -840,13 +840,9 @@ void InstructionSelector::VisitLoad(Node* node) {
immediate_mode = kLoadStoreImm64;
break;
case MachineRepresentation::kCagedPointer:
-#ifdef V8_CAGED_POINTERS
opcode = kArm64LdrDecodeCagedPointer;
immediate_mode = kLoadStoreImm64;
break;
-#else
- UNREACHABLE();
-#endif
case MachineRepresentation::kSimd128:
opcode = kArm64LdrQ;
immediate_mode = kNoImmediate;
@@ -948,13 +944,9 @@ void InstructionSelector::VisitStore(Node* node) {
COMPRESS_POINTERS_BOOL ? kLoadStoreImm32 : kLoadStoreImm64;
break;
case MachineRepresentation::kCagedPointer:
-#ifdef V8_CAGED_POINTERS
opcode = kArm64StrEncodeCagedPointer;
immediate_mode = kLoadStoreImm64;
break;
-#else
- UNREACHABLE();
-#endif
case MachineRepresentation::kWord64:
opcode = kArm64Str;
immediate_mode = kLoadStoreImm64;
@@ -3474,28 +3466,32 @@ void InstructionSelector::VisitInt64AbsWithOverflow(Node* node) {
UNREACHABLE();
}
-#define SIMD_UNOP_LIST(V) \
- V(F64x2ConvertLowI32x4S, kArm64F64x2ConvertLowI32x4S) \
- V(F64x2ConvertLowI32x4U, kArm64F64x2ConvertLowI32x4U) \
- V(F64x2PromoteLowF32x4, kArm64F64x2PromoteLowF32x4) \
- V(F32x4SConvertI32x4, kArm64F32x4SConvertI32x4) \
- V(F32x4UConvertI32x4, kArm64F32x4UConvertI32x4) \
- V(F32x4RecipApprox, kArm64F32x4RecipApprox) \
- V(F32x4RecipSqrtApprox, kArm64F32x4RecipSqrtApprox) \
- V(F32x4DemoteF64x2Zero, kArm64F32x4DemoteF64x2Zero) \
- V(I64x2BitMask, kArm64I64x2BitMask) \
- V(I32x4SConvertF32x4, kArm64I32x4SConvertF32x4) \
- V(I32x4UConvertF32x4, kArm64I32x4UConvertF32x4) \
- V(I32x4BitMask, kArm64I32x4BitMask) \
- V(I32x4TruncSatF64x2SZero, kArm64I32x4TruncSatF64x2SZero) \
- V(I32x4TruncSatF64x2UZero, kArm64I32x4TruncSatF64x2UZero) \
- V(I16x8BitMask, kArm64I16x8BitMask) \
- V(I8x16BitMask, kArm64I8x16BitMask) \
- V(S128Not, kArm64S128Not) \
- V(V128AnyTrue, kArm64V128AnyTrue) \
- V(I64x2AllTrue, kArm64I64x2AllTrue) \
- V(I32x4AllTrue, kArm64I32x4AllTrue) \
- V(I16x8AllTrue, kArm64I16x8AllTrue) \
+#define SIMD_UNOP_LIST(V) \
+ V(F64x2ConvertLowI32x4S, kArm64F64x2ConvertLowI32x4S) \
+ V(F64x2ConvertLowI32x4U, kArm64F64x2ConvertLowI32x4U) \
+ V(F64x2PromoteLowF32x4, kArm64F64x2PromoteLowF32x4) \
+ V(F32x4SConvertI32x4, kArm64F32x4SConvertI32x4) \
+ V(F32x4UConvertI32x4, kArm64F32x4UConvertI32x4) \
+ V(F32x4RecipApprox, kArm64F32x4RecipApprox) \
+ V(F32x4RecipSqrtApprox, kArm64F32x4RecipSqrtApprox) \
+ V(F32x4DemoteF64x2Zero, kArm64F32x4DemoteF64x2Zero) \
+ V(I64x2BitMask, kArm64I64x2BitMask) \
+ V(I32x4SConvertF32x4, kArm64I32x4SConvertF32x4) \
+ V(I32x4UConvertF32x4, kArm64I32x4UConvertF32x4) \
+ V(I32x4RelaxedTruncF32x4S, kArm64I32x4SConvertF32x4) \
+ V(I32x4RelaxedTruncF32x4U, kArm64I32x4UConvertF32x4) \
+ V(I32x4BitMask, kArm64I32x4BitMask) \
+ V(I32x4TruncSatF64x2SZero, kArm64I32x4TruncSatF64x2SZero) \
+ V(I32x4TruncSatF64x2UZero, kArm64I32x4TruncSatF64x2UZero) \
+ V(I32x4RelaxedTruncF64x2SZero, kArm64I32x4TruncSatF64x2SZero) \
+ V(I32x4RelaxedTruncF64x2UZero, kArm64I32x4TruncSatF64x2UZero) \
+ V(I16x8BitMask, kArm64I16x8BitMask) \
+ V(I8x16BitMask, kArm64I8x16BitMask) \
+ V(S128Not, kArm64S128Not) \
+ V(V128AnyTrue, kArm64V128AnyTrue) \
+ V(I64x2AllTrue, kArm64I64x2AllTrue) \
+ V(I32x4AllTrue, kArm64I32x4AllTrue) \
+ V(I16x8AllTrue, kArm64I16x8AllTrue) \
V(I8x16AllTrue, kArm64I8x16AllTrue)
#define SIMD_UNOP_LANE_SIZE_LIST(V) \
@@ -3554,11 +3550,15 @@ void InstructionSelector::VisitInt64AbsWithOverflow(Node* node) {
V(F64x2Add, kArm64FAdd, 64) \
V(F64x2Sub, kArm64FSub, 64) \
V(F64x2Div, kArm64FDiv, 64) \
+ V(F64x2RelaxedMin, kArm64FMin, 64) \
+ V(F64x2RelaxedMax, kArm64FMax, 64) \
V(F32x4Min, kArm64FMin, 32) \
V(F32x4Max, kArm64FMax, 32) \
V(F32x4Add, kArm64FAdd, 32) \
V(F32x4Sub, kArm64FSub, 32) \
V(F32x4Div, kArm64FDiv, 32) \
+ V(F32x4RelaxedMin, kArm64FMin, 32) \
+ V(F32x4RelaxedMax, kArm64FMax, 32) \
V(I64x2Sub, kArm64ISub, 64) \
V(I32x4GtU, kArm64IGtU, 32) \
V(I32x4GeU, kArm64IGeU, 32) \
@@ -4005,6 +4005,22 @@ void InstructionSelector::VisitS128Select(Node* node) {
g.UseRegister(node->InputAt(2)));
}
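+// The relaxed lane selects simply reuse the S128Select lowering.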
+void InstructionSelector::VisitI8x16RelaxedLaneSelect(Node* node) {
+ VisitS128Select(node);
+}
+
+void InstructionSelector::VisitI16x8RelaxedLaneSelect(Node* node) {
+ VisitS128Select(node);
+}
+
+void InstructionSelector::VisitI32x4RelaxedLaneSelect(Node* node) {
+ VisitS128Select(node);
+}
+
+void InstructionSelector::VisitI64x2RelaxedLaneSelect(Node* node) {
+ VisitS128Select(node);
+}
+
#define VISIT_SIMD_QFMOP(op) \
void InstructionSelector::Visit##op(Node* node) { \
Arm64OperandGenerator g(this); \
diff --git a/deps/v8/src/compiler/backend/code-generator.cc b/deps/v8/src/compiler/backend/code-generator.cc
index 7e66120321..367a5ae38b 100644
--- a/deps/v8/src/compiler/backend/code-generator.cc
+++ b/deps/v8/src/compiler/backend/code-generator.cc
@@ -980,8 +980,9 @@ Handle<DeoptimizationData> CodeGenerator::GenerateDeoptimizationData() {
data->SetSharedFunctionInfo(Smi::zero());
}
- Handle<FixedArray> literals = isolate()->factory()->NewFixedArray(
- static_cast<int>(deoptimization_literals_.size()), AllocationType::kOld);
+ Handle<DeoptimizationLiteralArray> literals =
+ isolate()->factory()->NewDeoptimizationLiteralArray(
+ static_cast<int>(deoptimization_literals_.size()));
for (unsigned i = 0; i < deoptimization_literals_.size(); i++) {
Handle<Object> object = deoptimization_literals_[i].Reify(isolate());
CHECK(!object.is_null());
diff --git a/deps/v8/src/compiler/backend/ia32/code-generator-ia32.cc b/deps/v8/src/compiler/backend/ia32/code-generator-ia32.cc
index ad2dfcbefd..84cb574821 100644
--- a/deps/v8/src/compiler/backend/ia32/code-generator-ia32.cc
+++ b/deps/v8/src/compiler/backend/ia32/code-generator-ia32.cc
@@ -1759,12 +1759,12 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
__ Movddup(i.OutputSimd128Register(), i.InputDoubleRegister(0));
break;
}
- case kF64x2ExtractLane: {
+ case kIA32F64x2ExtractLane: {
__ F64x2ExtractLane(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
i.InputUint8(1));
break;
}
- case kF64x2ReplaceLane: {
+ case kIA32F64x2ReplaceLane: {
__ F64x2ReplaceLane(i.OutputSimd128Register(), i.InputSimd128Register(0),
i.InputDoubleRegister(2), i.InputInt8(1));
break;
@@ -1823,12 +1823,24 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
i.InputOperand(1));
break;
}
- case kIA32F64x2Pmin: {
+ case kIA32F64x2Qfma: {
+ __ F64x2Qfma(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputSimd128Register(1), i.InputSimd128Register(2),
+ kScratchDoubleReg);
+ break;
+ }
+ case kIA32F64x2Qfms: {
+ __ F64x2Qfms(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputSimd128Register(1), i.InputSimd128Register(2),
+ kScratchDoubleReg);
+ break;
+ }
+ case kIA32Minpd: {
__ Minpd(i.OutputSimd128Register(), i.InputSimd128Register(0),
i.InputSimd128Register(1));
break;
}
- case kIA32F64x2Pmax: {
+ case kIA32Maxpd: {
__ Maxpd(i.OutputSimd128Register(), i.InputSimd128Register(0),
i.InputSimd128Register(1));
break;
@@ -2174,12 +2186,24 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
i.InputOperand(1));
break;
}
- case kIA32F32x4Pmin: {
+ case kIA32F32x4Qfma: {
+ __ F32x4Qfma(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputSimd128Register(1), i.InputSimd128Register(2),
+ kScratchDoubleReg);
+ break;
+ }
+ case kIA32F32x4Qfms: {
+ __ F32x4Qfms(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputSimd128Register(1), i.InputSimd128Register(2),
+ kScratchDoubleReg);
+ break;
+ }
+ case kIA32Minps: {
__ Minps(i.OutputSimd128Register(), i.InputSimd128Register(0),
i.InputSimd128Register(1));
break;
}
- case kIA32F32x4Pmax: {
+ case kIA32Maxps: {
__ Maxps(i.OutputSimd128Register(), i.InputSimd128Register(0),
i.InputSimd128Register(1));
break;
@@ -3435,6 +3459,30 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
ASSEMBLE_SIMD_ALL_TRUE(pcmpeqb);
break;
}
+ case kIA32Pblendvb: {
+ __ Pblendvb(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputSimd128Register(1), i.InputSimd128Register(2));
+ break;
+ }
+ case kIA32I32x4TruncF64x2UZero: {
+ __ I32x4TruncF64x2UZero(i.OutputSimd128Register(),
+ i.InputSimd128Register(0), i.TempRegister(0),
+ kScratchDoubleReg);
+ break;
+ }
+ case kIA32I32x4TruncF32x4U: {
+ __ I32x4TruncF32x4U(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.TempRegister(0), kScratchDoubleReg);
+ break;
+ }
+ case kIA32Cvttps2dq: {
+ __ Cvttps2dq(i.OutputSimd128Register(), i.InputSimd128Register(0));
+ break;
+ }
+ case kIA32Cvttpd2dq: {
+ __ Cvttpd2dq(i.OutputSimd128Register(), i.InputSimd128Register(0));
+ break;
+ }
case kIA32Word32AtomicPairLoad: {
__ movq(kScratchDoubleReg, i.MemoryOperand());
__ Pextrd(i.OutputRegister(0), kScratchDoubleReg, 0);
diff --git a/deps/v8/src/compiler/backend/ia32/instruction-codes-ia32.h b/deps/v8/src/compiler/backend/ia32/instruction-codes-ia32.h
index 5fd20193e4..8125f25b21 100644
--- a/deps/v8/src/compiler/backend/ia32/instruction-codes-ia32.h
+++ b/deps/v8/src/compiler/backend/ia32/instruction-codes-ia32.h
@@ -102,12 +102,17 @@ namespace compiler {
V(IA32BitcastFI) \
V(IA32BitcastIF) \
V(IA32Lea) \
+ V(IA32Pblendvb) \
V(IA32Push) \
V(IA32Poke) \
V(IA32Peek) \
+ V(IA32Cvttps2dq) \
+ V(IA32Cvttpd2dq) \
+ V(IA32I32x4TruncF32x4U) \
+ V(IA32I32x4TruncF64x2UZero) \
V(IA32F64x2Splat) \
- V(F64x2ExtractLane) \
- V(F64x2ReplaceLane) \
+ V(IA32F64x2ExtractLane) \
+ V(IA32F64x2ReplaceLane) \
V(IA32F64x2Sqrt) \
V(IA32F64x2Add) \
V(IA32F64x2Sub) \
@@ -119,8 +124,10 @@ namespace compiler {
V(IA32F64x2Ne) \
V(IA32F64x2Lt) \
V(IA32F64x2Le) \
- V(IA32F64x2Pmin) \
- V(IA32F64x2Pmax) \
+ V(IA32F64x2Qfma) \
+ V(IA32F64x2Qfms) \
+ V(IA32Minpd) \
+ V(IA32Maxpd) \
V(IA32F64x2Round) \
V(IA32F64x2ConvertLowI32x4S) \
V(IA32F64x2ConvertLowI32x4U) \
@@ -166,8 +173,10 @@ namespace compiler {
V(IA32F32x4Ne) \
V(IA32F32x4Lt) \
V(IA32F32x4Le) \
- V(IA32F32x4Pmin) \
- V(IA32F32x4Pmax) \
+ V(IA32F32x4Qfma) \
+ V(IA32F32x4Qfms) \
+ V(IA32Minps) \
+ V(IA32Maxps) \
V(IA32F32x4Round) \
V(IA32F32x4DemoteF64x2Zero) \
V(IA32I32x4Splat) \
diff --git a/deps/v8/src/compiler/backend/ia32/instruction-scheduler-ia32.cc b/deps/v8/src/compiler/backend/ia32/instruction-scheduler-ia32.cc
index 01e4f8faa8..4c95f05fdc 100644
--- a/deps/v8/src/compiler/backend/ia32/instruction-scheduler-ia32.cc
+++ b/deps/v8/src/compiler/backend/ia32/instruction-scheduler-ia32.cc
@@ -89,9 +89,14 @@ int InstructionScheduler::GetTargetInstructionFlags(
case kFloat32Neg:
case kIA32BitcastFI:
case kIA32BitcastIF:
+ case kIA32Pblendvb:
+ case kIA32Cvttps2dq:
+ case kIA32Cvttpd2dq:
+ case kIA32I32x4TruncF32x4U:
+ case kIA32I32x4TruncF64x2UZero:
case kIA32F64x2Splat:
- case kF64x2ExtractLane:
- case kF64x2ReplaceLane:
+ case kIA32F64x2ExtractLane:
+ case kIA32F64x2ReplaceLane:
case kIA32F64x2Sqrt:
case kIA32F64x2Add:
case kIA32F64x2Sub:
@@ -103,8 +108,10 @@ int InstructionScheduler::GetTargetInstructionFlags(
case kIA32F64x2Ne:
case kIA32F64x2Lt:
case kIA32F64x2Le:
- case kIA32F64x2Pmin:
- case kIA32F64x2Pmax:
+ case kIA32F64x2Qfma:
+ case kIA32F64x2Qfms:
+ case kIA32Minpd:
+ case kIA32Maxpd:
case kIA32F64x2Round:
case kIA32F64x2ConvertLowI32x4S:
case kIA32F64x2ConvertLowI32x4U:
@@ -150,8 +157,10 @@ int InstructionScheduler::GetTargetInstructionFlags(
case kIA32F32x4Ne:
case kIA32F32x4Lt:
case kIA32F32x4Le:
- case kIA32F32x4Pmin:
- case kIA32F32x4Pmax:
+ case kIA32F32x4Qfma:
+ case kIA32F32x4Qfms:
+ case kIA32Minps:
+ case kIA32Maxps:
case kIA32F32x4Round:
case kIA32F32x4DemoteF64x2Zero:
case kIA32I32x4Splat:
diff --git a/deps/v8/src/compiler/backend/ia32/instruction-selector-ia32.cc b/deps/v8/src/compiler/backend/ia32/instruction-selector-ia32.cc
index 1896537d8d..6f92f491e0 100644
--- a/deps/v8/src/compiler/backend/ia32/instruction-selector-ia32.cc
+++ b/deps/v8/src/compiler/backend/ia32/instruction-selector-ia32.cc
@@ -330,10 +330,14 @@ void VisitRROFloat(InstructionSelector* selector, Node* node,
}
}
+// For float unary operations. Also allocates a temporary general register for
+// use in external operands. If a temp is not required, use VisitRRSimd (since
+// float and SIMD registers are the same on IA32).
void VisitFloatUnop(InstructionSelector* selector, Node* node, Node* input,
ArchOpcode opcode) {
IA32OperandGenerator g(selector);
InstructionOperand temps[] = {g.TempRegister()};
+  // The temp does not need to be unique, since the inputs are float registers
+  // while the temp is a general register.
if (selector->IsSupported(AVX)) {
selector->Emit(opcode, g.DefineAsRegister(node), g.UseRegister(input),
arraysize(temps), temps);
@@ -428,6 +432,14 @@ void VisitRROSimdShift(InstructionSelector* selector, Node* node,
}
}
+void VisitRRRR(InstructionSelector* selector, Node* node,
+ InstructionCode opcode) {
+ IA32OperandGenerator g(selector);
+ selector->Emit(
+ opcode, g.DefineAsRegister(node), g.UseRegister(node->InputAt(0)),
+ g.UseRegister(node->InputAt(1)), g.UseRegister(node->InputAt(2)));
+}
+
void VisitI8x16Shift(InstructionSelector* selector, Node* node,
ArchOpcode opcode) {
IA32OperandGenerator g(selector);
@@ -1457,7 +1469,7 @@ void InstructionSelector::EmitPrepareArguments(
stack_decrement = 0;
if (g.CanBeImmediate(input.node)) {
Emit(kIA32Push, g.NoOutput(), decrement, g.UseImmediate(input.node));
- } else if (IsSupported(ATOM) ||
+ } else if (IsSupported(INTEL_ATOM) ||
sequence()->IsFP(GetVirtualRegister(input.node))) {
// TODO(bbudge): IA32Push cannot handle stack->stack double moves
// because there is no way to encode fixed double slots.
@@ -2445,7 +2457,7 @@ void InstructionSelector::VisitF64x2Splat(Node* node) {
}
void InstructionSelector::VisitF64x2ExtractLane(Node* node) {
- VisitRRISimd(this, node, kF64x2ExtractLane, kF64x2ExtractLane);
+ VisitRRISimd(this, node, kIA32F64x2ExtractLane, kIA32F64x2ExtractLane);
}
void InstructionSelector::VisitI64x2SplatI32Pair(Node* node) {
@@ -2621,7 +2633,7 @@ void InstructionSelector::VisitF64x2ReplaceLane(Node* node) {
// When no-AVX, define dst == src to save a move.
InstructionOperand dst =
IsSupported(AVX) ? g.DefineAsRegister(node) : g.DefineSameAsFirst(node);
- Emit(kF64x2ReplaceLane, dst, g.UseRegister(node->InputAt(0)),
+ Emit(kIA32F64x2ReplaceLane, dst, g.UseRegister(node->InputAt(0)),
g.UseImmediate(lane), g.UseRegister(node->InputAt(1)));
}
@@ -3010,13 +3022,18 @@ void InstructionSelector::VisitI8x16Shuffle(Node* node) {
void InstructionSelector::VisitI8x16Swizzle(Node* node) {
InstructionCode op = kIA32I8x16Swizzle;
- auto m = V128ConstMatcher(node->InputAt(1));
- if (m.HasResolvedValue()) {
- // If the indices vector is a const, check if they are in range, or if the
- // top bit is set, then we can avoid the paddusb in the codegen and simply
- // emit a pshufb.
- auto imms = m.ResolvedValue().immediate();
- op |= MiscField::encode(wasm::SimdSwizzle::AllInRangeOrTopBitSet(imms));
+ bool relaxed = OpParameter<bool>(node->op());
+ if (relaxed) {
+ op |= MiscField::encode(true);
+ } else {
+ auto m = V128ConstMatcher(node->InputAt(1));
+ if (m.HasResolvedValue()) {
+      // If the indices vector is a constant and every index is either in
+      // range or has its top bit set, we can avoid the paddusb in the codegen
+      // and simply emit a pshufb.
+ auto imms = m.ResolvedValue().immediate();
+ op |= MiscField::encode(wasm::SimdSwizzle::AllInRangeOrTopBitSet(imms));
+ }
}
IA32OperandGenerator g(this);
@@ -3032,33 +3049,56 @@ void InstructionSelector::VisitI8x16Swizzle(Node* node) { UNREACHABLE(); }
#endif // V8_ENABLE_WEBASSEMBLY
namespace {
-void VisitPminOrPmax(InstructionSelector* selector, Node* node,
- ArchOpcode opcode) {
+void VisitMinOrMax(InstructionSelector* selector, Node* node, ArchOpcode opcode,
+ bool flip_inputs) {
// Due to the way minps/minpd work, we want the dst to be same as the second
// input: b = pmin(a, b) directly maps to minps b a.
IA32OperandGenerator g(selector);
InstructionOperand dst = selector->IsSupported(AVX)
? g.DefineAsRegister(node)
: g.DefineSameAsFirst(node);
- selector->Emit(opcode, dst, g.UseRegister(node->InputAt(1)),
- g.UseRegister(node->InputAt(0)));
+ if (flip_inputs) {
+ // Due to the way minps/minpd work, we want the dst to be same as the second
+ // input: b = pmin(a, b) directly maps to minps b a.
+ selector->Emit(opcode, dst, g.UseRegister(node->InputAt(1)),
+ g.UseRegister(node->InputAt(0)));
+ } else {
+ selector->Emit(opcode, dst, g.UseRegister(node->InputAt(0)),
+ g.UseRegister(node->InputAt(1)));
+ }
}
} // namespace
void InstructionSelector::VisitF32x4Pmin(Node* node) {
- VisitPminOrPmax(this, node, kIA32F32x4Pmin);
+ VisitMinOrMax(this, node, kIA32Minps, true);
}
void InstructionSelector::VisitF32x4Pmax(Node* node) {
- VisitPminOrPmax(this, node, kIA32F32x4Pmax);
+ VisitMinOrMax(this, node, kIA32Maxps, true);
}
void InstructionSelector::VisitF64x2Pmin(Node* node) {
- VisitPminOrPmax(this, node, kIA32F64x2Pmin);
+ VisitMinOrMax(this, node, kIA32Minpd, true);
}
void InstructionSelector::VisitF64x2Pmax(Node* node) {
- VisitPminOrPmax(this, node, kIA32F64x2Pmax);
+ VisitMinOrMax(this, node, kIA32Maxpd, true);
+}
+
+void InstructionSelector::VisitF32x4RelaxedMin(Node* node) {
+ VisitMinOrMax(this, node, kIA32Minps, false);
+}
+
+void InstructionSelector::VisitF32x4RelaxedMax(Node* node) {
+ VisitMinOrMax(this, node, kIA32Maxps, false);
+}
+
+void InstructionSelector::VisitF64x2RelaxedMin(Node* node) {
+ VisitMinOrMax(this, node, kIA32Minpd, false);
+}
+
+void InstructionSelector::VisitF64x2RelaxedMax(Node* node) {
+ VisitMinOrMax(this, node, kIA32Maxpd, false);
}
namespace {
@@ -3135,6 +3175,22 @@ void InstructionSelector::VisitI32x4TruncSatF64x2UZero(Node* node) {
arraysize(temps), temps);
}
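+// The relaxed truncations lower to plain cvttps2dq / cvttpd2dq for the signed
+// variants, and to the I32x4TruncF* helpers for the unsigned variants.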
+void InstructionSelector::VisitI32x4RelaxedTruncF64x2SZero(Node* node) {
+ VisitRRSimd(this, node, kIA32Cvttpd2dq);
+}
+
+void InstructionSelector::VisitI32x4RelaxedTruncF64x2UZero(Node* node) {
+ VisitFloatUnop(this, node, node->InputAt(0), kIA32I32x4TruncF64x2UZero);
+}
+
+void InstructionSelector::VisitI32x4RelaxedTruncF32x4S(Node* node) {
+ VisitRRSimd(this, node, kIA32Cvttps2dq);
+}
+
+void InstructionSelector::VisitI32x4RelaxedTruncF32x4U(Node* node) {
+ VisitFloatUnop(this, node, node->InputAt(0), kIA32I32x4TruncF32x4U);
+}
+
void InstructionSelector::VisitI64x2GtS(Node* node) {
IA32OperandGenerator g(this);
if (CpuFeatures::IsSupported(AVX)) {
@@ -3189,6 +3245,57 @@ void InstructionSelector::VisitF64x2PromoteLowF32x4(Node* node) {
VisitRR(this, node, code);
}
+namespace {
+// pblendvb is a correct implementation for all the various relaxed lane select,
+// pblendvb is a correct implementation for all the various relaxed lane select
+// operations; see https://github.com/WebAssembly/relaxed-simd/issues/17.
+ IA32OperandGenerator g(selector);
+  // pblendvb copies src2 when the mask is set, which is the opposite of the
+  // Wasm semantics.
+ // node's inputs are: mask, lhs, rhs (determined in wasm-compiler.cc).
+ if (selector->IsSupported(AVX)) {
+ selector->Emit(kIA32Pblendvb, g.DefineAsRegister(node),
+ g.UseRegister(node->InputAt(2)),
+ g.UseRegister(node->InputAt(1)),
+ g.UseRegister(node->InputAt(0)));
+ } else {
+ // SSE4.1 pblendvb requires xmm0 to hold the mask as an implicit operand.
+ selector->Emit(kIA32Pblendvb, g.DefineSameAsFirst(node),
+ g.UseRegister(node->InputAt(2)),
+ g.UseRegister(node->InputAt(1)),
+ g.UseFixed(node->InputAt(0), xmm0));
+ }
+}
+} // namespace
+
+void InstructionSelector::VisitI8x16RelaxedLaneSelect(Node* node) {
+ VisitRelaxedLaneSelect(this, node);
+}
+void InstructionSelector::VisitI16x8RelaxedLaneSelect(Node* node) {
+ VisitRelaxedLaneSelect(this, node);
+}
+void InstructionSelector::VisitI32x4RelaxedLaneSelect(Node* node) {
+ VisitRelaxedLaneSelect(this, node);
+}
+void InstructionSelector::VisitI64x2RelaxedLaneSelect(Node* node) {
+ VisitRelaxedLaneSelect(this, node);
+}
+
+void InstructionSelector::VisitF64x2Qfma(Node* node) {
+ VisitRRRR(this, node, kIA32F64x2Qfma);
+}
+
+void InstructionSelector::VisitF64x2Qfms(Node* node) {
+ VisitRRRR(this, node, kIA32F64x2Qfms);
+}
+
+void InstructionSelector::VisitF32x4Qfma(Node* node) {
+ VisitRRRR(this, node, kIA32F32x4Qfma);
+}
+
+void InstructionSelector::VisitF32x4Qfms(Node* node) {
+ VisitRRRR(this, node, kIA32F32x4Qfms);
+}
+
void InstructionSelector::AddOutputToSelectContinuation(OperandGenerator* g,
int first_input_index,
Node* node) {
diff --git a/deps/v8/src/compiler/backend/instruction-selector-impl.h b/deps/v8/src/compiler/backend/instruction-selector-impl.h
index 5deef6e76b..4b5780b66c 100644
--- a/deps/v8/src/compiler/backend/instruction-selector-impl.h
+++ b/deps/v8/src/compiler/backend/instruction-selector-impl.h
@@ -204,6 +204,10 @@ class OperandGenerator {
return sequence()->AddImmediate(Constant(immediate));
}
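+  // Like UseImmediate(int), but for 64-bit immediate values.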
+ InstructionOperand UseImmediate64(int64_t immediate) {
+ return sequence()->AddImmediate(Constant(immediate));
+ }
+
InstructionOperand UseImmediate(Node* node) {
return sequence()->AddImmediate(ToConstant(node));
}
diff --git a/deps/v8/src/compiler/backend/instruction-selector.cc b/deps/v8/src/compiler/backend/instruction-selector.cc
index dce17e6410..fc04b37ec3 100644
--- a/deps/v8/src/compiler/backend/instruction-selector.cc
+++ b/deps/v8/src/compiler/backend/instruction-selector.cc
@@ -2782,14 +2782,19 @@ void InstructionSelector::VisitI64x2ExtractLane(Node* node) { UNIMPLEMENTED(); }
void InstructionSelector::VisitI64x2ReplaceLane(Node* node) { UNIMPLEMENTED(); }
#endif // !V8_TARGET_ARCH_MIPS64 && !V8_TARGET_ARCH_LOONG64 &&
// !V8_TARGET_ARCH_RISCV64
+#endif // !V8_TARGET_ARCH_ARM64
+#endif // !V8_TARGET_ARCH_X64 && !V8_TARGET_ARCH_S390X && !V8_TARGET_ARCH_PPC64
+
+#if !V8_TARGET_ARCH_X64 && !V8_TARGET_ARCH_S390X && !V8_TARGET_ARCH_PPC64
+#if !V8_TARGET_ARCH_ARM64 && !V8_TARGET_ARCH_IA32
void InstructionSelector::VisitF64x2Qfma(Node* node) { UNIMPLEMENTED(); }
void InstructionSelector::VisitF64x2Qfms(Node* node) { UNIMPLEMENTED(); }
void InstructionSelector::VisitF32x4Qfma(Node* node) { UNIMPLEMENTED(); }
void InstructionSelector::VisitF32x4Qfms(Node* node) { UNIMPLEMENTED(); }
-#endif // !V8_TARGET_ARCH_ARM64
+#endif // !V8_TARGET_ARCH_ARM64 && !V8_TARGET_ARCH_IA32
#endif // !V8_TARGET_ARCH_X64 && !V8_TARGET_ARCH_S390X && !V8_TARGET_ARCH_PPC64
-#if !V8_TARGET_ARCH_X64
+#if !V8_TARGET_ARCH_X64 && !V8_TARGET_ARCH_IA32 && !V8_TARGET_ARCH_ARM64
void InstructionSelector::VisitI8x16RelaxedLaneSelect(Node* node) {
UNIMPLEMENTED();
}
@@ -2818,7 +2823,7 @@ void InstructionSelector::VisitI32x4RelaxedTruncF32x4S(Node* node) {
void InstructionSelector::VisitI32x4RelaxedTruncF32x4U(Node* node) {
UNIMPLEMENTED();
}
-#endif // !V8_TARGET_ARCH_X64
+#endif // !V8_TARGET_ARCH_X64 && !V8_TARGET_ARCH_IA32 && !V8_TARGET_ARCH_ARM64
void InstructionSelector::VisitFinishRegion(Node* node) { EmitIdentity(node); }
diff --git a/deps/v8/src/compiler/backend/instruction.h b/deps/v8/src/compiler/backend/instruction.h
index 7000469549..37a8209b6b 100644
--- a/deps/v8/src/compiler/backend/instruction.h
+++ b/deps/v8/src/compiler/backend/instruction.h
@@ -1175,7 +1175,7 @@ class V8_EXPORT_PRIVATE Constant final {
private:
Type type_;
- RelocInfo::Mode rmode_ = RelocInfo::NONE;
+ RelocInfo::Mode rmode_ = RelocInfo::NO_INFO;
int64_t value_;
};
@@ -1742,7 +1742,7 @@ class V8_EXPORT_PRIVATE InstructionSequence final
RpoImmediates& rpo_immediates() { return rpo_immediates_; }
ImmediateOperand AddImmediate(const Constant& constant) {
- if (RelocInfo::IsNone(constant.rmode())) {
+ if (RelocInfo::IsNoInfo(constant.rmode())) {
if (constant.type() == Constant::kRpoNumber) {
// Ideally we would inline RPO numbers into the operand, however jump-
// threading modifies RPO values and so we indirect through a vector
diff --git a/deps/v8/src/compiler/backend/loong64/instruction-selector-loong64.cc b/deps/v8/src/compiler/backend/loong64/instruction-selector-loong64.cc
index e487c1511d..10d22fcaa2 100644
--- a/deps/v8/src/compiler/backend/loong64/instruction-selector-loong64.cc
+++ b/deps/v8/src/compiler/backend/loong64/instruction-selector-loong64.cc
@@ -1086,15 +1086,14 @@ void InstructionSelector::VisitInt64Mul(Node* node) {
Loong64OperandGenerator g(this);
Int64BinopMatcher m(node);
if (m.right().HasResolvedValue() && m.right().ResolvedValue() > 0) {
- uint32_t value = static_cast<uint32_t>(m.right().ResolvedValue());
+ uint64_t value = static_cast<uint64_t>(m.right().ResolvedValue());
if (base::bits::IsPowerOfTwo(value)) {
Emit(kLoong64Sll_d | AddressingModeField::encode(kMode_None),
g.DefineAsRegister(node), g.UseRegister(m.left().node()),
g.TempImmediate(base::bits::WhichPowerOfTwo(value)));
return;
}
- if (base::bits::IsPowerOfTwo(value - 1) && value - 1 > 0 &&
- value - 1 <= 31) {
+ if (base::bits::IsPowerOfTwo(value - 1) && value - 1 > 0) {
// Alsl_d macro will handle the shifting value out of bound cases.
Emit(kLoong64Alsl_d, g.DefineAsRegister(node),
g.UseRegister(m.left().node()), g.UseRegister(m.left().node()),
diff --git a/deps/v8/src/compiler/backend/mips64/instruction-selector-mips64.cc b/deps/v8/src/compiler/backend/mips64/instruction-selector-mips64.cc
index f552a70341..6b62a7c694 100644
--- a/deps/v8/src/compiler/backend/mips64/instruction-selector-mips64.cc
+++ b/deps/v8/src/compiler/backend/mips64/instruction-selector-mips64.cc
@@ -1125,17 +1125,15 @@ void InstructionSelector::VisitUint32MulHigh(Node* node) {
void InstructionSelector::VisitInt64Mul(Node* node) {
Mips64OperandGenerator g(this);
Int64BinopMatcher m(node);
- // TODO(dusmil): Add optimization for shifts larger than 32.
if (m.right().HasResolvedValue() && m.right().ResolvedValue() > 0) {
- uint32_t value = static_cast<uint32_t>(m.right().ResolvedValue());
+ uint64_t value = static_cast<uint64_t>(m.right().ResolvedValue());
if (base::bits::IsPowerOfTwo(value)) {
Emit(kMips64Dshl | AddressingModeField::encode(kMode_None),
g.DefineAsRegister(node), g.UseRegister(m.left().node()),
g.TempImmediate(base::bits::WhichPowerOfTwo(value)));
return;
}
- if (base::bits::IsPowerOfTwo(value - 1) && kArchVariant == kMips64r6 &&
- value - 1 > 0 && value - 1 <= 31) {
+ if (base::bits::IsPowerOfTwo(value - 1) && value - 1 > 0) {
// Dlsa macro will handle the shifting value out of bound cases.
Emit(kMips64Dlsa, g.DefineAsRegister(node),
g.UseRegister(m.left().node()), g.UseRegister(m.left().node()),
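
The two Int64Mul hunks above (loong64 and mips64) widen the matched constant to uint64_t and drop the `value - 1 <= 31` bound, relying on the same strength reduction: a multiply by 2^n becomes a single shift, and a multiply by 2^n + 1 becomes a shift-plus-add (Alsl_d / Dlsa). A minimal scalar sketch of that arithmetic, in plain standard C++ rather than V8 code (the helper names here are only illustrative):

    #include <cassert>
    #include <cstdint>

    // True iff v is a nonzero power of two (mirrors base::bits::IsPowerOfTwo in spirit).
    static bool IsPowerOfTwo(uint64_t v) { return v != 0 && (v & (v - 1)) == 0; }

    // Index of the single set bit, i.e. log2(v) for a power of two.
    static int WhichPowerOfTwo(uint64_t v) {
      int shift = 0;
      while ((v >>= 1) != 0) ++shift;
      return shift;
    }

    // Scalar model of the Int64Mul selection: multiply by a constant using
    // either a shift (k = 2^n) or a shift-plus-add (k = 2^n + 1).
    uint64_t MulByConstant(uint64_t x, uint64_t k) {
      if (IsPowerOfTwo(k)) {
        return x << WhichPowerOfTwo(k);            // x * 2^n      ->  x << n
      }
      if (k > 1 && IsPowerOfTwo(k - 1)) {
        return (x << WhichPowerOfTwo(k - 1)) + x;  // x * (2^n+1)  ->  (x << n) + x
      }
      return x * k;                                // otherwise: a real multiply
    }

    int main() {
      assert(MulByConstant(7, 8) == 56);   // shift only
      assert(MulByConstant(7, 9) == 63);   // shift + add (what Alsl_d / Dlsa encode)
      assert(MulByConstant(7, 10) == 70);  // generic multiply
      return 0;
    }
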
diff --git a/deps/v8/src/compiler/backend/ppc/code-generator-ppc.cc b/deps/v8/src/compiler/backend/ppc/code-generator-ppc.cc
index a51ffb4446..46d8b248e9 100644
--- a/deps/v8/src/compiler/backend/ppc/code-generator-ppc.cc
+++ b/deps/v8/src/compiler/backend/ppc/code-generator-ppc.cc
@@ -904,44 +904,28 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
int const fp_param_field = FPParamField::decode(instr->opcode());
int num_fp_parameters = fp_param_field;
bool has_function_descriptor = false;
- int offset = 19 * kInstrSize;
-
- if (instr->InputAt(0)->IsImmediate() &&
- !FLAG_enable_embedded_constant_pool) {
- // If loading an immediate without constant pool then 4 instructions get
- // emitted instead of a single load (which makes it 3 extra).
- offset = 22 * kInstrSize;
- }
- if (!instr->InputAt(0)->IsImmediate() && !ABI_CALL_VIA_IP) {
- // On Linux and Sim, there will be an extra
- // instruction to pass the input using the `ip` register. This
- // instruction gets emitted under `CallCFunction` or
- // `CallCFunctionHelper` depending on the type of the input (immediate
- // or register). This extra move is only emitted on AIX if the input is
- // an immediate and not a register.
- offset -= kInstrSize;
- }
#if ABI_USES_FUNCTION_DESCRIPTORS
// AIX/PPC64BE Linux uses a function descriptor
int kNumFPParametersMask = kHasFunctionDescriptorBitMask - 1;
num_fp_parameters = kNumFPParametersMask & fp_param_field;
has_function_descriptor =
(fp_param_field & kHasFunctionDescriptorBitMask) != 0;
- // AIX may emit 2 extra Load instructions under CallCFunctionHelper
- // due to having function descriptor.
- if (has_function_descriptor) {
- offset += 2 * kInstrSize;
- }
#endif
#if V8_ENABLE_WEBASSEMBLY
Label start_call;
+ int start_pc_offset = 0;
bool isWasmCapiFunction =
linkage()->GetIncomingDescriptor()->IsWasmCapiFunction();
if (isWasmCapiFunction) {
__ mflr(r0);
__ LoadPC(kScratchReg);
__ bind(&start_call);
- __ addi(kScratchReg, kScratchReg, Operand(offset));
+ start_pc_offset = __ pc_offset();
+ // We are going to patch this instruction after emitting
+      // CallCFunction, so use a zero offset here as a placeholder for now.
+      // patch_wasm_cpi_return_address assumes an `addi` is used here to
+      // add the offset to the pc.
+ __ addi(kScratchReg, kScratchReg, Operand::Zero());
__ StoreU64(kScratchReg,
MemOperand(fp, WasmExitFrameConstants::kCallingPCOffset));
__ mtlr(r0);
@@ -956,21 +940,17 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
__ CallCFunction(func, num_gp_parameters, num_fp_parameters,
has_function_descriptor);
}
- // TODO(miladfar): In the above block, kScratchReg must be populated with
- // the strictly-correct PC, which is the return address at this spot. The
- // offset is counted from where we are binding to the label and ends at
- // this spot. If failed, replace it with the correct offset suggested.
- // More info on f5ab7d3.
#if V8_ENABLE_WEBASSEMBLY
if (isWasmCapiFunction) {
- // The offset calculated is from pc returned by LoadPC above, until this
- // location.
+ int offset_since_start_call = __ SizeOfCodeGeneratedSince(&start_call);
+ // Here we are going to patch the `addi` instruction above to use the
+ // correct offset.
// LoadPC emits two instructions and pc is the address of its
- // second emitted instruction. `start_call` is binding to the address
- // right after the above retrieved pc, therefore there is one less
- // instruction to count when summing the total size of generated code.
- int generated_size = offset - kInstrSize;
- CHECK_EQ(generated_size, __ SizeOfCodeGeneratedSince(&start_call));
+ // second emitted instruction therefore there is one more instruction to
+ // count.
+ offset_since_start_call += kInstrSize;
+ __ patch_wasm_cpi_return_address(kScratchReg, start_pc_offset,
+ offset_since_start_call);
RecordSafepoint(instr->reference_map());
}
#endif // V8_ENABLE_WEBASSEMBLY
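
The hunk above replaces a hand-counted instruction offset with a record-then-patch scheme: the pc offset of a placeholder `addi` is remembered, the (variable-length) call sequence is emitted, and the placeholder is rewritten once the real distance is known. A toy model of that control flow in plain C++ (none of these names exist in V8; this only illustrates the idea):

    #include <cassert>
    #include <cstdint>
    #include <vector>

    // Toy instruction buffer for the "emit a placeholder, patch it once the
    // real distance is known" scheme.
    class ToyAssembler {
     public:
      static constexpr int kInstrSize = 4;
      int pc_offset() const { return static_cast<int>(buf_.size()) * kInstrSize; }
      void Emit(uint32_t insn) { buf_.push_back(insn); }
      // Overwrite the instruction that was emitted at byte offset `at`.
      void PatchAt(int at, uint32_t insn) { buf_[at / kInstrSize] = insn; }
      uint32_t At(int at) const { return buf_[at / kInstrSize]; }

     private:
      std::vector<uint32_t> buf_;
    };

    int main() {
      ToyAssembler masm;
      masm.Emit(0x11111111);                   // stand-in for mflr / LoadPC
      const int start_pc_offset = masm.pc_offset();
      masm.Emit(0x22220000);                   // placeholder "addi ..., 0"

      // ... an arbitrary, possibly variable-length call sequence ...
      for (int i = 0; i < 5; ++i) masm.Emit(0x33333333);

      // Distance from the placeholder to here, plus one extra instruction to
      // account for the recorded pc pointing one instruction earlier.
      int offset = masm.pc_offset() - start_pc_offset + ToyAssembler::kInstrSize;
      masm.PatchAt(start_pc_offset, 0x22220000u | static_cast<uint32_t>(offset));

      assert((masm.At(start_pc_offset) & 0xFFFF) == 28);  // 6 * 4 + 4
      return 0;
    }

Unlike the old fixed `offset = 19 * kInstrSize` bookkeeping, this stays correct no matter how many instructions CallCFunction ends up emitting.
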
@@ -2095,6 +2075,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
Register temp1 = r0;
if (CpuFeatures::IsSupported(PPC_10_PLUS)) {
__ brw(output, input);
+ __ extsw(output, output);
break;
}
__ rotlwi(temp1, input, 8);
@@ -4168,15 +4149,25 @@ void CodeGenerator::AssembleReturn(InstructionOperand* additional_pop_count) {
// max(argc_reg, parameter_slots-1), and the receiver is added in
// DropArguments().
if (parameter_slots > 1) {
- const int parameter_slots_without_receiver = parameter_slots - 1;
- Label skip;
- __ CmpS64(argc_reg, Operand(parameter_slots_without_receiver), r0);
- __ bgt(&skip);
- __ mov(argc_reg, Operand(parameter_slots_without_receiver));
- __ bind(&skip);
+ if (kJSArgcIncludesReceiver) {
+ Label skip;
+ __ CmpS64(argc_reg, Operand(parameter_slots), r0);
+ __ bgt(&skip);
+ __ mov(argc_reg, Operand(parameter_slots));
+ __ bind(&skip);
+ } else {
+ const int parameter_slots_without_receiver = parameter_slots - 1;
+ Label skip;
+ __ CmpS64(argc_reg, Operand(parameter_slots_without_receiver), r0);
+ __ bgt(&skip);
+ __ mov(argc_reg, Operand(parameter_slots_without_receiver));
+ __ bind(&skip);
+ }
}
__ DropArguments(argc_reg, TurboAssembler::kCountIsInteger,
- TurboAssembler::kCountExcludesReceiver);
+ kJSArgcIncludesReceiver
+ ? TurboAssembler::kCountIncludesReceiver
+ : TurboAssembler::kCountExcludesReceiver);
} else if (additional_pop_count->IsImmediate()) {
int additional_count = g.ToConstant(additional_pop_count).ToInt32();
__ Drop(parameter_slots + additional_count);
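
The AssembleReturn change above makes the argument-drop clamp depend on kJSArgcIncludesReceiver: at least the declared parameter slots are dropped, but any extra arguments the caller pushed are dropped too. A tiny scalar model of that clamp (plain C++; the function name is only illustrative):

    #include <algorithm>
    #include <cassert>

    // Model of the clamp performed before DropArguments. `argc_includes_receiver`
    // plays the role of kJSArgcIncludesReceiver.
    int SlotsToDrop(int argc, int parameter_slots, bool argc_includes_receiver) {
      const int minimum =
          argc_includes_receiver ? parameter_slots : parameter_slots - 1;
      return std::max(argc, minimum);
    }

    int main() {
      // Caller passed fewer arguments than declared: drop the full declared frame.
      assert(SlotsToDrop(/*argc=*/2, /*parameter_slots=*/4, true) == 4);
      // Caller passed more arguments than declared: drop them all.
      assert(SlotsToDrop(/*argc=*/6, /*parameter_slots=*/4, true) == 6);
      // Old counting scheme, where argc excluded the receiver.
      assert(SlotsToDrop(/*argc=*/2, /*parameter_slots=*/4, false) == 3);
      return 0;
    }
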
diff --git a/deps/v8/src/compiler/backend/riscv64/code-generator-riscv64.cc b/deps/v8/src/compiler/backend/riscv64/code-generator-riscv64.cc
index 475ecf3f71..94bcbb6244 100644
--- a/deps/v8/src/compiler/backend/riscv64/code-generator-riscv64.cc
+++ b/deps/v8/src/compiler/backend/riscv64/code-generator-riscv64.cc
@@ -481,6 +481,70 @@ FPUCondition FlagsConditionToConditionCmpFPU(bool* predicate,
i.InputSimd128Register(1)); \
} while (0)
+#define ASSEMBLE_RVV_BINOP_INTEGER(instr, OP) \
+ case kRiscvI8x16##instr: { \
+ __ VU.set(kScratchReg, E8, m1); \
+ __ OP(i.OutputSimd128Register(), i.InputSimd128Register(0), \
+ i.InputSimd128Register(1)); \
+ break; \
+ } \
+ case kRiscvI16x8##instr: { \
+ __ VU.set(kScratchReg, E16, m1); \
+ __ OP(i.OutputSimd128Register(), i.InputSimd128Register(0), \
+ i.InputSimd128Register(1)); \
+ break; \
+ } \
+ case kRiscvI32x4##instr: { \
+ __ VU.set(kScratchReg, E32, m1); \
+ __ OP(i.OutputSimd128Register(), i.InputSimd128Register(0), \
+ i.InputSimd128Register(1)); \
+ break; \
+ }
+
+#define ASSEMBLE_RVV_UNOP_INTEGER_VR(instr, OP) \
+ case kRiscvI8x16##instr: { \
+ __ VU.set(kScratchReg, E8, m1); \
+ __ OP(i.OutputSimd128Register(), i.InputRegister(0)); \
+ break; \
+ } \
+ case kRiscvI16x8##instr: { \
+ __ VU.set(kScratchReg, E16, m1); \
+ __ OP(i.OutputSimd128Register(), i.InputRegister(0)); \
+ break; \
+ } \
+ case kRiscvI32x4##instr: { \
+ __ VU.set(kScratchReg, E32, m1); \
+ __ OP(i.OutputSimd128Register(), i.InputRegister(0)); \
+ break; \
+ } \
+ case kRiscvI64x2##instr: { \
+ __ VU.set(kScratchReg, E64, m1); \
+ __ OP(i.OutputSimd128Register(), i.InputRegister(0)); \
+ break; \
+ }
+
+#define ASSEMBLE_RVV_UNOP_INTEGER_VV(instr, OP) \
+ case kRiscvI8x16##instr: { \
+ __ VU.set(kScratchReg, E8, m1); \
+ __ OP(i.OutputSimd128Register(), i.InputSimd128Register(0)); \
+ break; \
+ } \
+ case kRiscvI16x8##instr: { \
+ __ VU.set(kScratchReg, E16, m1); \
+ __ OP(i.OutputSimd128Register(), i.InputSimd128Register(0)); \
+ break; \
+ } \
+ case kRiscvI32x4##instr: { \
+ __ VU.set(kScratchReg, E32, m1); \
+ __ OP(i.OutputSimd128Register(), i.InputSimd128Register(0)); \
+ break; \
+ } \
+ case kRiscvI64x2##instr: { \
+ __ VU.set(kScratchReg, E64, m1); \
+ __ OP(i.OutputSimd128Register(), i.InputSimd128Register(0)); \
+ break; \
+ }
+
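
The three ASSEMBLE_RVV_* macros above stamp out one switch case per lane shape, so each integer op only differs in the element-width setting passed to VU.set. A reduced, self-contained model of that case-generating pattern (plain C++; the enum and struct names are made up):

    #include <cassert>

    enum Opcode { kI8x16Add, kI16x8Add, kI32x4Add, kI8x16Sub, kI16x8Sub, kI32x4Sub };

    // One macro generates a case per lane shape; only the width setup differs.
    #define RVV_BINOP_CASES(instr, expr) \
      case kI8x16##instr:                \
        width = 8;                       \
        result = (expr);                 \
        break;                           \
      case kI16x8##instr:                \
        width = 16;                      \
        result = (expr);                 \
        break;                           \
      case kI32x4##instr:                \
        width = 32;                      \
        result = (expr);                 \
        break;

    struct Dispatched { int width; int result; };

    Dispatched Dispatch(Opcode op, int a, int b) {
      int width = 0, result = 0;
      switch (op) {
        RVV_BINOP_CASES(Add, a + b)  // expands to the E8/E16/E32 cases
        RVV_BINOP_CASES(Sub, a - b)
      }
      return {width, result};
    }

    int main() {
      assert(Dispatch(kI16x8Add, 2, 3).width == 16);
      assert(Dispatch(kI32x4Sub, 5, 2).result == 3);
      return 0;
    }
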
void CodeGenerator::AssembleDeconstructFrame() {
__ Move(sp, fp);
__ Pop(ra, fp);
@@ -1889,6 +1953,124 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
__ vl(i.OutputSimd128Register(), src, 0, VSew::E8);
break;
}
+ case kRiscvS128Zero: {
+ Simd128Register dst = i.OutputSimd128Register();
+ __ VU.set(kScratchReg, E8, m1);
+ __ vmv_vx(dst, zero_reg);
+ break;
+ }
+ case kRiscvS128Load32Zero: {
+ Simd128Register dst = i.OutputSimd128Register();
+ __ VU.set(kScratchReg, E32, m1);
+ __ Lwu(kScratchReg, i.MemoryOperand());
+ __ vmv_sx(dst, kScratchReg);
+ break;
+ }
+ case kRiscvS128Load64Zero: {
+ Simd128Register dst = i.OutputSimd128Register();
+ __ VU.set(kScratchReg, E64, m1);
+ __ Ld(kScratchReg, i.MemoryOperand());
+ __ vmv_sx(dst, kScratchReg);
+ break;
+ }
+ case kRiscvS128LoadLane: {
+ Simd128Register dst = i.OutputSimd128Register();
+ DCHECK_EQ(dst, i.InputSimd128Register(0));
+ auto sz = static_cast<int>(MiscField::decode(instr->opcode()));
+ __ LoadLane(sz, dst, i.InputUint8(1), i.MemoryOperand(2));
+ break;
+ }
+ case kRiscvS128StoreLane: {
+ Simd128Register src = i.InputSimd128Register(0);
+ DCHECK_EQ(src, i.InputSimd128Register(0));
+ auto sz = static_cast<int>(MiscField::decode(instr->opcode()));
+ __ StoreLane(sz, src, i.InputUint8(1), i.MemoryOperand(2));
+ break;
+ }
+ case kRiscvS128Load64ExtendS: {
+ __ VU.set(kScratchReg, E64, m1);
+ __ Ld(kScratchReg, i.MemoryOperand());
+ __ vmv_vx(kSimd128ScratchReg, kScratchReg);
+ __ VU.set(kScratchReg, i.InputInt8(2), m1);
+ __ vsext_vf2(i.OutputSimd128Register(), kSimd128ScratchReg);
+ break;
+ }
+ case kRiscvS128Load64ExtendU: {
+ __ VU.set(kScratchReg, E64, m1);
+ __ Ld(kScratchReg, i.MemoryOperand());
+ __ vmv_vx(kSimd128ScratchReg, kScratchReg);
+ __ VU.set(kScratchReg, i.InputInt8(2), m1);
+ __ vzext_vf2(i.OutputSimd128Register(), kSimd128ScratchReg);
+ break;
+ }
+ case kRiscvS128LoadSplat: {
+ __ VU.set(kScratchReg, i.InputInt8(2), i.InputInt8(3));
+ switch (i.InputInt8(2)) {
+ case E8:
+ __ Lb(kScratchReg, i.MemoryOperand());
+ break;
+ case E16:
+ __ Lh(kScratchReg, i.MemoryOperand());
+ break;
+ case E32:
+ __ Lw(kScratchReg, i.MemoryOperand());
+ break;
+ case E64:
+ __ Ld(kScratchReg, i.MemoryOperand());
+ break;
+ default:
+ UNREACHABLE();
+ }
+ __ vmv_vx(i.OutputSimd128Register(), kScratchReg);
+ break;
+ }
+ case kRiscvS128AllOnes: {
+ __ VU.set(kScratchReg, E8, m1);
+ __ vmv_vx(i.OutputSimd128Register(), zero_reg);
+ __ vnot_vv(i.OutputSimd128Register(), i.OutputSimd128Register());
+ break;
+ }
+ case kRiscvS128Select: {
+ __ VU.set(kScratchReg, E8, m1);
+ __ vand_vv(kSimd128ScratchReg, i.InputSimd128Register(1),
+ i.InputSimd128Register(0));
+ __ vnot_vv(kSimd128ScratchReg2, i.InputSimd128Register(0));
+ __ vand_vv(kSimd128ScratchReg2, i.InputSimd128Register(2),
+ kSimd128ScratchReg2);
+ __ vor_vv(i.OutputSimd128Register(), kSimd128ScratchReg,
+ kSimd128ScratchReg2);
+ break;
+ }
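
The kRiscvS128Select case above composes the generic bitwise select, out = (mask & a) | (~mask & b), from vand/vnot/vor. The same identity on a scalar, as a sketch (plain C++; names are illustrative):

    #include <cassert>
    #include <cstdint>

    // Where a mask bit is 1, take the bit from `when_set`; otherwise from `when_clear`.
    uint64_t BitSelect(uint64_t mask, uint64_t when_set, uint64_t when_clear) {
      return (when_set & mask) | (when_clear & ~mask);
    }

    int main() {
      // High byte from the first operand, low byte from the second.
      assert(BitSelect(0xFF00, 0xAABB, 0x1122) == 0xAA22);
      return 0;
    }
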
+ case kRiscvS128And: {
+ (__ VU).set(kScratchReg, VSew::E8, Vlmul::m1);
+ __ vand_vv(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputSimd128Register(1));
+ break;
+ }
+ case kRiscvS128Or: {
+ (__ VU).set(kScratchReg, VSew::E8, Vlmul::m1);
+ __ vor_vv(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputSimd128Register(1));
+ break;
+ }
+ case kRiscvS128Xor: {
+ (__ VU).set(kScratchReg, VSew::E8, Vlmul::m1);
+ __ vxor_vv(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputSimd128Register(1));
+ break;
+ }
+ case kRiscvS128Not: {
+ (__ VU).set(kScratchReg, VSew::E8, Vlmul::m1);
+ __ vnot_vv(i.OutputSimd128Register(), i.InputSimd128Register(0));
+ break;
+ }
+ case kRiscvS128AndNot: {
+ (__ VU).set(kScratchReg, VSew::E8, Vlmul::m1);
+ __ vnot_vv(i.OutputSimd128Register(), i.InputSimd128Register(1));
+ __ vand_vv(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.OutputSimd128Register());
+ break;
+ }
case kRiscvS128Const: {
Simd128Register dst = i.OutputSimd128Register();
uint8_t imm[16];
@@ -1899,24 +2081,89 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
__ WasmRvvS128const(dst, imm);
break;
}
- case kRiscvI64x2Add: {
+ case kRiscvI64x2Mul: {
(__ VU).set(kScratchReg, VSew::E64, Vlmul::m1);
- __ vadd_vv(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ __ vmul_vv(i.OutputSimd128Register(), i.InputSimd128Register(0),
i.InputSimd128Register(1));
break;
}
- case kRiscvI32x4Add: {
- (__ VU).set(kScratchReg, VSew::E32, Vlmul::m1);
+ case kRiscvI64x2Add: {
+ (__ VU).set(kScratchReg, VSew::E64, Vlmul::m1);
__ vadd_vv(i.OutputSimd128Register(), i.InputSimd128Register(0),
i.InputSimd128Register(1));
break;
}
- case kRiscvI16x8Add: {
+ case kRiscvVrgather: {
+ Simd128Register index = i.InputSimd128Register(0);
+ if (!(instr->InputAt(1)->IsImmediate())) {
+ index = i.InputSimd128Register(1);
+ } else {
+ __ VU.set(kScratchReg, E64, m1);
+ __ li(kScratchReg, i.InputInt64(1));
+ __ vmv_sx(kSimd128ScratchReg3, kScratchReg);
+ index = kSimd128ScratchReg3;
+ }
+ __ VU.set(kScratchReg, i.InputInt8(2), i.InputInt8(3));
+ if (i.OutputSimd128Register() == i.InputSimd128Register(0)) {
+ __ vrgather_vv(kSimd128ScratchReg, i.InputSimd128Register(0), index);
+ __ vmv_vv(i.OutputSimd128Register(), kSimd128ScratchReg);
+ } else {
+ __ vrgather_vv(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ index);
+ }
+ break;
+ }
+ case kRiscvVslidedown: {
+ __ VU.set(kScratchReg, i.InputInt8(2), i.InputInt8(3));
+ if (instr->InputAt(1)->IsImmediate()) {
+ DCHECK(is_uint5(i.InputInt32(1)));
+ __ vslidedown_vi(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputInt5(1));
+ } else {
+ __ vslidedown_vx(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputRegister(1));
+ }
+ break;
+ }
+ case kRiscvI8x16RoundingAverageU: {
+ __ VU.set(kScratchReg2, E8, m1);
+ __ vwaddu_vv(kSimd128ScratchReg, i.InputSimd128Register(0),
+ i.InputSimd128Register(1));
+ __ li(kScratchReg, 1);
+ __ vwaddu_wx(kSimd128ScratchReg3, kSimd128ScratchReg, kScratchReg);
+ __ li(kScratchReg, 2);
+ __ VU.set(kScratchReg2, E16, m2);
+ __ vdivu_vx(kSimd128ScratchReg3, kSimd128ScratchReg3, kScratchReg);
+ __ VU.set(kScratchReg2, E8, m1);
+ __ vnclipu_vi(i.OutputSimd128Register(), kSimd128ScratchReg3, 0);
+ break;
+ }
+ case kRiscvI16x8RoundingAverageU: {
+ __ VU.set(kScratchReg2, E16, m1);
+ __ vwaddu_vv(kSimd128ScratchReg, i.InputSimd128Register(0),
+ i.InputSimd128Register(1));
+ __ li(kScratchReg, 1);
+ __ vwaddu_wx(kSimd128ScratchReg3, kSimd128ScratchReg, kScratchReg);
+ __ li(kScratchReg, 2);
+ __ VU.set(kScratchReg2, E32, m2);
+ __ vdivu_vx(kSimd128ScratchReg3, kSimd128ScratchReg3, kScratchReg);
+ __ VU.set(kScratchReg2, E16, m1);
+ __ vnclipu_vi(i.OutputSimd128Register(), kSimd128ScratchReg3, 0);
+ break;
+ }
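
The two RoundingAverageU cases above compute the unsigned rounding average, (a + b + 1) / 2, in a widened element type (vwaddu) so the intermediate sum cannot overflow, then narrow the result back down (vnclipu). A per-lane scalar sketch of the same arithmetic (plain C++):

    #include <cassert>
    #include <cstdint>

    // One 8-bit lane: widen, add, add 1, halve, narrow. Widening is what keeps
    // 255 + 255 + 1 from overflowing the lane type.
    uint8_t RoundingAverageU8(uint8_t a, uint8_t b) {
      uint16_t wide = static_cast<uint16_t>(a) + static_cast<uint16_t>(b) + 1;
      return static_cast<uint8_t>(wide >> 1);
    }

    int main() {
      assert(RoundingAverageU8(0, 0) == 0);
      assert(RoundingAverageU8(1, 2) == 2);      // .5 ties round up
      assert(RoundingAverageU8(255, 255) == 255);
      assert(RoundingAverageU8(254, 255) == 255);
      return 0;
    }
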
+ case kRiscvI16x8Mul: {
(__ VU).set(kScratchReg, VSew::E16, Vlmul::m1);
- __ vadd_vv(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ __ vmv_vx(kSimd128ScratchReg, zero_reg);
+ __ vmul_vv(i.OutputSimd128Register(), i.InputSimd128Register(0),
i.InputSimd128Register(1));
break;
}
+ case kRiscvI16x8Q15MulRSatS: {
+ __ VU.set(kScratchReg, E16, m1);
+ __ vsmul_vv(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputSimd128Register(1));
+ break;
+ }
case kRiscvI16x8AddSatS: {
(__ VU).set(kScratchReg, VSew::E16, Vlmul::m1);
__ vsadd_vv(i.OutputSimd128Register(), i.InputSimd128Register(0),
@@ -1929,12 +2176,6 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
i.InputSimd128Register(1));
break;
}
- case kRiscvI8x16Add: {
- (__ VU).set(kScratchReg, VSew::E8, Vlmul::m1);
- __ vadd_vv(i.OutputSimd128Register(), i.InputSimd128Register(0),
- i.InputSimd128Register(1));
- break;
- }
case kRiscvI8x16AddSatS: {
(__ VU).set(kScratchReg, VSew::E8, Vlmul::m1);
__ vsadd_vv(i.OutputSimd128Register(), i.InputSimd128Register(0),
@@ -1953,18 +2194,6 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
i.InputSimd128Register(1));
break;
}
- case kRiscvI32x4Sub: {
- (__ VU).set(kScratchReg, VSew::E32, Vlmul::m1);
- __ vsub_vv(i.OutputSimd128Register(), i.InputSimd128Register(0),
- i.InputSimd128Register(1));
- break;
- }
- case kRiscvI16x8Sub: {
- (__ VU).set(kScratchReg, VSew::E16, Vlmul::m1);
- __ vsub_vv(i.OutputSimd128Register(), i.InputSimd128Register(0),
- i.InputSimd128Register(1));
- break;
- }
case kRiscvI16x8SubSatS: {
(__ VU).set(kScratchReg, VSew::E16, Vlmul::m1);
__ vssub_vv(i.OutputSimd128Register(), i.InputSimd128Register(0),
@@ -1977,12 +2206,6 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
i.InputSimd128Register(1));
break;
}
- case kRiscvI8x16Sub: {
- (__ VU).set(kScratchReg, VSew::E8, Vlmul::m1);
- __ vsub_vv(i.OutputSimd128Register(), i.InputSimd128Register(0),
- i.InputSimd128Register(1));
- break;
- }
case kRiscvI8x16SubSatS: {
(__ VU).set(kScratchReg, VSew::E8, Vlmul::m1);
__ vssub_vv(i.OutputSimd128Register(), i.InputSimd128Register(0),
@@ -1995,80 +2218,218 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
i.InputSimd128Register(1));
break;
}
- case kRiscvS128And: {
- (__ VU).set(kScratchReg, VSew::E8, Vlmul::m1);
- __ vand_vv(i.OutputSimd128Register(), i.InputSimd128Register(0),
- i.InputSimd128Register(1));
+ case kRiscvI8x16ExtractLaneU: {
+ __ VU.set(kScratchReg, E8, m1);
+ __ vslidedown_vi(kSimd128ScratchReg, i.InputSimd128Register(0),
+ i.InputInt8(1));
+ __ vmv_xs(i.OutputRegister(), kSimd128ScratchReg);
+ __ slli(i.OutputRegister(), i.OutputRegister(), 64 - 8);
+ __ srli(i.OutputRegister(), i.OutputRegister(), 64 - 8);
break;
}
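
The ExtractLaneU cases above move the lane into a scalar register with vmv_xs, which sign-extends, and then zero-extend it with an slli/srli pair by 64 minus the lane width. The shift trick on its own, as a scalar sketch (plain C++):

    #include <cassert>
    #include <cstdint>

    // Clear everything above `bits` by shifting the value to the top of the
    // register and logically back down.
    uint64_t ZeroExtendLow(uint64_t value, int bits) {
      return (value << (64 - bits)) >> (64 - bits);
    }

    int main() {
      // A sign-extended 8-bit lane 0xFF arrives as all-ones; the unsigned
      // extract must produce plain 0xFF.
      assert(ZeroExtendLow(0xFFFFFFFFFFFFFFFFull, 8) == 0xFF);
      assert(ZeroExtendLow(0xFFFFFFFFFFFF8001ull, 16) == 0x8001);
      return 0;
    }
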
- case kRiscvS128Or: {
- (__ VU).set(kScratchReg, VSew::E8, Vlmul::m1);
- __ vor_vv(i.OutputSimd128Register(), i.InputSimd128Register(0),
- i.InputSimd128Register(1));
+ case kRiscvI8x16ExtractLaneS: {
+ __ VU.set(kScratchReg, E8, m1);
+ __ vslidedown_vi(kSimd128ScratchReg, i.InputSimd128Register(0),
+ i.InputInt8(1));
+ __ vmv_xs(i.OutputRegister(), kSimd128ScratchReg);
break;
}
- case kRiscvS128Xor: {
- (__ VU).set(kScratchReg, VSew::E8, Vlmul::m1);
- __ vxor_vv(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ case kRiscvI16x8ExtractLaneU: {
+ __ VU.set(kScratchReg, E16, m1);
+ __ vslidedown_vi(kSimd128ScratchReg, i.InputSimd128Register(0),
+ i.InputInt8(1));
+ __ vmv_xs(i.OutputRegister(), kSimd128ScratchReg);
+ __ slli(i.OutputRegister(), i.OutputRegister(), 64 - 16);
+ __ srli(i.OutputRegister(), i.OutputRegister(), 64 - 16);
+ break;
+ }
+ case kRiscvI16x8ExtractLaneS: {
+ __ VU.set(kScratchReg, E16, m1);
+ __ vslidedown_vi(kSimd128ScratchReg, i.InputSimd128Register(0),
+ i.InputInt8(1));
+ __ vmv_xs(i.OutputRegister(), kSimd128ScratchReg);
+ break;
+ }
+ case kRiscvI8x16ShrU: {
+ __ VU.set(kScratchReg, E8, m1);
+ if (instr->InputAt(1)->IsRegister()) {
+ __ andi(i.InputRegister(1), i.InputRegister(1), 8 - 1);
+ __ vsrl_vx(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputRegister(1));
+ } else {
+ __ vsrl_vi(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputInt5(1) % 8);
+ }
+ break;
+ }
+ case kRiscvI16x8ShrU: {
+ __ VU.set(kScratchReg, E16, m1);
+ if (instr->InputAt(1)->IsRegister()) {
+ __ andi(i.InputRegister(1), i.InputRegister(1), 16 - 1);
+ __ vsrl_vx(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputRegister(1));
+ } else {
+ __ vsrl_vi(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputInt5(1) % 16);
+ }
+ break;
+ }
+ case kRiscvI32x4Mul: {
+ __ VU.set(kScratchReg, E32, m1);
+ __ vmul_vv(i.OutputSimd128Register(), i.InputSimd128Register(0),
i.InputSimd128Register(1));
break;
}
- case kRiscvS128Not: {
- (__ VU).set(kScratchReg, VSew::E8, Vlmul::m1);
- __ vnot_vv(i.OutputSimd128Register(), i.InputSimd128Register(0));
+ case kRiscvI32x4TruncSatF64x2SZero: {
+ __ VU.set(kScratchReg, E64, m1);
+ __ vmv_vx(kSimd128ScratchReg, zero_reg);
+ __ vmfeq_vv(v0, i.InputSimd128Register(0), i.InputSimd128Register(0));
+ __ vmv_vv(kSimd128ScratchReg3, i.InputSimd128Register(0));
+ __ VU.set(kScratchReg, E32, m1);
+ __ VU.set(RoundingMode::RTZ);
+ __ vfncvt_x_f_w(kSimd128ScratchReg, kSimd128ScratchReg3, MaskType::Mask);
+ __ vmv_vv(i.OutputSimd128Register(), kSimd128ScratchReg);
break;
}
- case kRiscvS128AndNot: {
- (__ VU).set(kScratchReg, VSew::E8, Vlmul::m1);
- __ vnot_vv(i.OutputSimd128Register(), i.InputSimd128Register(1));
- __ vand_vv(i.OutputSimd128Register(), i.InputSimd128Register(0),
- i.OutputSimd128Register());
+ case kRiscvI32x4TruncSatF64x2UZero: {
+ __ VU.set(kScratchReg, E64, m1);
+ __ vmv_vx(kSimd128ScratchReg, zero_reg);
+ __ vmfeq_vv(v0, i.InputSimd128Register(0), i.InputSimd128Register(0));
+ __ vmv_vv(kSimd128ScratchReg3, i.InputSimd128Register(0));
+ __ VU.set(kScratchReg, E32, m1);
+ __ VU.set(RoundingMode::RTZ);
+ __ vfncvt_xu_f_w(kSimd128ScratchReg, kSimd128ScratchReg3, MaskType::Mask);
+ __ vmv_vv(i.OutputSimd128Register(), kSimd128ScratchReg);
break;
}
- case kRiscvI32x4ExtractLane: {
- __ WasmRvvExtractLane(i.OutputRegister(), i.InputSimd128Register(0),
- i.InputInt8(1), E32, m1);
+ case kRiscvI32x4ShrU: {
+ __ VU.set(kScratchReg, E32, m1);
+ if (instr->InputAt(1)->IsRegister()) {
+ __ andi(i.InputRegister(1), i.InputRegister(1), 32 - 1);
+ __ vsrl_vx(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputRegister(1));
+ } else {
+ __ vsrl_vi(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputInt5(1) % 32);
+ }
break;
}
- case kRiscvI8x16Splat: {
- (__ VU).set(kScratchReg, E8, m1);
- __ vmv_vx(i.OutputSimd128Register(), i.InputRegister(0));
+ case kRiscvI64x2ShrU: {
+ __ VU.set(kScratchReg, E64, m1);
+ if (instr->InputAt(1)->IsRegister()) {
+ __ andi(i.InputRegister(1), i.InputRegister(1), 64 - 1);
+ __ vsrl_vx(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputRegister(1));
+ } else {
+ if (is_uint5(i.InputInt6(1) % 64)) {
+ __ vsrl_vi(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputInt6(1) % 64);
+ } else {
+ __ li(kScratchReg, i.InputInt6(1) % 64);
+ __ vsrl_vx(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ kScratchReg);
+ }
+ }
break;
}
- case kRiscvI16x8Splat: {
- (__ VU).set(kScratchReg, E16, m1);
- __ vmv_vx(i.OutputSimd128Register(), i.InputRegister(0));
+ case kRiscvI8x16ShrS: {
+ __ VU.set(kScratchReg, E8, m1);
+ if (instr->InputAt(1)->IsRegister()) {
+ __ andi(i.InputRegister(1), i.InputRegister(1), 8 - 1);
+ __ vsra_vx(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputRegister(1));
+ } else {
+ __ vsra_vi(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputInt5(1) % 8);
+ }
break;
}
- case kRiscvI32x4Splat: {
- (__ VU).set(kScratchReg, E32, m1);
- __ vmv_vx(i.OutputSimd128Register(), i.InputRegister(0));
+ case kRiscvI16x8ShrS: {
+ __ VU.set(kScratchReg, E16, m1);
+ if (instr->InputAt(1)->IsRegister()) {
+ __ andi(i.InputRegister(1), i.InputRegister(1), 16 - 1);
+ __ vsra_vx(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputRegister(1));
+ } else {
+ __ vsra_vi(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputInt5(1) % 16);
+ }
break;
}
- case kRiscvI64x2Splat: {
- (__ VU).set(kScratchReg, E64, m1);
- __ vmv_vx(i.OutputSimd128Register(), i.InputRegister(0));
+ case kRiscvI32x4ShrS: {
+ __ VU.set(kScratchReg, E32, m1);
+ if (instr->InputAt(1)->IsRegister()) {
+ __ andi(i.InputRegister(1), i.InputRegister(1), 32 - 1);
+ __ vsra_vx(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputRegister(1));
+ } else {
+ __ vsra_vi(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputInt5(1) % 32);
+ }
break;
}
- case kRiscvF32x4Splat: {
- (__ VU).set(kScratchReg, E32, m1);
- __ fmv_x_w(kScratchReg, i.InputSingleRegister(0));
- __ vmv_vx(i.OutputSimd128Register(), kScratchReg);
+ case kRiscvI64x2ShrS: {
+ __ VU.set(kScratchReg, E64, m1);
+ if (instr->InputAt(1)->IsRegister()) {
+ __ andi(i.InputRegister(1), i.InputRegister(1), 64 - 1);
+ __ vsra_vx(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputRegister(1));
+ } else {
+ if (is_uint5(i.InputInt6(1) % 64)) {
+ __ vsra_vi(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputInt6(1) % 64);
+ } else {
+ __ li(kScratchReg, i.InputInt6(1) % 64);
+ __ vsra_vx(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ kScratchReg);
+ }
+ }
break;
}
- case kRiscvF64x2Splat: {
- (__ VU).set(kScratchReg, E64, m1);
- __ fmv_x_d(kScratchReg, i.InputDoubleRegister(0));
- __ vmv_vx(i.OutputSimd128Register(), kScratchReg);
+ case kRiscvI32x4ExtractLane: {
+ __ WasmRvvExtractLane(i.OutputRegister(), i.InputSimd128Register(0),
+ i.InputInt8(1), E32, m1);
break;
}
case kRiscvI32x4Abs: {
__ VU.set(kScratchReg, E32, m1);
+ __ vmv_vv(i.OutputSimd128Register(), i.InputSimd128Register(0));
+ __ vmv_vx(kSimd128RegZero, zero_reg);
+ __ vmslt_vv(v0, i.InputSimd128Register(0), kSimd128RegZero);
+ __ vneg_vv(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ MaskType::Mask);
+ break;
+ }
+ case kRiscvI16x8Abs: {
+ __ VU.set(kScratchReg, E16, m1);
+ __ vmv_vv(i.OutputSimd128Register(), i.InputSimd128Register(0));
__ vmv_vx(kSimd128RegZero, zero_reg);
+ __ vmslt_vv(v0, i.InputSimd128Register(0), kSimd128RegZero);
+ __ vneg_vv(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ MaskType::Mask);
+ break;
+ }
+ case kRiscvI8x16Abs: {
+ __ VU.set(kScratchReg, E8, m1);
+ __ vmv_vx(kSimd128RegZero, zero_reg);
+ __ vmv_vv(i.OutputSimd128Register(), i.InputSimd128Register(0));
+ __ vmslt_vv(v0, i.InputSimd128Register(0), kSimd128RegZero);
+ __ vneg_vv(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ MaskType::Mask);
+ break;
+ }
+ case kRiscvI64x2Abs: {
+ __ VU.set(kScratchReg, E64, m1);
__ vmv_vv(i.OutputSimd128Register(), i.InputSimd128Register(0));
+ __ vmv_vx(kSimd128RegZero, zero_reg);
__ vmslt_vv(v0, i.InputSimd128Register(0), kSimd128RegZero);
- __ vsub_vv(i.OutputSimd128Register(), kSimd128RegZero,
- i.InputSimd128Register(0), Mask);
+ __ vneg_vv(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ MaskType::Mask);
+ break;
+ }
+ case kRiscvI64x2ExtractLane: {
+ __ WasmRvvExtractLane(i.OutputRegister(), i.InputSimd128Register(0),
+ i.InputInt8(1), E64, m1);
break;
}
case kRiscvI8x16Eq: {
@@ -2184,47 +2545,51 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
case kRiscvI8x16Shl: {
__ VU.set(kScratchReg, E8, m1);
if (instr->InputAt(1)->IsRegister()) {
+ __ andi(i.InputRegister(1), i.InputRegister(1), 8 - 1);
__ vsll_vx(i.OutputSimd128Register(), i.InputSimd128Register(0),
i.InputRegister(1));
} else {
__ vsll_vi(i.OutputSimd128Register(), i.InputSimd128Register(0),
- i.InputInt3(1));
+ i.InputInt5(1) % 8);
}
break;
}
case kRiscvI16x8Shl: {
__ VU.set(kScratchReg, E16, m1);
if (instr->InputAt(1)->IsRegister()) {
+ __ andi(i.InputRegister(1), i.InputRegister(1), 16 - 1);
__ vsll_vx(i.OutputSimd128Register(), i.InputSimd128Register(0),
i.InputRegister(1));
} else {
__ vsll_vi(i.OutputSimd128Register(), i.InputSimd128Register(0),
- i.InputInt4(1));
+ i.InputInt5(1) % 16);
}
break;
}
case kRiscvI32x4Shl: {
__ VU.set(kScratchReg, E32, m1);
if (instr->InputAt(1)->IsRegister()) {
+ __ andi(i.InputRegister(1), i.InputRegister(1), 32 - 1);
__ vsll_vx(i.OutputSimd128Register(), i.InputSimd128Register(0),
i.InputRegister(1));
} else {
__ vsll_vi(i.OutputSimd128Register(), i.InputSimd128Register(0),
- i.InputInt5(1));
+ i.InputInt5(1) % 32);
}
break;
}
case kRiscvI64x2Shl: {
__ VU.set(kScratchReg, E64, m1);
if (instr->InputAt(1)->IsRegister()) {
+ __ andi(i.InputRegister(1), i.InputRegister(1), 64 - 1);
__ vsll_vx(i.OutputSimd128Register(), i.InputSimd128Register(0),
i.InputRegister(1));
} else {
- if (is_int5(i.InputInt6(1))) {
+ if (is_int5(i.InputInt6(1) % 64)) {
__ vsll_vi(i.OutputSimd128Register(), i.InputSimd128Register(0),
- i.InputInt6(1));
+ i.InputInt6(1) % 64);
} else {
- __ li(kScratchReg, i.InputInt6(1));
+ __ li(kScratchReg, i.InputInt6(1) % 64);
__ vsll_vx(i.OutputSimd128Register(), i.InputSimd128Register(0),
kScratchReg);
}
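
The SIMD shift cases in this file now reduce the shift count modulo the lane width (an `andi` with width - 1 for register counts, `% width` for immediates), matching the wasm rule that shift counts are taken mod the lane size. A one-lane scalar sketch of that rule (plain C++):

    #include <cassert>
    #include <cstdint>

    // Wasm SIMD shifts use the count modulo the lane width, so shifting an
    // 8-bit lane by 9 shifts by 1, and shifting by 8 is a no-op.
    uint8_t ShlLane8(uint8_t lane, uint32_t shift) {
      return static_cast<uint8_t>(lane << (shift & (8 - 1)));
    }

    int main() {
      assert(ShlLane8(0x01, 1) == 0x02);
      assert(ShlLane8(0x01, 9) == 0x02);  // 9 % 8 == 1
      assert(ShlLane8(0x01, 8) == 0x01);  // 8 % 8 == 0
      return 0;
    }
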
@@ -2234,9 +2599,10 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
case kRiscvI8x16ReplaceLane: {
Simd128Register src = i.InputSimd128Register(0);
Simd128Register dst = i.OutputSimd128Register();
- __ VU.set(kScratchReg, E32, m1);
+ __ VU.set(kScratchReg, E64, m1);
__ li(kScratchReg, 0x1 << i.InputInt8(1));
__ vmv_sx(v0, kScratchReg);
+ __ VU.set(kScratchReg, E8, m1);
__ vmerge_vx(dst, i.InputRegister(2), src);
break;
}
@@ -2401,13 +2767,29 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
}
__ vrgather_vv(dst, src0, kSimd128ScratchReg);
__ vadd_vi(kSimd128ScratchReg, kSimd128ScratchReg, -16);
- __ vrgather_vv(kSimd128ScratchReg, src1, kSimd128ScratchReg);
- __ vor_vv(dst, dst, kSimd128ScratchReg);
+ __ vrgather_vv(kSimd128ScratchReg3, src1, kSimd128ScratchReg);
+ __ vor_vv(dst, dst, kSimd128ScratchReg3);
break;
}
- case kRiscvF32x4Abs: {
- __ VU.set(kScratchReg, VSew::E32, Vlmul::m1);
- __ vfabs_vv(i.OutputSimd128Register(), i.InputSimd128Register(0));
+ case kRiscvF64x2NearestInt: {
+ __ Round_d(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ kScratchReg, kSimd128ScratchReg);
+ break;
+ }
+ case kRiscvF64x2Trunc: {
+ __ Trunc_d(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ kScratchReg, kSimd128ScratchReg);
+ break;
+ }
+ case kRiscvF64x2Sqrt: {
+ __ VU.set(kScratchReg, E64, m1);
+ __ vfsqrt_v(i.OutputSimd128Register(), i.InputSimd128Register(0));
+ break;
+ }
+ case kRiscvF64x2Splat: {
+ (__ VU).set(kScratchReg, E64, m1);
+ __ fmv_x_d(kScratchReg, i.InputDoubleRegister(0));
+ __ vmv_vx(i.OutputSimd128Register(), kScratchReg);
break;
}
case kRiscvF64x2Abs: {
@@ -2415,44 +2797,221 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
__ vfabs_vv(i.OutputSimd128Register(), i.InputSimd128Register(0));
break;
}
- case kRiscvF32x4Neg: {
- __ VU.set(kScratchReg, VSew::E32, Vlmul::m1);
- __ vfneg_vv(i.OutputSimd128Register(), i.InputSimd128Register(0));
- break;
- }
case kRiscvF64x2Neg: {
__ VU.set(kScratchReg, VSew::E64, Vlmul::m1);
__ vfneg_vv(i.OutputSimd128Register(), i.InputSimd128Register(0));
break;
}
- case kRiscvF32x4DemoteF64x2Zero: {
+ case kRiscvF64x2Add: {
+ __ VU.set(kScratchReg, E64, m1);
+ __ vfadd_vv(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputSimd128Register(1));
+ break;
+ }
+ case kRiscvF64x2Sub: {
+ __ VU.set(kScratchReg, E64, m1);
+ __ vfsub_vv(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputSimd128Register(1));
+ break;
+ }
+ case kRiscvF64x2Ceil: {
+ __ Ceil_d(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ kScratchReg, kSimd128ScratchReg);
+ break;
+ }
+ case kRiscvF64x2Floor: {
+ __ Floor_d(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ kScratchReg, kSimd128ScratchReg);
+ break;
+ }
+ case kRiscvF64x2Ne: {
+ __ VU.set(kScratchReg, E64, m1);
+ __ vmfne_vv(v0, i.InputSimd128Register(1), i.InputSimd128Register(0));
+ __ vmv_vx(i.OutputSimd128Register(), zero_reg);
+ __ vmerge_vi(i.OutputSimd128Register(), -1, i.OutputSimd128Register());
+ break;
+ }
+ case kRiscvF64x2Eq: {
+ __ VU.set(kScratchReg, E64, m1);
+ __ vmfeq_vv(v0, i.InputSimd128Register(1), i.InputSimd128Register(0));
+ __ vmv_vx(i.OutputSimd128Register(), zero_reg);
+ __ vmerge_vi(i.OutputSimd128Register(), -1, i.OutputSimd128Register());
+ break;
+ }
+ case kRiscvF64x2ReplaceLane: {
+ __ VU.set(kScratchReg, E64, m1);
+ __ li(kScratchReg, 0x1 << i.InputInt8(1));
+ __ vmv_sx(v0, kScratchReg);
+ __ fmv_x_d(kScratchReg, i.InputSingleRegister(2));
+ __ vmerge_vx(i.OutputSimd128Register(), kScratchReg,
+ i.InputSimd128Register(0));
+ break;
+ }
+ case kRiscvF64x2Lt: {
+ __ VU.set(kScratchReg, E64, m1);
+ __ vmflt_vv(v0, i.InputSimd128Register(0), i.InputSimd128Register(1));
+ __ vmv_vx(i.OutputSimd128Register(), zero_reg);
+ __ vmerge_vi(i.OutputSimd128Register(), -1, i.OutputSimd128Register());
+ break;
+ }
+ case kRiscvF64x2Le: {
+ __ VU.set(kScratchReg, E64, m1);
+ __ vmfle_vv(v0, i.InputSimd128Register(0), i.InputSimd128Register(1));
+ __ vmv_vx(i.OutputSimd128Register(), zero_reg);
+ __ vmerge_vi(i.OutputSimd128Register(), -1, i.OutputSimd128Register());
+ break;
+ }
+ case kRiscvF64x2Pmax: {
+ __ VU.set(kScratchReg, E64, m1);
+ __ vmflt_vv(v0, i.InputSimd128Register(0), i.InputSimd128Register(1));
+ __ vmerge_vv(i.OutputSimd128Register(), i.InputSimd128Register(1),
+ i.InputSimd128Register(0));
+ break;
+ }
+ case kRiscvF64x2Pmin: {
+ __ VU.set(kScratchReg, E64, m1);
+ __ vmflt_vv(v0, i.InputSimd128Register(1), i.InputSimd128Register(0));
+ __ vmerge_vv(i.OutputSimd128Register(), i.InputSimd128Register(1),
+ i.InputSimd128Register(0));
+ break;
+ }
+ case kRiscvF64x2Min: {
+ __ VU.set(kScratchReg, E64, m1);
+ const int64_t kNaN = 0x7ff8000000000000L;
+ __ vmfeq_vv(v0, i.InputSimd128Register(0), i.InputSimd128Register(0));
+ __ vmfeq_vv(kSimd128ScratchReg, i.InputSimd128Register(1),
+ i.InputSimd128Register(1));
+ __ vand_vv(v0, v0, kSimd128ScratchReg);
+ __ li(kScratchReg, kNaN);
+ __ vmv_vx(kSimd128ScratchReg, kScratchReg);
+ __ vfmin_vv(kSimd128ScratchReg, i.InputSimd128Register(1),
+ i.InputSimd128Register(0), Mask);
+ __ vmv_vv(i.OutputSimd128Register(), kSimd128ScratchReg);
+ break;
+ }
+ case kRiscvF64x2Max: {
+ __ VU.set(kScratchReg, E64, m1);
+ const int64_t kNaN = 0x7ff8000000000000L;
+ __ vmfeq_vv(v0, i.InputSimd128Register(0), i.InputSimd128Register(0));
+ __ vmfeq_vv(kSimd128ScratchReg, i.InputSimd128Register(1),
+ i.InputSimd128Register(1));
+ __ vand_vv(v0, v0, kSimd128ScratchReg);
+ __ li(kScratchReg, kNaN);
+ __ vmv_vx(kSimd128ScratchReg, kScratchReg);
+ __ vfmax_vv(kSimd128ScratchReg, i.InputSimd128Register(1),
+ i.InputSimd128Register(0), Mask);
+ __ vmv_vv(i.OutputSimd128Register(), kSimd128ScratchReg);
+ break;
+ }
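
The F64x2Min/Max cases above use two vmfeq self-compares to build a mask of lanes where neither input is NaN, pre-fill the result with the canonical quiet NaN (0x7ff8000000000000), and run vfmin/vfmax only on the masked lanes, so any NaN input produces NaN as wasm requires. A per-lane scalar sketch of the min half (plain C++; the signed-zero subtleties of vfmin are left out of this model):

    #include <cassert>
    #include <cmath>
    #include <limits>

    // If either operand is NaN the lane becomes the canonical NaN; otherwise
    // take the ordinary minimum.
    double WasmF64Min(double a, double b) {
      const bool both_ordered = (a == a) && (b == b);  // the vmfeq self-compares
      if (!both_ordered) {
        return std::numeric_limits<double>::quiet_NaN();  // the 0x7ff8... fill
      }
      return a < b ? a : b;
    }

    int main() {
      assert(WasmF64Min(1.0, 2.0) == 1.0);
      assert(std::isnan(WasmF64Min(1.0, std::nan(""))));
      assert(std::isnan(WasmF64Min(std::nan(""), 2.0)));
      return 0;
    }
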
+ case kRiscvF64x2Div: {
+ __ VU.set(kScratchReg, E64, m1);
+ __ vfdiv_vv(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputSimd128Register(1));
+ break;
+ }
+ case kRiscvF64x2Mul: {
+ __ VU.set(kScratchReg, E64, m1);
+ __ VU.set(RoundingMode::RTZ);
+ __ vfmul_vv(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputSimd128Register(1));
+ break;
+ }
+ case kRiscvF64x2ExtractLane: {
+ __ VU.set(kScratchReg, E64, m1);
+ if (is_uint5(i.InputInt8(1))) {
+ __ vslidedown_vi(kSimd128ScratchReg, i.InputSimd128Register(0),
+ i.InputInt8(1));
+ } else {
+ __ li(kScratchReg, i.InputInt8(1));
+ __ vslidedown_vx(kSimd128ScratchReg, i.InputSimd128Register(0),
+ kScratchReg);
+ }
+ __ vfmv_fs(i.OutputDoubleRegister(), kSimd128ScratchReg);
+ break;
+ }
+ case kRiscvF64x2PromoteLowF32x4: {
+ __ VU.set(kScratchReg, E32, mf2);
+ if (i.OutputSimd128Register() != i.InputSimd128Register(0)) {
+ __ vfwcvt_f_f_v(i.OutputSimd128Register(), i.InputSimd128Register(0));
+ } else {
+ __ vfwcvt_f_f_v(kSimd128ScratchReg3, i.InputSimd128Register(0));
+ __ VU.set(kScratchReg, E64, m1);
+ __ vmv_vv(i.OutputSimd128Register(), kSimd128ScratchReg3);
+ }
+ break;
+ }
+ case kRiscvF64x2ConvertLowI32x4S: {
+ __ VU.set(kScratchReg, E32, mf2);
+ if (i.OutputSimd128Register() != i.InputSimd128Register(0)) {
+ __ vfwcvt_f_x_v(i.OutputSimd128Register(), i.InputSimd128Register(0));
+ } else {
+ __ vfwcvt_f_x_v(kSimd128ScratchReg3, i.InputSimd128Register(0));
+ __ VU.set(kScratchReg, E64, m1);
+ __ vmv_vv(i.OutputSimd128Register(), kSimd128ScratchReg3);
+ }
+ break;
+ }
+ case kRiscvF64x2ConvertLowI32x4U: {
+ __ VU.set(kScratchReg, E32, mf2);
+ if (i.OutputSimd128Register() != i.InputSimd128Register(0)) {
+ __ vfwcvt_f_xu_v(i.OutputSimd128Register(), i.InputSimd128Register(0));
+ } else {
+ __ vfwcvt_f_xu_v(kSimd128ScratchReg3, i.InputSimd128Register(0));
+ __ VU.set(kScratchReg, E64, m1);
+ __ vmv_vv(i.OutputSimd128Register(), kSimd128ScratchReg3);
+ }
+ break;
+ }
+ case kRiscvF32x4ExtractLane: {
__ VU.set(kScratchReg, E32, m1);
+ __ vslidedown_vi(kSimd128ScratchReg, i.InputSimd128Register(0),
+ i.InputInt8(1));
+ __ vfmv_fs(i.OutputDoubleRegister(), kSimd128ScratchReg);
+ break;
+ }
+ case kRiscvF32x4Trunc: {
+ __ Trunc_f(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ kScratchReg, kSimd128ScratchReg);
+ break;
+ }
+ case kRiscvF32x4NearestInt: {
+ __ Round_f(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ kScratchReg, kSimd128ScratchReg);
+ break;
+ }
+ case kRiscvF32x4DemoteF64x2Zero: {
+ __ VU.set(kScratchReg, E32, mf2);
__ vfncvt_f_f_w(i.OutputSimd128Register(), i.InputSimd128Register(0));
+ __ VU.set(kScratchReg, E32, m1);
__ vmv_vi(v0, 12);
__ vmerge_vx(i.OutputSimd128Register(), zero_reg,
i.OutputSimd128Register());
break;
}
- case kRiscvF32x4Add: {
- __ VU.set(kScratchReg, E32, m1);
- __ vfadd_vv(i.OutputSimd128Register(), i.InputSimd128Register(0),
- i.InputSimd128Register(1));
+ case kRiscvF32x4Neg: {
+ __ VU.set(kScratchReg, VSew::E32, Vlmul::m1);
+ __ vfneg_vv(i.OutputSimd128Register(), i.InputSimd128Register(0));
break;
}
- case kRiscvF32x4Sub: {
- __ VU.set(kScratchReg, E32, m1);
- __ vfsub_vv(i.OutputSimd128Register(), i.InputSimd128Register(0),
- i.InputSimd128Register(1));
+ case kRiscvF32x4Abs: {
+ __ VU.set(kScratchReg, VSew::E32, Vlmul::m1);
+ __ vfabs_vv(i.OutputSimd128Register(), i.InputSimd128Register(0));
break;
}
- case kRiscvF64x2Add: {
- __ VU.set(kScratchReg, E64, m1);
+ case kRiscvF32x4Splat: {
+ (__ VU).set(kScratchReg, E32, m1);
+ __ fmv_x_w(kScratchReg, i.InputSingleRegister(0));
+ __ vmv_vx(i.OutputSimd128Register(), kScratchReg);
+ break;
+ }
+ case kRiscvF32x4Add: {
+ __ VU.set(kScratchReg, E32, m1);
__ vfadd_vv(i.OutputSimd128Register(), i.InputSimd128Register(0),
i.InputSimd128Register(1));
break;
}
- case kRiscvF64x2Sub: {
- __ VU.set(kScratchReg, E64, m1);
+ case kRiscvF32x4Sub: {
+ __ VU.set(kScratchReg, E32, m1);
__ vfsub_vv(i.OutputSimd128Register(), i.InputSimd128Register(0),
i.InputSimd128Register(1));
break;
@@ -2462,32 +3021,11 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
kScratchReg, kSimd128ScratchReg);
break;
}
- case kRiscvF64x2Ceil: {
- __ Ceil_d(i.OutputSimd128Register(), i.InputSimd128Register(0),
- kScratchReg, kSimd128ScratchReg);
- break;
- }
case kRiscvF32x4Floor: {
__ Floor_f(i.OutputSimd128Register(), i.InputSimd128Register(0),
kScratchReg, kSimd128ScratchReg);
break;
}
- case kRiscvF64x2Floor: {
- __ Floor_d(i.OutputSimd128Register(), i.InputSimd128Register(0),
- kScratchReg, kSimd128ScratchReg);
- break;
- }
- case kRiscvS128Select: {
- __ VU.set(kScratchReg, E8, m1);
- __ vand_vv(kSimd128ScratchReg, i.InputSimd128Register(1),
- i.InputSimd128Register(0));
- __ vnot_vv(kSimd128ScratchReg2, i.InputSimd128Register(0));
- __ vand_vv(kSimd128ScratchReg2, i.InputSimd128Register(2),
- kSimd128ScratchReg2);
- __ vor_vv(i.OutputSimd128Register(), kSimd128ScratchReg,
- kSimd128ScratchReg2);
- break;
- }
case kRiscvF32x4UConvertI32x4: {
__ VU.set(kScratchReg, E32, m1);
__ VU.set(RoundingMode::RTZ);
@@ -2503,8 +3041,8 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
case kRiscvF32x4Div: {
__ VU.set(kScratchReg, E32, m1);
__ VU.set(RoundingMode::RTZ);
- __ vfdiv_vv(i.OutputSimd128Register(), i.InputSimd128Register(1),
- i.InputSimd128Register(0));
+ __ vfdiv_vv(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputSimd128Register(1));
break;
}
case kRiscvF32x4Mul: {
@@ -2528,20 +3066,48 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
__ vmerge_vi(i.OutputSimd128Register(), -1, i.OutputSimd128Register());
break;
}
+ case kRiscvF32x4ReplaceLane: {
+ __ VU.set(kScratchReg, E32, m1);
+ __ li(kScratchReg, 0x1 << i.InputInt8(1));
+ __ vmv_sx(v0, kScratchReg);
+ __ fmv_x_w(kScratchReg, i.InputSingleRegister(2));
+ __ vmerge_vx(i.OutputSimd128Register(), kScratchReg,
+ i.InputSimd128Register(0));
+ break;
+ }
case kRiscvF32x4Lt: {
__ VU.set(kScratchReg, E32, m1);
- __ vmflt_vv(v0, i.InputSimd128Register(1), i.InputSimd128Register(0));
+ __ vmflt_vv(v0, i.InputSimd128Register(0), i.InputSimd128Register(1));
__ vmv_vx(i.OutputSimd128Register(), zero_reg);
__ vmerge_vi(i.OutputSimd128Register(), -1, i.OutputSimd128Register());
break;
}
case kRiscvF32x4Le: {
__ VU.set(kScratchReg, E32, m1);
- __ vmfle_vv(v0, i.InputSimd128Register(1), i.InputSimd128Register(0));
+ __ vmfle_vv(v0, i.InputSimd128Register(0), i.InputSimd128Register(1));
__ vmv_vx(i.OutputSimd128Register(), zero_reg);
__ vmerge_vi(i.OutputSimd128Register(), -1, i.OutputSimd128Register());
break;
}
+ case kRiscvF32x4Pmax: {
+ __ VU.set(kScratchReg, E32, m1);
+ __ vmflt_vv(v0, i.InputSimd128Register(0), i.InputSimd128Register(1));
+ __ vmerge_vv(i.OutputSimd128Register(), i.InputSimd128Register(1),
+ i.InputSimd128Register(0));
+ break;
+ }
+ case kRiscvF32x4Pmin: {
+ __ VU.set(kScratchReg, E32, m1);
+ __ vmflt_vv(v0, i.InputSimd128Register(1), i.InputSimd128Register(0));
+ __ vmerge_vv(i.OutputSimd128Register(), i.InputSimd128Register(1),
+ i.InputSimd128Register(0));
+ break;
+ }
+ case kRiscvF32x4Sqrt: {
+ __ VU.set(kScratchReg, E32, m1);
+ __ vfsqrt_v(i.OutputSimd128Register(), i.InputSimd128Register(0));
+ break;
+ }
case kRiscvF32x4Max: {
__ VU.set(kScratchReg, E32, m1);
const int32_t kNaN = 0x7FC00000;
@@ -2570,6 +3136,218 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
__ vmv_vv(i.OutputSimd128Register(), kSimd128ScratchReg);
break;
}
+ case kRiscvI64x2SConvertI32x4Low: {
+ __ VU.set(kScratchReg, E64, m1);
+ __ vmv_vv(kSimd128ScratchReg, i.InputSimd128Register(0));
+ __ vsext_vf2(i.OutputSimd128Register(), kSimd128ScratchReg);
+
+ break;
+ }
+ case kRiscvI64x2SConvertI32x4High: {
+ __ VU.set(kScratchReg, E32, m1);
+ __ vslidedown_vi(kSimd128ScratchReg, i.InputSimd128Register(0), 2);
+ __ VU.set(kScratchReg, E64, m1);
+ __ vsext_vf2(i.OutputSimd128Register(), kSimd128ScratchReg);
+ break;
+ }
+ case kRiscvI64x2UConvertI32x4Low: {
+ __ VU.set(kScratchReg, E64, m1);
+ __ vmv_vv(kSimd128ScratchReg, i.InputSimd128Register(0));
+ __ vzext_vf2(i.OutputSimd128Register(), kSimd128ScratchReg);
+ break;
+ }
+ case kRiscvI64x2UConvertI32x4High: {
+ __ VU.set(kScratchReg, E32, m1);
+ __ vslidedown_vi(kSimd128ScratchReg, i.InputSimd128Register(0), 2);
+ __ VU.set(kScratchReg, E64, m1);
+ __ vzext_vf2(i.OutputSimd128Register(), kSimd128ScratchReg);
+ break;
+ }
+ case kRiscvI32x4SConvertI16x8Low: {
+ __ VU.set(kScratchReg, E32, m1);
+ __ vmv_vv(kSimd128ScratchReg, i.InputSimd128Register(0));
+ __ vsext_vf2(i.OutputSimd128Register(), kSimd128ScratchReg);
+ break;
+ }
+ case kRiscvI32x4SConvertI16x8High: {
+ __ VU.set(kScratchReg, E16, m1);
+ __ vslidedown_vi(kSimd128ScratchReg, i.InputSimd128Register(0), 4);
+ __ VU.set(kScratchReg, E32, m1);
+ __ vsext_vf2(i.OutputSimd128Register(), kSimd128ScratchReg);
+ break;
+ }
+ case kRiscvI32x4SConvertF32x4: {
+ __ VU.set(kScratchReg, E32, m1);
+ __ VU.set(RoundingMode::RTZ);
+ __ vmfeq_vv(v0, i.InputSimd128Register(0), i.InputSimd128Register(0));
+ if (i.OutputSimd128Register() != i.InputSimd128Register(0)) {
+ __ vmv_vx(i.OutputSimd128Register(), zero_reg);
+ __ vfcvt_x_f_v(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ Mask);
+ } else {
+ __ vmv_vx(kSimd128ScratchReg, zero_reg);
+ __ vfcvt_x_f_v(kSimd128ScratchReg, i.InputSimd128Register(0), Mask);
+ __ vmv_vv(i.OutputSimd128Register(), kSimd128ScratchReg);
+ }
+ break;
+ }
+ case kRiscvI32x4UConvertF32x4: {
+ __ VU.set(kScratchReg, E32, m1);
+ __ VU.set(RoundingMode::RTZ);
+ __ vmfeq_vv(v0, i.InputSimd128Register(0), i.InputSimd128Register(0));
+ if (i.OutputSimd128Register() != i.InputSimd128Register(0)) {
+ __ vmv_vx(i.OutputSimd128Register(), zero_reg);
+ __ vfcvt_xu_f_v(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ Mask);
+ } else {
+ __ vmv_vx(kSimd128ScratchReg, zero_reg);
+ __ vfcvt_xu_f_v(kSimd128ScratchReg, i.InputSimd128Register(0), Mask);
+ __ vmv_vv(i.OutputSimd128Register(), kSimd128ScratchReg);
+ }
+ break;
+ }
+ case kRiscvI32x4UConvertI16x8Low: {
+ __ VU.set(kScratchReg, E32, m1);
+ __ vmv_vv(kSimd128ScratchReg, i.InputSimd128Register(0));
+ __ vzext_vf2(i.OutputSimd128Register(), kSimd128ScratchReg);
+ break;
+ }
+ case kRiscvI32x4UConvertI16x8High: {
+ __ VU.set(kScratchReg, E16, m1);
+ __ vslidedown_vi(kSimd128ScratchReg, i.InputSimd128Register(0), 4);
+ __ VU.set(kScratchReg, E32, m1);
+ __ vzext_vf2(i.OutputSimd128Register(), kSimd128ScratchReg);
+ break;
+ }
+ case kRiscvI16x8SConvertI8x16Low: {
+ __ VU.set(kScratchReg, E16, m1);
+ __ vmv_vv(kSimd128ScratchReg, i.InputSimd128Register(0));
+ __ vsext_vf2(i.OutputSimd128Register(), kSimd128ScratchReg);
+ break;
+ }
+ case kRiscvI16x8SConvertI8x16High: {
+ __ VU.set(kScratchReg, E8, m1);
+ __ vslidedown_vi(kSimd128ScratchReg, i.InputSimd128Register(0), 8);
+ __ VU.set(kScratchReg, E16, m1);
+ __ vsext_vf2(i.OutputSimd128Register(), kSimd128ScratchReg);
+ break;
+ }
+ case kRiscvI16x8UConvertI8x16Low: {
+ __ VU.set(kScratchReg, E16, m1);
+ __ vmv_vv(kSimd128ScratchReg, i.InputSimd128Register(0));
+ __ vzext_vf2(i.OutputSimd128Register(), kSimd128ScratchReg);
+ break;
+ }
+ case kRiscvI16x8UConvertI8x16High: {
+ __ VU.set(kScratchReg, E8, m1);
+ __ vslidedown_vi(kSimd128ScratchReg, i.InputSimd128Register(0), 8);
+ __ VU.set(kScratchReg, E16, m1);
+ __ vzext_vf2(i.OutputSimd128Register(), kSimd128ScratchReg);
+ break;
+ }
+ case kRiscvI8x16SConvertI16x8: {
+ __ VU.set(kScratchReg, E16, m1);
+ __ vmv_vv(v26, i.InputSimd128Register(0));
+ __ vmv_vv(v27, i.InputSimd128Register(1));
+ __ VU.set(kScratchReg, E8, m1);
+ __ VU.set(RoundingMode::RNE);
+ __ vnclip_vi(i.OutputSimd128Register(), v26, 0);
+ break;
+ }
+ case kRiscvI8x16UConvertI16x8: {
+ __ VU.set(kScratchReg, E16, m1);
+ __ vmv_vv(v26, i.InputSimd128Register(0));
+ __ vmv_vv(v27, i.InputSimd128Register(1));
+ __ VU.set(kScratchReg, E16, m2);
+ __ vmax_vx(v26, v26, zero_reg);
+ __ VU.set(kScratchReg, E8, m1);
+ __ VU.set(RoundingMode::RNE);
+ __ vnclipu_vi(i.OutputSimd128Register(), v26, 0);
+ break;
+ }
+ case kRiscvI16x8SConvertI32x4: {
+ __ VU.set(kScratchReg, E32, m1);
+ __ vmv_vv(v26, i.InputSimd128Register(0));
+ __ vmv_vv(v27, i.InputSimd128Register(1));
+ __ VU.set(kScratchReg, E16, m1);
+ __ VU.set(RoundingMode::RNE);
+ __ vnclip_vi(i.OutputSimd128Register(), v26, 0);
+ break;
+ }
+ case kRiscvI16x8UConvertI32x4: {
+ __ VU.set(kScratchReg, E32, m1);
+ __ vmv_vv(v26, i.InputSimd128Register(0));
+ __ vmv_vv(v27, i.InputSimd128Register(1));
+ __ VU.set(kScratchReg, E32, m2);
+ __ vmax_vx(v26, v26, zero_reg);
+ __ VU.set(kScratchReg, E16, m1);
+ __ VU.set(RoundingMode::RNE);
+ __ vnclipu_vi(i.OutputSimd128Register(), v26, 0);
+ break;
+ }
+ ASSEMBLE_RVV_UNOP_INTEGER_VV(Neg, vneg_vv)
+ ASSEMBLE_RVV_BINOP_INTEGER(MaxU, vmaxu_vv)
+ ASSEMBLE_RVV_BINOP_INTEGER(MaxS, vmax_vv)
+ ASSEMBLE_RVV_BINOP_INTEGER(MinU, vminu_vv)
+ ASSEMBLE_RVV_BINOP_INTEGER(MinS, vmin_vv)
+ ASSEMBLE_RVV_UNOP_INTEGER_VR(Splat, vmv_vx)
+ ASSEMBLE_RVV_BINOP_INTEGER(Add, vadd_vv)
+ ASSEMBLE_RVV_BINOP_INTEGER(Sub, vsub_vv)
+ case kRiscvVwadd: {
+ __ VU.set(kScratchReg, i.InputInt8(2), i.InputInt8(3));
+ __ vwadd_vv(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputSimd128Register(1));
+ break;
+ }
+ case kRiscvVwaddu: {
+ __ VU.set(kScratchReg, i.InputInt8(2), i.InputInt8(3));
+ __ vwaddu_vv(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputSimd128Register(1));
+ break;
+ }
+ case kRiscvVwmul: {
+ __ VU.set(kScratchReg, i.InputInt8(2), i.InputInt8(3));
+ __ vwmul_vv(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputSimd128Register(1));
+ break;
+ }
+ case kRiscvVwmulu: {
+ __ VU.set(kScratchReg, i.InputInt8(2), i.InputInt8(3));
+ __ vwmulu_vv(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputSimd128Register(1));
+ break;
+ }
+ case kRiscvVmvSx: {
+ __ VU.set(kScratchReg, i.InputInt8(2), i.InputInt8(3));
+ if (instr->InputAt(0)->IsRegister()) {
+ __ vmv_sx(i.OutputSimd128Register(), i.InputRegister(0));
+ } else {
+ DCHECK(instr->InputAt(0)->IsImmediate());
+ __ li(kScratchReg, i.InputInt64(0));
+ __ vmv_sx(i.OutputSimd128Register(), kScratchReg);
+ }
+ break;
+ }
+ case kRiscvVcompress: {
+ __ VU.set(kScratchReg, i.InputInt8(2), i.InputInt8(3));
+ if (instr->InputAt(1)->IsSimd128Register()) {
+ __ vcompress_vv(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputSimd128Register(1));
+ } else {
+ DCHECK(instr->InputAt(1)->IsImmediate());
+ __ li(kScratchReg, i.InputInt64(1));
+ __ vmv_sx(v0, kScratchReg);
+ __ vcompress_vv(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ v0);
+ }
+ break;
+ }
+ case kRiscvVaddVv: {
+ __ VU.set(kScratchReg, i.InputInt8(2), i.InputInt8(3));
+ __ vadd_vv(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputSimd128Register(1));
+ break;
+ }
default:
#ifdef DEBUG
switch (arch_opcode) {
diff --git a/deps/v8/src/compiler/backend/riscv64/instruction-codes-riscv64.h b/deps/v8/src/compiler/backend/riscv64/instruction-codes-riscv64.h
index 0c8d99a8e8..307379be32 100644
--- a/deps/v8/src/compiler/backend/riscv64/instruction-codes-riscv64.h
+++ b/deps/v8/src/compiler/backend/riscv64/instruction-codes-riscv64.h
@@ -263,7 +263,6 @@ namespace compiler {
V(RiscvI32x4GeU) \
V(RiscvI32x4Abs) \
V(RiscvI32x4BitMask) \
- V(RiscvI32x4DotI16x8S) \
V(RiscvI32x4TruncSatF64x2SZero) \
V(RiscvI32x4TruncSatF64x2UZero) \
V(RiscvI16x8Splat) \
@@ -329,6 +328,8 @@ namespace compiler {
V(RiscvS128Not) \
V(RiscvS128Select) \
V(RiscvS128AndNot) \
+ V(RiscvS128Load64Zero) \
+ V(RiscvS128Load32Zero) \
V(RiscvI32x4AllTrue) \
V(RiscvI16x8AllTrue) \
V(RiscvV128AnyTrue) \
@@ -356,21 +357,13 @@ namespace compiler {
V(RiscvS8x16InterleaveEven) \
V(RiscvS8x16InterleaveOdd) \
V(RiscvI8x16Shuffle) \
- V(RiscvI8x16Swizzle) \
V(RiscvS8x16Concat) \
V(RiscvS8x8Reverse) \
V(RiscvS8x4Reverse) \
V(RiscvS8x2Reverse) \
- V(RiscvS128Load8Splat) \
- V(RiscvS128Load16Splat) \
- V(RiscvS128Load32Splat) \
- V(RiscvS128Load64Splat) \
- V(RiscvS128Load8x8S) \
- V(RiscvS128Load8x8U) \
- V(RiscvS128Load16x4S) \
- V(RiscvS128Load16x4U) \
- V(RiscvS128Load32x2S) \
- V(RiscvS128Load32x2U) \
+ V(RiscvS128LoadSplat) \
+ V(RiscvS128Load64ExtendS) \
+ V(RiscvS128Load64ExtendU) \
V(RiscvS128LoadLane) \
V(RiscvS128StoreLane) \
V(RiscvRvvLd) \
@@ -387,6 +380,15 @@ namespace compiler {
V(RiscvI16x8UConvertI8x16High) \
V(RiscvI8x16SConvertI16x8) \
V(RiscvI8x16UConvertI16x8) \
+ V(RiscvVwmul) \
+ V(RiscvVwmulu) \
+ V(RiscvVmvSx) \
+ V(RiscvVcompress) \
+ V(RiscvVaddVv) \
+ V(RiscvVwadd) \
+ V(RiscvVwaddu) \
+ V(RiscvVrgather) \
+ V(RiscvVslidedown) \
V(RiscvWord64AtomicLoadUint64) \
V(RiscvWord64AtomicStoreWord64) \
V(RiscvWord64AtomicAddUint64) \
diff --git a/deps/v8/src/compiler/backend/riscv64/instruction-scheduler-riscv64.cc b/deps/v8/src/compiler/backend/riscv64/instruction-scheduler-riscv64.cc
index 54d9a98663..7d4e31ce92 100644
--- a/deps/v8/src/compiler/backend/riscv64/instruction-scheduler-riscv64.cc
+++ b/deps/v8/src/compiler/backend/riscv64/instruction-scheduler-riscv64.cc
@@ -228,7 +228,6 @@ int InstructionScheduler::GetTargetInstructionFlags(
case kRiscvI32x4UConvertI16x8Low:
case kRiscvI32x4Abs:
case kRiscvI32x4BitMask:
- case kRiscvI32x4DotI16x8S:
case kRiscvI8x16Add:
case kRiscvI8x16AddSatS:
case kRiscvI8x16AddSatU:
@@ -287,6 +286,8 @@ int InstructionScheduler::GetTargetInstructionFlags(
case kRiscvS128Xor:
case kRiscvS128Const:
case kRiscvS128Zero:
+ case kRiscvS128Load32Zero:
+ case kRiscvS128Load64Zero:
case kRiscvS128AllOnes:
case kRiscvS16x8InterleaveEven:
case kRiscvS16x8InterleaveOdd:
@@ -319,7 +320,15 @@ int InstructionScheduler::GetTargetInstructionFlags(
case kRiscvS8x4Reverse:
case kRiscvS8x8Reverse:
case kRiscvI8x16Shuffle:
- case kRiscvI8x16Swizzle:
+ case kRiscvVwmul:
+ case kRiscvVwmulu:
+ case kRiscvVmvSx:
+ case kRiscvVcompress:
+ case kRiscvVaddVv:
+ case kRiscvVwadd:
+ case kRiscvVwaddu:
+ case kRiscvVrgather:
+ case kRiscvVslidedown:
case kRiscvSar32:
case kRiscvSignExtendByte:
case kRiscvSignExtendShort:
@@ -361,16 +370,9 @@ int InstructionScheduler::GetTargetInstructionFlags(
case kRiscvUlw:
case kRiscvUlwu:
case kRiscvULoadFloat:
- case kRiscvS128Load8Splat:
- case kRiscvS128Load16Splat:
- case kRiscvS128Load32Splat:
- case kRiscvS128Load64Splat:
- case kRiscvS128Load8x8S:
- case kRiscvS128Load8x8U:
- case kRiscvS128Load16x4S:
- case kRiscvS128Load16x4U:
- case kRiscvS128Load32x2S:
- case kRiscvS128Load32x2U:
+ case kRiscvS128LoadSplat:
+ case kRiscvS128Load64ExtendU:
+ case kRiscvS128Load64ExtendS:
case kRiscvS128LoadLane:
case kRiscvWord64AtomicLoadUint64:
case kRiscvLoadDecompressTaggedSigned:
diff --git a/deps/v8/src/compiler/backend/riscv64/instruction-selector-riscv64.cc b/deps/v8/src/compiler/backend/riscv64/instruction-selector-riscv64.cc
index 03f849827f..6ec4df95c2 100644
--- a/deps/v8/src/compiler/backend/riscv64/instruction-selector-riscv64.cc
+++ b/deps/v8/src/compiler/backend/riscv64/instruction-selector-riscv64.cc
@@ -389,50 +389,107 @@ void EmitLoad(InstructionSelector* selector, Node* node, InstructionCode opcode,
}
}
-void InstructionSelector::VisitStoreLane(Node* node) { UNIMPLEMENTED(); }
+void EmitS128Load(InstructionSelector* selector, Node* node,
+ InstructionCode opcode, VSew sew, Vlmul lmul) {
+ RiscvOperandGenerator g(selector);
+ Node* base = node->InputAt(0);
+ Node* index = node->InputAt(1);
+
+ if (g.CanBeImmediate(index, opcode)) {
+ selector->Emit(opcode | AddressingModeField::encode(kMode_MRI),
+ g.DefineAsRegister(node), g.UseRegister(base),
+ g.UseImmediate(index), g.UseImmediate(sew),
+ g.UseImmediate(lmul));
+ } else {
+ InstructionOperand addr_reg = g.TempRegister();
+ selector->Emit(kRiscvAdd64 | AddressingModeField::encode(kMode_None),
+ addr_reg, g.UseRegister(index), g.UseRegister(base));
+ // Emit desired load opcode, using temp addr_reg.
+ selector->Emit(opcode | AddressingModeField::encode(kMode_MRI),
+ g.DefineAsRegister(node), addr_reg, g.TempImmediate(0),
+ g.UseImmediate(sew), g.UseImmediate(lmul));
+ }
+}
+
+void InstructionSelector::VisitStoreLane(Node* node) {
+ StoreLaneParameters params = StoreLaneParametersOf(node->op());
+ LoadStoreLaneParams f(params.rep, params.laneidx);
+ InstructionCode opcode = kRiscvS128StoreLane;
+ opcode |= MiscField::encode(f.sz);
-void InstructionSelector::VisitLoadLane(Node* node) { UNIMPLEMENTED(); }
+ RiscvOperandGenerator g(this);
+ Node* base = node->InputAt(0);
+ Node* index = node->InputAt(1);
+ InstructionOperand addr_reg = g.TempRegister();
+ Emit(kRiscvAdd64, addr_reg, g.UseRegister(base), g.UseRegister(index));
+ InstructionOperand inputs[4] = {
+ g.UseRegister(node->InputAt(2)),
+ g.UseImmediate(f.laneidx),
+ addr_reg,
+ g.TempImmediate(0),
+ };
+ opcode |= AddressingModeField::encode(kMode_MRI);
+ Emit(opcode, 0, nullptr, 4, inputs);
+}
+void InstructionSelector::VisitLoadLane(Node* node) {
+ LoadLaneParameters params = LoadLaneParametersOf(node->op());
+ LoadStoreLaneParams f(params.rep.representation(), params.laneidx);
+ InstructionCode opcode = kRiscvS128LoadLane;
+ opcode |= MiscField::encode(f.sz);
+
+ RiscvOperandGenerator g(this);
+ Node* base = node->InputAt(0);
+ Node* index = node->InputAt(1);
+ InstructionOperand addr_reg = g.TempRegister();
+ Emit(kRiscvAdd64, addr_reg, g.UseRegister(base), g.UseRegister(index));
+ opcode |= AddressingModeField::encode(kMode_MRI);
+ Emit(opcode, g.DefineSameAsFirst(node), g.UseRegister(node->InputAt(2)),
+ g.UseImmediate(params.laneidx), addr_reg, g.TempImmediate(0));
+}
void InstructionSelector::VisitLoadTransform(Node* node) {
LoadTransformParameters params = LoadTransformParametersOf(node->op());
- InstructionCode opcode = kArchNop;
switch (params.transformation) {
case LoadTransformation::kS128Load8Splat:
- opcode = kRiscvS128Load8Splat;
+ EmitS128Load(this, node, kRiscvS128LoadSplat, E8, m1);
break;
case LoadTransformation::kS128Load16Splat:
- opcode = kRiscvS128Load16Splat;
+ EmitS128Load(this, node, kRiscvS128LoadSplat, E16, m1);
break;
case LoadTransformation::kS128Load32Splat:
- opcode = kRiscvS128Load32Splat;
+ EmitS128Load(this, node, kRiscvS128LoadSplat, E32, m1);
break;
case LoadTransformation::kS128Load64Splat:
- opcode = kRiscvS128Load64Splat;
+ EmitS128Load(this, node, kRiscvS128LoadSplat, E64, m1);
break;
case LoadTransformation::kS128Load8x8S:
- opcode = kRiscvS128Load8x8S;
+ EmitS128Load(this, node, kRiscvS128Load64ExtendS, E16, m1);
break;
case LoadTransformation::kS128Load8x8U:
- opcode = kRiscvS128Load8x8U;
+ EmitS128Load(this, node, kRiscvS128Load64ExtendU, E16, m1);
break;
case LoadTransformation::kS128Load16x4S:
- opcode = kRiscvS128Load16x4S;
+ EmitS128Load(this, node, kRiscvS128Load64ExtendS, E32, m1);
break;
case LoadTransformation::kS128Load16x4U:
- opcode = kRiscvS128Load16x4U;
+ EmitS128Load(this, node, kRiscvS128Load64ExtendU, E32, m1);
break;
case LoadTransformation::kS128Load32x2S:
- opcode = kRiscvS128Load32x2S;
+ EmitS128Load(this, node, kRiscvS128Load64ExtendS, E64, m1);
break;
case LoadTransformation::kS128Load32x2U:
- opcode = kRiscvS128Load32x2U;
+ EmitS128Load(this, node, kRiscvS128Load64ExtendU, E64, m1);
+ break;
+ case LoadTransformation::kS128Load32Zero:
+ EmitS128Load(this, node, kRiscvS128Load32Zero, E32, m1);
+ break;
+ case LoadTransformation::kS128Load64Zero:
+ EmitS128Load(this, node, kRiscvS128Load64Zero, E64, m1);
break;
default:
UNIMPLEMENTED();
}
-
- EmitLoad(this, node, opcode);
}
void InstructionSelector::VisitLoad(Node* node) {
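The LoadTransform lowering above collapses the former per-transform opcode table into a single EmitS128Load helper parameterised by element width (VSew) and register-group multiplier (Vlmul). As a host-side sketch of the semantics being selected here (not V8 code), a load-and-extend transform reads 64 bits and widens each lane, while a load-zero transform fills only the low lane:

    #include <cstdint>
    #include <cstdio>
    #include <cstring>

    // Scalar model of S128Load8x8S: read 8 bytes, sign-extend each to 16 bits.
    void Load8x8S(const int8_t* mem, int16_t out[8]) {
      for (int i = 0; i < 8; ++i) out[i] = static_cast<int16_t>(mem[i]);
    }

    // Scalar model of S128Load32Zero: low 32 bits from memory, upper 96 bits zero.
    void Load32Zero(const uint8_t* mem, uint8_t out[16]) {
      std::memset(out, 0, 16);
      std::memcpy(out, mem, 4);
    }

    int main() {
      int8_t bytes[8] = {-1, 2, -3, 4, -5, 6, -7, 8};
      int16_t lanes[8];
      Load8x8S(bytes, lanes);
      for (int16_t v : lanes) std::printf("%d ", v);  // -1 2 -3 4 -5 6 -7 8
      uint8_t word[4] = {0xDE, 0xAD, 0xBE, 0xEF}, vec[16];
      Load32Zero(word, vec);
      std::printf("\nlane 0 byte %02x, lane 4 byte %02x\n", vec[0], vec[4]);
    }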
@@ -913,19 +970,55 @@ void InstructionSelector::VisitInt32Mul(Node* node) {
}
void InstructionSelector::VisitI32x4ExtAddPairwiseI16x8S(Node* node) {
- UNIMPLEMENTED();
+ RiscvOperandGenerator g(this);
+ InstructionOperand src1 = g.TempSimd128Register();
+ InstructionOperand src2 = g.TempSimd128Register();
+ InstructionOperand src = g.UseUniqueRegister(node->InputAt(0));
+ Emit(kRiscvVrgather, src1, src, g.UseImmediate64(0x0006000400020000),
+ g.UseImmediate(int8_t(E16)), g.UseImmediate(int8_t(m1)));
+ Emit(kRiscvVrgather, src2, src, g.UseImmediate64(0x0007000500030001),
+ g.UseImmediate(int8_t(E16)), g.UseImmediate(int8_t(m1)));
+ Emit(kRiscvVwadd, g.DefineAsRegister(node), src1, src2,
+ g.UseImmediate(int8_t(E16)), g.UseImmediate(int8_t(mf2)));
}
void InstructionSelector::VisitI32x4ExtAddPairwiseI16x8U(Node* node) {
- UNIMPLEMENTED();
+ RiscvOperandGenerator g(this);
+ InstructionOperand src1 = g.TempSimd128Register();
+ InstructionOperand src2 = g.TempSimd128Register();
+ InstructionOperand src = g.UseUniqueRegister(node->InputAt(0));
+ Emit(kRiscvVrgather, src1, src, g.UseImmediate64(0x0006000400020000),
+ g.UseImmediate(int8_t(E16)), g.UseImmediate(int8_t(m1)));
+ Emit(kRiscvVrgather, src2, src, g.UseImmediate64(0x0007000500030001),
+ g.UseImmediate(int8_t(E16)), g.UseImmediate(int8_t(m1)));
+ Emit(kRiscvVwaddu, g.DefineAsRegister(node), src1, src2,
+ g.UseImmediate(int8_t(E16)), g.UseImmediate(int8_t(mf2)));
}
void InstructionSelector::VisitI16x8ExtAddPairwiseI8x16S(Node* node) {
- UNIMPLEMENTED();
+ RiscvOperandGenerator g(this);
+ InstructionOperand src1 = g.TempSimd128Register();
+ InstructionOperand src2 = g.TempSimd128Register();
+ InstructionOperand src = g.UseUniqueRegister(node->InputAt(0));
+ Emit(kRiscvVrgather, src1, src, g.UseImmediate64(0x0E0C0A0806040200),
+ g.UseImmediate(int8_t(E8)), g.UseImmediate(int8_t(m1)));
+ Emit(kRiscvVrgather, src2, src, g.UseImmediate64(0x0F0D0B0907050301),
+ g.UseImmediate(int8_t(E8)), g.UseImmediate(int8_t(m1)));
+ Emit(kRiscvVwadd, g.DefineAsRegister(node), src1, src2,
+ g.UseImmediate(int8_t(E8)), g.UseImmediate(int8_t(mf2)));
}
void InstructionSelector::VisitI16x8ExtAddPairwiseI8x16U(Node* node) {
- UNIMPLEMENTED();
+ RiscvOperandGenerator g(this);
+ InstructionOperand src1 = g.TempSimd128Register();
+ InstructionOperand src2 = g.TempSimd128Register();
+ InstructionOperand src = g.UseUniqueRegister(node->InputAt(0));
+ Emit(kRiscvVrgather, src1, src, g.UseImmediate64(0x0E0C0A0806040200),
+ g.UseImmediate(int8_t(E8)), g.UseImmediate(int8_t(m1)));
+ Emit(kRiscvVrgather, src2, src, g.UseImmediate64(0x0F0D0B0907050301),
+ g.UseImmediate(int8_t(E8)), g.UseImmediate(int8_t(m1)));
+ Emit(kRiscvVwaddu, g.DefineAsRegister(node), src1, src2,
+ g.UseImmediate(int8_t(E8)), g.UseImmediate(int8_t(mf2)));
}
void InstructionSelector::VisitInt32MulHigh(Node* node) {
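The ext-add-pairwise lowerings above use two kRiscvVrgather emissions whose 64-bit immediates pack the even and odd lane indices (0,2,4,6 and 1,3,5,7 for the halfword case), followed by a widening add. A scalar model of the operation being implemented, for reference:

    #include <cstdint>
    #include <cstdio>

    // Scalar model of i32x4.extadd_pairwise_i16x8_s: gather even and odd 16-bit
    // lanes (what the two kRiscvVrgather emissions do), then widen-add each pair.
    void ExtAddPairwiseI16x8S(const int16_t in[8], int32_t out[4]) {
      for (int i = 0; i < 4; ++i) {
        int32_t even = in[2 * i];      // lanes 0,2,4,6 -> first gather
        int32_t odd  = in[2 * i + 1];  // lanes 1,3,5,7 -> second gather
        out[i] = even + odd;           // kRiscvVwadd: widening add, cannot overflow
      }
    }

    int main() {
      int16_t in[8] = {32767, 1, -32768, -1, 5, 7, -2, 9};
      int32_t out[4];
      ExtAddPairwiseI16x8S(in, out);
      for (int32_t v : out) std::printf("%d ", v);  // 32768 -32769 12 7
      std::printf("\n");
    }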
@@ -941,7 +1034,7 @@ void InstructionSelector::VisitInt64Mul(Node* node) {
Int64BinopMatcher m(node);
// TODO(dusmil): Add optimization for shifts larger than 32.
if (m.right().HasResolvedValue() && m.right().ResolvedValue() > 0) {
- uint32_t value = static_cast<uint32_t>(m.right().ResolvedValue());
+ uint64_t value = static_cast<uint64_t>(m.right().ResolvedValue());
if (base::bits::IsPowerOfTwo(value)) {
Emit(kRiscvShl64 | AddressingModeField::encode(kMode_None),
g.DefineAsRegister(node), g.UseRegister(m.left().node()),
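Widening value from uint32_t to uint64_t above lets the power-of-two strength reduction in VisitInt64Mul fire for constants whose single set bit lies above bit 31. A scalar model of just that check, using the GCC/Clang count-leading-zeros builtin (an assumption about the toolchain, not V8 code):

    #include <cstdint>
    #include <cstdio>

    // Model of the strength reduction: x * (1 << k) becomes x << k. A 64-bit
    // value type keeps constants with a set bit above bit 31 eligible.
    bool TryMulToShift(int64_t constant, uint32_t* shift) {
      uint64_t value = static_cast<uint64_t>(constant);
      if (constant <= 0 || (value & (value - 1)) != 0) return false;  // not 2^k
      *shift = 63 - __builtin_clzll(value);  // which power of two
      return true;
    }

    int main() {
      uint32_t shift;
      if (TryMulToShift(int64_t{1} << 40, &shift)) std::printf("shift %u\n", shift);
    }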
@@ -2706,10 +2799,6 @@ void InstructionSelector::VisitInt64AbsWithOverflow(Node* node) {
V(I64x2Neg, kRiscvI64x2Neg) \
V(I64x2Abs, kRiscvI64x2Abs) \
V(I64x2BitMask, kRiscvI64x2BitMask) \
- V(I64x2Eq, kRiscvI64x2Eq) \
- V(I64x2Ne, kRiscvI64x2Ne) \
- V(I64x2GtS, kRiscvI64x2GtS) \
- V(I64x2GeS, kRiscvI64x2GeS) \
V(F32x4SConvertI32x4, kRiscvF32x4SConvertI32x4) \
V(F32x4UConvertI32x4, kRiscvF32x4UConvertI32x4) \
V(F32x4Abs, kRiscvF32x4Abs) \
@@ -2780,6 +2869,10 @@ void InstructionSelector::VisitInt64AbsWithOverflow(Node* node) {
V(F64x2Ne, kRiscvF64x2Ne) \
V(F64x2Lt, kRiscvF64x2Lt) \
V(F64x2Le, kRiscvF64x2Le) \
+ V(I64x2Eq, kRiscvI64x2Eq) \
+ V(I64x2Ne, kRiscvI64x2Ne) \
+ V(I64x2GtS, kRiscvI64x2GtS) \
+ V(I64x2GeS, kRiscvI64x2GeS) \
V(I64x2Add, kRiscvI64x2Add) \
V(I64x2Sub, kRiscvI64x2Sub) \
V(I64x2Mul, kRiscvI64x2Mul) \
@@ -2806,7 +2899,6 @@ void InstructionSelector::VisitInt64AbsWithOverflow(Node* node) {
V(I32x4GeS, kRiscvI32x4GeS) \
V(I32x4GtU, kRiscvI32x4GtU) \
V(I32x4GeU, kRiscvI32x4GeU) \
- V(I32x4DotI16x8S, kRiscvI32x4DotI16x8S) \
V(I16x8Add, kRiscvI16x8Add) \
V(I16x8AddSatS, kRiscvI16x8AddSatS) \
V(I16x8AddSatU, kRiscvI16x8AddSatU) \
@@ -2932,6 +3024,23 @@ void InstructionSelector::VisitS128Select(Node* node) {
VisitRRRR(this, kRiscvS128Select, node);
}
+void InstructionSelector::VisitI32x4DotI16x8S(Node* node) {
+ RiscvOperandGenerator g(this);
+ InstructionOperand temp = g.TempFpRegister(v14);
+ InstructionOperand temp1 = g.TempFpRegister(v10);
+ InstructionOperand temp2 = g.TempFpRegister(v18);
+ InstructionOperand dst = g.DefineAsRegister(node);
+ this->Emit(kRiscvVwmul, temp, g.UseRegister(node->InputAt(0)),
+ g.UseRegister(node->InputAt(1)), g.UseImmediate(E16),
+ g.UseImmediate(m1));
+ this->Emit(kRiscvVcompress, temp2, temp, g.UseImmediate(0b01010101),
+ g.UseImmediate(E32), g.UseImmediate(m2));
+ this->Emit(kRiscvVcompress, temp1, temp, g.UseImmediate(0b10101010),
+ g.UseImmediate(E32), g.UseImmediate(m2));
+ this->Emit(kRiscvVaddVv, dst, temp1, temp2, g.UseImmediate(E32),
+ g.UseImmediate(m1));
+}
+
namespace {
struct ShuffleEntry {
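The new VisitI32x4DotI16x8S above widen-multiplies the two inputs into an m2 register group, then uses the two kRiscvVcompress masks (0b01010101 and 0b10101010) to pack the even and odd 32-bit products before a final add. A scalar model of the dot product being selected:

    #include <cstdint>
    #include <cstdio>

    // Scalar model of i32x4.dot_i16x8_s: widen-multiply lane-wise (kRiscvVwmul),
    // split the 8 products into even/odd halves (the two kRiscvVcompress
    // emissions), then add adjacent pairs (kRiscvVaddVv).
    void DotI16x8S(const int16_t a[8], const int16_t b[8], int32_t out[4]) {
      int32_t prod[8];
      for (int i = 0; i < 8; ++i) prod[i] = int32_t{a[i]} * b[i];
      for (int i = 0; i < 4; ++i) out[i] = prod[2 * i] + prod[2 * i + 1];
    }

    int main() {
      int16_t a[8] = {1, 2, 3, 4, 5, 6, 7, 8};
      int16_t b[8] = {1, 1, 1, 1, 2, 2, 2, 2};
      int32_t out[4];
      DotI16x8S(a, b, out);
      for (int32_t v : out) std::printf("%d ", v);  // 3 7 22 30
      std::printf("\n");
    }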
@@ -3050,9 +3159,10 @@ void InstructionSelector::VisitI8x16Swizzle(Node* node) {
InstructionOperand temps[] = {g.TempSimd128Register()};
// We don't want input 0 or input 1 to be the same as output, since we will
// modify output before doing the calculation.
- Emit(kRiscvI8x16Swizzle, g.DefineAsRegister(node),
+ Emit(kRiscvVrgather, g.DefineAsRegister(node),
g.UseUniqueRegister(node->InputAt(0)),
- g.UseUniqueRegister(node->InputAt(1)), arraysize(temps), temps);
+ g.UseUniqueRegister(node->InputAt(1)), g.UseImmediate(E8),
+ g.UseImmediate(m1), arraysize(temps), temps);
}
void InstructionSelector::VisitSignExtendWord8ToInt32(Node* node) {
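The swizzle lowering above drops the dedicated kRiscvI8x16Swizzle opcode in favour of the generic kRiscvVrgather with 8-bit elements, which works because vrgather and wasm swizzle agree on out-of-range indices. A scalar model:

    #include <cstdint>
    #include <cstdio>

    // Scalar model of i8x16.swizzle as lowered through vrgather.vv: each output
    // byte is src[idx] for idx < 16 and 0 otherwise, matching vrgather's
    // out-of-range behaviour.
    void I8x16Swizzle(const uint8_t src[16], const uint8_t idx[16],
                      uint8_t out[16]) {
      for (int i = 0; i < 16; ++i) out[i] = idx[i] < 16 ? src[idx[i]] : 0;
    }

    int main() {
      uint8_t src[16], idx[16], out[16];
      for (int i = 0; i < 16; ++i) { src[i] = 'a' + i; idx[i] = 15 - i; }
      idx[0] = 200;  // out of range -> lane becomes 0
      I8x16Swizzle(src, idx, out);
      for (uint8_t b : out) std::printf("%d ", b);
      std::printf("\n");
    }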
@@ -3101,20 +3211,55 @@ void InstructionSelector::VisitF64x2Pmax(Node* node) {
VisitUniqueRRR(this, kRiscvF64x2Pmax, node);
}
-#define VISIT_EXT_MUL(OPCODE1, OPCODE2) \
- void InstructionSelector::Visit##OPCODE1##ExtMulLow##OPCODE2(Node* node) { \
- UNREACHABLE(); \
- } \
- void InstructionSelector::Visit##OPCODE1##ExtMulHigh##OPCODE2(Node* node) { \
- UNREACHABLE(); \
- }
-
-VISIT_EXT_MUL(I64x2, I32x4S)
-VISIT_EXT_MUL(I64x2, I32x4U)
-VISIT_EXT_MUL(I32x4, I16x8S)
-VISIT_EXT_MUL(I32x4, I16x8U)
-VISIT_EXT_MUL(I16x8, I8x16S)
-VISIT_EXT_MUL(I16x8, I8x16U)
+#define VISIT_EXT_MUL(OPCODE1, OPCODE2, TYPE) \
+ void InstructionSelector::Visit##OPCODE1##ExtMulLow##OPCODE2##S( \
+ Node* node) { \
+ RiscvOperandGenerator g(this); \
+ Emit(kRiscvVwmul, g.DefineAsRegister(node), \
+ g.UseUniqueRegister(node->InputAt(0)), \
+ g.UseUniqueRegister(node->InputAt(1)), g.UseImmediate(E##TYPE), \
+ g.UseImmediate(mf2)); \
+ } \
+ void InstructionSelector::Visit##OPCODE1##ExtMulHigh##OPCODE2##S( \
+ Node* node) { \
+ RiscvOperandGenerator g(this); \
+ InstructionOperand t1 = g.TempFpRegister(v10); \
+ Emit(kRiscvVslidedown, t1, g.UseUniqueRegister(node->InputAt(0)), \
+ g.UseImmediate(kRvvVLEN / TYPE / 2), g.UseImmediate(E##TYPE), \
+ g.UseImmediate(m1)); \
+ InstructionOperand t2 = g.TempFpRegister(v9); \
+ Emit(kRiscvVslidedown, t2, g.UseUniqueRegister(node->InputAt(1)), \
+ g.UseImmediate(kRvvVLEN / TYPE / 2), g.UseImmediate(E##TYPE), \
+ g.UseImmediate(m1)); \
+ Emit(kRiscvVwmul, g.DefineAsRegister(node), t1, t2, \
+ g.UseImmediate(E##TYPE), g.UseImmediate(mf2)); \
+ } \
+ void InstructionSelector::Visit##OPCODE1##ExtMulLow##OPCODE2##U( \
+ Node* node) { \
+ RiscvOperandGenerator g(this); \
+ Emit(kRiscvVwmulu, g.DefineAsRegister(node), \
+ g.UseUniqueRegister(node->InputAt(0)), \
+ g.UseUniqueRegister(node->InputAt(1)), g.UseImmediate(E##TYPE), \
+ g.UseImmediate(mf2)); \
+ } \
+ void InstructionSelector::Visit##OPCODE1##ExtMulHigh##OPCODE2##U( \
+ Node* node) { \
+ RiscvOperandGenerator g(this); \
+ InstructionOperand t1 = g.TempFpRegister(v10); \
+ Emit(kRiscvVslidedown, t1, g.UseUniqueRegister(node->InputAt(0)), \
+ g.UseImmediate(kRvvVLEN / TYPE / 2), g.UseImmediate(E##TYPE), \
+ g.UseImmediate(m1)); \
+ InstructionOperand t2 = g.TempFpRegister(v9); \
+ Emit(kRiscvVslidedown, t2, g.UseUniqueRegister(node->InputAt(1)), \
+ g.UseImmediate(kRvvVLEN / TYPE / 2), g.UseImmediate(E##TYPE), \
+ g.UseImmediate(m1)); \
+ Emit(kRiscvVwmulu, g.DefineAsRegister(node), t1, t2, \
+ g.UseImmediate(E##TYPE), g.UseImmediate(mf2)); \
+ }
+
+VISIT_EXT_MUL(I64x2, I32x4, 32)
+VISIT_EXT_MUL(I32x4, I16x8, 16)
+VISIT_EXT_MUL(I16x8, I8x16, 8)
#undef VISIT_EXT_MUL
void InstructionSelector::AddOutputToSelectContinuation(OperandGenerator* g,
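The rewritten VISIT_EXT_MUL macro above handles the "low" halves with a direct widening multiply and the "high" halves by first sliding the upper lanes down by kRvvVLEN / TYPE / 2 elements (4 halfword lanes for the I32x4 case, assuming a 128-bit VLEN). A scalar model of the two variants:

    #include <cstdint>
    #include <cstdio>

    // Scalar model of i32x4.extmul_{low,high}_i16x8_s: "low" widen-multiplies
    // lanes 0..3; "high" conceptually slides the upper half down first
    // (kRiscvVslidedown) and then widen-multiplies.
    void ExtMulI16x8S(const int16_t a[8], const int16_t b[8], bool high,
                      int32_t out[4]) {
      int base = high ? 4 : 0;
      for (int i = 0; i < 4; ++i)
        out[i] = int32_t{a[base + i]} * int32_t{b[base + i]};
    }

    int main() {
      int16_t a[8] = {1, 2, 3, 4, 5, 6, 7, 8};
      int16_t b[8] = {2, 2, 2, 2, 3, 3, 3, 3};
      int32_t lo[4], hi[4];
      ExtMulI16x8S(a, b, false, lo);
      ExtMulI16x8S(a, b, true, hi);
      std::printf("%d %d %d %d | %d %d %d %d\n", lo[0], lo[1], lo[2], lo[3],
                  hi[0], hi[1], hi[2], hi[3]);  // 2 4 6 8 | 15 18 21 24
    }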
diff --git a/deps/v8/src/compiler/backend/s390/code-generator-s390.cc b/deps/v8/src/compiler/backend/s390/code-generator-s390.cc
index 93f2aa8e2a..7f478fa120 100644
--- a/deps/v8/src/compiler/backend/s390/code-generator-s390.cc
+++ b/deps/v8/src/compiler/backend/s390/code-generator-s390.cc
@@ -734,6 +734,14 @@ static inline int AssembleUnaryOp(Instruction* instr, _R _r, _M _m, _I _i) {
__ asm_instr(value, operand); \
} while (0)
+static inline bool is_wasm_on_be(bool IsWasm) {
+#if V8_TARGET_BIG_ENDIAN
+ return IsWasm;
+#else
+ return false;
+#endif
+}
+
#define ASSEMBLE_ATOMIC_COMPARE_EXCHANGE_BYTE(load_and_ext) \
do { \
Register old_value = i.InputRegister(0); \
@@ -750,111 +758,183 @@ static inline int AssembleUnaryOp(Instruction* instr, _R _r, _M _m, _I _i) {
__ load_and_ext(output, output); \
} while (false)
-#define ASSEMBLE_ATOMIC_COMPARE_EXCHANGE_HALFWORD(load_and_ext) \
- do { \
- Register old_value = i.InputRegister(0); \
- Register new_value = i.InputRegister(1); \
- Register output = i.OutputRegister(); \
- Register addr = kScratchReg; \
- Register temp0 = r0; \
- Register temp1 = r1; \
- size_t index = 2; \
- AddressingMode mode = kMode_None; \
- MemOperand op = i.MemoryOperand(&mode, &index); \
- __ lay(addr, op); \
- __ AtomicCmpExchangeU16(addr, output, old_value, new_value, temp0, temp1); \
- __ load_and_ext(output, output); \
+#define ASSEMBLE_ATOMIC_COMPARE_EXCHANGE_HALFWORD(load_and_ext) \
+ do { \
+ Register old_value = i.InputRegister(0); \
+ Register new_value = i.InputRegister(1); \
+ Register output = i.OutputRegister(); \
+ Register addr = kScratchReg; \
+ Register temp0 = r0; \
+ Register temp1 = r1; \
+ size_t index = 2; \
+ AddressingMode mode = kMode_None; \
+ MemOperand op = i.MemoryOperand(&mode, &index); \
+ __ lay(addr, op); \
+ if (is_wasm_on_be(info()->IsWasm())) { \
+ Register temp2 = \
+ GetRegisterThatIsNotOneOf(output, old_value, new_value); \
+ Register temp3 = \
+ GetRegisterThatIsNotOneOf(output, old_value, new_value, temp2); \
+ __ Push(temp2, temp3); \
+ __ lrvr(temp2, old_value); \
+ __ lrvr(temp3, new_value); \
+ __ ShiftRightU32(temp2, temp2, Operand(16)); \
+ __ ShiftRightU32(temp3, temp3, Operand(16)); \
+ __ AtomicCmpExchangeU16(addr, output, temp2, temp3, temp0, temp1); \
+ __ lrvr(output, output); \
+ __ ShiftRightU32(output, output, Operand(16)); \
+ __ Pop(temp2, temp3); \
+ } else { \
+ __ AtomicCmpExchangeU16(addr, output, old_value, new_value, temp0, \
+ temp1); \
+ } \
+ __ load_and_ext(output, output); \
} while (false)
-#define ASSEMBLE_ATOMIC_COMPARE_EXCHANGE_WORD() \
- do { \
- Register new_val = i.InputRegister(1); \
- Register output = i.OutputRegister(); \
- Register addr = kScratchReg; \
- size_t index = 2; \
- AddressingMode mode = kMode_None; \
- MemOperand op = i.MemoryOperand(&mode, &index); \
- __ lay(addr, op); \
- __ CmpAndSwap(output, new_val, MemOperand(addr)); \
- __ LoadU32(output, output); \
+#define ASSEMBLE_ATOMIC_COMPARE_EXCHANGE_WORD() \
+ do { \
+ Register new_val = i.InputRegister(1); \
+ Register output = i.OutputRegister(); \
+ Register addr = kScratchReg; \
+ size_t index = 2; \
+ AddressingMode mode = kMode_None; \
+ MemOperand op = i.MemoryOperand(&mode, &index); \
+ __ lay(addr, op); \
+ if (is_wasm_on_be(info()->IsWasm())) { \
+ __ lrvr(r0, output); \
+ __ lrvr(r1, new_val); \
+ __ CmpAndSwap(r0, r1, MemOperand(addr)); \
+ __ lrvr(output, r0); \
+ } else { \
+ __ CmpAndSwap(output, new_val, MemOperand(addr)); \
+ } \
+ __ LoadU32(output, output); \
} while (false)
-#define ASSEMBLE_ATOMIC_BINOP_WORD(load_and_op) \
- do { \
- Register value = i.InputRegister(2); \
- Register result = i.OutputRegister(0); \
- Register addr = r1; \
- AddressingMode mode = kMode_None; \
- MemOperand op = i.MemoryOperand(&mode); \
- __ lay(addr, op); \
- __ load_and_op(result, value, MemOperand(addr)); \
- __ LoadU32(result, result); \
+#define ASSEMBLE_ATOMIC_BINOP_WORD(load_and_op, op) \
+ do { \
+ Register value = i.InputRegister(2); \
+ Register result = i.OutputRegister(0); \
+ Register addr = r1; \
+ AddressingMode mode = kMode_None; \
+ MemOperand op = i.MemoryOperand(&mode); \
+ __ lay(addr, op); \
+ if (is_wasm_on_be(info()->IsWasm())) { \
+ Label do_cs; \
+ __ bind(&do_cs); \
+ __ LoadU32(r0, MemOperand(addr)); \
+ __ lrvr(ip, r0); \
+ __ op(ip, ip, value); \
+ __ lrvr(ip, ip); \
+ __ CmpAndSwap(r0, ip, MemOperand(addr)); \
+ __ bne(&do_cs, Label::kNear); \
+ __ lrvr(result, r0); \
+ } else { \
+ __ load_and_op(result, value, MemOperand(addr)); \
+ } \
+ __ LoadU32(result, result); \
} while (false)
-#define ASSEMBLE_ATOMIC_BINOP_WORD64(load_and_op) \
- do { \
- Register value = i.InputRegister(2); \
- Register result = i.OutputRegister(0); \
- Register addr = r1; \
- AddressingMode mode = kMode_None; \
- MemOperand op = i.MemoryOperand(&mode); \
- __ lay(addr, op); \
- __ load_and_op(result, value, MemOperand(addr)); \
+#define ASSEMBLE_ATOMIC_BINOP_WORD64(load_and_op, op) \
+ do { \
+ Register value = i.InputRegister(2); \
+ Register result = i.OutputRegister(0); \
+ Register addr = r1; \
+ AddressingMode mode = kMode_None; \
+ MemOperand op = i.MemoryOperand(&mode); \
+ __ lay(addr, op); \
+ if (is_wasm_on_be(info()->IsWasm())) { \
+ Label do_cs; \
+ __ bind(&do_cs); \
+ __ LoadU64(r0, MemOperand(addr)); \
+ __ lrvgr(ip, r0); \
+ __ op(ip, ip, value); \
+ __ lrvgr(ip, ip); \
+ __ CmpAndSwap64(r0, ip, MemOperand(addr)); \
+ __ bne(&do_cs, Label::kNear); \
+ __ lrvgr(result, r0); \
+ break; \
+ } \
+ __ load_and_op(result, value, MemOperand(addr)); \
} while (false)
-#define ATOMIC_BIN_OP(bin_inst, offset, shift_amount, start, end) \
- do { \
- Label do_cs; \
- __ LoadU32(prev, MemOperand(addr, offset)); \
- __ bind(&do_cs); \
- __ RotateInsertSelectBits(temp, value, Operand(start), Operand(end), \
- Operand(static_cast<intptr_t>(shift_amount)), \
- true); \
- __ bin_inst(new_val, prev, temp); \
- __ lr(temp, prev); \
- __ RotateInsertSelectBits(temp, new_val, Operand(start), Operand(end), \
- Operand::Zero(), false); \
- __ CmpAndSwap(prev, temp, MemOperand(addr, offset)); \
- __ bne(&do_cs, Label::kNear); \
+#define ATOMIC_BIN_OP(bin_inst, offset, shift_amount, start, end, \
+ maybe_reverse_bytes) \
+ do { \
+ /* At the moment this is only true when dealing with 2-byte values.*/ \
+ bool reverse_bytes = \
+ maybe_reverse_bytes && is_wasm_on_be(info()->IsWasm()); \
+ USE(reverse_bytes); \
+ Label do_cs; \
+ __ LoadU32(prev, MemOperand(addr, offset)); \
+ __ bind(&do_cs); \
+ if (reverse_bytes) { \
+ Register temp2 = GetRegisterThatIsNotOneOf(value, result, prev); \
+ __ Push(temp2); \
+ __ lrvr(temp2, prev); \
+ __ RotateInsertSelectBits(temp2, temp2, Operand(start), Operand(end), \
+ Operand(static_cast<intptr_t>(shift_amount)), \
+ true); \
+ __ RotateInsertSelectBits(temp, value, Operand(start), Operand(end), \
+ Operand(static_cast<intptr_t>(shift_amount)), \
+ true); \
+ __ bin_inst(new_val, temp2, temp); \
+ __ lrvr(temp2, new_val); \
+ __ lr(temp, prev); \
+ __ RotateInsertSelectBits(temp, temp2, Operand(start), Operand(end), \
+ Operand(static_cast<intptr_t>(shift_amount)), \
+ false); \
+ __ Pop(temp2); \
+ } else { \
+ __ RotateInsertSelectBits(temp, value, Operand(start), Operand(end), \
+ Operand(static_cast<intptr_t>(shift_amount)), \
+ true); \
+ __ bin_inst(new_val, prev, temp); \
+ __ lr(temp, prev); \
+ __ RotateInsertSelectBits(temp, new_val, Operand(start), Operand(end), \
+ Operand::Zero(), false); \
+ } \
+ __ CmpAndSwap(prev, temp, MemOperand(addr, offset)); \
+ __ bne(&do_cs, Label::kNear); \
} while (false)
#ifdef V8_TARGET_BIG_ENDIAN
-#define ATOMIC_BIN_OP_HALFWORD(bin_inst, index, extract_result) \
- { \
- constexpr int offset = -(2 * index); \
- constexpr int shift_amount = 16 - (index * 16); \
- constexpr int start = 48 - shift_amount; \
- constexpr int end = start + 15; \
- ATOMIC_BIN_OP(bin_inst, offset, shift_amount, start, end); \
- extract_result(); \
+#define ATOMIC_BIN_OP_HALFWORD(bin_inst, index, extract_result) \
+ { \
+ constexpr int offset = -(2 * index); \
+ constexpr int shift_amount = 16 - (index * 16); \
+ constexpr int start = 48 - shift_amount; \
+ constexpr int end = start + 15; \
+ ATOMIC_BIN_OP(bin_inst, offset, shift_amount, start, end, true); \
+ extract_result(); \
}
-#define ATOMIC_BIN_OP_BYTE(bin_inst, index, extract_result) \
- { \
- constexpr int offset = -(index); \
- constexpr int shift_amount = 24 - (index * 8); \
- constexpr int start = 56 - shift_amount; \
- constexpr int end = start + 7; \
- ATOMIC_BIN_OP(bin_inst, offset, shift_amount, start, end); \
- extract_result(); \
+#define ATOMIC_BIN_OP_BYTE(bin_inst, index, extract_result) \
+ { \
+ constexpr int offset = -(index); \
+ constexpr int shift_amount = 24 - (index * 8); \
+ constexpr int start = 56 - shift_amount; \
+ constexpr int end = start + 7; \
+ ATOMIC_BIN_OP(bin_inst, offset, shift_amount, start, end, false); \
+ extract_result(); \
}
#else
-#define ATOMIC_BIN_OP_HALFWORD(bin_inst, index, extract_result) \
- { \
- constexpr int offset = -(2 * index); \
- constexpr int shift_amount = index * 16; \
- constexpr int start = 48 - shift_amount; \
- constexpr int end = start + 15; \
- ATOMIC_BIN_OP(bin_inst, offset, shift_amount, start, end); \
- extract_result(); \
+#define ATOMIC_BIN_OP_HALFWORD(bin_inst, index, extract_result) \
+ { \
+ constexpr int offset = -(2 * index); \
+ constexpr int shift_amount = index * 16; \
+ constexpr int start = 48 - shift_amount; \
+ constexpr int end = start + 15; \
+ ATOMIC_BIN_OP(bin_inst, offset, shift_amount, start, end, false); \
+ extract_result(); \
}
-#define ATOMIC_BIN_OP_BYTE(bin_inst, index, extract_result) \
- { \
- constexpr int offset = -(index); \
- constexpr int shift_amount = index * 8; \
- constexpr int start = 56 - shift_amount; \
- constexpr int end = start + 7; \
- ATOMIC_BIN_OP(bin_inst, offset, shift_amount, start, end); \
- extract_result(); \
+#define ATOMIC_BIN_OP_BYTE(bin_inst, index, extract_result) \
+ { \
+ constexpr int offset = -(index); \
+ constexpr int shift_amount = index * 8; \
+ constexpr int start = 56 - shift_amount; \
+ constexpr int end = start + 7; \
+ ATOMIC_BIN_OP(bin_inst, offset, shift_amount, start, end, false); \
+ extract_result(); \
}
#endif // V8_TARGET_BIG_ENDIAN
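The reworked atomic macros above special-case wasm code on big-endian s390: wasm memory is little-endian, so the raw word is byte-reversed (lrvr/lrvgr) into native order before the operation and reversed back before the compare-and-swap retries. A host-side model of the word-sized loop, under the assumption of GCC/Clang builtins and std::atomic standing in for the real memory operand:

    #include <atomic>
    #include <cstdint>
    #include <cstdio>

    // Model of ASSEMBLE_ATOMIC_BINOP_WORD for wasm-on-big-endian: the word lives
    // in memory in reversed (wasm) byte order, so each iteration byte-reverses it
    // (lrvr), applies the operation, reverses back, and retries with
    // compare-and-swap until no other thread intervened.
    uint32_t AtomicAddWasmOrder(std::atomic<uint32_t>* cell, uint32_t value) {
      uint32_t old_raw = cell->load();
      uint32_t new_raw;
      do {
        uint32_t old_native = __builtin_bswap32(old_raw);   // lrvr
        new_raw = __builtin_bswap32(old_native + value);    // op, then lrvr back
      } while (!cell->compare_exchange_weak(old_raw, new_raw));  // CmpAndSwap
      return __builtin_bswap32(old_raw);  // previous value, native order
    }

    int main() {
      std::atomic<uint32_t> cell{__builtin_bswap32(40u)};  // 40, stored reversed
      uint32_t prev = AtomicAddWasmOrder(&cell, 2);
      std::printf("prev=%u now=%u\n", prev, __builtin_bswap32(cell.load()));  // 40 42
    }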
@@ -914,16 +994,23 @@ static inline int AssembleUnaryOp(Instruction* instr, _R _r, _M _m, _I _i) {
__ bind(&done); \
} while (false)
-#define ASSEMBLE_ATOMIC64_COMP_EXCHANGE_WORD64() \
- do { \
- Register new_val = i.InputRegister(1); \
- Register output = i.OutputRegister(); \
- Register addr = kScratchReg; \
- size_t index = 2; \
- AddressingMode mode = kMode_None; \
- MemOperand op = i.MemoryOperand(&mode, &index); \
- __ lay(addr, op); \
- __ CmpAndSwap64(output, new_val, MemOperand(addr)); \
+#define ASSEMBLE_ATOMIC64_COMP_EXCHANGE_WORD64() \
+ do { \
+ Register new_val = i.InputRegister(1); \
+ Register output = i.OutputRegister(); \
+ Register addr = kScratchReg; \
+ size_t index = 2; \
+ AddressingMode mode = kMode_None; \
+ MemOperand op = i.MemoryOperand(&mode, &index); \
+ __ lay(addr, op); \
+ if (is_wasm_on_be(info()->IsWasm())) { \
+ __ lrvgr(r0, output); \
+ __ lrvgr(r1, new_val); \
+ __ CmpAndSwap64(r0, r1, MemOperand(addr)); \
+ __ lrvgr(output, r0); \
+ } else { \
+ __ CmpAndSwap64(output, new_val, MemOperand(addr)); \
+ } \
} while (false)
void CodeGenerator::AssembleDeconstructFrame() {
@@ -2308,13 +2395,24 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
Register index = i.InputRegister(1);
Register value = i.InputRegister(2);
Register output = i.OutputRegister();
+ bool reverse_bytes = is_wasm_on_be(info()->IsWasm());
__ la(r1, MemOperand(base, index));
- __ AtomicExchangeU16(r1, value, output, r0);
+ Register value_ = value;
+ if (reverse_bytes) {
+ value_ = ip;
+ __ lrvr(value_, value);
+ __ ShiftRightU32(value_, value_, Operand(16));
+ }
+ __ AtomicExchangeU16(r1, value_, output, r0);
if (opcode == kAtomicExchangeInt16) {
__ lghr(output, output);
} else {
__ llghr(output, output);
}
+ if (reverse_bytes) {
+ __ lrvr(output, output);
+ __ ShiftRightU32(output, output, Operand(16));
+ }
break;
}
case kAtomicExchangeWord32: {
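A detail worth noting in the halfword exchange path above: lrvr reverses all four bytes of a 32-bit register, so a 16-bit payload that started in the low half lands in the high half and has to be shifted right by 16 afterwards. A minimal model of that idiom (GCC/Clang builtin assumed):

    #include <cstdint>
    #include <cstdio>

    // lrvr followed by ShiftRightU32(..., 16): byte-swap a halfword held in the
    // low 16 bits of a 32-bit register.
    uint32_t ReverseHalfwordInRegister(uint32_t reg) {
      uint32_t reversed = __builtin_bswap32(reg);  // lrvr
      return reversed >> 16;                       // ShiftRightU32(..., 16)
    }

    int main() {
      std::printf("0x%x\n", ReverseHalfwordInRegister(0x1234));  // 0x3412
    }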
@@ -2323,11 +2421,21 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
Register value = i.InputRegister(2);
Register output = i.OutputRegister();
Label do_cs;
+ bool reverse_bytes = is_wasm_on_be(info()->IsWasm());
__ lay(r1, MemOperand(base, index));
+ Register value_ = value;
+ if (reverse_bytes) {
+ value_ = ip;
+ __ lrvr(value_, value);
+ }
__ LoadU32(output, MemOperand(r1));
__ bind(&do_cs);
- __ cs(output, value, MemOperand(r1));
+ __ cs(output, value_, MemOperand(r1));
__ bne(&do_cs, Label::kNear);
+ if (reverse_bytes) {
+ __ lrvr(output, output);
+ __ LoadU32(output, output);
+ }
break;
}
case kAtomicCompareExchangeInt8:
@@ -2366,6 +2474,10 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
intptr_t shift_right = static_cast<intptr_t>(shift_amount); \
__ srlk(result, prev, Operand(shift_right)); \
__ LoadS16(result, result); \
+ if (is_wasm_on_be(info()->IsWasm())) { \
+ __ lrvr(result, result); \
+ __ ShiftRightS32(result, result, Operand(16)); \
+ } \
}); \
break; \
case kAtomic##op##Uint16: \
@@ -2374,6 +2486,10 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
__ RotateInsertSelectBits(result, prev, Operand(48), Operand(63), \
Operand(static_cast<intptr_t>(rotate_left)), \
true); \
+ if (is_wasm_on_be(info()->IsWasm())) { \
+ __ lrvr(result, result); \
+ __ ShiftRightU32(result, result, Operand(16)); \
+ } \
}); \
break;
ATOMIC_BINOP_CASE(Add, AddS32)
@@ -2383,46 +2499,55 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
ATOMIC_BINOP_CASE(Xor, Xor)
#undef ATOMIC_BINOP_CASE
case kAtomicAddWord32:
- ASSEMBLE_ATOMIC_BINOP_WORD(laa);
+ ASSEMBLE_ATOMIC_BINOP_WORD(laa, AddS32);
break;
case kAtomicSubWord32:
- ASSEMBLE_ATOMIC_BINOP_WORD(LoadAndSub32);
+ ASSEMBLE_ATOMIC_BINOP_WORD(LoadAndSub32, SubS32);
break;
case kAtomicAndWord32:
- ASSEMBLE_ATOMIC_BINOP_WORD(lan);
+ ASSEMBLE_ATOMIC_BINOP_WORD(lan, AndP);
break;
case kAtomicOrWord32:
- ASSEMBLE_ATOMIC_BINOP_WORD(lao);
+ ASSEMBLE_ATOMIC_BINOP_WORD(lao, OrP);
break;
case kAtomicXorWord32:
- ASSEMBLE_ATOMIC_BINOP_WORD(lax);
+ ASSEMBLE_ATOMIC_BINOP_WORD(lax, XorP);
break;
case kS390_Word64AtomicAddUint64:
- ASSEMBLE_ATOMIC_BINOP_WORD64(laag);
+ ASSEMBLE_ATOMIC_BINOP_WORD64(laag, AddS64);
break;
case kS390_Word64AtomicSubUint64:
- ASSEMBLE_ATOMIC_BINOP_WORD64(LoadAndSub64);
+ ASSEMBLE_ATOMIC_BINOP_WORD64(LoadAndSub64, SubS64);
break;
case kS390_Word64AtomicAndUint64:
- ASSEMBLE_ATOMIC_BINOP_WORD64(lang);
+ ASSEMBLE_ATOMIC_BINOP_WORD64(lang, AndP);
break;
case kS390_Word64AtomicOrUint64:
- ASSEMBLE_ATOMIC_BINOP_WORD64(laog);
+ ASSEMBLE_ATOMIC_BINOP_WORD64(laog, OrP);
break;
case kS390_Word64AtomicXorUint64:
- ASSEMBLE_ATOMIC_BINOP_WORD64(laxg);
+ ASSEMBLE_ATOMIC_BINOP_WORD64(laxg, XorP);
break;
case kS390_Word64AtomicExchangeUint64: {
Register base = i.InputRegister(0);
Register index = i.InputRegister(1);
Register value = i.InputRegister(2);
Register output = i.OutputRegister();
+ bool reverse_bytes = is_wasm_on_be(info()->IsWasm());
Label do_cs;
+ Register value_ = value;
__ la(r1, MemOperand(base, index));
+ if (reverse_bytes) {
+ value_ = ip;
+ __ lrvgr(value_, value);
+ }
__ lg(output, MemOperand(r1));
__ bind(&do_cs);
- __ csg(output, value, MemOperand(r1));
+ __ csg(output, value_, MemOperand(r1));
__ bne(&do_cs, Label::kNear);
+ if (reverse_bytes) {
+ __ lrvgr(output, output);
+ }
break;
}
case kS390_Word64AtomicCompareExchangeUint64:
@@ -3914,15 +4039,25 @@ void CodeGenerator::AssembleReturn(InstructionOperand* additional_pop_count) {
// max(argc_reg, parameter_slots-1), and the receiver is added in
// DropArguments().
if (parameter_slots > 1) {
- const int parameter_slots_without_receiver = parameter_slots - 1;
- Label skip;
- __ CmpS64(argc_reg, Operand(parameter_slots_without_receiver));
- __ bgt(&skip);
- __ mov(argc_reg, Operand(parameter_slots_without_receiver));
- __ bind(&skip);
+ if (kJSArgcIncludesReceiver) {
+ Label skip;
+ __ CmpS64(argc_reg, Operand(parameter_slots));
+ __ bgt(&skip);
+ __ mov(argc_reg, Operand(parameter_slots));
+ __ bind(&skip);
+ } else {
+ const int parameter_slots_without_receiver = parameter_slots - 1;
+ Label skip;
+ __ CmpS64(argc_reg, Operand(parameter_slots_without_receiver));
+ __ bgt(&skip);
+ __ mov(argc_reg, Operand(parameter_slots_without_receiver));
+ __ bind(&skip);
+ }
}
__ DropArguments(argc_reg, TurboAssembler::kCountIsInteger,
- TurboAssembler::kCountExcludesReceiver);
+ kJSArgcIncludesReceiver
+ ? TurboAssembler::kCountIncludesReceiver
+ : TurboAssembler::kCountExcludesReceiver);
} else if (additional_pop_count->IsImmediate()) {
int additional_count = g.ToConstant(additional_pop_count).ToInt32();
__ Drop(parameter_slots + additional_count);
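The return-sequence change above clamps the argument count against parameter_slots directly when kJSArgcIncludesReceiver is set, and against parameter_slots - 1 otherwise, before handing it to DropArguments. A toy model of just that clamp (names are illustrative, not V8's):

    #include <algorithm>
    #include <cstdio>

    // The count fed to DropArguments() is at least the declared parameter count;
    // both counts include the receiver only if kJSArgcIncludesReceiver is set.
    int ClampArgc(int argc, int parameter_slots, bool argc_includes_receiver) {
      int declared = argc_includes_receiver ? parameter_slots : parameter_slots - 1;
      return std::max(argc, declared);
    }

    int main() {
      // Declared with 3 parameter slots: an under-application (argc 2) is padded
      // up, an over-application (argc 5) is kept.
      std::printf("%d %d\n", ClampArgc(2, 3, true), ClampArgc(5, 3, true));  // 3 5
    }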
diff --git a/deps/v8/src/compiler/backend/x64/instruction-selector-x64.cc b/deps/v8/src/compiler/backend/x64/instruction-selector-x64.cc
index c043aa25a4..2c6e4ad671 100644
--- a/deps/v8/src/compiler/backend/x64/instruction-selector-x64.cc
+++ b/deps/v8/src/compiler/backend/x64/instruction-selector-x64.cc
@@ -298,12 +298,8 @@ ArchOpcode GetLoadOpcode(LoadRepresentation load_rep) {
opcode = kX64Movq;
break;
case MachineRepresentation::kCagedPointer:
-#ifdef V8_CAGED_POINTERS
opcode = kX64MovqDecodeCagedPointer;
break;
-#else
- UNREACHABLE();
-#endif
case MachineRepresentation::kSimd128:
opcode = kX64Movdqu;
break;
@@ -341,11 +337,7 @@ ArchOpcode GetStoreOpcode(StoreRepresentation store_rep) {
case MachineRepresentation::kWord64:
return kX64Movq;
case MachineRepresentation::kCagedPointer:
-#ifdef V8_CAGED_POINTERS
return kX64MovqEncodeCagedPointer;
-#else
- UNREACHABLE();
-#endif
case MachineRepresentation::kSimd128:
return kX64Movdqu;
case MachineRepresentation::kNone: // Fall through.
@@ -1960,7 +1952,7 @@ void InstructionSelector::EmitPrepareArguments(
stack_decrement = 0;
if (g.CanBeImmediate(input.node)) {
Emit(kX64Push, g.NoOutput(), decrement, g.UseImmediate(input.node));
- } else if (IsSupported(ATOM) ||
+ } else if (IsSupported(INTEL_ATOM) ||
sequence()->IsFP(GetVirtualRegister(input.node))) {
// TODO(titzer): X64Push cannot handle stack->stack double moves
// because there is no way to encode fixed double slots.
@@ -3726,16 +3718,17 @@ namespace {
void VisitRelaxedLaneSelect(InstructionSelector* selector, Node* node) {
X64OperandGenerator g(selector);
// pblendvb copies src2 when mask is set, opposite from Wasm semantics.
+ // node's inputs are: mask, lhs, rhs (determined in wasm-compiler.cc).
if (selector->IsSupported(AVX)) {
selector->Emit(
- kX64Pblendvb, g.DefineAsRegister(node), g.UseRegister(node->InputAt(1)),
- g.UseRegister(node->InputAt(0)), g.UseRegister(node->InputAt(2)));
+ kX64Pblendvb, g.DefineAsRegister(node), g.UseRegister(node->InputAt(2)),
+ g.UseRegister(node->InputAt(1)), g.UseRegister(node->InputAt(0)));
} else {
// SSE4.1 pblendvb requires xmm0 to hold the mask as an implicit operand.
selector->Emit(kX64Pblendvb, g.DefineSameAsFirst(node),
+ g.UseRegister(node->InputAt(2)),
g.UseRegister(node->InputAt(1)),
- g.UseRegister(node->InputAt(0)),
- g.UseFixed(node->InputAt(2), xmm0));
+ g.UseFixed(node->InputAt(0), xmm0));
}
}
} // namespace
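As the comment in the hunk above notes, pblendvb copies its second source where the mask byte is set, while the wasm lane select picks from lhs there, so the selector passes rhs and lhs in swapped positions. A scalar model of why the swap gives the right result:

    #include <cstdint>
    #include <cstdio>

    // pblendvb semantics: where the mask's top bit is set, take src2; else src1.
    void Pblendvb(const uint8_t* src1, const uint8_t* src2, const uint8_t* mask,
                  uint8_t* dst) {
      for (int i = 0; i < 16; ++i) dst[i] = (mask[i] & 0x80) ? src2[i] : src1[i];
    }

    // Wasm relaxed lane select: where the mask is set, take lhs; else rhs.
    // Passing rhs as src1 and lhs as src2 makes pblendvb compute exactly that.
    void WasmLaneSelect(const uint8_t* lhs, const uint8_t* rhs,
                        const uint8_t* mask, uint8_t* dst) {
      Pblendvb(/*src1=*/rhs, /*src2=*/lhs, mask, dst);  // note the swap
    }

    int main() {
      uint8_t lhs[16], rhs[16], mask[16], dst[16];
      for (int i = 0; i < 16; ++i) {
        lhs[i] = 1; rhs[i] = 2; mask[i] = (i % 2) ? 0xFF : 0;
      }
      WasmLaneSelect(lhs, rhs, mask, dst);
      for (uint8_t b : dst) std::printf("%d", b);  // 2121212121212121
      std::printf("\n");
    }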
diff --git a/deps/v8/src/compiler/compilation-dependencies.cc b/deps/v8/src/compiler/compilation-dependencies.cc
index 27720c80ed..ddd2ad807d 100644
--- a/deps/v8/src/compiler/compilation-dependencies.cc
+++ b/deps/v8/src/compiler/compilation-dependencies.cc
@@ -62,6 +62,8 @@ const char* CompilationDependencyKindToString(CompilationDependencyKind kind) {
return names[kind];
}
+class PendingDependencies;
+
} // namespace
class CompilationDependency : public ZoneObject {
@@ -70,25 +72,106 @@ class CompilationDependency : public ZoneObject {
virtual bool IsValid() const = 0;
virtual void PrepareInstall() const {}
- virtual void Install(Handle<Code> code) const = 0;
+ virtual void Install(PendingDependencies* deps) const = 0;
-#ifdef DEBUG
#define V(Name) \
bool Is##Name() const { return kind == k##Name; } \
V8_ALLOW_UNUSED const Name##Dependency* As##Name() const;
DEPENDENCY_LIST(V)
#undef V
-#endif
const char* ToString() const {
return CompilationDependencyKindToString(kind);
}
const CompilationDependencyKind kind;
+
+ private:
+ virtual size_t Hash() const = 0;
+ virtual bool Equals(const CompilationDependency* that) const = 0;
+ friend struct CompilationDependencies::CompilationDependencyHash;
+ friend struct CompilationDependencies::CompilationDependencyEqual;
};
+size_t CompilationDependencies::CompilationDependencyHash::operator()(
+ const CompilationDependency* dep) const {
+ return base::hash_combine(dep->kind, dep->Hash());
+}
+
+bool CompilationDependencies::CompilationDependencyEqual::operator()(
+ const CompilationDependency* lhs, const CompilationDependency* rhs) const {
+ return lhs->kind == rhs->kind && lhs->Equals(rhs);
+}
+
namespace {
+// Dependencies can only be fully deduplicated immediately prior to
+// installation (because PrepareInstall may create the object on which the dep
+// will be installed). We gather and dedupe deps in this class, and install
+// them from here.
+class PendingDependencies final {
+ public:
+ explicit PendingDependencies(Zone* zone) : deps_(zone) {}
+
+ void Register(Handle<HeapObject> object,
+ DependentCode::DependencyGroup group) {
+ deps_[object] |= group;
+ }
+
+ void InstallAll(Isolate* isolate, Handle<Code> code) {
+ if (V8_UNLIKELY(FLAG_predictable)) {
+ InstallAllPredictable(isolate, code);
+ return;
+ }
+
+ // With deduplication done we no longer rely on the object address for
+ // hashing.
+ AllowGarbageCollection yes_gc;
+ for (const auto& o_and_g : deps_) {
+ DependentCode::InstallDependency(isolate, code, o_and_g.first,
+ o_and_g.second);
+ }
+ }
+
+ void InstallAllPredictable(Isolate* isolate, Handle<Code> code) {
+ CHECK(FLAG_predictable);
+ // First, guarantee predictable iteration order.
+ using HandleAndGroup =
+ std::pair<Handle<HeapObject>, DependentCode::DependencyGroups>;
+ std::vector<HandleAndGroup> entries(deps_.begin(), deps_.end());
+
+ std::sort(entries.begin(), entries.end(),
+ [](const HandleAndGroup& lhs, const HandleAndGroup& rhs) {
+ return lhs.first->ptr() < rhs.first->ptr();
+ });
+
+ // With deduplication done we no longer rely on the object address for
+ // hashing.
+ AllowGarbageCollection yes_gc;
+ for (const auto& o_and_g : entries) {
+ DependentCode::InstallDependency(isolate, code, o_and_g.first,
+ o_and_g.second);
+ }
+ }
+
+ private:
+ struct HandleHash {
+ size_t operator()(const Handle<HeapObject>& x) const {
+ return static_cast<size_t>(x->ptr());
+ }
+ };
+ struct HandleEqual {
+ bool operator()(const Handle<HeapObject>& lhs,
+ const Handle<HeapObject>& rhs) const {
+ return lhs.is_identical_to(rhs);
+ }
+ };
+ ZoneUnorderedMap<Handle<HeapObject>, DependentCode::DependencyGroups,
+ HandleHash, HandleEqual>
+ deps_;
+ const DisallowGarbageCollection no_gc_;
+};
+
class InitialMapDependency final : public CompilationDependency {
public:
InitialMapDependency(JSHeapBroker* broker, const JSFunctionRef& function,
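The PendingDependencies helper introduced above exists so that several dependencies registered against the same heap object collapse into a single InstallDependency call, with their dependency groups OR'd together. A stripped-down sketch of that idea, where plain pointers stand in for Handle<HeapObject> and the names are illustrative rather than V8's:

    #include <cstdint>
    #include <cstdio>
    #include <unordered_map>

    using DependencyGroups = uint32_t;

    class PendingDeps {
     public:
      // Register a dependency group against an object; repeated registrations
      // against the same object are merged.
      void Register(void* object, DependencyGroups group) { deps_[object] |= group; }

      // One "install" per distinct object, with the merged group bitmask.
      void InstallAll() const {
        for (const auto& [object, groups] : deps_)
          std::printf("install %p groups=0x%x\n", object, groups);
      }

     private:
      std::unordered_map<void*, DependencyGroups> deps_;
    };

    int main() {
      int map_a, map_b;
      PendingDeps deps;
      deps.Register(&map_a, 1u << 0);  // e.g. a prototype-check group
      deps.Register(&map_a, 1u << 3);  // second dep on the same object: OR'd in
      deps.Register(&map_b, 1u << 1);
      deps.InstallAll();  // two installs, not three
    }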
@@ -103,16 +186,26 @@ class InitialMapDependency final : public CompilationDependency {
function->initial_map() == *initial_map_.object();
}
- void Install(Handle<Code> code) const override {
+ void Install(PendingDependencies* deps) const override {
SLOW_DCHECK(IsValid());
- DependentCode::InstallDependency(function_.isolate(), code,
- initial_map_.object(),
- DependentCode::kInitialMapChangedGroup);
+ deps->Register(initial_map_.object(),
+ DependentCode::kInitialMapChangedGroup);
}
private:
- JSFunctionRef function_;
- MapRef initial_map_;
+ size_t Hash() const override {
+ ObjectRef::Hash h;
+ return base::hash_combine(h(function_), h(initial_map_));
+ }
+
+ bool Equals(const CompilationDependency* that) const override {
+ const InitialMapDependency* const zat = that->AsInitialMap();
+ return function_.equals(zat->function_) &&
+ initial_map_.equals(zat->initial_map_);
+ }
+
+ const JSFunctionRef function_;
+ const MapRef initial_map_;
};
class PrototypePropertyDependency final : public CompilationDependency {
@@ -143,18 +236,28 @@ class PrototypePropertyDependency final : public CompilationDependency {
if (!function->has_initial_map()) JSFunction::EnsureHasInitialMap(function);
}
- void Install(Handle<Code> code) const override {
+ void Install(PendingDependencies* deps) const override {
SLOW_DCHECK(IsValid());
Handle<JSFunction> function = function_.object();
CHECK(function->has_initial_map());
Handle<Map> initial_map(function->initial_map(), function_.isolate());
- DependentCode::InstallDependency(function_.isolate(), code, initial_map,
- DependentCode::kInitialMapChangedGroup);
+ deps->Register(initial_map, DependentCode::kInitialMapChangedGroup);
}
private:
- JSFunctionRef function_;
- ObjectRef prototype_;
+ size_t Hash() const override {
+ ObjectRef::Hash h;
+ return base::hash_combine(h(function_), h(prototype_));
+ }
+
+ bool Equals(const CompilationDependency* that) const override {
+ const PrototypePropertyDependency* const zat = that->AsPrototypeProperty();
+ return function_.equals(zat->function_) &&
+ prototype_.equals(zat->prototype_);
+ }
+
+ const JSFunctionRef function_;
+ const ObjectRef prototype_;
};
class StableMapDependency final : public CompilationDependency {
@@ -168,15 +271,23 @@ class StableMapDependency final : public CompilationDependency {
// heap state modifications.
return !map_.object()->is_dictionary_map() && map_.object()->is_stable();
}
-
- void Install(Handle<Code> code) const override {
+ void Install(PendingDependencies* deps) const override {
SLOW_DCHECK(IsValid());
- DependentCode::InstallDependency(map_.isolate(), code, map_.object(),
- DependentCode::kPrototypeCheckGroup);
+ deps->Register(map_.object(), DependentCode::kPrototypeCheckGroup);
}
private:
- MapRef map_;
+ size_t Hash() const override {
+ ObjectRef::Hash h;
+ return base::hash_combine(h(map_));
+ }
+
+ bool Equals(const CompilationDependency* that) const override {
+ const StableMapDependency* const zat = that->AsStableMap();
+ return map_.equals(zat->map_);
+ }
+
+ const MapRef map_;
};
class ConstantInDictionaryPrototypeChainDependency final
@@ -197,7 +308,7 @@ class ConstantInDictionaryPrototypeChainDependency final
// starting at |receiver_map_|.
bool IsValid() const override { return !GetHolderIfValid().is_null(); }
- void Install(Handle<Code> code) const override {
+ void Install(PendingDependencies* deps) const override {
SLOW_DCHECK(IsValid());
Isolate* isolate = receiver_map_.isolate();
Handle<JSObject> holder = GetHolderIfValid().ToHandleChecked();
@@ -206,14 +317,12 @@ class ConstantInDictionaryPrototypeChainDependency final
while (map->prototype() != *holder) {
map = handle(map->prototype().map(), isolate);
DCHECK(map->IsJSObjectMap()); // Due to IsValid holding.
- DependentCode::InstallDependency(isolate, code, map,
- DependentCode::kPrototypeCheckGroup);
+ deps->Register(map, DependentCode::kPrototypeCheckGroup);
}
DCHECK(map->prototype().map().IsJSObjectMap()); // Due to IsValid holding.
- DependentCode::InstallDependency(isolate, code,
- handle(map->prototype().map(), isolate),
- DependentCode::kPrototypeCheckGroup);
+ deps->Register(handle(map->prototype().map(), isolate),
+ DependentCode::kPrototypeCheckGroup);
}
private:
@@ -296,10 +405,24 @@ class ConstantInDictionaryPrototypeChainDependency final
return MaybeHandle<JSObject>();
}
- MapRef receiver_map_;
- NameRef property_name_;
- ObjectRef constant_;
- PropertyKind kind_;
+ size_t Hash() const override {
+ ObjectRef::Hash h;
+ return base::hash_combine(h(receiver_map_), h(property_name_), h(constant_),
+ static_cast<int>(kind_));
+ }
+
+ bool Equals(const CompilationDependency* that) const override {
+ const ConstantInDictionaryPrototypeChainDependency* const zat =
+ that->AsConstantInDictionaryPrototypeChain();
+ return receiver_map_.equals(zat->receiver_map_) &&
+ property_name_.equals(zat->property_name_) &&
+ constant_.equals(zat->constant_) && kind_ == zat->kind_;
+ }
+
+ const MapRef receiver_map_;
+ const NameRef property_name_;
+ const ObjectRef constant_;
+ const PropertyKind kind_;
};
class OwnConstantDataPropertyDependency final : public CompilationDependency {
@@ -346,9 +469,23 @@ class OwnConstantDataPropertyDependency final : public CompilationDependency {
return true;
}
- void Install(Handle<Code> code) const override {}
+ void Install(PendingDependencies* deps) const override {}
private:
+ size_t Hash() const override {
+ ObjectRef::Hash h;
+ return base::hash_combine(h(holder_), h(map_), representation_.kind(),
+ index_.bit_field(), h(value_));
+ }
+
+ bool Equals(const CompilationDependency* that) const override {
+ const OwnConstantDataPropertyDependency* const zat =
+ that->AsOwnConstantDataProperty();
+ return holder_.equals(zat->holder_) && map_.equals(zat->map_) &&
+ representation_.Equals(zat->representation_) &&
+ index_ == zat->index_ && value_.equals(zat->value_);
+ }
+
JSHeapBroker* const broker_;
JSObjectRef const holder_;
MapRef const map_;
@@ -403,9 +540,22 @@ class OwnConstantDictionaryPropertyDependency final
return true;
}
- void Install(Handle<Code> code) const override {}
+ void Install(PendingDependencies* deps) const override {}
private:
+ size_t Hash() const override {
+ ObjectRef::Hash h;
+ return base::hash_combine(h(holder_), h(map_), index_.raw_value(),
+ h(value_));
+ }
+
+ bool Equals(const CompilationDependency* that) const override {
+ const OwnConstantDictionaryPropertyDependency* const zat =
+ that->AsOwnConstantDictionaryProperty();
+ return holder_.equals(zat->holder_) && map_.equals(zat->map_) &&
+ index_ == zat->index_ && value_.equals(zat->value_);
+ }
+
JSHeapBroker* const broker_;
JSObjectRef const holder_;
MapRef const map_;
@@ -422,9 +572,20 @@ class ConsistentJSFunctionViewDependency final : public CompilationDependency {
return function_.IsConsistentWithHeapState();
}
- void Install(Handle<Code> code) const override {}
+ void Install(PendingDependencies* deps) const override {}
private:
+ size_t Hash() const override {
+ ObjectRef::Hash h;
+ return base::hash_combine(h(function_));
+ }
+
+ bool Equals(const CompilationDependency* that) const override {
+ const ConsistentJSFunctionViewDependency* const zat =
+ that->AsConsistentJSFunctionView();
+ return function_.equals(zat->function_);
+ }
+
const JSFunctionRef function_;
};
@@ -437,14 +598,23 @@ class TransitionDependency final : public CompilationDependency {
bool IsValid() const override { return !map_.object()->is_deprecated(); }
- void Install(Handle<Code> code) const override {
+ void Install(PendingDependencies* deps) const override {
SLOW_DCHECK(IsValid());
- DependentCode::InstallDependency(map_.isolate(), code, map_.object(),
- DependentCode::kTransitionGroup);
+ deps->Register(map_.object(), DependentCode::kTransitionGroup);
}
private:
- MapRef map_;
+ size_t Hash() const override {
+ ObjectRef::Hash h;
+ return base::hash_combine(h(map_));
+ }
+
+ bool Equals(const CompilationDependency* that) const override {
+ const TransitionDependency* const zat = that->AsTransition();
+ return map_.equals(zat->map_);
+ }
+
+ const MapRef map_;
};
class PretenureModeDependency final : public CompilationDependency {
@@ -458,17 +628,25 @@ class PretenureModeDependency final : public CompilationDependency {
bool IsValid() const override {
return allocation_ == site_.object()->GetAllocationType();
}
-
- void Install(Handle<Code> code) const override {
+ void Install(PendingDependencies* deps) const override {
SLOW_DCHECK(IsValid());
- DependentCode::InstallDependency(
- site_.isolate(), code, site_.object(),
- DependentCode::kAllocationSiteTenuringChangedGroup);
+ deps->Register(site_.object(),
+ DependentCode::kAllocationSiteTenuringChangedGroup);
}
private:
- AllocationSiteRef site_;
- AllocationType allocation_;
+ size_t Hash() const override {
+ ObjectRef::Hash h;
+ return base::hash_combine(h(site_), allocation_);
+ }
+
+ bool Equals(const CompilationDependency* that) const override {
+ const PretenureModeDependency* const zat = that->AsPretenureMode();
+ return site_.equals(zat->site_) && allocation_ == zat->allocation_;
+ }
+
+ const AllocationSiteRef site_;
+ const AllocationType allocation_;
};
class FieldRepresentationDependency final : public CompilationDependency {
@@ -489,7 +667,7 @@ class FieldRepresentationDependency final : public CompilationDependency {
.representation());
}
- void Install(Handle<Code> code) const override {
+ void Install(PendingDependencies* deps) const override {
SLOW_DCHECK(IsValid());
Isolate* isolate = map_.isolate();
Handle<Map> owner(map_.object()->FindFieldOwner(isolate, descriptor_),
@@ -498,8 +676,7 @@ class FieldRepresentationDependency final : public CompilationDependency {
CHECK(representation_.Equals(owner->instance_descriptors(isolate)
.GetDetails(descriptor_)
.representation()));
- DependentCode::InstallDependency(isolate, code, owner,
- DependentCode::kFieldRepresentationGroup);
+ deps->Register(owner, DependentCode::kFieldRepresentationGroup);
}
bool DependsOn(const Handle<Map>& receiver_map) const {
@@ -507,9 +684,22 @@ class FieldRepresentationDependency final : public CompilationDependency {
}
private:
- MapRef map_;
- InternalIndex descriptor_;
- Representation representation_;
+ size_t Hash() const override {
+ ObjectRef::Hash h;
+ return base::hash_combine(h(map_), descriptor_.as_int(),
+ representation_.kind());
+ }
+
+ bool Equals(const CompilationDependency* that) const override {
+ const FieldRepresentationDependency* const zat =
+ that->AsFieldRepresentation();
+ return map_.equals(zat->map_) && descriptor_ == zat->descriptor_ &&
+ representation_.Equals(zat->representation_);
+ }
+
+ const MapRef map_;
+ const InternalIndex descriptor_;
+ const Representation representation_;
};
class FieldTypeDependency final : public CompilationDependency {
@@ -529,7 +719,7 @@ class FieldTypeDependency final : public CompilationDependency {
.GetFieldType(descriptor_);
}
- void Install(Handle<Code> code) const override {
+ void Install(PendingDependencies* deps) const override {
SLOW_DCHECK(IsValid());
Isolate* isolate = map_.isolate();
Handle<Map> owner(map_.object()->FindFieldOwner(isolate, descriptor_),
@@ -537,14 +727,24 @@ class FieldTypeDependency final : public CompilationDependency {
CHECK(!owner->is_deprecated());
CHECK_EQ(*type_.object(),
owner->instance_descriptors(isolate).GetFieldType(descriptor_));
- DependentCode::InstallDependency(isolate, code, owner,
- DependentCode::kFieldTypeGroup);
+ deps->Register(owner, DependentCode::kFieldTypeGroup);
}
private:
- MapRef map_;
- InternalIndex descriptor_;
- ObjectRef type_;
+ size_t Hash() const override {
+ ObjectRef::Hash h;
+ return base::hash_combine(h(map_), descriptor_.as_int(), h(type_));
+ }
+
+ bool Equals(const CompilationDependency* that) const override {
+ const FieldTypeDependency* const zat = that->AsFieldType();
+ return map_.equals(zat->map_) && descriptor_ == zat->descriptor_ &&
+ type_.equals(zat->type_);
+ }
+
+ const MapRef map_;
+ const InternalIndex descriptor_;
+ const ObjectRef type_;
};
class FieldConstnessDependency final : public CompilationDependency {
@@ -564,7 +764,7 @@ class FieldConstnessDependency final : public CompilationDependency {
.constness();
}
- void Install(Handle<Code> code) const override {
+ void Install(PendingDependencies* deps) const override {
SLOW_DCHECK(IsValid());
Isolate* isolate = map_.isolate();
Handle<Map> owner(map_.object()->FindFieldOwner(isolate, descriptor_),
@@ -573,13 +773,22 @@ class FieldConstnessDependency final : public CompilationDependency {
CHECK_EQ(PropertyConstness::kConst, owner->instance_descriptors(isolate)
.GetDetails(descriptor_)
.constness());
- DependentCode::InstallDependency(isolate, code, owner,
- DependentCode::kFieldConstGroup);
+ deps->Register(owner, DependentCode::kFieldConstGroup);
}
private:
- MapRef map_;
- InternalIndex descriptor_;
+ size_t Hash() const override {
+ ObjectRef::Hash h;
+ return base::hash_combine(h(map_), descriptor_.as_int());
+ }
+
+ bool Equals(const CompilationDependency* that) const override {
+ const FieldConstnessDependency* const zat = that->AsFieldConstness();
+ return map_.equals(zat->map_) && descriptor_ == zat->descriptor_;
+ }
+
+ const MapRef map_;
+ const InternalIndex descriptor_;
};
class GlobalPropertyDependency final : public CompilationDependency {
@@ -604,17 +813,26 @@ class GlobalPropertyDependency final : public CompilationDependency {
return type_ == cell->property_details().cell_type() &&
read_only_ == cell->property_details().IsReadOnly();
}
-
- void Install(Handle<Code> code) const override {
+ void Install(PendingDependencies* deps) const override {
SLOW_DCHECK(IsValid());
- DependentCode::InstallDependency(cell_.isolate(), code, cell_.object(),
- DependentCode::kPropertyCellChangedGroup);
+ deps->Register(cell_.object(), DependentCode::kPropertyCellChangedGroup);
}
private:
- PropertyCellRef cell_;
- PropertyCellType type_;
- bool read_only_;
+ size_t Hash() const override {
+ ObjectRef::Hash h;
+ return base::hash_combine(h(cell_), static_cast<int>(type_), read_only_);
+ }
+
+ bool Equals(const CompilationDependency* that) const override {
+ const GlobalPropertyDependency* const zat = that->AsGlobalProperty();
+ return cell_.equals(zat->cell_) && type_ == zat->type_ &&
+ read_only_ == zat->read_only_;
+ }
+
+ const PropertyCellRef cell_;
+ const PropertyCellType type_;
+ const bool read_only_;
};
class ProtectorDependency final : public CompilationDependency {
@@ -626,15 +844,23 @@ class ProtectorDependency final : public CompilationDependency {
Handle<PropertyCell> cell = cell_.object();
return cell->value() == Smi::FromInt(Protectors::kProtectorValid);
}
-
- void Install(Handle<Code> code) const override {
+ void Install(PendingDependencies* deps) const override {
SLOW_DCHECK(IsValid());
- DependentCode::InstallDependency(cell_.isolate(), code, cell_.object(),
- DependentCode::kPropertyCellChangedGroup);
+ deps->Register(cell_.object(), DependentCode::kPropertyCellChangedGroup);
}
private:
- PropertyCellRef cell_;
+ size_t Hash() const override {
+ ObjectRef::Hash h;
+ return base::hash_combine(h(cell_));
+ }
+
+ bool Equals(const CompilationDependency* that) const override {
+ const ProtectorDependency* const zat = that->AsProtector();
+ return cell_.equals(zat->cell_);
+ }
+
+ const PropertyCellRef cell_;
};
class ElementsKindDependency final : public CompilationDependency {
@@ -652,17 +878,25 @@ class ElementsKindDependency final : public CompilationDependency {
: site->GetElementsKind();
return kind_ == kind;
}
-
- void Install(Handle<Code> code) const override {
+ void Install(PendingDependencies* deps) const override {
SLOW_DCHECK(IsValid());
- DependentCode::InstallDependency(
- site_.isolate(), code, site_.object(),
- DependentCode::kAllocationSiteTransitionChangedGroup);
+ deps->Register(site_.object(),
+ DependentCode::kAllocationSiteTransitionChangedGroup);
}
private:
- AllocationSiteRef site_;
- ElementsKind kind_;
+ size_t Hash() const override {
+ ObjectRef::Hash h;
+ return base::hash_combine(h(site_), static_cast<int>(kind_));
+ }
+
+ bool Equals(const CompilationDependency* that) const override {
+ const ElementsKindDependency* const zat = that->AsElementsKind();
+ return site_.equals(zat->site_) && kind_ == zat->kind_;
+ }
+
+ const AllocationSiteRef site_;
+ const ElementsKind kind_;
};
// Only valid if the holder can use direct reads, since validation uses
@@ -686,12 +920,21 @@ class OwnConstantElementDependency final : public CompilationDependency {
return maybe_element.value() == *element_.object();
}
+ void Install(PendingDependencies* deps) const override {}
- void Install(Handle<Code> code) const override {
- // This dependency has no effect after code finalization.
+ private:
+ size_t Hash() const override {
+ ObjectRef::Hash h;
+ return base::hash_combine(h(holder_), index_, h(element_));
+ }
+
+ bool Equals(const CompilationDependency* that) const override {
+ const OwnConstantElementDependency* const zat =
+ that->AsOwnConstantElement();
+ return holder_.equals(zat->holder_) && index_ == zat->index_ &&
+ element_.equals(zat->element_);
}
- private:
const JSObjectRef holder_;
const uint32_t index_;
const ObjectRef element_;
@@ -720,22 +963,34 @@ class InitialMapInstanceSizePredictionDependency final
function_.object()->CompleteInobjectSlackTrackingIfActive();
}
- void Install(Handle<Code> code) const override {
+ void Install(PendingDependencies* deps) const override {
SLOW_DCHECK(IsValid());
DCHECK(
!function_.object()->initial_map().IsInobjectSlackTrackingInProgress());
}
private:
- JSFunctionRef function_;
- int instance_size_;
+ size_t Hash() const override {
+ ObjectRef::Hash h;
+ return base::hash_combine(h(function_), instance_size_);
+ }
+
+ bool Equals(const CompilationDependency* that) const override {
+ const InitialMapInstanceSizePredictionDependency* const zat =
+ that->AsInitialMapInstanceSizePrediction();
+ return function_.equals(zat->function_) &&
+ instance_size_ == zat->instance_size_;
+ }
+
+ const JSFunctionRef function_;
+ const int instance_size_;
};
} // namespace
void CompilationDependencies::RecordDependency(
CompilationDependency const* dependency) {
- if (dependency != nullptr) dependencies_.push_front(dependency);
+ if (dependency != nullptr) dependencies_.insert(dependency);
}
MapRef CompilationDependencies::DependOnInitialMap(
@@ -885,32 +1140,27 @@ V8_INLINE void TraceInvalidCompilationDependency(
}
bool CompilationDependencies::Commit(Handle<Code> code) {
- for (auto dep : dependencies_) {
- if (!dep->IsValid()) {
- if (FLAG_trace_compilation_dependencies) {
- TraceInvalidCompilationDependency(dep);
- }
- dependencies_.clear();
- return false;
- }
- dep->PrepareInstall();
- }
-
- DisallowCodeDependencyChange no_dependency_change;
- for (auto dep : dependencies_) {
- // Check each dependency's validity again right before installing it,
- // because the first iteration above might have invalidated some
- // dependencies. For example, PrototypePropertyDependency::PrepareInstall
- // can call EnsureHasInitialMap, which can invalidate a StableMapDependency
- // on the prototype object's map.
- if (!dep->IsValid()) {
- if (FLAG_trace_compilation_dependencies) {
- TraceInvalidCompilationDependency(dep);
+ if (!PrepareInstall()) return false;
+
+ {
+ PendingDependencies pending_deps(zone_);
+ DisallowCodeDependencyChange no_dependency_change;
+ for (const CompilationDependency* dep : dependencies_) {
+ // Check each dependency's validity again right before installing it,
+ // because the first iteration above might have invalidated some
+ // dependencies. For example, PrototypePropertyDependency::PrepareInstall
+ // can call EnsureHasInitialMap, which can invalidate a
+ // StableMapDependency on the prototype object's map.
+ if (!dep->IsValid()) {
+ if (FLAG_trace_compilation_dependencies) {
+ TraceInvalidCompilationDependency(dep);
+ }
+ dependencies_.clear();
+ return false;
}
- dependencies_.clear();
- return false;
+ dep->Install(&pending_deps);
}
- dep->Install(code);
+ pending_deps.InstallAll(broker_->isolate(), code);
}
// It is even possible that a GC during the above installations invalidated
@@ -941,6 +1191,44 @@ bool CompilationDependencies::Commit(Handle<Code> code) {
return true;
}
+bool CompilationDependencies::PrepareInstall() {
+ if (V8_UNLIKELY(FLAG_predictable)) {
+ return PrepareInstallPredictable();
+ }
+
+ for (auto dep : dependencies_) {
+ if (!dep->IsValid()) {
+ if (FLAG_trace_compilation_dependencies) {
+ TraceInvalidCompilationDependency(dep);
+ }
+ dependencies_.clear();
+ return false;
+ }
+ dep->PrepareInstall();
+ }
+ return true;
+}
+
+bool CompilationDependencies::PrepareInstallPredictable() {
+ CHECK(FLAG_predictable);
+
+ std::vector<const CompilationDependency*> deps(dependencies_.begin(),
+ dependencies_.end());
+ std::sort(deps.begin(), deps.end());
+
+ for (auto dep : deps) {
+ if (!dep->IsValid()) {
+ if (FLAG_trace_compilation_dependencies) {
+ TraceInvalidCompilationDependency(dep);
+ }
+ dependencies_.clear();
+ return false;
+ }
+ dep->PrepareInstall();
+ }
+ return true;
+}
+
namespace {
// This function expects to never see a JSProxy.
@@ -960,7 +1248,6 @@ void DependOnStablePrototypeChain(CompilationDependencies* deps, MapRef map,
} // namespace
-#ifdef DEBUG
#define V(Name) \
const Name##Dependency* CompilationDependency::As##Name() const { \
DCHECK(Is##Name()); \
@@ -968,7 +1255,6 @@ void DependOnStablePrototypeChain(CompilationDependencies* deps, MapRef map,
}
DEPENDENCY_LIST(V)
#undef V
-#endif // DEBUG
void CompilationDependencies::DependOnStablePrototypeChains(
ZoneVector<MapRef> const& receiver_maps, WhereToStart start,
diff --git a/deps/v8/src/compiler/compilation-dependencies.h b/deps/v8/src/compiler/compilation-dependencies.h
index f4b49878c8..aa8ff7b82a 100644
--- a/deps/v8/src/compiler/compilation-dependencies.h
+++ b/deps/v8/src/compiler/compilation-dependencies.h
@@ -159,10 +159,25 @@ class V8_EXPORT_PRIVATE CompilationDependencies : public ZoneObject {
const CompilationDependency* dep, const Handle<Map>& receiver_map);
#endif // DEBUG
+ struct CompilationDependencyHash {
+ size_t operator()(const CompilationDependency* dep) const;
+ };
+ struct CompilationDependencyEqual {
+ bool operator()(const CompilationDependency* lhs,
+ const CompilationDependency* rhs) const;
+ };
+
private:
+ bool PrepareInstall();
+ bool PrepareInstallPredictable();
+
+ using CompilationDependencySet =
+ ZoneUnorderedSet<const CompilationDependency*, CompilationDependencyHash,
+ CompilationDependencyEqual>;
+
Zone* const zone_;
JSHeapBroker* const broker_;
- ZoneForwardList<CompilationDependency const*> dependencies_;
+ CompilationDependencySet dependencies_;
};
} // namespace compiler
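The header change above turns dependencies_ from a forward list into a hash set keyed by the new per-dependency Hash()/Equals() hooks, so RecordDependency deduplicates at insertion time. A self-contained sketch of the same pattern with std::unordered_set; the types here are illustrative stand-ins, not the V8 classes:

    #include <cstddef>
    #include <cstdio>
    #include <functional>
    #include <unordered_set>

    struct Dependency {
      int kind;
      const void* target;  // stands in for e.g. a MapRef
    };

    struct DepHash {
      size_t operator()(const Dependency* d) const {
        return std::hash<int>()(d->kind) ^ std::hash<const void*>()(d->target);
      }
    };
    struct DepEqual {
      bool operator()(const Dependency* a, const Dependency* b) const {
        return a->kind == b->kind && a->target == b->target;
      }
    };

    int main() {
      int some_map;
      Dependency d1{/*kStableMap*/ 2, &some_map}, d2{2, &some_map};
      std::unordered_set<const Dependency*, DepHash, DepEqual> deps;
      deps.insert(&d1);
      deps.insert(&d2);  // deduplicated: DepEqual says it is the same dependency
      std::printf("%zu\n", deps.size());  // 1
    }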
diff --git a/deps/v8/src/compiler/csa-load-elimination.cc b/deps/v8/src/compiler/csa-load-elimination.cc
index ece79a7156..c8bc3a064b 100644
--- a/deps/v8/src/compiler/csa-load-elimination.cc
+++ b/deps/v8/src/compiler/csa-load-elimination.cc
@@ -348,6 +348,9 @@ Reduction CsaLoadElimination::ReduceLoadFromObject(Node* node,
Node* replacement =
TruncateAndExtend(lookup_result.value, from, access.machine_type);
ReplaceWithValue(node, replacement, effect);
+ // This might have opened an opportunity for escape analysis to eliminate
+ // the object altogether.
+ Revisit(object);
return Replace(replacement);
}
}
diff --git a/deps/v8/src/compiler/diamond.h b/deps/v8/src/compiler/diamond.h
index cac1b1726b..abbaf48ffe 100644
--- a/deps/v8/src/compiler/diamond.h
+++ b/deps/v8/src/compiler/diamond.h
@@ -39,8 +39,8 @@ struct Diamond {
void Chain(Node* that) { branch->ReplaceInput(1, that); }
// Nest {this} into either the if_true or if_false branch of {that}.
- void Nest(Diamond const& that, bool if_true) {
- if (if_true) {
+ void Nest(Diamond const& that, bool cond) {
+ if (cond) {
branch->ReplaceInput(1, that.if_true);
that.merge->ReplaceInput(0, merge);
} else {
diff --git a/deps/v8/src/compiler/effect-control-linearizer.cc b/deps/v8/src/compiler/effect-control-linearizer.cc
index b5cc2b0446..51e23d89d3 100644
--- a/deps/v8/src/compiler/effect-control-linearizer.cc
+++ b/deps/v8/src/compiler/effect-control-linearizer.cc
@@ -4985,6 +4985,10 @@ MachineType MachineTypeFor(CTypeInfo::Type type) {
return MachineType::Uint32();
case CTypeInfo::Type::kInt64:
return MachineType::Int64();
+ case CTypeInfo::Type::kAny:
+ static_assert(sizeof(AnyCType) == 8,
+ "CTypeInfo::Type::kAny is assumed to be of size 64 bits.");
+ return MachineType::Int64();
case CTypeInfo::Type::kUint64:
return MachineType::Uint64();
case CTypeInfo::Type::kFloat32:
@@ -5329,7 +5333,7 @@ Node* EffectControlLinearizer::LowerFastApiCall(Node* node) {
StoreRepresentation(MachineRepresentation::kWord32, kNoWriteBarrier),
stack_slot,
static_cast<int>(offsetof(v8::FastApiCallbackOptions, fallback)),
- __ ZeroConstant());
+ __ Int32Constant(0));
__ Store(StoreRepresentation(MachineType::PointerRepresentation(),
kNoWriteBarrier),
stack_slot,
@@ -5466,6 +5470,11 @@ Node* EffectControlLinearizer::LowerFastApiCall(Node* node) {
case CTypeInfo::Type::kV8Value:
case CTypeInfo::Type::kApiObject:
UNREACHABLE();
+ case CTypeInfo::Type::kAny:
+ fast_call_result =
+ ChangeFloat64ToTagged(__ ChangeInt64ToFloat64(c_call_result),
+ CheckForMinusZeroMode::kCheckForMinusZero);
+ break;
}
auto merge = __ MakeLabel(MachineRepresentation::kTagged);
diff --git a/deps/v8/src/compiler/fast-api-calls.cc b/deps/v8/src/compiler/fast-api-calls.cc
index 564da611d5..9317d1ad1f 100644
--- a/deps/v8/src/compiler/fast-api-calls.cc
+++ b/deps/v8/src/compiler/fast-api-calls.cc
@@ -27,6 +27,7 @@ ElementsKind GetTypedArrayElementsKind(CTypeInfo::Type type) {
case CTypeInfo::Type::kBool:
case CTypeInfo::Type::kV8Value:
case CTypeInfo::Type::kApiObject:
+ case CTypeInfo::Type::kAny:
UNREACHABLE();
}
}
diff --git a/deps/v8/src/compiler/functional-list.h b/deps/v8/src/compiler/functional-list.h
index b9968524e3..8e2824bef0 100644
--- a/deps/v8/src/compiler/functional-list.h
+++ b/deps/v8/src/compiler/functional-list.h
@@ -5,7 +5,7 @@
#ifndef V8_COMPILER_FUNCTIONAL_LIST_H_
#define V8_COMPILER_FUNCTIONAL_LIST_H_
-#include <iterator>
+#include "src/base/iterator.h"
#include "src/zone/zone.h"
namespace v8 {
@@ -100,7 +100,7 @@ class FunctionalList {
void Clear() { elements_ = nullptr; }
- class iterator : public std::iterator<std::forward_iterator_tag, A> {
+ class iterator : public base::iterator<std::forward_iterator_tag, A> {
public:
explicit iterator(Cons* cur) : current_(cur) {}
@@ -114,14 +114,6 @@ class FunctionalList {
}
bool operator!=(const iterator& other) const { return !(*this == other); }
- // Implemented so that std::find and friends can use std::iterator_traits
- // for this iterator type.
- typedef std::forward_iterator_tag iterator_category;
- typedef ptrdiff_t difference_type;
- typedef A value_type;
- typedef A* pointer;
- typedef A& reference;
-
private:
Cons* current_;
};
diff --git a/deps/v8/src/compiler/globals.h b/deps/v8/src/compiler/globals.h
index 23f834cd6c..cdc6c19c47 100644
--- a/deps/v8/src/compiler/globals.h
+++ b/deps/v8/src/compiler/globals.h
@@ -92,8 +92,8 @@ const int kMaxFastLiteralProperties = JSObject::kMaxInObjectProperties;
// to add support for IA32, because it has a totally different approach
// (using FP stack). As support is added to more platforms, please make sure
// to list them here in order to enable tests of this functionality.
-#if defined(V8_TARGET_ARCH_X64) || \
- (defined(V8_TARGET_ARCH_ARM64) && !defined(USE_SIMULATOR))
+// Make sure to sync the following with src/d8/d8-test.cc.
+#if defined(V8_TARGET_ARCH_X64) || defined(V8_TARGET_ARCH_ARM64)
#define V8_ENABLE_FP_PARAMS_IN_C_LINKAGE
#endif
diff --git a/deps/v8/src/compiler/graph-assembler.cc b/deps/v8/src/compiler/graph-assembler.cc
index ad65b9d641..93a6ee4a6b 100644
--- a/deps/v8/src/compiler/graph-assembler.cc
+++ b/deps/v8/src/compiler/graph-assembler.cc
@@ -578,15 +578,6 @@ TNode<Map> GraphAssembler::LoadMap(Node* object) {
#endif
}
-void GraphAssembler::StoreMap(Node* object, TNode<Map> map) {
-#ifdef V8_MAP_PACKING
- map = PackMapWord(map);
-#endif
- StoreRepresentation rep(MachineType::TaggedRepresentation(),
- kMapWriteBarrier);
- Store(rep, object, HeapObject::kMapOffset - kHeapObjectTag, map);
-}
-
Node* JSGraphAssembler::StoreElement(ElementAccess const& access, Node* object,
Node* index, Node* value) {
return AddNode(graph()->NewNode(simplified()->StoreElement(access), object,
diff --git a/deps/v8/src/compiler/graph-assembler.h b/deps/v8/src/compiler/graph-assembler.h
index fbd0cf4af2..c7da66acfc 100644
--- a/deps/v8/src/compiler/graph-assembler.h
+++ b/deps/v8/src/compiler/graph-assembler.h
@@ -278,7 +278,6 @@ class V8_EXPORT_PRIVATE GraphAssembler {
TNode<Map> UnpackMapWord(Node* map_word);
#endif
TNode<Map> LoadMap(Node* object);
- void StoreMap(Node* object, TNode<Map> map);
Node* DebugBreak();
diff --git a/deps/v8/src/compiler/heap-refs.cc b/deps/v8/src/compiler/heap-refs.cc
index 2386266c79..d06bf7ef47 100644
--- a/deps/v8/src/compiler/heap-refs.cc
+++ b/deps/v8/src/compiler/heap-refs.cc
@@ -365,20 +365,22 @@ base::Optional<ObjectRef> GetOwnFastDataPropertyFromHeap(
base::Optional<Object> constant;
{
DisallowGarbageCollection no_gc;
+ PtrComprCageBase cage_base = broker->cage_base();
// This check ensures the live map is the same as the cached map, to
// protect us against reads outside the bounds of the heap. This could
// happen if the Ref was created in a prior GC epoch, and the object
// shrunk in size. It might end up at the edge of a heap boundary. If
// we see that the map is the same in this GC epoch, we are safe.
- Map map = holder.object()->map(kAcquireLoad);
+ Map map = holder.object()->map(cage_base, kAcquireLoad);
if (*holder.map().object() != map) {
TRACE_BROKER_MISSING(broker, "Map changed for " << holder);
return {};
}
if (field_index.is_inobject()) {
- constant = holder.object()->RawInobjectPropertyAt(map, field_index);
+ constant =
+ holder.object()->RawInobjectPropertyAt(cage_base, map, field_index);
if (!constant.has_value()) {
TRACE_BROKER_MISSING(
broker, "Constant field in " << holder << " is unsafe to read");
@@ -386,12 +388,12 @@ base::Optional<ObjectRef> GetOwnFastDataPropertyFromHeap(
}
} else {
Object raw_properties_or_hash =
- holder.object()->raw_properties_or_hash(kRelaxedLoad);
+ holder.object()->raw_properties_or_hash(cage_base, kRelaxedLoad);
// Ensure that the object is safe to inspect.
if (broker->ObjectMayBeUninitialized(raw_properties_or_hash)) {
return {};
}
- if (!raw_properties_or_hash.IsPropertyArray()) {
+ if (!raw_properties_or_hash.IsPropertyArray(cage_base)) {
TRACE_BROKER_MISSING(
broker,
"Expected PropertyArray for backing store in " << holder << ".");
@@ -931,8 +933,8 @@ bool JSFunctionRef::IsConsistentWithHeapState() const {
HeapObjectData::HeapObjectData(JSHeapBroker* broker, ObjectData** storage,
Handle<HeapObject> object, ObjectDataKind kind)
: ObjectData(broker, storage, object, kind),
- map_(broker->GetOrCreateData(object->map(kAcquireLoad),
- kAssumeMemoryFence)) {
+ map_(broker->GetOrCreateData(
+ object->map(broker->cage_base(), kAcquireLoad), kAssumeMemoryFence)) {
CHECK_IMPLIES(broker->mode() == JSHeapBroker::kSerialized,
kind == kBackgroundSerializedHeapObject);
}
@@ -1273,7 +1275,7 @@ bool JSObjectData::SerializeAsBoilerplateRecursive(JSHeapBroker* broker,
for (InternalIndex i : boilerplate->map().IterateOwnDescriptors()) {
PropertyDetails details = descriptors->GetDetails(i);
if (details.location() != PropertyLocation::kField) continue;
- DCHECK_EQ(kData, details.kind());
+ DCHECK_EQ(PropertyKind::kData, details.kind());
FieldIndex field_index = FieldIndex::ForDescriptor(boilerplate->map(), i);
// Make sure {field_index} agrees with {inobject_properties} on the index of
@@ -1469,7 +1471,7 @@ ObjectData* JSHeapBroker::TryGetOrCreateData(Handle<Object> object,
#define CREATE_DATA(Name) \
if (object->Is##Name()) { \
- RefsMap::Entry* entry = refs_->LookupOrInsert(object.address()); \
+ entry = refs_->LookupOrInsert(object.address()); \
object_data = zone()->New<ref_traits<Name>::data_type>( \
this, &entry->value, Handle<Name>::cast(object), \
ObjectDataKindFor(ref_traits<Name>::ref_serialization_kind)); \
@@ -1678,7 +1680,8 @@ base::Optional<ObjectRef> JSObjectRef::RawInobjectPropertyAt(
Handle<Object> value;
{
DisallowGarbageCollection no_gc;
- Map current_map = object()->map(kAcquireLoad);
+ PtrComprCageBase cage_base = broker()->cage_base();
+ Map current_map = object()->map(cage_base, kAcquireLoad);
// If the map changed in some prior GC epoch, our {index} could be
// outside the valid bounds of the cached map.
@@ -1688,7 +1691,7 @@ base::Optional<ObjectRef> JSObjectRef::RawInobjectPropertyAt(
}
base::Optional<Object> maybe_value =
- object()->RawInobjectPropertyAt(current_map, index);
+ object()->RawInobjectPropertyAt(cage_base, current_map, index);
if (!maybe_value.has_value()) {
TRACE_BROKER_MISSING(broker(),
"Unable to safely read property in " << *this);
@@ -1803,6 +1806,17 @@ bool StringRef::SupportedStringKind() const {
return IsInternalizedString() || object()->IsThinString();
}
+base::Optional<Handle<String>> StringRef::ObjectIfContentAccessible() {
+ if (data_->kind() == kNeverSerializedHeapObject && !SupportedStringKind()) {
+ TRACE_BROKER_MISSING(
+ broker(),
+ "content for kNeverSerialized unsupported string kind " << *this);
+ return base::nullopt;
+ } else {
+ return object();
+ }
+}
+
base::Optional<int> StringRef::length() const {
if (data_->kind() == kNeverSerializedHeapObject && !SupportedStringKind()) {
TRACE_BROKER_MISSING(
@@ -2516,7 +2530,9 @@ base::Optional<ObjectRef> SourceTextModuleRef::import_meta() const {
}
base::Optional<MapRef> HeapObjectRef::map_direct_read() const {
- return TryMakeRef(broker(), object()->map(kAcquireLoad), kAssumeMemoryFence);
+ PtrComprCageBase cage_base = broker()->cage_base();
+ return TryMakeRef(broker(), object()->map(cage_base, kAcquireLoad),
+ kAssumeMemoryFence);
}
namespace {
@@ -2551,7 +2567,7 @@ OddballType GetOddballType(Isolate* isolate, Map map) {
HeapObjectType HeapObjectRef::GetHeapObjectType() const {
if (data_->should_access_heap()) {
- Map map = Handle<HeapObject>::cast(object())->map();
+ Map map = Handle<HeapObject>::cast(object())->map(broker()->cage_base());
HeapObjectType::Flags flags(0);
if (map.is_undetectable()) flags |= HeapObjectType::kUndetectable;
if (map.is_callable()) flags |= HeapObjectType::kCallable;
diff --git a/deps/v8/src/compiler/heap-refs.h b/deps/v8/src/compiler/heap-refs.h
index f54ca63355..7945feb2bc 100644
--- a/deps/v8/src/compiler/heap-refs.h
+++ b/deps/v8/src/compiler/heap-refs.h
@@ -942,6 +942,7 @@ class StringRef : public NameRef {
// When concurrently accessing non-read-only non-supported strings, we return
// base::nullopt for these methods.
+ base::Optional<Handle<String>> ObjectIfContentAccessible();
base::Optional<int> length() const;
base::Optional<uint16_t> GetFirstChar();
base::Optional<double> ToNumber();
diff --git a/deps/v8/src/compiler/int64-lowering.cc b/deps/v8/src/compiler/int64-lowering.cc
index 00930998dd..9a859e4072 100644
--- a/deps/v8/src/compiler/int64-lowering.cc
+++ b/deps/v8/src/compiler/int64-lowering.cc
@@ -31,7 +31,7 @@ Int64Lowering::Int64Lowering(
machine_(machine),
common_(common),
simplified_(simplified),
- state_(graph, 3),
+ state_(graph->NodeCount(), State::kUnvisited),
stack_(zone),
replacements_(nullptr),
signature_(signature),
@@ -48,19 +48,19 @@ void Int64Lowering::LowerGraph() {
return;
}
stack_.push_back({graph()->end(), 0});
- state_.Set(graph()->end(), State::kOnStack);
+ state_[graph()->end()->id()] = State::kOnStack;
while (!stack_.empty()) {
NodeState& top = stack_.back();
if (top.input_index == top.node->InputCount()) {
// All inputs of top have already been lowered; now lower top.
stack_.pop_back();
- state_.Set(top.node, State::kVisited);
+ state_[top.node->id()] = State::kVisited;
LowerNode(top.node);
} else {
// Push the next input onto the stack.
Node* input = top.node->InputAt(top.input_index++);
- if (state_.Get(input) == State::kUnvisited) {
+ if (state_[input->id()] == State::kUnvisited) {
if (input->opcode() == IrOpcode::kPhi) {
// To break cycles with phi nodes we push phis on a separate stack so
// that they are processed after all other nodes.
@@ -72,7 +72,7 @@ void Int64Lowering::LowerGraph() {
} else {
stack_.push_back({input, 0});
}
- state_.Set(input, State::kOnStack);
+ state_[input->id()] = State::kOnStack;
}
}
}
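The rewritten LowerGraph() above is an iterative post-order walk: a node is lowered only after all of its inputs, with a per-node state array indexed by node id replacing the previous NodeMarker. A generic sketch of the same pattern, using a hypothetical Node type and omitting the phi-deferral the real code performs:

#include <cstddef>
#include <cstdint>
#include <vector>

enum class State : uint8_t { kUnvisited, kOnStack, kVisited };

struct Node {
  size_t id;
  std::vector<Node*> inputs;
};

// Iterative post-order traversal with an explicit stack and a per-node state
// array, mirroring the shape of Int64Lowering::LowerGraph().
template <typename Visit>
void PostOrder(Node* end, size_t node_count, Visit visit) {
  struct Frame {
    Node* node;
    size_t input_index;
  };
  std::vector<State> state(node_count, State::kUnvisited);
  std::vector<Frame> stack;
  stack.push_back({end, 0});
  state[end->id] = State::kOnStack;
  while (!stack.empty()) {
    Frame& top = stack.back();
    if (top.input_index == top.node->inputs.size()) {
      // All inputs handled; visit the node itself.
      Node* done = top.node;
      state[done->id] = State::kVisited;
      stack.pop_back();
      visit(done);
    } else {
      Node* input = top.node->inputs[top.input_index++];
      if (state[input->id] == State::kUnvisited) {
        state[input->id] = State::kOnStack;
        stack.push_back({input, 0});  // may invalidate `top`; not used after
      }
    }
  }
}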
diff --git a/deps/v8/src/compiler/int64-lowering.h b/deps/v8/src/compiler/int64-lowering.h
index 6a97760f5f..23636a82b6 100644
--- a/deps/v8/src/compiler/int64-lowering.h
+++ b/deps/v8/src/compiler/int64-lowering.h
@@ -93,7 +93,7 @@ class V8_EXPORT_PRIVATE Int64Lowering {
MachineOperatorBuilder* machine_;
CommonOperatorBuilder* common_;
SimplifiedOperatorBuilder* simplified_;
- NodeMarker<State> state_;
+ std::vector<State> state_;
ZoneDeque<NodeState> stack_;
Replacement* replacements_;
Signature<MachineRepresentation>* signature_;
diff --git a/deps/v8/src/compiler/js-call-reducer.cc b/deps/v8/src/compiler/js-call-reducer.cc
index 5e26a68ada..0b709ad695 100644
--- a/deps/v8/src/compiler/js-call-reducer.cc
+++ b/deps/v8/src/compiler/js-call-reducer.cc
@@ -17,6 +17,7 @@
#include "src/compiler/access-info.h"
#include "src/compiler/allocation-builder-inl.h"
#include "src/compiler/allocation-builder.h"
+#include "src/compiler/common-operator.h"
#include "src/compiler/compilation-dependencies.h"
#include "src/compiler/feedback-source.h"
#include "src/compiler/graph-assembler.h"
@@ -24,6 +25,7 @@
#include "src/compiler/linkage.h"
#include "src/compiler/map-inference.h"
#include "src/compiler/node-matchers.h"
+#include "src/compiler/opcodes.h"
#include "src/compiler/property-access-builder.h"
#include "src/compiler/simplified-operator.h"
#include "src/compiler/state-values-utils.h"
@@ -37,6 +39,11 @@
#include "src/objects/js-function.h"
#include "src/objects/objects-inl.h"
#include "src/objects/ordered-hash-table.h"
+#include "src/objects/string-inl.h"
+
+#ifdef V8_INTL_SUPPORT
+#include "src/objects/intl-objects.h"
+#endif
namespace v8 {
namespace internal {
@@ -956,7 +963,10 @@ class FastApiCallReducerAssembler : public JSCallReducerAssembler {
CallDescriptor::kNeedsFrameState);
ApiFunction api_function(call_handler_info.callback());
ExternalReference function_reference = ExternalReference::Create(
- &api_function, ExternalReference::DIRECT_API_CALL);
+ isolate(), &api_function, ExternalReference::DIRECT_API_CALL,
+ function_template_info_.c_functions().data(),
+ function_template_info_.c_signatures().data(),
+ static_cast<unsigned>(function_template_info_.c_functions().size()));
Node* continuation_frame_state =
CreateGenericLazyDeoptContinuationFrameState(
@@ -2855,11 +2865,11 @@ Reduction JSCallReducer::ReduceObjectPrototypeGetProto(Node* node) {
// ES #sec-object.prototype.hasownproperty
Reduction JSCallReducer::ReduceObjectPrototypeHasOwnProperty(Node* node) {
- JSCallNode n(node);
- Node* receiver = n.receiver();
- Node* name = n.ArgumentOrUndefined(0, jsgraph());
- Effect effect = n.effect();
- Control control = n.control();
+ JSCallNode call_node(node);
+ Node* receiver = call_node.receiver();
+ Node* name = call_node.ArgumentOrUndefined(0, jsgraph());
+ Effect effect = call_node.effect();
+ Control control = call_node.control();
// We can optimize a call to Object.prototype.hasOwnProperty if it's being
// used inside a fast-mode for..in, so for code like this:
@@ -4364,8 +4374,9 @@ Reduction JSCallReducer::ReduceJSCall(Node* node) {
// the {target} must have the same native context as the call site.
// Same if the {target} is the result of a CheckClosure operation.
if (target->opcode() == IrOpcode::kJSCreateClosure) {
- CreateClosureParameters const& p = JSCreateClosureNode{target}.Parameters();
- return ReduceJSCall(node, p.shared_info(broker()));
+ CreateClosureParameters const& params =
+ JSCreateClosureNode{target}.Parameters();
+ return ReduceJSCall(node, params.shared_info(broker()));
} else if (target->opcode() == IrOpcode::kCheckClosure) {
FeedbackCellRef cell = MakeRef(broker(), FeedbackCellOf(target->op()));
base::Optional<SharedFunctionInfoRef> shared = cell.shared_function_info();
@@ -4767,6 +4778,8 @@ Reduction JSCallReducer::ReduceJSCall(Node* node,
return ReduceStringFromCodePoint(node);
case Builtin::kStringPrototypeIterator:
return ReduceStringPrototypeIterator(node);
+ case Builtin::kStringPrototypeLocaleCompare:
+ return ReduceStringPrototypeLocaleCompare(node);
case Builtin::kStringIteratorPrototypeNext:
return ReduceStringIteratorPrototypeNext(node);
case Builtin::kStringPrototypeConcat:
@@ -5518,8 +5531,8 @@ Reduction JSCallReducer::ReduceArrayPrototypePush(Node* node) {
// Collect the value inputs to push.
std::vector<Node*> values(num_values);
- for (int i = 0; i < num_values; ++i) {
- values[i] = n.Argument(i);
+ for (int j = 0; j < num_values; ++j) {
+ values[j] = n.Argument(j);
}
for (auto& value : values) {
@@ -5572,10 +5585,10 @@ Reduction JSCallReducer::ReduceArrayPrototypePush(Node* node) {
receiver, new_length, effect, control);
// Append the {values} to the {elements}.
- for (int i = 0; i < num_values; ++i) {
- Node* value = values[i];
+ for (int j = 0; j < num_values; ++j) {
+ Node* value = values[j];
Node* index = graph()->NewNode(simplified()->NumberAdd(), length,
- jsgraph()->Constant(i));
+ jsgraph()->Constant(j));
effect =
graph()->NewNode(simplified()->StoreElement(
AccessBuilder::ForFixedArrayElement(kind)),
@@ -5866,22 +5879,22 @@ Reduction JSCallReducer::ReduceArrayPrototypeShift(Node* node) {
if_true1 = graph()->NewNode(common()->IfFalse(), branch2);
etrue1 = eloop;
- Node* control = graph()->NewNode(common()->IfTrue(), branch2);
- Node* effect = etrue1;
+ Node* control2 = graph()->NewNode(common()->IfTrue(), branch2);
+ Node* effect2 = etrue1;
ElementAccess const access =
AccessBuilder::ForFixedArrayElement(kind);
- Node* value = effect =
+ Node* value2 = effect2 =
graph()->NewNode(simplified()->LoadElement(access), elements,
- index, effect, control);
- effect = graph()->NewNode(
+ index, effect2, control2);
+ effect2 = graph()->NewNode(
simplified()->StoreElement(access), elements,
graph()->NewNode(simplified()->NumberSubtract(), index,
jsgraph()->OneConstant()),
- value, effect, control);
+ value2, effect2, control2);
- loop->ReplaceInput(1, control);
- eloop->ReplaceInput(1, effect);
+ loop->ReplaceInput(1, control2);
+ eloop->ReplaceInput(1, effect2);
index->ReplaceInput(1,
graph()->NewNode(simplified()->NumberAdd(), index,
jsgraph()->OneConstant()));
@@ -6647,6 +6660,73 @@ Reduction JSCallReducer::ReduceStringPrototypeIterator(Node* node) {
return Replace(iterator);
}
+Reduction JSCallReducer::ReduceStringPrototypeLocaleCompare(Node* node) {
+#ifdef V8_INTL_SUPPORT
+ JSCallNode n(node);
+ // Signature: receiver.localeCompare(compareString, locales, options)
+ if (n.ArgumentCount() < 1 || n.ArgumentCount() > 3) {
+ return NoChange();
+ }
+
+ {
+ Handle<Object> locales;
+ {
+ HeapObjectMatcher m(n.ArgumentOrUndefined(1, jsgraph()));
+ if (!m.HasResolvedValue()) return NoChange();
+ if (m.Is(factory()->undefined_value())) {
+ locales = factory()->undefined_value();
+ } else {
+ ObjectRef ref = m.Ref(broker());
+ if (!ref.IsString()) return NoChange();
+ StringRef sref = ref.AsString();
+ if (base::Optional<Handle<String>> maybe_locales =
+ sref.ObjectIfContentAccessible()) {
+ locales = *maybe_locales;
+ } else {
+ return NoChange();
+ }
+ }
+ }
+
+ TNode<Object> options = n.ArgumentOrUndefined(2, jsgraph());
+ {
+ HeapObjectMatcher m(options);
+ if (!m.Is(factory()->undefined_value())) {
+ return NoChange();
+ }
+ }
+
+ if (Intl::CompareStringsOptionsFor(broker()->local_isolate_or_isolate(),
+ locales, factory()->undefined_value()) !=
+ Intl::CompareStringsOptions::kTryFastPath) {
+ return NoChange();
+ }
+ }
+
+ Callable callable =
+ Builtins::CallableFor(isolate(), Builtin::kStringFastLocaleCompare);
+ auto call_descriptor = Linkage::GetStubCallDescriptor(
+ graph()->zone(), callable.descriptor(),
+ callable.descriptor().GetStackParameterCount(),
+ CallDescriptor::kNeedsFrameState);
+ node->RemoveInput(n.FeedbackVectorIndex());
+ if (n.ArgumentCount() == 3) {
+ node->RemoveInput(n.ArgumentIndex(2));
+ } else if (n.ArgumentCount() == 1) {
+ node->InsertInput(graph()->zone(), n.LastArgumentIndex() + 1,
+ jsgraph()->UndefinedConstant());
+ } else {
+ DCHECK_EQ(2, n.ArgumentCount());
+ }
+ node->InsertInput(graph()->zone(), 0,
+ jsgraph()->HeapConstant(callable.code()));
+ NodeProperties::ChangeOp(node, common()->Call(call_descriptor));
+ return Changed(node);
+#else
+ return NoChange();
+#endif
+}
+
Reduction JSCallReducer::ReduceStringIteratorPrototypeNext(Node* node) {
JSCallNode n(node);
Node* receiver = n.receiver();
@@ -7098,9 +7178,9 @@ Reduction JSCallReducer::ReduceTypedArrayPrototypeToStringTag(Node* node) {
NodeVector effects(graph()->zone());
NodeVector controls(graph()->zone());
- Node* check = graph()->NewNode(simplified()->ObjectIsSmi(), receiver);
- control =
- graph()->NewNode(common()->Branch(BranchHint::kFalse), check, control);
+ Node* smi_check = graph()->NewNode(simplified()->ObjectIsSmi(), receiver);
+ control = graph()->NewNode(common()->Branch(BranchHint::kFalse), smi_check,
+ control);
values.push_back(jsgraph()->UndefinedConstant());
effects.push_back(effect);
@@ -7507,7 +7587,7 @@ Reduction JSCallReducer::ReduceCollectionIteratorPrototypeNext(
Node* iloop = graph()->NewNode(
common()->Phi(MachineRepresentation::kTagged, 2), index, index, loop);
- Node* index = effect = graph()->NewNode(
+ index = effect = graph()->NewNode(
common()->TypeGuard(TypeCache::Get()->kFixedArrayLengthType), iloop,
eloop, control);
{
@@ -7560,8 +7640,8 @@ Reduction JSCallReducer::ReduceCollectionIteratorPrototypeNext(
{
// Abort loop with resulting value.
- Node* control = graph()->NewNode(common()->IfFalse(), branch1);
- Node* effect = etrue0;
+ control = graph()->NewNode(common()->IfFalse(), branch1);
+ effect = etrue0;
Node* value = effect =
graph()->NewNode(common()->TypeGuard(Type::NonInternal()),
entry_key, effect, control);
diff --git a/deps/v8/src/compiler/js-call-reducer.h b/deps/v8/src/compiler/js-call-reducer.h
index 77e841f298..4905d57021 100644
--- a/deps/v8/src/compiler/js-call-reducer.h
+++ b/deps/v8/src/compiler/js-call-reducer.h
@@ -158,6 +158,7 @@ class V8_EXPORT_PRIVATE JSCallReducer final : public AdvancedReducer {
Reduction ReduceStringFromCharCode(Node* node);
Reduction ReduceStringFromCodePoint(Node* node);
Reduction ReduceStringPrototypeIterator(Node* node);
+ Reduction ReduceStringPrototypeLocaleCompare(Node* node);
Reduction ReduceStringIteratorPrototypeNext(Node* node);
Reduction ReduceStringPrototypeConcat(Node* node);
diff --git a/deps/v8/src/compiler/js-create-lowering.cc b/deps/v8/src/compiler/js-create-lowering.cc
index 50a523c606..7875ae6be9 100644
--- a/deps/v8/src/compiler/js-create-lowering.cc
+++ b/deps/v8/src/compiler/js-create-lowering.cc
@@ -1712,7 +1712,7 @@ base::Optional<Node*> JSCreateLowering::TryAllocateFastLiteral(
PropertyDetails const property_details =
boilerplate_map.GetPropertyDetails(i);
if (property_details.location() != PropertyLocation::kField) continue;
- DCHECK_EQ(kData, property_details.kind());
+ DCHECK_EQ(PropertyKind::kData, property_details.kind());
if ((*max_properties)-- == 0) return {};
NameRef property_name = boilerplate_map.GetPropertyKey(i);
diff --git a/deps/v8/src/compiler/js-heap-broker.cc b/deps/v8/src/compiler/js-heap-broker.cc
index cb5f5e88f4..4facc0f25f 100644
--- a/deps/v8/src/compiler/js-heap-broker.cc
+++ b/deps/v8/src/compiler/js-heap-broker.cc
@@ -30,10 +30,13 @@ namespace compiler {
#define TRACE(broker, x) TRACE_BROKER(broker, x)
+#ifdef V8_STATIC_CONSTEXPR_VARIABLES_NEED_DEFINITIONS
// These definitions are here in order to please the linker, which in debug mode
// sometimes requires static constants to be defined in .cc files.
+// This is, however, deprecated (and unnecessary) in C++17.
const uint32_t JSHeapBroker::kMinimalRefsBucketCount;
const uint32_t JSHeapBroker::kInitialRefsBucketCount;
+#endif
void JSHeapBroker::IncrementTracingIndentation() { ++trace_indentation_; }
@@ -43,6 +46,9 @@ JSHeapBroker::JSHeapBroker(Isolate* isolate, Zone* broker_zone,
bool tracing_enabled, bool is_concurrent_inlining,
CodeKind code_kind)
: isolate_(isolate),
+#if V8_COMPRESS_POINTERS
+ cage_base_(isolate),
+#endif // V8_COMPRESS_POINTERS
zone_(broker_zone),
refs_(zone()->New<RefsMap>(kMinimalRefsBucketCount, AddressMatcher(),
zone())),
@@ -582,21 +588,22 @@ ProcessedFeedback const& JSHeapBroker::ReadFeedbackForPropertyAccess(
// If no maps were found for a non-megamorphic access, then our maps died
// and we should soft-deopt.
- if (maps.empty() && nexus.ic_state() != MEGAMORPHIC) {
+ if (maps.empty() && nexus.ic_state() != InlineCacheState::MEGAMORPHIC) {
return NewInsufficientFeedback(kind);
}
if (name.has_value()) {
// We rely on this invariant in JSGenericLowering.
- DCHECK_IMPLIES(maps.empty(), nexus.ic_state() == MEGAMORPHIC);
+ DCHECK_IMPLIES(maps.empty(),
+ nexus.ic_state() == InlineCacheState::MEGAMORPHIC);
return *zone()->New<NamedAccessFeedback>(*name, maps, kind);
- } else if (nexus.GetKeyType() == ELEMENT && !maps.empty()) {
+ } else if (nexus.GetKeyType() == IcCheckType::kElement && !maps.empty()) {
return ProcessFeedbackMapsForElementAccess(
maps, KeyedAccessMode::FromNexus(nexus), kind);
} else {
// No actionable feedback.
DCHECK(maps.empty());
- DCHECK_EQ(nexus.ic_state(), MEGAMORPHIC);
+ DCHECK_EQ(nexus.ic_state(), InlineCacheState::MEGAMORPHIC);
// TODO(neis): Using ElementAccessFeedback here is kind of an abuse.
return *zone()->New<ElementAccessFeedback>(
zone(), KeyedAccessMode::FromNexus(nexus), kind);
@@ -611,7 +618,8 @@ ProcessedFeedback const& JSHeapBroker::ReadFeedbackForGlobalAccess(
nexus.kind() == FeedbackSlotKind::kStoreGlobalSloppy ||
nexus.kind() == FeedbackSlotKind::kStoreGlobalStrict);
if (nexus.IsUninitialized()) return NewInsufficientFeedback(nexus.kind());
- if (nexus.ic_state() != MONOMORPHIC || nexus.GetFeedback()->IsCleared()) {
+ if (nexus.ic_state() != InlineCacheState::MONOMORPHIC ||
+ nexus.GetFeedback()->IsCleared()) {
return *zone()->New<GlobalAccessFeedback>(nexus.kind());
}
diff --git a/deps/v8/src/compiler/js-heap-broker.h b/deps/v8/src/compiler/js-heap-broker.h
index bf9b9aaac0..32eac69a5f 100644
--- a/deps/v8/src/compiler/js-heap-broker.h
+++ b/deps/v8/src/compiler/js-heap-broker.h
@@ -114,6 +114,17 @@ class V8_EXPORT_PRIVATE JSHeapBroker {
void InitializeAndStartSerializing();
Isolate* isolate() const { return isolate_; }
+
+ // The pointer compression cage base value used for decompression of all
+ // tagged values except references to Code objects.
+ PtrComprCageBase cage_base() const {
+#if V8_COMPRESS_POINTERS
+ return cage_base_;
+#else
+ return PtrComprCageBase{};
+#endif // V8_COMPRESS_POINTERS
+ }
+
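For context, the cage base exposed here is the value added to a 32-bit compressed tagged word to recover a full pointer. A conceptual sketch using illustrative names, not V8's actual decompression helpers:

#include <cstdint>

using Address = uintptr_t;

struct CageBase {
  Address base;  // start of the pointer-compression cage
};

// Recover a full address from a 32-bit compressed tagged value. Illustrative
// only; the real decompression also has to deal with Smi handling.
inline Address Decompress(CageBase cage, uint32_t compressed_tagged) {
  return cage.base + compressed_tagged;
}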
Zone* zone() const { return zone_; }
bool tracing_enabled() const { return tracing_enabled_; }
bool is_concurrent_inlining() const { return is_concurrent_inlining_; }
@@ -413,6 +424,9 @@ class V8_EXPORT_PRIVATE JSHeapBroker {
std::unique_ptr<CanonicalHandlesMap> canonical_handles);
Isolate* const isolate_;
+#if V8_COMPRESS_POINTERS
+ const PtrComprCageBase cage_base_;
+#endif // V8_COMPRESS_POINTERS
Zone* const zone_;
base::Optional<NativeContextRef> target_native_context_;
RefsMap* refs_;
diff --git a/deps/v8/src/compiler/js-inlining-heuristic.cc b/deps/v8/src/compiler/js-inlining-heuristic.cc
index c6a223b600..aa5fe632b3 100644
--- a/deps/v8/src/compiler/js-inlining-heuristic.cc
+++ b/deps/v8/src/compiler/js-inlining-heuristic.cc
@@ -110,13 +110,13 @@ JSInliningHeuristic::Candidate JSInliningHeuristic::CollectFunctions(
return out;
}
for (int n = 0; n < value_input_count; ++n) {
- HeapObjectMatcher m(callee->InputAt(n));
- if (!m.HasResolvedValue() || !m.Ref(broker()).IsJSFunction()) {
+ HeapObjectMatcher m2(callee->InputAt(n));
+ if (!m2.HasResolvedValue() || !m2.Ref(broker()).IsJSFunction()) {
out.num_functions = 0;
return out;
}
- out.functions[n] = m.Ref(broker()).AsJSFunction();
+ out.functions[n] = m2.Ref(broker()).AsJSFunction();
JSFunctionRef function = out.functions[n].value();
if (CanConsiderForInlining(broker(), function)) {
out.bytecode[n] = function.shared().GetBytecodeArray();
@@ -602,7 +602,7 @@ bool JSInliningHeuristic::TryReuseDispatch(Node* node, Node* callee,
// frame state, and change all the uses of the callee to the constant
// callee.
Node* target = callee->InputAt(i);
- Node* effect = effect_phi->InputAt(i);
+ Node* effect_phi_effect = effect_phi->InputAt(i);
Node* control = merge->InputAt(i);
if (checkpoint) {
@@ -610,8 +610,8 @@ bool JSInliningHeuristic::TryReuseDispatch(Node* node, Node* callee,
FrameState new_checkpoint_state = DuplicateFrameStateAndRename(
FrameState{checkpoint_state}, callee, target,
(i == num_calls - 1) ? kChangeInPlace : kCloneState);
- effect = graph()->NewNode(checkpoint->op(), new_checkpoint_state, effect,
- control);
+ effect_phi_effect = graph()->NewNode(
+ checkpoint->op(), new_checkpoint_state, effect_phi_effect, control);
}
// Duplicate the call.
@@ -620,7 +620,7 @@ bool JSInliningHeuristic::TryReuseDispatch(Node* node, Node* callee,
(i == num_calls - 1) ? kChangeInPlace : kCloneState);
inputs[0] = target;
inputs[input_count - 3] = new_lazy_frame_state;
- inputs[input_count - 2] = effect;
+ inputs[input_count - 2] = effect_phi_effect;
inputs[input_count - 1] = control;
calls[i] = if_successes[i] =
graph()->NewNode(node->op(), input_count, inputs);
@@ -765,13 +765,13 @@ Reduction JSInliningHeuristic::InlineCandidate(Candidate const& candidate,
if (candidate.can_inline_function[i] &&
(small_function || total_inlined_bytecode_size_ <
max_inlined_bytecode_size_cumulative_)) {
- Node* node = calls[i];
- Reduction const reduction = inliner_.ReduceJSCall(node);
+ Node* call = calls[i];
+ Reduction const reduction = inliner_.ReduceJSCall(call);
if (reduction.Changed()) {
total_inlined_bytecode_size_ += candidate.bytecode[i]->length();
// Killing the call node is not strictly necessary, but it is safer to
// make sure we do not resurrect the node.
- node->Kill();
+ call->Kill();
}
}
}
diff --git a/deps/v8/src/compiler/js-inlining.cc b/deps/v8/src/compiler/js-inlining.cc
index b2e012d8c4..08e9f54ff4 100644
--- a/deps/v8/src/compiler/js-inlining.cc
+++ b/deps/v8/src/compiler/js-inlining.cc
@@ -636,17 +636,17 @@ Reduction JSInliner::ReduceJSCall(Node* node) {
// instantiation but before the invocation (i.e. inside {JSConstructStub}
// where execution continues at {construct_stub_create_deopt_pc_offset}).
Node* receiver = jsgraph()->TheHoleConstant(); // Implicit receiver.
- Node* context = NodeProperties::GetContextInput(node);
+ Node* caller_context = NodeProperties::GetContextInput(node);
if (NeedsImplicitReceiver(*shared_info)) {
Effect effect = n.effect();
Control control = n.control();
Node* frame_state_inside = CreateArtificialFrameState(
node, frame_state, n.ArgumentCount(),
BytecodeOffset::ConstructStubCreate(), FrameStateType::kConstructStub,
- *shared_info, context);
+ *shared_info, caller_context);
Node* create =
graph()->NewNode(javascript()->Create(), call.target(), new_target,
- context, frame_state_inside, effect, control);
+ caller_context, frame_state_inside, effect, control);
uncaught_subcalls.push_back(create); // Adds {IfSuccess} & {IfException}.
NodeProperties::ReplaceControlInput(node, create);
NodeProperties::ReplaceEffectInput(node, create);
@@ -675,11 +675,11 @@ Reduction JSInliner::ReduceJSCall(Node* node) {
graph()->NewNode(common()->IfTrue(), branch_is_receiver);
Node* branch_is_receiver_false =
graph()->NewNode(common()->IfFalse(), branch_is_receiver);
- branch_is_receiver_false =
- graph()->NewNode(javascript()->CallRuntime(
- Runtime::kThrowConstructorReturnedNonObject),
- context, NodeProperties::GetFrameStateInput(node),
- node, branch_is_receiver_false);
+ branch_is_receiver_false = graph()->NewNode(
+ javascript()->CallRuntime(
+ Runtime::kThrowConstructorReturnedNonObject),
+ caller_context, NodeProperties::GetFrameStateInput(node), node,
+ branch_is_receiver_false);
uncaught_subcalls.push_back(branch_is_receiver_false);
branch_is_receiver_false =
graph()->NewNode(common()->Throw(), branch_is_receiver_false,
@@ -698,7 +698,7 @@ Reduction JSInliner::ReduceJSCall(Node* node) {
frame_state = CreateArtificialFrameState(
node, frame_state, n.ArgumentCount(),
BytecodeOffset::ConstructStubInvoke(), FrameStateType::kConstructStub,
- *shared_info, context);
+ *shared_info, caller_context);
}
// Insert a JSConvertReceiver node for sloppy callees. Note that the context
diff --git a/deps/v8/src/compiler/js-native-context-specialization.cc b/deps/v8/src/compiler/js-native-context-specialization.cc
index 0c9f057995..c260a7ff9f 100644
--- a/deps/v8/src/compiler/js-native-context-specialization.cc
+++ b/deps/v8/src/compiler/js-native-context-specialization.cc
@@ -597,9 +597,10 @@ Reduction JSNativeContextSpecialization::ReduceJSHasInPrototypeChain(
InferHasInPrototypeChainResult result =
InferHasInPrototypeChain(value, effect, m.Ref(broker()));
if (result != kMayBeInPrototypeChain) {
- Node* value = jsgraph()->BooleanConstant(result == kIsInPrototypeChain);
- ReplaceWithValue(node, value);
- return Replace(value);
+ Node* result_in_chain =
+ jsgraph()->BooleanConstant(result == kIsInPrototypeChain);
+ ReplaceWithValue(node, result_in_chain);
+ return Replace(result_in_chain);
}
}
@@ -789,7 +790,7 @@ Reduction JSNativeContextSpecialization::ReduceGlobalAccess(
PropertyDetails property_details = property_cell.property_details();
PropertyCellType property_cell_type = property_details.cell_type();
- DCHECK_EQ(kData, property_details.kind());
+ DCHECK_EQ(PropertyKind::kData, property_details.kind());
Node* control = NodeProperties::GetControlInput(node);
if (effect == nullptr) {
@@ -1130,19 +1131,19 @@ Reduction JSNativeContextSpecialization::ReduceNamedAccess(
}
// Either infer maps from the graph or use the feedback.
- ZoneVector<MapRef> lookup_start_object_maps(zone());
- if (!InferMaps(lookup_start_object, effect, &lookup_start_object_maps)) {
+ ZoneVector<MapRef> inferred_maps(zone());
+ if (!InferMaps(lookup_start_object, effect, &inferred_maps)) {
for (const MapRef& map : feedback.maps()) {
- lookup_start_object_maps.push_back(map);
+ inferred_maps.push_back(map);
}
}
- RemoveImpossibleMaps(lookup_start_object, &lookup_start_object_maps);
+ RemoveImpossibleMaps(lookup_start_object, &inferred_maps);
// Check if we have an access o.x or o.x=v where o is the target native
// context's global proxy, and turn that into a direct access to the
// corresponding global object instead.
- if (lookup_start_object_maps.size() == 1) {
- MapRef lookup_start_object_map = lookup_start_object_maps[0];
+ if (inferred_maps.size() == 1) {
+ MapRef lookup_start_object_map = inferred_maps[0];
if (lookup_start_object_map.equals(
native_context().global_proxy_object().map())) {
if (!native_context().GlobalIsDetached()) {
@@ -1161,7 +1162,7 @@ Reduction JSNativeContextSpecialization::ReduceNamedAccess(
ZoneVector<PropertyAccessInfo> access_infos(zone());
{
ZoneVector<PropertyAccessInfo> access_infos_for_feedback(zone());
- for (const MapRef& map : lookup_start_object_maps) {
+ for (const MapRef& map : inferred_maps) {
if (map.is_deprecated()) continue;
PropertyAccessInfo access_info = broker()->GetPropertyAccessInfo(
map, feedback.name(), access_mode, dependencies());
diff --git a/deps/v8/src/compiler/js-type-hint-lowering.cc b/deps/v8/src/compiler/js-type-hint-lowering.cc
index 0b53c70e93..c7a614569e 100644
--- a/deps/v8/src/compiler/js-type-hint-lowering.cc
+++ b/deps/v8/src/compiler/js-type-hint-lowering.cc
@@ -325,9 +325,8 @@ JSTypeHintLowering::LoweringResult JSTypeHintLowering::ReduceUnaryOperation(
if (!node) {
if (jsgraph()->machine()->Is64()) {
if (GetBinaryOperationHint(slot) == BinaryOperationHint::kBigInt) {
- const Operator* op =
- jsgraph()->simplified()->SpeculativeBigIntNegate(
- BigIntOperationHint::kBigInt);
+ op = jsgraph()->simplified()->SpeculativeBigIntNegate(
+ BigIntOperationHint::kBigInt);
node = jsgraph()->graph()->NewNode(op, operand, effect, control);
}
}
diff --git a/deps/v8/src/compiler/js-typed-lowering.cc b/deps/v8/src/compiler/js-typed-lowering.cc
index 9cd1de1fc9..bf3720cc77 100644
--- a/deps/v8/src/compiler/js-typed-lowering.cc
+++ b/deps/v8/src/compiler/js-typed-lowering.cc
@@ -1197,7 +1197,7 @@ Reduction JSTypedLowering::ReduceJSHasInPrototypeChain(Node* node) {
// If {value} cannot be a receiver, then it cannot have {prototype} in
// its prototype chain (all Primitive values have a null prototype).
if (value_type.Is(Type::Primitive())) {
- Node* value = jsgraph()->FalseConstant();
+ value = jsgraph()->FalseConstant();
ReplaceWithValue(node, value, effect, control);
return Replace(value);
}
@@ -1789,7 +1789,6 @@ Reduction JSTypedLowering::ReduceJSCall(Node* node) {
// Patch {node} to a direct code object call.
Callable callable =
Builtins::CallableFor(isolate(), shared->builtin_id());
- CallDescriptor::Flags flags = CallDescriptor::kNeedsFrameState;
const CallInterfaceDescriptor& descriptor = callable.descriptor();
auto call_descriptor = Linkage::GetStubCallDescriptor(
@@ -2232,11 +2231,11 @@ Reduction JSTypedLowering::ReduceObjectIsArray(Node* node) {
// Constant-fold based on {value} type.
if (value_type.Is(Type::Array())) {
- Node* value = jsgraph()->TrueConstant();
+ value = jsgraph()->TrueConstant();
ReplaceWithValue(node, value);
return Replace(value);
} else if (!value_type.Maybe(Type::ArrayOrProxy())) {
- Node* value = jsgraph()->FalseConstant();
+ value = jsgraph()->FalseConstant();
ReplaceWithValue(node, value);
return Replace(value);
}
diff --git a/deps/v8/src/compiler/linkage.cc b/deps/v8/src/compiler/linkage.cc
index 2197fe6a65..bd62f24600 100644
--- a/deps/v8/src/compiler/linkage.cc
+++ b/deps/v8/src/compiler/linkage.cc
@@ -208,6 +208,25 @@ int CallDescriptor::CalculateFixedFrameSize(CodeKind code_kind) const {
UNREACHABLE();
}
+EncodedCSignature CallDescriptor::ToEncodedCSignature() const {
+ int parameter_count = static_cast<int>(ParameterCount());
+ EncodedCSignature sig(parameter_count);
+ CHECK_LT(parameter_count, EncodedCSignature::kInvalidParamCount);
+
+ for (int i = 0; i < parameter_count; ++i) {
+ if (IsFloatingPoint(GetParameterType(i).representation())) {
+ sig.SetFloat(i);
+ }
+ }
+ if (ReturnCount() > 0) {
+ DCHECK_EQ(1, ReturnCount());
+ if (IsFloatingPoint(GetReturnType(0).representation())) {
+ sig.SetFloat(EncodedCSignature::kReturnIndex);
+ }
+ }
+ return sig;
+}
+
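ToEncodedCSignature() above records, per parameter and for the return value, whether the slot is floating point. A hypothetical bitmask-based sketch of that idea (not V8's actual EncodedCSignature layout):

#include <cstdint>

// Records which parameters of a C call are floating point.
// Assumes fewer than 32 parameters.
struct FloatParamMask {
  uint32_t bits = 0;
  void SetFloat(int i) { bits |= (1u << i); }
  bool IsFloat(int i) const { return (bits >> i) & 1u; }
};

FloatParamMask EncodeFloatParams(const bool* param_is_float, int param_count) {
  FloatParamMask sig;
  for (int i = 0; i < param_count; ++i) {
    if (param_is_float[i]) sig.SetFloat(i);
  }
  return sig;
}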
void CallDescriptor::ComputeParamCounts() const {
gp_param_count_ = 0;
fp_param_count_ = 0;
@@ -257,6 +276,7 @@ bool Linkage::NeedsFrameStateInput(Runtime::FunctionId function) {
case Runtime::kPushBlockContext:
case Runtime::kPushCatchContext:
case Runtime::kReThrow:
+ case Runtime::kReThrowWithMessage:
case Runtime::kStringEqual:
case Runtime::kStringLessThan:
case Runtime::kStringLessThanOrEqual:
@@ -517,6 +537,9 @@ CallDescriptor* Linkage::GetStubCallDescriptor(
CallDescriptor::kCanUseRoots | flags, // flags
descriptor.DebugName(), // debug name
descriptor.GetStackArgumentOrder(), // stack order
+#if V8_ENABLE_WEBASSEMBLY
+ nullptr, // wasm function signature
+#endif
allocatable_registers);
}
diff --git a/deps/v8/src/compiler/linkage.h b/deps/v8/src/compiler/linkage.h
index d157b44e03..5a58a23134 100644
--- a/deps/v8/src/compiler/linkage.h
+++ b/deps/v8/src/compiler/linkage.h
@@ -15,6 +15,7 @@
#include "src/common/globals.h"
#include "src/compiler/frame.h"
#include "src/compiler/operator.h"
+#include "src/execution/encoded-c-signature.h"
#include "src/runtime/runtime.h"
#include "src/zone/zone.h"
@@ -255,6 +256,9 @@ class V8_EXPORT_PRIVATE CallDescriptor final
RegList callee_saved_fp_registers, Flags flags,
const char* debug_name = "",
StackArgumentOrder stack_order = StackArgumentOrder::kDefault,
+#if V8_ENABLE_WEBASSEMBLY
+ const wasm::FunctionSig* wasm_sig = nullptr,
+#endif
const RegList allocatable_registers = 0,
size_t return_slot_count = 0)
: kind_(kind),
@@ -269,7 +273,11 @@ class V8_EXPORT_PRIVATE CallDescriptor final
allocatable_registers_(allocatable_registers),
flags_(flags),
stack_order_(stack_order),
- debug_name_(debug_name) {}
+#if V8_ENABLE_WEBASSEMBLY
+ wasm_sig_(wasm_sig),
+#endif
+ debug_name_(debug_name) {
+ }
CallDescriptor(const CallDescriptor&) = delete;
CallDescriptor& operator=(const CallDescriptor&) = delete;
@@ -292,6 +300,9 @@ class V8_EXPORT_PRIVATE CallDescriptor final
// Returns {true} if this descriptor is a call to a Wasm C API function.
bool IsWasmCapiFunction() const { return kind_ == kCallWasmCapiFunction; }
+
+ // Returns the wasm signature for this call based on the real parameter types.
+ const wasm::FunctionSig* wasm_sig() const { return wasm_sig_; }
#endif // V8_ENABLE_WEBASSEMBLY
bool RequiresFrameAsIncoming() const {
@@ -434,6 +445,8 @@ class V8_EXPORT_PRIVATE CallDescriptor final
return allocatable_registers_ != 0;
}
+ EncodedCSignature ToEncodedCSignature() const;
+
private:
void ComputeParamCounts() const;
@@ -453,6 +466,9 @@ class V8_EXPORT_PRIVATE CallDescriptor final
const RegList allocatable_registers_;
const Flags flags_;
const StackArgumentOrder stack_order_;
+#if V8_ENABLE_WEBASSEMBLY
+ const wasm::FunctionSig* wasm_sig_;
+#endif
const char* const debug_name_;
mutable base::Optional<size_t> gp_param_count_;
diff --git a/deps/v8/src/compiler/load-elimination.cc b/deps/v8/src/compiler/load-elimination.cc
index 202f28de7d..357b866ca7 100644
--- a/deps/v8/src/compiler/load-elimination.cc
+++ b/deps/v8/src/compiler/load-elimination.cc
@@ -168,14 +168,14 @@ LoadElimination::AbstractElements::Kill(Node* object, Node* index,
if (element.object == nullptr) continue;
if (MayAlias(object, element.object)) {
AbstractElements* that = zone->New<AbstractElements>(zone);
- for (Element const element : this->elements_) {
- if (element.object == nullptr) continue;
- DCHECK_NOT_NULL(element.index);
- DCHECK_NOT_NULL(element.value);
- if (!MayAlias(object, element.object) ||
+ for (Element const element2 : this->elements_) {
+ if (element2.object == nullptr) continue;
+ DCHECK_NOT_NULL(element2.index);
+ DCHECK_NOT_NULL(element2.value);
+ if (!MayAlias(object, element2.object) ||
!NodeProperties::GetType(index).Maybe(
- NodeProperties::GetType(element.index))) {
- that->elements_[that->next_index_++] = element;
+ NodeProperties::GetType(element2.index))) {
+ that->elements_[that->next_index_++] = element2;
}
}
that->next_index_ %= arraysize(elements_);
@@ -285,18 +285,18 @@ class LoadElimination::AliasStateInfo {
LoadElimination::AbstractField const* LoadElimination::AbstractField::KillConst(
Node* object, Zone* zone) const {
- for (auto pair : this->info_for_node_) {
- if (pair.first->IsDead()) continue;
+ for (auto info1 : this->info_for_node_) {
+ if (info1.first->IsDead()) continue;
// If we previously recorded information about a const store on the given
// 'object', we might not have done it on the same node; e.g. we might now
// identify the object by a FinishRegion node, whereas the initial const
// store was performed on the Allocate node. We therefore remove information
// on all nodes that must alias with 'object'.
- if (MustAlias(object, pair.first)) {
+ if (MustAlias(object, info1.first)) {
AbstractField* that = zone->New<AbstractField>(zone);
- for (auto pair : this->info_for_node_) {
- if (!MustAlias(object, pair.first)) {
- that->info_for_node_.insert(pair);
+ for (auto info2 : this->info_for_node_) {
+ if (!MustAlias(object, info2.first)) {
+ that->info_for_node_.insert(info2);
}
}
return that;
@@ -308,14 +308,14 @@ LoadElimination::AbstractField const* LoadElimination::AbstractField::KillConst(
LoadElimination::AbstractField const* LoadElimination::AbstractField::Kill(
const AliasStateInfo& alias_info, MaybeHandle<Name> name,
Zone* zone) const {
- for (auto pair : this->info_for_node_) {
- if (pair.first->IsDead()) continue;
- if (alias_info.MayAlias(pair.first)) {
+ for (auto info1 : this->info_for_node_) {
+ if (info1.first->IsDead()) continue;
+ if (alias_info.MayAlias(info1.first)) {
AbstractField* that = zone->New<AbstractField>(zone);
- for (auto pair : this->info_for_node_) {
- if (!alias_info.MayAlias(pair.first) ||
- !MayAlias(name, pair.second.name)) {
- that->info_for_node_.insert(pair);
+ for (auto info2 : this->info_for_node_) {
+ if (!alias_info.MayAlias(info2.first) ||
+ !MayAlias(name, info2.second.name)) {
+ that->info_for_node_.insert(info2);
}
}
return that;
@@ -353,11 +353,12 @@ bool LoadElimination::AbstractMaps::Lookup(
LoadElimination::AbstractMaps const* LoadElimination::AbstractMaps::Kill(
const AliasStateInfo& alias_info, Zone* zone) const {
- for (auto pair : this->info_for_node_) {
- if (alias_info.MayAlias(pair.first)) {
+ for (auto info1 : this->info_for_node_) {
+ if (alias_info.MayAlias(info1.first)) {
AbstractMaps* that = zone->New<AbstractMaps>(zone);
- for (auto pair : this->info_for_node_) {
- if (!alias_info.MayAlias(pair.first)) that->info_for_node_.insert(pair);
+ for (auto info2 : this->info_for_node_) {
+ if (!alias_info.MayAlias(info2.first))
+ that->info_for_node_.insert(info2);
}
return that;
}
diff --git a/deps/v8/src/compiler/loop-analysis.cc b/deps/v8/src/compiler/loop-analysis.cc
index 7b660856b7..a56e4c2a41 100644
--- a/deps/v8/src/compiler/loop-analysis.cc
+++ b/deps/v8/src/compiler/loop-analysis.cc
@@ -589,18 +589,33 @@ ZoneUnorderedSet<Node*>* LoopFinder::FindSmallUnnestedLoopFromHeader(
case IrOpcode::kTailCall:
case IrOpcode::kJSWasmCall:
case IrOpcode::kJSCall:
- // Call nodes are considered to have unbounded size, i.e. >max_size.
- // An exception is the call to the stack guard builtin at the beginning
- // of many loops.
+ // Call nodes are considered to have unbounded size, i.e. >max_size,
+ // with the exception of certain wasm builtins.
return nullptr;
case IrOpcode::kCall: {
Node* callee = node->InputAt(0);
- if (callee->opcode() == IrOpcode::kRelocatableInt32Constant ||
- callee->opcode() == IrOpcode::kRelocatableInt64Constant) {
- auto info = OpParameter<RelocatablePtrConstantInfo>(callee->op());
- if (info.value() != v8::internal::wasm::WasmCode::kWasmStackGuard) {
- return nullptr;
- }
+ if (callee->opcode() != IrOpcode::kRelocatableInt32Constant &&
+ callee->opcode() != IrOpcode::kRelocatableInt64Constant) {
+ return nullptr;
+ }
+ intptr_t info =
+ OpParameter<RelocatablePtrConstantInfo>(callee->op()).value();
+ using WasmCode = v8::internal::wasm::WasmCode;
+ constexpr intptr_t unrollable_builtins[] = {
+ WasmCode::kWasmStackGuard,
+ WasmCode::kWasmTableGet,
+ WasmCode::kWasmTableSet,
+ WasmCode::kWasmTableGrow,
+ WasmCode::kWasmThrow,
+ WasmCode::kWasmRethrow,
+ WasmCode::kWasmRethrowExplicitContext,
+ WasmCode::kWasmRefFunc,
+ WasmCode::kWasmAllocateRtt,
+ WasmCode::kWasmAllocateFreshRtt};
+ if (std::count(unrollable_builtins,
+ unrollable_builtins + arraysize(unrollable_builtins),
+ info) == 0) {
+ return nullptr;
}
V8_FALLTHROUGH;
}
@@ -662,7 +677,6 @@ bool LoopFinder::HasMarkedExits(LoopTree* loop_tree,
}
if (unmarked_exit) {
if (FLAG_trace_turbo_loop) {
- Node* loop_node = loop_tree->GetLoopControl(loop);
PrintF(
"Cannot peel loop %i. Loop exit without explicit mark: Node %i "
"(%s) is inside loop, but its use %i (%s) is outside.\n",
diff --git a/deps/v8/src/compiler/machine-graph-verifier.cc b/deps/v8/src/compiler/machine-graph-verifier.cc
index 9c61a0cc76..5d2ab6990c 100644
--- a/deps/v8/src/compiler/machine-graph-verifier.cc
+++ b/deps/v8/src/compiler/machine-graph-verifier.cc
@@ -635,34 +635,34 @@ class MachineRepresentationChecker {
switch (inferrer_->GetRepresentation(node)) {
case MachineRepresentation::kTagged:
case MachineRepresentation::kTaggedPointer:
- for (int i = 0; i < node->op()->ValueInputCount(); ++i) {
- CheckValueInputIsTagged(node, i);
+ for (int j = 0; j < node->op()->ValueInputCount(); ++j) {
+ CheckValueInputIsTagged(node, j);
}
break;
case MachineRepresentation::kTaggedSigned:
- for (int i = 0; i < node->op()->ValueInputCount(); ++i) {
+ for (int j = 0; j < node->op()->ValueInputCount(); ++j) {
if (COMPRESS_POINTERS_BOOL) {
- CheckValueInputIsCompressedOrTagged(node, i);
+ CheckValueInputIsCompressedOrTagged(node, j);
} else {
- CheckValueInputIsTagged(node, i);
+ CheckValueInputIsTagged(node, j);
}
}
break;
case MachineRepresentation::kCompressed:
case MachineRepresentation::kCompressedPointer:
- for (int i = 0; i < node->op()->ValueInputCount(); ++i) {
- CheckValueInputIsCompressedOrTagged(node, i);
+ for (int j = 0; j < node->op()->ValueInputCount(); ++j) {
+ CheckValueInputIsCompressedOrTagged(node, j);
}
break;
case MachineRepresentation::kWord32:
- for (int i = 0; i < node->op()->ValueInputCount(); ++i) {
- CheckValueInputForInt32Op(node, i);
+ for (int j = 0; j < node->op()->ValueInputCount(); ++j) {
+ CheckValueInputForInt32Op(node, j);
}
break;
default:
- for (int i = 0; i < node->op()->ValueInputCount(); ++i) {
+ for (int j = 0; j < node->op()->ValueInputCount(); ++j) {
CheckValueInputRepresentationIs(
- node, i, inferrer_->GetRepresentation(node));
+ node, j, inferrer_->GetRepresentation(node));
}
break;
}
@@ -678,9 +678,9 @@ class MachineRepresentationChecker {
// CheckValueInputRepresentationIs(
// node, 0, MachineType::PointerRepresentation()); // Pop count
size_t return_count = inferrer_->call_descriptor()->ReturnCount();
- for (size_t i = 0; i < return_count; i++) {
- MachineType type = inferrer_->call_descriptor()->GetReturnType(i);
- int input_index = static_cast<int>(i + 1);
+ for (size_t j = 0; j < return_count; j++) {
+ MachineType type = inferrer_->call_descriptor()->GetReturnType(j);
+ int input_index = static_cast<int>(j + 1);
switch (type.representation()) {
case MachineRepresentation::kTagged:
case MachineRepresentation::kTaggedPointer:
diff --git a/deps/v8/src/compiler/machine-operator-reducer.cc b/deps/v8/src/compiler/machine-operator-reducer.cc
index db137dfeb4..942e7a17f1 100644
--- a/deps/v8/src/compiler/machine-operator-reducer.cc
+++ b/deps/v8/src/compiler/machine-operator-reducer.cc
@@ -1023,7 +1023,7 @@ Reduction MachineOperatorReducer::ReduceInt64Add(Node* node) {
return ReplaceInt64(base::AddWithWraparound(m.left().ResolvedValue(),
m.right().ResolvedValue()));
}
- // (x + Int64Constant(a)) + Int64Constant(b)) => x + Int64Constant(a + b)
+ // (x + Int64Constant(a)) + Int64Constant(b) => x + Int64Constant(a + b)
if (m.right().HasResolvedValue() && m.left().IsInt64Add()) {
Int64BinopMatcher n(m.left().node());
if (n.right().HasResolvedValue() && m.OwnsInput(m.left().node())) {
diff --git a/deps/v8/src/compiler/machine-operator.cc b/deps/v8/src/compiler/machine-operator.cc
index a8830ad7d6..56b298eb55 100644
--- a/deps/v8/src/compiler/machine-operator.cc
+++ b/deps/v8/src/compiler/machine-operator.cc
@@ -1046,7 +1046,7 @@ struct MachineOperatorGlobalCache {
: Operator1<StoreRepresentation>( \
IrOpcode::kProtectedStore, \
Operator::kNoDeopt | Operator::kNoRead | Operator::kNoThrow, \
- "Store", 3, 1, 1, 0, 1, 0, \
+ "ProtectedStore", 3, 1, 1, 0, 1, 0, \
StoreRepresentation(MachineRepresentation::Type, \
kNoWriteBarrier)) {} \
}; \
diff --git a/deps/v8/src/compiler/memory-lowering.cc b/deps/v8/src/compiler/memory-lowering.cc
index 96b9ccf974..1aa8d2b55c 100644
--- a/deps/v8/src/compiler/memory-lowering.cc
+++ b/deps/v8/src/compiler/memory-lowering.cc
@@ -280,7 +280,7 @@ Reduction MemoryLowering::ReduceAllocateRaw(
// Set up a mutable reservation size node; will be patched as we fold
// additional allocations into this new group.
- Node* size = __ UniqueIntPtrConstant(object_size);
+ Node* reservation_size = __ UniqueIntPtrConstant(object_size);
// Load allocation top and limit.
Node* top =
@@ -290,7 +290,7 @@ Reduction MemoryLowering::ReduceAllocateRaw(
// Check if we need to collect garbage before we can start bump pointer
// allocation (always done for folded allocations).
- Node* check = __ UintLessThan(__ IntAdd(top, size), limit);
+ Node* check = __ UintLessThan(__ IntAdd(top, reservation_size), limit);
__ GotoIfNot(check, &call_runtime);
__ Goto(&done, top);
@@ -298,8 +298,8 @@ Reduction MemoryLowering::ReduceAllocateRaw(
__ Bind(&call_runtime);
{
EnsureAllocateOperator();
- Node* vfalse = __ BitcastTaggedToWord(
- __ Call(allocate_operator_.get(), allocate_builtin, size));
+ Node* vfalse = __ BitcastTaggedToWord(__ Call(
+ allocate_operator_.get(), allocate_builtin, reservation_size));
vfalse = __ IntSub(vfalse, __ IntPtrConstant(kHeapObjectTag));
__ Goto(&done, vfalse);
}
@@ -319,8 +319,8 @@ Reduction MemoryLowering::ReduceAllocateRaw(
control = gasm()->control();
// Start a new allocation group.
- AllocationGroup* group =
- zone()->New<AllocationGroup>(value, allocation_type, size, zone());
+ AllocationGroup* group = zone()->New<AllocationGroup>(
+ value, allocation_type, reservation_size, zone());
*state_ptr =
AllocationState::Open(group, object_size, top, effect, zone());
}
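The lowered fast path above is a bump-pointer allocation: if top plus the reservation size stays below the limit, the allocation succeeds in line; otherwise the code calls into the runtime. A conceptual sketch under illustrative names:

#include <cstddef>
#include <cstdint>

struct BumpArea {
  uintptr_t top;
  uintptr_t limit;
};

// Mirrors the lowered check UintLessThan(top + size, limit): bump and return
// on success, otherwise return nullptr so the caller takes the slow path.
void* TryBumpAllocate(BumpArea* area, size_t size) {
  if (!(area->top + size < area->limit)) return nullptr;  // slow path
  void* result = reinterpret_cast<void*>(area->top);
  area->top += size;
  return result;
}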
diff --git a/deps/v8/src/compiler/node-properties.cc b/deps/v8/src/compiler/node-properties.cc
index 63a0687ab0..1cd60b023f 100644
--- a/deps/v8/src/compiler/node-properties.cc
+++ b/deps/v8/src/compiler/node-properties.cc
@@ -373,7 +373,7 @@ NodeProperties::InferMapsResult NodeProperties::InferMapsUnsafe(
ZoneRefUnorderedSet<MapRef>* maps_out) {
HeapObjectMatcher m(receiver);
if (m.HasResolvedValue()) {
- HeapObjectRef receiver = m.Ref(broker);
+ HeapObjectRef ref = m.Ref(broker);
// We don't use ICs for the Array.prototype and the Object.prototype
// because the runtime has to be able to intercept them properly, so
// we better make sure that TurboFan doesn't outsmart the system here
@@ -381,12 +381,12 @@ NodeProperties::InferMapsResult NodeProperties::InferMapsUnsafe(
//
// TODO(bmeurer): This can be removed once the Array.prototype and
// Object.prototype have NO_ELEMENTS elements kind.
- if (!receiver.IsJSObject() ||
- !broker->IsArrayOrObjectPrototype(receiver.AsJSObject())) {
- if (receiver.map().is_stable()) {
+ if (!ref.IsJSObject() ||
+ !broker->IsArrayOrObjectPrototype(ref.AsJSObject())) {
+ if (ref.map().is_stable()) {
// The {receiver_map} is only reliable when we install a stability
// code dependency.
- *maps_out = RefSetOf(broker, receiver.map());
+ *maps_out = RefSetOf(broker, ref.map());
return kUnreliableMaps;
}
}
@@ -442,9 +442,9 @@ NodeProperties::InferMapsResult NodeProperties::InferMapsUnsafe(
access.offset == HeapObject::kMapOffset) {
if (IsSame(receiver, object)) {
Node* const value = GetValueInput(effect, 1);
- HeapObjectMatcher m(value);
- if (m.HasResolvedValue()) {
- *maps_out = RefSetOf(broker, m.Ref(broker).AsMap());
+ HeapObjectMatcher m2(value);
+ if (m2.HasResolvedValue()) {
+ *maps_out = RefSetOf(broker, m2.Ref(broker).AsMap());
return result;
}
}
diff --git a/deps/v8/src/compiler/persistent-map.h b/deps/v8/src/compiler/persistent-map.h
index 1373ff5f25..4ccce395d1 100644
--- a/deps/v8/src/compiler/persistent-map.h
+++ b/deps/v8/src/compiler/persistent-map.h
@@ -490,13 +490,14 @@ PersistentMap<Key, Value, Hasher>::FindLeftmost(
std::array<const FocusedTree*, kHashBits>* path) {
const FocusedTree* current = start;
while (*level < current->length) {
- if (const FocusedTree* child = GetChild(current, *level, kLeft)) {
+ if (const FocusedTree* left_child = GetChild(current, *level, kLeft)) {
(*path)[*level] = GetChild(current, *level, kRight);
- current = child;
+ current = left_child;
++*level;
- } else if (const FocusedTree* child = GetChild(current, *level, kRight)) {
+ } else if (const FocusedTree* right_child =
+ GetChild(current, *level, kRight)) {
(*path)[*level] = GetChild(current, *level, kLeft);
- current = child;
+ current = right_child;
++*level;
} else {
UNREACHABLE();
diff --git a/deps/v8/src/compiler/pipeline.cc b/deps/v8/src/compiler/pipeline.cc
index a4ad7244f9..ef16b8f304 100644
--- a/deps/v8/src/compiler/pipeline.cc
+++ b/deps/v8/src/compiler/pipeline.cc
@@ -16,6 +16,7 @@
#include "src/codegen/compiler.h"
#include "src/codegen/optimized-compilation-info.h"
#include "src/codegen/register-configuration.h"
+#include "src/common/high-allocation-throughput-scope.h"
#include "src/compiler/add-type-assertions-reducer.h"
#include "src/compiler/backend/code-generator.h"
#include "src/compiler/backend/frame-elider.h"
@@ -1136,6 +1137,8 @@ class V8_NODISCARD PipelineJobScope {
~PipelineJobScope() { data_->set_runtime_call_stats(nullptr); }
private:
+ HighAllocationThroughputScope high_throughput_scope_{
+ V8::GetCurrentPlatform()};
PipelineData* data_;
};
} // namespace
diff --git a/deps/v8/src/compiler/simplified-lowering.cc b/deps/v8/src/compiler/simplified-lowering.cc
index dfb4a2bf2f..bbdbdfefd8 100644
--- a/deps/v8/src/compiler/simplified-lowering.cc
+++ b/deps/v8/src/compiler/simplified-lowering.cc
@@ -1821,6 +1821,7 @@ class RepresentationSelector {
// path.
case CTypeInfo::Type::kInt64:
case CTypeInfo::Type::kUint64:
+ case CTypeInfo::Type::kAny:
return UseInfo::CheckedSigned64AsWord64(kIdentifyZeros, feedback);
case CTypeInfo::Type::kFloat32:
case CTypeInfo::Type::kFloat64:
diff --git a/deps/v8/src/compiler/typer.cc b/deps/v8/src/compiler/typer.cc
index de69655f11..6b87797311 100644
--- a/deps/v8/src/compiler/typer.cc
+++ b/deps/v8/src/compiler/typer.cc
@@ -1321,12 +1321,21 @@ Type Typer::Visitor::TypeJSGetTemplateObject(Node* node) {
return Type::Array();
}
-Type Typer::Visitor::TypeJSLoadProperty(Node* node) {
+Type Typer::Visitor::TypeJSLoadProperty(Node* node) { return Type::Any(); }
+
+Type Typer::Visitor::TypeJSLoadNamed(Node* node) {
+#ifdef DEBUG
+ // Loading of private methods is compiled to a named load of a BlockContext
+ // via a private brand, which is an internal object. However, native context
+ // specialization should always apply for those cases, so assert that the name
+ // is not a private brand here. Otherwise Type::NonInternal() is wrong.
+ JSLoadNamedNode n(node);
+ NamedAccess const& p = n.Parameters();
+ DCHECK(!p.name(typer_->broker()).object()->IsPrivateBrand());
+#endif
return Type::NonInternal();
}
-Type Typer::Visitor::TypeJSLoadNamed(Node* node) { return Type::NonInternal(); }
-
Type Typer::Visitor::TypeJSLoadNamedFromSuper(Node* node) {
return Type::NonInternal();
}
diff --git a/deps/v8/src/compiler/types.cc b/deps/v8/src/compiler/types.cc
index 4aed4e7454..1d051774da 100644
--- a/deps/v8/src/compiler/types.cc
+++ b/deps/v8/src/compiler/types.cc
@@ -119,12 +119,12 @@ Type::bitset Type::BitsetLub() const {
if (IsUnion()) {
// Take the representation from the first element, which is always
// a bitset.
- int bitset = AsUnion()->Get(0).BitsetLub();
+ bitset lub = AsUnion()->Get(0).BitsetLub();
for (int i = 0, n = AsUnion()->Length(); i < n; ++i) {
// Other elements only contribute their semantic part.
- bitset |= AsUnion()->Get(i).BitsetLub();
+ lub |= AsUnion()->Get(i).BitsetLub();
}
- return bitset;
+ return lub;
}
if (IsHeapConstant()) return AsHeapConstant()->Lub();
if (IsOtherNumberConstant()) {
@@ -415,7 +415,7 @@ Type::bitset BitsetType::ExpandInternals(Type::bitset bits) {
Type::bitset BitsetType::Lub(double min, double max) {
DisallowGarbageCollection no_gc;
- int lub = kNone;
+ bitset lub = kNone;
const Boundary* mins = Boundaries();
for (size_t i = 1; i < BoundariesSize(); ++i) {
@@ -431,7 +431,7 @@ Type::bitset BitsetType::NumberBits(bitset bits) { return bits & kPlainNumber; }
Type::bitset BitsetType::Glb(double min, double max) {
DisallowGarbageCollection no_gc;
- int glb = kNone;
+ bitset glb = kNone;
const Boundary* mins = Boundaries();
// If the range does not touch 0, the bound is empty.
@@ -1146,7 +1146,10 @@ std::ostream& operator<<(std::ostream& os, Type type) {
Handle<TurbofanType> Type::AllocateOnHeap(Factory* factory) {
DCHECK(CanBeAsserted());
if (IsBitset()) {
- return factory->NewTurbofanBitsetType(AsBitset(), AllocationType::kYoung);
+ const bitset bits = AsBitset();
+ uint32_t low = bits & 0xffffffff;
+ uint32_t high = (bits >> 32) & 0xffffffff;
+ return factory->NewTurbofanBitsetType(low, high, AllocationType::kYoung);
} else if (IsUnion()) {
const UnionType* union_type = AsUnion();
Handle<TurbofanType> result = union_type->Get(0).AllocateOnHeap(factory);
@@ -1171,12 +1174,18 @@ Handle<TurbofanType> Type::AllocateOnHeap(Factory* factory) {
}
}
-#define VERIFY_TORQUE_BITSET_AGREEMENT(Name, _) \
+#define VERIFY_TORQUE_LOW_BITSET_AGREEMENT(Name, _) \
STATIC_ASSERT(static_cast<uint32_t>(BitsetType::k##Name) == \
- static_cast<uint32_t>(TurbofanTypeBits::k##Name));
-INTERNAL_BITSET_TYPE_LIST(VERIFY_TORQUE_BITSET_AGREEMENT)
-PROPER_ATOMIC_BITSET_TYPE_LIST(VERIFY_TORQUE_BITSET_AGREEMENT)
-#undef VERIFY_TORQUE_BITSET_AGREEMENT
+ static_cast<uint32_t>(TurbofanTypeLowBits::k##Name));
+#define VERIFY_TORQUE_HIGH_BITSET_AGREEMENT(Name, _) \
+ STATIC_ASSERT(static_cast<uint32_t>( \
+ static_cast<uint64_t>(BitsetType::k##Name) >> 32) == \
+ static_cast<uint32_t>(TurbofanTypeHighBits::k##Name));
+INTERNAL_BITSET_TYPE_LIST(VERIFY_TORQUE_LOW_BITSET_AGREEMENT)
+PROPER_ATOMIC_BITSET_TYPE_LOW_LIST(VERIFY_TORQUE_LOW_BITSET_AGREEMENT)
+PROPER_ATOMIC_BITSET_TYPE_HIGH_LIST(VERIFY_TORQUE_HIGH_BITSET_AGREEMENT)
+#undef VERIFY_TORQUE_HIGH_BITSET_AGREEMENT
+#undef VERIFY_TORQUE_LOW_BITSET_AGREEMENT
} // namespace compiler
} // namespace internal
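
For reference, a standalone sketch of the low/high split that AllocateOnHeap and the new VERIFY_TORQUE_*_BITSET_AGREEMENT macros rely on; the names here are illustrative, not V8's:

#include <cstdint>

using bitset = uint64_t;

struct SplitBitset {
  uint32_t low;   // corresponds to the TurbofanTypeLowBits half
  uint32_t high;  // corresponds to the TurbofanTypeHighBits half
};

constexpr SplitBitset Split(bitset bits) {
  return {static_cast<uint32_t>(bits & 0xffffffff),
          static_cast<uint32_t>((bits >> 32) & 0xffffffff)};
}

constexpr bitset Join(SplitBitset s) {
  return (static_cast<bitset>(s.high) << 32) | s.low;
}

// A bit above position 31 (e.g. the new kWasmObject at bit 32) survives the
// round trip through the two 32-bit halves.
static_assert(Join(Split(uint64_t{1} << 32)) == uint64_t{1} << 32, "lossless");
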
diff --git a/deps/v8/src/compiler/types.h b/deps/v8/src/compiler/types.h
index 9feab08f70..d4b129f242 100644
--- a/deps/v8/src/compiler/types.h
+++ b/deps/v8/src/compiler/types.h
@@ -96,45 +96,51 @@ namespace compiler {
// clang-format off
#define INTERNAL_BITSET_TYPE_LIST(V) \
- V(OtherUnsigned31, 1u << 1) \
- V(OtherUnsigned32, 1u << 2) \
- V(OtherSigned32, 1u << 3) \
- V(OtherNumber, 1u << 4) \
- V(OtherString, 1u << 5) \
-
-#define PROPER_ATOMIC_BITSET_TYPE_LIST(V) \
- V(Negative31, 1u << 6) \
- V(Null, 1u << 7) \
- V(Undefined, 1u << 8) \
- V(Boolean, 1u << 9) \
- V(Unsigned30, 1u << 10) \
- V(MinusZero, 1u << 11) \
- V(NaN, 1u << 12) \
- V(Symbol, 1u << 13) \
- V(InternalizedString, 1u << 14) \
- V(OtherCallable, 1u << 15) \
- V(OtherObject, 1u << 16) \
- V(OtherUndetectable, 1u << 17) \
- V(CallableProxy, 1u << 18) \
- V(OtherProxy, 1u << 19) \
- V(Function, 1u << 20) \
- V(BoundFunction, 1u << 21) \
- V(Hole, 1u << 22) \
- V(OtherInternal, 1u << 23) \
- V(ExternalPointer, 1u << 24) \
- V(Array, 1u << 25) \
- V(UnsignedBigInt63, 1u << 26) \
- V(OtherUnsignedBigInt64, 1u << 27) \
- V(NegativeBigInt63, 1u << 28) \
- V(OtherBigInt, 1u << 29) \
+ V(OtherUnsigned31, uint64_t{1} << 1) \
+ V(OtherUnsigned32, uint64_t{1} << 2) \
+ V(OtherSigned32, uint64_t{1} << 3) \
+ V(OtherNumber, uint64_t{1} << 4) \
+ V(OtherString, uint64_t{1} << 5) \
+
+#define PROPER_ATOMIC_BITSET_TYPE_LOW_LIST(V) \
+ V(Negative31, uint64_t{1} << 6) \
+ V(Null, uint64_t{1} << 7) \
+ V(Undefined, uint64_t{1} << 8) \
+ V(Boolean, uint64_t{1} << 9) \
+ V(Unsigned30, uint64_t{1} << 10) \
+ V(MinusZero, uint64_t{1} << 11) \
+ V(NaN, uint64_t{1} << 12) \
+ V(Symbol, uint64_t{1} << 13) \
+ V(InternalizedString, uint64_t{1} << 14) \
+ V(OtherCallable, uint64_t{1} << 15) \
+ V(OtherObject, uint64_t{1} << 16) \
+ V(OtherUndetectable, uint64_t{1} << 17) \
+ V(CallableProxy, uint64_t{1} << 18) \
+ V(OtherProxy, uint64_t{1} << 19) \
+ V(Function, uint64_t{1} << 20) \
+ V(BoundFunction, uint64_t{1} << 21) \
+ V(Hole, uint64_t{1} << 22) \
+ V(OtherInternal, uint64_t{1} << 23) \
+ V(ExternalPointer, uint64_t{1} << 24) \
+ V(Array, uint64_t{1} << 25) \
+ V(UnsignedBigInt63, uint64_t{1} << 26) \
+ V(OtherUnsignedBigInt64, uint64_t{1} << 27) \
+ V(NegativeBigInt63, uint64_t{1} << 28) \
+ V(OtherBigInt, uint64_t{1} << 29) \
/* TODO(v8:10391): Remove this type once all ExternalPointer usages are */ \
- /* sandbox-ready. */ \
- V(SandboxedExternalPointer, 1u << 30) \
- V(CagedPointer, 1u << 31) \
+ /* sandbox-ready. */ \
+ V(SandboxedExternalPointer, uint64_t{1} << 30) \
+ V(CagedPointer, uint64_t{1} << 31)
+
+// We split the macro list into two parts because the Torque equivalent in
+// turbofan-types.tq uses two 32bit bitfield structs.
+#define PROPER_ATOMIC_BITSET_TYPE_HIGH_LIST(V) \
+ V(WasmObject, uint64_t{1} << 32)
#define PROPER_BITSET_TYPE_LIST(V) \
- V(None, 0u) \
- PROPER_ATOMIC_BITSET_TYPE_LIST(V) \
+ V(None, uint64_t{0}) \
+ PROPER_ATOMIC_BITSET_TYPE_LOW_LIST(V) \
+ PROPER_ATOMIC_BITSET_TYPE_HIGH_LIST(V) \
V(Signed31, kUnsigned30 | kNegative31) \
V(Signed32, kSigned31 | kOtherUnsigned31 | \
kOtherSigned32) \
@@ -194,7 +200,7 @@ namespace compiler {
V(DetectableReceiver, kDetectableObject | kProxy) \
V(DetectableReceiverOrNull, kDetectableReceiver | kNull) \
V(Object, kDetectableObject | kOtherUndetectable) \
- V(Receiver, kObject | kProxy) \
+ V(Receiver, kObject | kProxy | kWasmObject) \
V(ReceiverOrUndefined, kReceiver | kUndefined) \
V(ReceiverOrNullOrUndefined, kReceiver | kNull | kUndefined) \
V(SymbolOrReceiver, kSymbol | kReceiver) \
@@ -207,7 +213,7 @@ namespace compiler {
V(NonInternal, kPrimitive | kReceiver) \
V(NonBigInt, kNonBigIntPrimitive | kReceiver) \
V(NonNumber, kBigInt | kUnique | kString | kInternal) \
- V(Any, 0xfffffffeu)
+ V(Any, uint64_t{0xfffffffffffffffe})
// clang-format on
@@ -243,9 +249,9 @@ class UnionType;
class V8_EXPORT_PRIVATE BitsetType {
public:
- using bitset = uint32_t; // Internal
+ using bitset = uint64_t; // Internal
- enum : uint32_t {
+ enum : bitset {
#define DECLARE_TYPE(type, value) k##type = (value),
BITSET_TYPE_LIST(DECLARE_TYPE)
#undef DECLARE_TYPE
@@ -376,7 +382,7 @@ class V8_EXPORT_PRIVATE Type {
PROPER_BITSET_TYPE_LIST(DEFINE_TYPE_CONSTRUCTOR)
#undef DEFINE_TYPE_CONSTRUCTOR
- Type() : payload_(0) {}
+ Type() : payload_(uint64_t{0}) {}
static Type SignedSmall() { return NewBitset(BitsetType::SignedSmall()); }
static Type UnsignedSmall() { return NewBitset(BitsetType::UnsignedSmall()); }
@@ -396,7 +402,7 @@ class V8_EXPORT_PRIVATE Type {
// Predicates.
bool IsNone() const { return payload_ == None().payload_; }
- bool IsInvalid() const { return payload_ == 0u; }
+ bool IsInvalid() const { return payload_ == uint64_t{0}; }
bool Is(Type that) const {
return payload_ == that.payload_ || this->SlowIs(that);
@@ -405,7 +411,7 @@ class V8_EXPORT_PRIVATE Type {
bool Equals(Type that) const { return this->Is(that) && that.Is(*this); }
// Inspection.
- bool IsBitset() const { return payload_ & 1; }
+ bool IsBitset() const { return payload_ & uint64_t{1}; }
bool IsRange() const { return IsKind(TypeBase::kRange); }
bool IsHeapConstant() const { return IsKind(TypeBase::kHeapConstant); }
bool IsOtherNumberConstant() const {
@@ -469,10 +475,10 @@ class V8_EXPORT_PRIVATE Type {
friend UnionType;
friend size_t hash_value(Type type);
- explicit Type(bitset bits) : payload_(bits | 1u) {}
+ explicit Type(bitset bits) : payload_(bits | uint64_t{1}) {}
Type(TypeBase* type_base) // NOLINT(runtime/explicit)
- : payload_(reinterpret_cast<uintptr_t>(type_base)) {}
+ : payload_(reinterpret_cast<uint64_t>(type_base)) {}
// Internal inspection.
bool IsKind(TypeBase::Kind kind) const {
@@ -491,7 +497,7 @@ class V8_EXPORT_PRIVATE Type {
bitset AsBitset() const {
DCHECK(IsBitset());
- return static_cast<bitset>(payload_) ^ 1u;
+ return static_cast<bitset>(payload_) ^ uint64_t { 1 };
}
const UnionType* AsUnion() const;
@@ -526,7 +532,7 @@ class V8_EXPORT_PRIVATE Type {
// If LSB is set, the payload is a bitset; if LSB is clear, the payload is
// a pointer to a subtype of the TypeBase class.
- uintptr_t payload_;
+ uint64_t payload_;
};
inline size_t hash_value(Type type) { return type.payload_; }
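
A condensed sketch of the payload encoding that the widened payload_ preserves: the low bit distinguishes a bitset (LSB set) from a pointer to a TypeBase subtype (LSB clear). The class below is illustrative, not the real Type:

#include <cstdint>

class TaggedPayload {
 public:
  static TaggedPayload FromBitset(uint64_t bits) {
    // Setting the LSB marks the payload as a bitset.
    return TaggedPayload(bits | uint64_t{1});
  }
  static TaggedPayload FromPointer(void* type_base) {
    // Aligned pointers have a clear LSB, so no extra tagging is needed.
    return TaggedPayload(static_cast<uint64_t>(
        reinterpret_cast<uintptr_t>(type_base)));
  }

  bool IsBitset() const { return payload_ & uint64_t{1}; }
  uint64_t AsBitset() const { return payload_ ^ uint64_t{1}; }  // strip tag bit
  void* AsPointer() const {
    return reinterpret_cast<void*>(static_cast<uintptr_t>(payload_));
  }

 private:
  explicit TaggedPayload(uint64_t payload) : payload_(payload) {}
  uint64_t payload_;
};
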
diff --git a/deps/v8/src/compiler/value-numbering-reducer.cc b/deps/v8/src/compiler/value-numbering-reducer.cc
index af0bc99746..dfaa67b471 100644
--- a/deps/v8/src/compiler/value-numbering-reducer.cc
+++ b/deps/v8/src/compiler/value-numbering-reducer.cc
@@ -76,15 +76,15 @@ Reduction ValueNumberingReducer::Reduce(Node* node) {
// in this case because we find node1 first, but what we should actually
// do is return Replace(node2) instead.
for (size_t j = (i + 1) & mask;; j = (j + 1) & mask) {
- Node* entry = entries_[j];
- if (!entry) {
+ Node* other_entry = entries_[j];
+ if (!other_entry) {
// No collision, {node} is fine.
return NoChange();
}
- if (entry->IsDead()) {
+ if (other_entry->IsDead()) {
continue;
}
- if (entry == node) {
+ if (other_entry == node) {
// Collision with ourselves, doesn't count as a real collision.
// Opportunistically clean-up the duplicate entry if we're at the end
// of a bucket.
@@ -96,11 +96,11 @@ Reduction ValueNumberingReducer::Reduce(Node* node) {
// Otherwise, keep searching for another collision.
continue;
}
- if (NodeProperties::Equals(entry, node)) {
- Reduction reduction = ReplaceIfTypesMatch(node, entry);
+ if (NodeProperties::Equals(other_entry, node)) {
+ Reduction reduction = ReplaceIfTypesMatch(node, other_entry);
if (reduction.Changed()) {
// Overwrite the colliding entry with the actual entry.
- entries_[i] = entry;
+ entries_[i] = other_entry;
// Opportunistically clean-up the duplicate entry if we're at the
// end of a bucket.
if (!entries_[(j + 1) & mask]) {
diff --git a/deps/v8/src/compiler/wasm-compiler.cc b/deps/v8/src/compiler/wasm-compiler.cc
index accbb89619..07bb413588 100644
--- a/deps/v8/src/compiler/wasm-compiler.cc
+++ b/deps/v8/src/compiler/wasm-compiler.cc
@@ -98,13 +98,14 @@ MachineType assert_size(int expected_size, MachineType type) {
wasm::ObjectAccess::ToTagged( \
WasmInstanceObject::k##name##Offset)))
-#define LOAD_ROOT(root_name, factory_name) \
- (parameter_mode_ == kNoSpecialParameterMode \
- ? graph()->NewNode(mcgraph()->common()->HeapConstant( \
- isolate_->factory()->factory_name())) \
- : gasm_->LoadImmutable( \
- MachineType::Pointer(), BuildLoadIsolateRoot(), \
- IsolateData::root_slot_offset(RootIndex::k##root_name)))
+#define LOAD_ROOT(root_name, factory_name) \
+ (parameter_mode_ == kNoSpecialParameterMode \
+ ? graph()->NewNode(mcgraph()->common()->HeapConstant( \
+ isolate_->factory()->factory_name())) \
+ : gasm_->LoadImmutable(/* Root pointers do not get compressed. */ \
+ MachineType::Pointer(), BuildLoadIsolateRoot(), \
+ IsolateData::root_slot_offset( \
+ RootIndex::k##root_name)))
bool ContainsSimd(const wasm::FunctionSig* sig) {
for (auto type : sig->all()) {
@@ -306,7 +307,7 @@ class WasmGraphAssembler : public GraphAssembler {
void StoreMap(Node* heap_object, Node* map) {
ObjectAccess access(MachineType::TaggedPointer(), kMapWriteBarrier);
#ifdef V8_MAP_PACKING
- map = PackMapWord(map);
+ map = PackMapWord(TNode<Map>::UncheckedCast(map));
#endif
StoreToObject(access, heap_object, HeapObject::kMapOffset - kHeapObjectTag,
map);
@@ -518,6 +519,9 @@ void WasmGraphBuilder::Start(unsigned params) {
Param(Linkage::kJSCallClosureParamIndex, "%closure")));
break;
case kWasmApiFunctionRefMode:
+ // We need an instance node anyway, because FromJS() needs to pass it to
+ // the WasmIsValidRefValue runtime function.
+ instance_node_ = UndefinedValue();
break;
}
graph()->SetEnd(graph()->NewNode(mcgraph()->common()->End(0)));
@@ -638,8 +642,7 @@ Node* WasmGraphBuilder::RefFunc(uint32_t function_index) {
Node* WasmGraphBuilder::RefAsNonNull(Node* arg,
wasm::WasmCodePosition position) {
if (!FLAG_experimental_wasm_skip_null_checks) {
- TrapIfTrue(wasm::kTrapIllegalCast, gasm_->WordEqual(arg, RefNull()),
- position);
+ TrapIfTrue(wasm::kTrapIllegalCast, IsNull(arg), position);
}
return arg;
}
@@ -648,10 +651,7 @@ Node* WasmGraphBuilder::NoContextConstant() {
return mcgraph()->IntPtrConstant(0);
}
-Node* WasmGraphBuilder::GetInstance() {
- DCHECK_NE(parameter_mode_, kWasmApiFunctionRefMode);
- return instance_node_.get();
-}
+Node* WasmGraphBuilder::GetInstance() { return instance_node_.get(); }
Node* WasmGraphBuilder::BuildLoadIsolateRoot() {
switch (parameter_mode_) {
@@ -660,9 +660,12 @@ Node* WasmGraphBuilder::BuildLoadIsolateRoot() {
// that the generated code is Isolate independent.
return LOAD_INSTANCE_FIELD(IsolateRoot, MachineType::Pointer());
case kWasmApiFunctionRefMode:
- return gasm_->Load(MachineType::Pointer(), Param(0),
- wasm::ObjectAccess::ToTagged(
- WasmApiFunctionRef::kForeignAddressOffset));
+ // Note: Even if V8_HEAP_SANDBOX, the pointer to the isolate root is not
+ // encoded, much like the case above. TODO(manoskouk): Decode the pointer
+ // here if that changes.
+ return gasm_->Load(
+ MachineType::Pointer(), Param(0),
+ wasm::ObjectAccess::ToTagged(WasmApiFunctionRef::kIsolateRootOffset));
case kNoSpecialParameterMode:
return mcgraph()->IntPtrConstant(isolate_->isolate_root());
}
@@ -676,6 +679,10 @@ Node* WasmGraphBuilder::Int64Constant(int64_t value) {
return mcgraph()->Int64Constant(value);
}
+Node* WasmGraphBuilder::UndefinedValue() {
+ return LOAD_ROOT(UndefinedValue, undefined_value);
+}
+
void WasmGraphBuilder::StackCheck(wasm::WasmCodePosition position) {
DCHECK_NOT_NULL(env_); // Wrappers don't get stack checks.
if (!FLAG_wasm_stack_checks || !env_->runtime_exception_support) {
@@ -1309,7 +1316,11 @@ Node* WasmGraphBuilder::Unop(wasm::WasmOpcode opcode, Node* input,
? BuildCcallConvertFloat(input, position, opcode)
: BuildIntConvertFloat(input, position, opcode);
case wasm::kExprRefIsNull:
- return gasm_->WordEqual(input, RefNull());
+ return IsNull(input);
+ // We abuse ref.as_non_null, which isn't otherwise used in this switch, as
+ // a sentinel for the negation of ref.is_null.
+ case wasm::kExprRefAsNonNull:
+ return gasm_->Int32Sub(gasm_->Int32Constant(1), IsNull(input));
case wasm::kExprI32AsmjsLoadMem8S:
return BuildAsmjsLoadMem(MachineType::Int8(), input);
case wasm::kExprI32AsmjsLoadMem8U:
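
The Int32Sub(1, IsNull(input)) above computes the logical negation of a 0/1 comparison result without a branch. In plain C++ terms:

// Precondition: is_null is 0 or 1, as produced by the equality node.
int RefIsNonNull(int is_null) { return 1 - is_null; }
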
@@ -1773,7 +1784,7 @@ Node* WasmGraphBuilder::BuildChangeEndiannessLoad(Node* node,
}
}
- // We need to sign extend the value
+ // We need to sign or zero extend the value
if (memtype.IsSigned()) {
DCHECK(!isFloat);
if (valueSizeInBits < 32) {
@@ -1792,6 +1803,8 @@ Node* WasmGraphBuilder::BuildChangeEndiannessLoad(Node* node,
shiftBitCount);
}
}
+ } else if (wasmtype == wasm::kWasmI64 && valueSizeInBits < 64) {
+ result = gasm_->ChangeUint32ToUint64(result);
}
return result;
@@ -2885,6 +2898,10 @@ Node* WasmGraphBuilder::BuildDiv64Call(Node* left, Node* right,
return gasm_->LoadFromObject(result_type, stack_slot, 0);
}
+Node* WasmGraphBuilder::IsNull(Node* object) {
+ return gasm_->TaggedEqual(object, RefNull());
+}
+
template <typename... Args>
Node* WasmGraphBuilder::BuildCCall(MachineSignature* sig, Node* function,
Args... args) {
@@ -3025,15 +3042,16 @@ Node* WasmGraphBuilder::BuildImportCall(const wasm::FunctionSig* sig,
}
}
-Node* WasmGraphBuilder::CallDirect(uint32_t index, base::Vector<Node*> args,
+Node* WasmGraphBuilder::CallDirect(uint32_t index, wasm::FunctionSig* real_sig,
+ base::Vector<Node*> args,
base::Vector<Node*> rets,
wasm::WasmCodePosition position) {
DCHECK_NULL(args[0]);
- const wasm::FunctionSig* sig = env_->module->functions[index].sig;
if (env_ && index < env_->module->num_imported_functions) {
// Call to an imported function.
- return BuildImportCall(sig, args, rets, position, index, kCallContinues);
+ return BuildImportCall(real_sig, args, rets, position, index,
+ kCallContinues);
}
// A direct call to a wasm function defined in this module.
@@ -3041,14 +3059,15 @@ Node* WasmGraphBuilder::CallDirect(uint32_t index, base::Vector<Node*> args,
Address code = static_cast<Address>(index);
args[0] = mcgraph()->RelocatableIntPtrConstant(code, RelocInfo::WASM_CALL);
- return BuildWasmCall(sig, args, rets, position, nullptr);
+ return BuildWasmCall(real_sig, args, rets, position, nullptr);
}
Node* WasmGraphBuilder::CallIndirect(uint32_t table_index, uint32_t sig_index,
+ wasm::FunctionSig* sig,
base::Vector<Node*> args,
base::Vector<Node*> rets,
wasm::WasmCodePosition position) {
- return BuildIndirectCall(table_index, sig_index, args, rets, position,
+ return BuildIndirectCall(table_index, sig_index, sig, args, rets, position,
kCallContinues);
}
@@ -3101,12 +3120,10 @@ void WasmGraphBuilder::LoadIndirectFunctionTable(uint32_t table_index,
wasm::ObjectAccess::ToTagged(WasmIndirectFunctionTable::kRefsOffset));
}
-Node* WasmGraphBuilder::BuildIndirectCall(uint32_t table_index,
- uint32_t sig_index,
- base::Vector<Node*> args,
- base::Vector<Node*> rets,
- wasm::WasmCodePosition position,
- IsReturnCall continuation) {
+Node* WasmGraphBuilder::BuildIndirectCall(
+ uint32_t table_index, uint32_t sig_index, wasm::FunctionSig* real_sig,
+ base::Vector<Node*> args, base::Vector<Node*> rets,
+ wasm::WasmCodePosition position, IsReturnCall continuation) {
DCHECK_NOT_NULL(args[0]);
DCHECK_NOT_NULL(env_);
@@ -3118,8 +3135,6 @@ Node* WasmGraphBuilder::BuildIndirectCall(uint32_t table_index,
LoadIndirectFunctionTable(table_index, &ift_size, &ift_sig_ids, &ift_targets,
&ift_instances);
- const wasm::FunctionSig* sig = env_->module->signature(sig_index);
-
Node* key = args[0];
// Bounds check against the table size.
@@ -3164,67 +3179,74 @@ Node* WasmGraphBuilder::BuildIndirectCall(uint32_t table_index,
switch (continuation) {
case kCallContinues:
- return BuildWasmCall(sig, args, rets, position, target_instance);
+ return BuildWasmCall(real_sig, args, rets, position, target_instance);
case kReturnCall:
- return BuildWasmReturnCall(sig, args, position, target_instance);
+ return BuildWasmReturnCall(real_sig, args, position, target_instance);
}
}
-Node* WasmGraphBuilder::BuildLoadCallTargetFromExportedFunctionData(
- Node* function_data) {
- // TODO(saelo) move this code into a common LoadExternalPointer routine?
+Node* WasmGraphBuilder::BuildUnsandboxExternalPointer(Node* external_pointer) {
#ifdef V8_HEAP_SANDBOX
- Node* index = gasm_->LoadFromObject(
- MachineType::Pointer(), function_data,
- wasm::ObjectAccess::ToTagged(WasmFunctionData::kForeignAddressOffset));
-
Node* isolate_root = BuildLoadIsolateRoot();
Node* table =
gasm_->LoadFromObject(MachineType::Pointer(), isolate_root,
IsolateData::external_pointer_table_offset() +
Internals::kExternalPointerTableBufferOffset);
- Node* offset = gasm_->Int32Mul(index, gasm_->Int32Constant(8));
+ Node* offset = gasm_->Int32Mul(external_pointer, gasm_->Int32Constant(8));
Node* decoded_ptr = gasm_->Load(MachineType::Pointer(), table, offset);
Node* tag = gasm_->IntPtrConstant(~kForeignForeignAddressTag);
return gasm_->WordAnd(decoded_ptr, tag);
#else
- return gasm_->LoadFromObject(
- MachineType::Pointer(), function_data,
- wasm::ObjectAccess::ToTagged(WasmFunctionData::kForeignAddressOffset));
+ return external_pointer;
#endif
}
+Node* WasmGraphBuilder::BuildLoadCallTargetFromExportedFunctionData(
+ Node* function) {
+ Node* internal = gasm_->LoadFromObject(
+ MachineType::TaggedPointer(), function,
+ wasm::ObjectAccess::ToTagged(WasmExportedFunctionData::kInternalOffset));
+ Node* external_pointer =
+ gasm_->LoadFromObject(MachineType::Pointer(), internal,
+ wasm::ObjectAccess::ToTagged(
+ WasmInternalFunction::kForeignAddressOffset));
+ return BuildUnsandboxExternalPointer(external_pointer);
+}
+
// TODO(9495): Support CAPI function refs.
-Node* WasmGraphBuilder::BuildCallRef(const wasm::FunctionSig* sig,
+Node* WasmGraphBuilder::BuildCallRef(const wasm::FunctionSig* real_sig,
base::Vector<Node*> args,
base::Vector<Node*> rets,
CheckForNull null_check,
IsReturnCall continuation,
wasm::WasmCodePosition position) {
if (null_check == kWithNullCheck) {
- TrapIfTrue(wasm::kTrapNullDereference, gasm_->WordEqual(args[0], RefNull()),
- position);
+ TrapIfTrue(wasm::kTrapNullDereference, IsNull(args[0]), position);
}
- Node* function_data = gasm_->LoadFunctionDataFromJSFunction(args[0]);
+ Node* function = args[0];
auto load_target = gasm_->MakeLabel();
auto end_label = gasm_->MakeLabel(MachineType::PointerRepresentation());
- Node* instance_node = gasm_->LoadFromObject(
- MachineType::TaggedPointer(), function_data,
- wasm::ObjectAccess::ToTagged(WasmFunctionData::kRefOffset));
+ Node* ref_node = gasm_->LoadFromObject(
+ MachineType::TaggedPointer(), function,
+ wasm::ObjectAccess::ToTagged(WasmInternalFunction::kRefOffset));
- Node* target = BuildLoadCallTargetFromExportedFunctionData(function_data);
+ Node* external_target =
+ gasm_->LoadFromObject(MachineType::Pointer(), function,
+ wasm::ObjectAccess::ToTagged(
+ WasmInternalFunction::kForeignAddressOffset));
+
+ Node* target = BuildUnsandboxExternalPointer(external_target);
Node* is_null_target = gasm_->WordEqual(target, gasm_->IntPtrConstant(0));
gasm_->GotoIfNot(is_null_target, &end_label, target);
{
// Compute the call target from the (on-heap) wrapper code. The cached
// target can only be null for WasmJSFunctions.
Node* wrapper_code = gasm_->LoadFromObject(
- MachineType::TaggedPointer(), function_data,
- wasm::ObjectAccess::ToTagged(
- WasmJSFunctionData::kWasmToJsWrapperCodeOffset));
+ MachineType::TaggedPointer(), function,
+ wasm::ObjectAccess::ToTagged(WasmInternalFunction::kCodeOffset));
Node* call_target;
if (V8_EXTERNAL_CODE_SPACE_BOOL) {
CHECK(!V8_HEAP_SANDBOX_BOOL); // Not supported yet.
@@ -3246,52 +3268,53 @@ Node* WasmGraphBuilder::BuildCallRef(const wasm::FunctionSig* sig,
args[0] = end_label.PhiAt(0);
Node* call = continuation == kCallContinues
- ? BuildWasmCall(sig, args, rets, position, instance_node)
- : BuildWasmReturnCall(sig, args, position, instance_node);
+ ? BuildWasmCall(real_sig, args, rets, position, ref_node)
+ : BuildWasmReturnCall(real_sig, args, position, ref_node);
return call;
}
-void WasmGraphBuilder::CompareToExternalFunctionAtIndex(
+void WasmGraphBuilder::CompareToInternalFunctionAtIndex(
Node* func_ref, uint32_t function_index, Node** success_control,
Node** failure_control) {
// Since we are comparing to a function reference, it is guaranteed that
- // instance->wasm_external_functions() has been initialized.
- Node* external_functions = gasm_->LoadFromObject(
+ // instance->wasm_internal_functions() has been initialized.
+ Node* internal_functions = gasm_->LoadFromObject(
MachineType::TaggedPointer(), GetInstance(),
wasm::ObjectAccess::ToTagged(
- WasmInstanceObject::kWasmExternalFunctionsOffset));
- Node* function_ref = gasm_->LoadFixedArrayElement(
- external_functions, gasm_->IntPtrConstant(function_index),
+ WasmInstanceObject::kWasmInternalFunctionsOffset));
+ Node* function_ref_at_index = gasm_->LoadFixedArrayElement(
+ internal_functions, gasm_->IntPtrConstant(function_index),
MachineType::AnyTagged());
- gasm_->Branch(gasm_->WordEqual(function_ref, func_ref), success_control,
- failure_control, BranchHint::kTrue);
+ gasm_->Branch(gasm_->TaggedEqual(function_ref_at_index, func_ref),
+ success_control, failure_control, BranchHint::kTrue);
}
-Node* WasmGraphBuilder::CallRef(const wasm::FunctionSig* sig,
+Node* WasmGraphBuilder::CallRef(const wasm::FunctionSig* real_sig,
base::Vector<Node*> args,
base::Vector<Node*> rets,
WasmGraphBuilder::CheckForNull null_check,
wasm::WasmCodePosition position) {
- return BuildCallRef(sig, args, rets, null_check, IsReturnCall::kCallContinues,
- position);
+ return BuildCallRef(real_sig, args, rets, null_check,
+ IsReturnCall::kCallContinues, position);
}
-Node* WasmGraphBuilder::ReturnCallRef(const wasm::FunctionSig* sig,
+Node* WasmGraphBuilder::ReturnCallRef(const wasm::FunctionSig* real_sig,
base::Vector<Node*> args,
WasmGraphBuilder::CheckForNull null_check,
wasm::WasmCodePosition position) {
- return BuildCallRef(sig, args, {}, null_check, IsReturnCall::kReturnCall,
+ return BuildCallRef(real_sig, args, {}, null_check, IsReturnCall::kReturnCall,
position);
}
-Node* WasmGraphBuilder::ReturnCall(uint32_t index, base::Vector<Node*> args,
+Node* WasmGraphBuilder::ReturnCall(uint32_t index,
+ const wasm::FunctionSig* real_sig,
+ base::Vector<Node*> args,
wasm::WasmCodePosition position) {
DCHECK_NULL(args[0]);
- const wasm::FunctionSig* sig = env_->module->functions[index].sig;
if (env_ && index < env_->module->num_imported_functions) {
// Return Call to an imported function.
- return BuildImportCall(sig, args, {}, position, index, kReturnCall);
+ return BuildImportCall(real_sig, args, {}, position, index, kReturnCall);
}
// A direct tail call to a wasm function defined in this module.
@@ -3300,21 +3323,21 @@ Node* WasmGraphBuilder::ReturnCall(uint32_t index, base::Vector<Node*> args,
Address code = static_cast<Address>(index);
args[0] = mcgraph()->RelocatableIntPtrConstant(code, RelocInfo::WASM_CALL);
- return BuildWasmReturnCall(sig, args, position, nullptr);
+ return BuildWasmReturnCall(real_sig, args, position, nullptr);
}
Node* WasmGraphBuilder::ReturnCallIndirect(uint32_t table_index,
uint32_t sig_index,
+ wasm::FunctionSig* real_sig,
base::Vector<Node*> args,
wasm::WasmCodePosition position) {
- return BuildIndirectCall(table_index, sig_index, args, {}, position,
+ return BuildIndirectCall(table_index, sig_index, real_sig, args, {}, position,
kReturnCall);
}
void WasmGraphBuilder::BrOnNull(Node* ref_object, Node** null_node,
Node** non_null_node) {
- BranchExpectFalse(gasm_->WordEqual(ref_object, RefNull()), null_node,
- non_null_node);
+ BranchExpectFalse(IsNull(ref_object), null_node, non_null_node);
}
Node* WasmGraphBuilder::BuildI32Rol(Node* left, Node* right) {
@@ -4437,6 +4460,8 @@ void WasmGraphBuilder::PrintDebugName(Node* node) {
Graph* WasmGraphBuilder::graph() { return mcgraph()->graph(); }
+Zone* WasmGraphBuilder::graph_zone() { return graph()->zone(); }
+
namespace {
Signature<MachineRepresentation>* CreateMachineSignature(
Zone* zone, const wasm::FunctionSig* sig,
@@ -5137,17 +5162,18 @@ Node* WasmGraphBuilder::SimdOp(wasm::WasmOpcode opcode, Node* const* inputs) {
return graph()->NewNode(mcgraph()->machine()->I8x16Swizzle(true),
inputs[0], inputs[1]);
case wasm::kExprI8x16RelaxedLaneSelect:
+ // Relaxed lane select puts the mask as first input (same as S128Select).
return graph()->NewNode(mcgraph()->machine()->I8x16RelaxedLaneSelect(),
- inputs[0], inputs[1], inputs[2]);
+ inputs[2], inputs[0], inputs[1]);
case wasm::kExprI16x8RelaxedLaneSelect:
return graph()->NewNode(mcgraph()->machine()->I16x8RelaxedLaneSelect(),
- inputs[0], inputs[1], inputs[2]);
+ inputs[2], inputs[0], inputs[1]);
case wasm::kExprI32x4RelaxedLaneSelect:
return graph()->NewNode(mcgraph()->machine()->I32x4RelaxedLaneSelect(),
- inputs[0], inputs[1], inputs[2]);
+ inputs[2], inputs[0], inputs[1]);
case wasm::kExprI64x2RelaxedLaneSelect:
return graph()->NewNode(mcgraph()->machine()->I64x2RelaxedLaneSelect(),
- inputs[0], inputs[1], inputs[2]);
+ inputs[2], inputs[0], inputs[1]);
case wasm::kExprF32x4RelaxedMin:
return graph()->NewNode(mcgraph()->machine()->F32x4RelaxedMin(),
inputs[0], inputs[1]);
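
The operand reordering above adapts the wasm operand order (a, b, mask) to the machine operator's order (mask, a, b). A scalar bitwise select with the machine operand order, for illustration only (a relaxed lane select is permitted to behave bitwise like this or per-lane):

#include <cstdint>

uint8_t Select(uint8_t mask, uint8_t a, uint8_t b) {
  // Take bits of a where mask is set, bits of b where it is clear.
  return static_cast<uint8_t>((a & mask) | (b & ~mask));
}
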
@@ -5261,15 +5287,24 @@ Node* WasmGraphBuilder::AtomicOp(wasm::WasmOpcode opcode, Node* const* inputs,
const OperatorByRep operator_by_rep = nullptr;
const OperatorByAtomicLoadRep operator_by_atomic_load_params = nullptr;
const OperatorByAtomicStoreRep operator_by_atomic_store_rep = nullptr;
+ const wasm::ValueType wasm_type;
constexpr AtomicOpInfo(Type t, MachineType m, OperatorByType o)
: type(t), machine_type(m), operator_by_type(o) {}
constexpr AtomicOpInfo(Type t, MachineType m, OperatorByRep o)
: type(t), machine_type(m), operator_by_rep(o) {}
- constexpr AtomicOpInfo(Type t, MachineType m, OperatorByAtomicLoadRep o)
- : type(t), machine_type(m), operator_by_atomic_load_params(o) {}
- constexpr AtomicOpInfo(Type t, MachineType m, OperatorByAtomicStoreRep o)
- : type(t), machine_type(m), operator_by_atomic_store_rep(o) {}
+ constexpr AtomicOpInfo(Type t, MachineType m, OperatorByAtomicLoadRep o,
+ wasm::ValueType v)
+ : type(t),
+ machine_type(m),
+ operator_by_atomic_load_params(o),
+ wasm_type(v) {}
+ constexpr AtomicOpInfo(Type t, MachineType m, OperatorByAtomicStoreRep o,
+ wasm::ValueType v)
+ : type(t),
+ machine_type(m),
+ operator_by_atomic_store_rep(o),
+ wasm_type(v) {}
// Constexpr, hence just a table lookup in most compilers.
static constexpr AtomicOpInfo Get(wasm::WasmOpcode opcode) {
@@ -5277,6 +5312,10 @@ Node* WasmGraphBuilder::AtomicOp(wasm::WasmOpcode opcode, Node* const* inputs,
#define CASE(Name, Type, MachType, Op) \
case wasm::kExpr##Name: \
return {Type, MachineType::MachType(), &MachineOperatorBuilder::Op};
+#define CASE_LOAD_STORE(Name, Type, MachType, Op, WasmType) \
+ case wasm::kExpr##Name: \
+ return {Type, MachineType::MachType(), &MachineOperatorBuilder::Op, \
+ WasmType};
// Binops.
CASE(I32AtomicAdd, kOneInput, Uint32, Word32AtomicAdd)
@@ -5339,24 +5378,39 @@ Node* WasmGraphBuilder::AtomicOp(wasm::WasmOpcode opcode, Node* const* inputs,
Word64AtomicCompareExchange)
// Load.
- CASE(I32AtomicLoad, kNoInput, Uint32, Word32AtomicLoad)
- CASE(I64AtomicLoad, kNoInput, Uint64, Word64AtomicLoad)
- CASE(I32AtomicLoad8U, kNoInput, Uint8, Word32AtomicLoad)
- CASE(I32AtomicLoad16U, kNoInput, Uint16, Word32AtomicLoad)
- CASE(I64AtomicLoad8U, kNoInput, Uint8, Word64AtomicLoad)
- CASE(I64AtomicLoad16U, kNoInput, Uint16, Word64AtomicLoad)
- CASE(I64AtomicLoad32U, kNoInput, Uint32, Word64AtomicLoad)
+ CASE_LOAD_STORE(I32AtomicLoad, kNoInput, Uint32, Word32AtomicLoad,
+ wasm::kWasmI32)
+ CASE_LOAD_STORE(I64AtomicLoad, kNoInput, Uint64, Word64AtomicLoad,
+ wasm::kWasmI64)
+ CASE_LOAD_STORE(I32AtomicLoad8U, kNoInput, Uint8, Word32AtomicLoad,
+ wasm::kWasmI32)
+ CASE_LOAD_STORE(I32AtomicLoad16U, kNoInput, Uint16, Word32AtomicLoad,
+ wasm::kWasmI32)
+ CASE_LOAD_STORE(I64AtomicLoad8U, kNoInput, Uint8, Word64AtomicLoad,
+ wasm::kWasmI64)
+ CASE_LOAD_STORE(I64AtomicLoad16U, kNoInput, Uint16, Word64AtomicLoad,
+ wasm::kWasmI64)
+ CASE_LOAD_STORE(I64AtomicLoad32U, kNoInput, Uint32, Word64AtomicLoad,
+ wasm::kWasmI64)
// Store.
- CASE(I32AtomicStore, kOneInput, Uint32, Word32AtomicStore)
- CASE(I64AtomicStore, kOneInput, Uint64, Word64AtomicStore)
- CASE(I32AtomicStore8U, kOneInput, Uint8, Word32AtomicStore)
- CASE(I32AtomicStore16U, kOneInput, Uint16, Word32AtomicStore)
- CASE(I64AtomicStore8U, kOneInput, Uint8, Word64AtomicStore)
- CASE(I64AtomicStore16U, kOneInput, Uint16, Word64AtomicStore)
- CASE(I64AtomicStore32U, kOneInput, Uint32, Word64AtomicStore)
+ CASE_LOAD_STORE(I32AtomicStore, kOneInput, Uint32, Word32AtomicStore,
+ wasm::kWasmI32)
+ CASE_LOAD_STORE(I64AtomicStore, kOneInput, Uint64, Word64AtomicStore,
+ wasm::kWasmI64)
+ CASE_LOAD_STORE(I32AtomicStore8U, kOneInput, Uint8, Word32AtomicStore,
+ wasm::kWasmI32)
+ CASE_LOAD_STORE(I32AtomicStore16U, kOneInput, Uint16, Word32AtomicStore,
+ wasm::kWasmI32)
+ CASE_LOAD_STORE(I64AtomicStore8U, kOneInput, Uint8, Word64AtomicStore,
+ wasm::kWasmI64)
+ CASE_LOAD_STORE(I64AtomicStore16U, kOneInput, Uint16, Word64AtomicStore,
+ wasm::kWasmI64)
+ CASE_LOAD_STORE(I64AtomicStore32U, kOneInput, Uint32, Word64AtomicStore,
+ wasm::kWasmI64)
#undef CASE
+#undef CASE_LOAD_STORE
case wasm::kExprAtomicNotify:
return {kSpecial, MachineType::Int32(), OperatorByType{nullptr}};
@@ -5399,8 +5453,28 @@ Node* WasmGraphBuilder::AtomicOp(wasm::WasmOpcode opcode, Node* const* inputs,
std::copy_n(inputs + 1, num_actual_inputs, input_nodes + 2);
input_nodes[num_actual_inputs + 2] = effect();
input_nodes[num_actual_inputs + 3] = control();
- return gasm_->AddNode(
+
+#ifdef V8_TARGET_BIG_ENDIAN
+ // Reverse the value bytes before storing.
+ if (info.operator_by_atomic_store_rep) {
+ input_nodes[num_actual_inputs + 1] = BuildChangeEndiannessStore(
+ input_nodes[num_actual_inputs + 1],
+ info.machine_type.representation(), info.wasm_type);
+ }
+#endif
+
+ Node* result = gasm_->AddNode(
graph()->NewNode(op, num_actual_inputs + 4, input_nodes));
+
+#ifdef V8_TARGET_BIG_ENDIAN
+ // Reverse the value bytes after load.
+ if (info.operator_by_atomic_load_params) {
+ result =
+ BuildChangeEndiannessLoad(result, info.machine_type, info.wasm_type);
+ }
+#endif
+
+ return result;
}
// After we've bounds-checked, compute the effective offset.
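
The new wasm_type field lets the big-endian path above byte-reverse atomic values at the right width. A standalone 32-bit example of the reversal itself:

#include <cstdint>

uint32_t ByteReverse32(uint32_t v) {
  return ((v & 0x000000ffu) << 24) | ((v & 0x0000ff00u) << 8) |
         ((v & 0x00ff0000u) >> 8) | ((v & 0xff000000u) >> 24);
}
// On a big-endian target the value is reversed before an atomic store and
// again after an atomic load, so wasm memory stays little-endian.
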
@@ -5782,7 +5856,7 @@ void WasmGraphBuilder::TypeCheck(
bool null_succeeds, Callbacks callbacks) {
if (config.object_can_be_null) {
(null_succeeds ? callbacks.succeed_if : callbacks.fail_if)(
- gasm_->WordEqual(object, RefNull()), BranchHint::kFalse);
+ IsNull(object), BranchHint::kFalse);
}
Node* map = gasm_->LoadMap(object);
@@ -5796,19 +5870,27 @@ void WasmGraphBuilder::TypeCheck(
DCHECK(config.reference_kind == kArrayOrStruct);
+ // First, check if types happen to be equal. This has been shown to give large
+ // speedups.
callbacks.succeed_if(gasm_->TaggedEqual(map, rtt), BranchHint::kTrue);
Node* type_info = gasm_->LoadWasmTypeInfo(map);
Node* supertypes = gasm_->LoadSupertypes(type_info);
- Node* supertypes_length =
- BuildChangeSmiToInt32(gasm_->LoadFixedArrayLengthAsSmi(supertypes));
Node* rtt_depth =
config.rtt_depth >= 0
- ? Int32Constant(config.rtt_depth)
- : BuildChangeSmiToInt32(gasm_->LoadFixedArrayLengthAsSmi(
+ ? gasm_->IntPtrConstant(config.rtt_depth)
+ : BuildChangeSmiToIntPtr(gasm_->LoadFixedArrayLengthAsSmi(
gasm_->LoadSupertypes(gasm_->LoadWasmTypeInfo(rtt))));
- callbacks.fail_if_not(gasm_->Uint32LessThan(rtt_depth, supertypes_length),
- BranchHint::kTrue);
+ // If the depth of the rtt is known to be less than the minimum supertype
+ // array length, we can access the supertype without bounds-checking the
+ // supertype array.
+ if (config.rtt_depth < 0 || static_cast<uint32_t>(config.rtt_depth) >=
+ wasm::kMinimumSupertypeArraySize) {
+ Node* supertypes_length =
+ BuildChangeSmiToIntPtr(gasm_->LoadFixedArrayLengthAsSmi(supertypes));
+ callbacks.fail_if_not(gasm_->UintLessThan(rtt_depth, supertypes_length),
+ BranchHint::kTrue);
+ }
Node* maybe_match = gasm_->LoadFixedArrayElement(
supertypes, rtt_depth, MachineType::TaggedPointer());
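
A simplified standalone version of the elision logic above; the container and the constant's value are assumptions for illustration. When the statically known rtt depth is below the guaranteed minimum supertype-array length, the length comparison can be dropped:

#include <cstdint>
#include <vector>

constexpr uint32_t kMinimumSupertypeArraySize = 3;  // assumed value

bool IsSubtypeAtDepth(const std::vector<const void*>& supertypes,
                      const void* rtt, uint32_t depth) {
  if (depth >= kMinimumSupertypeArraySize) {
    // Only depths past the guaranteed prefix need an explicit length check.
    if (depth >= supertypes.size()) return false;
  }
  // Every supertype array is at least kMinimumSupertypeArraySize long, so
  // smaller depths can be read unconditionally.
  return supertypes[depth] == rtt;
}
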
@@ -5819,7 +5901,7 @@ void WasmGraphBuilder::TypeCheck(
void WasmGraphBuilder::DataCheck(Node* object, bool object_can_be_null,
Callbacks callbacks) {
if (object_can_be_null) {
- callbacks.fail_if(gasm_->WordEqual(object, RefNull()), BranchHint::kFalse);
+ callbacks.fail_if(IsNull(object), BranchHint::kFalse);
}
callbacks.fail_if(gasm_->IsI31(object), BranchHint::kFalse);
Node* map = gasm_->LoadMap(object);
@@ -5829,11 +5911,12 @@ void WasmGraphBuilder::DataCheck(Node* object, bool object_can_be_null,
void WasmGraphBuilder::FuncCheck(Node* object, bool object_can_be_null,
Callbacks callbacks) {
if (object_can_be_null) {
- callbacks.fail_if(gasm_->WordEqual(object, RefNull()), BranchHint::kFalse);
+ callbacks.fail_if(IsNull(object), BranchHint::kFalse);
}
callbacks.fail_if(gasm_->IsI31(object), BranchHint::kFalse);
- callbacks.fail_if_not(gasm_->HasInstanceType(object, JS_FUNCTION_TYPE),
- BranchHint::kTrue);
+ callbacks.fail_if_not(
+ gasm_->HasInstanceType(object, WASM_INTERNAL_FUNCTION_TYPE),
+ BranchHint::kTrue);
}
void WasmGraphBuilder::BrOnCastAbs(
@@ -5979,8 +6062,7 @@ Node* WasmGraphBuilder::StructGet(Node* struct_object,
bool is_signed,
wasm::WasmCodePosition position) {
if (null_check == kWithNullCheck) {
- TrapIfTrue(wasm::kTrapNullDereference,
- gasm_->WordEqual(struct_object, RefNull()), position);
+ TrapIfTrue(wasm::kTrapNullDereference, IsNull(struct_object), position);
}
// It is not enough to invoke ValueType::machine_type(), because the
// signedness has to be determined by {is_signed}.
@@ -5996,8 +6078,7 @@ void WasmGraphBuilder::StructSet(Node* struct_object,
CheckForNull null_check,
wasm::WasmCodePosition position) {
if (null_check == kWithNullCheck) {
- TrapIfTrue(wasm::kTrapNullDereference,
- gasm_->WordEqual(struct_object, RefNull()), position);
+ TrapIfTrue(wasm::kTrapNullDereference, IsNull(struct_object), position);
}
gasm_->StoreStructField(struct_object, struct_type, field_index, field_value);
}
@@ -6027,8 +6108,7 @@ Node* WasmGraphBuilder::ArrayGet(Node* array_object,
CheckForNull null_check, bool is_signed,
wasm::WasmCodePosition position) {
if (null_check == kWithNullCheck) {
- TrapIfTrue(wasm::kTrapNullDereference,
- gasm_->WordEqual(array_object, RefNull()), position);
+ TrapIfTrue(wasm::kTrapNullDereference, IsNull(array_object), position);
}
BoundsCheckArray(array_object, index, position);
MachineType machine_type = MachineType::TypeForRepresentation(
@@ -6042,8 +6122,7 @@ void WasmGraphBuilder::ArraySet(Node* array_object, const wasm::ArrayType* type,
CheckForNull null_check,
wasm::WasmCodePosition position) {
if (null_check == kWithNullCheck) {
- TrapIfTrue(wasm::kTrapNullDereference,
- gasm_->WordEqual(array_object, RefNull()), position);
+ TrapIfTrue(wasm::kTrapNullDereference, IsNull(array_object), position);
}
BoundsCheckArray(array_object, index, position);
Node* offset = gasm_->WasmArrayElementOffset(index, type->element_type());
@@ -6054,8 +6133,7 @@ void WasmGraphBuilder::ArraySet(Node* array_object, const wasm::ArrayType* type,
Node* WasmGraphBuilder::ArrayLen(Node* array_object, CheckForNull null_check,
wasm::WasmCodePosition position) {
if (null_check == kWithNullCheck) {
- TrapIfTrue(wasm::kTrapNullDereference,
- gasm_->WordEqual(array_object, RefNull()), position);
+ TrapIfTrue(wasm::kTrapNullDereference, IsNull(array_object), position);
}
return gasm_->LoadWasmArrayLength(array_object);
}
@@ -6068,19 +6146,17 @@ void WasmGraphBuilder::ArrayCopy(Node* dst_array, Node* dst_index,
Node* length,
wasm::WasmCodePosition position) {
if (dst_null_check == kWithNullCheck) {
- TrapIfTrue(wasm::kTrapNullDereference,
- gasm_->WordEqual(dst_array, RefNull()), position);
+ TrapIfTrue(wasm::kTrapNullDereference, IsNull(dst_array), position);
}
if (src_null_check == kWithNullCheck) {
- TrapIfTrue(wasm::kTrapNullDereference,
- gasm_->WordEqual(src_array, RefNull()), position);
+ TrapIfTrue(wasm::kTrapNullDereference, IsNull(src_array), position);
}
BoundsCheckArrayCopy(dst_array, dst_index, length, position);
BoundsCheckArrayCopy(src_array, src_index, length, position);
auto skip = gasm_->MakeLabel();
- gasm_->GotoIf(gasm_->WordEqual(length, Int32Constant(0)), &skip,
+ gasm_->GotoIf(gasm_->Word32Equal(length, Int32Constant(0)), &skip,
BranchHint::kFalse);
Node* function =
@@ -6207,8 +6283,6 @@ class WasmWrapperGraphBuilder : public WasmGraphBuilder {
: gasm_->GetBuiltinPointerTarget(builtin);
}
- Node* UndefinedValue() { return LOAD_ROOT(UndefinedValue, undefined_value); }
-
Node* BuildChangeInt32ToNumber(Node* value) {
// We expect most integers at runtime to be Smis, so it is important for
// wrapper performance that Smi conversion be inlined.
@@ -6360,8 +6434,27 @@ class WasmWrapperGraphBuilder : public WasmGraphBuilder {
case wasm::kOptRef:
switch (type.heap_representation()) {
case wasm::HeapType::kExtern:
- case wasm::HeapType::kFunc:
return node;
+ case wasm::HeapType::kFunc: {
+ if (type.kind() == wasm::kOptRef) {
+ auto done =
+ gasm_->MakeLabel(MachineRepresentation::kTaggedPointer);
+ // Do not wrap {null}.
+ gasm_->GotoIf(IsNull(node), &done, node);
+ gasm_->Goto(&done,
+ gasm_->LoadFromObject(
+ MachineType::TaggedPointer(), node,
+ wasm::ObjectAccess::ToTagged(
+ WasmInternalFunction::kExternalOffset)));
+ gasm_->Bind(&done);
+ return done.PhiAt(0);
+ } else {
+ return gasm_->LoadFromObject(
+ MachineType::TaggedPointer(), node,
+ wasm::ObjectAccess::ToTagged(
+ WasmInternalFunction::kExternalOffset));
+ }
+ }
case wasm::HeapType::kData:
case wasm::HeapType::kEq:
case wasm::HeapType::kI31:
@@ -6370,7 +6463,7 @@ class WasmWrapperGraphBuilder : public WasmGraphBuilder {
auto done =
gasm_->MakeLabel(MachineRepresentation::kTaggedPointer);
// Do not wrap {null}.
- gasm_->GotoIf(gasm_->WordEqual(node, RefNull()), &done, node);
+ gasm_->GotoIf(IsNull(node), &done, node);
gasm_->Goto(&done, BuildAllocateObjectWrapper(node));
gasm_->Bind(&done);
return done.PhiAt(0);
@@ -6378,23 +6471,34 @@ class WasmWrapperGraphBuilder : public WasmGraphBuilder {
return BuildAllocateObjectWrapper(node);
}
case wasm::HeapType::kAny: {
- // Only wrap {node} if it is an array/struct/i31, i.e., do not wrap
- // functions and null.
+ // Wrap {node} in object wrapper if it is an array/struct/i31.
+ // Extract external function if this is a WasmInternalFunction.
+ // Otherwise (i.e. null and external refs), return input.
// TODO(7748): Update this when JS interop is settled.
auto done = gasm_->MakeLabel(MachineRepresentation::kTaggedPointer);
gasm_->GotoIf(IsSmi(node), &done, BuildAllocateObjectWrapper(node));
// This includes the case where {node == null}.
- gasm_->GotoIfNot(gasm_->IsDataRefMap(gasm_->LoadMap(node)), &done,
- node);
- gasm_->Goto(&done, BuildAllocateObjectWrapper(node));
+ gasm_->GotoIf(gasm_->IsDataRefMap(gasm_->LoadMap(node)), &done,
+ BuildAllocateObjectWrapper(node));
+ gasm_->GotoIf(
+ gasm_->HasInstanceType(node, WASM_INTERNAL_FUNCTION_TYPE),
+ &done,
+ gasm_->LoadFromObject(
+ MachineType::TaggedPointer(), node,
+ wasm::ObjectAccess::ToTagged(
+ WasmInternalFunction::kExternalOffset)));
+ gasm_->Goto(&done, node);
gasm_->Bind(&done);
return done.PhiAt(0);
}
default:
DCHECK(type.has_index());
if (module_->has_signature(type.ref_index())) {
- // Typed function
- return node;
+ // Typed function. Extract the external function.
+ return gasm_->LoadFromObject(
+ MachineType::TaggedPointer(), node,
+ wasm::ObjectAccess::ToTagged(
+ WasmInternalFunction::kExternalOffset));
}
// If this is reached, then IsJSCompatibleSignature() is too
// permissive.
@@ -6425,24 +6529,43 @@ class WasmWrapperGraphBuilder : public WasmGraphBuilder {
}
// Assumes {input} has been checked for validity against the target wasm type.
- // Returns the value of the property associated with
- // {wasm_wrapped_object_symbol} in {input}, or {input} itself if the property
- // is not found.
+ // If {input} is a function, returns the WasmInternalFunction associated with
+ // it. If {input} has the {wasm_wrapped_object_symbol} property, returns the
+ // value of that property. Otherwise, returns {input}.
Node* BuildUnpackObjectWrapper(Node* input) {
- if (FLAG_wasm_gc_js_interop) return input;
- Node* obj = gasm_->CallBuiltin(
- Builtin::kWasmGetOwnProperty, Operator::kEliminatable, input,
- LOAD_ROOT(wasm_wrapped_object_symbol, wasm_wrapped_object_symbol),
- LOAD_INSTANCE_FIELD(NativeContext, MachineType::TaggedPointer()));
- // Invalid object wrappers (i.e. any other JS object that doesn't have the
- // magic hidden property) will return {undefined}. Map that to {null} or
- // {input}, depending on the value of {failure}.
- Node* undefined = UndefinedValue();
- Node* is_undefined = gasm_->WordEqual(obj, undefined);
- Diamond check(graph(), mcgraph()->common(), is_undefined,
- BranchHint::kFalse);
- check.Chain(control());
- return check.Phi(MachineRepresentation::kTagged, input, obj);
+ auto not_a_function = gasm_->MakeLabel();
+ auto end = gasm_->MakeLabel(MachineRepresentation::kTaggedPointer);
+
+ gasm_->GotoIfNot(gasm_->HasInstanceType(input, JS_FUNCTION_TYPE),
+ &not_a_function);
+
+ Node* function_data = gasm_->LoadFunctionDataFromJSFunction(input);
+
+ // Due to type checking, {function_data} will be a WasmFunctionData.
+ Node* internal = gasm_->LoadFromObject(
+ MachineType::TaggedPointer(), function_data,
+ wasm::ObjectAccess::ToTagged(WasmFunctionData::kInternalOffset));
+ gasm_->Goto(&end, internal);
+
+ gasm_->Bind(&not_a_function);
+ if (!FLAG_wasm_gc_js_interop) {
+ Node* obj = gasm_->CallBuiltin(
+ Builtin::kWasmGetOwnProperty, Operator::kEliminatable, input,
+ LOAD_ROOT(wasm_wrapped_object_symbol, wasm_wrapped_object_symbol),
+ LOAD_INSTANCE_FIELD(NativeContext, MachineType::TaggedPointer()));
+ // Invalid object wrappers (i.e. any other JS object that doesn't have the
+ // magic hidden property) will return {undefined}. Map that to {input}.
+ Node* is_undefined = gasm_->TaggedEqual(obj, UndefinedValue());
+ gasm_->GotoIf(is_undefined, &end, input);
+
+ gasm_->Goto(&end, obj);
+ } else {
+ gasm_->Goto(&end, input);
+ }
+
+ gasm_->Bind(&end);
+
+ return end.PhiAt(0);
}
Node* BuildChangeInt64ToBigInt(Node* input) {
@@ -6486,6 +6609,8 @@ class WasmWrapperGraphBuilder : public WasmGraphBuilder {
wasm::ValueType type) {
// Make sure ValueType fits in a Smi.
STATIC_ASSERT(wasm::ValueType::kLastUsedBit + 1 <= kSmiValueSize);
+ // The instance node is always defined: if an instance is not available, it
+ // is the undefined value.
Node* inputs[] = {GetInstance(), input,
mcgraph()->IntPtrConstant(
IntToSmi(static_cast<int>(type.raw_bit_field())))};
@@ -6519,7 +6644,7 @@ class WasmWrapperGraphBuilder : public WasmGraphBuilder {
return BuildUnpackObjectWrapper(input);
case wasm::HeapType::kFunc:
BuildCheckValidRefValue(input, js_context, type);
- return input;
+ return BuildUnpackObjectWrapper(input);
case wasm::HeapType::kData:
case wasm::HeapType::kEq:
case wasm::HeapType::kI31:
@@ -6531,7 +6656,7 @@ class WasmWrapperGraphBuilder : public WasmGraphBuilder {
default:
if (module_->has_signature(type.ref_index())) {
BuildCheckValidRefValue(input, js_context, type);
- return input;
+ return BuildUnpackObjectWrapper(input);
}
// If this is reached, then IsJSCompatibleSignature() is too
// permissive.
@@ -6730,9 +6855,19 @@ class WasmWrapperGraphBuilder : public WasmGraphBuilder {
} else {
// Call to a wasm function defined in this module.
// The (cached) call target is the jump table slot for that function.
- args[0] = BuildLoadCallTargetFromExportedFunctionData(function_data);
+ Node* internal = gasm_->LoadFromObject(
+ MachineType::TaggedPointer(), function_data,
+ wasm::ObjectAccess::ToTagged(WasmFunctionData::kInternalOffset));
+ Node* sandboxed_pointer = gasm_->LoadFromObject(
+ MachineType::Pointer(), internal,
+ wasm::ObjectAccess::ToTagged(
+ WasmInternalFunction::kForeignAddressOffset));
+ args[0] = BuildUnsandboxExternalPointer(sandboxed_pointer);
+ Node* instance_node = gasm_->LoadFromObject(
+ MachineType::TaggedPointer(), internal,
+ wasm::ObjectAccess::ToTagged(WasmInternalFunction::kRefOffset));
BuildWasmCall(sig_, base::VectorOf(args), base::VectorOf(rets),
- wasm::kNoCodePosition, nullptr, frame_state);
+ wasm::kNoCodePosition, instance_node, frame_state);
}
}
@@ -6805,7 +6940,11 @@ class WasmWrapperGraphBuilder : public WasmGraphBuilder {
gasm_->GotoIf(IsSmi(input), &done);
Node* map = gasm_->LoadMap(input);
Node* heap_number_map = LOAD_ROOT(HeapNumberMap, heap_number_map);
+#if V8_MAP_PACKING
Node* is_heap_number = gasm_->WordEqual(heap_number_map, map);
+#else
+ Node* is_heap_number = gasm_->TaggedEqual(heap_number_map, map);
+#endif
gasm_->GotoIf(is_heap_number, &done);
gasm_->Goto(slow_path);
gasm_->Bind(&done);
@@ -7232,9 +7371,12 @@ class WasmWrapperGraphBuilder : public WasmGraphBuilder {
// Load the original callable from the closure.
Node* func_data = gasm_->LoadFunctionDataFromJSFunction(closure);
- Node* ref = gasm_->LoadFromObject(
+ Node* internal = gasm_->LoadFromObject(
MachineType::AnyTagged(), func_data,
- wasm::ObjectAccess::ToTagged(WasmJSFunctionData::kRefOffset));
+ wasm::ObjectAccess::ToTagged(WasmFunctionData::kInternalOffset));
+ Node* ref = gasm_->LoadFromObject(
+ MachineType::AnyTagged(), internal,
+ wasm::ObjectAccess::ToTagged(WasmInternalFunction::kRefOffset));
Node* callable = gasm_->LoadFromObject(
MachineType::AnyTagged(), ref,
wasm::ObjectAccess::ToTagged(WasmApiFunctionRef::kCallableOffset));
@@ -7993,6 +8135,8 @@ wasm::WasmCompilationResult ExecuteTurbofanWasmCompilation(
info.set_wasm_runtime_exception_support();
}
+ if (FLAG_experimental_wasm_gc) info.set_allocation_folding();
+
if (info.trace_turbo_json()) {
TurboCfgFile tcf;
tcf << AsC1VCompilation(&info);
@@ -8187,23 +8331,58 @@ CallDescriptor* GetWasmCallDescriptor(Zone* zone, const wasm::FunctionSig* fsig,
flags, // flags
"wasm-call", // debug name
StackArgumentOrder::kDefault, // order of the arguments in the stack
+ fsig, // signature
0, // allocatable registers
return_slots); // return slot count
}
namespace {
+const wasm::FunctionSig* ReplaceTypeInSig(Zone* zone,
+ const wasm::FunctionSig* sig,
+ wasm::ValueType from,
+ wasm::ValueType to,
+ size_t num_replacements) {
+ size_t param_occurrences =
+ std::count(sig->parameters().begin(), sig->parameters().end(), from);
+ size_t return_occurrences =
+ std::count(sig->returns().begin(), sig->returns().end(), from);
+ if (param_occurrences == 0 && return_occurrences == 0) return sig;
+
+ wasm::FunctionSig::Builder builder(
+ zone, sig->return_count() + return_occurrences * (num_replacements - 1),
+ sig->parameter_count() + param_occurrences * (num_replacements - 1));
+
+ for (wasm::ValueType ret : sig->returns()) {
+ if (ret == from) {
+ for (size_t i = 0; i < num_replacements; i++) builder.AddReturn(to);
+ } else {
+ builder.AddReturn(ret);
+ }
+ }
+
+ for (wasm::ValueType param : sig->parameters()) {
+ if (param == from) {
+ for (size_t i = 0; i < num_replacements; i++) builder.AddParam(to);
+ } else {
+ builder.AddParam(param);
+ }
+ }
+
+ return builder.Build();
+}
+
CallDescriptor* ReplaceTypeInCallDescriptorWith(
Zone* zone, const CallDescriptor* call_descriptor, size_t num_replacements,
- MachineType input_type, MachineRepresentation output_type) {
+ wasm::ValueType input_type, wasm::ValueType output_type) {
size_t parameter_count = call_descriptor->ParameterCount();
size_t return_count = call_descriptor->ReturnCount();
for (size_t i = 0; i < call_descriptor->ParameterCount(); i++) {
- if (call_descriptor->GetParameterType(i) == input_type) {
+ if (call_descriptor->GetParameterType(i) == input_type.machine_type()) {
parameter_count += num_replacements - 1;
}
}
for (size_t i = 0; i < call_descriptor->ReturnCount(); i++) {
- if (call_descriptor->GetReturnType(i) == input_type) {
+ if (call_descriptor->GetReturnType(i) == input_type.machine_type()) {
return_count += num_replacements - 1;
}
}
@@ -8224,12 +8403,12 @@ CallDescriptor* ReplaceTypeInCallDescriptorWith(
LinkageLocationAllocator params(
wasm::kGpParamRegisters, wasm::kFpParamRegisters, 0 /* no slot offset */);
- for (size_t i = 0, e = call_descriptor->ParameterCount() -
- (has_callable_param ? 1 : 0);
- i < e; i++) {
- if (call_descriptor->GetParameterType(i) == input_type) {
+ for (size_t i = 0;
+ i < call_descriptor->ParameterCount() - (has_callable_param ? 1 : 0);
+ i++) {
+ if (call_descriptor->GetParameterType(i) == input_type.machine_type()) {
for (size_t j = 0; j < num_replacements; j++) {
- locations.AddParam(params.Next(output_type));
+ locations.AddParam(params.Next(output_type.machine_representation()));
}
} else {
locations.AddParam(
@@ -8247,9 +8426,9 @@ CallDescriptor* ReplaceTypeInCallDescriptorWith(
wasm::kFpReturnRegisters, parameter_slots);
for (size_t i = 0; i < call_descriptor->ReturnCount(); i++) {
- if (call_descriptor->GetReturnType(i) == input_type) {
+ if (call_descriptor->GetReturnType(i) == input_type.machine_type()) {
for (size_t j = 0; j < num_replacements; j++) {
- locations.AddReturn(rets.Next(output_type));
+ locations.AddReturn(rets.Next(output_type.machine_representation()));
}
} else {
locations.AddReturn(
@@ -8259,6 +8438,9 @@ CallDescriptor* ReplaceTypeInCallDescriptorWith(
int return_slots = rets.NumStackSlots();
+ auto sig = ReplaceTypeInSig(zone, call_descriptor->wasm_sig(), input_type,
+ output_type, num_replacements);
+
return zone->New<CallDescriptor>( // --
call_descriptor->kind(), // kind
call_descriptor->GetInputType(0), // target MachineType
@@ -8271,23 +8453,24 @@ CallDescriptor* ReplaceTypeInCallDescriptorWith(
call_descriptor->flags(), // flags
call_descriptor->debug_name(), // debug name
call_descriptor->GetStackArgumentOrder(), // stack order
+ sig, // signature
call_descriptor->AllocatableRegisters(), // allocatable registers
return_slots); // return slot count
}
} // namespace
+// static
+const wasm::FunctionSig* WasmGraphBuilder::Int64LoweredSig(
+ Zone* zone, const wasm::FunctionSig* sig) {
+ return (kSystemPointerSize == 4)
+ ? ReplaceTypeInSig(zone, sig, wasm::kWasmI64, wasm::kWasmI32, 2)
+ : sig;
+}
+
CallDescriptor* GetI32WasmCallDescriptor(
Zone* zone, const CallDescriptor* call_descriptor) {
return ReplaceTypeInCallDescriptorWith(zone, call_descriptor, 2,
- MachineType::Int64(),
- MachineRepresentation::kWord32);
-}
-
-CallDescriptor* GetI32WasmCallDescriptorForSimd(
- Zone* zone, CallDescriptor* call_descriptor) {
- return ReplaceTypeInCallDescriptorWith(zone, call_descriptor, 4,
- MachineType::Simd128(),
- MachineRepresentation::kWord32);
+ wasm::kWasmI64, wasm::kWasmI32);
}
AssemblerOptions WasmAssemblerOptions() {
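
The signature rewrite above (ReplaceTypeInSig, used by Int64LoweredSig and GetI32WasmCallDescriptor) boils down to: on 32-bit targets each i64 slot becomes two i32 slots. A reduced sketch with a plain enum standing in for wasm::ValueType:

#include <vector>

enum class ValType { kI32, kI64, kF32, kF64 };

std::vector<ValType> LowerI64(const std::vector<ValType>& types) {
  std::vector<ValType> lowered;
  lowered.reserve(types.size());
  for (ValType t : types) {
    if (t == ValType::kI64) {
      lowered.push_back(ValType::kI32);  // low word
      lowered.push_back(ValType::kI32);  // high word
    } else {
      lowered.push_back(t);
    }
  }
  return lowered;
}
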
diff --git a/deps/v8/src/compiler/wasm-compiler.h b/deps/v8/src/compiler/wasm-compiler.h
index 132cbc34a9..93b08a0dd9 100644
--- a/deps/v8/src/compiler/wasm-compiler.h
+++ b/deps/v8/src/compiler/wasm-compiler.h
@@ -333,25 +333,32 @@ class WasmGraphBuilder {
void Trap(wasm::TrapReason reason, wasm::WasmCodePosition position);
- Node* CallDirect(uint32_t index, base::Vector<Node*> args,
- base::Vector<Node*> rets, wasm::WasmCodePosition position);
+ // In all six call-related public functions, we pass a signature based on the
+ // real arguments for this call. This signature gets stored in the Call node
+ // and will later help us generate better code if this call gets inlined.
+ Node* CallDirect(uint32_t index, wasm::FunctionSig* real_sig,
+ base::Vector<Node*> args, base::Vector<Node*> rets,
+ wasm::WasmCodePosition position);
Node* CallIndirect(uint32_t table_index, uint32_t sig_index,
- base::Vector<Node*> args, base::Vector<Node*> rets,
- wasm::WasmCodePosition position);
- Node* CallRef(const wasm::FunctionSig* sig, base::Vector<Node*> args,
+ wasm::FunctionSig* real_sig, base::Vector<Node*> args,
+ base::Vector<Node*> rets, wasm::WasmCodePosition position);
+ Node* CallRef(const wasm::FunctionSig* real_sig, base::Vector<Node*> args,
base::Vector<Node*> rets, CheckForNull null_check,
wasm::WasmCodePosition position);
- void CompareToExternalFunctionAtIndex(Node* func_ref, uint32_t function_index,
- Node** success_control,
- Node** failure_control);
- Node* ReturnCall(uint32_t index, base::Vector<Node*> args,
- wasm::WasmCodePosition position);
+ Node* ReturnCall(uint32_t index, const wasm::FunctionSig* real_sig,
+ base::Vector<Node*> args, wasm::WasmCodePosition position);
Node* ReturnCallIndirect(uint32_t table_index, uint32_t sig_index,
+ wasm::FunctionSig* real_sig,
base::Vector<Node*> args,
wasm::WasmCodePosition position);
- Node* ReturnCallRef(const wasm::FunctionSig* sig, base::Vector<Node*> args,
- CheckForNull null_check, wasm::WasmCodePosition position);
+ Node* ReturnCallRef(const wasm::FunctionSig* real_sig,
+ base::Vector<Node*> args, CheckForNull null_check,
+ wasm::WasmCodePosition position);
+
+ void CompareToInternalFunctionAtIndex(Node* func_ref, uint32_t function_index,
+ Node** success_control,
+ Node** failure_control);
void BrOnNull(Node* ref_object, Node** non_null_node, Node** null_node);
@@ -530,12 +537,16 @@ class WasmGraphBuilder {
MachineGraph* mcgraph() { return mcgraph_; }
Graph* graph();
+ Zone* graph_zone();
void AddBytecodePositionDecorator(NodeOriginTable* node_origins,
wasm::Decoder* decoder);
void RemoveBytecodePositionDecorator();
+ static const wasm::FunctionSig* Int64LoweredSig(Zone* zone,
+ const wasm::FunctionSig* sig);
+
protected:
V8_EXPORT_PRIVATE WasmGraphBuilder(wasm::CompilationEnv* env, Zone* zone,
MachineGraph* mcgraph,
@@ -548,6 +559,7 @@ class WasmGraphBuilder {
Node* GetInstance();
Node* BuildLoadIsolateRoot();
+ Node* UndefinedValue();
// MemBuffer is only called with valid offsets (after bounds checking), so the
// offset fits in a platform-dependent uintptr_t.
@@ -587,7 +599,8 @@ class WasmGraphBuilder {
Node** ift_sig_ids, Node** ift_targets,
Node** ift_instances);
Node* BuildIndirectCall(uint32_t table_index, uint32_t sig_index,
- base::Vector<Node*> args, base::Vector<Node*> rets,
+ wasm::FunctionSig* real_sig, base::Vector<Node*> args,
+ base::Vector<Node*> rets,
wasm::WasmCodePosition position,
IsReturnCall continuation);
Node* BuildWasmCall(const wasm::FunctionSig* sig, base::Vector<Node*> args,
@@ -605,9 +618,9 @@ class WasmGraphBuilder {
base::Vector<Node*> rets,
wasm::WasmCodePosition position, Node* func_index,
IsReturnCall continuation);
- Node* BuildCallRef(const wasm::FunctionSig* sig, base::Vector<Node*> args,
- base::Vector<Node*> rets, CheckForNull null_check,
- IsReturnCall continuation,
+ Node* BuildCallRef(const wasm::FunctionSig* real_sig,
+ base::Vector<Node*> args, base::Vector<Node*> rets,
+ CheckForNull null_check, IsReturnCall continuation,
wasm::WasmCodePosition position);
Node* BuildF32CopySign(Node* left, Node* right);
@@ -678,6 +691,8 @@ class WasmGraphBuilder {
// generates {index > max ? Smi(max) : Smi(index)}
Node* BuildConvertUint32ToSmiWithSaturation(Node* index, uint32_t maxval);
+ Node* IsNull(Node* object);
+
using BranchBuilder = std::function<void(Node*, BranchHint)>;
struct Callbacks {
BranchBuilder succeed_if;
@@ -742,6 +757,8 @@ class WasmGraphBuilder {
Node* BuildMultiReturnFixedArrayFromIterable(const wasm::FunctionSig* sig,
Node* iterable, Node* context);
+ Node* BuildUnsandboxExternalPointer(Node* external_pointer);
+
Node* BuildLoadCallTargetFromExportedFunctionData(Node* function_data);
//-----------------------------------------------------------------------
@@ -810,9 +827,6 @@ V8_EXPORT_PRIVATE CallDescriptor* GetWasmCallDescriptor(
V8_EXPORT_PRIVATE CallDescriptor* GetI32WasmCallDescriptor(
Zone* zone, const CallDescriptor* call_descriptor);
-V8_EXPORT_PRIVATE CallDescriptor* GetI32WasmCallDescriptorForSimd(
- Zone* zone, CallDescriptor* call_descriptor);
-
AssemblerOptions WasmAssemblerOptions();
AssemblerOptions WasmStubAssemblerOptions();
diff --git a/deps/v8/src/compiler/wasm-escape-analysis.cc b/deps/v8/src/compiler/wasm-escape-analysis.cc
index f2057f77ef..e05a792fba 100644
--- a/deps/v8/src/compiler/wasm-escape-analysis.cc
+++ b/deps/v8/src/compiler/wasm-escape-analysis.cc
@@ -22,35 +22,44 @@ Reduction WasmEscapeAnalysis::Reduce(Node* node) {
Reduction WasmEscapeAnalysis::ReduceAllocateRaw(Node* node) {
DCHECK_EQ(node->opcode(), IrOpcode::kAllocateRaw);
-
// TODO(manoskouk): Account for phis.
- std::vector<Edge> use_edges;
+
+ // Collect all value edges of {node} in this vector.
+ std::vector<Edge> value_edges;
for (Edge edge : node->use_edges()) {
if (NodeProperties::IsValueEdge(edge)) {
if (edge.index() != 0 ||
edge.from()->opcode() != IrOpcode::kStoreToObject) {
+ // The allocated object is used for something other than storing into.
return NoChange();
}
+ value_edges.push_back(edge);
}
- use_edges.push_back(edge);
}
- // Remove all stores from the effect chain.
- for (Edge edge : use_edges) {
- if (NodeProperties::IsValueEdge(edge)) {
- Node* use = edge.from();
- DCHECK_EQ(edge.index(), 0);
- DCHECK_EQ(use->opcode(), IrOpcode::kStoreToObject);
- ReplaceWithValue(use, mcgraph_->Dead(),
- NodeProperties::GetEffectInput(use), mcgraph_->Dead());
- }
+ // Remove all discovered stores from the effect chain.
+ for (Edge edge : value_edges) {
+ DCHECK(NodeProperties::IsValueEdge(edge));
+ DCHECK_EQ(edge.index(), 0);
+ Node* use = edge.from();
+ DCHECK(!use->IsDead());
+ DCHECK_EQ(use->opcode(), IrOpcode::kStoreToObject);
+ // The value stored by this StoreToObject node might be another allocation
+ // which has no more uses. Therefore we have to revisit it. Note that this
+ // will not happen automatically: ReplaceWithValue does not trigger revisits
+ // of former inputs of the replaced node.
+ Node* stored_value = NodeProperties::GetValueInput(use, 2);
+ Revisit(stored_value);
+ ReplaceWithValue(use, mcgraph_->Dead(), NodeProperties::GetEffectInput(use),
+ mcgraph_->Dead());
+ use->Kill();
}
// Remove the allocation from the effect and control chains.
ReplaceWithValue(node, mcgraph_->Dead(), NodeProperties::GetEffectInput(node),
NodeProperties::GetControlInput(node));
- return NoChange();
+ return Changed(node);
}
} // namespace compiler
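
The rewritten ReduceAllocateRaw hinges on one rule: an AllocateRaw node can be removed when its only value uses are as the object input (input index 0) of StoreToObject nodes, i.e. the allocation is written to but never read or passed anywhere. A standalone sketch of that check, with toy node types standing in for TurboFan's, could read:

#include <string_view>
#include <vector>

// Toy stand-ins for compiler graph nodes; not V8's Node/Edge classes.
struct Node;

struct Use {
  Node* from;  // the node that uses the allocation
  int index;   // which value input of {from} the allocation feeds
};

struct Node {
  std::string_view op;
  std::vector<Use> value_uses;
};

bool AllocationIsRemovable(const Node& alloc) {
  for (const Use& use : alloc.value_uses) {
    // Anything other than being the store target means the object escapes.
    if (use.index != 0 || use.from->op != "StoreToObject") return false;
  }
  return true;  // Only stored into: the stores and the allocation can go.
}

The explicit Revisit of the stored value in the patch covers the follow-on case: once a store is killed, the value it stored may itself become an allocation with no remaining uses, and the reducer has to be re-run on it by hand.
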
diff --git a/deps/v8/src/compiler/wasm-inlining.cc b/deps/v8/src/compiler/wasm-inlining.cc
index 784608c0e7..05e65951b3 100644
--- a/deps/v8/src/compiler/wasm-inlining.cc
+++ b/deps/v8/src/compiler/wasm-inlining.cc
@@ -12,6 +12,7 @@
#include "src/wasm/graph-builder-interface.h"
#include "src/wasm/wasm-features.h"
#include "src/wasm/wasm-module.h"
+#include "src/wasm/wasm-subtyping.h"
namespace v8 {
namespace internal {
@@ -122,7 +123,29 @@ void WasmInliner::Finalize() {
&module()->functions[candidate.inlinee_index];
base::Vector<const byte> function_bytes =
wire_bytes_->GetCode(inlinee->code);
- const wasm::FunctionBody inlinee_body(inlinee->sig, inlinee->code.offset(),
+ // We use the signature based on the real argument types stored in the call
+ // node. This is more specific than the callee's formal signature and might
+ // enable some optimizations.
+ const wasm::FunctionSig* real_sig =
+ CallDescriptorOf(call->op())->wasm_sig();
+
+#if DEBUG
+ // Check that the real signature is a subtype of the formal one.
+ const wasm::FunctionSig* formal_sig =
+ WasmGraphBuilder::Int64LoweredSig(zone(), inlinee->sig);
+ CHECK_EQ(real_sig->parameter_count(), formal_sig->parameter_count());
+ CHECK_EQ(real_sig->return_count(), formal_sig->return_count());
+ for (size_t i = 0; i < real_sig->parameter_count(); i++) {
+ CHECK(wasm::IsSubtypeOf(real_sig->GetParam(i), formal_sig->GetParam(i),
+ module()));
+ }
+ for (size_t i = 0; i < real_sig->return_count(); i++) {
+ CHECK(wasm::IsSubtypeOf(formal_sig->GetReturn(i), real_sig->GetReturn(i),
+ module()));
+ }
+#endif
+
+ const wasm::FunctionBody inlinee_body(real_sig, inlinee->code.offset(),
function_bytes.begin(),
function_bytes.end());
wasm::WasmFeatures detected;
@@ -131,24 +154,25 @@ void WasmInliner::Finalize() {
std::vector<WasmLoopInfo> infos;
size_t subgraph_min_node_id = graph()->NodeCount();
- wasm::DecodeResult result;
Node* inlinee_start;
Node* inlinee_end;
{
Graph::SubgraphScope scope(graph());
- result = wasm::BuildTFGraph(
+ wasm::DecodeResult result = wasm::BuildTFGraph(
zone()->allocator(), env_->enabled_features, module(), &builder,
&detected, inlinee_body, &infos, node_origins_,
candidate.inlinee_index, wasm::kInlinedFunction);
+ if (result.failed()) {
+ // This can happen if the inlinee has never been compiled before and is
+      // invalid. Return, as there is no point in continuing to optimize.
+ TRACE("failed to compile]\n")
+ return;
+ }
+
+ builder.LowerInt64(WasmGraphBuilder::kCalledFromWasm);
inlinee_start = graph()->start();
inlinee_end = graph()->end();
}
- if (result.failed()) {
- // This can happen if the inlinee has never been compiled before and is
- // invalid. Return, as there is no point to keep optimizing.
- TRACE("failed to compile]\n")
- return;
- }
size_t additional_nodes = graph()->NodeCount() - subgraph_min_node_id;
if (current_graph_size_ + additional_nodes >
@@ -168,6 +192,7 @@ void WasmInliner::Finalize() {
} else {
InlineTailCall(call, inlinee_start, inlinee_end);
}
+ call->Kill();
// Returning after only one inlining has been tried and found worse.
}
}
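
The DEBUG block added to WasmInliner::Finalize asserts that the call-site ("real") signature is compatible with the callee's formal signature: every real parameter must be a subtype of the corresponding formal parameter, and every formal return must be a subtype of the corresponding real return. A self-contained sketch of that compatibility check, with placeholder types instead of wasm::ValueType and the module-aware wasm::IsSubtypeOf, is shown below:

#include <cstddef>
#include <vector>

// Placeholder value type and subtype relation; the real check consults the
// module's type hierarchy.
struct ValueType {
  int id;
};
bool IsSubtypeOf(ValueType sub, ValueType super) { return sub.id == super.id; }

struct Sig {
  std::vector<ValueType> params;
  std::vector<ValueType> returns;
};

bool SignaturesAreCompatible(const Sig& real, const Sig& formal) {
  if (real.params.size() != formal.params.size() ||
      real.returns.size() != formal.returns.size()) {
    return false;
  }
  for (size_t i = 0; i < real.params.size(); i++) {
    // The call site may pass more precise argument types than declared.
    if (!IsSubtypeOf(real.params[i], formal.params[i])) return false;
  }
  for (size_t i = 0; i < real.returns.size(); i++) {
    // The callee's declared returns may be more precise than what the call
    // site expects back.
    if (!IsSubtypeOf(formal.returns[i], real.returns[i])) return false;
  }
  return true;
}
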
diff --git a/deps/v8/src/compiler/wasm-inlining.h b/deps/v8/src/compiler/wasm-inlining.h
index afe4ef0f78..0a2b9d2c51 100644
--- a/deps/v8/src/compiler/wasm-inlining.h
+++ b/deps/v8/src/compiler/wasm-inlining.h
@@ -127,8 +127,8 @@ class WasmInliner final : public AdvancedReducer {
}
// The smallest size in TF nodes any meaningful wasm function can have
- // (start, instance parameter, end).
- static constexpr size_t kMinimumFunctionNodeCount = 3;
+ // (start, return, IntConstant(0), end).
+ static constexpr size_t kMinimumFunctionNodeCount = 4;
Reduction ReduceCall(Node* call);
void InlineCall(Node* call, Node* callee_start, Node* callee_end,
diff --git a/deps/v8/src/d8/d8-test.cc b/deps/v8/src/d8/d8-test.cc
index e6c57ead3d..d5af43b532 100644
--- a/deps/v8/src/d8/d8-test.cc
+++ b/deps/v8/src/d8/d8-test.cc
@@ -17,8 +17,7 @@
// and resetting these counters.
// Make sure to sync the following with src/compiler/globals.h.
-#if defined(V8_TARGET_ARCH_X64) || \
- (defined(V8_TARGET_ARCH_ARM64) && !defined(USE_SIMULATOR))
+#if defined(V8_TARGET_ARCH_X64) || defined(V8_TARGET_ARCH_ARM64)
#define V8_ENABLE_FP_PARAMS_IN_C_LINKAGE
#endif
@@ -40,6 +39,22 @@ namespace {
class FastCApiObject {
public:
+#ifdef V8_USE_SIMULATOR_WITH_GENERIC_C_CALLS
+ static AnyCType AddAllFastCallbackPatch(AnyCType receiver,
+ AnyCType should_fallback,
+ AnyCType arg_i32, AnyCType arg_u32,
+ AnyCType arg_i64, AnyCType arg_u64,
+ AnyCType arg_f32, AnyCType arg_f64,
+ AnyCType options) {
+ AnyCType ret;
+ ret.double_value = AddAllFastCallback(
+ receiver.object_value, should_fallback.bool_value, arg_i32.int32_value,
+ arg_u32.uint32_value, arg_i64.int64_value, arg_u64.uint64_value,
+ arg_f32.float_value, arg_f64.double_value, *options.options_value);
+ return ret;
+ }
+
+#endif // V8_USE_SIMULATOR_WITH_GENERIC_C_CALLS
static double AddAllFastCallback(Local<Object> receiver, bool should_fallback,
int32_t arg_i32, uint32_t arg_u32,
int64_t arg_i64, uint64_t arg_u64,
@@ -52,6 +67,8 @@ class FastCApiObject {
if (should_fallback) {
options.fallback = 1;
return 0;
+ } else {
+ options.fallback = 0;
}
return static_cast<double>(arg_i32) + static_cast<double>(arg_u32) +
@@ -99,6 +116,24 @@ class FastCApiObject {
#else
typedef int32_t Type;
#endif // V8_ENABLE_FP_PARAMS_IN_C_LINKAGE
+#ifdef V8_USE_SIMULATOR_WITH_GENERIC_C_CALLS
+ static AnyCType AddAllSequenceFastCallbackPatch(AnyCType receiver,
+ AnyCType should_fallback,
+ AnyCType seq_arg,
+ AnyCType options) {
+ AnyCType ret;
+#ifdef V8_ENABLE_FP_PARAMS_IN_C_LINKAGE
+ ret.double_value = AddAllSequenceFastCallback(
+ receiver.object_value, should_fallback.bool_value,
+ seq_arg.sequence_value, *options.options_value);
+#else
+ ret.int32_value = AddAllSequenceFastCallback(
+ receiver.object_value, should_fallback.bool_value,
+ seq_arg.sequence_value, *options.options_value);
+#endif // V8_ENABLE_FP_PARAMS_IN_C_LINKAGE
+ return ret;
+ }
+#endif // V8_USE_SIMULATOR_WITH_GENERIC_C_CALLS
static Type AddAllSequenceFastCallback(Local<Object> receiver,
bool should_fallback,
Local<Array> seq_arg,
@@ -192,6 +227,57 @@ class FastCApiObject {
}
args.GetReturnValue().Set(Number::New(isolate, sum));
}
+#ifdef V8_USE_SIMULATOR_WITH_GENERIC_C_CALLS
+ template <typename T>
+ static const FastApiTypedArray<T>* AnyCTypeToTypedArray(AnyCType arg);
+
+ template <>
+ const FastApiTypedArray<int32_t>* AnyCTypeToTypedArray<int32_t>(
+ AnyCType arg) {
+ return arg.int32_ta_value;
+ }
+ template <>
+ const FastApiTypedArray<uint32_t>* AnyCTypeToTypedArray<uint32_t>(
+ AnyCType arg) {
+ return arg.uint32_ta_value;
+ }
+ template <>
+ const FastApiTypedArray<int64_t>* AnyCTypeToTypedArray<int64_t>(
+ AnyCType arg) {
+ return arg.int64_ta_value;
+ }
+ template <>
+ const FastApiTypedArray<uint64_t>* AnyCTypeToTypedArray<uint64_t>(
+ AnyCType arg) {
+ return arg.uint64_ta_value;
+ }
+ template <>
+ const FastApiTypedArray<float>* AnyCTypeToTypedArray<float>(AnyCType arg) {
+ return arg.float_ta_value;
+ }
+ template <>
+ const FastApiTypedArray<double>* AnyCTypeToTypedArray<double>(AnyCType arg) {
+ return arg.double_ta_value;
+ }
+
+ template <typename T>
+ static AnyCType AddAllTypedArrayFastCallbackPatch(AnyCType receiver,
+ AnyCType should_fallback,
+ AnyCType typed_array_arg,
+ AnyCType options) {
+ AnyCType ret;
+#ifdef V8_ENABLE_FP_PARAMS_IN_C_LINKAGE
+ ret.double_value = AddAllTypedArrayFastCallback(
+ receiver.object_value, should_fallback.bool_value,
+ *AnyCTypeToTypedArray<T>(typed_array_arg), *options.options_value);
+#else
+ ret.int32_value = AddAllTypedArrayFastCallback(
+ receiver.object_value, should_fallback.bool_value,
+ *AnyCTypeToTypedArray<T>(typed_array_arg), *options.options_value);
+#endif // V8_ENABLE_FP_PARAMS_IN_C_LINKAGE
+ return ret;
+ }
+#endif // V8_USE_SIMULATOR_WITH_GENERIC_C_CALLS
template <typename T>
static Type AddAllTypedArrayFastCallback(
Local<Object> receiver, bool should_fallback,
@@ -276,6 +362,20 @@ class FastCApiObject {
UNREACHABLE();
}
+#ifdef V8_USE_SIMULATOR_WITH_GENERIC_C_CALLS
+ static AnyCType Add32BitIntFastCallbackPatch(AnyCType receiver,
+ AnyCType should_fallback,
+ AnyCType arg_i32,
+ AnyCType arg_u32,
+ AnyCType options) {
+ AnyCType ret;
+ ret.int32_value = Add32BitIntFastCallback(
+ receiver.object_value, should_fallback.bool_value, arg_i32.int32_value,
+ arg_u32.uint32_value, *options.options_value);
+ return ret;
+ }
+#endif // V8_USE_SIMULATOR_WITH_GENERIC_C_CALLS
+
static int Add32BitIntFastCallback(v8::Local<v8::Object> receiver,
bool should_fallback, int32_t arg_i32,
uint32_t arg_u32,
@@ -311,6 +411,30 @@ class FastCApiObject {
args.GetReturnValue().Set(Number::New(isolate, sum));
}
+#ifdef V8_USE_SIMULATOR_WITH_GENERIC_C_CALLS
+ static AnyCType AddAll32BitIntFastCallback_6ArgsPatch(
+ AnyCType receiver, AnyCType should_fallback, AnyCType arg1_i32,
+ AnyCType arg2_i32, AnyCType arg3_i32, AnyCType arg4_u32,
+ AnyCType arg5_u32, AnyCType arg6_u32, AnyCType options) {
+ AnyCType ret;
+ ret.int32_value = AddAll32BitIntFastCallback_6Args(
+ receiver.object_value, should_fallback.bool_value, arg1_i32.int32_value,
+ arg2_i32.int32_value, arg3_i32.int32_value, arg4_u32.uint32_value,
+ arg5_u32.uint32_value, arg6_u32.uint32_value, *options.options_value);
+ return ret;
+ }
+ static AnyCType AddAll32BitIntFastCallback_5ArgsPatch(
+ AnyCType receiver, AnyCType should_fallback, AnyCType arg1_i32,
+ AnyCType arg2_i32, AnyCType arg3_i32, AnyCType arg4_u32,
+ AnyCType arg5_u32, AnyCType options) {
+ AnyCType arg6;
+ arg6.uint32_value = 0;
+ return AddAll32BitIntFastCallback_6ArgsPatch(
+ receiver, should_fallback, arg1_i32, arg2_i32, arg3_i32, arg4_u32,
+ arg5_u32, arg6, options);
+ }
+#endif // V8_USE_SIMULATOR_WITH_GENERIC_C_CALLS
+
static int AddAll32BitIntFastCallback_6Args(
Local<Object> receiver, bool should_fallback, int32_t arg1_i32,
int32_t arg2_i32, int32_t arg3_i32, uint32_t arg4_u32, uint32_t arg5_u32,
@@ -520,7 +644,8 @@ Local<FunctionTemplate> Shell::CreateTestFastCApiTemplate(Isolate* isolate) {
Local<Signature> signature = Signature::New(isolate, api_obj_ctor);
{
CFunction add_all_c_func =
- CFunction::Make(FastCApiObject::AddAllFastCallback);
+ CFunction::Make(FastCApiObject::AddAllFastCallback V8_IF_USE_SIMULATOR(
+ FastCApiObject::AddAllFastCallbackPatch));
api_obj_ctor->PrototypeTemplate()->Set(
isolate, "add_all",
FunctionTemplate::New(isolate, FastCApiObject::AddAllSlowCallback,
@@ -528,8 +653,9 @@ Local<FunctionTemplate> Shell::CreateTestFastCApiTemplate(Isolate* isolate) {
ConstructorBehavior::kThrow,
SideEffectType::kHasSideEffect, &add_all_c_func));
- CFunction add_all_seq_c_func =
- CFunction::Make(FastCApiObject::AddAllSequenceFastCallback);
+ CFunction add_all_seq_c_func = CFunction::Make(
+ FastCApiObject::AddAllSequenceFastCallback V8_IF_USE_SIMULATOR(
+ FastCApiObject::AddAllSequenceFastCallbackPatch));
api_obj_ctor->PrototypeTemplate()->Set(
isolate, "add_all_sequence",
FunctionTemplate::New(
@@ -537,8 +663,11 @@ Local<FunctionTemplate> Shell::CreateTestFastCApiTemplate(Isolate* isolate) {
signature, 1, ConstructorBehavior::kThrow,
SideEffectType::kHasSideEffect, &add_all_seq_c_func));
- CFunction add_all_int32_typed_array_c_func =
- CFunction::Make(FastCApiObject::AddAllTypedArrayFastCallback<int32_t>);
+ CFunction add_all_int32_typed_array_c_func = CFunction::Make(
+ FastCApiObject::AddAllTypedArrayFastCallback<int32_t>
+ V8_IF_USE_SIMULATOR(
+ FastCApiObject::AddAllTypedArrayFastCallbackPatch<int32_t>));
+
api_obj_ctor->PrototypeTemplate()->Set(
isolate, "add_all_int32_typed_array",
FunctionTemplate::New(
@@ -546,8 +675,10 @@ Local<FunctionTemplate> Shell::CreateTestFastCApiTemplate(Isolate* isolate) {
Local<Value>(), signature, 1, ConstructorBehavior::kThrow,
SideEffectType::kHasSideEffect, &add_all_int32_typed_array_c_func));
- CFunction add_all_int64_typed_array_c_func =
- CFunction::Make(FastCApiObject::AddAllTypedArrayFastCallback<int64_t>);
+ CFunction add_all_int64_typed_array_c_func = CFunction::Make(
+ FastCApiObject::AddAllTypedArrayFastCallback<int64_t>
+ V8_IF_USE_SIMULATOR(
+ FastCApiObject::AddAllTypedArrayFastCallbackPatch<int64_t>));
api_obj_ctor->PrototypeTemplate()->Set(
isolate, "add_all_int64_typed_array",
FunctionTemplate::New(
@@ -555,8 +686,10 @@ Local<FunctionTemplate> Shell::CreateTestFastCApiTemplate(Isolate* isolate) {
Local<Value>(), signature, 1, ConstructorBehavior::kThrow,
SideEffectType::kHasSideEffect, &add_all_int64_typed_array_c_func));
- CFunction add_all_uint64_typed_array_c_func =
- CFunction::Make(FastCApiObject::AddAllTypedArrayFastCallback<uint64_t>);
+ CFunction add_all_uint64_typed_array_c_func = CFunction::Make(
+ FastCApiObject::AddAllTypedArrayFastCallback<uint64_t>
+ V8_IF_USE_SIMULATOR(
+ FastCApiObject::AddAllTypedArrayFastCallbackPatch<uint64_t>));
api_obj_ctor->PrototypeTemplate()->Set(
isolate, "add_all_uint64_typed_array",
FunctionTemplate::New(
@@ -565,8 +698,10 @@ Local<FunctionTemplate> Shell::CreateTestFastCApiTemplate(Isolate* isolate) {
SideEffectType::kHasSideEffect,
&add_all_uint64_typed_array_c_func));
- CFunction add_all_uint32_typed_array_c_func =
- CFunction::Make(FastCApiObject::AddAllTypedArrayFastCallback<uint32_t>);
+ CFunction add_all_uint32_typed_array_c_func = CFunction::Make(
+ FastCApiObject::AddAllTypedArrayFastCallback<uint32_t>
+ V8_IF_USE_SIMULATOR(
+ FastCApiObject::AddAllTypedArrayFastCallbackPatch<uint32_t>));
api_obj_ctor->PrototypeTemplate()->Set(
isolate, "add_all_uint32_typed_array",
FunctionTemplate::New(
@@ -575,8 +710,9 @@ Local<FunctionTemplate> Shell::CreateTestFastCApiTemplate(Isolate* isolate) {
SideEffectType::kHasSideEffect,
&add_all_uint32_typed_array_c_func));
- CFunction add_all_float32_typed_array_c_func =
- CFunction::Make(FastCApiObject::AddAllTypedArrayFastCallback<float>);
+ CFunction add_all_float32_typed_array_c_func = CFunction::Make(
+ FastCApiObject::AddAllTypedArrayFastCallback<float> V8_IF_USE_SIMULATOR(
+ FastCApiObject::AddAllTypedArrayFastCallbackPatch<float>));
api_obj_ctor->PrototypeTemplate()->Set(
isolate, "add_all_float32_typed_array",
FunctionTemplate::New(
@@ -585,8 +721,10 @@ Local<FunctionTemplate> Shell::CreateTestFastCApiTemplate(Isolate* isolate) {
SideEffectType::kHasSideEffect,
&add_all_float32_typed_array_c_func));
- CFunction add_all_float64_typed_array_c_func =
- CFunction::Make(FastCApiObject::AddAllTypedArrayFastCallback<double>);
+ CFunction add_all_float64_typed_array_c_func = CFunction::Make(
+ FastCApiObject::AddAllTypedArrayFastCallback<double>
+ V8_IF_USE_SIMULATOR(
+ FastCApiObject::AddAllTypedArrayFastCallbackPatch<double>));
api_obj_ctor->PrototypeTemplate()->Set(
isolate, "add_all_float64_typed_array",
FunctionTemplate::New(
@@ -619,10 +757,12 @@ Local<FunctionTemplate> Shell::CreateTestFastCApiTemplate(Isolate* isolate) {
signature, 1, ConstructorBehavior::kThrow,
SideEffectType::kHasSideEffect, {add_all_invalid_overloads, 2}));
- CFunction add_all_32bit_int_6args_c_func =
- CFunction::Make(FastCApiObject::AddAll32BitIntFastCallback_6Args);
- CFunction add_all_32bit_int_5args_c_func =
- CFunction::Make(FastCApiObject::AddAll32BitIntFastCallback_5Args);
+ CFunction add_all_32bit_int_6args_c_func = CFunction::Make(
+ FastCApiObject::AddAll32BitIntFastCallback_6Args V8_IF_USE_SIMULATOR(
+ FastCApiObject::AddAll32BitIntFastCallback_6ArgsPatch));
+ CFunction add_all_32bit_int_5args_c_func = CFunction::Make(
+ FastCApiObject::AddAll32BitIntFastCallback_5Args V8_IF_USE_SIMULATOR(
+ FastCApiObject::AddAll32BitIntFastCallback_5ArgsPatch));
const CFunction c_function_overloads[] = {add_all_32bit_int_6args_c_func,
add_all_32bit_int_5args_c_func};
api_obj_ctor->PrototypeTemplate()->Set(
@@ -632,8 +772,9 @@ Local<FunctionTemplate> Shell::CreateTestFastCApiTemplate(Isolate* isolate) {
signature, 1, ConstructorBehavior::kThrow,
SideEffectType::kHasSideEffect, {c_function_overloads, 2}));
- CFunction add_32bit_int_c_func =
- CFunction::Make(FastCApiObject::Add32BitIntFastCallback);
+ CFunction add_32bit_int_c_func = CFunction::Make(
+ FastCApiObject::Add32BitIntFastCallback V8_IF_USE_SIMULATOR(
+ FastCApiObject::Add32BitIntFastCallbackPatch));
api_obj_ctor->PrototypeTemplate()->Set(
isolate, "add_32bit_int",
FunctionTemplate::New(
diff --git a/deps/v8/src/d8/d8.cc b/deps/v8/src/d8/d8.cc
index 3a04345b0f..770fcdd0b8 100644
--- a/deps/v8/src/d8/d8.cc
+++ b/deps/v8/src/d8/d8.cc
@@ -453,7 +453,9 @@ class ExternalOwningOneByteStringResource
std::unique_ptr<base::OS::MemoryMappedFile> file_;
};
+// static variables:
CounterMap* Shell::counter_map_;
+base::SharedMutex Shell::counter_mutex_;
base::OS::MemoryMappedFile* Shell::counters_file_ = nullptr;
CounterCollection Shell::local_counters_;
CounterCollection* Shell::counters_ = &local_counters_;
@@ -644,9 +646,39 @@ MaybeLocal<T> Shell::CompileString(Isolate* isolate, Local<Context> context,
return result;
}
+namespace {
+// For testing.
+const int kHostDefinedOptionsLength = 2;
+const uint32_t kHostDefinedOptionsMagicConstant = 0xF1F2F3F0;
+
+ScriptOrigin CreateScriptOrigin(Isolate* isolate, Local<String> resource_name,
+ v8::ScriptType type) {
+ Local<PrimitiveArray> options =
+ PrimitiveArray::New(isolate, kHostDefinedOptionsLength);
+ options->Set(isolate, 0,
+ v8::Uint32::New(isolate, kHostDefinedOptionsMagicConstant));
+ options->Set(isolate, 1, resource_name);
+ return ScriptOrigin(isolate, resource_name, 0, 0, false, -1, Local<Value>(),
+ false, false, type == v8::ScriptType::kModule, options);
+}
+
+bool IsValidHostDefinedOptions(Local<Context> context, Local<Data> options,
+ Local<Value> resource_name) {
+ if (!options->IsFixedArray()) return false;
+ Local<FixedArray> array = options.As<FixedArray>();
+ if (array->Length() != kHostDefinedOptionsLength) return false;
+ uint32_t magic = 0;
+ if (!array->Get(context, 0).As<Value>()->Uint32Value(context).To(&magic)) {
+ return false;
+ }
+ if (magic != kHostDefinedOptionsMagicConstant) return false;
+ return array->Get(context, 1).As<String>()->StrictEquals(resource_name);
+}
+} // namespace
+
// Executes a string within the current v8 context.
bool Shell::ExecuteString(Isolate* isolate, Local<String> source,
- Local<Value> name, PrintResult print_result,
+ Local<String> name, PrintResult print_result,
ReportExceptions report_exceptions,
ProcessMessageQueue process_message_queue) {
i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(isolate);
@@ -655,7 +687,8 @@ bool Shell::ExecuteString(Isolate* isolate, Local<String> source,
i::Handle<i::String> str = Utils::OpenHandle(*(source));
// Set up ParseInfo.
- i::UnoptimizedCompileState compile_state(i_isolate);
+ i::UnoptimizedCompileState compile_state;
+ i::ReusableUnoptimizedCompileState reusable_state(i_isolate);
i::UnoptimizedCompileFlags flags =
i::UnoptimizedCompileFlags::ForToplevelCompile(
@@ -666,7 +699,7 @@ bool Shell::ExecuteString(Isolate* isolate, Local<String> source,
flags.set_is_eager(true);
}
- i::ParseInfo parse_info(i_isolate, flags, &compile_state);
+ i::ParseInfo parse_info(i_isolate, flags, &compile_state, &reusable_state);
i::Handle<i::Script> script = parse_info.CreateScript(
i_isolate, str, i::kNullMaybeHandle, ScriptOriginOptions());
@@ -702,9 +735,9 @@ bool Shell::ExecuteString(Isolate* isolate, Local<String> source,
Local<Context> realm =
Local<Context>::New(isolate, data->realms_[data->realm_current_]);
Context::Scope context_scope(realm);
- MaybeLocal<Script> maybe_script;
Local<Context> context(isolate->GetCurrentContext());
- ScriptOrigin origin(isolate, name);
+ ScriptOrigin origin =
+ CreateScriptOrigin(isolate, name, ScriptType::kClassic);
for (int i = 1; i < options.repeat_compile; ++i) {
HandleScope handle_scope_for_compiling(isolate);
@@ -726,8 +759,14 @@ bool Shell::ExecuteString(Isolate* isolate, Local<String> source,
StoreInCodeCache(isolate, source, cached_data);
delete cached_data;
}
- if (options.compile_only) {
- return true;
+ if (options.compile_only) return true;
+ if (options.compile_options == ScriptCompiler::kConsumeCodeCache) {
+ i::Handle<i::Script> i_script(
+ i::Script::cast(Utils::OpenHandle(*script)->shared().script()),
+ i_isolate);
+ // TODO(cbruni, chromium:1244145): remove once context-allocated.
+ i_script->set_host_defined_options(i::FixedArray::cast(
+ *Utils::OpenHandle(*(origin.GetHostDefinedOptions()))));
}
maybe_result = script->Run(realm);
if (options.code_cache_options ==
@@ -768,7 +807,6 @@ bool Shell::ExecuteString(Isolate* isolate, Local<String> source,
snapshot_data.buffer_size);
} else {
CHECK(try_catch.HasCaught());
- ReportException(isolate, &try_catch);
return false;
}
} else if (options.web_snapshot_output) {
@@ -1005,9 +1043,11 @@ MaybeLocal<Module> Shell::FetchModuleTree(Local<Module> referrer,
v8::String::NewFromUtf8(isolate, msg.c_str()).ToLocalChecked());
return MaybeLocal<Module>();
}
- ScriptOrigin origin(
- isolate, String::NewFromUtf8(isolate, file_name.c_str()).ToLocalChecked(),
- 0, 0, false, -1, Local<Value>(), false, false, true);
+
+ Local<String> resource_name =
+ String::NewFromUtf8(isolate, file_name.c_str()).ToLocalChecked();
+ ScriptOrigin origin =
+ CreateScriptOrigin(isolate, resource_name, ScriptType::kModule);
Local<Module> module;
if (module_type == ModuleType::kJavaScript) {
@@ -1100,14 +1140,10 @@ MaybeLocal<Value> Shell::JSONModuleEvaluationSteps(Local<Context> context,
CHECK(!try_catch.HasCaught());
CHECK(!result.IsNothing() && result.FromJust());
- if (i::FLAG_harmony_top_level_await) {
- Local<Promise::Resolver> resolver =
- Promise::Resolver::New(context).ToLocalChecked();
- resolver->Resolve(context, Undefined(isolate)).ToChecked();
- return resolver->GetPromise();
- }
-
- return Undefined(isolate);
+ Local<Promise::Resolver> resolver =
+ Promise::Resolver::New(context).ToLocalChecked();
+ resolver->Resolve(context, Undefined(isolate)).ToChecked();
+ return resolver->GetPromise();
}
struct DynamicImportData {
@@ -1185,23 +1221,30 @@ void Shell::ModuleResolutionFailureCallback(
}
MaybeLocal<Promise> Shell::HostImportModuleDynamically(
- Local<Context> context, Local<ScriptOrModule> script_or_module,
- Local<String> specifier, Local<FixedArray> import_assertions) {
+ Local<Context> context, Local<Data> host_defined_options,
+ Local<Value> resource_name, Local<String> specifier,
+ Local<FixedArray> import_assertions) {
Isolate* isolate = context->GetIsolate();
MaybeLocal<Promise::Resolver> maybe_resolver =
Promise::Resolver::New(context);
Local<Promise::Resolver> resolver;
- if (maybe_resolver.ToLocal(&resolver)) {
- DynamicImportData* data = new DynamicImportData(
- isolate, script_or_module->GetResourceName().As<String>(), specifier,
- import_assertions, resolver);
+ if (!maybe_resolver.ToLocal(&resolver)) return MaybeLocal<Promise>();
+
+ if (!IsValidHostDefinedOptions(context, host_defined_options,
+ resource_name)) {
+ resolver
+ ->Reject(context, v8::Exception::TypeError(String::NewFromUtf8Literal(
+ isolate, "Invalid host defined options")))
+ .ToChecked();
+ } else {
+ DynamicImportData* data =
+ new DynamicImportData(isolate, resource_name.As<String>(), specifier,
+ import_assertions, resolver);
PerIsolateData::Get(isolate)->AddDynamicImportData(data);
isolate->EnqueueMicrotask(Shell::DoHostImportModuleDynamically, data);
- return resolver->GetPromise();
}
-
- return MaybeLocal<Promise>();
+ return resolver->GetPromise();
}
void Shell::HostInitializeImportMetaObject(Local<Context> context,
@@ -1278,7 +1321,7 @@ void Shell::DoHostImportModuleDynamically(void* import_data) {
if (root_module->InstantiateModule(realm, ResolveModuleCallback)
.FromMaybe(false)) {
maybe_result = root_module->Evaluate(realm);
- CHECK_IMPLIES(i::FLAG_harmony_top_level_await, !maybe_result.IsEmpty());
+ CHECK(!maybe_result.IsEmpty());
EmptyMessageQueues(isolate);
}
@@ -1290,28 +1333,21 @@ void Shell::DoHostImportModuleDynamically(void* import_data) {
}
Local<Value> module_namespace = root_module->GetModuleNamespace();
- if (i::FLAG_harmony_top_level_await) {
- Local<Promise> result_promise(result.As<Promise>());
-
- // Setup callbacks, and then chain them to the result promise.
- // ModuleResolutionData will be deleted by the callbacks.
- auto module_resolution_data =
- new ModuleResolutionData(isolate, module_namespace, resolver);
- Local<v8::External> edata = External::New(isolate, module_resolution_data);
- Local<Function> callback_success;
- CHECK(Function::New(realm, ModuleResolutionSuccessCallback, edata)
- .ToLocal(&callback_success));
- Local<Function> callback_failure;
- CHECK(Function::New(realm, ModuleResolutionFailureCallback, edata)
- .ToLocal(&callback_failure));
- result_promise->Then(realm, callback_success, callback_failure)
- .ToLocalChecked();
- } else {
- // TODO(cbruni): Clean up exception handling after introducing new
- // API for evaluating async modules.
- DCHECK(!try_catch.HasCaught());
- resolver->Resolve(realm, module_namespace).ToChecked();
- }
+ Local<Promise> result_promise(result.As<Promise>());
+
+  // Set up callbacks, and then chain them to the result promise.
+ // ModuleResolutionData will be deleted by the callbacks.
+ auto module_resolution_data =
+ new ModuleResolutionData(isolate, module_namespace, resolver);
+ Local<v8::External> edata = External::New(isolate, module_resolution_data);
+ Local<Function> callback_success;
+ CHECK(Function::New(realm, ModuleResolutionSuccessCallback, edata)
+ .ToLocal(&callback_success));
+ Local<Function> callback_failure;
+ CHECK(Function::New(realm, ModuleResolutionFailureCallback, edata)
+ .ToLocal(&callback_failure));
+ result_promise->Then(realm, callback_success, callback_failure)
+ .ToLocalChecked();
}
bool Shell::ExecuteModule(Isolate* isolate, const char* file_name) {
@@ -1343,7 +1379,7 @@ bool Shell::ExecuteModule(Isolate* isolate, const char* file_name) {
if (root_module->InstantiateModule(realm, ResolveModuleCallback)
.FromMaybe(false)) {
maybe_result = root_module->Evaluate(realm);
- CHECK_IMPLIES(i::FLAG_harmony_top_level_await, !maybe_result.IsEmpty());
+ CHECK(!maybe_result.IsEmpty());
EmptyMessageQueues(isolate);
}
Local<Value> result;
@@ -1352,28 +1388,27 @@ bool Shell::ExecuteModule(Isolate* isolate, const char* file_name) {
ReportException(isolate, &try_catch);
return false;
}
- if (i::FLAG_harmony_top_level_await) {
- // Loop until module execution finishes
- // TODO(cbruni): This is a bit wonky. "Real" engines would not be
- // able to just busy loop waiting for execution to finish.
- Local<Promise> result_promise(result.As<Promise>());
- while (result_promise->State() == Promise::kPending) {
- isolate->PerformMicrotaskCheckpoint();
- }
- if (result_promise->State() == Promise::kRejected) {
- // If the exception has been caught by the promise pipeline, we rethrow
- // here in order to ReportException.
- // TODO(cbruni): Clean this up after we create a new API for the case
- // where TLA is enabled.
- if (!try_catch.HasCaught()) {
- isolate->ThrowException(result_promise->Result());
- } else {
- DCHECK_EQ(try_catch.Exception(), result_promise->Result());
- }
- ReportException(isolate, &try_catch);
- return false;
+ // Loop until module execution finishes
+ // TODO(cbruni): This is a bit wonky. "Real" engines would not be
+ // able to just busy loop waiting for execution to finish.
+ Local<Promise> result_promise(result.As<Promise>());
+ while (result_promise->State() == Promise::kPending) {
+ isolate->PerformMicrotaskCheckpoint();
+ }
+
+ if (result_promise->State() == Promise::kRejected) {
+ // If the exception has been caught by the promise pipeline, we rethrow
+ // here in order to ReportException.
+ // TODO(cbruni): Clean this up after we create a new API for the case
+ // where TLA is enabled.
+ if (!try_catch.HasCaught()) {
+ isolate->ThrowException(result_promise->Result());
+ } else {
+ DCHECK_EQ(try_catch.Exception(), result_promise->Result());
}
+ ReportException(isolate, &try_catch);
+ return false;
}
DCHECK(!try_catch.HasCaught());
@@ -1389,26 +1424,16 @@ bool Shell::ExecuteWebSnapshot(Isolate* isolate, const char* file_name) {
std::string absolute_path = NormalizePath(file_name, GetWorkingDirectory());
- TryCatch try_catch(isolate);
- try_catch.SetVerbose(true);
int length = 0;
std::unique_ptr<uint8_t[]> snapshot_data(
reinterpret_cast<uint8_t*>(ReadChars(absolute_path.c_str(), &length)));
if (length == 0) {
isolate->ThrowError("Error reading the web snapshot");
- DCHECK(try_catch.HasCaught());
- ReportException(isolate, &try_catch);
return false;
}
i::WebSnapshotDeserializer deserializer(isolate);
- if (!deserializer.UseWebSnapshot(snapshot_data.get(),
- static_cast<size_t>(length))) {
- DCHECK(try_catch.HasCaught());
- ReportException(isolate, &try_catch);
- return false;
- }
- DCHECK(!try_catch.HasCaught());
- return true;
+ return deserializer.UseWebSnapshot(snapshot_data.get(),
+ static_cast<size_t>(length));
}
PerIsolateData::PerIsolateData(Isolate* isolate)
@@ -1690,6 +1715,7 @@ void Shell::RealmGlobal(const v8::FunctionCallbackInfo<v8::Value>& args) {
MaybeLocal<Context> Shell::CreateRealm(
const v8::FunctionCallbackInfo<v8::Value>& args, int index,
v8::MaybeLocal<Value> global_object) {
+ const char* kGlobalHandleLabel = "d8::realm";
Isolate* isolate = args.GetIsolate();
TryCatch try_catch(isolate);
PerIsolateData* data = PerIsolateData::Get(isolate);
@@ -1698,7 +1724,11 @@ MaybeLocal<Context> Shell::CreateRealm(
index = data->realm_count_;
data->realms_ = new Global<Context>[++data->realm_count_];
for (int i = 0; i < index; ++i) {
- data->realms_[i].Reset(isolate, old_realms[i]);
+ Global<Context>& realm = data->realms_[i];
+ realm.Reset(isolate, old_realms[i]);
+ if (!realm.IsEmpty()) {
+ realm.AnnotateStrongRetainer(kGlobalHandleLabel);
+ }
old_realms[i].Reset();
}
delete[] old_realms;
@@ -1710,6 +1740,7 @@ MaybeLocal<Context> Shell::CreateRealm(
if (context.IsEmpty()) return MaybeLocal<Context>();
InitializeModuleEmbedderData(context);
data->realms_[index].Reset(isolate, context);
+ data->realms_[index].AnnotateStrongRetainer(kGlobalHandleLabel);
args.GetReturnValue().Set(index);
return context;
}
@@ -1830,9 +1861,10 @@ void Shell::RealmEval(const v8::FunctionCallbackInfo<v8::Value>& args) {
isolate->ThrowError("Invalid argument");
return;
}
- ScriptOrigin origin(isolate,
- String::NewFromUtf8Literal(isolate, "(d8)",
- NewStringType::kInternalized));
+ ScriptOrigin origin =
+ CreateScriptOrigin(isolate, String::NewFromUtf8Literal(isolate, "(d8)"),
+ ScriptType::kClassic);
+
ScriptCompiler::Source script_source(source, origin);
Local<UnboundScript> script;
if (!ScriptCompiler::CompileUnboundScript(isolate, &script_source)
@@ -1897,10 +1929,13 @@ void Shell::RealmTakeWebSnapshot(
// Take the snapshot in the specified Realm.
auto snapshot_data_shared = std::make_shared<i::WebSnapshotData>();
{
+ TryCatch try_catch(isolate);
+ try_catch.SetVerbose(true);
PerIsolateData::ExplicitRealmScope realm_scope(data, index);
i::WebSnapshotSerializer serializer(isolate);
if (!serializer.TakeSnapshot(realm_scope.context(), exports,
*snapshot_data_shared)) {
+ CHECK(try_catch.HasCaught());
args.GetReturnValue().Set(Undefined(isolate));
return;
}
@@ -2515,10 +2550,7 @@ void Shell::WorkerTerminate(const v8::FunctionCallbackInfo<v8::Value>& args) {
HandleScope handle_scope(isolate);
std::shared_ptr<Worker> worker =
GetWorkerFromInternalField(isolate, args.Holder());
- if (!worker.get()) {
- return;
- }
-
+ if (!worker.get()) return;
worker->Terminate();
}
@@ -2541,7 +2573,10 @@ void Shell::QuitOnce(v8::FunctionCallbackInfo<v8::Value>* args) {
.FromMaybe(0);
WaitForRunningWorkers();
args->GetIsolate()->Exit();
- OnExit(args->GetIsolate());
+ // As we exit the process anyway, we do not dispose the platform and other
+ // global data. Other isolates might still be running, so disposing here can
+ // cause them to crash.
+ OnExit(args->GetIsolate(), false);
base::OS::ExitProcess(exit_code);
}
@@ -2617,6 +2652,9 @@ void Shell::Fuzzilli(const v8::FunctionCallbackInfo<v8::Value>& args) {
void Shell::ReportException(Isolate* isolate, Local<v8::Message> message,
Local<v8::Value> exception_obj) {
+ // Using ErrorPrototypeToString for converting the error to string will fail
+ // if there's a pending exception.
+ CHECK(!reinterpret_cast<i::Isolate*>(isolate)->has_pending_exception());
HandleScope handle_scope(isolate);
Local<Context> context = isolate->GetCurrentContext();
bool enter_context = context.IsEmpty();
@@ -2681,18 +2719,16 @@ void Shell::ReportException(v8::Isolate* isolate, v8::TryCatch* try_catch) {
ReportException(isolate, try_catch->Message(), try_catch->Exception());
}
-int32_t* Counter::Bind(const char* name, bool is_histogram) {
- int i;
- for (i = 0; i < kMaxNameSize - 1 && name[i]; i++)
- name_[i] = static_cast<char>(name[i]);
- name_[i] = '\0';
+void Counter::Bind(const char* name, bool is_histogram) {
+ base::OS::StrNCpy(name_, kMaxNameSize, name, kMaxNameSize);
+ // Explicitly null-terminate, in case {name} is longer than {kMaxNameSize}.
+ name_[kMaxNameSize - 1] = '\0';
is_histogram_ = is_histogram;
- return ptr();
}
-void Counter::AddSample(int32_t sample) {
- count_++;
- sample_total_ += sample;
+void Counter::AddSample(int sample) {
+ count_.fetch_add(1, std::memory_order_relaxed);
+ sample_total_.fetch_add(sample, std::memory_order_relaxed);
}
CounterCollection::CounterCollection() {
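
Counter::AddSample above now bumps its totals with relaxed atomics, so multiple isolates and worker threads can record samples concurrently without a lock; relaxed ordering keeps each increment atomic but imposes no ordering against other memory operations, which is enough for plain statistics counters. A minimal standalone version of the same pattern:

#include <atomic>

struct RelaxedCounter {
  std::atomic<int> count{0};
  std::atomic<int> sample_total{0};

  void AddSample(int sample) {
    // Each fetch_add is atomic on its own; no ordering with other memory
    // accesses is required for a statistics counter.
    count.fetch_add(1, std::memory_order_relaxed);
    sample_total.fetch_add(sample, std::memory_order_relaxed);
  }
};
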
@@ -2723,30 +2759,38 @@ void Shell::MapCounters(v8::Isolate* isolate, const char* name) {
}
Counter* Shell::GetCounter(const char* name, bool is_histogram) {
- auto map_entry = counter_map_->find(name);
- Counter* counter =
- map_entry != counter_map_->end() ? map_entry->second : nullptr;
+ Counter* counter = nullptr;
+ {
+ base::SharedMutexGuard<base::kShared> mutex_guard(&counter_mutex_);
+ auto map_entry = counter_map_->find(name);
+ if (map_entry != counter_map_->end()) {
+ counter = map_entry->second;
+ }
+ }
if (counter == nullptr) {
- counter = counters_->GetNextCounter();
- if (counter != nullptr) {
+ base::SharedMutexGuard<base::kExclusive> mutex_guard(&counter_mutex_);
+
+ counter = (*counter_map_)[name];
+
+ if (counter == nullptr) {
+ counter = counters_->GetNextCounter();
+ if (counter == nullptr) {
+ // Too many counters.
+ return nullptr;
+ }
(*counter_map_)[name] = counter;
counter->Bind(name, is_histogram);
}
- } else {
- DCHECK(counter->is_histogram() == is_histogram);
}
+
+ DCHECK_EQ(is_histogram, counter->is_histogram());
return counter;
}
int* Shell::LookupCounter(const char* name) {
Counter* counter = GetCounter(name, false);
-
- if (counter != nullptr) {
- return counter->ptr();
- } else {
- return nullptr;
- }
+ return counter ? counter->ptr() : nullptr;
}
void* Shell::CreateHistogram(const char* name, int min, int max,
@@ -3293,18 +3337,32 @@ void Shell::WriteLcovData(v8::Isolate* isolate, const char* file) {
}
}
-void Shell::OnExit(v8::Isolate* isolate) {
+void Shell::OnExit(v8::Isolate* isolate, bool dispose) {
isolate->Dispose();
if (shared_isolate) {
i::Isolate::Delete(reinterpret_cast<i::Isolate*>(shared_isolate));
}
- if (i::FLAG_dump_counters || i::FLAG_dump_counters_nvp) {
+ // Simulate errors before disposing V8, as that resets flags (via
+ // FlagList::ResetAllFlags()), but error simulation reads the random seed.
+ if (options.simulate_errors && is_valid_fuzz_script()) {
+ // Simulate several errors detectable by fuzzers behind a flag if the
+ // minimum file size for fuzzing was executed.
+ FuzzerMonitor::SimulateErrors();
+ }
+
+ if (dispose) {
+ V8::Dispose();
+ V8::DisposePlatform();
+ }
+
+ if (options.dump_counters || options.dump_counters_nvp) {
+ base::SharedMutexGuard<base::kShared> mutex_guard(&counter_mutex_);
std::vector<std::pair<std::string, Counter*>> counters(
counter_map_->begin(), counter_map_->end());
std::sort(counters.begin(), counters.end());
- if (i::FLAG_dump_counters_nvp) {
+ if (options.dump_counters_nvp) {
// Dump counters as name-value pairs.
for (const auto& pair : counters) {
std::string key = pair.first;
@@ -3348,13 +3406,12 @@ void Shell::OnExit(v8::Isolate* isolate) {
}
}
- delete counters_file_;
- delete counter_map_;
-
- if (options.simulate_errors && is_valid_fuzz_script()) {
- // Simulate several errors detectable by fuzzers behind a flag if the
- // minimum file size for fuzzing was executed.
- FuzzerMonitor::SimulateErrors();
+ // Only delete the counters if we are done executing; after calling `quit`,
+ // other isolates might still be running and accessing that memory. This is a
+ // memory leak, which is OK in this case.
+ if (dispose) {
+ delete counters_file_;
+ delete counter_map_;
}
}
@@ -3940,7 +3997,7 @@ void SerializationDataQueue::Clear() {
}
Worker::Worker(const char* script) : script_(i::StrDup(script)) {
- running_.store(false);
+ state_.store(State::kReady);
}
Worker::~Worker() {
@@ -3952,8 +4009,11 @@ Worker::~Worker() {
script_ = nullptr;
}
+bool Worker::is_running() const { return state_.load() == State::kRunning; }
+
bool Worker::StartWorkerThread(std::shared_ptr<Worker> worker) {
- worker->running_.store(true);
+ auto expected = State::kReady;
+ CHECK(worker->state_.compare_exchange_strong(expected, State::kRunning));
auto thread = new WorkerThread(worker);
worker->thread_ = thread;
if (thread->Start()) {
@@ -3994,12 +4054,10 @@ class ProcessMessageTask : public i::CancelableTask {
};
void Worker::PostMessage(std::unique_ptr<SerializationData> data) {
+ if (!is_running()) return;
// Hold the worker_mutex_ so that the worker thread can't delete task_runner_
- // after we've checked running_.
+ // after we've checked is_running().
base::MutexGuard lock_guard(&worker_mutex_);
- if (!running_.load()) {
- return;
- }
std::unique_ptr<v8::Task> task(new ProcessMessageTask(
task_manager_, shared_from_this(), std::move(data)));
task_runner_->PostNonNestableTask(std::move(task));
@@ -4012,9 +4070,12 @@ class TerminateTask : public i::CancelableTask {
: i::CancelableTask(task_manager), worker_(worker) {}
void RunInternal() override {
- // Make sure the worker doesn't enter the task loop after processing this
- // task.
- worker_->running_.store(false);
+ auto expected = Worker::State::kTerminating;
+ if (!worker_->state_.compare_exchange_strong(expected,
+ Worker::State::kTerminated)) {
+ // Thread was joined in the meantime.
+ CHECK_EQ(worker_->state_.load(), Worker::State::kTerminatingAndJoining);
+ }
}
private:
@@ -4026,37 +4087,49 @@ std::unique_ptr<SerializationData> Worker::GetMessage() {
while (!out_queue_.Dequeue(&result)) {
// If the worker is no longer running, and there are no messages in the
// queue, don't expect any more messages from it.
- if (!running_.load()) {
- break;
- }
+ if (!is_running()) break;
out_semaphore_.Wait();
}
return result;
}
+void Worker::TerminateAndWaitForThread() {
+ Terminate();
+ // Don't double-join a terminated thread.
+ auto expected = State::kTerminating;
+ if (!state_.compare_exchange_strong(expected,
+ State::kTerminatingAndJoining)) {
+ expected = State::kTerminated;
+ if (!state_.compare_exchange_strong(expected,
+ State::kTerminatingAndJoining)) {
+ // Avoid double-joining thread.
+ DCHECK(state_.load() == State::kTerminatingAndJoining ||
+ state_.load() == State::kTerminatedAndJoined);
+ return;
+ }
+ }
+
+ thread_->Join();
+ expected = State::kTerminatingAndJoining;
+ CHECK(state_.compare_exchange_strong(expected, State::kTerminatedAndJoined));
+}
+
void Worker::Terminate() {
+ auto expected = State::kRunning;
+ if (!state_.compare_exchange_strong(expected, State::kTerminating)) return;
// Hold the worker_mutex_ so that the worker thread can't delete task_runner_
- // after we've checked running_.
+ // after we've checked state_.
base::MutexGuard lock_guard(&worker_mutex_);
- if (!running_.load()) {
- return;
- }
+ CHECK(state_.load() == State::kTerminating ||
+ state_.load() == State::kTerminatingAndJoining);
// Post a task to wake up the worker thread.
std::unique_ptr<v8::Task> task(
new TerminateTask(task_manager_, shared_from_this()));
task_runner_->PostTask(std::move(task));
}
-void Worker::TerminateAndWaitForThread() {
- Terminate();
- thread_->Join();
-}
-
void Worker::ProcessMessage(std::unique_ptr<SerializationData> data) {
- if (!running_.load()) {
- return;
- }
-
+ if (!is_running()) return;
DCHECK_NOT_NULL(isolate_);
HandleScope scope(isolate_);
Local<Context> context = context_.Get(isolate_);
@@ -4087,10 +4160,10 @@ void Worker::ProcessMessages() {
i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(isolate_);
i::SaveAndSwitchContext saved_context(i_isolate, i::Context());
SealHandleScope shs(isolate_);
- while (running_.load() && v8::platform::PumpMessageLoop(
- g_default_platform, isolate_,
- platform::MessageLoopBehavior::kWaitForWork)) {
- if (running_.load()) {
+ while (is_running() && v8::platform::PumpMessageLoop(
+ g_default_platform, isolate_,
+ platform::MessageLoopBehavior::kWaitForWork)) {
+ if (is_running()) {
MicrotasksScope::PerformCheckpoint(isolate_);
}
}
@@ -4165,10 +4238,12 @@ void Worker::ExecuteInThread() {
}
// TODO(cbruni): Check for unhandled promises here.
{
- // Hold the mutex to ensure running_ and task_runner_ change state
+ // Hold the mutex to ensure task_runner_ changes state
// atomically (see Worker::PostMessage which reads them).
base::MutexGuard lock_guard(&worker_mutex_);
- running_.store(false);
+ // Mark worker as terminated if it's still running.
+ auto expected = State::kRunning;
+ state_.compare_exchange_strong(expected, State::kTerminated);
task_runner_.reset();
task_manager_ = nullptr;
}
@@ -4282,6 +4357,14 @@ bool Shell::SetOptions(int argc, char* argv[]) {
} else if (strcmp(argv[i], "--no-fail") == 0) {
options.no_fail = true;
argv[i] = nullptr;
+ } else if (strcmp(argv[i], "--dump-counters") == 0) {
+ i::FLAG_slow_histograms = true;
+ options.dump_counters = true;
+ argv[i] = nullptr;
+ } else if (strcmp(argv[i], "--dump-counters-nvp") == 0) {
+ i::FLAG_slow_histograms = true;
+ options.dump_counters_nvp = true;
+ argv[i] = nullptr;
} else if (strncmp(argv[i], "--icu-data-file=", 16) == 0) {
options.icu_data_file = argv[i] + 16;
argv[i] = nullptr;
@@ -5132,7 +5215,7 @@ int Shell::Main(int argc, char* argv[]) {
base::SysInfo::AmountOfVirtualMemory());
Shell::counter_map_ = new CounterMap();
- if (i::FLAG_dump_counters || i::FLAG_dump_counters_nvp ||
+ if (options.dump_counters || options.dump_counters_nvp ||
i::TracingFlags::is_gc_stats_enabled()) {
create_params.counter_lookup_callback = LookupCounter;
create_params.create_histogram_callback = CreateHistogram;
@@ -5152,7 +5235,7 @@ int Shell::Main(int argc, char* argv[]) {
Isolate::CreateParams shared_create_params;
shared_create_params.array_buffer_allocator = Shell::array_buffer_allocator;
shared_isolate =
- reinterpret_cast<Isolate*>(i::Isolate::NewShared(create_params));
+ reinterpret_cast<Isolate*>(i::Isolate::NewShared(shared_create_params));
create_params.experimental_attach_to_shared_isolate = shared_isolate;
}
@@ -5327,10 +5410,7 @@ int Shell::Main(int argc, char* argv[]) {
#endif // V8_FUZZILLI
} while (fuzzilli_reprl);
}
- OnExit(isolate);
-
- V8::Dispose();
- V8::ShutdownPlatform();
+ OnExit(isolate, true);
// Delete the platform explicitly here to write the tracing output to the
// tracing file.
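
Shell::GetCounter above switches to a double-checked pattern guarded by the new counter_mutex_: the common path takes a shared lock for the lookup, and only a miss takes the exclusive lock, re-checks the map, and creates the entry. The same structure with the standard library instead of base::SharedMutex looks roughly like this:

#include <map>
#include <mutex>
#include <shared_mutex>
#include <string>

struct Counter {};

std::shared_mutex g_counter_mutex;
std::map<std::string, Counter*> g_counter_map;

Counter* GetOrCreateCounter(const std::string& name) {
  {
    // Fast path: shared (reader) lock for the lookup.
    std::shared_lock<std::shared_mutex> read_lock(g_counter_mutex);
    auto it = g_counter_map.find(name);
    if (it != g_counter_map.end()) return it->second;
  }
  // Slow path: exclusive lock, then re-check in case another thread inserted
  // the counter between releasing the shared lock and acquiring this one.
  std::unique_lock<std::shared_mutex> write_lock(g_counter_mutex);
  Counter*& slot = g_counter_map[name];
  if (slot == nullptr) slot = new Counter();
  return slot;
}
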
diff --git a/deps/v8/src/d8/d8.h b/deps/v8/src/d8/d8.h
index 7490e91b9d..61f44455fb 100644
--- a/deps/v8/src/d8/d8.h
+++ b/deps/v8/src/d8/d8.h
@@ -45,18 +45,24 @@ struct DynamicImportData;
class Counter {
public:
static const int kMaxNameSize = 64;
- int32_t* Bind(const char* name, bool histogram);
- int32_t* ptr() { return &count_; }
- int32_t count() { return count_; }
- int32_t sample_total() { return sample_total_; }
- bool is_histogram() { return is_histogram_; }
+ void Bind(const char* name, bool histogram);
+ // TODO(12482): Return pointer to an atomic.
+ int* ptr() {
+ STATIC_ASSERT(sizeof(int) == sizeof(count_));
+ return reinterpret_cast<int*>(&count_);
+ }
+ int count() const { return count_.load(std::memory_order_relaxed); }
+ int sample_total() const {
+ return sample_total_.load(std::memory_order_relaxed);
+ }
+ bool is_histogram() const { return is_histogram_; }
void AddSample(int32_t sample);
private:
- int32_t count_;
- int32_t sample_total_;
+ std::atomic<int> count_;
+ std::atomic<int> sample_total_;
bool is_histogram_;
- uint8_t name_[kMaxNameSize];
+ char name_[kMaxNameSize];
};
// A set of counters and associated information. An instance of this
@@ -203,6 +209,16 @@ class Worker : public std::enable_shared_from_this<Worker> {
friend class ProcessMessageTask;
friend class TerminateTask;
+ enum class State {
+ kReady,
+ kRunning,
+ kTerminating,
+ kTerminated,
+ kTerminatingAndJoining,
+ kTerminatedAndJoined
+ };
+ bool is_running() const;
+
void ProcessMessage(std::unique_ptr<SerializationData> data);
void ProcessMessages();
@@ -225,7 +241,7 @@ class Worker : public std::enable_shared_from_this<Worker> {
SerializationDataQueue out_queue_;
base::Thread* thread_ = nullptr;
char* script_;
- std::atomic<bool> running_;
+ std::atomic<State> state_;
// For signalling that the worker has started.
base::Semaphore started_semaphore_{0};
@@ -393,6 +409,8 @@ class ShellOptions {
bool test_shell = false;
DisallowReassignment<bool> expected_to_throw = {"throws", false};
DisallowReassignment<bool> no_fail = {"no-fail", false};
+ DisallowReassignment<bool> dump_counters = {"dump-counters", false};
+ DisallowReassignment<bool> dump_counters_nvp = {"dump-counters-nvp", false};
DisallowReassignment<bool> ignore_unhandled_promises = {
"ignore-unhandled-promises", false};
DisallowReassignment<bool> mock_arraybuffer_allocator = {
@@ -462,7 +480,7 @@ class Shell : public i::AllStatic {
enum class CodeType { kFileName, kString, kFunction, kInvalid, kNone };
static bool ExecuteString(Isolate* isolate, Local<String> source,
- Local<Value> name, PrintResult print_result,
+ Local<String> name, PrintResult print_result,
ReportExceptions report_exceptions,
ProcessMessageQueue process_message_queue);
static bool ExecuteModule(Isolate* isolate, const char* file_name);
@@ -478,7 +496,7 @@ class Shell : public i::AllStatic {
static int RunMain(Isolate* isolate, bool last_run);
static int Main(int argc, char* argv[]);
static void Exit(int exit_code);
- static void OnExit(Isolate* isolate);
+ static void OnExit(Isolate* isolate, bool dispose);
static void CollectGarbage(Isolate* isolate);
static bool EmptyMessageQueues(Isolate* isolate);
static bool CompleteMessageLoop(Isolate* isolate);
@@ -607,8 +625,9 @@ class Shell : public i::AllStatic {
static void MakeDirectory(const v8::FunctionCallbackInfo<v8::Value>& args);
static void RemoveDirectory(const v8::FunctionCallbackInfo<v8::Value>& args);
static MaybeLocal<Promise> HostImportModuleDynamically(
- Local<Context> context, Local<ScriptOrModule> script_or_module,
- Local<String> specifier, Local<FixedArray> import_assertions);
+ Local<Context> context, Local<Data> host_defined_options,
+ Local<Value> resource_name, Local<String> specifier,
+ Local<FixedArray> import_assertions);
static void ModuleResolutionSuccessCallback(
const v8::FunctionCallbackInfo<v8::Value>& info);
@@ -668,6 +687,7 @@ class Shell : public i::AllStatic {
static Global<Function> stringify_function_;
static const char* stringify_source_;
static CounterMap* counter_map_;
+ static base::SharedMutex counter_mutex_;
// We statically allocate a set of local counters to be used if we
// don't want to store the stats in a memory-mapped file
static CounterCollection local_counters_;
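
The Worker changes replace the single running_ flag with a small state machine, and every transition is attempted with compare_exchange_strong so that racing callers (the worker thread, Terminate, and the joining thread) cannot both claim the same transition. A toy model of the idiom, reusing the state names from the new enum:

#include <atomic>

enum class State {
  kReady,
  kRunning,
  kTerminating,
  kTerminated,
  kTerminatingAndJoining,
  kTerminatedAndJoined
};

std::atomic<State> state{State::kReady};

// Only one caller can ever move kReady -> kRunning.
bool Start() {
  State expected = State::kReady;
  return state.compare_exchange_strong(expected, State::kRunning);
}

// Termination is requested at most once; if the worker never reached
// kRunning (or someone else already requested termination), this is a no-op.
bool RequestTermination() {
  State expected = State::kRunning;
  return state.compare_exchange_strong(expected, State::kTerminating);
}

On failure, compare_exchange_strong performs no store, which is why the patched TerminateAndWaitForThread can simply try a second expected state (kTerminated) before concluding that another caller is already joining the thread.
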
diff --git a/deps/v8/src/date/dateparser-inl.h b/deps/v8/src/date/dateparser-inl.h
index d4a153356e..623986d2b1 100644
--- a/deps/v8/src/date/dateparser-inl.h
+++ b/deps/v8/src/date/dateparser-inl.h
@@ -94,9 +94,9 @@ bool DateParser::Parse(Isolate* isolate, base::Vector<Char> str, double* out) {
} else if (scanner.SkipSymbol('.') && time.IsExpecting(n)) {
time.Add(n);
if (!scanner.Peek().IsNumber()) return false;
- int n = ReadMilliseconds(scanner.Next());
- if (n < 0) return false;
- time.AddFinal(n);
+ int ms = ReadMilliseconds(scanner.Next());
+ if (ms < 0) return false;
+ time.AddFinal(ms);
} else if (tz.IsExpecting(n)) {
tz.SetAbsoluteMinute(n);
} else if (time.IsExpecting(n)) {
@@ -138,9 +138,9 @@ bool DateParser::Parse(Isolate* isolate, base::Vector<Char> str, double* out) {
int n = 0;
int length = 0;
if (scanner.Peek().IsNumber()) {
- DateToken token = scanner.Next();
- length = token.length();
- n = token.number();
+ DateToken next_token = scanner.Next();
+ length = next_token.length();
+ n = next_token.number();
}
has_read_number = true;
diff --git a/deps/v8/src/debug/debug-coverage.cc b/deps/v8/src/debug/debug-coverage.cc
index 46f6c366cc..122a7a2213 100644
--- a/deps/v8/src/debug/debug-coverage.cc
+++ b/deps/v8/src/debug/debug-coverage.cc
@@ -7,6 +7,7 @@
#include "src/ast/ast-source-ranges.h"
#include "src/ast/ast.h"
#include "src/base/hashmap.h"
+#include "src/common/assert-scope.h"
#include "src/common/globals.h"
#include "src/debug/debug.h"
#include "src/deoptimizer/deoptimizer.h"
@@ -527,6 +528,7 @@ void CollectAndMaybeResetCounts(Isolate* isolate,
->feedback_vectors_for_profiling_tools()
->IsArrayList());
DCHECK_EQ(v8::debug::CoverageMode::kBestEffort, coverage_mode);
+ AllowGarbageCollection allow_gc;
HeapObjectIterator heap_iterator(isolate->heap());
for (HeapObject current_obj = heap_iterator.Next();
!current_obj.is_null(); current_obj = heap_iterator.Next()) {
diff --git a/deps/v8/src/debug/debug-interface.cc b/deps/v8/src/debug/debug-interface.cc
index 2d4cc0319f..1ec2f04bc7 100644
--- a/deps/v8/src/debug/debug-interface.cc
+++ b/deps/v8/src/debug/debug-interface.cc
@@ -49,20 +49,6 @@ v8_inspector::V8Inspector* GetInspector(Isolate* isolate) {
return reinterpret_cast<i::Isolate*>(isolate)->inspector();
}
-Local<String> GetFunctionDebugName(Local<StackFrame> frame) {
-#if V8_ENABLE_WEBASSEMBLY
- auto info = Utils::OpenHandle(*frame);
- if (info->IsWasm()) {
- auto isolate = info->GetIsolate();
- auto instance = handle(info->GetWasmInstance(), isolate);
- auto func_index = info->GetWasmFunctionIndex();
- return Utils::ToLocal(
- i::GetWasmFunctionDebugName(isolate, instance, func_index));
- }
-#endif // V8_ENABLE_WEBASSEMBLY
- return frame->GetFunctionName();
-}
-
Local<String> GetFunctionDescription(Local<Function> function) {
auto receiver = Utils::OpenHandle(*function);
if (receiver->IsJSBoundFunction()) {
@@ -70,28 +56,29 @@ Local<String> GetFunctionDescription(Local<Function> function) {
i::Handle<i::JSBoundFunction>::cast(receiver)));
}
if (receiver->IsJSFunction()) {
- auto function = i::Handle<i::JSFunction>::cast(receiver);
+ auto js_function = i::Handle<i::JSFunction>::cast(receiver);
#if V8_ENABLE_WEBASSEMBLY
- if (function->shared().HasWasmExportedFunctionData()) {
- auto isolate = function->GetIsolate();
+ if (js_function->shared().HasWasmExportedFunctionData()) {
+ auto isolate = js_function->GetIsolate();
auto func_index =
- function->shared().wasm_exported_function_data().function_index();
+ js_function->shared().wasm_exported_function_data().function_index();
auto instance = i::handle(
- function->shared().wasm_exported_function_data().instance(), isolate);
+ js_function->shared().wasm_exported_function_data().instance(),
+ isolate);
if (instance->module()->origin == i::wasm::kWasmOrigin) {
// For asm.js functions, we can still print the source
// code (hopefully), so don't bother with them here.
auto debug_name =
i::GetWasmFunctionDebugName(isolate, instance, func_index);
i::IncrementalStringBuilder builder(isolate);
- builder.AppendCString("function ");
+ builder.AppendCStringLiteral("function ");
builder.AppendString(debug_name);
- builder.AppendCString("() { [native code] }");
+ builder.AppendCStringLiteral("() { [native code] }");
return Utils::ToLocal(builder.Finish().ToHandleChecked());
}
}
#endif // V8_ENABLE_WEBASSEMBLY
- return Utils::ToLocal(i::JSFunction::ToString(function));
+ return Utils::ToLocal(i::JSFunction::ToString(js_function));
}
return Utils::ToLocal(
receiver->GetIsolate()->factory()->function_native_code_string());
@@ -148,13 +135,13 @@ void CollectPrivateMethodsAndAccessorsFromContext(
} // namespace
-bool GetPrivateMembers(Local<Context> context, Local<Object> value,
+bool GetPrivateMembers(Local<Context> context, Local<Object> object,
std::vector<Local<Value>>* names_out,
std::vector<Local<Value>>* values_out) {
i::Isolate* isolate = reinterpret_cast<i::Isolate*>(context->GetIsolate());
LOG_API(isolate, debug, GetPrivateMembers);
ENTER_V8_NO_SCRIPT_NO_EXCEPTION(isolate);
- i::Handle<i::JSReceiver> receiver = Utils::OpenHandle(*value);
+ i::Handle<i::JSReceiver> receiver = Utils::OpenHandle(*object);
i::Handle<i::JSArray> names;
i::Handle<i::FixedArray> values;
@@ -179,8 +166,8 @@ bool GetPrivateMembers(Local<Context> context, Local<Object> value,
isolate, value, i::Object::GetProperty(isolate, receiver, key),
false);
- i::Handle<i::Context> context(i::Context::cast(*value), isolate);
- i::Handle<i::ScopeInfo> scope_info(context->scope_info(), isolate);
+ i::Handle<i::Context> value_context(i::Context::cast(*value), isolate);
+ i::Handle<i::ScopeInfo> scope_info(value_context->scope_info(), isolate);
// At least one slot contains the brand symbol so it does not count.
private_entries_count += (scope_info->ContextLocalCount() - 1);
} else {
@@ -196,8 +183,8 @@ bool GetPrivateMembers(Local<Context> context, Local<Object> value,
if (shared->is_class_constructor() &&
shared->has_static_private_methods_or_accessors()) {
has_static_private_methods_or_accessors = true;
- i::Handle<i::Context> context(func->context(), isolate);
- i::Handle<i::ScopeInfo> scope_info(context->scope_info(), isolate);
+ i::Handle<i::Context> func_context(func->context(), isolate);
+ i::Handle<i::ScopeInfo> scope_info(func_context->scope_info(), isolate);
int local_count = scope_info->ContextLocalCount();
for (int j = 0; j < local_count; ++j) {
i::VariableMode mode = scope_info->ContextLocalMode(j);
@@ -218,10 +205,11 @@ bool GetPrivateMembers(Local<Context> context, Local<Object> value,
values_out->reserve(private_entries_count);
if (has_static_private_methods_or_accessors) {
- i::Handle<i::Context> context(i::JSFunction::cast(*receiver).context(),
- isolate);
- CollectPrivateMethodsAndAccessorsFromContext(
- isolate, context, i::IsStaticFlag::kStatic, names_out, values_out);
+ i::Handle<i::Context> receiver_context(
+ i::JSFunction::cast(*receiver).context(), isolate);
+ CollectPrivateMethodsAndAccessorsFromContext(isolate, receiver_context,
+ i::IsStaticFlag::kStatic,
+ names_out, values_out);
}
for (int i = 0; i < keys->length(); ++i) {
@@ -234,9 +222,10 @@ bool GetPrivateMembers(Local<Context> context, Local<Object> value,
if (key->is_private_brand()) {
DCHECK(value->IsContext());
- i::Handle<i::Context> context(i::Context::cast(*value), isolate);
- CollectPrivateMethodsAndAccessorsFromContext(
- isolate, context, i::IsStaticFlag::kNotStatic, names_out, values_out);
+ i::Handle<i::Context> value_context(i::Context::cast(*value), isolate);
+ CollectPrivateMethodsAndAccessorsFromContext(isolate, value_context,
+ i::IsStaticFlag::kNotStatic,
+ names_out, values_out);
} else { // Private fields
i::Handle<i::String> name(
i::String::cast(i::Symbol::cast(*key).description()), isolate);
@@ -289,10 +278,12 @@ void ClearStepping(Isolate* v8_isolate) {
isolate->debug()->ClearStepping();
}
-void BreakRightNow(Isolate* v8_isolate) {
+void BreakRightNow(Isolate* v8_isolate,
+ base::EnumSet<debug::BreakReason> break_reasons) {
i::Isolate* isolate = reinterpret_cast<i::Isolate*>(v8_isolate);
ENTER_V8_DO_NOT_USE(isolate);
- isolate->debug()->HandleDebugBreak(i::kIgnoreIfAllFramesBlackboxed);
+ isolate->debug()->HandleDebugBreak(i::kIgnoreIfAllFramesBlackboxed,
+ break_reasons);
}
void SetTerminateOnResume(Isolate* v8_isolate) {
@@ -1005,10 +996,10 @@ void GlobalLexicalScopeNames(v8::Local<v8::Context> v8_context,
context->global_object().native_context().script_context_table(),
isolate);
for (int i = 0; i < table->used(kAcquireLoad); i++) {
- i::Handle<i::Context> context =
+ i::Handle<i::Context> script_context =
i::ScriptContextTable::GetContext(isolate, table, i);
- DCHECK(context->IsScriptContext());
- i::Handle<i::ScopeInfo> scope_info(context->scope_info(), isolate);
+ DCHECK(script_context->IsScriptContext());
+ i::Handle<i::ScopeInfo> scope_info(script_context->scope_info(), isolate);
int local_count = scope_info->ContextLocalCount();
for (int j = 0; j < local_count; ++j) {
i::String name = scope_info->ContextLocalName(j);
diff --git a/deps/v8/src/debug/debug-interface.h b/deps/v8/src/debug/debug-interface.h
index 07ead0adbd..125623afea 100644
--- a/deps/v8/src/debug/debug-interface.h
+++ b/deps/v8/src/debug/debug-interface.h
@@ -15,6 +15,7 @@
#include "include/v8-promise.h"
#include "include/v8-script.h"
#include "include/v8-util.h"
+#include "src/base/enum-set.h"
#include "src/base/vector.h"
#include "src/common/globals.h"
#include "src/debug/interface-types.h"
@@ -48,13 +49,6 @@ int GetContextId(Local<Context> context);
void SetInspector(Isolate* isolate, v8_inspector::V8Inspector*);
v8_inspector::V8Inspector* GetInspector(Isolate* isolate);
-// Returns the debug name for the function, which is supposed to be used
-// by the debugger and the developer tools. This can thus be different from
-// the name returned by the StackFrame::GetFunctionName() method. For example,
-// in case of WebAssembly, the debug name is WAT-compatible and thus always
-// preceeded by a dollar ('$').
-Local<String> GetFunctionDebugName(Local<StackFrame> frame);
-
// Returns a debug string representation of the function.
Local<String> GetFunctionDescription(Local<Function> function);
@@ -119,9 +113,24 @@ enum StepAction {
// in the current function.
};
+// Records the reason why the debugger breaks.
+enum class BreakReason : uint8_t {
+ kAlreadyPaused,
+ kStep,
+ kAsyncStep,
+ kException,
+ kAssert,
+ kDebuggerStatement,
+ kOOM,
+ kScheduled,
+ kAgent
+};
+typedef base::EnumSet<BreakReason> BreakReasons;
+
void PrepareStep(Isolate* isolate, StepAction action);
void ClearStepping(Isolate* isolate);
-V8_EXPORT_PRIVATE void BreakRightNow(Isolate* isolate);
+V8_EXPORT_PRIVATE void BreakRightNow(
+ Isolate* isolate, base::EnumSet<BreakReason> break_reason = {});
// Use `SetTerminateOnResume` to indicate that a TerminateExecution interrupt
// should be set shortly before resuming, i.e. shortly before returning into
@@ -228,7 +237,8 @@ class DebugDelegate {
// debug::Script::SetBreakpoint API.
virtual void BreakProgramRequested(
v8::Local<v8::Context> paused_context,
- const std::vector<debug::BreakpointId>& inspector_break_points_hit) {}
+ const std::vector<debug::BreakpointId>& inspector_break_points_hit,
+ base::EnumSet<BreakReason> break_reasons = {}) {}
virtual void ExceptionThrown(v8::Local<v8::Context> paused_context,
v8::Local<v8::Value> exception,
v8::Local<v8::Value> promise, bool is_uncaught,
@@ -259,7 +269,8 @@ class AsyncEventDelegate {
bool is_blackboxed) = 0;
};
-void SetAsyncEventDelegate(Isolate* isolate, AsyncEventDelegate* delegate);
+V8_EXPORT_PRIVATE void SetAsyncEventDelegate(Isolate* isolate,
+ AsyncEventDelegate* delegate);
void ResetBlackboxedStateCache(Isolate* isolate,
v8::Local<debug::Script> script);
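
For context on the additions to this header: BreakReasons is a base::EnumSet over BreakReason, so a pause can carry zero or more reasons, and the delegate receives the same set through the new third parameter of BreakProgramRequested(). A minimal usage sketch for V8-internal callers (illustrative only, not part of the patch; it assumes access to src/debug/debug-interface.h and uses only the EnumSet operations this diff itself exercises):

  v8::debug::BreakReasons reasons;                  // empty set
  reasons.Add(v8::debug::BreakReason::kScheduled);  // tag the pause
  v8::debug::BreakRightNow(isolate, reasons);       // delegate receives the set
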
diff --git a/deps/v8/src/debug/debug-property-iterator.cc b/deps/v8/src/debug/debug-property-iterator.cc
index b0bca65e30..40406deacf 100644
--- a/deps/v8/src/debug/debug-property-iterator.cc
+++ b/deps/v8/src/debug/debug-property-iterator.cc
@@ -109,6 +109,21 @@ v8::Maybe<v8::PropertyAttribute> DebugPropertyIterator::attributes() {
PrototypeIterator::GetCurrent<JSReceiver>(prototype_iterator_);
auto result = JSReceiver::GetPropertyAttributes(receiver, raw_name());
if (result.IsNothing()) return Nothing<v8::PropertyAttribute>();
+ // This should almost never happen; however, we have seen cases where we do
+ // trigger this check. In these rare events, it is typically a
+ // misconfiguration by an embedder (such as Blink) in how the embedder
+ // processes properties.
+ //
+ // In the case of crbug.com/1262066 we discovered that Blink was returning
+ // a list of properties contained in an object, after which V8 queries each
+ // property individually. But Blink incorrectly claimed that the property
+ // in question did *not* exist. As such, V8 is instructed to process a
+ // property, asks the embedder for more information, and then suddenly the
+ // embedder claims it doesn't exist. In these cases, we hit this DCHECK.
+ //
+ // If you are running into this problem, check your embedder implementation
+ // and verify that the data from both sides matches. If there is a mismatch,
+ // V8 will crash.
DCHECK(result.FromJust() != ABSENT);
return Just(static_cast<v8::PropertyAttribute>(result.FromJust()));
}
diff --git a/deps/v8/src/debug/debug-scopes.cc b/deps/v8/src/debug/debug-scopes.cc
index 2de06dee5b..93f7667486 100644
--- a/deps/v8/src/debug/debug-scopes.cc
+++ b/deps/v8/src/debug/debug-scopes.cc
@@ -272,9 +272,12 @@ void ScopeIterator::TryParseAndRetrieveScopes(ReparseStrategy strategy) {
scope_info->scope_type() == FUNCTION_SCOPE);
}
- UnoptimizedCompileState compile_state(isolate_);
+ UnoptimizedCompileState compile_state;
- info_ = std::make_unique<ParseInfo>(isolate_, flags, &compile_state);
+ reusable_compile_state_ =
+ std::make_unique<ReusableUnoptimizedCompileState>(isolate_);
+ info_ = std::make_unique<ParseInfo>(isolate_, flags, &compile_state,
+ reusable_compile_state_.get());
const bool parse_result =
flags.is_toplevel()
diff --git a/deps/v8/src/debug/debug-scopes.h b/deps/v8/src/debug/debug-scopes.h
index 590e9e9bfe..7cc0e1ed16 100644
--- a/deps/v8/src/debug/debug-scopes.h
+++ b/deps/v8/src/debug/debug-scopes.h
@@ -8,6 +8,7 @@
#include <vector>
#include "src/debug/debug-frames.h"
+#include "src/parsing/parse-info.h"
namespace v8 {
namespace internal {
@@ -110,6 +111,7 @@ class ScopeIterator {
private:
Isolate* isolate_;
+ std::unique_ptr<ReusableUnoptimizedCompileState> reusable_compile_state_;
std::unique_ptr<ParseInfo> info_;
FrameInspector* const frame_inspector_ = nullptr;
Handle<JSGeneratorObject> generator_;
diff --git a/deps/v8/src/debug/debug-stack-trace-iterator.cc b/deps/v8/src/debug/debug-stack-trace-iterator.cc
index 09485741f1..81b2487a5b 100644
--- a/deps/v8/src/debug/debug-stack-trace-iterator.cc
+++ b/deps/v8/src/debug/debug-stack-trace-iterator.cc
@@ -82,7 +82,8 @@ int DebugStackTraceIterator::GetContextId() const {
v8::MaybeLocal<v8::Value> DebugStackTraceIterator::GetReceiver() const {
DCHECK(!Done());
if (frame_inspector_->IsJavaScript() &&
- frame_inspector_->GetFunction()->shared().kind() == kArrowFunction) {
+ frame_inspector_->GetFunction()->shared().kind() ==
+ FunctionKind::kArrowFunction) {
// FrameInspector is not able to get receiver for arrow function.
// So let's try to fetch it using same logic as is used to retrieve 'this'
// during DebugEvaluate::Local.
diff --git a/deps/v8/src/debug/debug-wasm-objects.cc b/deps/v8/src/debug/debug-wasm-objects.cc
index e45ed85574..2a65ffc34b 100644
--- a/deps/v8/src/debug/debug-wasm-objects.cc
+++ b/deps/v8/src/debug/debug-wasm-objects.cc
@@ -317,8 +317,10 @@ struct FunctionsProxy : NamedDebugProxy<FunctionsProxy, kFunctionsProxy> {
static Handle<Object> Get(Isolate* isolate,
Handle<WasmInstanceObject> instance,
uint32_t index) {
- return WasmInstanceObject::GetOrCreateWasmExternalFunction(isolate,
- instance, index);
+ return handle(WasmInstanceObject::GetOrCreateWasmInternalFunction(
+ isolate, instance, index)
+ ->external(),
+ isolate);
}
static Handle<String> GetName(Isolate* isolate,
@@ -1027,6 +1029,9 @@ Handle<WasmValueObject> WasmValueObject::New(
v = ArrayProxy::Create(isolate, value, module_object);
} else if (ref->IsJSFunction() || ref->IsSmi() || ref->IsNull()) {
v = ref;
+ } else if (ref->IsWasmInternalFunction()) {
+ v = handle(Handle<WasmInternalFunction>::cast(ref)->external(),
+ isolate);
} else {
// Fail gracefully.
base::EmbeddedVector<char, 64> error;
@@ -1135,7 +1140,11 @@ Handle<ArrayList> AddWasmTableObjectInternalProperties(
int length = table->current_length();
Handle<FixedArray> entries = isolate->factory()->NewFixedArray(length);
for (int i = 0; i < length; ++i) {
- auto entry = WasmTableObject::Get(isolate, table, i);
+ Handle<Object> entry = WasmTableObject::Get(isolate, table, i);
+ if (entry->IsWasmInternalFunction()) {
+ entry = handle(Handle<WasmInternalFunction>::cast(entry)->external(),
+ isolate);
+ }
entries->set(i, *entry);
}
Handle<JSArray> final_entries = isolate->factory()->NewJSArrayWithElements(
diff --git a/deps/v8/src/debug/debug.cc b/deps/v8/src/debug/debug.cc
index 4d75516571..48d0086155 100644
--- a/deps/v8/src/debug/debug.cc
+++ b/deps/v8/src/debug/debug.cc
@@ -14,6 +14,7 @@
#include "src/codegen/assembler-inl.h"
#include "src/codegen/compilation-cache.h"
#include "src/codegen/compiler.h"
+#include "src/common/assert-scope.h"
#include "src/common/globals.h"
#include "src/common/message-template.h"
#include "src/debug/debug-evaluate.h"
@@ -567,30 +568,58 @@ MaybeHandle<FixedArray> Debug::CheckBreakPoints(Handle<DebugInfo> debug_info,
}
bool Debug::IsMutedAtCurrentLocation(JavaScriptFrame* frame) {
- RCS_SCOPE(isolate_, RuntimeCallCounterId::kDebugger);
- HandleScope scope(isolate_);
// A break location is considered muted if break locations on the current
// statement have at least one break point, and all of these break points
// evaluate to false. Aside from not triggering a debug break event at the
// break location, we also do not trigger one for debugger statements, nor
// an exception event on exception at this location.
+ RCS_SCOPE(isolate_, RuntimeCallCounterId::kDebugger);
+ HandleScope scope(isolate_);
+ bool has_break_points;
+ MaybeHandle<FixedArray> checked =
+ GetHitBreakpointsAtCurrentStatement(frame, &has_break_points);
+ return has_break_points && checked.is_null();
+}
+
+MaybeHandle<FixedArray> Debug::GetHitBreakpointsAtCurrentStatement(
+ JavaScriptFrame* frame, bool* has_break_points) {
+ RCS_SCOPE(isolate_, RuntimeCallCounterId::kDebugger);
FrameSummary summary = FrameSummary::GetTop(frame);
Handle<JSFunction> function = summary.AsJavaScript().function();
- if (!function->shared().HasBreakInfo()) return false;
+ if (!function->shared().HasBreakInfo()) {
+ *has_break_points = false;
+ return {};
+ }
Handle<DebugInfo> debug_info(function->shared().GetDebugInfo(), isolate_);
// Enter the debugger.
DebugScope debug_scope(this);
std::vector<BreakLocation> break_locations;
BreakLocation::AllAtCurrentStatement(debug_info, frame, &break_locations);
+
+ Handle<FixedArray> break_points_hit = isolate_->factory()->NewFixedArray(
+ debug_info->GetBreakPointCount(isolate_));
+ int break_points_hit_count = 0;
bool has_break_points_at_all = false;
for (size_t i = 0; i < break_locations.size(); i++) {
- bool has_break_points;
- MaybeHandle<FixedArray> check_result =
- CheckBreakPoints(debug_info, &break_locations[i], &has_break_points);
- has_break_points_at_all |= has_break_points;
- if (has_break_points && !check_result.is_null()) return false;
+ bool location_has_break_points;
+ MaybeHandle<FixedArray> check_result = CheckBreakPoints(
+ debug_info, &break_locations[i], &location_has_break_points);
+ has_break_points_at_all |= location_has_break_points;
+ if (!check_result.is_null()) {
+ Handle<FixedArray> break_points_current_hit =
+ check_result.ToHandleChecked();
+ int num_objects = break_points_current_hit->length();
+ for (int j = 0; j < num_objects; ++j) {
+ break_points_hit->set(break_points_hit_count++,
+ break_points_current_hit->get(j));
+ }
+ }
}
- return has_break_points_at_all;
+ *has_break_points = has_break_points_at_all;
+ if (break_points_hit_count == 0) return {};
+
+ break_points_hit->Shrink(isolate_, break_points_hit_count);
+ return break_points_hit;
}
// Check whether a single break point object is triggered.
@@ -824,17 +853,17 @@ void Debug::RemoveBreakpointForWasmScript(Handle<Script> script, int id) {
void Debug::RecordWasmScriptWithBreakpoints(Handle<Script> script) {
RCS_SCOPE(isolate_, RuntimeCallCounterId::kDebugger);
- if (wasm_scripts_with_breakpoints_.is_null()) {
+ if (wasm_scripts_with_break_points_.is_null()) {
Handle<WeakArrayList> new_list = isolate_->factory()->NewWeakArrayList(4);
- wasm_scripts_with_breakpoints_ =
+ wasm_scripts_with_break_points_ =
isolate_->global_handles()->Create(*new_list);
}
{
DisallowGarbageCollection no_gc;
- for (int idx = wasm_scripts_with_breakpoints_->length() - 1; idx >= 0;
+ for (int idx = wasm_scripts_with_break_points_->length() - 1; idx >= 0;
--idx) {
HeapObject wasm_script;
- if (wasm_scripts_with_breakpoints_->Get(idx).GetHeapObject(
+ if (wasm_scripts_with_break_points_->Get(idx).GetHeapObject(
&wasm_script) &&
wasm_script == *script) {
return;
@@ -842,11 +871,11 @@ void Debug::RecordWasmScriptWithBreakpoints(Handle<Script> script) {
}
}
Handle<WeakArrayList> new_list = WeakArrayList::Append(
- isolate_, wasm_scripts_with_breakpoints_, MaybeObjectHandle{script});
- if (*new_list != *wasm_scripts_with_breakpoints_) {
+ isolate_, wasm_scripts_with_break_points_, MaybeObjectHandle{script});
+ if (*new_list != *wasm_scripts_with_break_points_) {
isolate_->global_handles()->Destroy(
- wasm_scripts_with_breakpoints_.location());
- wasm_scripts_with_breakpoints_ =
+ wasm_scripts_with_break_points_.location());
+ wasm_scripts_with_break_points_ =
isolate_->global_handles()->Create(*new_list);
}
}
@@ -861,12 +890,12 @@ void Debug::ClearAllBreakPoints() {
});
#if V8_ENABLE_WEBASSEMBLY
// Clear all wasm breakpoints.
- if (!wasm_scripts_with_breakpoints_.is_null()) {
+ if (!wasm_scripts_with_break_points_.is_null()) {
DisallowGarbageCollection no_gc;
- for (int idx = wasm_scripts_with_breakpoints_->length() - 1; idx >= 0;
+ for (int idx = wasm_scripts_with_break_points_->length() - 1; idx >= 0;
--idx) {
HeapObject raw_wasm_script;
- if (wasm_scripts_with_breakpoints_->Get(idx).GetHeapObject(
+ if (wasm_scripts_with_break_points_->Get(idx).GetHeapObject(
&raw_wasm_script)) {
Script wasm_script = Script::cast(raw_wasm_script);
WasmScript::ClearAllBreakpoints(wasm_script);
@@ -874,7 +903,7 @@ void Debug::ClearAllBreakPoints() {
isolate_);
}
}
- wasm_scripts_with_breakpoints_ = Handle<WeakArrayList>{};
+ wasm_scripts_with_break_points_ = Handle<WeakArrayList>{};
}
#endif // V8_ENABLE_WEBASSEMBLY
}
@@ -1179,14 +1208,14 @@ void Debug::PrepareStep(StepAction step_action) {
return;
}
#endif // V8_ENABLE_WEBASSEMBLY
- JavaScriptFrame* frame = JavaScriptFrame::cast(frames_it.frame());
+ JavaScriptFrame* js_frame = JavaScriptFrame::cast(frames_it.frame());
if (last_step_action() == StepInto) {
// Deoptimize frame to ensure calls are checked for step-in.
- Deoptimizer::DeoptimizeFunction(frame->function());
+ Deoptimizer::DeoptimizeFunction(js_frame->function());
}
- HandleScope scope(isolate_);
+ HandleScope inner_scope(isolate_);
std::vector<Handle<SharedFunctionInfo>> infos;
- frame->GetFunctions(&infos);
+ js_frame->GetFunctions(&infos);
for (; !infos.empty(); current_frame_count--) {
Handle<SharedFunctionInfo> info = infos.back();
infos.pop_back();
@@ -1281,6 +1310,7 @@ class DiscardBaselineCodeVisitor : public ThreadVisitor {
DiscardBaselineCodeVisitor() : shared_(SharedFunctionInfo()) {}
void VisitThread(Isolate* isolate, ThreadLocalTop* top) override {
+ DisallowGarbageCollection disallow_gc;
bool deopt_all = shared_ == SharedFunctionInfo();
for (JavaScriptFrameIterator it(isolate, top); !it.done(); it.Advance()) {
if (!deopt_all && it.frame()->function().shared() != shared_) continue;
@@ -1300,7 +1330,7 @@ class DiscardBaselineCodeVisitor : public ThreadVisitor {
// we deoptimized in the debugger and are stepping into it.
JavaScriptFrame* frame = it.frame();
Address pc = frame->pc();
- Builtin builtin = InstructionStream::TryLookupCode(isolate, pc);
+ Builtin builtin = OffHeapInstructionStream::TryLookupCode(isolate, pc);
if (builtin == Builtin::kBaselineOrInterpreterEnterAtBytecode ||
builtin == Builtin::kBaselineOrInterpreterEnterAtNextBytecode) {
Address* pc_addr = frame->pc_address();
@@ -1319,7 +1349,6 @@ class DiscardBaselineCodeVisitor : public ThreadVisitor {
private:
SharedFunctionInfo shared_;
- DISALLOW_GARBAGE_COLLECTION(no_gc_)
};
} // namespace
@@ -1516,10 +1545,11 @@ void FindBreakablePositions(Handle<DebugInfo> debug_info, int start_position,
}
bool CompileTopLevel(Isolate* isolate, Handle<Script> script) {
- UnoptimizedCompileState compile_state(isolate);
+ UnoptimizedCompileState compile_state;
+ ReusableUnoptimizedCompileState reusable_state(isolate);
UnoptimizedCompileFlags flags =
UnoptimizedCompileFlags::ForScriptCompile(isolate, *script);
- ParseInfo parse_info(isolate, flags, &compile_state);
+ ParseInfo parse_info(isolate, flags, &compile_state, &reusable_state);
IsCompiledScope is_compiled_scope;
const MaybeHandle<SharedFunctionInfo> maybe_result =
Compiler::CompileToplevel(&parse_info, script, isolate,
@@ -1810,7 +1840,7 @@ bool Debug::EnsureBreakInfo(Handle<SharedFunctionInfo> shared) {
IsCompiledScope is_compiled_scope = shared->is_compiled_scope(isolate_);
if (!is_compiled_scope.is_compiled() &&
!Compiler::Compile(isolate_, shared, Compiler::CLEAR_EXCEPTION,
- &is_compiled_scope)) {
+ &is_compiled_scope, CreateSourcePositions::kYes)) {
return false;
}
CreateBreakInfo(shared);
@@ -2116,7 +2146,8 @@ void Debug::OnException(Handle<Object> exception, Handle<Object> promise,
}
void Debug::OnDebugBreak(Handle<FixedArray> break_points_hit,
- StepAction lastStepAction) {
+ StepAction lastStepAction,
+ v8::debug::BreakReasons break_reasons) {
RCS_SCOPE(isolate_, RuntimeCallCounterId::kDebugger);
DCHECK(!break_points_hit.is_null());
// The caller provided for DebugScope.
@@ -2141,18 +2172,19 @@ void Debug::OnDebugBreak(Handle<FixedArray> break_points_hit,
}
std::vector<int> inspector_break_points_hit;
- int inspector_break_points_count = 0;
// This array contains breakpoints installed using JS debug API.
for (int i = 0; i < break_points_hit->length(); ++i) {
BreakPoint break_point = BreakPoint::cast(break_points_hit->get(i));
inspector_break_points_hit.push_back(break_point.id());
- ++inspector_break_points_count;
}
{
RCS_SCOPE(isolate_, RuntimeCallCounterId::kDebuggerCallback);
Handle<Context> native_context(isolate_->native_context());
+ if (lastStepAction != StepAction::StepNone)
+ break_reasons.Add(debug::BreakReason::kStep);
debug_delegate_->BreakProgramRequested(v8::Utils::ToLocal(native_context),
- inspector_break_points_hit);
+ inspector_break_points_hit,
+ break_reasons);
}
}
@@ -2340,7 +2372,8 @@ void Debug::UpdateHookOnFunctionCall() {
thread_local_.break_on_next_function_call_;
}
-void Debug::HandleDebugBreak(IgnoreBreakMode ignore_break_mode) {
+void Debug::HandleDebugBreak(IgnoreBreakMode ignore_break_mode,
+ v8::debug::BreakReasons break_reasons) {
RCS_SCOPE(isolate_, RuntimeCallCounterId::kDebugger);
// Ignore debug break during bootstrapping.
if (isolate_->bootstrapper()->IsActive()) return;
@@ -2352,12 +2385,13 @@ void Debug::HandleDebugBreak(IgnoreBreakMode ignore_break_mode) {
StackLimitCheck check(isolate_);
if (check.HasOverflowed()) return;
+ HandleScope scope(isolate_);
+ MaybeHandle<FixedArray> break_points;
{
JavaScriptFrameIterator it(isolate_);
DCHECK(!it.done());
Object fun = it.frame()->function();
if (fun.IsJSFunction()) {
- HandleScope scope(isolate_);
Handle<JSFunction> function(JSFunction::cast(fun), isolate_);
// Don't stop in builtin and blackboxed functions.
Handle<SharedFunctionInfo> shared(function->shared(), isolate_);
@@ -2366,7 +2400,11 @@ void Debug::HandleDebugBreak(IgnoreBreakMode ignore_break_mode) {
: AllFramesOnStackAreBlackboxed();
if (ignore_break) return;
// Don't stop if the break location is muted.
- if (IsMutedAtCurrentLocation(it.frame())) return;
+ bool has_break_points;
+ break_points =
+ GetHitBreakpointsAtCurrentStatement(it.frame(), &has_break_points);
+ bool is_muted = has_break_points && break_points.is_null();
+ if (is_muted) return;
}
}
@@ -2375,10 +2413,10 @@ void Debug::HandleDebugBreak(IgnoreBreakMode ignore_break_mode) {
// Clear stepping to avoid duplicate breaks.
ClearStepping();
- HandleScope scope(isolate_);
DebugScope debug_scope(this);
-
- OnDebugBreak(isolate_->factory()->empty_fixed_array(), lastStepAction);
+ OnDebugBreak(break_points.is_null() ? isolate_->factory()->empty_fixed_array()
+ : break_points.ToHandleChecked(),
+ lastStepAction, break_reasons);
}
#ifdef DEBUG
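
Taken together, the debug.cc changes split the old muting check in two: GetHitBreakpointsAtCurrentStatement() reports both whether the statement has break points and which of them actually hit, IsMutedAtCurrentLocation() becomes a thin wrapper over it, and HandleDebugBreak() reuses the hit list instead of always passing an empty array to OnDebugBreak(). A condensed sketch of the resulting flow (abbreviated from the hunks above, not standalone code):

  bool has_break_points;
  MaybeHandle<FixedArray> hit =
      GetHitBreakpointsAtCurrentStatement(frame, &has_break_points);
  if (has_break_points && hit.is_null()) return;  // muted: every condition was false
  OnDebugBreak(hit.is_null() ? isolate_->factory()->empty_fixed_array()
                             : hit.ToHandleChecked(),
               lastStepAction, break_reasons);
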
diff --git a/deps/v8/src/debug/debug.h b/deps/v8/src/debug/debug.h
index 42c89c2c98..eba382258e 100644
--- a/deps/v8/src/debug/debug.h
+++ b/deps/v8/src/debug/debug.h
@@ -8,6 +8,7 @@
#include <memory>
#include <vector>
+#include "src/base/enum-set.h"
#include "src/codegen/source-position-table.h"
#include "src/common/globals.h"
#include "src/debug/debug-interface.h"
@@ -215,7 +216,8 @@ class V8_EXPORT_PRIVATE Debug {
Debug& operator=(const Debug&) = delete;
// Debug event triggers.
- void OnDebugBreak(Handle<FixedArray> break_points_hit, StepAction stepAction);
+ void OnDebugBreak(Handle<FixedArray> break_points_hit, StepAction stepAction,
+ debug::BreakReasons break_reasons = {});
base::Optional<Object> OnThrow(Handle<Object> exception)
V8_WARN_UNUSED_RESULT;
@@ -223,7 +225,8 @@ class V8_EXPORT_PRIVATE Debug {
void OnCompileError(Handle<Script> script);
void OnAfterCompile(Handle<Script> script);
- void HandleDebugBreak(IgnoreBreakMode ignore_break_mode);
+ void HandleDebugBreak(IgnoreBreakMode ignore_break_mode,
+ debug::BreakReasons break_reasons);
// The break target may not be the top-most frame, since we may be
// breaking before entering a function that cannot contain break points.
@@ -448,6 +451,9 @@ class V8_EXPORT_PRIVATE Debug {
MaybeHandle<FixedArray> CheckBreakPoints(Handle<DebugInfo> debug_info,
BreakLocation* location,
bool* has_break_points = nullptr);
+ MaybeHandle<FixedArray> GetHitBreakpointsAtCurrentStatement(
+ JavaScriptFrame* frame, bool* has_break_points);
+
bool IsMutedAtCurrentLocation(JavaScriptFrame* frame);
// Check whether a BreakPoint object is hit. Evaluate condition depending
// on whether this is a regular break location or a break at function entry.
@@ -555,7 +561,7 @@ class V8_EXPORT_PRIVATE Debug {
#if V8_ENABLE_WEBASSEMBLY
// This is a global handle, lazily initialized.
- Handle<WeakArrayList> wasm_scripts_with_breakpoints_;
+ Handle<WeakArrayList> wasm_scripts_with_break_points_;
#endif // V8_ENABLE_WEBASSEMBLY
Isolate* isolate_;
diff --git a/deps/v8/src/debug/liveedit.cc b/deps/v8/src/debug/liveedit.cc
index a4c297ec5b..4f090e918d 100644
--- a/deps/v8/src/debug/liveedit.cc
+++ b/deps/v8/src/debug/liveedit.cc
@@ -984,22 +984,25 @@ void LiveEdit::PatchScript(Isolate* isolate, Handle<Script> script,
return;
}
- UnoptimizedCompileState compile_state(isolate);
+ ReusableUnoptimizedCompileState reusable_state(isolate);
+
+ UnoptimizedCompileState compile_state;
UnoptimizedCompileFlags flags =
UnoptimizedCompileFlags::ForScriptCompile(isolate, *script);
flags.set_is_eager(true);
- ParseInfo parse_info(isolate, flags, &compile_state);
+ ParseInfo parse_info(isolate, flags, &compile_state, &reusable_state);
std::vector<FunctionLiteral*> literals;
if (!ParseScript(isolate, script, &parse_info, false, &literals, result))
return;
Handle<Script> new_script = isolate->factory()->CloneScript(script);
new_script->set_source(*new_source);
- UnoptimizedCompileState new_compile_state(isolate);
+ UnoptimizedCompileState new_compile_state;
UnoptimizedCompileFlags new_flags =
UnoptimizedCompileFlags::ForScriptCompile(isolate, *new_script);
new_flags.set_is_eager(true);
- ParseInfo new_parse_info(isolate, new_flags, &new_compile_state);
+ ParseInfo new_parse_info(isolate, new_flags, &new_compile_state,
+ &reusable_state);
std::vector<FunctionLiteral*> new_literals;
if (!ParseScript(isolate, new_script, &new_parse_info, true, &new_literals,
result)) {
@@ -1085,7 +1088,7 @@ void LiveEdit::PatchScript(Isolate* isolate, Handle<Script> script,
FixedArray constants = sfi->GetBytecodeArray(isolate).constant_pool();
for (int i = 0; i < constants.length(); ++i) {
if (!constants.get(i).IsSharedFunctionInfo()) continue;
- FunctionData* data = nullptr;
+ data = nullptr;
if (!function_data_map.Lookup(SharedFunctionInfo::cast(constants.get(i)),
&data)) {
continue;
@@ -1158,11 +1161,12 @@ void LiveEdit::PatchScript(Isolate* isolate, Handle<Script> script,
// unique.
DisallowGarbageCollection no_gc;
- SharedFunctionInfo::ScriptIterator it(isolate, *new_script);
+ SharedFunctionInfo::ScriptIterator script_it(isolate, *new_script);
std::set<int> start_positions;
- for (SharedFunctionInfo sfi = it.Next(); !sfi.is_null(); sfi = it.Next()) {
+ for (SharedFunctionInfo sfi = script_it.Next(); !sfi.is_null();
+ sfi = script_it.Next()) {
DCHECK_EQ(sfi.script(), *new_script);
- DCHECK_EQ(sfi.function_literal_id(), it.CurrentIndex());
+ DCHECK_EQ(sfi.function_literal_id(), script_it.CurrentIndex());
// Don't check the start position of the top-level function, as it can
// overlap with a function in the script.
if (sfi.is_toplevel()) {
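
The same ParseInfo construction pattern now appears in debug-scopes.cc, CompileTopLevel() and LiveEdit::PatchScript(): UnoptimizedCompileState no longer takes an Isolate*, and the isolate-bound pieces move into a ReusableUnoptimizedCompileState that can be shared across parses (PatchScript passes one instance to both of its ParseInfos). A condensed sketch of the new setup (taken from the hunks above, not standalone code):

  UnoptimizedCompileState compile_state;                    // isolate-free
  ReusableUnoptimizedCompileState reusable_state(isolate);  // shareable across parses
  UnoptimizedCompileFlags flags =
      UnoptimizedCompileFlags::ForScriptCompile(isolate, *script);
  ParseInfo parse_info(isolate, flags, &compile_state, &reusable_state);
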
diff --git a/deps/v8/src/debug/wasm/gdb-server/gdb-server.cc b/deps/v8/src/debug/wasm/gdb-server/gdb-server.cc
index 91b531f41f..b6cb5a98d2 100644
--- a/deps/v8/src/debug/wasm/gdb-server/gdb-server.cc
+++ b/deps/v8/src/debug/wasm/gdb-server/gdb-server.cc
@@ -419,7 +419,8 @@ void GdbServer::DebugDelegate::ScriptCompiled(Local<debug::Script> script,
void GdbServer::DebugDelegate::BreakProgramRequested(
// Executed in the isolate thread.
Local<v8::Context> paused_context,
- const std::vector<debug::BreakpointId>& inspector_break_points_hit) {
+ const std::vector<debug::BreakpointId>& inspector_break_points_hit,
+ v8::debug::BreakReasons break_reasons) {
gdb_server_->GetTarget().OnProgramBreak(
isolate_, WasmModuleDebug::GetCallStack(id_, isolate_));
gdb_server_->RunMessageLoopOnPause();
diff --git a/deps/v8/src/debug/wasm/gdb-server/gdb-server.h b/deps/v8/src/debug/wasm/gdb-server/gdb-server.h
index 91a4d65473..146877f73d 100644
--- a/deps/v8/src/debug/wasm/gdb-server/gdb-server.h
+++ b/deps/v8/src/debug/wasm/gdb-server/gdb-server.h
@@ -150,9 +150,10 @@ class GdbServer {
// debug::DebugDelegate
void ScriptCompiled(Local<debug::Script> script, bool is_live_edited,
bool has_compile_error) override;
- void BreakProgramRequested(Local<v8::Context> paused_context,
- const std::vector<debug::BreakpointId>&
- inspector_break_points_hit) override;
+ void BreakProgramRequested(
+ Local<v8::Context> paused_context,
+ const std::vector<debug::BreakpointId>& inspector_break_points_hit,
+ v8::debug::BreakReasons break_reasons) override;
void ExceptionThrown(Local<v8::Context> paused_context,
Local<Value> exception, Local<Value> promise,
bool is_uncaught,
diff --git a/deps/v8/src/deoptimizer/deoptimizer.cc b/deps/v8/src/deoptimizer/deoptimizer.cc
index 6bf26d5bf3..796dd072b3 100644
--- a/deps/v8/src/deoptimizer/deoptimizer.cc
+++ b/deps/v8/src/deoptimizer/deoptimizer.cc
@@ -656,7 +656,7 @@ Builtin Deoptimizer::GetDeoptimizationEntry(DeoptimizeKind kind) {
bool Deoptimizer::IsDeoptimizationEntry(Isolate* isolate, Address addr,
DeoptimizeKind* type_out) {
- Builtin builtin = InstructionStream::TryLookupCode(isolate, addr);
+ Builtin builtin = OffHeapInstructionStream::TryLookupCode(isolate, addr);
if (!Builtins::IsBuiltinId(builtin)) return false;
switch (builtin) {
@@ -784,7 +784,7 @@ void Deoptimizer::TraceMarkForDeoptimization(Code code, const char* reason) {
if (!FLAG_log_deopt) return;
no_gc.Release();
{
- HandleScope scope(isolate);
+ HandleScope handle_scope(isolate);
PROFILE(
isolate,
CodeDependencyChangeEvent(
@@ -1540,7 +1540,6 @@ void Deoptimizer::DoComputeConstructStubFrame(TranslatedFrame* translated_frame,
// Set the continuation for the topmost frame.
if (is_topmost) {
- Builtins* builtins = isolate_->builtins();
DCHECK_EQ(DeoptimizeKind::kLazy, deopt_kind_);
Code continuation = builtins->code(Builtin::kNotifyDeoptimized);
output_frame->SetContinuation(
diff --git a/deps/v8/src/deoptimizer/translated-state.cc b/deps/v8/src/deoptimizer/translated-state.cc
index 721918c195..ab3a2d1275 100644
--- a/deps/v8/src/deoptimizer/translated-state.cc
+++ b/deps/v8/src/deoptimizer/translated-state.cc
@@ -28,10 +28,9 @@ using base::ReadUnalignedValue;
namespace internal {
-void TranslationArrayPrintSingleFrame(std::ostream& os,
- TranslationArray translation_array,
- int translation_index,
- FixedArray literal_array) {
+void TranslationArrayPrintSingleFrame(
+ std::ostream& os, TranslationArray translation_array, int translation_index,
+ DeoptimizationLiteralArray literal_array) {
DisallowGarbageCollection gc_oh_noes;
TranslationArrayIterator iterator(translation_array, translation_index);
disasm::NameConverter converter;
@@ -725,8 +724,8 @@ void TranslatedFrame::Handlify() {
}
TranslatedFrame TranslatedState::CreateNextTranslatedFrame(
- TranslationArrayIterator* iterator, FixedArray literal_array, Address fp,
- FILE* trace_file) {
+ TranslationArrayIterator* iterator,
+ DeoptimizationLiteralArray literal_array, Address fp, FILE* trace_file) {
TranslationOpcode opcode = TranslationOpcodeFromInt(iterator->Next());
switch (opcode) {
case TranslationOpcode::INTERPRETED_FRAME: {
@@ -959,8 +958,8 @@ void TranslatedState::CreateArgumentsElementsTranslatedValues(
// TranslationArrayIterator.
int TranslatedState::CreateNextTranslatedValue(
int frame_index, TranslationArrayIterator* iterator,
- FixedArray literal_array, Address fp, RegisterValues* registers,
- FILE* trace_file) {
+ DeoptimizationLiteralArray literal_array, Address fp,
+ RegisterValues* registers, FILE* trace_file) {
disasm::NameConverter converter;
TranslatedFrame& frame = frames_[frame_index];
@@ -1280,11 +1279,11 @@ Address TranslatedState::DecompressIfNeeded(intptr_t value) {
TranslatedState::TranslatedState(const JavaScriptFrame* frame)
: purpose_(kFrameInspection) {
- int deopt_index = Safepoint::kNoDeoptimizationIndex;
+ int deopt_index = SafepointEntry::kNoDeoptIndex;
DeoptimizationData data =
static_cast<const OptimizedFrame*>(frame)->GetDeoptimizationData(
&deopt_index);
- DCHECK(!data.is_null() && deopt_index != Safepoint::kNoDeoptimizationIndex);
+ DCHECK(!data.is_null() && deopt_index != SafepointEntry::kNoDeoptIndex);
TranslationArrayIterator it(data.TranslationByteArray(),
data.TranslationIndex(deopt_index).value());
int actual_argc = frame->GetActualArgumentCount();
@@ -1299,8 +1298,9 @@ TranslatedState::TranslatedState(const JavaScriptFrame* frame)
void TranslatedState::Init(Isolate* isolate, Address input_frame_pointer,
Address stack_frame_pointer,
TranslationArrayIterator* iterator,
- FixedArray literal_array, RegisterValues* registers,
- FILE* trace_file, int formal_parameter_count,
+ DeoptimizationLiteralArray literal_array,
+ RegisterValues* registers, FILE* trace_file,
+ int formal_parameter_count,
int actual_argument_count) {
DCHECK(frames_.empty());
@@ -1843,9 +1843,10 @@ void TranslatedState::InitializeJSObjectAt(
Handle<Map> map, const DisallowGarbageCollection& no_gc) {
Handle<HeapObject> object_storage = Handle<HeapObject>::cast(slot->storage_);
DCHECK_EQ(TranslatedValue::kCapturedObject, slot->kind());
+ int children_count = slot->GetChildrenCount();
// The object should have at least a map and some payload.
- CHECK_GE(slot->GetChildrenCount(), 2);
+ CHECK_GE(children_count, 2);
// Notify the concurrent marker about the layout change.
isolate()->heap()->NotifyObjectLayoutChange(*object_storage, no_gc);
@@ -1862,8 +1863,8 @@ void TranslatedState::InitializeJSObjectAt(
// For all the other fields we first look at the fixed array and check the
// marker to see if we store an unboxed double.
DCHECK_EQ(kTaggedSize, JSObject::kPropertiesOrHashOffset);
- for (int i = 2; i < slot->GetChildrenCount(); i++) {
- TranslatedValue* slot = GetResolvedSlotAndAdvance(frame, value_index);
+ for (int i = 2; i < children_count; i++) {
+ slot = GetResolvedSlotAndAdvance(frame, value_index);
// Read out the marker and ensure the field is consistent with
// what the markers in the storage say (note that all heap numbers
// should be fully initialized by now).
@@ -1889,10 +1890,11 @@ void TranslatedState::InitializeObjectWithTaggedFieldsAt(
TranslatedFrame* frame, int* value_index, TranslatedValue* slot,
Handle<Map> map, const DisallowGarbageCollection& no_gc) {
Handle<HeapObject> object_storage = Handle<HeapObject>::cast(slot->storage_);
+ int children_count = slot->GetChildrenCount();
// Skip the writes if we already have the canonical empty fixed array.
if (*object_storage == ReadOnlyRoots(isolate()).empty_fixed_array()) {
- CHECK_EQ(2, slot->GetChildrenCount());
+ CHECK_EQ(2, children_count);
Handle<Object> length_value = GetValueAndAdvance(frame, value_index);
CHECK_EQ(*length_value, Smi::FromInt(0));
return;
@@ -1902,8 +1904,8 @@ void TranslatedState::InitializeObjectWithTaggedFieldsAt(
isolate()->heap()->NotifyObjectLayoutChange(*object_storage, no_gc);
// Write the fields to the object.
- for (int i = 1; i < slot->GetChildrenCount(); i++) {
- TranslatedValue* slot = GetResolvedSlotAndAdvance(frame, value_index);
+ for (int i = 1; i < children_count; i++) {
+ slot = GetResolvedSlotAndAdvance(frame, value_index);
int offset = i * kTaggedSize;
uint8_t marker = object_storage->ReadField<uint8_t>(offset);
Handle<Object> field_value;
@@ -2118,9 +2120,9 @@ bool TranslatedState::DoUpdateFeedback() {
return false;
}
-void TranslatedState::ReadUpdateFeedback(TranslationArrayIterator* iterator,
- FixedArray literal_array,
- FILE* trace_file) {
+void TranslatedState::ReadUpdateFeedback(
+ TranslationArrayIterator* iterator,
+ DeoptimizationLiteralArray literal_array, FILE* trace_file) {
CHECK_EQ(TranslationOpcode::UPDATE_FEEDBACK,
TranslationOpcodeFromInt(iterator->Next()));
feedback_vector_ = FeedbackVector::cast(literal_array.get(iterator->Next()));
diff --git a/deps/v8/src/deoptimizer/translated-state.h b/deps/v8/src/deoptimizer/translated-state.h
index 799cb5b18c..6fd2936520 100644
--- a/deps/v8/src/deoptimizer/translated-state.h
+++ b/deps/v8/src/deoptimizer/translated-state.h
@@ -30,7 +30,7 @@ class TranslatedState;
void TranslationArrayPrintSingleFrame(std::ostream& os,
TranslationArray translation_array,
int translation_index,
- FixedArray literal_array);
+ DeoptimizationLiteralArray literal_array);
// The Translated{Value,Frame,State} class hierarchy are a set of utility
// functions to work with the combination of translations (built from a
@@ -382,7 +382,7 @@ class TranslatedState {
void Init(Isolate* isolate, Address input_frame_pointer,
Address stack_frame_pointer, TranslationArrayIterator* iterator,
- FixedArray literal_array, RegisterValues* registers,
+ DeoptimizationLiteralArray literal_array, RegisterValues* registers,
FILE* trace_file, int parameter_count, int actual_argument_count);
void VerifyMaterializedObjects();
@@ -397,13 +397,14 @@ class TranslatedState {
// details, see the code around ReplaceElementsArrayWithCopy.
enum Purpose { kDeoptimization, kFrameInspection };
- TranslatedFrame CreateNextTranslatedFrame(TranslationArrayIterator* iterator,
- FixedArray literal_array,
- Address fp, FILE* trace_file);
+ TranslatedFrame CreateNextTranslatedFrame(
+ TranslationArrayIterator* iterator,
+ DeoptimizationLiteralArray literal_array, Address fp, FILE* trace_file);
int CreateNextTranslatedValue(int frame_index,
TranslationArrayIterator* iterator,
- FixedArray literal_array, Address fp,
- RegisterValues* registers, FILE* trace_file);
+ DeoptimizationLiteralArray literal_array,
+ Address fp, RegisterValues* registers,
+ FILE* trace_file);
Address DecompressIfNeeded(intptr_t value);
void CreateArgumentsElementsTranslatedValues(int frame_index,
Address input_frame_pointer,
@@ -439,7 +440,8 @@ class TranslatedState {
Handle<Map> map, const DisallowGarbageCollection& no_gc);
void ReadUpdateFeedback(TranslationArrayIterator* iterator,
- FixedArray literal_array, FILE* trace_file);
+ DeoptimizationLiteralArray literal_array,
+ FILE* trace_file);
TranslatedValue* ResolveCapturedObject(TranslatedValue* slot);
TranslatedValue* GetValueByObjectIndex(int object_index);
diff --git a/deps/v8/src/diagnostics/basic-block-profiler.cc b/deps/v8/src/diagnostics/basic-block-profiler.cc
index 20b6e567ea..ded3e544ec 100644
--- a/deps/v8/src/diagnostics/basic-block-profiler.cc
+++ b/deps/v8/src/diagnostics/basic-block-profiler.cc
@@ -174,8 +174,8 @@ std::vector<bool> BasicBlockProfiler::GetCoverageBitmap(Isolate* isolate) {
for (int i = 0; i < list_length; ++i) {
BasicBlockProfilerData data(
OnHeapBasicBlockProfilerData::cast(list.Get(i)));
- for (size_t i = 0; i < data.n_blocks(); ++i) {
- out.push_back(data.counts_[i] > 0);
+ for (size_t j = 0; j < data.n_blocks(); ++j) {
+ out.push_back(data.counts_[j] > 0);
}
}
return out;
diff --git a/deps/v8/src/diagnostics/disassembler.cc b/deps/v8/src/diagnostics/disassembler.cc
index ddf990da8d..81b5893226 100644
--- a/deps/v8/src/diagnostics/disassembler.cc
+++ b/deps/v8/src/diagnostics/disassembler.cc
@@ -302,8 +302,8 @@ static int DecodeIt(Isolate* isolate, ExternalReferenceEncoder* ref_encoder,
CodeCommentsIterator cit(code.code_comments(), code.code_comments_size());
// Relocation exists if we either have no isolate (wasm code),
// or we have an isolate and it is not an off-heap instruction stream.
- if (!isolate ||
- !InstructionStream::PcIsOffHeap(isolate, bit_cast<Address>(begin))) {
+ if (!isolate || !OffHeapInstructionStream::PcIsOffHeap(
+ isolate, bit_cast<Address>(begin))) {
it = new RelocIterator(code);
} else {
// No relocation information when printing code stubs.
@@ -421,8 +421,8 @@ static int DecodeIt(Isolate* isolate, ExternalReferenceEncoder* ref_encoder,
// bytes, a constant could accidentally match with the bit-pattern checked
// by IsInConstantPool() below.
if (pcs.empty() && !code.is_null() && !decoding_constant_pool) {
- RelocInfo dummy_rinfo(reinterpret_cast<Address>(prev_pc), RelocInfo::NONE,
- 0, Code());
+ RelocInfo dummy_rinfo(reinterpret_cast<Address>(prev_pc),
+ RelocInfo::NO_INFO, 0, Code());
if (dummy_rinfo.IsInConstantPool()) {
Address constant_pool_entry_address =
dummy_rinfo.constant_pool_entry_address();
diff --git a/deps/v8/src/diagnostics/ia32/disasm-ia32.cc b/deps/v8/src/diagnostics/ia32/disasm-ia32.cc
index fbcba1a4b2..3817472650 100644
--- a/deps/v8/src/diagnostics/ia32/disasm-ia32.cc
+++ b/deps/v8/src/diagnostics/ia32/disasm-ia32.cc
@@ -10,6 +10,7 @@
#include "src/base/compiler-specific.h"
#include "src/base/strings.h"
+#include "src/codegen/ia32/fma-instr.h"
#include "src/codegen/ia32/sse-instr.h"
#include "src/diagnostics/disasm.h"
@@ -702,66 +703,6 @@ int DisassemblerIA32::AVXInstruction(byte* data) {
NameOfXMMRegister(vvvv));
current += PrintRightXMMOperand(current);
break;
- case 0x99:
- AppendToBuffer("vfmadd132s%c %s,%s,", float_size_code(),
- NameOfXMMRegister(regop), NameOfXMMRegister(vvvv));
- current += PrintRightXMMOperand(current);
- break;
- case 0xA9:
- AppendToBuffer("vfmadd213s%c %s,%s,", float_size_code(),
- NameOfXMMRegister(regop), NameOfXMMRegister(vvvv));
- current += PrintRightXMMOperand(current);
- break;
- case 0xB9:
- AppendToBuffer("vfmadd231s%c %s,%s,", float_size_code(),
- NameOfXMMRegister(regop), NameOfXMMRegister(vvvv));
- current += PrintRightXMMOperand(current);
- break;
- case 0x9B:
- AppendToBuffer("vfmsub132s%c %s,%s,", float_size_code(),
- NameOfXMMRegister(regop), NameOfXMMRegister(vvvv));
- current += PrintRightXMMOperand(current);
- break;
- case 0xAB:
- AppendToBuffer("vfmsub213s%c %s,%s,", float_size_code(),
- NameOfXMMRegister(regop), NameOfXMMRegister(vvvv));
- current += PrintRightXMMOperand(current);
- break;
- case 0xBB:
- AppendToBuffer("vfmsub231s%c %s,%s,", float_size_code(),
- NameOfXMMRegister(regop), NameOfXMMRegister(vvvv));
- current += PrintRightXMMOperand(current);
- break;
- case 0x9D:
- AppendToBuffer("vfnmadd132s%c %s,%s,", float_size_code(),
- NameOfXMMRegister(regop), NameOfXMMRegister(vvvv));
- current += PrintRightXMMOperand(current);
- break;
- case 0xAD:
- AppendToBuffer("vfnmadd213s%c %s,%s,", float_size_code(),
- NameOfXMMRegister(regop), NameOfXMMRegister(vvvv));
- current += PrintRightXMMOperand(current);
- break;
- case 0xBD:
- AppendToBuffer("vfnmadd231s%c %s,%s,", float_size_code(),
- NameOfXMMRegister(regop), NameOfXMMRegister(vvvv));
- current += PrintRightXMMOperand(current);
- break;
- case 0x9F:
- AppendToBuffer("vfnmsub132s%c %s,%s,", float_size_code(),
- NameOfXMMRegister(regop), NameOfXMMRegister(vvvv));
- current += PrintRightXMMOperand(current);
- break;
- case 0xAF:
- AppendToBuffer("vfnmsub213s%c %s,%s,", float_size_code(),
- NameOfXMMRegister(regop), NameOfXMMRegister(vvvv));
- current += PrintRightXMMOperand(current);
- break;
- case 0xBF:
- AppendToBuffer("vfnmsub231s%c %s,%s,", float_size_code(),
- NameOfXMMRegister(regop), NameOfXMMRegister(vvvv));
- current += PrintRightXMMOperand(current);
- break;
case 0xF7:
AppendToBuffer("shlx %s,", NameOfCPURegister(regop));
current += PrintRightOperand(current);
@@ -799,8 +740,35 @@ int DisassemblerIA32::AVXInstruction(byte* data) {
AVX2_BROADCAST_LIST(DISASSEMBLE_AVX2_BROADCAST)
#undef DISASSEMBLE_AVX2_BROADCAST
- default:
- UnimplementedInstruction();
+ default: {
+#define DECLARE_FMA_DISASM(instruction, _1, _2, _3, _4, _5, code) \
+ case 0x##code: { \
+ AppendToBuffer(#instruction " %s,%s,", NameOfXMMRegister(regop), \
+ NameOfXMMRegister(vvvv)); \
+ current += PrintRightXMMOperand(current); \
+ break; \
+ }
+ // Handle all the fma instructions here in the default branch since they
+ // have the same opcodes but differ by rex_w.
+ if (vex_w()) {
+ switch (opcode) {
+ FMA_SS_INSTRUCTION_LIST(DECLARE_FMA_DISASM)
+ FMA_PS_INSTRUCTION_LIST(DECLARE_FMA_DISASM)
+ default: {
+ UnimplementedInstruction();
+ }
+ }
+ } else {
+ switch (opcode) {
+ FMA_SD_INSTRUCTION_LIST(DECLARE_FMA_DISASM)
+ FMA_PD_INSTRUCTION_LIST(DECLARE_FMA_DISASM)
+ default: {
+ UnimplementedInstruction();
+ }
+ }
+ }
+#undef DECLARE_FMA_DISASM
+ }
}
} else if (vex_66() && vex_0f3a()) {
int mod, regop, rm, vvvv = vex_vreg();
diff --git a/deps/v8/src/diagnostics/objects-debug.cc b/deps/v8/src/diagnostics/objects-debug.cc
index 07a7eaf987..e32309923d 100644
--- a/deps/v8/src/diagnostics/objects-debug.cc
+++ b/deps/v8/src/diagnostics/objects-debug.cc
@@ -170,22 +170,23 @@ void TaggedIndex::TaggedIndexVerify(Isolate* isolate) {
void HeapObject::HeapObjectVerify(Isolate* isolate) {
CHECK(IsHeapObject());
- VerifyPointer(isolate, map(isolate));
- CHECK(map(isolate).IsMap());
+ PtrComprCageBase cage_base(isolate);
+ VerifyPointer(isolate, map(cage_base));
+ CHECK(map(cage_base).IsMap(cage_base));
- switch (map().instance_type()) {
+ switch (map(cage_base).instance_type()) {
#define STRING_TYPE_CASE(TYPE, size, name, CamelName) case TYPE:
STRING_TYPE_LIST(STRING_TYPE_CASE)
#undef STRING_TYPE_CASE
- if (IsConsString()) {
+ if (IsConsString(cage_base)) {
ConsString::cast(*this).ConsStringVerify(isolate);
- } else if (IsSlicedString()) {
+ } else if (IsSlicedString(cage_base)) {
SlicedString::cast(*this).SlicedStringVerify(isolate);
- } else if (IsThinString()) {
+ } else if (IsThinString(cage_base)) {
ThinString::cast(*this).ThinStringVerify(isolate);
- } else if (IsSeqString()) {
+ } else if (IsSeqString(cage_base)) {
SeqString::cast(*this).SeqStringVerify(isolate);
- } else if (IsExternalString()) {
+ } else if (IsExternalString(cage_base)) {
ExternalString::cast(*this).ExternalStringVerify(isolate);
} else {
String::cast(*this).StringVerify(isolate);
@@ -425,7 +426,7 @@ void JSObject::JSObjectVerify(Isolate* isolate) {
for (InternalIndex i : map().IterateOwnDescriptors()) {
PropertyDetails details = descriptors.GetDetails(i);
if (details.location() == PropertyLocation::kField) {
- DCHECK_EQ(kData, details.kind());
+ DCHECK_EQ(PropertyKind::kData, details.kind());
Representation r = details.representation();
FieldIndex index = FieldIndex::ForDescriptor(map(), i);
if (COMPRESS_POINTERS_BOOL && index.is_inobject()) {
@@ -616,6 +617,7 @@ void Context::ContextVerify(Isolate* isolate) {
void NativeContext::NativeContextVerify(Isolate* isolate) {
ContextVerify(isolate);
+ CHECK(retained_maps() == Smi::zero() || retained_maps().IsWeakArrayList());
CHECK_EQ(length(), NativeContext::NATIVE_CONTEXT_SLOTS);
CHECK_EQ(kVariableSizeSentinel, map().instance_size());
}
@@ -802,27 +804,27 @@ void String::StringVerify(Isolate* isolate) {
void ConsString::ConsStringVerify(Isolate* isolate) {
TorqueGeneratedClassVerifiers::ConsStringVerify(*this, isolate);
- CHECK_GE(this->length(), ConsString::kMinLength);
- CHECK(this->length() == this->first().length() + this->second().length());
- if (this->IsFlat()) {
+ CHECK_GE(length(), ConsString::kMinLength);
+ CHECK(length() == first().length() + second().length());
+ if (IsFlat(isolate)) {
// A flat cons can only be created by String::SlowFlatten.
// Afterwards, the first part may be externalized or internalized.
- CHECK(this->first().IsSeqString() || this->first().IsExternalString() ||
- this->first().IsThinString());
+ CHECK(first().IsSeqString() || first().IsExternalString() ||
+ first().IsThinString());
}
}
void ThinString::ThinStringVerify(Isolate* isolate) {
TorqueGeneratedClassVerifiers::ThinStringVerify(*this, isolate);
- CHECK(this->actual().IsInternalizedString());
- CHECK(this->actual().IsSeqString() || this->actual().IsExternalString());
+ CHECK(actual().IsInternalizedString());
+ CHECK(actual().IsSeqString() || actual().IsExternalString());
}
void SlicedString::SlicedStringVerify(Isolate* isolate) {
TorqueGeneratedClassVerifiers::SlicedStringVerify(*this, isolate);
- CHECK(!this->parent().IsConsString());
- CHECK(!this->parent().IsSlicedString());
- CHECK_GE(this->length(), SlicedString::kMinLength);
+ CHECK(!parent().IsConsString());
+ CHECK(!parent().IsSlicedString());
+ CHECK_GE(length(), SlicedString::kMinLength);
}
USE_TORQUE_VERIFIER(ExternalString)
@@ -1971,7 +1973,7 @@ bool DescriptorArray::IsSortedNoDuplicates() {
bool TransitionArray::IsSortedNoDuplicates() {
Name prev_key;
- PropertyKind prev_kind = kData;
+ PropertyKind prev_kind = PropertyKind::kData;
PropertyAttributes prev_attributes = NONE;
uint32_t prev_hash = 0;
@@ -1979,7 +1981,7 @@ bool TransitionArray::IsSortedNoDuplicates() {
Name key = GetSortedKey(i);
CHECK(key.HasHashCode());
uint32_t hash = key.hash();
- PropertyKind kind = kData;
+ PropertyKind kind = PropertyKind::kData;
PropertyAttributes attributes = NONE;
if (!TransitionsAccessor::IsSpecialTransition(key.GetReadOnlyRoots(),
key)) {
diff --git a/deps/v8/src/diagnostics/objects-printer.cc b/deps/v8/src/diagnostics/objects-printer.cc
index f3a215c8c3..cd44c9ff31 100644
--- a/deps/v8/src/diagnostics/objects-printer.cc
+++ b/deps/v8/src/diagnostics/objects-printer.cc
@@ -56,11 +56,12 @@ namespace {
void PrintHeapObjectHeaderWithoutMap(HeapObject object, std::ostream& os,
const char* id) {
+ PtrComprCageBase cage_base = GetPtrComprCageBaseSlow(object);
os << reinterpret_cast<void*>(object.ptr()) << ": [";
if (id != nullptr) {
os << id;
} else {
- os << object.map().instance_type();
+ os << object.map(cage_base).instance_type();
}
os << "]";
if (ReadOnlyHeap::Contains(object)) {
@@ -101,11 +102,14 @@ void PrintDictionaryContents(std::ostream& os, T dict) {
void HeapObject::PrintHeader(std::ostream& os, const char* id) {
PrintHeapObjectHeaderWithoutMap(*this, os, id);
- if (!IsMap()) os << "\n - map: " << Brief(map());
+ PtrComprCageBase cage_base = GetPtrComprCageBaseSlow(*this);
+ if (!IsMap(cage_base)) os << "\n - map: " << Brief(map(cage_base));
}
void HeapObject::HeapObjectPrint(std::ostream& os) {
- InstanceType instance_type = map().instance_type();
+ PtrComprCageBase cage_base = GetPtrComprCageBaseSlow(*this);
+
+ InstanceType instance_type = map(cage_base).instance_type();
if (instance_type < FIRST_NONSTRING_TYPE) {
String::cast(*this).StringPrint(os);
@@ -264,6 +268,10 @@ void HeapObject::HeapObjectPrint(std::ostream& os) {
case THIN_ONE_BYTE_STRING_TYPE:
case UNCACHED_EXTERNAL_STRING_TYPE:
case UNCACHED_EXTERNAL_ONE_BYTE_STRING_TYPE:
+ case SHARED_STRING_TYPE:
+ case SHARED_ONE_BYTE_STRING_TYPE:
+ case SHARED_THIN_STRING_TYPE:
+ case SHARED_THIN_ONE_BYTE_STRING_TYPE:
case JS_LAST_DUMMY_API_OBJECT_TYPE:
// TODO(all): Handle these types too.
os << "UNKNOWN TYPE " << map().instance_type();
@@ -1847,8 +1855,13 @@ void WasmContinuationObject::WasmContinuationObjectPrint(std::ostream& os) {
PrintHeader(os, "WasmContinuationObject");
os << "\n - parent: " << parent();
os << "\n - jmpbuf: " << jmpbuf();
- os << "\n - managed_stack: " << managed_stack();
- os << "\n - managed_jmpbuf: " << managed_jmpbuf();
+ os << "\n - stack: " << stack();
+ os << "\n";
+}
+
+void WasmSuspenderObject::WasmSuspenderObjectPrint(std::ostream& os) {
+ PrintHeader(os, "WasmSuspenderObject");
+ os << "\n - continuation: " << continuation();
os << "\n";
}
@@ -1905,8 +1918,7 @@ void WasmInstanceObject::WasmInstanceObjectPrint(std::ostream& os) {
// Never called directly, as WasmFunctionData is an "abstract" class.
void WasmFunctionData::WasmFunctionDataPrint(std::ostream& os) {
- os << "\n - target: " << reinterpret_cast<void*>(foreign_address());
- os << "\n - ref: " << Brief(ref());
+ os << "\n - internal: " << Brief(internal());
os << "\n - wrapper_code: " << Brief(TorqueGeneratedClass::wrapper_code());
}
@@ -1923,8 +1935,6 @@ void WasmExportedFunctionData::WasmExportedFunctionDataPrint(std::ostream& os) {
void WasmJSFunctionData::WasmJSFunctionDataPrint(std::ostream& os) {
PrintHeader(os, "WasmJSFunctionData");
WasmFunctionDataPrint(os);
- os << "\n - wasm_to_js_wrapper_code: "
- << Brief(raw_wasm_to_js_wrapper_code());
os << "\n - serialized_return_count: " << serialized_return_count();
os << "\n - serialized_parameter_count: " << serialized_parameter_count();
os << "\n - serialized_signature: " << Brief(serialized_signature());
@@ -1933,12 +1943,21 @@ void WasmJSFunctionData::WasmJSFunctionDataPrint(std::ostream& os) {
void WasmApiFunctionRef::WasmApiFunctionRefPrint(std::ostream& os) {
PrintHeader(os, "WasmApiFunctionRef");
- os << "\n - isolate_root: " << reinterpret_cast<void*>(foreign_address());
+ os << "\n - isolate_root: " << reinterpret_cast<void*>(isolate_root());
os << "\n - native_context: " << Brief(native_context());
os << "\n - callable: " << Brief(callable());
os << "\n";
}
+void WasmInternalFunction::WasmInternalFunctionPrint(std::ostream& os) {
+ PrintHeader(os, "WasmInternalFunction");
+ os << "\n - call target: " << reinterpret_cast<void*>(foreign_address());
+ os << "\n - ref: " << Brief(ref());
+ os << "\n - external: " << Brief(external());
+ os << "\n - code: " << Brief(code());
+ os << "\n";
+}
+
void WasmCapiFunctionData::WasmCapiFunctionDataPrint(std::ostream& os) {
PrintHeader(os, "WasmCapiFunctionData");
WasmFunctionDataPrint(os);
@@ -2793,7 +2812,7 @@ V8_EXPORT_PRIVATE extern void _v8_internal_Print_Code(void* object) {
if (!isolate->heap()->InSpaceSlow(address, i::CODE_SPACE) &&
!isolate->heap()->InSpaceSlow(address, i::CODE_LO_SPACE) &&
- !i::InstructionStream::PcIsOffHeap(isolate, address) &&
+ !i::OffHeapInstructionStream::PcIsOffHeap(isolate, address) &&
!i::ReadOnlyHeap::Contains(address)) {
i::PrintF(
"%p is not within the current isolate's code, read_only or embedded "
diff --git a/deps/v8/src/diagnostics/perf-jit.cc b/deps/v8/src/diagnostics/perf-jit.cc
index ebffa5ee15..35b47e2b1c 100644
--- a/deps/v8/src/diagnostics/perf-jit.cc
+++ b/deps/v8/src/diagnostics/perf-jit.cc
@@ -393,8 +393,6 @@ void PerfJitLogger::LogWriteDebugInfo(Handle<Code> code,
entry.line_number_ = info.line + 1;
entry.column_ = info.column + 1;
LogWriteBytes(reinterpret_cast<const char*>(&entry), sizeof(entry));
- // The extracted name may point into heap-objects, thus disallow GC.
- DisallowGarbageCollection no_gc;
std::unique_ptr<char[]> name_storage;
base::Vector<const char> name_string =
GetScriptName(info, &name_storage, no_gc);
diff --git a/deps/v8/src/diagnostics/ppc/disasm-ppc.cc b/deps/v8/src/diagnostics/ppc/disasm-ppc.cc
index 7d366a6ba1..03868d5357 100644
--- a/deps/v8/src/diagnostics/ppc/disasm-ppc.cc
+++ b/deps/v8/src/diagnostics/ppc/disasm-ppc.cc
@@ -1150,6 +1150,10 @@ void Decoder::DecodeExt2(Instruction* instr) {
Format(instr, "ldbrx 'rt, 'ra, 'rb");
return;
}
+ case LHBRX: {
+ Format(instr, "lhbrx 'rt, 'ra, 'rb");
+ return;
+ }
case LWBRX: {
Format(instr, "lwbrx 'rt, 'ra, 'rb");
return;
diff --git a/deps/v8/src/diagnostics/riscv64/disasm-riscv64.cc b/deps/v8/src/diagnostics/riscv64/disasm-riscv64.cc
index dc6d0572ea..02d3bbd9cd 100644
--- a/deps/v8/src/diagnostics/riscv64/disasm-riscv64.cc
+++ b/deps/v8/src/diagnostics/riscv64/disasm-riscv64.cc
@@ -398,7 +398,7 @@ void Decoder::PrintRvcImm8B(Instruction* instr) {
void Decoder::PrintRvvVm(Instruction* instr) {
uint8_t imm = instr->RvvVM();
if (imm == 0) {
- out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_, " vm");
+ out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_, " v0.t");
}
}
@@ -1923,6 +1923,9 @@ void Decoder::DecodeRvvIVV(Instruction* instr) {
case RO_V_VSSUB_VV:
Format(instr, "vssub.vv 'vd, 'vs2, 'vs1'vm");
break;
+ case RO_V_VSSUBU_VV:
+ Format(instr, "vssubu.vv 'vd, 'vs2, 'vs1'vm");
+ break;
case RO_V_VMIN_VV:
Format(instr, "vmin.vv 'vd, 'vs2, 'vs1'vm");
break;
@@ -1992,6 +1995,18 @@ void Decoder::DecodeRvvIVV(Instruction* instr) {
case RO_V_VNCLIPU_WV:
Format(instr, "vnclipu.wv 'vd, 'vs2, 'vs1");
break;
+ case RO_V_VSLL_VV:
+ Format(instr, "vsll.vv 'vd, 'vs2, 'vs1");
+ break;
+ case RO_V_VSRL_VV:
+ Format(instr, "vsrl.vv 'vd, 'vs2, 'vs1");
+ break;
+ case RO_V_VSRA_VV:
+ Format(instr, "vsra.vv 'vd, 'vs2, 'vs1");
+ break;
+ case RO_V_VSMUL_VV:
+ Format(instr, "vsmul.vv 'vd, 'vs2, 'vs1");
+ break;
default:
UNSUPPORTED_RISCV();
break;
@@ -2056,6 +2071,9 @@ void Decoder::DecodeRvvIVI(Instruction* instr) {
case RO_V_VSRL_VI:
Format(instr, "vsrl.vi 'vd, 'vs2, 'uimm5'vm");
break;
+ case RO_V_VSRA_VI:
+ Format(instr, "vsra.vi 'vd, 'vs2, 'uimm5'vm");
+ break;
case RO_V_VSLL_VI:
Format(instr, "vsll.vi 'vd, 'vs2, 'uimm5'vm");
break;
@@ -2184,12 +2202,18 @@ void Decoder::DecodeRvvIVX(Instruction* instr) {
case RO_V_VSRL_VX:
Format(instr, "vsrl.vx 'vd, 'vs2, 'rs1");
break;
+ case RO_V_VSRA_VX:
+ Format(instr, "vsra.vx 'vd, 'vs2, 'rs1");
+ break;
case RO_V_VNCLIP_WX:
Format(instr, "vnclip.wx 'vd, 'vs2, 'rs1");
break;
case RO_V_VNCLIPU_WX:
Format(instr, "vnclipu.wx 'vd, 'vs2, 'rs1");
break;
+ case RO_V_VSMUL_VX:
+      Format(instr, "vsmul.vx 'vd, 'vs2, 'rs1");
+ break;
default:
UNSUPPORTED_RISCV();
break;
@@ -2199,6 +2223,14 @@ void Decoder::DecodeRvvIVX(Instruction* instr) {
void Decoder::DecodeRvvMVV(Instruction* instr) {
DCHECK_EQ(instr->InstructionBits() & (kBaseOpcodeMask | kFunct3Mask), OP_MVV);
switch (instr->InstructionBits() & kVTypeMask) {
+ case RO_V_VMUNARY0: {
+ if (instr->Vs1Value() == VID_V) {
+ Format(instr, "vid.v 'rd, 'vs2'vm");
+ } else {
+ UNSUPPORTED_RISCV();
+ }
+ break;
+ }
case RO_V_VWXUNARY0:
if (instr->Vs1Value() == 0x0) {
Format(instr, "vmv.x.s 'rd, 'vs2");
@@ -2218,6 +2250,50 @@ void Decoder::DecodeRvvMVV(Instruction* instr) {
case RO_V_VREDMINU:
Format(instr, "vredminu.vs 'vd, 'vs2, 'vs1'vm");
break;
+ case RO_V_VXUNARY0:
+ if (instr->Vs1Value() == 0b00010) {
+ Format(instr, "vzext.vf8 'vd, 'vs2'vm");
+ } else if (instr->Vs1Value() == 0b00011) {
+ Format(instr, "vsext.vf8 'vd, 'vs2'vm");
+ } else if (instr->Vs1Value() == 0b00100) {
+ Format(instr, "vzext.vf4 'vd, 'vs2'vm");
+ } else if (instr->Vs1Value() == 0b00101) {
+ Format(instr, "vsext.vf4 'vd, 'vs2'vm");
+ } else if (instr->Vs1Value() == 0b00110) {
+ Format(instr, "vzext.vf2 'vd, 'vs2'vm");
+ } else if (instr->Vs1Value() == 0b00111) {
+ Format(instr, "vsext.vf2 'vd, 'vs2'vm");
+ } else {
+ UNSUPPORTED_RISCV();
+ }
+ break;
+ case RO_V_VWMUL_VV:
+ Format(instr, "vwmul.vv 'vd, 'vs2, 'vs1'vm");
+ break;
+ case RO_V_VWMULU_VV:
+ Format(instr, "vwmulu.vv 'vd, 'vs2, 'vs1'vm");
+ break;
+ case RO_V_VMUL_VV:
+ Format(instr, "vmul.vv 'vd, 'vs2, 'vs1'vm");
+ break;
+ case RO_V_VMULHU_VV:
+ Format(instr, "vmulhu.vv 'vd, 'vs2, 'vs1'vm");
+ break;
+ case RO_V_VDIV_VV:
+ Format(instr, "vdiv.vv 'vd, 'vs2, 'vs1'vm");
+ break;
+ case RO_V_VDIVU_VV:
+ Format(instr, "vdivu.vv 'vd, 'vs2, 'vs1'vm");
+ break;
+ case RO_V_VWADDU_VV:
+ Format(instr, "vwaddu.vv 'vd, 'vs2, 'vs1'vm");
+ break;
+ case RO_V_VWADD_VV:
+ Format(instr, "vwadd.vv 'vd, 'vs2, 'vs1'vm");
+ break;
+ case RO_V_VCOMPRESS_VV:
+ Format(instr, "vcompress.vm 'vd, 'vs2, 'vs1'vm");
+ break;
default:
UNSUPPORTED_RISCV();
break;
@@ -2234,6 +2310,33 @@ void Decoder::DecodeRvvMVX(Instruction* instr) {
UNSUPPORTED_RISCV();
}
break;
+ case RO_V_VWMUL_VX:
+ Format(instr, "vwmul.vx 'vd, 'vs2, 'rs1'vm");
+ break;
+ case RO_V_VWMULU_VX:
+ Format(instr, "vwmulu.vx 'vd, 'vs2, 'rs1'vm");
+ break;
+ case RO_V_VMUL_VX:
+ Format(instr, "vmul.vx 'vd, 'vs2, 'rs1'vm");
+ break;
+ case RO_V_VMULHU_VX:
+ Format(instr, "vmulhu.vx 'vd, 'vs2, 'rs1'vm");
+ break;
+ case RO_V_VDIV_VX:
+ Format(instr, "vdiv.vx 'vd, 'vs2, 'rs1'vm");
+ break;
+ case RO_V_VDIVU_VX:
+ Format(instr, "vdivu.vx 'vd, 'vs2, 'rs1'vm");
+ break;
+ case RO_V_VWADDUW_VX:
+ Format(instr, "vwaddu.wx 'vd, 'vs2, 'rs1'vm");
+ break;
+ case RO_V_VWADDU_VX:
+ Format(instr, "vwaddu.vx 'vd, 'vs2, 'rs1'vm");
+ break;
+ case RO_V_VWADD_VX:
+ Format(instr, "vwadd.vx 'vd, 'vs2, 'rs1'vm");
+ break;
default:
UNSUPPORTED_RISCV();
break;
@@ -2254,12 +2357,33 @@ void Decoder::DecodeRvvFVV(Instruction* instr) {
case VFNCVT_F_F_W:
Format(instr, "vfncvt.f.f.w 'vd, 'vs2'vm");
break;
+ case VFNCVT_X_F_W:
+ Format(instr, "vfncvt.x.f.w 'vd, 'vs2'vm");
+ break;
+ case VFNCVT_XU_F_W:
+ Format(instr, "vfncvt.xu.f.w 'vd, 'vs2'vm");
+ break;
case VFCVT_F_X_V:
Format(instr, "vfcvt.f.x.v 'vd, 'vs2'vm");
break;
case VFCVT_F_XU_V:
Format(instr, "vfcvt.f.xu.v 'vd, 'vs2'vm");
break;
+ case VFWCVT_XU_F_V:
+ Format(instr, "vfwcvt.xu.f.v 'vd, 'vs2'vm");
+ break;
+ case VFWCVT_X_F_V:
+ Format(instr, "vfwcvt.x.f.v 'vd, 'vs2'vm");
+ break;
+ case VFWCVT_F_X_V:
+ Format(instr, "vfwcvt.f.x.v 'vd, 'vs2'vm");
+ break;
+ case VFWCVT_F_XU_V:
+ Format(instr, "vfwcvt.f.xu.v 'vd, 'vs2'vm");
+ break;
+ case VFWCVT_F_F_V:
+ Format(instr, "vfwcvt.f.f.v 'vd, 'vs2'vm");
+ break;
default:
UNSUPPORTED_RISCV();
break;
@@ -2270,6 +2394,9 @@ void Decoder::DecodeRvvFVV(Instruction* instr) {
case VFCLASS_V:
Format(instr, "vfclass.v 'vd, 'vs2'vm");
break;
+ case VFSQRT_V:
+ Format(instr, "vfsqrt.v 'vd, 'vs2'vm");
+ break;
default:
break;
}
@@ -2289,6 +2416,9 @@ void Decoder::DecodeRvvFVV(Instruction* instr) {
case RO_V_VFMAX_VV:
Format(instr, "vfmax.vv 'vd, 'vs2, 'vs1'vm");
break;
+ case RO_V_VFREDMAX_VV:
+ Format(instr, "vfredmax.vs 'vd, 'vs2, 'vs1'vm");
+ break;
case RO_V_VFMIN_VV:
Format(instr, "vfmin.vv 'vd, 'vs2, 'vs1'vm");
break;
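
The RVV cases added above all go through the disassembler's format-string convention: tokens introduced by a single quote ('vd, 'vs2, 'rs1, 'uimm5, 'vm) are placeholders that Format() replaces with text decoded from the instruction fields, with 'vm expanding to the ", v0.t" mask suffix only for masked instructions (per the PrintRvvVm change above). A minimal standalone sketch of that substitution idea, with a hypothetical ExpandFormat helper and token table rather than V8's actual Decoder interface:

// Minimal sketch of placeholder-driven disassembly formatting; ExpandFormat
// and the token values below are hypothetical stand-ins.
#include <cstdio>
#include <map>
#include <string>

std::string ExpandFormat(const std::string& format,
                         const std::map<std::string, std::string>& tokens) {
  std::string out;
  size_t i = 0;
  while (i < format.size()) {
    if (format[i] != '\'') {  // Ordinary characters are copied through.
      out += format[i++];
      continue;
    }
    ++i;  // Skip the quote, then greedily match the longest known token.
    size_t len = 0;
    for (const auto& entry : tokens) {
      const std::string& name = entry.first;
      if (format.compare(i, name.size(), name) == 0 && name.size() > len) {
        len = name.size();
      }
    }
    if (len == 0) {  // Unknown token: keep the quote literally.
      out += '\'';
      continue;
    }
    out += tokens.at(format.substr(i, len));
    i += len;
  }
  return out;
}

int main() {
  // 'vd/'vs2/'rs1 stand for registers decoded from instruction fields; 'vm
  // contributes ", v0.t" only when the instruction is masked.
  std::map<std::string, std::string> tokens = {
      {"vd", "v8"}, {"vs2", "v16"}, {"rs1", "a0"}, {"vm", ", v0.t"}};
  std::printf("%s\n",
              ExpandFormat("vsra.vx 'vd, 'vs2, 'rs1'vm", tokens).c_str());
  return 0;
}
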
diff --git a/deps/v8/src/diagnostics/unwinding-info-win64.cc b/deps/v8/src/diagnostics/unwinding-info-win64.cc
index 2a0cf4ff02..d50767421a 100644
--- a/deps/v8/src/diagnostics/unwinding-info-win64.cc
+++ b/deps/v8/src/diagnostics/unwinding-info-win64.cc
@@ -22,36 +22,6 @@
// This has to come after windows.h.
#include <versionhelpers.h> // For IsWindows8OrGreater().
-// Forward declaration to keep this independent of Win8
-NTSYSAPI
-DWORD
-NTAPI
-RtlAddGrowableFunctionTable(
- _Out_ PVOID* DynamicTable,
- _In_reads_(MaximumEntryCount) PRUNTIME_FUNCTION FunctionTable,
- _In_ DWORD EntryCount,
- _In_ DWORD MaximumEntryCount,
- _In_ ULONG_PTR RangeBase,
- _In_ ULONG_PTR RangeEnd
- );
-
-
-NTSYSAPI
-void
-NTAPI
-RtlGrowFunctionTable(
- _Inout_ PVOID DynamicTable,
- _In_ DWORD NewEntryCount
- );
-
-
-NTSYSAPI
-void
-NTAPI
-RtlDeleteGrowableFunctionTable(
- _In_ PVOID DynamicTable
- );
-
namespace v8 {
namespace internal {
namespace win64_unwindinfo {
diff --git a/deps/v8/src/diagnostics/x64/disasm-x64.cc b/deps/v8/src/diagnostics/x64/disasm-x64.cc
index 0dd0e4bdc7..40c435f0f3 100644
--- a/deps/v8/src/diagnostics/x64/disasm-x64.cc
+++ b/deps/v8/src/diagnostics/x64/disasm-x64.cc
@@ -14,6 +14,7 @@
#include "src/base/memory.h"
#include "src/base/strings.h"
#include "src/base/v8-fallthrough.h"
+#include "src/codegen/x64/fma-instr.h"
#include "src/codegen/x64/register-x64.h"
#include "src/codegen/x64/sse-instr.h"
#include "src/common/globals.h"
@@ -893,96 +894,6 @@ int DisassemblerX64::AVXInstruction(byte* data) {
AppendToBuffer("vbroadcastss %s,", NameOfAVXRegister(regop));
current += PrintRightXMMOperand(current);
break;
- case 0x98:
- AppendToBuffer("vfmadd132p%c %s,%s,", float_size_code(),
- NameOfXMMRegister(regop), NameOfXMMRegister(vvvv));
- current += PrintRightXMMOperand(current);
- break;
- case 0x99:
- AppendToBuffer("vfmadd132s%c %s,%s,", float_size_code(),
- NameOfAVXRegister(regop), NameOfAVXRegister(vvvv));
- current += PrintRightAVXOperand(current);
- break;
- case 0xA8:
- AppendToBuffer("vfmadd213p%c %s,%s,", float_size_code(),
- NameOfXMMRegister(regop), NameOfXMMRegister(vvvv));
- current += PrintRightXMMOperand(current);
- break;
- case 0xA9:
- AppendToBuffer("vfmadd213s%c %s,%s,", float_size_code(),
- NameOfAVXRegister(regop), NameOfAVXRegister(vvvv));
- current += PrintRightAVXOperand(current);
- break;
- case 0xB8:
- AppendToBuffer("vfmadd231p%c %s,%s,", float_size_code(),
- NameOfAVXRegister(regop), NameOfAVXRegister(vvvv));
- current += PrintRightAVXOperand(current);
- break;
- case 0xB9:
- AppendToBuffer("vfmadd231s%c %s,%s,", float_size_code(),
- NameOfAVXRegister(regop), NameOfAVXRegister(vvvv));
- current += PrintRightAVXOperand(current);
- break;
- case 0x9B:
- AppendToBuffer("vfmsub132s%c %s,%s,", float_size_code(),
- NameOfAVXRegister(regop), NameOfAVXRegister(vvvv));
- current += PrintRightAVXOperand(current);
- break;
- case 0x9C:
- AppendToBuffer("vfnmadd132p%c %s,%s,", float_size_code(),
- NameOfXMMRegister(regop), NameOfXMMRegister(vvvv));
- current += PrintRightXMMOperand(current);
- break;
- case 0xAB:
- AppendToBuffer("vfmsub213s%c %s,%s,", float_size_code(),
- NameOfAVXRegister(regop), NameOfAVXRegister(vvvv));
- current += PrintRightAVXOperand(current);
- break;
- case 0xAC:
- AppendToBuffer("vfnmadd213p%c %s,%s,", float_size_code(),
- NameOfXMMRegister(regop), NameOfXMMRegister(vvvv));
- current += PrintRightXMMOperand(current);
- break;
- case 0xBB:
- AppendToBuffer("vfmsub231s%c %s,%s,", float_size_code(),
- NameOfAVXRegister(regop), NameOfAVXRegister(vvvv));
- current += PrintRightAVXOperand(current);
- break;
- case 0xBC:
- AppendToBuffer("vfnmadd231p%c %s,%s,", float_size_code(),
- NameOfAVXRegister(regop), NameOfAVXRegister(vvvv));
- current += PrintRightAVXOperand(current);
- break;
- case 0x9D:
- AppendToBuffer("vfnmadd132s%c %s,%s,", float_size_code(),
- NameOfAVXRegister(regop), NameOfAVXRegister(vvvv));
- current += PrintRightAVXOperand(current);
- break;
- case 0xAD:
- AppendToBuffer("vfnmadd213s%c %s,%s,", float_size_code(),
- NameOfAVXRegister(regop), NameOfAVXRegister(vvvv));
- current += PrintRightAVXOperand(current);
- break;
- case 0xBD:
- AppendToBuffer("vfnmadd231s%c %s,%s,", float_size_code(),
- NameOfAVXRegister(regop), NameOfAVXRegister(vvvv));
- current += PrintRightAVXOperand(current);
- break;
- case 0x9F:
- AppendToBuffer("vfnmsub132s%c %s,%s,", float_size_code(),
- NameOfAVXRegister(regop), NameOfAVXRegister(vvvv));
- current += PrintRightAVXOperand(current);
- break;
- case 0xAF:
- AppendToBuffer("vfnmsub213s%c %s,%s,", float_size_code(),
- NameOfAVXRegister(regop), NameOfAVXRegister(vvvv));
- current += PrintRightAVXOperand(current);
- break;
- case 0xBF:
- AppendToBuffer("vfnmsub231s%c %s,%s,", float_size_code(),
- NameOfAVXRegister(regop), NameOfAVXRegister(vvvv));
- current += PrintRightAVXOperand(current);
- break;
case 0xF7:
AppendToBuffer("shlx%c %s,", operand_size_code(),
NameOfCPURegister(regop));
@@ -1022,8 +933,35 @@ int DisassemblerX64::AVXInstruction(byte* data) {
AVX2_BROADCAST_LIST(DISASSEMBLE_AVX2_BROADCAST)
#undef DISASSEMBLE_AVX2_BROADCAST
- default:
- UnimplementedInstruction();
+ default: {
+#define DECLARE_FMA_DISASM(instruction, _1, _2, _3, _4, _5, code) \
+ case 0x##code: { \
+ AppendToBuffer(#instruction " %s,%s,", NameOfAVXRegister(regop), \
+ NameOfAVXRegister(vvvv)); \
+ current += PrintRightAVXOperand(current); \
+ break; \
+ }
+ // Handle all the fma instructions here in the default branch since they
+ // have the same opcodes but differ by rex_w.
+ if (rex_w()) {
+ switch (opcode) {
+ FMA_SS_INSTRUCTION_LIST(DECLARE_FMA_DISASM)
+ FMA_PS_INSTRUCTION_LIST(DECLARE_FMA_DISASM)
+ default: {
+ UnimplementedInstruction();
+ }
+ }
+ } else {
+ switch (opcode) {
+ FMA_SD_INSTRUCTION_LIST(DECLARE_FMA_DISASM)
+ FMA_PD_INSTRUCTION_LIST(DECLARE_FMA_DISASM)
+ default: {
+ UnimplementedInstruction();
+ }
+ }
+ }
+#undef DECLARE_FMA_DISASM
+ }
}
} else if (vex_66() && vex_0f3a()) {
int mod, regop, rm, vvvv = vex_vreg();
@@ -1956,7 +1894,6 @@ int DisassemblerX64::TwoByteOpcodeInstruction(byte* data) {
} else if (opcode == 0xD7) {
current += PrintOperands("pmovmskb", OPER_XMMREG_OP_ORDER, current);
} else {
- const char* mnemonic;
#define SSE2_CASE(instruction, notUsed1, notUsed2, opcode) \
case 0x##opcode: \
mnemonic = "" #instruction; \
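
The rewritten default branch above replaces the long run of hand-written vfmadd/vfmsub/vfnmadd cases with a single DECLARE_FMA_DISASM definition expanded over the FMA_*_INSTRUCTION_LIST X-macros from the newly included fma-instr.h, selecting the single-precision lists when REX.W is set and the double-precision lists otherwise. A small self-contained sketch of that X-macro dispatch pattern, using a made-up instruction list rather than the real ones:

// Sketch of the X-macro dispatch pattern; DEMO_FMA_LIST is made up, the real
// lists live in src/codegen/x64/fma-instr.h.
#include <cstdio>

// Each entry: name, <unused metadata columns>, opcode byte.
#define DEMO_FMA_LIST(V)            \
  V(vfmadd132ss, 0, 0, 0, 0, 0, 99) \
  V(vfmadd213ss, 0, 0, 0, 0, 0, A9) \
  V(vfmadd231ss, 0, 0, 0, 0, 0, B9)

const char* DisassembleFma(unsigned char opcode) {
  switch (opcode) {
#define DECLARE_CASE(instruction, _1, _2, _3, _4, _5, code) \
  case 0x##code:                                            \
    return #instruction;
    DEMO_FMA_LIST(DECLARE_CASE)
#undef DECLARE_CASE
    default:
      return "unimplemented";
  }
}

int main() {
  std::printf("0xA9 -> %s\n", DisassembleFma(0xA9));  // vfmadd213ss
  std::printf("0x00 -> %s\n", DisassembleFma(0x00));  // unimplemented
  return 0;
}
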
diff --git a/deps/v8/src/execution/DEPS b/deps/v8/src/execution/DEPS
new file mode 100644
index 0000000000..39477e262d
--- /dev/null
+++ b/deps/v8/src/execution/DEPS
@@ -0,0 +1,5 @@
+specific_include_rules = {
+ "isolate-data\.h": [
+ "+src/heap/linear-allocation-area.h",
+ ],
+}
\ No newline at end of file
diff --git a/deps/v8/src/execution/arm64/simulator-arm64.cc b/deps/v8/src/execution/arm64/simulator-arm64.cc
index 8f1feca214..d48789969e 100644
--- a/deps/v8/src/execution/arm64/simulator-arm64.cc
+++ b/deps/v8/src/execution/arm64/simulator-arm64.cc
@@ -14,7 +14,6 @@
#include <cstdarg>
#include <type_traits>
-#include "src/base/lazy-instance.h"
#include "src/base/overflowing-math.h"
#include "src/base/platform/platform.h"
#include "src/base/platform/wrappers.h"
@@ -503,6 +502,126 @@ void UnsafeDirectGetterCall(int64_t function, int64_t arg0, int64_t arg1) {
target(arg0, arg1);
}
+using MixedRuntimeCall_0 = AnyCType (*)();
+
+#define BRACKETS(ident, N) ident[N]
+
+#define REP_0(expr, FMT)
+#define REP_1(expr, FMT) FMT(expr, 0)
+#define REP_2(expr, FMT) REP_1(expr, FMT), FMT(expr, 1)
+#define REP_3(expr, FMT) REP_2(expr, FMT), FMT(expr, 2)
+#define REP_4(expr, FMT) REP_3(expr, FMT), FMT(expr, 3)
+#define REP_5(expr, FMT) REP_4(expr, FMT), FMT(expr, 4)
+#define REP_6(expr, FMT) REP_5(expr, FMT), FMT(expr, 5)
+#define REP_7(expr, FMT) REP_6(expr, FMT), FMT(expr, 6)
+#define REP_8(expr, FMT) REP_7(expr, FMT), FMT(expr, 7)
+#define REP_9(expr, FMT) REP_8(expr, FMT), FMT(expr, 8)
+#define REP_10(expr, FMT) REP_9(expr, FMT), FMT(expr, 9)
+#define REP_11(expr, FMT) REP_10(expr, FMT), FMT(expr, 10)
+#define REP_12(expr, FMT) REP_11(expr, FMT), FMT(expr, 11)
+#define REP_13(expr, FMT) REP_12(expr, FMT), FMT(expr, 12)
+#define REP_14(expr, FMT) REP_13(expr, FMT), FMT(expr, 13)
+#define REP_15(expr, FMT) REP_14(expr, FMT), FMT(expr, 14)
+#define REP_16(expr, FMT) REP_15(expr, FMT), FMT(expr, 15)
+#define REP_17(expr, FMT) REP_16(expr, FMT), FMT(expr, 16)
+#define REP_18(expr, FMT) REP_17(expr, FMT), FMT(expr, 17)
+#define REP_19(expr, FMT) REP_18(expr, FMT), FMT(expr, 18)
+#define REP_20(expr, FMT) REP_19(expr, FMT), FMT(expr, 19)
+
+#define GEN_MAX_PARAM_COUNT(V) \
+ V(0) \
+ V(1) \
+ V(2) \
+ V(3) \
+ V(4) \
+ V(5) \
+ V(6) \
+ V(7) \
+ V(8) \
+ V(9) \
+ V(10) \
+ V(11) \
+ V(12) \
+ V(13) \
+ V(14) \
+ V(15) \
+ V(16) \
+ V(17) \
+ V(18) \
+ V(19) \
+ V(20)
+
+#define MIXED_RUNTIME_CALL(N) \
+ using MixedRuntimeCall_##N = AnyCType (*)(REP_##N(AnyCType arg, CONCAT));
+
+GEN_MAX_PARAM_COUNT(MIXED_RUNTIME_CALL)
+#undef MIXED_RUNTIME_CALL
+
+#define CALL_ARGS(N) REP_##N(args, BRACKETS)
+#define CALL_TARGET_VARARG(N) \
+ if (signature.ParameterCount() == N) { /* NOLINT */ \
+ MixedRuntimeCall_##N target = \
+ reinterpret_cast<MixedRuntimeCall_##N>(target_address); \
+ result = target(CALL_ARGS(N)); \
+ } else /* NOLINT */
+
+void Simulator::CallAnyCTypeFunction(Address target_address,
+ const EncodedCSignature& signature) {
+ TraceSim("Type: mixed types BUILTIN_CALL\n");
+
+ const int64_t* stack_pointer = reinterpret_cast<int64_t*>(sp());
+ const double* double_stack_pointer = reinterpret_cast<double*>(sp());
+ int num_gp_params = 0, num_fp_params = 0, num_stack_params = 0;
+
+ CHECK_LE(signature.ParameterCount(), kMaxCParameters);
+ static_assert(sizeof(AnyCType) == 8, "AnyCType is assumed to be 64-bit.");
+ AnyCType args[kMaxCParameters];
+ // The first 8 parameters of each type (GP or FP) are placed in corresponding
+ // registers. The rest are expected to be on the stack, where each parameter
+ // type counts on its own. For example a function like:
+ // foo(int i1, ..., int i9, float f1, float f2) will use up all 8 GP
+ // registers, place i9 on the stack, and place f1 and f2 in FP registers.
+ // Source: https://developer.arm.com/documentation/ihi0055/d/, section
+ // "Parameter Passing".
+ for (int i = 0; i < signature.ParameterCount(); ++i) {
+ if (signature.IsFloat(i)) {
+ if (num_fp_params < 8) {
+ args[i].double_value = dreg(num_fp_params++);
+ } else {
+ args[i].double_value = double_stack_pointer[num_stack_params++];
+ }
+ } else {
+ if (num_gp_params < 8) {
+ args[i].int64_value = xreg(num_gp_params++);
+ } else {
+ args[i].int64_value = stack_pointer[num_stack_params++];
+ }
+ }
+ }
+ AnyCType result;
+ GEN_MAX_PARAM_COUNT(CALL_TARGET_VARARG)
+ /* else */ {
+ UNREACHABLE();
+ }
+ static_assert(20 == kMaxCParameters,
+ "If you've changed kMaxCParameters, please change the "
+ "GEN_MAX_PARAM_COUNT macro.");
+
+#undef CALL_TARGET_VARARG
+#undef CALL_ARGS
+#undef GEN_MAX_PARAM_COUNT
+
+#ifdef DEBUG
+ CorruptAllCallerSavedCPURegisters();
+#endif
+
+ if (signature.IsReturnFloat()) {
+ set_dreg(0, result.double_value);
+ } else {
+ set_xreg(0, result.int64_value);
+ }
+}
+
void Simulator::DoRuntimeCall(Instruction* instr) {
Redirection* redirection = Redirection::FromInstruction(instr);
@@ -523,6 +642,20 @@ void Simulator::DoRuntimeCall(Instruction* instr) {
FATAL("ALIGNMENT EXCEPTION");
}
+ Address func_addr =
+ reinterpret_cast<Address>(redirection->external_function());
+ SimulatorData* simulator_data = isolate_->simulator_data();
+ DCHECK_NOT_NULL(simulator_data);
+ const EncodedCSignature& signature =
+ simulator_data->GetSignatureForTarget(func_addr);
+ if (signature.IsValid()) {
+ CHECK(redirection->type() == ExternalReference::FAST_C_CALL);
+ CallAnyCTypeFunction(external, signature);
+ set_lr(return_address);
+ set_pc(return_address);
+ return;
+ }
+
int64_t* stack_pointer = reinterpret_cast<int64_t*>(sp());
const int64_t arg0 = xreg(0);
@@ -552,17 +685,6 @@ void Simulator::DoRuntimeCall(Instruction* instr) {
TraceSim("Type: Unknown.\n");
UNREACHABLE();
- // FAST_C_CALL is temporarily handled here as well, because we lack
- // proper support for direct C calls with FP params in the simulator.
- // The generic BUILTIN_CALL path assumes all parameters are passed in
- // the GP registers, thus supporting calling the slow callback without
- // crashing. The reason for that is that in the mjsunit tests we check
- // the `fast_c_api.supports_fp_params` (which is false on non-simulator
- // builds for arm/arm64), thus we expect that the slow path will be
- // called. And since the slow path passes the arguments as a `const
- // FunctionCallbackInfo<Value>&` (which is a GP argument), the call is
- // made correctly.
- case ExternalReference::FAST_C_CALL:
case ExternalReference::BUILTIN_CALL:
#if defined(V8_OS_WIN)
{
@@ -6264,4 +6386,6 @@ V8_EXPORT_PRIVATE extern bool _v8_internal_Simulator_ExecDebugCommand(
return simulator->ExecDebugCommand(std::move(command_copy));
}
+#undef BRACKETS
+
#endif // USE_SIMULATOR
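
The block of REP_N / GEN_MAX_PARAM_COUNT / MIXED_RUNTIME_CALL macros added above stamps out one MixedRuntimeCall_N function-pointer type and one CALL_TARGET_VARARG dispatch branch per arity from 0 to kMaxCParameters, so CallAnyCTypeFunction can forward a mixed GP/FP argument array to the real C target once DoRuntimeCall finds a valid EncodedCSignature for it. A rough sketch of what the expansion boils down to for arity 2, assuming CONCAT(arg, i) token-pastes to argi and using a stand-in AnyCType union rather than V8's definition:

// Sketch only: AnyCType and AddInts are stand-ins illustrating the expansion.
#include <cstdio>

union AnyCType {
  long long int64_value;
  double double_value;
};

// MIXED_RUNTIME_CALL(2) expands (via REP_2) to roughly this type alias:
using MixedRuntimeCall_2 = AnyCType (*)(AnyCType arg0, AnyCType arg1);

AnyCType AddInts(AnyCType a, AnyCType b) {
  AnyCType r;
  r.int64_value = a.int64_value + b.int64_value;
  return r;
}

int main() {
  // CALL_TARGET_VARARG(2) reduces to: pick the 2-argument pointer type and
  // forward args[0], args[1] (CALL_ARGS(2) -> args[0], args[1]).
  AnyCType args[2];
  args[0].int64_value = 40;
  args[1].int64_value = 2;
  MixedRuntimeCall_2 target = AddInts;
  AnyCType result = target(args[0], args[1]);
  std::printf("%lld\n", result.int64_value);  // 42
  return 0;
}
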
diff --git a/deps/v8/src/execution/arm64/simulator-arm64.h b/deps/v8/src/execution/arm64/simulator-arm64.h
index 73f3c2d62c..e06653c1b9 100644
--- a/deps/v8/src/execution/arm64/simulator-arm64.h
+++ b/deps/v8/src/execution/arm64/simulator-arm64.h
@@ -2449,6 +2449,9 @@ class Simulator : public DecoderVisitor, public SimulatorBase {
V8_EXPORT_PRIVATE void CallImpl(Address entry, CallArgument* args);
+ void CallAnyCTypeFunction(Address target_address,
+ const EncodedCSignature& signature);
+
// Read floating point return values.
template <typename T>
typename std::enable_if<std::is_floating_point<T>::value, T>::type
diff --git a/deps/v8/src/execution/embedder-state.cc b/deps/v8/src/execution/embedder-state.cc
new file mode 100644
index 0000000000..1c4fa2ff11
--- /dev/null
+++ b/deps/v8/src/execution/embedder-state.cc
@@ -0,0 +1,45 @@
+// Copyright 2021 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/execution/embedder-state.h"
+
+#include "src/api/api-inl.h"
+#include "src/base/logging.h"
+
+namespace v8 {
+
+namespace internal {
+
+EmbedderState::EmbedderState(v8::Isolate* isolate, Local<v8::Context> context,
+ EmbedderStateTag tag)
+ : isolate_(reinterpret_cast<i::Isolate*>(isolate)),
+ tag_(tag),
+ previous_embedder_state_(isolate_->current_embedder_state()) {
+ if (!context.IsEmpty()) {
+ native_context_address_ =
+ v8::Utils::OpenHandle(*context)->native_context().address();
+ }
+
+ DCHECK_NE(this, isolate_->current_embedder_state());
+ isolate_->set_current_embedder_state(this);
+}
+
+EmbedderState::~EmbedderState() {
+ DCHECK_EQ(this, isolate_->current_embedder_state());
+ isolate_->set_current_embedder_state(previous_embedder_state_);
+}
+
+void EmbedderState::OnMoveEvent(Address from, Address to) {
+ EmbedderState* state = this;
+ do {
+ if (state->native_context_address_ == from) {
+      state->native_context_address_ = to;
+ }
+ state = state->previous_embedder_state_;
+ } while (state != nullptr);
+}
+
+} // namespace internal
+
+} // namespace v8
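
EmbedderState objects form a per-isolate intrusive stack: the constructor captures the previous state and installs itself as current, the destructor restores the previous one, and OnMoveEvent walks the chain so recorded native-context addresses can be updated when the GC moves a context. A standalone sketch of that RAII push/pop pattern, with hypothetical FakeIsolate/ScopedState names in place of V8's types:

// Sketch of the RAII state-stack pattern; all names here are hypothetical.
#include <cassert>
#include <cstdio>

struct FakeIsolate;

struct ScopedState {
  ScopedState(FakeIsolate* isolate, int tag);
  ~ScopedState();

  FakeIsolate* isolate_;
  int tag_;
  ScopedState* previous_;
};

struct FakeIsolate {
  ScopedState* current_state = nullptr;
};

ScopedState::ScopedState(FakeIsolate* isolate, int tag)
    : isolate_(isolate), tag_(tag), previous_(isolate->current_state) {
  // Push: remember the enclosing state and make this one current.
  isolate_->current_state = this;
}

ScopedState::~ScopedState() {
  // Pop: restoring the enclosing state keeps nesting well-bracketed.
  assert(isolate_->current_state == this);
  isolate_->current_state = previous_;
}

int main() {
  FakeIsolate isolate;
  {
    ScopedState outer(&isolate, 1);
    {
      ScopedState inner(&isolate, 2);
      std::printf("current tag: %d\n", isolate.current_state->tag_);  // 2
    }
    std::printf("current tag: %d\n", isolate.current_state->tag_);  // 1
  }
  std::printf("empty: %d\n", isolate.current_state == nullptr);  // 1
  return 0;
}
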
diff --git a/deps/v8/src/execution/embedder-state.h b/deps/v8/src/execution/embedder-state.h
new file mode 100644
index 0000000000..3bd439c1e6
--- /dev/null
+++ b/deps/v8/src/execution/embedder-state.h
@@ -0,0 +1,39 @@
+// Copyright 2021 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_EXECUTION_EMBEDDER_STATE_H_
+#define V8_EXECUTION_EMBEDDER_STATE_H_
+
+#include "include/v8-local-handle.h"
+#include "src/execution/isolate.h"
+
+namespace v8 {
+
+enum class EmbedderStateTag : uint8_t;
+
+namespace internal {
+class V8_EXPORT_PRIVATE EmbedderState {
+ public:
+ EmbedderState(v8::Isolate* isolate, Local<v8::Context> context,
+ EmbedderStateTag tag);
+
+ ~EmbedderState();
+
+ EmbedderStateTag GetState() const { return tag_; }
+
+ Address native_context_address() const { return native_context_address_; }
+
+ void OnMoveEvent(Address from, Address to);
+
+ private:
+ Isolate* isolate_;
+ EmbedderStateTag tag_;
+ Address native_context_address_ = kNullAddress;
+ EmbedderState* previous_embedder_state_;
+};
+} // namespace internal
+
+} // namespace v8
+
+#endif // V8_EXECUTION_EMBEDDER_STATE_H_
diff --git a/deps/v8/src/execution/encoded-c-signature.cc b/deps/v8/src/execution/encoded-c-signature.cc
new file mode 100644
index 0000000000..45e9c247f1
--- /dev/null
+++ b/deps/v8/src/execution/encoded-c-signature.cc
@@ -0,0 +1,41 @@
+// Copyright 2021 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/execution/encoded-c-signature.h"
+
+#include "include/v8-fast-api-calls.h"
+#include "src/base/bits.h"
+#include "src/base/logging.h"
+
+namespace v8 {
+namespace internal {
+
+int EncodedCSignature::FPParameterCount() const {
+ CHECK(IsValid());
+ return base::bits::CountPopulation(bitfield_ & ~(1 << kReturnIndex));
+}
+
+EncodedCSignature::EncodedCSignature(const CFunctionInfo* signature) {
+ parameter_count_ = static_cast<int>(signature->ArgumentCount());
+ for (int i = 0; i < parameter_count_; ++i) {
+ if (signature->ArgumentInfo(i).GetSequenceType() ==
+ CTypeInfo::SequenceType::kScalar &&
+ CTypeInfo::IsFloatingPointType(signature->ArgumentInfo(i).GetType())) {
+ SetFloat(i);
+ }
+ }
+ // The struct holding the options of the CFunction (e.g. callback) is not
+ // included in the number of regular parameters, so we add it manually here.
+ if (signature->HasOptions()) {
+ parameter_count_++;
+ }
+ if (signature->ReturnInfo().GetSequenceType() ==
+ CTypeInfo::SequenceType::kScalar &&
+ CTypeInfo::IsFloatingPointType(signature->ReturnInfo().GetType())) {
+ SetFloat(EncodedCSignature::kReturnIndex);
+ }
+}
+
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/src/execution/encoded-c-signature.h b/deps/v8/src/execution/encoded-c-signature.h
new file mode 100644
index 0000000000..c53283476d
--- /dev/null
+++ b/deps/v8/src/execution/encoded-c-signature.h
@@ -0,0 +1,60 @@
+// Copyright 2021 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_EXECUTION_ENCODED_C_SIGNATURE_H_
+#define V8_EXECUTION_ENCODED_C_SIGNATURE_H_
+
+#include <stdint.h>
+
+namespace v8 {
+class CFunctionInfo;
+
+namespace internal {
+
+namespace compiler {
+class CallDescriptor;
+} // namespace compiler
+
+// This structure represents whether the parameters for a given function
+// should be read from general purpose or FP registers. parameter_count =
+// kInvalidParamCount represents an "invalid" signature, a placeholder for
+// non-existing elements in the mapping.
+struct EncodedCSignature {
+ public:
+ EncodedCSignature() = default;
+ EncodedCSignature(uint32_t bitfield, int parameter_count)
+ : bitfield_(bitfield), parameter_count_(parameter_count) {}
+ explicit EncodedCSignature(int parameter_count)
+ : parameter_count_(parameter_count) {}
+ explicit EncodedCSignature(const CFunctionInfo* signature);
+
+ bool IsFloat(int index) const {
+ return (bitfield_ & (static_cast<uint32_t>(1) << index)) != 0;
+ }
+ bool IsReturnFloat() const { return IsFloat(kReturnIndex); }
+ void SetFloat(int index) { bitfield_ |= (static_cast<uint32_t>(1) << index); }
+
+ bool IsValid() const { return parameter_count_ < kInvalidParamCount; }
+
+ int ParameterCount() const { return parameter_count_; }
+ int FPParameterCount() const;
+
+ static const EncodedCSignature& Invalid() {
+ static EncodedCSignature kInvalid = {0, kInvalidParamCount};
+ return kInvalid;
+ }
+
+ static const int kReturnIndex = 31;
+ static const int kInvalidParamCount = kReturnIndex + 1;
+
+ private:
+ uint32_t bitfield_ = 0; // Bit i is set if floating point, unset if not.
+
+ int parameter_count_ = kInvalidParamCount;
+};
+
+} // namespace internal
+} // namespace v8
+
+#endif // V8_EXECUTION_ENCODED_C_SIGNATURE_H_
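
The encoding above packs the whole GP-versus-FP classification into a single 32-bit word: bit i is set when parameter i is a scalar floating-point value, bit 31 (kReturnIndex) flags a floating-point return, a CFunction options struct simply bumps parameter_count_ by one, and FPParameterCount() is a popcount with the return bit masked off. A worked example using a trimmed stand-alone copy of the struct (DemoSignature is not V8's type; the real CFunctionInfo-based constructor lives in the .cc above):

#include <bitset>
#include <cstdint>
#include <cstdio>

struct DemoSignature {
  static constexpr int kReturnIndex = 31;
  uint32_t bitfield = 0;  // Bit i set => parameter i is floating point.
  int parameter_count = 0;

  void SetFloat(int index) { bitfield |= (uint32_t{1} << index); }
  bool IsFloat(int index) const {
    return (bitfield & (uint32_t{1} << index)) != 0;
  }
  bool IsReturnFloat() const { return IsFloat(kReturnIndex); }
  int FPParameterCount() const {
    return static_cast<int>(
        std::bitset<32>(bitfield & ~(uint32_t{1} << kReturnIndex)).count());
  }
};

int main() {
  // Encode: double f(int32_t a, double b, int32_t c)
  DemoSignature sig;
  sig.parameter_count = 3;
  sig.SetFloat(1);                            // parameter b is FP
  sig.SetFloat(DemoSignature::kReturnIndex);  // FP return value
  std::printf("IsFloat(0)=%d IsFloat(1)=%d return_fp=%d fp_params=%d\n",
              sig.IsFloat(0), sig.IsFloat(1), sig.IsReturnFloat(),
              sig.FPParameterCount());  // prints 0 1 1 1
  return 0;
}
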
diff --git a/deps/v8/src/execution/execution.cc b/deps/v8/src/execution/execution.cc
index f85bfcff22..3683f80a4d 100644
--- a/deps/v8/src/execution/execution.cc
+++ b/deps/v8/src/execution/execution.cc
@@ -57,6 +57,15 @@ struct InvokeParams {
return function->shared().is_script();
}
+ Handle<FixedArray> GetAndResetHostDefinedOptions() {
+ DCHECK(IsScript());
+ DCHECK_EQ(argc, 1);
+ auto options = Handle<FixedArray>::cast(argv[0]);
+ argv = nullptr;
+ argc = 0;
+ return options;
+ }
+
Handle<Object> target;
Handle<Object> receiver;
int argc;
@@ -330,10 +339,9 @@ V8_WARN_UNUSED_RESULT MaybeHandle<Object> Invoke(Isolate* isolate,
#endif
// Set up a ScriptContext when running scripts that need it.
if (function->shared().needs_script_context()) {
- DCHECK_EQ(params.argc, 1);
Handle<Context> context;
Handle<FixedArray> host_defined_options =
- Handle<FixedArray>::cast(params.argv[0]);
+ const_cast<InvokeParams&>(params).GetAndResetHostDefinedOptions();
if (!NewScriptContext(isolate, function, host_defined_options)
.ToHandle(&context)) {
if (params.message_handling == Execution::MessageHandling::kReport) {
@@ -511,14 +519,15 @@ MaybeHandle<Object> Execution::Call(Isolate* isolate, Handle<Object> callable,
}
// static
-MaybeHandle<Object> Execution::CallScript(
- Isolate* isolate, Handle<JSFunction> script_function,
- Handle<Object> receiver, Handle<FixedArray> host_defined_options) {
+MaybeHandle<Object> Execution::CallScript(Isolate* isolate,
+ Handle<JSFunction> script_function,
+ Handle<Object> receiver,
+ Handle<Object> host_defined_options) {
DCHECK(script_function->shared().is_script());
DCHECK(receiver->IsJSGlobalProxy() || receiver->IsJSGlobalObject());
- Handle<Object> argument = host_defined_options;
- return Invoke(isolate, InvokeParams::SetUpForCall(isolate, script_function,
- receiver, 1, &argument));
+ return Invoke(
+ isolate, InvokeParams::SetUpForCall(isolate, script_function, receiver, 1,
+ &host_defined_options));
}
MaybeHandle<Object> Execution::CallBuiltin(Isolate* isolate,
diff --git a/deps/v8/src/execution/execution.h b/deps/v8/src/execution/execution.h
index 439bf7992c..9cb6ef7253 100644
--- a/deps/v8/src/execution/execution.h
+++ b/deps/v8/src/execution/execution.h
@@ -33,7 +33,7 @@ class Execution final : public AllStatic {
// caller has to provide it at all times.
V8_EXPORT_PRIVATE V8_WARN_UNUSED_RESULT static MaybeHandle<Object> CallScript(
Isolate* isolate, Handle<JSFunction> callable, Handle<Object> receiver,
- Handle<FixedArray> host_defined_options);
+ Handle<Object> host_defined_options);
V8_WARN_UNUSED_RESULT static MaybeHandle<Object> CallBuiltin(
Isolate* isolate, Handle<JSFunction> builtin, Handle<Object> receiver,
diff --git a/deps/v8/src/execution/frame-constants.h b/deps/v8/src/execution/frame-constants.h
index d353a7092d..417350d93f 100644
--- a/deps/v8/src/execution/frame-constants.h
+++ b/deps/v8/src/execution/frame-constants.h
@@ -204,6 +204,22 @@ class BuiltinFrameConstants : public TypedFrameConstants {
DEFINE_TYPED_FRAME_SIZES(2);
};
+class BuiltinWasmWrapperConstants : public TypedFrameConstants {
+ public:
+ // This slot contains the number of slots at the top of the frame that need to
+ // be scanned by the GC.
+ static constexpr int kGCScanSlotCountOffset =
+ TYPED_FRAME_PUSHED_VALUE_OFFSET(0);
+};
+
+class ReturnPromiseOnSuspendFrameConstants
+ : public BuiltinWasmWrapperConstants {
+ public:
+ static constexpr int kParamCountOffset = TYPED_FRAME_PUSHED_VALUE_OFFSET(1);
+ static constexpr int kSpillAreaSize =
+ -(kParamCountOffset - TypedFrameConstants::kFixedFrameSizeFromFp);
+};
+
class ConstructFrameConstants : public TypedFrameConstants {
public:
// FP-relative.
diff --git a/deps/v8/src/execution/frames-inl.h b/deps/v8/src/execution/frames-inl.h
index ba2a7bce9a..8db9f4dce4 100644
--- a/deps/v8/src/execution/frames-inl.h
+++ b/deps/v8/src/execution/frames-inl.h
@@ -239,6 +239,10 @@ inline WasmToJsFrame::WasmToJsFrame(StackFrameIteratorBase* iterator)
inline JsToWasmFrame::JsToWasmFrame(StackFrameIteratorBase* iterator)
: StubFrame(iterator) {}
+inline ReturnPromiseOnSuspendFrame::ReturnPromiseOnSuspendFrame(
+ StackFrameIteratorBase* iterator)
+ : ExitFrame(iterator) {}
+
inline CWasmEntryFrame::CWasmEntryFrame(StackFrameIteratorBase* iterator)
: StubFrame(iterator) {}
diff --git a/deps/v8/src/execution/frames.cc b/deps/v8/src/execution/frames.cc
index 26dbd457b0..b102164b61 100644
--- a/deps/v8/src/execution/frames.cc
+++ b/deps/v8/src/execution/frames.cc
@@ -62,13 +62,19 @@ class StackHandlerIterator {
public:
StackHandlerIterator(const StackFrame* frame, StackHandler* handler)
: limit_(frame->fp()), handler_(handler) {
- // Make sure the handler has already been unwound to this frame.
- DCHECK(frame->sp() <= AddressOf(handler));
#if V8_ENABLE_WEBASSEMBLY
+ // Make sure the handler has already been unwound to this frame. With stack
+ // switching this is not equivalent to the inequality below, because the
+ // frame and the handler could be in different stacks.
+ DCHECK_IMPLIES(!FLAG_experimental_wasm_stack_switching,
+ frame->sp() <= AddressOf(handler));
// For CWasmEntry frames, the handler was registered by the last C++
// frame (Execution::CallWasm), so even though its address is already
// beyond the limit, we know we always want to unwind one handler.
if (frame->is_c_wasm_entry()) handler_ = handler_->next();
+#else
+ // Make sure the handler has already been unwound to this frame.
+ DCHECK_LE(frame->sp(), AddressOf(handler));
#endif // V8_ENABLE_WEBASSEMBLY
}
@@ -103,6 +109,13 @@ StackFrameIterator::StackFrameIterator(Isolate* isolate, ThreadLocalTop* t)
: StackFrameIteratorBase(isolate, true) {
Reset(t);
}
+#if V8_ENABLE_WEBASSEMBLY
+StackFrameIterator::StackFrameIterator(Isolate* isolate,
+ wasm::StackMemory* stack)
+ : StackFrameIterator(isolate) {
+ Reset(isolate->thread_local_top(), stack);
+}
+#endif
void StackFrameIterator::Advance() {
DCHECK(!done());
@@ -122,8 +135,14 @@ void StackFrameIterator::Advance() {
frame_ = SingletonFor(type, &state);
// When we're done iterating over the stack frames, the handler
- // chain must have been completely unwound.
- DCHECK(!done() || handler_ == nullptr);
+ // chain must have been completely unwound. Except for wasm stack-switching:
+ // we stop at the end of the current segment.
+#if V8_ENABLE_WEBASSEMBLY
+ DCHECK_IMPLIES(done() && !FLAG_experimental_wasm_stack_switching,
+ handler_ == nullptr);
+#else
+ DCHECK_IMPLIES(done(), handler_ == nullptr);
+#endif
}
StackFrame* StackFrameIterator::Reframe() {
@@ -140,6 +159,19 @@ void StackFrameIterator::Reset(ThreadLocalTop* top) {
frame_ = SingletonFor(type, &state);
}
+#if V8_ENABLE_WEBASSEMBLY
+void StackFrameIterator::Reset(ThreadLocalTop* top, wasm::StackMemory* stack) {
+ if (stack->jmpbuf()->sp == stack->base()) {
+ // Empty stack.
+ return;
+ }
+ StackFrame::State state;
+ ReturnPromiseOnSuspendFrame::GetStateForJumpBuffer(stack->jmpbuf(), &state);
+ handler_ = StackHandler::FromAddress(Isolate::handler(top));
+ frame_ = SingletonFor(StackFrame::RETURN_PROMISE_ON_SUSPEND, &state);
+}
+#endif
+
StackFrame* StackFrameIteratorBase::SingletonFor(StackFrame::Type type,
StackFrame::State* state) {
StackFrame* result = SingletonFor(type);
@@ -249,7 +281,7 @@ namespace {
bool IsInterpreterFramePc(Isolate* isolate, Address pc,
StackFrame::State* state) {
- Builtin builtin = InstructionStream::TryLookupCode(isolate, pc);
+ Builtin builtin = OffHeapInstructionStream::TryLookupCode(isolate, pc);
if (builtin != Builtin::kNoBuiltinId &&
(builtin == Builtin::kInterpreterEntryTrampoline ||
builtin == Builtin::kInterpreterEnterAtBytecode ||
@@ -543,7 +575,7 @@ void StackFrame::IteratePc(RootVisitor* v, Address* pc_address,
holder.GetHeap()->GcSafeCodeContains(holder, old_pc));
unsigned pc_offset = holder.GetOffsetFromInstructionStart(isolate_, old_pc);
Object code = holder;
- v->VisitRootPointer(Root::kStackRoots, nullptr, FullObjectSlot(&code));
+ v->VisitRunningCode(FullObjectSlot(&code));
if (code == holder) return;
holder = Code::unchecked_cast(code);
Address pc = holder.InstructionStart(isolate_, old_pc) + pc_offset;
@@ -562,7 +594,12 @@ void StackFrame::SetReturnAddressLocationResolver(
StackFrame::Type StackFrame::ComputeType(const StackFrameIteratorBase* iterator,
State* state) {
- DCHECK_NE(state->fp, kNullAddress);
+#if V8_ENABLE_WEBASSEMBLY
+ if (state->fp == kNullAddress) {
+ DCHECK(FLAG_experimental_wasm_stack_switching);
+ return NO_FRAME_TYPE;
+ }
+#endif
MSAN_MEMORY_IS_INITIALIZED(
state->fp + CommonFrameConstants::kContextOrFrameTypeOffset,
@@ -680,6 +717,7 @@ StackFrame::Type StackFrame::ComputeType(const StackFrameIteratorBase* iterator,
case WASM_EXIT:
case WASM_DEBUG_BREAK:
case JS_TO_WASM:
+ case RETURN_PROMISE_ON_SUSPEND:
#endif // V8_ENABLE_WEBASSEMBLY
return candidate;
case OPTIMIZED:
@@ -791,11 +829,11 @@ StackFrame::Type ExitFrame::ComputeFrameType(Address fp) {
StackFrame::Type frame_type = static_cast<StackFrame::Type>(marker_int >> 1);
switch (frame_type) {
case BUILTIN_EXIT:
- return BUILTIN_EXIT;
#if V8_ENABLE_WEBASSEMBLY
case WASM_EXIT:
- return WASM_EXIT;
+ case RETURN_PROMISE_ON_SUSPEND:
#endif // V8_ENABLE_WEBASSEMBLY
+ return frame_type;
default:
return EXIT;
}
@@ -938,8 +976,16 @@ int CommonFrame::ComputeExpressionsCount() const {
}
void CommonFrame::ComputeCallerState(State* state) const {
- state->sp = caller_sp();
state->fp = caller_fp();
+#if V8_ENABLE_WEBASSEMBLY
+ if (state->fp == kNullAddress) {
+ // An empty FP signals the first frame of a stack segment. The caller is
+ // on a different stack, or is unbound (suspended stack).
+ DCHECK(FLAG_experimental_wasm_stack_switching);
+ return;
+ }
+#endif
+ state->sp = caller_sp();
state->pc_address = ResolveReturnAddressLocation(
reinterpret_cast<Address*>(ComputePCAddress(fp())));
state->callee_fp = fp();
@@ -994,8 +1040,8 @@ void CommonFrame::IterateCompiledFrame(RootVisitor* v) const {
entry->code.GetSafepointEntry(isolate(), inner_pointer);
DCHECK(entry->safepoint_entry.is_valid());
} else {
- DCHECK(entry->safepoint_entry.Equals(
- entry->code.GetSafepointEntry(isolate(), inner_pointer)));
+ DCHECK_EQ(entry->safepoint_entry,
+ entry->code.GetSafepointEntry(isolate(), inner_pointer));
}
code = entry->code;
@@ -1036,6 +1082,7 @@ void CommonFrame::IterateCompiledFrame(RootVisitor* v) const {
case CONSTRUCT:
#if V8_ENABLE_WEBASSEMBLY
case JS_TO_WASM:
+ case RETURN_PROMISE_ON_SUSPEND:
case C_WASM_ENTRY:
case WASM_DEBUG_BREAK:
#endif // V8_ENABLE_WEBASSEMBLY
@@ -1088,10 +1135,10 @@ void CommonFrame::IterateCompiledFrame(RootVisitor* v) const {
// Visit pointer spill slots and locals.
DCHECK_GE((stack_slots + kBitsPerByte) / kBitsPerByte,
- safepoint_entry.entry_size());
+ safepoint_entry.tagged_slots().size());
int slot_offset = 0;
PtrComprCageBase cage_base(isolate());
- for (uint8_t bits : safepoint_entry.iterate_bits()) {
+ for (uint8_t bits : safepoint_entry.tagged_slots()) {
while (bits) {
int bit = base::bits::CountTrailingZeros(bits);
bits &= ~(1 << bit);
@@ -1124,10 +1171,15 @@ void CommonFrame::IterateCompiledFrame(RootVisitor* v) const {
// We don't need to update smi values or full pointers.
*spill_slot.location() =
DecompressTaggedPointer(cage_base, static_cast<Tagged_t>(value));
- // Ensure that the spill slot contains correct heap object.
- DCHECK(HeapObject::cast(Object(*spill_slot.location()))
- .map(cage_base)
- .IsMap());
+ if (DEBUG_BOOL) {
+ // Ensure that the spill slot contains correct heap object.
+ HeapObject raw = HeapObject::cast(Object(*spill_slot.location()));
+ MapWord map_word = raw.map_word(cage_base, kRelaxedLoad);
+ HeapObject forwarded = map_word.IsForwardingAddress()
+ ? map_word.ToForwardingAddress()
+ : raw;
+ CHECK(forwarded.map(cage_base).IsMap());
+ }
}
} else {
Tagged_t compressed_value =
@@ -1635,9 +1687,9 @@ void OptimizedFrame::Summarize(std::vector<FrameSummary>* frames) const {
return JavaScriptFrame::Summarize(frames);
}
- int deopt_index = Safepoint::kNoDeoptimizationIndex;
+ int deopt_index = SafepointEntry::kNoDeoptIndex;
DeoptimizationData const data = GetDeoptimizationData(&deopt_index);
- if (deopt_index == Safepoint::kNoDeoptimizationIndex) {
+ if (deopt_index == SafepointEntry::kNoDeoptIndex) {
CHECK(data.is_null());
FATAL("Missing deoptimization information for OptimizedFrame::Summarize.");
}
@@ -1747,7 +1799,7 @@ DeoptimizationData OptimizedFrame::GetDeoptimizationData(
*deopt_index = safepoint_entry.deoptimization_index();
return DeoptimizationData::cast(code.deoptimization_data());
}
- *deopt_index = Safepoint::kNoDeoptimizationIndex;
+ *deopt_index = SafepointEntry::kNoDeoptIndex;
return DeoptimizationData();
}
@@ -1764,11 +1816,11 @@ void OptimizedFrame::GetFunctions(
}
DisallowGarbageCollection no_gc;
- int deopt_index = Safepoint::kNoDeoptimizationIndex;
+ int deopt_index = SafepointEntry::kNoDeoptIndex;
DeoptimizationData const data = GetDeoptimizationData(&deopt_index);
DCHECK(!data.is_null());
- DCHECK_NE(Safepoint::kNoDeoptimizationIndex, deopt_index);
- FixedArray const literal_array = data.LiteralArray();
+ DCHECK_NE(SafepointEntry::kNoDeoptIndex, deopt_index);
+ DeoptimizationLiteralArray const literal_array = data.LiteralArray();
TranslationArrayIterator it(data.TranslationByteArray(),
data.TranslationIndex(deopt_index).value());
@@ -2035,12 +2087,11 @@ void WasmDebugBreakFrame::Iterate(RootVisitor* v) const {
DCHECK(code);
SafepointTable table(code);
SafepointEntry safepoint_entry = table.FindEntry(caller_pc());
- if (!safepoint_entry.has_register_bits()) return;
- uint32_t register_bits = safepoint_entry.register_bits();
+ uint32_t tagged_register_indexes = safepoint_entry.tagged_register_indexes();
- while (register_bits != 0) {
- int reg_code = base::bits::CountTrailingZeros(register_bits);
- register_bits &= ~(1 << reg_code);
+ while (tagged_register_indexes != 0) {
+ int reg_code = base::bits::CountTrailingZeros(tagged_register_indexes);
+ tagged_register_indexes &= ~(1 << reg_code);
FullObjectSlot spill_slot(&Memory<Address>(
fp() +
WasmDebugBreakFrameConstants::GetPushedGpRegisterOffset(reg_code)));
@@ -2079,10 +2130,10 @@ void JsToWasmFrame::Iterate(RootVisitor* v) const {
IterateCompiledFrame(v);
return;
}
- // The [fp - 2*kSystemPointerSize] on the stack is a value indicating how
- // many values should be scanned from the top.
- intptr_t scan_count =
- *reinterpret_cast<intptr_t*>(fp() - 2 * kSystemPointerSize);
+  // The [fp + BuiltinWasmWrapperConstants::kGCScanSlotCountOffset] slot on the
+  // stack is a value indicating how many values should be scanned from the top.
+ intptr_t scan_count = *reinterpret_cast<intptr_t*>(
+ fp() + BuiltinWasmWrapperConstants::kGCScanSlotCountOffset);
FullObjectSlot spill_slot_base(&Memory<Address>(sp()));
FullObjectSlot spill_slot_limit(
@@ -2091,6 +2142,31 @@ void JsToWasmFrame::Iterate(RootVisitor* v) const {
spill_slot_limit);
}
+void ReturnPromiseOnSuspendFrame::Iterate(RootVisitor* v) const {
+ // See JsToWasmFrame layout.
+ // We cannot DCHECK that the pc matches the expected builtin code here,
+ // because the return address is on a different stack.
+  // The [fp + BuiltinWasmWrapperConstants::kGCScanSlotCountOffset] slot on the
+  // stack is a value indicating how many values should be scanned from the top.
+ intptr_t scan_count = *reinterpret_cast<intptr_t*>(
+ fp() + BuiltinWasmWrapperConstants::kGCScanSlotCountOffset);
+
+ FullObjectSlot spill_slot_base(&Memory<Address>(sp()));
+ FullObjectSlot spill_slot_limit(
+ &Memory<Address>(sp() + scan_count * kSystemPointerSize));
+ v->VisitRootPointers(Root::kStackRoots, nullptr, spill_slot_base,
+ spill_slot_limit);
+}
+
+// static
+void ReturnPromiseOnSuspendFrame::GetStateForJumpBuffer(
+ wasm::JumpBuffer* jmpbuf, State* state) {
+ DCHECK_NE(jmpbuf->fp, kNullAddress);
+ DCHECK_EQ(ComputeFrameType(jmpbuf->fp), RETURN_PROMISE_ON_SUSPEND);
+ FillState(jmpbuf->fp, jmpbuf->sp, state);
+ DCHECK_NE(*state->pc_address, kNullAddress);
+}
+
WasmInstanceObject WasmCompileLazyFrame::wasm_instance() const {
return WasmInstanceObject::cast(*wasm_instance_slot());
}
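
Both wrapper-frame Iterate methods added above rely on the same convention: the wrapper code stores, in the kGCScanSlotCountOffset spill slot, how many of the slots currently sitting above sp hold tagged values, and the visitor walks exactly that many. A compact sketch of that "scan count at a fixed frame offset" idea, with stand-in Slot/Visitor types rather than V8's FullObjectSlot/RootVisitor:

// Sketch only; Slot, Visitor and IterateWrapperFrame are stand-ins.
#include <cstdint>
#include <cstdio>
#include <vector>

using Slot = uintptr_t;

struct Visitor {
  void VisitRange(const Slot* base, const Slot* limit) {
    for (const Slot* p = base; p < limit; ++p) {
      std::printf("visit slot value 0x%llx\n",
                  static_cast<unsigned long long>(*p));
    }
  }
};

// frame[0] plays the role of the kGCScanSlotCountOffset spill slot; the
// tagged values sit "above" it at frame[1 .. count].
void IterateWrapperFrame(const std::vector<Slot>& frame, Visitor* v) {
  intptr_t scan_count = static_cast<intptr_t>(frame[0]);
  const Slot* spill_base = frame.data() + 1;
  v->VisitRange(spill_base, spill_base + scan_count);
}

int main() {
  std::vector<Slot> frame = {2, 0xdead, 0xbeef, 0x1234};  // scan count = 2
  Visitor v;
  IterateWrapperFrame(frame, &v);  // visits only 0xdead and 0xbeef
  return 0;
}
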
@@ -2285,8 +2361,8 @@ namespace {
// from the embedded builtins start or from respective MemoryChunk.
uint32_t PcAddressForHashing(Isolate* isolate, Address address) {
uint32_t hashable_address;
- if (InstructionStream::TryGetAddressForHashing(isolate, address,
- &hashable_address)) {
+ if (OffHeapInstructionStream::TryGetAddressForHashing(isolate, address,
+ &hashable_address)) {
return hashable_address;
}
return ObjectAddressForHashing(address);
diff --git a/deps/v8/src/execution/frames.h b/deps/v8/src/execution/frames.h
index 04979509a2..878c265632 100644
--- a/deps/v8/src/execution/frames.h
+++ b/deps/v8/src/execution/frames.h
@@ -48,6 +48,8 @@ namespace v8 {
namespace internal {
namespace wasm {
class WasmCode;
+struct JumpBuffer;
+class StackMemory;
} // namespace wasm
// Forward declarations.
@@ -100,6 +102,7 @@ class StackHandler {
IF_WASM(V, WASM, WasmFrame) \
IF_WASM(V, WASM_TO_JS, WasmToJsFrame) \
IF_WASM(V, JS_TO_WASM, JsToWasmFrame) \
+ IF_WASM(V, RETURN_PROMISE_ON_SUSPEND, ReturnPromiseOnSuspendFrame) \
IF_WASM(V, WASM_DEBUG_BREAK, WasmDebugBreakFrame) \
IF_WASM(V, C_WASM_ENTRY, CWasmEntryFrame) \
IF_WASM(V, WASM_EXIT, WasmExitFrame) \
@@ -715,7 +718,8 @@ class ConstructEntryFrame : public EntryFrame {
friend class StackFrameIteratorBase;
};
-// Exit frames are used to exit JavaScript execution and go to C.
+// Exit frames are used to exit JavaScript execution and go to C, or to switch
+// out of the current stack for wasm stack-switching.
class ExitFrame : public TypedFrame {
public:
Type type() const override { return EXIT; }
@@ -1045,6 +1049,19 @@ class JsToWasmFrame : public StubFrame {
friend class StackFrameIteratorBase;
};
+class ReturnPromiseOnSuspendFrame : public ExitFrame {
+ public:
+ Type type() const override { return RETURN_PROMISE_ON_SUSPEND; }
+ void Iterate(RootVisitor* v) const override;
+ static void GetStateForJumpBuffer(wasm::JumpBuffer* jmpbuf, State* state);
+
+ protected:
+ inline explicit ReturnPromiseOnSuspendFrame(StackFrameIteratorBase* iterator);
+
+ private:
+ friend class StackFrameIteratorBase;
+};
+
class CWasmEntryFrame : public StubFrame {
public:
Type type() const override { return C_WASM_ENTRY; }
@@ -1221,6 +1238,11 @@ class StackFrameIterator : public StackFrameIteratorBase {
V8_EXPORT_PRIVATE explicit StackFrameIterator(Isolate* isolate);
// An iterator that iterates over a given thread's stack.
V8_EXPORT_PRIVATE StackFrameIterator(Isolate* isolate, ThreadLocalTop* t);
+#if V8_ENABLE_WEBASSEMBLY
+ // An iterator that iterates over a given wasm stack segment.
+ V8_EXPORT_PRIVATE StackFrameIterator(Isolate* isolate,
+ wasm::StackMemory* stack);
+#endif
StackFrameIterator(const StackFrameIterator&) = delete;
StackFrameIterator& operator=(const StackFrameIterator&) = delete;
@@ -1235,6 +1257,9 @@ class StackFrameIterator : public StackFrameIteratorBase {
private:
// Go back to the first frame.
void Reset(ThreadLocalTop* top);
+#if V8_ENABLE_WEBASSEMBLY
+ void Reset(ThreadLocalTop* top, wasm::StackMemory* stack);
+#endif
};
// Iterator that supports iterating through all JavaScript frames.
diff --git a/deps/v8/src/execution/isolate-data.h b/deps/v8/src/execution/isolate-data.h
index d0e5aa87d5..ca514657de 100644
--- a/deps/v8/src/execution/isolate-data.h
+++ b/deps/v8/src/execution/isolate-data.h
@@ -10,6 +10,7 @@
#include "src/codegen/external-reference-table.h"
#include "src/execution/stack-guard.h"
#include "src/execution/thread-local-top.h"
+#include "src/heap/linear-allocation-area.h"
#include "src/roots/roots.h"
#include "src/security/external-pointer-table.h"
#include "src/utils/utils.h"
@@ -48,9 +49,22 @@ class Isolate;
builtin_entry_table) \
V(kBuiltinTableOffset, Builtins::kBuiltinCount* kSystemPointerSize, \
builtin_table) \
+ /* Linear allocation areas for the heap's new and old space */ \
+ V(kNewAllocationInfo, LinearAllocationArea::kSize, new_allocation_info) \
+ V(kOldAllocationInfo, LinearAllocationArea::kSize, old_allocation_info) \
+ ISOLATE_DATA_FIELDS_EXTERNAL_CODE_SPACE(V) \
ISOLATE_DATA_FIELDS_HEAP_SANDBOX(V) \
V(kStackIsIterableOffset, kUInt8Size, stack_is_iterable)
+#ifdef V8_EXTERNAL_CODE_SPACE
+#define ISOLATE_DATA_FIELDS_EXTERNAL_CODE_SPACE(V) \
+ V(kBuiltinCodeDataContainerTableOffset, \
+ Builtins::kBuiltinCount* kSystemPointerSize, \
+ builtin_code_data_container_table)
+#else
+#define ISOLATE_DATA_FIELDS_EXTERNAL_CODE_SPACE(V)
+#endif // V8_EXTERNAL_CODE_SPACE
+
#ifdef V8_HEAP_SANDBOX
#define ISOLATE_DATA_FIELDS_HEAP_SANDBOX(V) \
V(kExternalPointerTableOffset, kSystemPointerSize * 3, external_pointer_table)
@@ -104,6 +118,17 @@ class IsolateData final {
Builtins::ToInt(id) * kSystemPointerSize;
}
+ static int BuiltinCodeDataContainerSlotOffset(Builtin id) {
+#ifdef V8_EXTERNAL_CODE_SPACE
+ // TODO(v8:11880): implement table tiering once the builtin table containing
+ // Code objects is no longer used.
+ return builtin_code_data_container_table_offset() +
+ Builtins::ToInt(id) * kSystemPointerSize;
+#else
+ UNREACHABLE();
+#endif // V8_EXTERNAL_CODE_SPACE
+ }
+
#define V(Offset, Size, Name) \
Address Name##_address() { return reinterpret_cast<Address>(&Name##_); }
ISOLATE_DATA_FIELDS(V)
@@ -126,6 +151,13 @@ class IsolateData final {
ThreadLocalTop const& thread_local_top() const { return thread_local_top_; }
Address* builtin_entry_table() { return builtin_entry_table_; }
Address* builtin_table() { return builtin_table_; }
+ Address* builtin_code_data_container_table() {
+#ifdef V8_EXTERNAL_CODE_SPACE
+ return builtin_code_data_container_table_;
+#else
+ UNREACHABLE();
+#endif
+ }
uint8_t stack_is_iterable() const { return stack_is_iterable_; }
// Returns true if this address points to data stored in this instance. If
@@ -201,6 +233,13 @@ class IsolateData final {
// The entries in this array are tagged pointers to Code objects.
Address builtin_table_[Builtins::kBuiltinCount] = {};
+ LinearAllocationArea new_allocation_info_;
+ LinearAllocationArea old_allocation_info_;
+
+#ifdef V8_EXTERNAL_CODE_SPACE
+ Address builtin_code_data_container_table_[Builtins::kBuiltinCount] = {};
+#endif
+
// Table containing pointers to external objects.
#ifdef V8_HEAP_SANDBOX
ExternalPointerTable external_pointer_table_;
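
ISOLATE_DATA_FIELDS is an X-macro list: each V(offset_name, size, field_name) entry is expanded several times, for example into the Name##_address() accessors visible earlier in this hunk, so the new linear-allocation-area fields and the optional builtin code-data-container table only need new list entries (plus a conditional sub-list for the external-code-space build). A small sketch of that pattern with made-up fields; the real list also drives offset constants and layout checks:

// Sketch of the X-macro field-list pattern; DEMO_FIELDS and DemoData are
// made up and use a simpler two-column entry than IsolateData's.
#include <cstdint>
#include <cstdio>

#define DEMO_FIELDS(V)       \
  V(kCounterOffset, counter) \
  V(kScratchOffset, scratch)

class DemoData {
 public:
  // One expansion: generate an accessor returning each field's address.
#define V(Offset, Name) \
  uintptr_t Name##_address() { return reinterpret_cast<uintptr_t>(&Name##_); }
  DEMO_FIELDS(V)
#undef V

 private:
  // A second expansion could declare the fields; written out here for clarity.
  uint64_t counter_ = 0;
  uint64_t scratch_ = 0;
};

int main() {
  DemoData data;
  std::printf("counter at %p, scratch at %p\n",
              reinterpret_cast<void*>(data.counter_address()),
              reinterpret_cast<void*>(data.scratch_address()));
  return 0;
}
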
diff --git a/deps/v8/src/execution/isolate-inl.h b/deps/v8/src/execution/isolate-inl.h
index 48950b673f..9fa950db02 100644
--- a/deps/v8/src/execution/isolate-inl.h
+++ b/deps/v8/src/execution/isolate-inl.h
@@ -34,6 +34,7 @@ NativeContext Isolate::raw_native_context() {
}
void Isolate::set_pending_message(Object message_obj) {
+ DCHECK(message_obj.IsTheHole(this) || message_obj.IsJSMessageObject());
thread_local_top()->pending_message_ = message_obj;
}
diff --git a/deps/v8/src/execution/isolate-utils-inl.h b/deps/v8/src/execution/isolate-utils-inl.h
index 1e91d494aa..d044f3e646 100644
--- a/deps/v8/src/execution/isolate-utils-inl.h
+++ b/deps/v8/src/execution/isolate-utils-inl.h
@@ -97,6 +97,28 @@ V8_INLINE static Isolate* GetIsolateForHeapSandbox(HeapObject object) {
#endif
}
+// This is an external code space friendly version of GetPtrComprCageBase(..)
+// which also works for objects located in external code space.
+//
+// NOTE: it's supposed to be used only for the cases where performance doesn't
+// matter. For example, in debug only code or in debugging macros.
+// In production code the preferred way is to use precomputed cage base value
+// which is a result of PtrComprCageBase{isolate} or GetPtrComprCageBase()
+// applied to a heap object which is known to not be a part of external code
+// space.
+V8_INLINE PtrComprCageBase GetPtrComprCageBaseSlow(HeapObject object) {
+ if (V8_EXTERNAL_CODE_SPACE_BOOL) {
+ Isolate* isolate;
+ if (GetIsolateFromHeapObject(object, &isolate)) {
+ return PtrComprCageBase{isolate};
+ }
+ // If the Isolate can't be obtained then the heap object is a read-only
+ // one and therefore not a Code object, so fallback to auto-computing cage
+ // base value.
+ }
+ return GetPtrComprCageBase(object);
+}
+
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/execution/isolate.cc b/deps/v8/src/execution/isolate.cc
index de396e8732..89b0ba3c1a 100644
--- a/deps/v8/src/execution/isolate.cc
+++ b/deps/v8/src/execution/isolate.cc
@@ -7,6 +7,7 @@
#include <stdlib.h>
#include <atomic>
+#include <cstdint>
#include <fstream>
#include <memory>
#include <sstream>
@@ -55,7 +56,11 @@
#include "src/handles/global-handles-inl.h"
#include "src/handles/persistent-handles.h"
#include "src/heap/heap-inl.h"
+#include "src/heap/heap.h"
+#include "src/heap/local-heap.h"
+#include "src/heap/parked-scope.h"
#include "src/heap/read-only-heap.h"
+#include "src/heap/safepoint.h"
#include "src/ic/stub-cache.h"
#include "src/init/bootstrapper.h"
#include "src/init/setup-isolate.h"
@@ -102,6 +107,8 @@
#include "src/zone/accounting-allocator.h"
#include "src/zone/type-stats.h"
#ifdef V8_INTL_SUPPORT
+#include "src/objects/intl-objects.h"
+#include "unicode/locid.h"
#include "unicode/uobject.h"
#endif // V8_INTL_SUPPORT
@@ -122,6 +129,10 @@
#include "src/heap/conservative-stack-visitor.h"
#endif
+#if USE_SIMULATOR
+#include "src/execution/simulator-base.h"
+#endif
+
extern "C" const uint8_t* v8_Default_embedded_blob_code_;
extern "C" uint32_t v8_Default_embedded_blob_code_size_;
extern "C" const uint8_t* v8_Default_embedded_blob_data_;
@@ -240,7 +251,7 @@ void FreeCurrentEmbeddedBlob() {
CHECK_EQ(StickyEmbeddedBlobCode(), Isolate::CurrentEmbeddedBlobCode());
CHECK_EQ(StickyEmbeddedBlobData(), Isolate::CurrentEmbeddedBlobData());
- InstructionStream::FreeOffHeapInstructionStream(
+ OffHeapInstructionStream::FreeOffHeapOffHeapInstructionStream(
const_cast<uint8_t*>(Isolate::CurrentEmbeddedBlobCode()),
Isolate::CurrentEmbeddedBlobCodeSize(),
const_cast<uint8_t*>(Isolate::CurrentEmbeddedBlobData()),
@@ -363,8 +374,22 @@ uint32_t Isolate::CurrentEmbeddedBlobDataSize() {
std::memory_order::memory_order_relaxed);
}
+// static
base::AddressRegion Isolate::GetShortBuiltinsCallRegion() {
- DCHECK_LT(CurrentEmbeddedBlobCodeSize(), kShortBuiltinCallsBoundary);
+ // Update calculations below if the assert fails.
+ STATIC_ASSERT(kMaxPCRelativeCodeRangeInMB <= 4096);
+ if (kMaxPCRelativeCodeRangeInMB == 0) {
+ // Return empty region if pc-relative calls/jumps are not supported.
+ return base::AddressRegion(kNullAddress, 0);
+ }
+ constexpr size_t max_size = std::numeric_limits<size_t>::max();
+ if (uint64_t{kMaxPCRelativeCodeRangeInMB} * MB > max_size) {
+ // The whole addressable space is reachable with pc-relative calls/jumps.
+ return base::AddressRegion(kNullAddress, max_size);
+ }
+ constexpr size_t radius = kMaxPCRelativeCodeRangeInMB * MB;
+
+ DCHECK_LT(CurrentEmbeddedBlobCodeSize(), radius);
Address embedded_blob_code_start =
reinterpret_cast<Address>(CurrentEmbeddedBlobCode());
if (embedded_blob_code_start == kNullAddress) {
@@ -374,10 +399,11 @@ base::AddressRegion Isolate::GetShortBuiltinsCallRegion() {
Address embedded_blob_code_end =
embedded_blob_code_start + CurrentEmbeddedBlobCodeSize();
Address region_start =
- (embedded_blob_code_end > kShortBuiltinCallsBoundary)
- ? (embedded_blob_code_end - kShortBuiltinCallsBoundary)
- : 0;
- Address region_end = embedded_blob_code_start + kShortBuiltinCallsBoundary;
+ (embedded_blob_code_end > radius) ? (embedded_blob_code_end - radius) : 0;
+ Address region_end = embedded_blob_code_start + radius;
+ if (region_end < embedded_blob_code_start) {
+ region_end = static_cast<Address>(-1);
+ }
return base::AddressRegion(region_start, region_end - region_start);
}
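
GetShortBuiltinsCallRegion now derives the region from the architecture's pc-relative call range instead of a fixed boundary: any code placed inside [blob_code_end - radius, blob_code_start + radius) can reach the entire embedded blob with near calls, with the interval clamped at address 0 and at the top of the address space, where radius = kMaxPCRelativeCodeRangeInMB * MB. A worked example of that computation with made-up numbers (a 128 MB radius; the real value depends on the architecture):

// Sketch with invented inputs; Region and ShortCallRegion are stand-ins.
#include <cstdint>
#include <cstdio>

struct Region {
  uintptr_t start;
  size_t size;
};

Region ShortCallRegion(uintptr_t blob_start, size_t blob_size, size_t radius) {
  uintptr_t blob_end = blob_start + blob_size;
  // Callers in [blob_end - radius, blob_start + radius) can reach the whole
  // blob with pc-relative calls; clamp at the ends of the address space.
  uintptr_t start = (blob_end > radius) ? blob_end - radius : 0;
  uintptr_t end = blob_start + radius;
  if (end < blob_start) end = UINTPTR_MAX;  // overflow: clamp to the top
  return {start, end - start};
}

int main() {
  const size_t kMB = 1024 * 1024;
  Region r = ShortCallRegion(/*blob_start=*/512 * kMB, /*blob_size=*/2 * kMB,
                             /*radius=*/128 * kMB);
  // start = 514 MB - 128 MB = 386 MB; size = (512 + 128) MB - 386 MB = 254 MB.
  std::printf("start=%llu MB size=%llu MB\n",
              static_cast<unsigned long long>(r.start / kMB),
              static_cast<unsigned long long>(r.size / kMB));
  return 0;
}
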
@@ -542,6 +568,22 @@ void Isolate::Iterate(RootVisitor* v, ThreadLocalTop* thread) {
// Iterate over pointers on native execution stack.
#if V8_ENABLE_WEBASSEMBLY
wasm::WasmCodeRefScope wasm_code_ref_scope;
+ if (FLAG_experimental_wasm_stack_switching) {
+ wasm::StackMemory* current = wasm_stacks_;
+ DCHECK_NOT_NULL(current);
+ do {
+ if (current->IsActive()) {
+ // The active stack's jump buffer does not match the current state, use
+ // the thread info below instead.
+ current = current->next();
+ continue;
+ }
+ for (StackFrameIterator it(this, current); !it.done(); it.Advance()) {
+ it.frame()->Iterate(v);
+ }
+ current = current->next();
+ } while (current != wasm_stacks_);
+ }
#endif // V8_ENABLE_WEBASSEMBLY
for (StackFrameIterator it(this, thread); !it.done(); it.Advance()) {
it.frame()->Iterate(v);
@@ -589,7 +631,17 @@ Handle<String> Isolate::StackTraceString() {
void Isolate::PushStackTraceAndDie(void* ptr1, void* ptr2, void* ptr3,
void* ptr4) {
- StackTraceFailureMessage message(this, ptr1, ptr2, ptr3, ptr4);
+ StackTraceFailureMessage message(this,
+ StackTraceFailureMessage::kIncludeStackTrace,
+ ptr1, ptr2, ptr3, ptr4);
+ message.Print();
+ base::OS::Abort();
+}
+
+void Isolate::PushParamsAndDie(void* ptr1, void* ptr2, void* ptr3, void* ptr4) {
+ StackTraceFailureMessage message(
+ this, StackTraceFailureMessage::kDontIncludeStackTrace, ptr1, ptr2, ptr3,
+ ptr4);
message.Print();
base::OS::Abort();
}
@@ -598,14 +650,14 @@ void StackTraceFailureMessage::Print() volatile {
// Print the details of this failure message object, including its own address
// to force stack allocation.
base::OS::PrintError(
- "Stacktrace:\n ptr1=%p\n ptr2=%p\n ptr3=%p\n ptr4=%p\n "
+ "Stacktrace:\n ptr1=%p\n ptr2=%p\n ptr3=%p\n ptr4=%p\n "
"failure_message_object=%p\n%s",
ptr1_, ptr2_, ptr3_, ptr4_, this, &js_stack_trace_[0]);
}
-StackTraceFailureMessage::StackTraceFailureMessage(Isolate* isolate, void* ptr1,
- void* ptr2, void* ptr3,
- void* ptr4) {
+StackTraceFailureMessage::StackTraceFailureMessage(
+ Isolate* isolate, StackTraceFailureMessage::StackTraceMode mode, void* ptr1,
+ void* ptr2, void* ptr3, void* ptr4) {
isolate_ = isolate;
ptr1_ = ptr1;
ptr2_ = ptr2;
@@ -614,17 +666,20 @@ StackTraceFailureMessage::StackTraceFailureMessage(Isolate* isolate, void* ptr1,
// Write a stracktrace into the {js_stack_trace_} buffer.
const size_t buffer_length = arraysize(js_stack_trace_);
memset(&js_stack_trace_, 0, buffer_length);
- FixedStringAllocator fixed(&js_stack_trace_[0], buffer_length - 1);
- StringStream accumulator(&fixed, StringStream::kPrintObjectConcise);
- isolate->PrintStack(&accumulator, Isolate::kPrintStackVerbose);
- // Keeping a reference to the last code objects to increase likelyhood that
- // they get included in the minidump.
- const size_t code_objects_length = arraysize(code_objects_);
- size_t i = 0;
- StackFrameIterator it(isolate);
- for (; !it.done() && i < code_objects_length; it.Advance()) {
- code_objects_[i++] =
- reinterpret_cast<void*>(it.frame()->unchecked_code().ptr());
+ memset(&code_objects_, 0, sizeof(code_objects_));
+ if (mode == kIncludeStackTrace) {
+ FixedStringAllocator fixed(&js_stack_trace_[0], buffer_length - 1);
+ StringStream accumulator(&fixed, StringStream::kPrintObjectConcise);
+ isolate->PrintStack(&accumulator, Isolate::kPrintStackVerbose);
+    // Keep a reference to the last code objects to increase the likelihood
+    // that they get included in the minidump.
+ const size_t code_objects_length = arraysize(code_objects_);
+ size_t i = 0;
+ StackFrameIterator it(isolate);
+ for (; !it.done() && i < code_objects_length; it.Advance()) {
+ code_objects_[i++] =
+ reinterpret_cast<void*>(it.frame()->unchecked_code().ptr());
+ }
}
}
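
The constructor above always zeroes both buffers but only performs the stack walk when the mode asks for it, which is what lets PushParamsAndDie record its raw pointers without collecting a trace. A standalone sketch of that gating pattern, using an illustrative FailureMessage type rather than the V8 class:

#include <cstdio>
#include <cstring>

struct FailureMessage {
  enum Mode { kIncludeStackTrace, kDontIncludeStackTrace };

  explicit FailureMessage(Mode mode, void* ptr = nullptr) : ptr_(ptr) {
    // Always clear the buffer so the minidump never sees stale bytes...
    std::memset(trace_, 0, sizeof(trace_));
    if (mode == kIncludeStackTrace) {
      // ...but only pay for the capture itself when it was requested.
      std::snprintf(trace_, sizeof(trace_), "<captured stack trace>");
    }
  }

  void* ptr_;
  char trace_[128];
};

int main() {
  FailureMessage with_trace(FailureMessage::kIncludeStackTrace);
  FailureMessage params_only(FailureMessage::kDontIncludeStackTrace);
  std::printf("'%s' vs '%s'\n", with_trace.trace_, params_only.trace_);
  return 0;
}
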
@@ -1703,6 +1758,14 @@ Object Isolate::ReThrow(Object exception) {
return ReadOnlyRoots(heap()).exception();
}
+Object Isolate::ReThrow(Object exception, Object message) {
+ DCHECK(!has_pending_exception());
+ DCHECK(!has_pending_message());
+
+ set_pending_message(message);
+ return ReThrow(exception);
+}
+
namespace {
#if V8_ENABLE_WEBASSEMBLY
// This scope will set the thread-in-wasm flag after the execution of all
@@ -2044,7 +2107,10 @@ Isolate::CatchType ToCatchType(HandlerTable::CatchPrediction prediction) {
Isolate::CatchType Isolate::PredictExceptionCatcher() {
Address external_handler = thread_local_top()->try_catch_handler_address();
- if (IsExternalHandlerOnTop(Object())) return CAUGHT_BY_EXTERNAL;
+ if (TopExceptionHandlerType(Object()) ==
+ ExceptionHandlerType::kExternalTryCatch) {
+ return CAUGHT_BY_EXTERNAL;
+ }
// Search for an exception handler by performing a full walk over the stack.
for (StackFrameIterator iter(this); !iter.done(); iter.Advance()) {
@@ -2112,7 +2178,8 @@ void Isolate::ScheduleThrow(Object exception) {
// When scheduling a throw we first throw the exception to get the
// error reporting if it is uncaught before rescheduling it.
Throw(exception);
- PropagatePendingExceptionToExternalTryCatch();
+ PropagatePendingExceptionToExternalTryCatch(
+ TopExceptionHandlerType(pending_exception()));
if (has_pending_exception()) {
set_scheduled_exception(pending_exception());
thread_local_top()->external_caught_exception_ = false;
@@ -2284,44 +2351,25 @@ Handle<JSMessageObject> Isolate::CreateMessage(Handle<Object> exception,
stack_trace_object);
}
-bool Isolate::IsJavaScriptHandlerOnTop(Object exception) {
+Isolate::ExceptionHandlerType Isolate::TopExceptionHandlerType(
+ Object exception) {
DCHECK_NE(ReadOnlyRoots(heap()).the_hole_value(), exception);
- // For uncatchable exceptions, the JavaScript handler cannot be on top.
- if (!is_catchable_by_javascript(exception)) return false;
-
- // Get the top-most JS_ENTRY handler, cannot be on top if it doesn't exist.
- Address entry_handler = Isolate::handler(thread_local_top());
- if (entry_handler == kNullAddress) return false;
-
- // Get the address of the external handler so we can compare the address to
- // determine which one is closer to the top of the stack.
+ Address js_handler = Isolate::handler(thread_local_top());
Address external_handler = thread_local_top()->try_catch_handler_address();
- if (external_handler == kNullAddress) return true;
- // The exception has been externally caught if and only if there is an
- // external handler which is on top of the top-most JS_ENTRY handler.
- //
- // Note, that finally clauses would re-throw an exception unless it's aborted
- // by jumps in control flow (like return, break, etc.) and we'll have another
- // chance to set proper v8::TryCatch later.
- return (entry_handler < external_handler);
-}
-
-bool Isolate::IsExternalHandlerOnTop(Object exception) {
- DCHECK_NE(ReadOnlyRoots(heap()).the_hole_value(), exception);
-
- // Get the address of the external handler so we can compare the address to
- // determine which one is closer to the top of the stack.
- Address external_handler = thread_local_top()->try_catch_handler_address();
- if (external_handler == kNullAddress) return false;
-
- // For uncatchable exceptions, the external handler is always on top.
- if (!is_catchable_by_javascript(exception)) return true;
+ // A handler cannot be on top if it doesn't exist. For uncatchable exceptions,
+ // the JavaScript handler cannot be on top.
+ if (js_handler == kNullAddress || !is_catchable_by_javascript(exception)) {
+ if (external_handler == kNullAddress) {
+ return ExceptionHandlerType::kNone;
+ }
+ return ExceptionHandlerType::kExternalTryCatch;
+ }
- // Get the top-most JS_ENTRY handler, cannot be on top if it doesn't exist.
- Address entry_handler = Isolate::handler(thread_local_top());
- if (entry_handler == kNullAddress) return true;
+ if (external_handler == kNullAddress) {
+ return ExceptionHandlerType::kJavaScriptHandler;
+ }
// The exception has been externally caught if and only if there is an
// external handler which is on top of the top-most JS_ENTRY handler.
@@ -2329,7 +2377,12 @@ bool Isolate::IsExternalHandlerOnTop(Object exception) {
// Note, that finally clauses would re-throw an exception unless it's aborted
// by jumps in control flow (like return, break, etc.) and we'll have another
// chance to set proper v8::TryCatch later.
- return (entry_handler > external_handler);
+ DCHECK_NE(kNullAddress, external_handler);
+ DCHECK_NE(kNullAddress, js_handler);
+ if (external_handler < js_handler) {
+ return ExceptionHandlerType::kExternalTryCatch;
+ }
+ return ExceptionHandlerType::kJavaScriptHandler;
}
std::vector<MemoryRange>* Isolate::GetCodePages() const {
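
The comparison above works because the handler closer to the top of the (downward-growing) stack has the smaller address. A standalone model of the same three-way classification, with plain uintptr_t values standing in for V8's Address and 0 meaning "no handler"; this is an illustration, not the internal API:

#include <cstdint>
#include <cstdio>

enum class HandlerType { kJavaScriptHandler, kExternalTryCatch, kNone };

HandlerType TopHandler(uintptr_t js_handler, uintptr_t external_handler,
                       bool catchable_by_js) {
  // No JS handler, or an exception JavaScript cannot catch: only an external
  // v8::TryCatch (if any) can be on top.
  if (js_handler == 0 || !catchable_by_js) {
    return external_handler == 0 ? HandlerType::kNone
                                 : HandlerType::kExternalTryCatch;
  }
  if (external_handler == 0) return HandlerType::kJavaScriptHandler;
  // Both exist: the one entered more recently (lower address) wins.
  return external_handler < js_handler ? HandlerType::kExternalTryCatch
                                       : HandlerType::kJavaScriptHandler;
}

int main() {
  // The external try-catch sits closer to the top than the JS entry frame.
  std::printf("%d\n", static_cast<int>(TopHandler(0x2000, 0x1000, true)));
  return 0;
}
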
@@ -2347,11 +2400,13 @@ void Isolate::ReportPendingMessages() {
AllowJavascriptExecutionDebugOnly allow_script(this);
Object exception_obj = pending_exception();
+ ExceptionHandlerType top_handler = TopExceptionHandlerType(exception_obj);
// Try to propagate the exception to an external v8::TryCatch handler. If
// propagation was unsuccessful, then we will get another chance at reporting
// the pending message if the exception is re-thrown.
- bool has_been_propagated = PropagatePendingExceptionToExternalTryCatch();
+ bool has_been_propagated =
+ PropagatePendingExceptionToExternalTryCatch(top_handler);
if (!has_been_propagated) return;
// Clear the pending message object early to avoid endless recursion.
@@ -2363,15 +2418,16 @@ void Isolate::ReportPendingMessages() {
if (!is_catchable_by_javascript(exception_obj)) return;
// Determine whether the message needs to be reported to all message handlers
- // depending on whether and external v8::TryCatch or an internal JavaScript
- // handler is on top.
+ // depending on whether the topmost external v8::TryCatch is verbose. We know
+ // there's no JavaScript handler on top; if there was, we would've returned
+ // early.
+ DCHECK_NE(ExceptionHandlerType::kJavaScriptHandler, top_handler);
+
bool should_report_exception;
- if (IsExternalHandlerOnTop(exception_obj)) {
- // Only report the exception if the external handler is verbose.
+ if (top_handler == ExceptionHandlerType::kExternalTryCatch) {
should_report_exception = try_catch_handler()->is_verbose_;
} else {
- // Report the exception if it isn't caught by JavaScript code.
- should_report_exception = !IsJavaScriptHandlerOnTop(exception_obj);
+ should_report_exception = true;
}
// Actually report the pending message to all message handlers.
@@ -2394,7 +2450,8 @@ void Isolate::ReportPendingMessages() {
bool Isolate::OptionalRescheduleException(bool clear_exception) {
DCHECK(has_pending_exception());
- PropagatePendingExceptionToExternalTryCatch();
+ PropagatePendingExceptionToExternalTryCatch(
+ TopExceptionHandlerType(pending_exception()));
bool is_termination_exception =
pending_exception() == ReadOnlyRoots(this).termination_exception();
@@ -2649,16 +2706,15 @@ bool Isolate::AreWasmExceptionsEnabled(Handle<Context> context) {
bool Isolate::IsWasmDynamicTieringEnabled() {
#if V8_ENABLE_WEBASSEMBLY
+ if (FLAG_wasm_dynamic_tiering) return true;
if (wasm_dynamic_tiering_enabled_callback()) {
HandleScope handle_scope(this);
v8::Local<v8::Context> api_context =
v8::Utils::ToLocal(handle(context(), this));
return wasm_dynamic_tiering_enabled_callback()(api_context);
}
- return FLAG_wasm_dynamic_tiering;
-#else
- return false;
#endif // V8_ENABLE_WEBASSEMBLY
+ return false;
}
Handle<Context> Isolate::GetIncumbentContext() {
@@ -2946,6 +3002,8 @@ Isolate* Isolate::NewShared(const v8::Isolate::CreateParams& params) {
// static
Isolate* Isolate::Allocate(bool is_shared) {
+ // v8::V8::Initialize() must be called before creating any isolates.
+ DCHECK_NOT_NULL(V8::GetCurrentPlatform());
// IsolateAllocator allocates the memory for the Isolate object according to
// the given allocation mode.
std::unique_ptr<IsolateAllocator> isolate_allocator =
@@ -2969,6 +3027,8 @@ Isolate* Isolate::Allocate(bool is_shared) {
// static
void Isolate::Delete(Isolate* isolate) {
DCHECK_NOT_NULL(isolate);
+ // v8::V8::Dispose() must only be called after deleting all isolates.
+ DCHECK_NOT_NULL(V8::GetCurrentPlatform());
// Temporarily set this isolate as current so that various parts of
// the isolate can access it in their destructors without having a
// direct pointer. We don't use Enter/Exit here to avoid
@@ -3075,6 +3135,10 @@ Isolate::Isolate(std::unique_ptr<i::IsolateAllocator> isolate_allocator,
InitializeDefaultEmbeddedBlob();
MicrotaskQueue::SetUpDefaultMicrotaskQueue(this);
+
+ if (is_shared_) {
+ global_safepoint_ = std::make_unique<GlobalSafepoint>(this);
+ }
}
void Isolate::CheckIsolateLayout() {
@@ -3165,7 +3229,7 @@ void Isolate::Deinit() {
}
// All client isolates should already be detached.
- DCHECK_NULL(client_isolate_head_);
+ if (is_shared()) global_safepoint()->AssertNoClients();
if (FLAG_print_deopt_stress) {
PrintF(stdout, "=== Stress deopt counter: %u\n", stress_deopt_count_);
@@ -3185,11 +3249,33 @@ void Isolate::Deinit() {
// This stops cancelable tasks (i.e. concurrent marking tasks).
// Stop concurrent tasks before destroying resources since they might still
// use those.
- cancelable_task_manager()->CancelAndWait();
+ {
+ IgnoreLocalGCRequests ignore_gc_requests(heap());
+ ParkedScope parked_scope(main_thread_local_heap());
+ cancelable_task_manager()->CancelAndWait();
+ }
+
+ // Cancel all baseline compiler tasks.
+ delete baseline_batch_compiler_;
+ baseline_batch_compiler_ = nullptr;
+
+ if (lazy_compile_dispatcher_) {
+ lazy_compile_dispatcher_->AbortAll();
+ lazy_compile_dispatcher_.reset();
+ }
+
+ // At this point there are no more background threads left in this isolate.
+ heap_.safepoint()->AssertMainThreadIsOnlyThread();
+
+ {
+ // This isolate might have to park for a shared GC initiated by another
+ // client isolate before it can actually detach from the shared isolate.
+ AllowGarbageCollection allow_shared_gc;
+ DetachFromSharedIsolate();
+ }
ReleaseSharedPtrs();
- string_table_.reset();
builtins_.TearDown();
bootstrapper_->TearDown();
@@ -3201,22 +3287,17 @@ void Isolate::Deinit() {
delete heap_profiler_;
heap_profiler_ = nullptr;
- if (lazy_compile_dispatcher_) {
- lazy_compile_dispatcher_->AbortAll();
- lazy_compile_dispatcher_.reset();
- }
+ string_table_.reset();
- delete baseline_batch_compiler_;
- baseline_batch_compiler_ = nullptr;
+#if USE_SIMULATOR
+ delete simulator_data_;
+ simulator_data_ = nullptr;
+#endif
// After all concurrent tasks are stopped, we know for sure that stats aren't
// updated anymore.
DumpAndResetStats();
- main_thread_local_isolate_->heap()->FreeLinearAllocationArea();
-
- DetachFromSharedIsolate();
-
heap_.TearDown();
main_thread_local_isolate_.reset();
@@ -3360,19 +3441,21 @@ void Isolate::SetTerminationOnExternalTryCatch() {
reinterpret_cast<void*>(ReadOnlyRoots(heap()).null_value().ptr());
}
-bool Isolate::PropagatePendingExceptionToExternalTryCatch() {
+bool Isolate::PropagatePendingExceptionToExternalTryCatch(
+ ExceptionHandlerType top_handler) {
Object exception = pending_exception();
- if (IsJavaScriptHandlerOnTop(exception)) {
+ if (top_handler == ExceptionHandlerType::kJavaScriptHandler) {
thread_local_top()->external_caught_exception_ = false;
return false;
}
- if (!IsExternalHandlerOnTop(exception)) {
+ if (top_handler == ExceptionHandlerType::kNone) {
thread_local_top()->external_caught_exception_ = false;
return true;
}
+ DCHECK_EQ(ExceptionHandlerType::kExternalTryCatch, top_handler);
thread_local_top()->external_caught_exception_ = true;
if (!is_catchable_by_javascript(exception)) {
SetTerminationOnExternalTryCatch();
@@ -3382,7 +3465,7 @@ bool Isolate::PropagatePendingExceptionToExternalTryCatch() {
pending_message().IsTheHole(this));
handler->can_continue_ = true;
handler->has_terminated_ = false;
- handler->exception_ = reinterpret_cast<void*>(pending_exception().ptr());
+ handler->exception_ = reinterpret_cast<void*>(exception.ptr());
// Propagate to the external try-catch only if we got an actual message.
if (!has_pending_message()) return true;
handler->message_obj_ = reinterpret_cast<void*>(pending_message().ptr());
@@ -3426,6 +3509,9 @@ void CreateOffHeapTrampolines(Isolate* isolate) {
// From this point onwards, the old builtin code object is unreachable and
// will be collected by the next GC.
builtins->set_code(builtin, *trampoline);
+ if (V8_EXTERNAL_CODE_SPACE_BOOL) {
+ builtins->set_codet(builtin, ToCodeT(*trampoline));
+ }
}
}
@@ -3482,8 +3568,8 @@ void Isolate::CreateAndSetEmbeddedBlob() {
uint32_t code_size;
uint8_t* data;
uint32_t data_size;
- InstructionStream::CreateOffHeapInstructionStream(this, &code, &code_size,
- &data, &data_size);
+ OffHeapInstructionStream::CreateOffHeapOffHeapInstructionStream(
+ this, &code, &code_size, &data, &data_size);
CHECK_EQ(0, current_embedded_blob_refs_);
const uint8_t* const_code = const_cast<const uint8_t*>(code);
@@ -3500,8 +3586,13 @@ void Isolate::CreateAndSetEmbeddedBlob() {
}
void Isolate::MaybeRemapEmbeddedBuiltinsIntoCodeRange() {
- if (!is_short_builtin_calls_enabled() || V8_ENABLE_NEAR_CODE_RANGE_BOOL ||
- !RequiresCodeRange()) {
+ if (!is_short_builtin_calls_enabled() || !RequiresCodeRange()) {
+ return;
+ }
+ if (V8_ENABLE_NEAR_CODE_RANGE_BOOL &&
+ GetShortBuiltinsCallRegion().contains(heap_.code_region())) {
+ // The embedded builtins are within the pc-relative reach from the code
+ // range, so there's no need to remap embedded builtins.
return;
}
@@ -3531,7 +3622,7 @@ void Isolate::TearDownEmbeddedBlob() {
current_embedded_blob_refs_--;
if (current_embedded_blob_refs_ == 0 && enable_embedded_blob_refcounting_) {
// We own the embedded blob and are the last holder. Free it.
- InstructionStream::FreeOffHeapInstructionStream(
+ OffHeapInstructionStream::FreeOffHeapOffHeapInstructionStream(
const_cast<uint8_t*>(CurrentEmbeddedBlobCode()),
embedded_blob_code_size(),
const_cast<uint8_t*>(CurrentEmbeddedBlobData()),
@@ -3674,6 +3765,10 @@ bool Isolate::Init(SnapshotData* startup_snapshot_data,
}
baseline_batch_compiler_ = new baseline::BaselineBatchCompiler(this);
+#if USE_SIMULATOR
+ simulator_data_ = new SimulatorData;
+#endif
+
// Enable logging before setting up the heap
logger_->SetUp(this);
@@ -3689,22 +3784,31 @@ bool Isolate::Init(SnapshotData* startup_snapshot_data,
// Create LocalIsolate/LocalHeap for the main thread and set state to Running.
main_thread_local_isolate_.reset(new LocalIsolate(this, ThreadKind::kMain));
- main_thread_local_heap()->Unpark();
+
+ {
+ IgnoreLocalGCRequests ignore_gc_requests(heap());
+ main_thread_local_heap()->Unpark();
+ }
+
+ // Lock clients_mutex_ in order to prevent shared GCs from other clients
+ // during deserialization.
+ base::Optional<base::MutexGuard> clients_guard;
+
+ if (shared_isolate_) {
+ clients_guard.emplace(&shared_isolate_->global_safepoint()->clients_mutex_);
+ }
// The main thread LocalHeap needs to be set up when attaching to the shared
// isolate. Otherwise a global safepoint would find an isolate without
// LocalHeaps and not wait until this thread is ready for a GC.
AttachToSharedIsolate();
- // We need to ensure that we do not let a shared GC run before this isolate is
- // fully set up.
- DisallowSafepoints no_shared_gc;
-
// SetUp the object heap.
DCHECK(!heap_.HasBeenSetUp());
heap_.SetUp(main_thread_local_heap());
ReadOnlyHeap::SetUp(this, read_only_snapshot_data, can_rehash);
- heap_.SetUpSpaces();
+ heap_.SetUpSpaces(&isolate_data_.new_allocation_info_,
+ &isolate_data_.old_allocation_info_);
if (OwnsStringTable()) {
string_table_ = std::make_shared<StringTable>(this);
@@ -3730,14 +3834,14 @@ bool Isolate::Init(SnapshotData* startup_snapshot_data,
}
}
if (V8_ENABLE_NEAR_CODE_RANGE_BOOL) {
- // When enable short builtin calls by near code range, the
- // code range should be close (<2GB) to the embedded blob to use
- // pc-relative calls.
- is_short_builtin_calls_enabled_ =
+      // Short builtin calls can still be enabled if the allocated code range
+      // is close enough to the embedded builtins for the latter to be reached
+      // using pc-relative (short) calls/jumps.
+ is_short_builtin_calls_enabled_ |=
GetShortBuiltinsCallRegion().contains(heap_.code_region());
}
}
-#if V8_EXTERNAL_CODE_SPACE
+#ifdef V8_EXTERNAL_CODE_SPACE
if (heap_.code_range()) {
code_cage_base_ = GetPtrComprCageBaseAddress(heap_.code_range()->base());
} else {
@@ -3823,7 +3927,7 @@ bool Isolate::Init(SnapshotData* startup_snapshot_data,
// If we are deserializing, read the state into the now-empty heap.
{
- CodeSpaceMemoryModificationScope modification_scope(heap());
+ CodePageCollectionMemoryModificationScope modification_scope(heap());
if (create_heap_objects) {
heap_.read_only_space()->ClearStringPaddingIfNeeded();
@@ -3923,6 +4027,25 @@ bool Isolate::Init(SnapshotData* startup_snapshot_data,
PrintF("[Initializing isolate from scratch took %0.3f ms]\n", ms);
}
+#ifdef V8_ENABLE_WEBASSEMBLY
+ if (FLAG_experimental_wasm_stack_switching) {
+ std::unique_ptr<wasm::StackMemory> stack(
+ wasm::StackMemory::GetCurrentStackView(this));
+ this->wasm_stacks() = stack.get();
+ if (FLAG_trace_wasm_stack_switching) {
+ PrintF("Set up native stack object (limit: %p, base: %p)\n",
+ stack->jslimit(), reinterpret_cast<void*>(stack->base()));
+ }
+ HandleScope scope(this);
+ Handle<WasmContinuationObject> continuation =
+ WasmContinuationObject::New(this, std::move(stack));
+ heap()
+ ->roots_table()
+ .slot(RootIndex::kActiveContinuation)
+ .store(*continuation);
+ }
+#endif
+
initialized_ = true;
return true;
@@ -4075,8 +4198,9 @@ bool Isolate::use_optimizer() {
}
void Isolate::IncreaseTotalRegexpCodeGenerated(Handle<HeapObject> code) {
- DCHECK(code->IsCode() || code->IsByteArray());
- total_regexp_code_generated_ += code->Size();
+ PtrComprCageBase cage_base(this);
+ DCHECK(code->IsCode(cage_base) || code->IsByteArray(cage_base));
+ total_regexp_code_generated_ += code->Size(cage_base);
}
bool Isolate::NeedsDetailedOptimizedCodeLineInfo() const {
@@ -4363,7 +4487,8 @@ MaybeHandle<JSPromise> Isolate::RunHostImportModuleDynamicallyCallback(
v8::Local<v8::Context> api_context =
v8::Utils::ToLocal(Handle<Context>::cast(native_context()));
if (host_import_module_dynamically_with_import_assertions_callback_ ==
- nullptr) {
+ nullptr &&
+ host_import_module_dynamically_callback_ == nullptr) {
Handle<Object> exception =
factory()->NewError(error_function(), MessageTemplate::kUnsupported);
return NewRejectedPromise(this, api_context, exception);
@@ -4386,18 +4511,31 @@ MaybeHandle<JSPromise> Isolate::RunHostImportModuleDynamicallyCallback(
clear_pending_exception();
return NewRejectedPromise(this, api_context, exception);
}
- // TODO(cbruni, v8:12302): Avoid creating tempory ScriptOrModule objects.
- auto script_or_module = i::Handle<i::ScriptOrModule>::cast(
- this->factory()->NewStruct(i::SCRIPT_OR_MODULE_TYPE));
- script_or_module->set_resource_name(referrer->name());
- script_or_module->set_host_defined_options(referrer->host_defined_options());
- ASSIGN_RETURN_ON_SCHEDULED_EXCEPTION_VALUE(
- this, promise,
- host_import_module_dynamically_with_import_assertions_callback_(
- api_context, v8::Utils::ToLocal(script_or_module),
- v8::Utils::ToLocal(specifier_str),
- ToApiHandle<v8::FixedArray>(import_assertions_array)),
- MaybeHandle<JSPromise>());
+ if (host_import_module_dynamically_callback_) {
+ ASSIGN_RETURN_ON_SCHEDULED_EXCEPTION_VALUE(
+ this, promise,
+ host_import_module_dynamically_callback_(
+ api_context,
+ v8::Utils::ToLocal(handle(referrer->host_defined_options(), this)),
+ v8::Utils::ToLocal(handle(referrer->name(), this)),
+ v8::Utils::ToLocal(specifier_str),
+ ToApiHandle<v8::FixedArray>(import_assertions_array)),
+ MaybeHandle<JSPromise>());
+ } else {
+    // TODO(cbruni, v8:12302): Avoid creating temporary ScriptOrModule objects.
+ auto script_or_module = i::Handle<i::ScriptOrModule>::cast(
+ this->factory()->NewStruct(i::SCRIPT_OR_MODULE_TYPE));
+ script_or_module->set_resource_name(referrer->name());
+ script_or_module->set_host_defined_options(
+ referrer->host_defined_options());
+ ASSIGN_RETURN_ON_SCHEDULED_EXCEPTION_VALUE(
+ this, promise,
+ host_import_module_dynamically_with_import_assertions_callback_(
+ api_context, v8::Utils::ToLocal(script_or_module),
+ v8::Utils::ToLocal(specifier_str),
+ ToApiHandle<v8::FixedArray>(import_assertions_array)),
+ MaybeHandle<JSPromise>());
+ }
return v8::Utils::OpenHandle(*promise);
}
@@ -4489,7 +4627,14 @@ MaybeHandle<FixedArray> Isolate::GetImportAssertionsFromArgument(
void Isolate::ClearKeptObjects() { heap()->ClearKeptObjects(); }
void Isolate::SetHostImportModuleDynamicallyCallback(
+ HostImportModuleDynamicallyCallback callback) {
+ DCHECK_NULL(host_import_module_dynamically_with_import_assertions_callback_);
+ host_import_module_dynamically_callback_ = callback;
+}
+
+void Isolate::SetHostImportModuleDynamicallyCallback(
HostImportModuleDynamicallyWithImportAssertionsCallback callback) {
+ DCHECK_NULL(host_import_module_dynamically_callback_);
host_import_module_dynamically_with_import_assertions_callback_ = callback;
}
@@ -4895,7 +5040,6 @@ void Isolate::CollectSourcePositionsForAllBytecodeArrays() {
HandleScope scope(this);
std::vector<Handle<SharedFunctionInfo>> sfis;
{
- DisallowGarbageCollection no_gc;
HeapObjectIterator iterator(heap());
for (HeapObject obj = iterator.Next(); !obj.is_null();
obj = iterator.Next()) {
@@ -4928,6 +5072,32 @@ bool StringEqualsLocales(Isolate* isolate, const std::string& str,
} // namespace
+const std::string& Isolate::DefaultLocale() {
+ if (default_locale_.empty()) {
+ icu::Locale default_locale;
+ // Translate ICU's fallback locale to a well-known locale.
+ if (strcmp(default_locale.getName(), "en_US_POSIX") == 0 ||
+ strcmp(default_locale.getName(), "c") == 0) {
+ set_default_locale("en-US");
+ } else {
+ // Set the locale
+ set_default_locale(default_locale.isBogus()
+ ? "und"
+ : Intl::ToLanguageTag(default_locale).FromJust());
+ }
+ DCHECK(!default_locale_.empty());
+ }
+ return default_locale_;
+}
+
+void Isolate::ResetDefaultLocale() {
+ default_locale_.clear();
+ clear_cached_icu_objects();
+ // We inline fast paths assuming certain locales. Since this path is rarely
+ // taken, we deoptimize everything to keep things simple.
+ Deoptimizer::DeoptimizeAll(this);
+}
+
icu::UMemory* Isolate::get_cached_icu_object(ICUObjectCacheType cache_type,
Handle<Object> locales) {
const ICUObjectCacheEntry& entry =
@@ -5192,7 +5362,7 @@ void Isolate::AttachToSharedIsolate() {
if (shared_isolate_) {
DCHECK(shared_isolate_->is_shared());
- shared_isolate_->AppendAsClientIsolate(this);
+ shared_isolate_->global_safepoint()->AppendClient(this);
}
#if DEBUG
@@ -5204,7 +5374,7 @@ void Isolate::DetachFromSharedIsolate() {
DCHECK(attached_to_shared_isolate_);
if (shared_isolate_) {
- shared_isolate_->RemoveAsClientIsolate(this);
+ shared_isolate_->global_safepoint()->RemoveClient(this);
shared_isolate_ = nullptr;
}
@@ -5213,39 +5383,5 @@ void Isolate::DetachFromSharedIsolate() {
#endif // DEBUG
}
-void Isolate::AppendAsClientIsolate(Isolate* client) {
- base::MutexGuard guard(&client_isolate_mutex_);
-
- DCHECK_NULL(client->prev_client_isolate_);
- DCHECK_NULL(client->next_client_isolate_);
- DCHECK_NE(client_isolate_head_, client);
-
- if (client_isolate_head_) {
- client_isolate_head_->prev_client_isolate_ = client;
- }
-
- client->prev_client_isolate_ = nullptr;
- client->next_client_isolate_ = client_isolate_head_;
-
- client_isolate_head_ = client;
-}
-
-void Isolate::RemoveAsClientIsolate(Isolate* client) {
- base::MutexGuard guard(&client_isolate_mutex_);
-
- if (client->next_client_isolate_) {
- client->next_client_isolate_->prev_client_isolate_ =
- client->prev_client_isolate_;
- }
-
- if (client->prev_client_isolate_) {
- client->prev_client_isolate_->next_client_isolate_ =
- client->next_client_isolate_;
- } else {
- DCHECK_EQ(client_isolate_head_, client);
- client_isolate_head_ = client->next_client_isolate_;
- }
-}
-
} // namespace internal
} // namespace v8
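
The dynamic-import path above now accepts two registration styles: a callback that receives the referrer's host-defined options and resource name directly, and the older ScriptOrModule-based callback; the setters DCHECK that only one of the two is installed, and dispatch simply uses whichever is present. A standalone model of that selection, with std::function placeholders and illustrative names rather than the v8 embedder API:

#include <cassert>
#include <cstdio>
#include <functional>
#include <string>
#include <utility>

class Host {
 public:
  using Callback = std::function<void(const std::string& specifier)>;

  // Mirrors the DCHECKs above: an embedder registers one style or the other.
  void SetNewCallback(Callback cb) {
    assert(!legacy_callback_);
    new_callback_ = std::move(cb);
  }
  void SetLegacyCallback(Callback cb) {
    assert(!new_callback_);
    legacy_callback_ = std::move(cb);
  }

  void ImportDynamically(const std::string& specifier) {
    if (!new_callback_ && !legacy_callback_) {
      std::puts("unsupported");  // Corresponds to the rejected-promise path.
      return;
    }
    if (new_callback_) {
      new_callback_(specifier);  // Newer registration, no ScriptOrModule needed.
    } else {
      legacy_callback_(specifier);  // Older registration.
    }
  }

 private:
  Callback new_callback_;
  Callback legacy_callback_;
};

int main() {
  Host host;
  host.SetNewCallback(
      [](const std::string& s) { std::printf("import %s\n", s.c_str()); });
  host.ImportDynamically("./module.mjs");
  return 0;
}
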
diff --git a/deps/v8/src/execution/isolate.h b/deps/v8/src/execution/isolate.h
index 8dabf059d6..65a85dac9e 100644
--- a/deps/v8/src/execution/isolate.h
+++ b/deps/v8/src/execution/isolate.h
@@ -50,12 +50,23 @@ class UMemory;
} // namespace U_ICU_NAMESPACE
#endif // V8_INTL_SUPPORT
+#if USE_SIMULATOR
+#include "src/execution/encoded-c-signature.h"
+namespace v8 {
+namespace internal {
+class SimulatorData;
+}
+} // namespace v8
+#endif
+
namespace v8_inspector {
class V8Inspector;
} // namespace v8_inspector
namespace v8 {
+class EmbedderState;
+
namespace base {
class RandomNumberGenerator;
} // namespace base
@@ -95,6 +106,7 @@ class HandleScopeImplementer;
class HeapObjectToIndexHashMap;
class HeapProfiler;
class GlobalHandles;
+class GlobalSafepoint;
class InnerPointerToCodeCache;
class LazyCompileDispatcher;
class LocalIsolate;
@@ -145,6 +157,10 @@ namespace metrics {
class Recorder;
} // namespace metrics
+namespace wasm {
+class StackMemory;
+}
+
#define RETURN_FAILURE_IF_SCHEDULED_EXCEPTION(isolate) \
do { \
Isolate* __isolate__ = (isolate); \
@@ -756,8 +772,13 @@ class V8_EXPORT_PRIVATE Isolate final : private HiddenFactory {
inline void clear_scheduled_exception();
inline void set_scheduled_exception(Object exception);
- bool IsJavaScriptHandlerOnTop(Object exception);
- bool IsExternalHandlerOnTop(Object exception);
+ enum class ExceptionHandlerType {
+ kJavaScriptHandler,
+ kExternalTryCatch,
+ kNone
+ };
+
+ ExceptionHandlerType TopExceptionHandlerType(Object exception);
inline bool is_catchable_by_javascript(Object exception);
inline bool is_catchable_by_wasm(Object exception);
@@ -871,6 +892,9 @@ class V8_EXPORT_PRIVATE Isolate final : private HiddenFactory {
void* ptr2 = nullptr,
void* ptr3 = nullptr,
void* ptr4 = nullptr);
+ // Similar to the above but without collecting the stack trace.
+ V8_NOINLINE void PushParamsAndDie(void* ptr1 = nullptr, void* ptr2 = nullptr,
+ void* ptr3 = nullptr, void* ptr4 = nullptr);
Handle<FixedArray> CaptureCurrentStackTrace(
int frame_limit, StackTrace::StackTraceOptions options);
Handle<Object> CaptureSimpleStackTrace(Handle<JSReceiver> error_object,
@@ -930,7 +954,10 @@ class V8_EXPORT_PRIVATE Isolate final : private HiddenFactory {
// Re-throw an exception. This involves no error reporting since error
// reporting was handled when the exception was thrown originally.
+ // The first overload doesn't set the corresponding pending message, which
+ // has to be set separately or be guaranteed to not have changed.
Object ReThrow(Object exception);
+ Object ReThrow(Object exception, Object message);
// Find the correct handler for the current pending exception. This also
// clears and returns the current pending exception.
@@ -1087,7 +1114,7 @@ class V8_EXPORT_PRIVATE Isolate final : private HiddenFactory {
// address of the cage where the code space is allocated. Otherwise, it
// defaults to cage_base().
Address code_cage_base() const {
-#if V8_EXTERNAL_CODE_SPACE
+#ifdef V8_EXTERNAL_CODE_SPACE
return code_cage_base_;
#else
return cage_base();
@@ -1146,6 +1173,9 @@ class V8_EXPORT_PRIVATE Isolate final : private HiddenFactory {
V8_INLINE Address* builtin_tier0_table() {
return isolate_data_.builtin_tier0_table();
}
+ V8_INLINE Address* builtin_code_data_container_table() {
+ return isolate_data_.builtin_code_data_container_table();
+ }
bool IsBuiltinTableHandleLocation(Address* handle_location);
@@ -1195,7 +1225,7 @@ class V8_EXPORT_PRIVATE Isolate final : private HiddenFactory {
return descriptor_lookup_cache_;
}
- HandleScopeData* handle_scope_data() { return &handle_scope_data_; }
+ V8_INLINE HandleScopeData* handle_scope_data() { return &handle_scope_data_; }
HandleScopeImplementer* handle_scope_implementer() const {
DCHECK(handle_scope_implementer_);
@@ -1279,6 +1309,7 @@ class V8_EXPORT_PRIVATE Isolate final : private HiddenFactory {
THREAD_LOCAL_TOP_ACCESSOR(ExternalCallbackScope*, external_callback_scope)
THREAD_LOCAL_TOP_ACCESSOR(StateTag, current_vm_state)
+ THREAD_LOCAL_TOP_ACCESSOR(EmbedderState*, current_embedder_state)
void SetData(uint32_t slot, void* data) {
DCHECK_LT(slot, Internals::kNumIsolateDataSlots);
@@ -1363,9 +1394,9 @@ class V8_EXPORT_PRIVATE Isolate final : private HiddenFactory {
#ifdef V8_INTL_SUPPORT
- const std::string& default_locale() { return default_locale_; }
+ const std::string& DefaultLocale();
- void ResetDefaultLocale() { default_locale_.clear(); }
+ void ResetDefaultLocale();
void set_default_locale(const std::string& locale) {
DCHECK_EQ(default_locale_.length(), 0);
@@ -1632,8 +1663,9 @@ class V8_EXPORT_PRIVATE Isolate final : private HiddenFactory {
return V8_SHORT_BUILTIN_CALLS_BOOL && is_short_builtin_calls_enabled_;
}
- // Returns a region from which it's possible to make short calls/jumps to
- // embedded builtins or empty region if there's no embedded blob.
+  // Returns a region from which it's possible to make pc-relative (short)
+  // calls/jumps to embedded builtins, or an empty region if there's no
+  // embedded blob or if pc-relative calls are not supported.
static base::AddressRegion GetShortBuiltinsCallRegion();
void set_array_buffer_allocator(v8::ArrayBuffer::Allocator* allocator) {
@@ -1689,6 +1721,8 @@ class V8_EXPORT_PRIVATE Isolate final : private HiddenFactory {
void SetHostImportModuleDynamicallyCallback(
HostImportModuleDynamicallyWithImportAssertionsCallback callback);
+ void SetHostImportModuleDynamicallyCallback(
+ HostImportModuleDynamicallyCallback callback);
MaybeHandle<JSPromise> RunHostImportModuleDynamicallyCallback(
Handle<Script> referrer, Handle<Object> specifier,
MaybeHandle<Object> maybe_import_assertions_argument);
@@ -1857,19 +1891,17 @@ class V8_EXPORT_PRIVATE Isolate final : private HiddenFactory {
shared_isolate_ = shared_isolate;
}
- bool HasClientIsolates() const { return client_isolate_head_; }
+ GlobalSafepoint* global_safepoint() const { return global_safepoint_.get(); }
- template <typename Callback>
- void IterateClientIsolates(Callback callback) {
- for (Isolate* current = client_isolate_head_; current;
- current = current->next_client_isolate_) {
- callback(current);
- }
- }
+ bool OwnsStringTable() { return !FLAG_shared_string_table || is_shared(); }
- base::Mutex* client_isolate_mutex() { return &client_isolate_mutex_; }
+#if USE_SIMULATOR
+ SimulatorData* simulator_data() { return simulator_data_; }
+#endif
- bool OwnsStringTable() { return !FLAG_shared_string_table || is_shared(); }
+#ifdef V8_ENABLE_WEBASSEMBLY
+ wasm::StackMemory*& wasm_stacks() { return wasm_stacks_; }
+#endif
private:
explicit Isolate(std::unique_ptr<IsolateAllocator> isolate_allocator,
@@ -1957,7 +1989,8 @@ class V8_EXPORT_PRIVATE Isolate final : private HiddenFactory {
// Propagate pending exception message to the v8::TryCatch.
// If there is no external try-catch or message was successfully propagated,
// then return true.
- bool PropagatePendingExceptionToExternalTryCatch();
+ bool PropagatePendingExceptionToExternalTryCatch(
+ ExceptionHandlerType top_handler);
void RunPromiseHookForAsyncEventDelegate(PromiseHookType type,
Handle<JSPromise> promise);
@@ -1997,10 +2030,6 @@ class V8_EXPORT_PRIVATE Isolate final : private HiddenFactory {
void AttachToSharedIsolate();
void DetachFromSharedIsolate();
- // Methods for appending and removing to/from client isolates list.
- void AppendAsClientIsolate(Isolate* client);
- void RemoveAsClientIsolate(Isolate* client);
-
// This class contains a collection of data accessible from both C++ runtime
// and compiled code (including assembly stubs, builtins, interpreter bytecode
// handlers and optimized code).
@@ -2073,6 +2102,8 @@ class V8_EXPORT_PRIVATE Isolate final : private HiddenFactory {
v8::Isolate::AtomicsWaitCallback atomics_wait_callback_ = nullptr;
void* atomics_wait_callback_data_ = nullptr;
PromiseHook promise_hook_ = nullptr;
+ HostImportModuleDynamicallyCallback host_import_module_dynamically_callback_ =
+ nullptr;
HostImportModuleDynamicallyWithImportAssertionsCallback
host_import_module_dynamically_with_import_assertions_callback_ = nullptr;
std::atomic<debug::CoverageMode> code_coverage_mode_{
@@ -2133,7 +2164,7 @@ class V8_EXPORT_PRIVATE Isolate final : private HiddenFactory {
// favor memory over runtime performance.
bool memory_savings_mode_active_ = false;
-#if V8_EXTERNAL_CODE_SPACE
+#ifdef V8_EXTERNAL_CODE_SPACE
// Base address of the pointer compression cage containing external code
// space, when external code space is enabled.
Address code_cage_base_ = 0;
@@ -2305,15 +2336,12 @@ class V8_EXPORT_PRIVATE Isolate final : private HiddenFactory {
bool attached_to_shared_isolate_ = false;
#endif // DEBUG
- // A shared isolate will use these two fields to track all its client
- // isolates.
- base::Mutex client_isolate_mutex_;
- Isolate* client_isolate_head_ = nullptr;
-
- // Used to form a linked list of all client isolates. Protected by
- // client_isolate_mutex_.
- Isolate* prev_client_isolate_ = nullptr;
- Isolate* next_client_isolate_ = nullptr;
+ // Used to track and safepoint all client isolates attached to this shared
+ // isolate.
+ std::unique_ptr<GlobalSafepoint> global_safepoint_;
+ // Client isolates list managed by GlobalSafepoint.
+ Isolate* global_safepoint_prev_client_isolate_ = nullptr;
+ Isolate* global_safepoint_next_client_isolate_ = nullptr;
// A signal-safe vector of heap pages containing code. Used with the
// v8::Unwinder API.
@@ -2323,6 +2351,10 @@ class V8_EXPORT_PRIVATE Isolate final : private HiddenFactory {
// The mutex only guards adding pages, the retrieval is signal safe.
base::Mutex code_pages_mutex_;
+#ifdef V8_ENABLE_WEBASSEMBLY
+ wasm::StackMemory* wasm_stacks_;
+#endif
+
// Enables the host application to provide a mechanism for recording a
// predefined set of data as crash keys to be used in postmortem debugging
// in case of a crash.
@@ -2332,7 +2364,12 @@ class V8_EXPORT_PRIVATE Isolate final : private HiddenFactory {
// Isolate::Delete() are used for Isolate creation and deletion.
void* operator new(size_t, void* ptr) { return ptr; }
+#if USE_SIMULATOR
+ SimulatorData* simulator_data_ = nullptr;
+#endif
+
friend class heap::HeapTester;
+ friend class GlobalSafepoint;
friend class TestSerializer;
};
@@ -2462,9 +2499,11 @@ class StackLimitCheck {
class StackTraceFailureMessage {
public:
- explicit StackTraceFailureMessage(Isolate* isolate, void* ptr1 = nullptr,
- void* ptr2 = nullptr, void* ptr3 = nullptr,
- void* ptr4 = nullptr);
+ enum StackTraceMode { kIncludeStackTrace, kDontIncludeStackTrace };
+
+ explicit StackTraceFailureMessage(Isolate* isolate, StackTraceMode mode,
+ void* ptr1 = nullptr, void* ptr2 = nullptr,
+ void* ptr3 = nullptr, void* ptr4 = nullptr);
V8_NOINLINE void Print() volatile;
diff --git a/deps/v8/src/execution/local-isolate.cc b/deps/v8/src/execution/local-isolate.cc
index 4e752ed892..c3dd70718b 100644
--- a/deps/v8/src/execution/local-isolate.cc
+++ b/deps/v8/src/execution/local-isolate.cc
@@ -27,7 +27,13 @@ LocalIsolate::LocalIsolate(Isolate* isolate, ThreadKind kind,
runtime_call_stats_(kind == ThreadKind::kMain &&
runtime_call_stats == nullptr
? isolate->counters()->runtime_call_stats()
- : runtime_call_stats) {}
+ : runtime_call_stats)
+#ifdef V8_INTL_SUPPORT
+ ,
+ default_locale_(isolate->DefaultLocale())
+#endif
+{
+}
LocalIsolate::~LocalIsolate() {
if (bigint_processor_) bigint_processor_->Destroy();
@@ -64,5 +70,15 @@ bool StackLimitCheck::HasOverflowed(LocalIsolate* local_isolate) {
return GetCurrentStackPosition() < local_isolate->stack_limit();
}
+#ifdef V8_INTL_SUPPORT
+// WARNING: This might be out of sync with the main thread.
+const std::string& LocalIsolate::DefaultLocale() {
+ const std::string& res =
+ is_main_thread() ? isolate_->DefaultLocale() : default_locale_;
+ DCHECK(!res.empty());
+ return res;
+}
+#endif
+
} // namespace internal
} // namespace v8
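
Each LocalIsolate copies the main isolate's default locale in its initializer list, and DefaultLocale() above serves the live value on the main thread but the snapshot on background threads, accepting the documented risk of staleness. A standalone sketch of that snapshot pattern, with plain structs instead of the V8 classes:

#include <cstdio>
#include <string>

struct MainContext {
  std::string default_locale = "en-US";
};

struct WorkerContext {
  // Copy once at construction; background reads never touch the main value.
  explicit WorkerContext(const MainContext& main)
      : default_locale_(main.default_locale) {}
  const std::string& DefaultLocale() const { return default_locale_; }

 private:
  std::string default_locale_;  // May fall out of sync with the main thread.
};

int main() {
  MainContext main_ctx;
  WorkerContext worker(main_ctx);
  main_ctx.default_locale = "de-DE";  // The worker still sees its snapshot.
  std::printf("%s\n", worker.DefaultLocale().c_str());
  return 0;
}
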
diff --git a/deps/v8/src/execution/local-isolate.h b/deps/v8/src/execution/local-isolate.h
index 3d34017019..a7fa429beb 100644
--- a/deps/v8/src/execution/local-isolate.h
+++ b/deps/v8/src/execution/local-isolate.h
@@ -67,6 +67,17 @@ class V8_EXPORT_PRIVATE LocalIsolate final : private HiddenLocalFactory {
base::SharedMutex* internalized_string_access() {
return isolate_->internalized_string_access();
}
+ const AstStringConstants* ast_string_constants() {
+ return isolate_->ast_string_constants();
+ }
+ LazyCompileDispatcher* lazy_compile_dispatcher() {
+ return isolate_->lazy_compile_dispatcher();
+ }
+ Logger* main_thread_logger() {
+ // TODO(leszeks): This is needed for logging in ParseInfo. Figure out a way
+ // to use the LocalLogger for this instead.
+ return isolate_->logger();
+ }
v8::internal::LocalFactory* factory() {
// Upcast to the privately inherited base-class using c-style casts to avoid
@@ -123,8 +134,14 @@ class V8_EXPORT_PRIVATE LocalIsolate final : private HiddenLocalFactory {
return isolate_->pending_message_address();
}
+#ifdef V8_INTL_SUPPORT
+  // WARNING: This might be out of sync with the main thread.
+ const std::string& DefaultLocale();
+#endif
+
private:
friend class v8::internal::LocalFactory;
+ friend class LocalIsolateFactory;
void InitializeBigIntProcessor();
@@ -140,6 +157,9 @@ class V8_EXPORT_PRIVATE LocalIsolate final : private HiddenLocalFactory {
RuntimeCallStats* runtime_call_stats_;
bigint::Processor* bigint_processor_{nullptr};
+#ifdef V8_INTL_SUPPORT
+ std::string default_locale_;
+#endif
};
template <base::MutexSharedType kIsShared>
diff --git a/deps/v8/src/execution/messages.cc b/deps/v8/src/execution/messages.cc
index 10d89ca14e..68a6ccb4b9 100644
--- a/deps/v8/src/execution/messages.cc
+++ b/deps/v8/src/execution/messages.cc
@@ -261,10 +261,10 @@ MaybeHandle<Object> AppendErrorString(Isolate* isolate, Handle<Object> error,
DCHECK(isolate->has_pending_exception());
isolate->clear_pending_exception();
isolate->set_external_caught_exception(false);
- builder->AppendCString("<error>");
+ builder->AppendCStringLiteral("<error>");
} else {
// Formatted thrown exception successfully, append it.
- builder->AppendCString("<error: ");
+ builder->AppendCStringLiteral("<error: ");
builder->AppendString(err_str.ToHandleChecked());
builder->AppendCharacter('>');
}
@@ -369,7 +369,7 @@ MaybeHandle<Object> ErrorUtils::FormatStackTrace(Isolate* isolate,
Object);
for (int i = 0; i < elems->length(); ++i) {
- builder.AppendCString("\n at ");
+ builder.AppendCStringLiteral("\n at ");
Handle<StackFrameInfo> frame(StackFrameInfo::cast(elems->get(i)), isolate);
SerializeStackFrameInfo(isolate, frame, &builder);
@@ -389,12 +389,12 @@ MaybeHandle<Object> ErrorUtils::FormatStackTrace(Isolate* isolate,
if (exception_string.is_null()) {
// Formatting the thrown exception threw again, give up.
- builder.AppendCString("<error>");
+ builder.AppendCStringLiteral("<error>");
} else {
// Formatted thrown exception successfully, append it.
- builder.AppendCString("<error: ");
+ builder.AppendCStringLiteral("<error: ");
builder.AppendString(exception_string.ToHandleChecked());
- builder.AppendCString("<error>");
+ builder.AppendCStringLiteral("<error>");
}
}
}
@@ -658,7 +658,7 @@ MaybeHandle<String> ErrorUtils::ToString(Isolate* isolate,
// the code unit 0x0020 (SPACE), and msg.
IncrementalStringBuilder builder(isolate);
builder.AppendString(name);
- builder.AppendCString(": ");
+ builder.AppendCStringLiteral(": ");
builder.AppendString(msg);
Handle<String> result;
@@ -745,7 +745,7 @@ Handle<String> BuildDefaultCallSite(Isolate* isolate, Handle<Object> object) {
builder.AppendString(Object::TypeOf(isolate, object));
if (object->IsString()) {
- builder.AppendCString(" \"");
+ builder.AppendCStringLiteral(" \"");
Handle<String> string = Handle<String>::cast(object);
// This threshold must be sufficiently far below String::kMaxLength that
// the {builder}'s result can never exceed that limit.
@@ -756,20 +756,17 @@ Handle<String> BuildDefaultCallSite(Isolate* isolate, Handle<Object> object) {
string = isolate->factory()->NewProperSubString(string, 0,
kMaxPrintedStringLength);
builder.AppendString(string);
- builder.AppendCString("<...>");
+ builder.AppendCStringLiteral("<...>");
}
- builder.AppendCString("\"");
+ builder.AppendCStringLiteral("\"");
} else if (object->IsNull(isolate)) {
- builder.AppendCString(" ");
- builder.AppendString(isolate->factory()->null_string());
+ builder.AppendCStringLiteral(" null");
} else if (object->IsTrue(isolate)) {
- builder.AppendCString(" ");
- builder.AppendString(isolate->factory()->true_string());
+ builder.AppendCStringLiteral(" true");
} else if (object->IsFalse(isolate)) {
- builder.AppendCString(" ");
- builder.AppendString(isolate->factory()->false_string());
+ builder.AppendCStringLiteral(" false");
} else if (object->IsNumber()) {
- builder.AppendCString(" ");
+ builder.AppendCharacter(' ');
builder.AppendString(isolate->factory()->NumberToString(object));
}
@@ -782,8 +779,9 @@ Handle<String> RenderCallSite(Isolate* isolate, Handle<Object> object,
if (ComputeLocation(isolate, location)) {
UnoptimizedCompileFlags flags = UnoptimizedCompileFlags::ForFunctionCompile(
isolate, *location->shared());
- UnoptimizedCompileState compile_state(isolate);
- ParseInfo info(isolate, flags, &compile_state);
+ UnoptimizedCompileState compile_state;
+ ReusableUnoptimizedCompileState reusable_state(isolate);
+ ParseInfo info(isolate, flags, &compile_state, &reusable_state);
if (parsing::ParseAny(&info, location->shared(), isolate,
parsing::ReportStatisticsMode::kNo)) {
info.ast_value_factory()->Internalize(isolate);
@@ -841,8 +839,9 @@ Object ErrorUtils::ThrowSpreadArgError(Isolate* isolate, MessageTemplate id,
if (ComputeLocation(isolate, &location)) {
UnoptimizedCompileFlags flags = UnoptimizedCompileFlags::ForFunctionCompile(
isolate, *location.shared());
- UnoptimizedCompileState compile_state(isolate);
- ParseInfo info(isolate, flags, &compile_state);
+ UnoptimizedCompileState compile_state;
+ ReusableUnoptimizedCompileState reusable_state(isolate);
+ ParseInfo info(isolate, flags, &compile_state, &reusable_state);
if (parsing::ParseAny(&info, location.shared(), isolate,
parsing::ReportStatisticsMode::kNo)) {
info.ast_value_factory()->Internalize(isolate);
@@ -917,8 +916,9 @@ Object ErrorUtils::ThrowLoadFromNullOrUndefined(Isolate* isolate,
UnoptimizedCompileFlags flags = UnoptimizedCompileFlags::ForFunctionCompile(
isolate, *location.shared());
- UnoptimizedCompileState compile_state(isolate);
- ParseInfo info(isolate, flags, &compile_state);
+ UnoptimizedCompileState compile_state;
+ ReusableUnoptimizedCompileState reusable_state(isolate);
+ ParseInfo info(isolate, flags, &compile_state, &reusable_state);
if (parsing::ParseAny(&info, location.shared(), isolate,
parsing::ReportStatisticsMode::kNo)) {
info.ast_value_factory()->Internalize(isolate);
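
The messages.cc hunks replace AppendCString calls that take string literals with AppendCStringLiteral. The builder itself is outside this diff, so treat the motivation as an assumption: a literal-only overload can take the array by reference and learn its length at compile time, avoiding a runtime strlen. A self-contained sketch of that overload shape, using an illustrative Builder rather than V8's IncrementalStringBuilder:

#include <cstddef>
#include <cstdio>
#include <cstring>
#include <string>

class Builder {
 public:
  // General form: the length has to be computed at runtime.
  void AppendCString(const char* str) { Append(str, std::strlen(str)); }

  // Literal form: N is part of the type, so the length is known statically.
  template <size_t N>
  void AppendCStringLiteral(const char (&literal)[N]) {
    static_assert(N >= 1, "expected a string literal");
    Append(literal, N - 1);  // N includes the trailing '\0'.
  }

  const std::string& result() const { return out_; }

 private:
  void Append(const char* data, size_t length) { out_.append(data, length); }
  std::string out_;
};

int main() {
  Builder b;
  b.AppendCStringLiteral("<error: ");
  b.AppendCString("something went wrong");
  b.AppendCStringLiteral(">");
  std::printf("%s\n", b.result().c_str());
  return 0;
}
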
diff --git a/deps/v8/src/execution/ppc/simulator-ppc.cc b/deps/v8/src/execution/ppc/simulator-ppc.cc
index d9dc7813ee..ea81e6b1c0 100644
--- a/deps/v8/src/execution/ppc/simulator-ppc.cc
+++ b/deps/v8/src/execution/ppc/simulator-ppc.cc
@@ -927,16 +927,18 @@ static void decodeObjectPair(ObjectPair* pair, intptr_t* x, intptr_t* y) {
}
// Calls into the V8 runtime.
-using SimulatorRuntimeCall = intptr_t (*)(intptr_t arg0, intptr_t arg1,
- intptr_t arg2, intptr_t arg3,
- intptr_t arg4, intptr_t arg5,
- intptr_t arg6, intptr_t arg7,
- intptr_t arg8, intptr_t arg9);
-using SimulatorRuntimePairCall = ObjectPair (*)(intptr_t arg0, intptr_t arg1,
- intptr_t arg2, intptr_t arg3,
- intptr_t arg4, intptr_t arg5,
- intptr_t arg6, intptr_t arg7,
- intptr_t arg8, intptr_t arg9);
+using SimulatorRuntimeCall = intptr_t (*)(
+ intptr_t arg0, intptr_t arg1, intptr_t arg2, intptr_t arg3, intptr_t arg4,
+ intptr_t arg5, intptr_t arg6, intptr_t arg7, intptr_t arg8, intptr_t arg9,
+ intptr_t arg10, intptr_t arg11, intptr_t arg12, intptr_t arg13,
+ intptr_t arg14, intptr_t arg15, intptr_t arg16, intptr_t arg17,
+ intptr_t arg18, intptr_t arg19);
+using SimulatorRuntimePairCall = ObjectPair (*)(
+ intptr_t arg0, intptr_t arg1, intptr_t arg2, intptr_t arg3, intptr_t arg4,
+ intptr_t arg5, intptr_t arg6, intptr_t arg7, intptr_t arg8, intptr_t arg9,
+ intptr_t arg10, intptr_t arg11, intptr_t arg12, intptr_t arg13,
+ intptr_t arg14, intptr_t arg15, intptr_t arg16, intptr_t arg17,
+ intptr_t arg18, intptr_t arg19);
// These prototypes handle the four types of FP calls.
using SimulatorRuntimeCompareCall = int (*)(double darg0, double darg1);
@@ -966,7 +968,7 @@ void Simulator::SoftwareInterrupt(Instruction* instr) {
(get_register(sp) & (::v8::internal::FLAG_sim_stack_alignment - 1)) ==
0;
Redirection* redirection = Redirection::FromInstruction(instr);
- const int kArgCount = 10;
+ const int kArgCount = 20;
const int kRegisterArgCount = 8;
int arg0_regnum = 3;
intptr_t result_buffer = 0;
@@ -987,7 +989,7 @@ void Simulator::SoftwareInterrupt(Instruction* instr) {
for (int i = kRegisterArgCount, j = 0; i < kArgCount; i++, j++) {
arg[i] = stack_pointer[kStackFrameExtraParamSlot + j];
}
- STATIC_ASSERT(kArgCount == kRegisterArgCount + 2);
+ STATIC_ASSERT(kArgCount == kRegisterArgCount + 12);
STATIC_ASSERT(kMaxCParameters == kArgCount);
bool fp_call =
(redirection->type() == ExternalReference::BUILTIN_FP_FP_CALL) ||
@@ -1163,9 +1165,14 @@ void Simulator::SoftwareInterrupt(Instruction* instr) {
"\t\t\t\targs %08" V8PRIxPTR ", %08" V8PRIxPTR ", %08" V8PRIxPTR
", %08" V8PRIxPTR ", %08" V8PRIxPTR ", %08" V8PRIxPTR
", %08" V8PRIxPTR ", %08" V8PRIxPTR ", %08" V8PRIxPTR
- ", %08" V8PRIxPTR,
+ ", %08" V8PRIxPTR ", %08" V8PRIxPTR ", %08" V8PRIxPTR
+ ", %08" V8PRIxPTR ", %08" V8PRIxPTR ", %08" V8PRIxPTR
+ ", %08" V8PRIxPTR ", %08" V8PRIxPTR ", %08" V8PRIxPTR
+ ", %08" V8PRIxPTR ", %08" V8PRIxPTR,
reinterpret_cast<void*>(FUNCTION_ADDR(target)), arg[0], arg[1],
- arg[2], arg[3], arg[4], arg[5], arg[6], arg[7], arg[8], arg[9]);
+ arg[2], arg[3], arg[4], arg[5], arg[6], arg[7], arg[8], arg[9],
+ arg[10], arg[11], arg[12], arg[13], arg[14], arg[15], arg[16],
+ arg[17], arg[18], arg[19]);
if (!stack_aligned) {
PrintF(" with unaligned stack %08" V8PRIxPTR "\n",
get_register(sp));
@@ -1176,8 +1183,10 @@ void Simulator::SoftwareInterrupt(Instruction* instr) {
if (redirection->type() == ExternalReference::BUILTIN_CALL_PAIR) {
SimulatorRuntimePairCall target =
reinterpret_cast<SimulatorRuntimePairCall>(external);
- ObjectPair result = target(arg[0], arg[1], arg[2], arg[3], arg[4],
- arg[5], arg[6], arg[7], arg[8], arg[9]);
+ ObjectPair result =
+ target(arg[0], arg[1], arg[2], arg[3], arg[4], arg[5], arg[6],
+ arg[7], arg[8], arg[9], arg[10], arg[11], arg[12], arg[13],
+ arg[14], arg[15], arg[16], arg[17], arg[18], arg[19]);
intptr_t x;
intptr_t y;
decodeObjectPair(&result, &x, &y);
@@ -1207,8 +1216,10 @@ void Simulator::SoftwareInterrupt(Instruction* instr) {
redirection->type() == ExternalReference::FAST_C_CALL);
SimulatorRuntimeCall target =
reinterpret_cast<SimulatorRuntimeCall>(external);
- intptr_t result = target(arg[0], arg[1], arg[2], arg[3], arg[4],
- arg[5], arg[6], arg[7], arg[8], arg[9]);
+ intptr_t result =
+ target(arg[0], arg[1], arg[2], arg[3], arg[4], arg[5], arg[6],
+ arg[7], arg[8], arg[9], arg[10], arg[11], arg[12], arg[13],
+ arg[14], arg[15], arg[16], arg[17], arg[18], arg[19]);
if (::v8::internal::FLAG_trace_sim) {
PrintF("Returned %08" V8PRIxPTR "\n", result);
}
@@ -4920,14 +4931,12 @@ void Simulator::ExecuteGeneric(Instruction* instr) {
int vra = instr->RAValue();
int vrb = instr->RBValue();
int vrc = instr->RCValue();
- FOR_EACH_LANE(i, int64_t) {
- int64_t vra_val = get_simd_register_by_lane<int64_t>(vra, i);
- int64_t vrb_val = get_simd_register_by_lane<int64_t>(vrb, i);
- int64_t mask = get_simd_register_by_lane<int64_t>(vrc, i);
- int64_t temp = vra_val ^ vrb_val;
- temp = temp & mask;
- set_simd_register_by_lane<int64_t>(vrt, i, temp ^ vra_val);
- }
+ unsigned __int128 src_1 = bit_cast<__int128>(get_simd_register(vra).int8);
+ unsigned __int128 src_2 = bit_cast<__int128>(get_simd_register(vrb).int8);
+ unsigned __int128 src_3 = bit_cast<__int128>(get_simd_register(vrc).int8);
+ unsigned __int128 tmp = (src_1 & ~src_3) | (src_2 & src_3);
+ simdr_t* result = bit_cast<simdr_t*>(&tmp);
+ set_simd_register(vrt, *result);
break;
}
case VPERM: {
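
The VSEL rewrite above collapses the per-lane loop into one 128-bit bitwise select: the removed loop computed ((a ^ b) & mask) ^ a lane by lane, the replacement computes (a & ~mask) | (b & mask) over the whole register, and both pick b where a mask bit is set and a elsewhere. A standalone check of that identity, using the same unsigned __int128 GCC/Clang extension the simulator relies on:

#include <cstdint>
#include <cstdio>

int main() {
  // a has its high half set, b its low half; the mask selects the low half.
  unsigned __int128 a =
      static_cast<unsigned __int128>(0x1111111111111111ULL) << 64;
  unsigned __int128 b = 0x2222222222222222ULL;
  unsigned __int128 mask = ~static_cast<unsigned __int128>(0) >> 64;
  unsigned __int128 old_form = ((a ^ b) & mask) ^ a;      // per-lane formula
  unsigned __int128 new_form = (a & ~mask) | (b & mask);  // whole-register form
  std::printf("%s\n", old_form == new_form ? "equivalent" : "different");
  return 0;
}
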
diff --git a/deps/v8/src/execution/riscv64/simulator-riscv64.cc b/deps/v8/src/execution/riscv64/simulator-riscv64.cc
index 19bcb0453c..479c4b6a2f 100644
--- a/deps/v8/src/execution/riscv64/simulator-riscv64.cc
+++ b/deps/v8/src/execution/riscv64/simulator-riscv64.cc
@@ -87,6 +87,51 @@
// PURPOSE. THE SOFTWARE AND ACCOMPANYING DOCUMENTATION, IF ANY, PROVIDED
// HEREUNDER IS PROVIDED "AS IS". REGENTS HAS NO OBLIGATION TO PROVIDE
// MAINTENANCE, SUPPORT, UPDATES, ENHANCEMENTS, OR MODIFICATIONS.
+static inline bool is_aligned(const unsigned val, const unsigned pos) {
+ return pos ? (val & (pos - 1)) == 0 : true;
+}
+
+static inline bool is_overlapped(const int astart, int asize, const int bstart,
+ int bsize) {
+ asize = asize == 0 ? 1 : asize;
+ bsize = bsize == 0 ? 1 : bsize;
+
+ const int aend = astart + asize;
+ const int bend = bstart + bsize;
+
+ return std::max(aend, bend) - std::min(astart, bstart) < asize + bsize;
+}
+static inline bool is_overlapped_widen(const int astart, int asize,
+ const int bstart, int bsize) {
+ asize = asize == 0 ? 1 : asize;
+ bsize = bsize == 0 ? 1 : bsize;
+
+ const int aend = astart + asize;
+ const int bend = bstart + bsize;
+
+ if (astart < bstart && is_overlapped(astart, asize, bstart, bsize) &&
+ !is_overlapped(astart, asize, bstart + bsize, bsize)) {
+ return false;
+ } else {
+ return std::max(aend, bend) - std::min(astart, bstart) < asize + bsize;
+ }
+}
+
+#ifdef DEBUG
+#define require_align(val, pos) \
+ if (!is_aligned(val, pos)) { \
+ std::cout << val << " " << pos << std::endl; \
+ } \
+ CHECK_EQ(is_aligned(val, pos), true)
+#else
+#define require_align(val, pos) CHECK_EQ(is_aligned(val, pos), true)
+#endif
+
+#define require_noover(astart, asize, bstart, bsize) \
+ CHECK_EQ(!is_overlapped(astart, asize, bstart, bsize), true)
+#define require_noover_widen(astart, asize, bstart, bsize) \
+ CHECK_EQ(!is_overlapped_widen(astart, asize, bstart, bsize), true)
+
#define RVV_VI_GENERAL_LOOP_BASE \
for (uint64_t i = rvv_vstart(); i < rvv_vl(); i++) {
#define RVV_VI_LOOP_END \
@@ -121,9 +166,6 @@
} else if (rvv_vsew() == E64) { \
VV_PARAMS(64); \
BODY \
- } else if (rvv_vsew() == E128) { \
- VV_PARAMS(128); \
- BODY \
} else { \
UNREACHABLE(); \
} \
@@ -145,9 +187,6 @@
} else if (rvv_vsew() == E64) { \
VV_UPARAMS(64); \
BODY \
- } else if (rvv_vsew() == E128) { \
- VV_UPARAMS(128); \
- BODY \
} else { \
UNREACHABLE(); \
} \
@@ -169,9 +208,6 @@
} else if (rvv_vsew() == E64) { \
VX_PARAMS(64); \
BODY \
- } else if (rvv_vsew() == E128) { \
- VX_PARAMS(128); \
- BODY \
} else { \
UNREACHABLE(); \
} \
@@ -193,9 +229,6 @@
} else if (rvv_vsew() == E64) { \
VX_UPARAMS(64); \
BODY \
- } else if (rvv_vsew() == E128) { \
- VX_UPARAMS(128); \
- BODY \
} else { \
UNREACHABLE(); \
} \
@@ -217,9 +250,6 @@
} else if (rvv_vsew() == E64) { \
VI_PARAMS(64); \
BODY \
- } else if (rvv_vsew() == E128) { \
- VI_PARAMS(128); \
- BODY \
} else { \
UNREACHABLE(); \
} \
@@ -241,15 +271,144 @@
} else if (rvv_vsew() == E64) { \
VI_UPARAMS(64); \
BODY \
- } else if (rvv_vsew() == E128) { \
- VI_UPARAMS(128); \
- BODY \
} else { \
UNREACHABLE(); \
} \
RVV_VI_LOOP_END \
rvv_trace_vd();
+// widen operation loop
+
+#define VI_WIDE_CHECK_COMMON \
+ CHECK_LE(rvv_vflmul(), 4); \
+ CHECK_LE(rvv_vsew() * 2, kRvvELEN); \
+ require_align(rvv_vd_reg(), rvv_vflmul() * 2); \
+ require_vm;
+
+#define VI_NARROW_CHECK_COMMON \
+ CHECK_LE(rvv_vflmul(), 4); \
+ CHECK_LE(rvv_vsew() * 2, kRvvELEN); \
+ require_align(rvv_vs2_reg(), rvv_vflmul() * 2); \
+ require_align(rvv_vd_reg(), rvv_vflmul()); \
+ require_vm;
+
+#define RVV_VI_CHECK_SLIDE(is_over) \
+ require_align(rvv_vs2_reg(), rvv_vflmul()); \
+ require_align(rvv_vd_reg(), rvv_vflmul()); \
+ require_vm; \
+ if (is_over) require(rvv_vd_reg() != rvv_vs2_reg());
+
+#define RVV_VI_CHECK_DDS(is_rs) \
+ VI_WIDE_CHECK_COMMON; \
+ require_align(rvv_vs2_reg(), rvv_vflmul() * 2); \
+ if (is_rs) { \
+ require_align(rvv_vs1_reg(), rvv_vflmul()); \
+ if (rvv_vflmul() < 1) { \
+ require_noover(rvv_vd_reg(), rvv_vflmul() * 2, rvv_vs1_reg(), \
+ rvv_vflmul()); \
+ } else { \
+ require_noover_widen(rvv_vd_reg(), rvv_vflmul() * 2, rvv_vs1_reg(), \
+ rvv_vflmul()); \
+ } \
+ }
+
+#define RVV_VI_CHECK_DSS(is_vs1) \
+ VI_WIDE_CHECK_COMMON; \
+ require_align(rvv_vs2_reg(), rvv_vflmul()); \
+ if (rvv_vflmul() < 1) { \
+ require_noover(rvv_vd_reg(), rvv_vflmul() * 2, rvv_vs2_reg(), \
+ rvv_vflmul()); \
+ } else { \
+ require_noover_widen(rvv_vd_reg(), rvv_vflmul() * 2, rvv_vs2_reg(), \
+ rvv_vflmul()); \
+ } \
+ if (is_vs1) { \
+ require_align(rvv_vs1_reg(), rvv_vflmul()); \
+ if (rvv_vflmul() < 1) { \
+ require_noover(rvv_vd_reg(), rvv_vflmul() * 2, rvv_vs1_reg(), \
+ rvv_vflmul()); \
+ } else { \
+ require_noover_widen(rvv_vd_reg(), rvv_vflmul() * 2, rvv_vs1_reg(), \
+ rvv_vflmul()); \
+ } \
+ }
+
+#define RVV_VI_CHECK_SDS(is_vs1) \
+ VI_NARROW_CHECK_COMMON; \
+ if (rvv_vd_reg() != rvv_vs2_reg()) \
+ require_noover(rvv_vd_reg(), rvv_vflmul(), rvv_vs2_reg(), \
+ rvv_vflmul() * 2); \
+ if (is_vs1) require_align(rvv_vs1_reg(), rvv_vflmul());
+
+#define RVV_VI_VV_LOOP_WIDEN(BODY) \
+ RVV_VI_GENERAL_LOOP_BASE \
+ RVV_VI_LOOP_MASK_SKIP() \
+ if (rvv_vsew() == E8) { \
+ VV_PARAMS(8); \
+ BODY; \
+ } else if (rvv_vsew() == E16) { \
+ VV_PARAMS(16); \
+ BODY; \
+ } else if (rvv_vsew() == E32) { \
+ VV_PARAMS(32); \
+ BODY; \
+ } \
+ RVV_VI_LOOP_END \
+ rvv_trace_vd();
+
+#define RVV_VI_VX_LOOP_WIDEN(BODY) \
+ RVV_VI_GENERAL_LOOP_BASE \
+ if (rvv_vsew() == E8) { \
+ VX_PARAMS(8); \
+ BODY; \
+ } else if (rvv_vsew() == E16) { \
+ VX_PARAMS(16); \
+ BODY; \
+ } else if (rvv_vsew() == E32) { \
+ VX_PARAMS(32); \
+ BODY; \
+ } \
+ RVV_VI_LOOP_END \
+ rvv_trace_vd();
+
+#define VI_WIDE_OP_AND_ASSIGN(var0, var1, var2, op0, op1, sign) \
+ switch (rvv_vsew()) { \
+ case E8: { \
+ Rvvelt<uint16_t>(rvv_vd_reg(), i, true) = \
+ op1((sign##16_t)(sign##8_t)var0 op0(sign##16_t)(sign##8_t) var1) + \
+ var2; \
+ } break; \
+ case E16: { \
+ Rvvelt<uint32_t>(rvv_vd_reg(), i, true) = \
+ op1((sign##32_t)(sign##16_t)var0 op0(sign##32_t)(sign##16_t) var1) + \
+ var2; \
+ } break; \
+ default: { \
+ Rvvelt<uint64_t>(rvv_vd_reg(), i, true) = \
+ op1((sign##64_t)(sign##32_t)var0 op0(sign##64_t)(sign##32_t) var1) + \
+ var2; \
+ } break; \
+ }
+
+#define VI_WIDE_WVX_OP(var0, op0, sign) \
+ switch (rvv_vsew()) { \
+ case E8: { \
+ sign##16_t & vd_w = Rvvelt<sign##16_t>(rvv_vd_reg(), i, true); \
+ sign##16_t vs2_w = Rvvelt<sign##16_t>(rvv_vs2_reg(), i); \
+ vd_w = vs2_w op0(sign##16_t)(sign##8_t) var0; \
+ } break; \
+ case E16: { \
+ sign##32_t & vd_w = Rvvelt<sign##32_t>(rvv_vd_reg(), i, true); \
+ sign##32_t vs2_w = Rvvelt<sign##32_t>(rvv_vs2_reg(), i); \
+ vd_w = vs2_w op0(sign##32_t)(sign##16_t) var0; \
+ } break; \
+ default: { \
+ sign##64_t & vd_w = Rvvelt<sign##64_t>(rvv_vd_reg(), i, true); \
+ sign##64_t vs2_w = Rvvelt<sign##64_t>(rvv_vs2_reg(), i); \
+ vd_w = vs2_w op0(sign##64_t)(sign##32_t) var0; \
+ } break; \
+ }
+
#define RVV_VI_VVXI_MERGE_LOOP(BODY) \
RVV_VI_GENERAL_LOOP_BASE \
if (rvv_vsew() == E8) { \
@@ -264,9 +423,6 @@
} else if (rvv_vsew() == E64) { \
VXI_PARAMS(64); \
BODY; \
- } else if (rvv_vsew() == E128) { \
- VXI_PARAMS(128); \
- BODY \
} \
RVV_VI_LOOP_END \
rvv_trace_vd();
@@ -346,6 +502,9 @@
type_usew_t<x>::type uimm5 = (type_usew_t<x>::type)instr_.RvvUimm5(); \
type_usew_t<x>::type vs2 = Rvvelt<type_usew_t<x>::type>(rvv_vs2_reg(), i);
+#define float32_t float
+#define float64_t double
+
#define RVV_VI_LOOP_CMP_BASE \
CHECK(rvv_vsew() >= E8 && rvv_vsew() <= E64); \
for (reg_t i = rvv_vstart(); i < rvv_vl(); ++i) { \
@@ -663,6 +822,37 @@
} \
rvv_trace_vd();
+#define VI_VFP_LOOP_REDUCTION_BASE(width) \
+ float##width##_t vd_0 = Rvvelt<float##width##_t>(rvv_vd_reg(), 0); \
+ float##width##_t vs1_0 = Rvvelt<float##width##_t>(rvv_vs1_reg(), 0); \
+ vd_0 = vs1_0; \
+ /*bool is_active = false;*/ \
+ for (reg_t i = rvv_vstart(); i < rvv_vl(); ++i) { \
+ RVV_VI_LOOP_MASK_SKIP(); \
+ float##width##_t vs2 = Rvvelt<float##width##_t>(rvv_vs2_reg(), i); \
+ /*is_active = true;*/
+
+#define VI_VFP_LOOP_REDUCTION_END(x) \
+ } \
+ set_rvv_vstart(0); \
+ if (rvv_vl() > 0) { \
+ Rvvelt<type_sew_t<x>::type>(rvv_vd_reg(), 0, true) = vd_0; \
+ }
+
+#define RVV_VI_VFP_VV_LOOP_REDUCTION(BODY16, BODY32, BODY64) \
+ if (rvv_vsew() == E16) { \
+ UNIMPLEMENTED(); \
+ } else if (rvv_vsew() == E32) { \
+ VI_VFP_LOOP_REDUCTION_BASE(32) \
+ BODY32; \
+ VI_VFP_LOOP_REDUCTION_END(32) \
+ } else if (rvv_vsew() == E64) { \
+ VI_VFP_LOOP_REDUCTION_BASE(64) \
+ BODY64; \
+ VI_VFP_LOOP_REDUCTION_END(64) \
+ } \
+ rvv_trace_vd();
+
// reduction loop - unsigned
#define RVV_VI_ULOOP_REDUCTION_BASE(x) \
auto& vd_0_des = Rvvelt<type_usew_t<x>::type>(rvv_vd_reg(), 0, true); \
@@ -738,7 +928,7 @@
set_rvv_vstart(0); \
if (::v8::internal::FLAG_trace_sim) { \
__int128_t value = Vregister_[rvv_vd_reg()]; \
- SNPrintF(trace_buf_, "0x%016" PRIx64 "%016" PRIx64 " <-- 0x%016" PRIx64, \
+ SNPrintF(trace_buf_, "%016" PRIx64 "%016" PRIx64 " <-- 0x%016" PRIx64, \
*(reinterpret_cast<int64_t*>(&value) + 1), \
*reinterpret_cast<int64_t*>(&value), \
(uint64_t)(get_register(rs1_reg()))); \
@@ -762,7 +952,7 @@
set_rvv_vstart(0); \
if (::v8::internal::FLAG_trace_sim) { \
__int128_t value = Vregister_[rvv_vd_reg()]; \
- SNPrintF(trace_buf_, "0x%016" PRIx64 "%016" PRIx64 " --> 0x%016" PRIx64, \
+ SNPrintF(trace_buf_, "%016" PRIx64 "%016" PRIx64 " --> 0x%016" PRIx64, \
*(reinterpret_cast<int64_t*>(&value) + 1), \
*reinterpret_cast<int64_t*>(&value), \
(uint64_t)(get_register(rs1_reg()))); \
@@ -775,6 +965,11 @@
#define RVV_VI_VFP_CVT_SCALE(BODY8, BODY16, BODY32, CHECK8, CHECK16, CHECK32, \
is_widen, eew_check) \
+ if (is_widen) { \
+ RVV_VI_CHECK_DSS(false); \
+ } else { \
+ RVV_VI_CHECK_SDS(false); \
+ } \
CHECK(eew_check); \
switch (rvv_vsew()) { \
case E8: { \
@@ -845,7 +1040,6 @@ inline Dst unsigned_saturation(Src v, uint n) {
RVV_VI_GENERAL_LOOP_BASE \
RVV_VI_LOOP_MASK_SKIP() \
if (rvv_vsew() == E8) { \
- UNREACHABLE(); \
VN_UPARAMS(16); \
vd = unsigned_saturation<uint16_t, uint8_t>( \
(static_cast<uint16_t>(vs2) >> uimm5) + \
@@ -875,7 +1069,6 @@ inline Dst unsigned_saturation(Src v, uint n) {
RVV_VI_GENERAL_LOOP_BASE \
RVV_VI_LOOP_MASK_SKIP() \
if (rvv_vsew() == E8) { \
- UNREACHABLE(); \
VN_PARAMS(16); \
vd = signed_saturation<int16_t, int8_t>( \
(vs2 >> uimm5) + get_round(static_cast<int>(rvv_vxrm()), vs2, uimm5), \
@@ -898,6 +1091,100 @@ inline Dst unsigned_saturation(Src v, uint n) {
RVV_VI_LOOP_END \
rvv_trace_vd();
+#define CHECK_EXT(div) \
+ CHECK_NE(rvv_vd_reg(), rvv_vs2_reg()); \
+ reg_t from = rvv_vsew() / div; \
+ CHECK(from >= E8 && from <= E64); \
+ CHECK_GE((float)rvv_vflmul() / div, 0.125); \
+ CHECK_LE((float)rvv_vflmul() / div, 8); \
+ require_align(rvv_vd_reg(), rvv_vflmul()); \
+ require_align(rvv_vs2_reg(), rvv_vflmul() / div); \
+ if ((rvv_vflmul() / div) < 1) { \
+ require_noover(rvv_vd_reg(), rvv_vflmul(), rvv_vs2_reg(), \
+ rvv_vflmul() / div); \
+ } else { \
+ require_noover_widen(rvv_vd_reg(), rvv_vflmul(), rvv_vs2_reg(), \
+ rvv_vflmul() / div); \
+ }
+
+#define RVV_VI_VIE_8_LOOP(signed) \
+ CHECK_EXT(8) \
+ RVV_VI_GENERAL_LOOP_BASE \
+ RVV_VI_LOOP_MASK_SKIP() \
+ if (rvv_vsew() == E64) { \
+ if (signed) { \
+ VI_VIE_PARAMS(64, 8); \
+ vd = static_cast<int64_t>(vs2); \
+ } else { \
+ VI_VIE_UPARAMS(64, 8); \
+ vd = static_cast<uint64_t>(vs2); \
+ } \
+ } else { \
+ UNREACHABLE(); \
+ } \
+ RVV_VI_LOOP_END \
+ rvv_trace_vd();
+
+#define RVV_VI_VIE_4_LOOP(signed) \
+ CHECK_EXT(4) \
+ RVV_VI_GENERAL_LOOP_BASE \
+ RVV_VI_LOOP_MASK_SKIP() \
+ if (rvv_vsew() == E32) { \
+ if (signed) { \
+ VI_VIE_PARAMS(32, 4); \
+ vd = static_cast<int32_t>(vs2); \
+ } else { \
+ VI_VIE_UPARAMS(32, 4); \
+ vd = static_cast<uint32_t>(vs2); \
+ } \
+ } else if (rvv_vsew() == E64) { \
+ if (signed) { \
+ VI_VIE_PARAMS(64, 4); \
+ vd = static_cast<int64_t>(vs2); \
+ } else { \
+ VI_VIE_UPARAMS(64, 4); \
+ vd = static_cast<uint64_t>(vs2); \
+ } \
+ } else { \
+ UNREACHABLE(); \
+ } \
+ RVV_VI_LOOP_END \
+ rvv_trace_vd();
+
+#define RVV_VI_VIE_2_LOOP(signed) \
+ CHECK_EXT(2) \
+ RVV_VI_GENERAL_LOOP_BASE \
+ RVV_VI_LOOP_MASK_SKIP() \
+ if (rvv_vsew() == E16) { \
+ if (signed) { \
+ VI_VIE_PARAMS(16, 2); \
+ vd = static_cast<int16_t>(vs2); \
+ } else { \
+ VI_VIE_UPARAMS(16, 2); \
+ vd = static_cast<uint16_t>(vs2); \
+ } \
+ } else if (rvv_vsew() == E32) { \
+ if (signed) { \
+ VI_VIE_PARAMS(32, 2); \
+ vd = static_cast<int32_t>(vs2); \
+ } else { \
+ VI_VIE_UPARAMS(32, 2); \
+ vd = static_cast<uint32_t>(vs2); \
+ } \
+ } else if (rvv_vsew() == E64) { \
+ if (signed) { \
+ VI_VIE_PARAMS(64, 2); \
+ vd = static_cast<int64_t>(vs2); \
+ } else { \
+ VI_VIE_UPARAMS(64, 2); \
+ vd = static_cast<uint64_t>(vs2); \
+ } \
+ } else { \
+ UNREACHABLE(); \
+ } \
+ RVV_VI_LOOP_END \
+ rvv_trace_vd();
+
namespace v8 {
namespace internal {
@@ -2127,12 +2414,13 @@ T Simulator::ReadMem(int64_t addr, Instruction* instr) {
DieOrDebug();
}
#ifndef V8_COMPRESS_POINTERS // TODO(RISCV): v8:11812
- // check for natural alignment
- if (!FLAG_riscv_c_extension && ((addr & (sizeof(T) - 1)) != 0)) {
- PrintF("Unaligned read at 0x%08" PRIx64 " , pc=0x%08" V8PRIxPTR "\n", addr,
- reinterpret_cast<intptr_t>(instr));
- DieOrDebug();
- }
+ // // check for natural alignment
+ // if (!FLAG_riscv_c_extension && ((addr & (sizeof(T) - 1)) != 0)) {
+ // PrintF("Unaligned read at 0x%08" PRIx64 " , pc=0x%08" V8PRIxPTR "\n",
+ // addr,
+ // reinterpret_cast<intptr_t>(instr));
+ // DieOrDebug();
+ // }
#endif
T* ptr = reinterpret_cast<T*>(addr);
T value = *ptr;
@@ -2405,18 +2693,18 @@ void Simulator::SoftwareInterrupt() {
"Call to host function %s at %p "
"args %08" PRIx64 " , %08" PRIx64 " , %08" PRIx64 " , %08" PRIx64
" , %08" PRIx64 " , %08" PRIx64 " , %08" PRIx64 " , %08" PRIx64
- " , %08" PRIx64 " , %08" PRIx64 " , %016" PRIx64
- " , %016" PRIx64 " , %016" PRIx64 " , %016" PRIx64
- " , %016" PRIx64 " , %016" PRIx64 " , %016" PRIx64
- " , %016" PRIx64 " , %016" PRIx64 " , %016" PRIx64 " \n",
+ " , %08" PRIx64 " , %08" PRIx64 " , %016" PRIx64 " , %016" PRIx64
+ " , %016" PRIx64 " , %016" PRIx64 " , %016" PRIx64 " , %016" PRIx64
+ " , %016" PRIx64 " , %016" PRIx64 " , %016" PRIx64 " , %016" PRIx64
+ " \n",
ExternalReferenceTable::NameOfIsolateIndependentAddress(pc),
reinterpret_cast<void*>(FUNCTION_ADDR(target)), arg0, arg1, arg2,
arg3, arg4, arg5, arg6, arg7, arg8, arg9, arg10, arg11, arg12,
arg13, arg14, arg15, arg16, arg17, arg18, arg19);
}
ObjectPair result = target(arg0, arg1, arg2, arg3, arg4, arg5, arg6, arg7,
- arg8, arg9, arg10, arg11, arg12, arg13, arg14,
- arg15, arg16, arg17, arg18, arg19);
+ arg8, arg9, arg10, arg11, arg12, arg13, arg14,
+ arg15, arg16, arg17, arg18, arg19);
set_register(a0, (int64_t)(result.x));
set_register(a1, (int64_t)(result.y));
}
@@ -3738,6 +4026,10 @@ bool Simulator::DecodeRvvVL() {
RVV_VI_LD(0, (i * nf + fn), int32, false);
break;
}
+ case 64: {
+ RVV_VI_LD(0, (i * nf + fn), int64, false);
+ break;
+ }
default:
UNIMPLEMENTED_RISCV();
break;
@@ -3799,6 +4091,10 @@ bool Simulator::DecodeRvvVS() {
RVV_VI_ST(0, (i * nf + fn), uint32, false);
break;
}
+ case 64: {
+ RVV_VI_ST(0, (i * nf + fn), uint64, false);
+ break;
+ }
default:
UNIMPLEMENTED_RISCV();
break;
@@ -4471,6 +4767,28 @@ static inline T sat_sub(T x, T y, bool& sat) {
return res;
}
+template <typename T>
+T sat_addu(T x, T y, bool& sat) {
+ T res = x + y;
+ sat = false;
+
+ sat = res < x;
+ res |= -(res < x);
+
+ return res;
+}
+
+template <typename T>
+T sat_subu(T x, T y, bool& sat) {
+ T res = x - y;
+ sat = false;
+
+ sat = !(res <= x);
+ res &= -(res <= x);
+
+ return res;
+}
+
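The two templates above implement unsigned saturating add/subtract without a branch: overflow is detected by comparing the wrapped result against an operand, and the -(bool) mask clamps the result to all-zeros or all-ones. A minimal usage sketch with uint8_t operands (values chosen for illustration only, not part of the patch itself):

  bool sat = false;
  uint8_t r = sat_subu<uint8_t>(5, 9, sat);    // r == 0,   sat == true  (clamped low)
  r = sat_subu<uint8_t>(9, 5, sat);            // r == 4,   sat == false (no wrap)
  r = sat_addu<uint8_t>(200, 100, sat);        // r == 255, sat == true  (clamped high)
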
void Simulator::DecodeRvvIVV() {
DCHECK_EQ(instr_.InstructionBits() & (kBaseOpcodeMask | kFunct3Mask), OP_IVV);
switch (instr_.InstructionBits() & kVTypeMask) {
@@ -4546,6 +4864,35 @@ void Simulator::DecodeRvvIVV() {
RVV_VI_LOOP_END
break;
}
+ case RO_V_VSSUBU_VV: {
+ RVV_VI_GENERAL_LOOP_BASE
+ bool sat = false;
+ switch (rvv_vsew()) {
+ case E8: {
+ VV_UPARAMS(8);
+ vd = sat_subu<uint8_t>(vs2, vs1, sat);
+ break;
+ }
+ case E16: {
+ VV_UPARAMS(16);
+ vd = sat_subu<uint16_t>(vs2, vs1, sat);
+ break;
+ }
+ case E32: {
+ VV_UPARAMS(32);
+ vd = sat_subu<uint32_t>(vs2, vs1, sat);
+ break;
+ }
+ default: {
+ VV_UPARAMS(64);
+ vd = sat_subu<uint64_t>(vs2, vs1, sat);
+ break;
+ }
+ }
+ set_rvv_vxsat(sat);
+ RVV_VI_LOOP_END
+ break;
+ }
case RO_V_VAND_VV: {
RVV_VI_VV_LOOP({ vd = vs1 & vs2; })
break;
@@ -4654,6 +5001,46 @@ void Simulator::DecodeRvvIVV() {
RVV_VI_VV_LOOP({ vd = vs2 << vs1; })
break;
}
+ case RO_V_VSRL_VV:
+ RVV_VI_VV_ULOOP({ vd = vs2 >> vs1; })
+ break;
+ case RO_V_VSRA_VV:
+ RVV_VI_VV_LOOP({ vd = vs2 >> vs1; })
+ break;
+ case RO_V_VSMUL_VV: {
+ RVV_VI_GENERAL_LOOP_BASE
+ RVV_VI_LOOP_MASK_SKIP()
+ if (rvv_vsew() == E8) {
+ VV_PARAMS(8);
+ int16_t result = (int16_t)vs1 * (int16_t)vs2;
+ uint8_t round = get_round(static_cast<int>(rvv_vxrm()), result, 7);
+ result = (result >> 7) + round;
+ vd = signed_saturation<int16_t, int8_t>(result, 8);
+ } else if (rvv_vsew() == E16) {
+ VV_PARAMS(16);
+ int32_t result = (int32_t)vs1 * (int32_t)vs2;
+ uint8_t round = get_round(static_cast<int>(rvv_vxrm()), result, 15);
+ result = (result >> 15) + round;
+ vd = signed_saturation<int32_t, int16_t>(result, 16);
+ } else if (rvv_vsew() == E32) {
+ VV_PARAMS(32);
+ int64_t result = (int64_t)vs1 * (int64_t)vs2;
+ uint8_t round = get_round(static_cast<int>(rvv_vxrm()), result, 31);
+ result = (result >> 31) + round;
+ vd = signed_saturation<int64_t, int32_t>(result, 32);
+ } else if (rvv_vsew() == E64) {
+ VV_PARAMS(64);
+ __int128_t result = (__int128_t)vs1 * (__int128_t)vs2;
+ uint8_t round = get_round(static_cast<int>(rvv_vxrm()), result, 63);
+ result = (result >> 63) + round;
+ vd = signed_saturation<__int128_t, int64_t>(result, 64);
+ } else {
+ UNREACHABLE();
+ }
+ RVV_VI_LOOP_END
+ rvv_trace_vd();
+ break;
+ }
case RO_V_VRGATHER_VV: {
RVV_VI_GENERAL_LOOP_BASE
CHECK_NE(rvv_vs1_reg(), rvv_vd_reg());
@@ -4686,6 +5073,7 @@ void Simulator::DecodeRvvIVV() {
}
}
RVV_VI_LOOP_END;
+ rvv_trace_vd();
break;
}
default:
@@ -4748,7 +5136,7 @@ void Simulator::DecodeRvvIVI() {
break;
}
case RO_V_VRSUB_VI: {
- RVV_VI_VI_LOOP({ vd = vs2 - simm5; })
+ RVV_VI_VI_LOOP({ vd = simm5 - vs2; })
break;
}
case RO_V_VAND_VI: {
@@ -4796,6 +5184,7 @@ void Simulator::DecodeRvvIVI() {
RVV_VI_VI_LOOP_CMP({ res = vs2 > simm5; })
break;
case RO_V_VSLIDEDOWN_VI: {
+ RVV_VI_CHECK_SLIDE(false);
const uint8_t sh = instr_.RvvUimm5();
RVV_VI_GENERAL_LOOP_BASE
@@ -4806,7 +5195,7 @@ void Simulator::DecodeRvvIVI() {
offset = sh;
}
- switch (rvv_sew()) {
+ switch (rvv_vsew()) {
case E8: {
VI_XI_SLIDEDOWN_PARAMS(8, offset);
vd = is_valid ? vs2 : 0;
@@ -4825,12 +5214,16 @@ void Simulator::DecodeRvvIVI() {
} break;
}
RVV_VI_LOOP_END
+ rvv_trace_vd();
} break;
case RO_V_VSRL_VI:
- RVV_VI_VI_LOOP({ vd = vs2 >> simm5; })
+ RVV_VI_VI_ULOOP({ vd = vs2 >> uimm5; })
+ break;
+ case RO_V_VSRA_VI:
+ RVV_VI_VI_LOOP({ vd = vs2 >> (simm5 & (rvv_sew() - 1) & 0x1f); })
break;
case RO_V_VSLL_VI:
- RVV_VI_VI_LOOP({ vd = vs2 << simm5; })
+ RVV_VI_VI_ULOOP({ vd = vs2 << uimm5; })
break;
case RO_V_VADC_VI:
if (instr_.RvvVM()) {
@@ -5047,7 +5440,11 @@ void Simulator::DecodeRvvIVX() {
break;
}
case RO_V_VSRL_VX: {
- RVV_VI_VX_LOOP({ vd = int32_t(uint32_t(vs2) >> (rs1 & (xlen - 1))); })
+ RVV_VI_VX_ULOOP({ vd = (vs2 >> (rs1 & (rvv_sew() - 1))); })
+ break;
+ }
+ case RO_V_VSRA_VX: {
+ RVV_VI_VX_LOOP({ vd = ((vs2) >> (rs1 & (rvv_sew() - 1))); })
break;
}
default:
@@ -5059,6 +5456,71 @@ void Simulator::DecodeRvvIVX() {
void Simulator::DecodeRvvMVV() {
DCHECK_EQ(instr_.InstructionBits() & (kBaseOpcodeMask | kFunct3Mask), OP_MVV);
switch (instr_.InstructionBits() & kVTypeMask) {
+ case RO_V_VMUNARY0: {
+ if (instr_.Vs1Value() == VID_V) {
+ CHECK(rvv_vsew() >= E8 && rvv_vsew() <= E64);
+ uint8_t rd_num = rvv_vd_reg();
+ require_align(rd_num, rvv_vflmul());
+ require_vm;
+ for (uint8_t i = rvv_vstart(); i < rvv_vl(); ++i) {
+ RVV_VI_LOOP_MASK_SKIP();
+ switch (rvv_vsew()) {
+ case E8:
+ Rvvelt<uint8_t>(rd_num, i, true) = i;
+ break;
+ case E16:
+ Rvvelt<uint16_t>(rd_num, i, true) = i;
+ break;
+ case E32:
+ Rvvelt<uint32_t>(rd_num, i, true) = i;
+ break;
+ default:
+ Rvvelt<uint64_t>(rd_num, i, true) = i;
+ break;
+ }
+ }
+ set_rvv_vstart(0);
+ } else {
+ UNIMPLEMENTED_RISCV();
+ }
+ break;
+ }
+ case RO_V_VMUL_VV: {
+ RVV_VI_VV_LOOP({ vd = vs2 * vs1; })
+ break;
+ }
+ case RO_V_VWMUL_VV: {
+ RVV_VI_CHECK_DSS(true);
+ RVV_VI_VV_LOOP_WIDEN({
+ VI_WIDE_OP_AND_ASSIGN(vs2, vs1, 0, *, +, int);
+ USE(vd);
+ })
+ break;
+ }
+ case RO_V_VWMULU_VV: {
+ RVV_VI_CHECK_DSS(true);
+ RVV_VI_VV_LOOP_WIDEN({
+ VI_WIDE_OP_AND_ASSIGN(vs2, vs1, 0, *, +, uint);
+ USE(vd);
+ })
+ break;
+ }
+ case RO_V_VMULHU_VV: {
+ RVV_VI_VV_LOOP({ vd = ((__uint128_t)vs2 * vs1) >> rvv_sew(); })
+ break;
+ }
+ case RO_V_VMULH_VV: {
+ RVV_VI_VV_LOOP({ vd = ((__int128_t)vs2 * vs1) >> rvv_sew(); })
+ break;
+ }
+ case RO_V_VDIV_VV: {
+ RVV_VI_VV_LOOP({ vd = vs2 / vs1; })
+ break;
+ }
+ case RO_V_VDIVU_VV: {
+ RVV_VI_VV_LOOP({ vd = vs2 / vs1; })
+ break;
+ }
case RO_V_VWXUNARY0: {
if (rvv_vs1_reg() == 0) {
switch (rvv_vsew()) {
@@ -5078,7 +5540,7 @@ void Simulator::DecodeRvvMVV() {
UNREACHABLE();
}
set_rvv_vstart(0);
- SNPrintF(trace_buf_, "0x%ld", get_register(rd_reg()));
+ SNPrintF(trace_buf_, "%lx", get_register(rd_reg()));
} else {
v8::base::EmbeddedVector<char, 256> buffer;
disasm::NameConverter converter;
@@ -5105,6 +5567,76 @@ void Simulator::DecodeRvvMVV() {
RVV_VI_VV_LOOP_REDUCTION(
{ vd_0_res = (vd_0_res <= vs2) ? vd_0_res : vs2; })
break;
+ case RO_V_VXUNARY0:
+ if (rvv_vs1_reg() == 0b00010) {
+ RVV_VI_VIE_8_LOOP(false);
+ } else if (rvv_vs1_reg() == 0b00011) {
+ RVV_VI_VIE_8_LOOP(true);
+ } else if (rvv_vs1_reg() == 0b00100) {
+ RVV_VI_VIE_4_LOOP(false);
+ } else if (rvv_vs1_reg() == 0b00101) {
+ RVV_VI_VIE_4_LOOP(true);
+ } else if (rvv_vs1_reg() == 0b00110) {
+ RVV_VI_VIE_2_LOOP(false);
+ } else if (rvv_vs1_reg() == 0b00111) {
+ RVV_VI_VIE_2_LOOP(true);
+ } else {
+ UNSUPPORTED_RISCV();
+ }
+ break;
+ case RO_V_VWADDU_VV:
+ RVV_VI_CHECK_DSS(true);
+ RVV_VI_VV_LOOP_WIDEN({
+ VI_WIDE_OP_AND_ASSIGN(vs2, vs1, 0, +, +, uint);
+ USE(vd);
+ })
+ break;
+ case RO_V_VWADD_VV:
+ RVV_VI_CHECK_DSS(true);
+ RVV_VI_VV_LOOP_WIDEN({
+ VI_WIDE_OP_AND_ASSIGN(vs2, vs1, 0, +, +, int);
+ USE(vd);
+ })
+ break;
+ case RO_V_VCOMPRESS_VV: {
+ CHECK_EQ(rvv_vstart(), 0);
+ require_align(rvv_vd_reg(), rvv_vflmul());
+ require_align(rvv_vs2_reg(), rvv_vflmul());
+ require(rvv_vd_reg() != rvv_vs2_reg());
+ require_noover(rvv_vd_reg(), rvv_vflmul(), rvv_vs1_reg(), 1);
+
+ reg_t pos = 0;
+
+ RVV_VI_GENERAL_LOOP_BASE
+ const uint64_t midx = i / 64;
+ const uint64_t mpos = i % 64;
+
+ bool do_mask = (Rvvelt<uint64_t>(rvv_vs1_reg(), midx) >> mpos) & 0x1;
+ if (do_mask) {
+ switch (rvv_vsew()) {
+ case E8:
+ Rvvelt<uint8_t>(rvv_vd_reg(), pos, true) =
+ Rvvelt<uint8_t>(rvv_vs2_reg(), i);
+ break;
+ case E16:
+ Rvvelt<uint16_t>(rvv_vd_reg(), pos, true) =
+ Rvvelt<uint16_t>(rvv_vs2_reg(), i);
+ break;
+ case E32:
+ Rvvelt<uint32_t>(rvv_vd_reg(), pos, true) =
+ Rvvelt<uint32_t>(rvv_vs2_reg(), i);
+ break;
+ default:
+ Rvvelt<uint64_t>(rvv_vd_reg(), pos, true) =
+ Rvvelt<uint64_t>(rvv_vs2_reg(), i);
+ break;
+ }
+
+ ++pos;
+ }
+ RVV_VI_LOOP_END;
+ rvv_trace_vd();
+ } break;
default:
v8::base::EmbeddedVector<char, 256> buffer;
disasm::NameConverter converter;
@@ -5151,6 +5683,27 @@ void Simulator::DecodeRvvMVX() {
UNSUPPORTED_RISCV();
}
break;
+ case RO_V_VDIV_VX: {
+ RVV_VI_VX_LOOP({ vd = vs2 / rs1; })
+ break;
+ }
+ case RO_V_VDIVU_VX: {
+ RVV_VI_VX_ULOOP({ vd = vs2 / rs1; })
+ break;
+ }
+ case RO_V_VMUL_VX: {
+ RVV_VI_VX_LOOP({ vd = vs2 * rs1; })
+ break;
+ }
+ case RO_V_VWADDUW_VX: {
+ RVV_VI_CHECK_DDS(false);
+ RVV_VI_VX_LOOP_WIDEN({
+ VI_WIDE_WVX_OP(rs1, +, uint);
+ USE(vd);
+ USE(vs2);
+ })
+ break;
+ }
default:
v8::base::EmbeddedVector<char, 256> buffer;
disasm::NameConverter converter;
@@ -5200,13 +5753,13 @@ void Simulator::DecodeRvvFVV() {
if (is_invalid_fdiv(vs1, vs2)) {
this->set_fflags(kInvalidOperation);
return std::numeric_limits<double>::quiet_NaN();
- } else if (vs2 == 0.0f) {
+ } else if (vs1 == 0.0f) {
this->set_fflags(kDivideByZero);
return (std::signbit(vs1) == std::signbit(vs2)
? std::numeric_limits<double>::infinity()
: -std::numeric_limits<double>::infinity());
} else {
- return vs1 / vs2;
+ return vs2 / vs1;
}
};
auto alu_out = fn(vs1, vs2);
@@ -5342,6 +5895,81 @@ void Simulator::DecodeRvvFVV() {
},
{ ; }, { ; }, { ; }, false, (rvv_vsew() >= E16))
break;
+ case VFNCVT_X_F_W:
+ RVV_VI_VFP_CVT_SCALE(
+ { UNREACHABLE(); }, { UNREACHABLE(); },
+ {
+ auto vs2 = Rvvelt<double>(rvv_vs2_reg(), i);
+ int32_t& vd = Rvvelt<int32_t>(rvv_vd_reg(), i, true);
+ vd = RoundF2IHelper<int32_t>(vs2, read_csr_value(csr_frm));
+ },
+ { ; }, { ; }, { ; }, false, (rvv_vsew() <= E32))
+ break;
+ case VFNCVT_XU_F_W:
+ RVV_VI_VFP_CVT_SCALE(
+ { UNREACHABLE(); }, { UNREACHABLE(); },
+ {
+ auto vs2 = Rvvelt<double>(rvv_vs2_reg(), i);
+ uint32_t& vd = Rvvelt<uint32_t>(rvv_vd_reg(), i, true);
+ vd = RoundF2IHelper<uint32_t>(vs2, read_csr_value(csr_frm));
+ },
+ { ; }, { ; }, { ; }, false, (rvv_vsew() <= E32))
+ break;
+ case VFWCVT_F_X_V:
+ RVV_VI_VFP_CVT_SCALE({ UNREACHABLE(); },
+ {
+ auto vs2 = Rvvelt<int16_t>(rvv_vs2_reg(), i);
+ Rvvelt<float32_t>(rvv_vd_reg(), i, true) =
+ static_cast<float>(vs2);
+ },
+ {
+ auto vs2 = Rvvelt<int32_t>(rvv_vs2_reg(), i);
+ Rvvelt<double>(rvv_vd_reg(), i, true) =
+ static_cast<double>(vs2);
+ },
+ { ; }, { ; }, { ; }, true, (rvv_vsew() >= E8))
+ break;
+ case VFWCVT_F_XU_V:
+ RVV_VI_VFP_CVT_SCALE({ UNREACHABLE(); },
+ {
+ auto vs2 = Rvvelt<uint16_t>(rvv_vs2_reg(), i);
+ Rvvelt<float32_t>(rvv_vd_reg(), i, true) =
+ static_cast<float>(vs2);
+ },
+ {
+ auto vs2 = Rvvelt<uint32_t>(rvv_vs2_reg(), i);
+ Rvvelt<double>(rvv_vd_reg(), i, true) =
+ static_cast<double>(vs2);
+ },
+ { ; }, { ; }, { ; }, true, (rvv_vsew() >= E8))
+ break;
+ case VFWCVT_XU_F_V:
+ RVV_VI_VFP_CVT_SCALE({ UNREACHABLE(); }, { UNREACHABLE(); },
+ {
+ auto vs2 = Rvvelt<float32_t>(rvv_vs2_reg(), i);
+ Rvvelt<uint64_t>(rvv_vd_reg(), i, true) =
+ static_cast<uint64_t>(vs2);
+ },
+ { ; }, { ; }, { ; }, true, (rvv_vsew() >= E16))
+ break;
+ case VFWCVT_X_F_V:
+ RVV_VI_VFP_CVT_SCALE({ UNREACHABLE(); }, { UNREACHABLE(); },
+ {
+ auto vs2 = Rvvelt<float32_t>(rvv_vs2_reg(), i);
+ Rvvelt<int64_t>(rvv_vd_reg(), i, true) =
+ static_cast<int64_t>(vs2);
+ },
+ { ; }, { ; }, { ; }, true, (rvv_vsew() >= E16))
+ break;
+ case VFWCVT_F_F_V:
+ RVV_VI_VFP_CVT_SCALE({ UNREACHABLE(); }, { UNREACHABLE(); },
+ {
+ auto vs2 = Rvvelt<float32_t>(rvv_vs2_reg(), i);
+ Rvvelt<double>(rvv_vd_reg(), i, true) =
+ static_cast<double>(vs2);
+ },
+ { ; }, { ; }, { ; }, true, (rvv_vsew() >= E16))
+ break;
default:
UNSUPPORTED_RISCV();
break;
@@ -5365,29 +5993,40 @@ void Simulator::DecodeRvvFVV() {
USE(vd);
})
break;
+ case VFSQRT_V:
+ RVV_VI_VFP_VF_LOOP({ UNIMPLEMENTED(); },
+ {
+ vd = std::sqrt(vs2);
+ USE(fs1);
+ },
+ {
+ vd = std::sqrt(vs2);
+ USE(fs1);
+ })
+ break;
default:
break;
}
break;
case RO_V_VMFEQ_VV: {
RVV_VI_VFP_LOOP_CMP({ UNIMPLEMENTED(); },
- { res = CompareFHelper(vs1, vs2, EQ); },
- { res = CompareFHelper(vs1, vs2, EQ); }, true)
+ { res = CompareFHelper(vs2, vs1, EQ); },
+ { res = CompareFHelper(vs2, vs1, EQ); }, true)
} break;
case RO_V_VMFNE_VV: {
RVV_VI_VFP_LOOP_CMP({ UNIMPLEMENTED(); },
- { res = CompareFHelper(vs1, vs2, NE); },
- { res = CompareFHelper(vs1, vs2, NE); }, true)
+ { res = CompareFHelper(vs2, vs1, NE); },
+ { res = CompareFHelper(vs2, vs1, NE); }, true)
} break;
case RO_V_VMFLT_VV: {
RVV_VI_VFP_LOOP_CMP({ UNIMPLEMENTED(); },
- { res = CompareFHelper(vs1, vs2, LT); },
- { res = CompareFHelper(vs1, vs2, LT); }, true)
+ { res = CompareFHelper(vs2, vs1, LT); },
+ { res = CompareFHelper(vs2, vs1, LT); }, true)
} break;
case RO_V_VMFLE_VV: {
RVV_VI_VFP_LOOP_CMP({ UNIMPLEMENTED(); },
- { res = CompareFHelper(vs1, vs2, LE); },
- { res = CompareFHelper(vs1, vs2, LE); }, true)
+ { res = CompareFHelper(vs2, vs1, LE); },
+ { res = CompareFHelper(vs2, vs1, LE); }, true)
} break;
case RO_V_VFMAX_VV: {
RVV_VI_VFP_VV_LOOP({ UNIMPLEMENTED(); },
@@ -5395,6 +6034,13 @@ void Simulator::DecodeRvvFVV() {
{ vd = FMaxMinHelper(vs2, vs1, MaxMinKind::kMax); })
break;
}
+ case RO_V_VFREDMAX_VV: {
+ RVV_VI_VFP_VV_LOOP_REDUCTION(
+ { UNIMPLEMENTED(); },
+ { vd_0 = FMaxMinHelper(vd_0, vs2, MaxMinKind::kMax); },
+ { vd_0 = FMaxMinHelper(vd_0, vs2, MaxMinKind::kMax); })
+ break;
+ }
case RO_V_VFMIN_VV: {
RVV_VI_VFP_VV_LOOP({ UNIMPLEMENTED(); },
{ vd = FMaxMinHelper(vs2, vs1, MaxMinKind::kMin); },
@@ -5656,6 +6302,8 @@ void Simulator::DecodeVType() {
case RO_V_VSETVLI: {
uint64_t avl;
set_rvv_vtype(rvv_zimm());
+ CHECK_GE(rvv_vsew(), E8);
+ CHECK_LE(rvv_vsew(), E64);
if (rs1_reg() != zero_reg) {
avl = rs1();
} else if (rd_reg() != zero_reg) {
@@ -5673,6 +6321,8 @@ void Simulator::DecodeVType() {
if (!(instr_.InstructionBits() & 0x40000000)) {
uint64_t avl;
set_rvv_vtype(rs2());
+ CHECK_GE(rvv_sew(), E8);
+ CHECK_LE(rvv_sew(), E64);
if (rs1_reg() != zero_reg) {
avl = rs1();
} else if (rd_reg() != zero_reg) {
diff --git a/deps/v8/src/execution/riscv64/simulator-riscv64.h b/deps/v8/src/execution/riscv64/simulator-riscv64.h
index 32cdc155ba..4d2cd460c4 100644
--- a/deps/v8/src/execution/riscv64/simulator-riscv64.h
+++ b/deps/v8/src/execution/riscv64/simulator-riscv64.h
@@ -392,6 +392,13 @@ class Simulator : public SimulatorBase {
inline uint64_t rvv_vlenb() const { return vlenb_; }
inline uint32_t rvv_zimm() const { return instr_.Rvvzimm(); }
inline uint32_t rvv_vlmul() const { return (rvv_vtype() & 0x7); }
+ inline float rvv_vflmul() const {
+ if ((rvv_vtype() & 0b100) == 0) {
+ return static_cast<float>(0x1 << (rvv_vtype() & 0x7));
+ } else {
+ return 1.0 / static_cast<float>(0x1 << (4 - rvv_vtype() & 0x3));
+ }
+ }
inline uint32_t rvv_vsew() const { return ((rvv_vtype() >> 3) & 0x7); }
inline const char* rvv_sew_s() const {
@@ -416,7 +423,7 @@ class Simulator : public SimulatorBase {
RVV_LMUL(CAST_VLMUL)
default:
return "unknown";
-#undef CAST_VSEW
+#undef CAST_VLMUL
}
}
@@ -427,7 +434,7 @@ class Simulator : public SimulatorBase {
}
inline uint64_t rvv_vlmax() const {
if ((rvv_vlmul() & 0b100) != 0) {
- return (rvv_vlen() / rvv_sew()) >> (rvv_vlmul() & 0b11);
+ return (rvv_vlen() / rvv_sew()) >> (4 - (rvv_vlmul() & 0b11));
} else {
return ((rvv_vlen() << rvv_vlmul()) / rvv_sew());
}
@@ -792,10 +799,21 @@ class Simulator : public SimulatorBase {
auto& vd = Rvvelt<type_sew_t<x>::type>(rvv_vd_reg(), i, true); \
auto vs2 = Rvvelt<type_sew_t<x>::type>(rvv_vs2_reg(), i - offset);
+/* Vector Integer Extension */
+#define VI_VIE_PARAMS(x, scale) \
+ if ((x / scale) < 8) UNREACHABLE(); \
+ auto& vd = Rvvelt<type_sew_t<x>::type>(rvv_vd_reg(), i, true); \
+ auto vs2 = Rvvelt<type_sew_t<x / scale>::type>(rvv_vs2_reg(), i);
+
+#define VI_VIE_UPARAMS(x, scale) \
+ if ((x / scale) < 8) UNREACHABLE(); \
+ auto& vd = Rvvelt<type_sew_t<x>::type>(rvv_vd_reg(), i, true); \
+ auto vs2 = Rvvelt<type_usew_t<x / scale>::type>(rvv_vs2_reg(), i);
+
inline void rvv_trace_vd() {
if (::v8::internal::FLAG_trace_sim) {
__int128_t value = Vregister_[rvv_vd_reg()];
- SNPrintF(trace_buf_, "0x%016" PRIx64 "%016" PRIx64 " (%" PRId64 ")",
+ SNPrintF(trace_buf_, "%016" PRIx64 "%016" PRIx64 " (%" PRId64 ")",
*(reinterpret_cast<int64_t*>(&value) + 1),
*reinterpret_cast<int64_t*>(&value), icount_);
}
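The header changes above add rvv_vflmul() for fractional LMUL and correct the fractional-LMUL branch of rvv_vlmax(). A quick numeric check, assuming VLEN = 128 bits and SEW = 32 (the numbers are illustrative; the formulas are the ones shown in the diff):

  // vtype.vlmul = 0b111, i.e. LMUL = 1/2:
  //   rvv_vflmul() -> 1.0f / (1 << 1)                    == 0.5
  //   rvv_vlmax()  -> (128 / 32) >> (4 - (0b111 & 0b11)) == 4 >> 1 == 2
  // Two 32-bit elements fit in half a vector register; the old expression
  // shifted by (vlmul & 0b11) == 3 and produced 0 instead.
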
diff --git a/deps/v8/src/execution/runtime-profiler.cc b/deps/v8/src/execution/runtime-profiler.cc
index 4d710c5aaa..a586d2d3b6 100644
--- a/deps/v8/src/execution/runtime-profiler.cc
+++ b/deps/v8/src/execution/runtime-profiler.cc
@@ -175,12 +175,9 @@ bool RuntimeProfiler::MaybeOSR(JSFunction function, UnoptimizedFrame* frame) {
namespace {
-bool ShouldOptimizeAsSmallFunction(int bytecode_size, int ticks,
- bool any_ic_changed,
- bool active_tier_is_turboprop) {
- if (any_ic_changed || bytecode_size >= FLAG_max_bytecode_size_for_early_opt)
- return false;
- return true;
+bool ShouldOptimizeAsSmallFunction(int bytecode_size, bool any_ic_changed) {
+ return !any_ic_changed &&
+ bytecode_size < FLAG_max_bytecode_size_for_early_opt;
}
} // namespace
@@ -193,16 +190,14 @@ OptimizationReason RuntimeProfiler::ShouldOptimize(JSFunction function,
if (V8_UNLIKELY(FLAG_turboprop) && function.ActiveTierIsToptierTurboprop()) {
return OptimizationReason::kDoNotOptimize;
}
- int ticks = function.feedback_vector().profiler_ticks();
- bool active_tier_is_turboprop = function.ActiveTierIsMidtierTurboprop();
- int ticks_for_optimization =
+ const int ticks = function.feedback_vector().profiler_ticks();
+ const int ticks_for_optimization =
FLAG_ticks_before_optimization +
(bytecode.length() / FLAG_bytecode_size_allowance_per_tick);
if (ticks >= ticks_for_optimization) {
return OptimizationReason::kHotAndStable;
- } else if (ShouldOptimizeAsSmallFunction(bytecode.length(), ticks,
- any_ic_changed_,
- active_tier_is_turboprop)) {
+ } else if (ShouldOptimizeAsSmallFunction(bytecode.length(),
+ any_ic_changed_)) {
// If no IC was patched since the last tick and this function is very
// small, optimistically optimize it now.
return OptimizationReason::kSmallFunction;
diff --git a/deps/v8/src/execution/s390/simulator-s390.cc b/deps/v8/src/execution/s390/simulator-s390.cc
index 292d248588..42d2000bcb 100644
--- a/deps/v8/src/execution/s390/simulator-s390.cc
+++ b/deps/v8/src/execution/s390/simulator-s390.cc
@@ -1936,16 +1936,18 @@ static void decodeObjectPair(ObjectPair* pair, intptr_t* x, intptr_t* y) {
}
// Calls into the V8 runtime.
-using SimulatorRuntimeCall = intptr_t (*)(intptr_t arg0, intptr_t arg1,
- intptr_t arg2, intptr_t arg3,
- intptr_t arg4, intptr_t arg5,
- intptr_t arg6, intptr_t arg7,
- intptr_t arg8, intptr_t arg9);
-using SimulatorRuntimePairCall = ObjectPair (*)(intptr_t arg0, intptr_t arg1,
- intptr_t arg2, intptr_t arg3,
- intptr_t arg4, intptr_t arg5,
- intptr_t arg6, intptr_t arg7,
- intptr_t arg8, intptr_t arg9);
+using SimulatorRuntimeCall = intptr_t (*)(
+ intptr_t arg0, intptr_t arg1, intptr_t arg2, intptr_t arg3, intptr_t arg4,
+ intptr_t arg5, intptr_t arg6, intptr_t arg7, intptr_t arg8, intptr_t arg9,
+ intptr_t arg10, intptr_t arg11, intptr_t arg12, intptr_t arg13,
+ intptr_t arg14, intptr_t arg15, intptr_t arg16, intptr_t arg17,
+ intptr_t arg18, intptr_t arg19);
+using SimulatorRuntimePairCall = ObjectPair (*)(
+ intptr_t arg0, intptr_t arg1, intptr_t arg2, intptr_t arg3, intptr_t arg4,
+ intptr_t arg5, intptr_t arg6, intptr_t arg7, intptr_t arg8, intptr_t arg9,
+ intptr_t arg10, intptr_t arg11, intptr_t arg12, intptr_t arg13,
+ intptr_t arg14, intptr_t arg15, intptr_t arg16, intptr_t arg17,
+ intptr_t arg18, intptr_t arg19);
// These prototypes handle the four types of FP calls.
using SimulatorRuntimeCompareCall = int (*)(double darg0, double darg1);
@@ -1975,7 +1977,7 @@ void Simulator::SoftwareInterrupt(Instruction* instr) {
(get_register(sp) & (::v8::internal::FLAG_sim_stack_alignment - 1)) ==
0;
Redirection* redirection = Redirection::FromInstruction(instr);
- const int kArgCount = 10;
+ const int kArgCount = 20;
const int kRegisterArgCount = 5;
int arg0_regnum = 2;
intptr_t result_buffer = 0;
@@ -1998,7 +2000,7 @@ void Simulator::SoftwareInterrupt(Instruction* instr) {
stack_pointer[(kCalleeRegisterSaveAreaSize / kSystemPointerSize) +
(i - kRegisterArgCount)];
}
- STATIC_ASSERT(kArgCount == kRegisterArgCount + 5);
+ STATIC_ASSERT(kArgCount == kRegisterArgCount + 15);
STATIC_ASSERT(kMaxCParameters == kArgCount);
bool fp_call =
(redirection->type() == ExternalReference::BUILTIN_FP_FP_CALL) ||
@@ -2177,9 +2179,14 @@ void Simulator::SoftwareInterrupt(Instruction* instr) {
"\t\t\t\targs %08" V8PRIxPTR ", %08" V8PRIxPTR ", %08" V8PRIxPTR
", %08" V8PRIxPTR ", %08" V8PRIxPTR ", %08" V8PRIxPTR
", %08" V8PRIxPTR ", %08" V8PRIxPTR ", %08" V8PRIxPTR
- ", %08" V8PRIxPTR,
+ ", %08" V8PRIxPTR ", %08" V8PRIxPTR ", %08" V8PRIxPTR
+ ", %08" V8PRIxPTR ", %08" V8PRIxPTR ", %08" V8PRIxPTR
+ ", %08" V8PRIxPTR ", %08" V8PRIxPTR ", %08" V8PRIxPTR
+ ", %08" V8PRIxPTR ", %08" V8PRIxPTR,
reinterpret_cast<void*>(FUNCTION_ADDR(target)), arg[0], arg[1],
- arg[2], arg[3], arg[4], arg[5], arg[6], arg[7], arg[8], arg[9]);
+ arg[2], arg[3], arg[4], arg[5], arg[6], arg[7], arg[8], arg[9],
+ arg[10], arg[11], arg[12], arg[13], arg[14], arg[15], arg[16],
+ arg[17], arg[18], arg[19]);
if (!stack_aligned) {
PrintF(" with unaligned stack %08" V8PRIxPTR "\n",
static_cast<intptr_t>(get_register(sp)));
@@ -2190,8 +2197,10 @@ void Simulator::SoftwareInterrupt(Instruction* instr) {
if (redirection->type() == ExternalReference::BUILTIN_CALL_PAIR) {
SimulatorRuntimePairCall target =
reinterpret_cast<SimulatorRuntimePairCall>(external);
- ObjectPair result = target(arg[0], arg[1], arg[2], arg[3], arg[4],
- arg[5], arg[6], arg[7], arg[8], arg[9]);
+ ObjectPair result =
+ target(arg[0], arg[1], arg[2], arg[3], arg[4], arg[5], arg[6],
+ arg[7], arg[8], arg[9], arg[10], arg[11], arg[12], arg[13],
+ arg[14], arg[15], arg[16], arg[17], arg[18], arg[19]);
intptr_t x;
intptr_t y;
decodeObjectPair(&result, &x, &y);
@@ -2221,8 +2230,10 @@ void Simulator::SoftwareInterrupt(Instruction* instr) {
redirection->type() == ExternalReference::FAST_C_CALL);
SimulatorRuntimeCall target =
reinterpret_cast<SimulatorRuntimeCall>(external);
- intptr_t result = target(arg[0], arg[1], arg[2], arg[3], arg[4],
- arg[5], arg[6], arg[7], arg[8], arg[9]);
+ intptr_t result =
+ target(arg[0], arg[1], arg[2], arg[3], arg[4], arg[5], arg[6],
+ arg[7], arg[8], arg[9], arg[10], arg[11], arg[12], arg[13],
+ arg[14], arg[15], arg[16], arg[17], arg[18], arg[19]);
if (::v8::internal::FLAG_trace_sim) {
PrintF("Returned %08" V8PRIxPTR "\n", result);
}
@@ -4044,15 +4055,12 @@ EVALUATE(VSEL) {
DECODE_VRR_E_INSTRUCTION(r1, r2, r3, r4, m6, m5);
USE(m5);
USE(m6);
- fpr_t scratch = get_simd_register(r2);
- fpr_t mask = get_simd_register(r4);
- scratch.int64[0] ^= get_simd_register_by_lane<int64_t>(r3, 0);
- scratch.int64[1] ^= get_simd_register_by_lane<int64_t>(r3, 1);
- mask.int64[0] &= scratch.int64[0];
- mask.int64[1] &= scratch.int64[1];
- mask.int64[0] ^= get_simd_register_by_lane<int64_t>(r3, 0);
- mask.int64[1] ^= get_simd_register_by_lane<int64_t>(r3, 1);
- set_simd_register(r1, mask);
+ unsigned __int128 src_1 = bit_cast<__int128>(get_simd_register(r2).int8);
+ unsigned __int128 src_2 = bit_cast<__int128>(get_simd_register(r3).int8);
+ unsigned __int128 src_3 = bit_cast<__int128>(get_simd_register(r4).int8);
+ unsigned __int128 tmp = (src_1 & src_3) | (src_2 & ~src_3);
+ fpr_t* result = bit_cast<fpr_t*>(&tmp);
+ set_simd_register(r1, *result);
return length;
}
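The rewritten VSEL above computes the standard 128-bit bitwise select, (src_1 & src_3) | (src_2 & ~src_3), so each result bit comes from r2 where the r4 mask bit is 1 and from r3 where it is 0. That matches what the old xor/and/xor sequence computed, since (m & (a ^ b)) ^ b reduces to the same select; a one-bit check (illustrative only):

  // m = 1: (1 & (a ^ b)) ^ b == a ^ b ^ b == a;   (a & 1) | (b & 0) == a
  // m = 0: (0 & (a ^ b)) ^ b == b;                (a & 0) | (b & 1) == b
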
diff --git a/deps/v8/src/execution/simulator-base.cc b/deps/v8/src/execution/simulator-base.cc
index b26c775917..6bdb8f8a17 100644
--- a/deps/v8/src/execution/simulator-base.cc
+++ b/deps/v8/src/execution/simulator-base.cc
@@ -96,6 +96,31 @@ Redirection* Redirection::Get(Address external_function,
return new Redirection(external_function, type);
}
+void SimulatorData::RegisterFunctionsAndSignatures(
+ Address* c_functions, const CFunctionInfo* const* c_signatures,
+ unsigned num_functions) {
+ base::MutexGuard guard(&signature_map_mutex_);
+ for (unsigned i = 0; i < num_functions; ++i) {
+ EncodedCSignature sig(c_signatures[i]);
+ AddSignatureForTarget(c_functions[i], sig);
+ }
+}
+
+void SimulatorData::AddSignatureForTarget(Address target,
+ const EncodedCSignature& signature) {
+ target_to_signature_table_[target] = signature;
+}
+
+const EncodedCSignature& SimulatorData::GetSignatureForTarget(Address target) {
+ base::MutexGuard guard(&signature_map_mutex_);
+ auto entry = target_to_signature_table_.find(target);
+ if (entry != target_to_signature_table_.end()) {
+ const EncodedCSignature& sig = entry->second;
+ return sig;
+ }
+ return EncodedCSignature::Invalid();
+}
+
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/execution/simulator-base.h b/deps/v8/src/execution/simulator-base.h
index 90e9441609..c97cecfdc1 100644
--- a/deps/v8/src/execution/simulator-base.h
+++ b/deps/v8/src/execution/simulator-base.h
@@ -7,6 +7,9 @@
#include <type_traits>
+#ifdef V8_TARGET_ARCH_ARM64
+#include "include/v8-fast-api-calls.h"
+#endif // V8_TARGET_ARCH_ARM64
#include "src/base/hashmap.h"
#include "src/common/globals.h"
#include "src/execution/isolate.h"
@@ -68,6 +71,16 @@ class SimulatorBase {
return Object(ret);
}
+#ifdef V8_TARGET_ARCH_ARM64
+ template <typename T>
+ static typename std::enable_if<std::is_same<T, v8::AnyCType>::value, T>::type
+ ConvertReturn(intptr_t ret) {
+ v8::AnyCType result;
+ result.int64_value = static_cast<int64_t>(ret);
+ return result;
+ }
+#endif // V8_TARGET_ARCH_ARM64
+
// Convert back void return type (i.e. no return).
template <typename T>
static typename std::enable_if<std::is_void<T>::value, T>::type ConvertReturn(
@@ -106,6 +119,13 @@ class SimulatorBase {
ConvertArg(T arg) {
return reinterpret_cast<intptr_t>(arg);
}
+
+ template <typename T>
+ static
+ typename std::enable_if<std::is_floating_point<T>::value, intptr_t>::type
+ ConvertArg(T arg) {
+ UNREACHABLE();
+ }
};
// When the generated code calls an external reference we need to catch that in
@@ -176,6 +196,39 @@ class Redirection {
#endif
};
+class SimulatorData {
+ public:
+ // Calls AddSignatureForTarget for each function and signature, registering
+ // an encoded version of the signature within a mapping maintained by the
+ // simulator (from function address -> encoded signature). The function
+ // is supposed to be called whenever one compiles a fast API function with
+ // possibly multiple overloads.
+ // Note that this function is called from one or more compiler threads,
+ // while the main thread might be reading at the same time from the map, so
+ // both Register* and Get* are guarded with a single mutex.
+ void RegisterFunctionsAndSignatures(Address* c_functions,
+ const CFunctionInfo* const* c_signatures,
+ unsigned num_functions);
+ // The following method is used by the simulator itself to query
+ // whether a signature is registered for the call target and use this
+ // information to address arguments correctly (load them from either GP or
+ // FP registers, or from the stack).
+ const EncodedCSignature& GetSignatureForTarget(Address target);
+ // This method is exposed only for tests, which don't need synchronisation.
+ void AddSignatureForTargetForTesting(Address target,
+ const EncodedCSignature& signature) {
+ AddSignatureForTarget(target, signature);
+ }
+
+ private:
+ void AddSignatureForTarget(Address target,
+ const EncodedCSignature& signature);
+
+ v8::base::Mutex signature_map_mutex_;
+ typedef std::unordered_map<Address, EncodedCSignature> TargetToSignatureTable;
+ TargetToSignatureTable target_to_signature_table_;
+};
+
} // namespace internal
} // namespace v8
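Taken together, the simulator-base.cc and simulator-base.h changes introduce a mutex-guarded map from C function address to encoded signature. A minimal sketch of the intended flow, using only the calls added in this diff (the simulator_data pointer, the target address, and the c_functions/c_signatures arrays are placeholders):

  // Compiler thread: publish the signatures of a fast-API function and its
  // overloads; RegisterFunctionsAndSignatures takes the mutex internally.
  simulator_data->RegisterFunctionsAndSignatures(c_functions, c_signatures,
                                                 num_functions);

  // Simulator thread: before dispatching a call to `target`, query the map so
  // arguments can be read from the correct (GP or FP) registers; a target with
  // no entry yields EncodedCSignature::Invalid().
  const EncodedCSignature& sig =
      simulator_data->GetSignatureForTarget(target);
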
diff --git a/deps/v8/src/execution/simulator.h b/deps/v8/src/execution/simulator.h
index 5bf9d4612e..6b6b845e1e 100644
--- a/deps/v8/src/execution/simulator.h
+++ b/deps/v8/src/execution/simulator.h
@@ -122,9 +122,13 @@ class GeneratedCode {
// Starboard is a platform abstraction interface that also include Windows
// platforms like UWP.
#if defined(V8_TARGET_OS_WIN) && !defined(V8_OS_WIN) && \
- !defined(V8_OS_STARBOARD)
- FATAL("Generated code execution not possible during cross-compilation.");
-#endif // defined(V8_TARGET_OS_WIN) && !defined(V8_OS_WIN)
+ !defined(V8_OS_STARBOARD) && !defined(V8_TARGET_ARCH_ARM)
+ FATAL(
+ "Generated code execution not possible during cross-compilation."
+ "Also, generic C function calls are not implemented on 32-bit arm "
+ "yet.");
+#endif // defined(V8_TARGET_OS_WIN) && !defined(V8_OS_WIN) &&
+ // !defined(V8_OS_STARBOARD) && !defined(V8_TARGET_ARCH_ARM)
return Simulator::current(isolate_)->template Call<Return>(
reinterpret_cast<Address>(fn_ptr_), args...);
}
diff --git a/deps/v8/src/execution/thread-local-top.cc b/deps/v8/src/execution/thread-local-top.cc
index c2b09c67b1..3bdeb227a8 100644
--- a/deps/v8/src/execution/thread-local-top.cc
+++ b/deps/v8/src/execution/thread-local-top.cc
@@ -31,6 +31,7 @@ void ThreadLocalTop::Clear() {
js_entry_sp_ = kNullAddress;
external_callback_scope_ = nullptr;
current_vm_state_ = EXTERNAL;
+ current_embedder_state_ = nullptr;
failed_access_check_callback_ = nullptr;
thread_in_wasm_flag_address_ = kNullAddress;
#ifdef V8_ENABLE_CONSERVATIVE_STACK_SCANNING
diff --git a/deps/v8/src/execution/thread-local-top.h b/deps/v8/src/execution/thread-local-top.h
index 236beda8a0..b072005d40 100644
--- a/deps/v8/src/execution/thread-local-top.h
+++ b/deps/v8/src/execution/thread-local-top.h
@@ -23,6 +23,7 @@ class TryCatch;
namespace internal {
+class EmbedderState;
class ExternalCallbackScope;
class Isolate;
class PromiseOnStack;
@@ -34,9 +35,9 @@ class ThreadLocalTop {
// refactor this to really consist of just Addresses and 32-bit
// integer fields.
#ifdef V8_ENABLE_CONSERVATIVE_STACK_SCANNING
- static constexpr uint32_t kSizeInBytes = 25 * kSystemPointerSize;
+ static constexpr uint32_t kSizeInBytes = 26 * kSystemPointerSize;
#else
- static constexpr uint32_t kSizeInBytes = 24 * kSystemPointerSize;
+ static constexpr uint32_t kSizeInBytes = 25 * kSystemPointerSize;
#endif
// Does early low-level initialization that does not depend on the
@@ -151,6 +152,7 @@ class ThreadLocalTop {
// The external callback we're currently in.
ExternalCallbackScope* external_callback_scope_;
StateTag current_vm_state_;
+ EmbedderState* current_embedder_state_;
// Call back function to report unsafe JS accesses.
v8::FailedAccessCheckCallback failed_access_check_callback_;
diff --git a/deps/v8/src/flags/flag-definitions.h b/deps/v8/src/flags/flag-definitions.h
index ded34d2680..6a8eb14677 100644
--- a/deps/v8/src/flags/flag-definitions.h
+++ b/deps/v8/src/flags/flag-definitions.h
@@ -181,12 +181,6 @@ struct MaybeBoolFlag {
#define V8_VIRTUAL_MEMORY_CAGE_BOOL false
#endif
-#ifdef V8_CAGED_POINTERS
-#define V8_CAGED_POINTERS_BOOL true
-#else
-#define V8_CAGED_POINTERS_BOOL false
-#endif
-
// D8's MultiMappedAllocator is only available on Linux, and only if the virtual
// memory cage is not enabled.
#if V8_OS_LINUX && !V8_VIRTUAL_MEMORY_CAGE_BOOL
@@ -336,7 +330,6 @@ DEFINE_BOOL(harmony_shipping, true, "enable all shipped harmony features")
V(harmony_sharedarraybuffer, "harmony sharedarraybuffer") \
V(harmony_atomics, "harmony atomics") \
V(harmony_private_brand_checks, "harmony private brand checks") \
- V(harmony_top_level_await, "harmony top level await") \
V(harmony_relative_indexing_methods, "harmony relative indexing methods") \
V(harmony_error_cause, "harmony error cause property") \
V(harmony_object_has_own, "harmony Object.hasOwn") \
@@ -344,13 +337,7 @@ DEFINE_BOOL(harmony_shipping, true, "enable all shipped harmony features")
V(harmony_array_find_last, "harmony array find last helpers")
#ifdef V8_INTL_SUPPORT
-#define HARMONY_SHIPPING(V) \
- HARMONY_SHIPPING_BASE(V) \
- V(harmony_intl_dateformat_day_period, \
- "Add dayPeriod option to DateTimeFormat") \
- V(harmony_intl_displaynames_v2, "Intl.DisplayNames v2") \
- V(harmony_intl_more_timezone, \
- "Extend Intl.DateTimeFormat timeZoneName Option")
+#define HARMONY_SHIPPING(V) HARMONY_SHIPPING_BASE(V)
#else
#define HARMONY_SHIPPING(V) HARMONY_SHIPPING_BASE(V)
#endif
@@ -445,8 +432,9 @@ DEFINE_NEG_IMPLICATION(enable_third_party_heap, allocation_site_pretenuring)
DEFINE_NEG_IMPLICATION(enable_third_party_heap, turbo_allocation_folding)
DEFINE_NEG_IMPLICATION(enable_third_party_heap, concurrent_recompilation)
DEFINE_NEG_IMPLICATION(enable_third_party_heap, concurrent_inlining)
+DEFINE_NEG_IMPLICATION(enable_third_party_heap, script_streaming)
DEFINE_NEG_IMPLICATION(enable_third_party_heap,
- finalize_streaming_on_background)
+ parallel_compile_tasks_for_eager_toplevel)
DEFINE_NEG_IMPLICATION(enable_third_party_heap, use_marking_progress_bar)
DEFINE_NEG_IMPLICATION(enable_third_party_heap, move_object_start)
DEFINE_NEG_IMPLICATION(enable_third_party_heap, concurrent_marking)
@@ -578,8 +566,6 @@ DEFINE_NEG_NEG_IMPLICATION(allocation_site_tracking,
DEFINE_BOOL(allocation_site_pretenuring, true,
"pretenure with allocation sites")
DEFINE_BOOL(page_promotion, true, "promote pages based on utilization")
-DEFINE_BOOL_READONLY(always_promote_young_mc, true,
- "always promote young objects during mark-compact")
DEFINE_INT(page_promotion_threshold, 70,
"min percentage of live bytes on a page to enable fast evacuation")
DEFINE_BOOL(trace_pretenuring, false,
@@ -716,12 +702,15 @@ DEFINE_BOOL_READONLY(concurrent_sparkplug, false,
#else
DEFINE_BOOL(concurrent_sparkplug, false,
"compile Sparkplug code in a background thread")
-#endif
-#if !MUST_WRITE_PROTECT_CODE_MEMORY
-// TODO(victorgomes): Currently concurrent compilation only works if we assume
-// no write protect in code space.
-DEFINE_NEG_IMPLICATION(concurrent_sparkplug, write_protect_code_memory)
-#endif
+DEFINE_IMPLICATION(concurrent_sparkplug, sparkplug)
+DEFINE_WEAK_IMPLICATION(future, concurrent_sparkplug)
+DEFINE_NEG_IMPLICATION(predictable, concurrent_sparkplug)
+DEFINE_NEG_IMPLICATION(single_threaded, concurrent_sparkplug)
+DEFINE_NEG_IMPLICATION(jitless, concurrent_sparkplug)
+#endif
+DEFINE_UINT(
+ concurrent_sparkplug_max_threads, 0,
+ "max number of threads that concurrent Sparkplug can use (0 for unbounded)")
#else
DEFINE_BOOL(baseline_batch_compilation, false, "batch compile Sparkplug code")
DEFINE_BOOL_READONLY(concurrent_sparkplug, false,
@@ -930,6 +919,7 @@ DEFINE_BOOL(turbo_dynamic_map_checks, false,
"if all handlers in an IC are the same for turboprop")
DEFINE_BOOL(turbo_compress_translation_arrays, false,
"compress translation arrays (experimental)")
+DEFINE_WEAK_IMPLICATION(future, turbo_inline_js_wasm_calls)
DEFINE_BOOL(turbo_inline_js_wasm_calls, false, "inline JS->Wasm calls")
DEFINE_BOOL(turbo_use_mid_tier_regalloc_for_huge_functions, false,
"fall back to the mid-tier register allocator for huge functions "
@@ -994,10 +984,6 @@ DEFINE_BOOL(wasm_tier_up, true,
"have an effect)")
DEFINE_BOOL(wasm_dynamic_tiering, false,
"enable dynamic tier up to the optimizing compiler")
-DEFINE_BOOL(new_wasm_dynamic_tiering, false, "dynamic tier up (new impl)")
-// For dynamic tiering to have an effect, we have to turn off eager tierup.
-// This is handled in module-compiler.cc for --wasm-dynamic-tiering.
-DEFINE_NEG_IMPLICATION(new_wasm_dynamic_tiering, wasm_tier_up)
DEFINE_INT(wasm_tiering_budget, 1800000,
"budget for dynamic tiering (rough approximation of bytes executed")
DEFINE_INT(
@@ -1012,6 +998,8 @@ DEFINE_DEBUG_BOOL(trace_wasm_interpreter, false,
"trace interpretation of wasm code")
DEFINE_DEBUG_BOOL(trace_wasm_streaming, false,
"trace streaming compilation of wasm code")
+DEFINE_DEBUG_BOOL(trace_wasm_stack_switching, false,
+ "trace wasm stack switching")
DEFINE_BOOL(liftoff, true,
"enable Liftoff, the baseline compiler for WebAssembly")
DEFINE_BOOL(liftoff_only, false,
@@ -1105,8 +1093,14 @@ DEFINE_BOOL(trace_wasm_inlining, false, "trace wasm inlining")
DEFINE_BOOL(trace_wasm_speculative_inlining, false,
"trace wasm speculative inlining")
DEFINE_IMPLICATION(wasm_speculative_inlining, experimental_wasm_typed_funcref)
+DEFINE_IMPLICATION(wasm_speculative_inlining, wasm_dynamic_tiering)
DEFINE_IMPLICATION(wasm_speculative_inlining, wasm_inlining)
-DEFINE_NEG_IMPLICATION(wasm_speculative_inlining, wasm_tier_up)
+DEFINE_WEAK_IMPLICATION(experimental_wasm_gc, wasm_speculative_inlining)
+// Speculative inlining needs type feedback from Liftoff and compilation in
+// Turbofan.
+DEFINE_NEG_NEG_IMPLICATION(liftoff, wasm_speculative_inlining)
+DEFINE_NEG_IMPLICATION(liftoff_only, wasm_speculative_inlining)
+
DEFINE_BOOL(wasm_loop_unrolling, true,
"enable loop unrolling for wasm functions")
DEFINE_BOOL(wasm_fuzzer_gen_test, false,
@@ -1213,6 +1207,10 @@ DEFINE_BOOL(trace_gc_freelists_verbose, false,
"prints details of freelists of each page before and after "
"each major garbage collection")
DEFINE_IMPLICATION(trace_gc_freelists_verbose, trace_gc_freelists)
+DEFINE_BOOL(trace_gc_heap_layout, false,
+ "print layout of pages in heap before and after gc")
+DEFINE_BOOL(trace_gc_heap_layout_ignore_minor_gc, true,
+ "do not print trace line before and after minor-gc")
DEFINE_BOOL(trace_evacuation_candidates, false,
"Show statistics about the pages evacuation by the compaction")
DEFINE_BOOL(
@@ -1259,7 +1257,7 @@ DEFINE_BOOL_READONLY(write_protect_code_memory, true,
#else
DEFINE_BOOL(write_protect_code_memory, true, "write protect code memory")
#endif
-#if defined(V8_ATOMIC_MARKING_STATE) && defined(V8_ATOMIC_OBJECT_FIELD_WRITES)
+#if defined(V8_ATOMIC_OBJECT_FIELD_WRITES)
#define V8_CONCURRENT_MARKING_BOOL true
#else
#define V8_CONCURRENT_MARKING_BOOL false
@@ -1337,10 +1335,23 @@ DEFINE_INT(heap_growing_percent, 0,
"specifies heap growing factor as (1 + heap_growing_percent/100)")
DEFINE_INT(v8_os_page_size, 0, "override OS page size (in KBytes)")
DEFINE_BOOL(allocation_buffer_parking, true, "allocation buffer parking")
-DEFINE_BOOL(always_compact, false, "Perform compaction on every full GC")
-DEFINE_BOOL(never_compact, false,
- "Never perform compaction on full GC - testing only")
-DEFINE_BOOL(compact_code_space, true, "Compact code space on full collections")
+DEFINE_BOOL(compact, true,
+ "Perform compaction on full GCs based on V8's default heuristics")
+DEFINE_BOOL(compact_code_space, true,
+ "Perform code space compaction on full collections.")
+DEFINE_BOOL(compact_on_every_full_gc, false,
+ "Perform compaction on every full GC")
+DEFINE_BOOL(compact_with_stack, true,
+ "Perform compaction when finalizing a full GC with stack")
+DEFINE_BOOL(
+ compact_code_space_with_stack, true,
+ "Perform code space compaction when finalizing a full GC with stack")
+DEFINE_BOOL(stress_compaction, false,
+ "Stress GC compaction to flush out bugs (implies "
+ "--force_marking_deque_overflows)")
+DEFINE_BOOL(stress_compaction_random, false,
+ "Stress GC compaction by selecting random percent of pages as "
+ "evacuation candidates. Overrides stress_compaction.")
DEFINE_BOOL(flush_baseline_code, false,
"flush of baseline code when it has not been executed recently")
DEFINE_BOOL(flush_bytecode, true,
@@ -1355,12 +1366,6 @@ DEFINE_BOOL(stress_per_context_marking_worklist, false,
DEFINE_BOOL(force_marking_deque_overflows, false,
"force overflows of marking deque by reducing it's size "
"to 64 words")
-DEFINE_BOOL(stress_compaction, false,
- "stress the GC compactor to flush out bugs (implies "
- "--force_marking_deque_overflows)")
-DEFINE_BOOL(stress_compaction_random, false,
- "Stress GC compaction by selecting random percent of pages as "
- "evacuation candidates. It overrides stress_compaction.")
DEFINE_BOOL(stress_incremental_marking, false,
"force incremental marking for small heaps and run it more often")
@@ -1479,14 +1484,8 @@ DEFINE_BOOL(enable_regexp_unaligned_accesses, true,
DEFINE_BOOL(script_streaming, true, "enable parsing on background")
DEFINE_BOOL(stress_background_compile, false,
"stress test parsing on background")
-DEFINE_BOOL(
- finalize_streaming_on_background, true,
- "perform the script streaming finalization on the background thread")
DEFINE_BOOL(concurrent_cache_deserialization, true,
"enable deserializing code caches on background")
-// TODO(leszeks): Parallel compile tasks currently don't support off-thread
-// finalization.
-DEFINE_NEG_IMPLICATION(parallel_compile_tasks, finalize_streaming_on_background)
DEFINE_BOOL(disable_old_api_accessors, false,
"Disable old-style API accessors whose setters trigger through the "
"prototype chain")
@@ -1577,11 +1576,19 @@ DEFINE_BOOL(compilation_cache, true, "enable compilation cache")
DEFINE_BOOL(cache_prototype_transitions, true, "cache prototype transitions")
// lazy-compile-dispatcher.cc
-DEFINE_BOOL(parallel_compile_tasks, false, "enable parallel compile tasks")
DEFINE_BOOL(lazy_compile_dispatcher, false, "enable compiler dispatcher")
-DEFINE_IMPLICATION(parallel_compile_tasks, lazy_compile_dispatcher)
+DEFINE_UINT(lazy_compile_dispatcher_max_threads, 0,
+ "max threads for compiler dispatcher (0 for unbounded)")
DEFINE_BOOL(trace_compiler_dispatcher, false,
"trace compiler dispatcher activity")
+DEFINE_BOOL(
+ parallel_compile_tasks_for_eager_toplevel, false,
+ "spawn parallel compile tasks for eagerly compiled, top-level functions")
+DEFINE_IMPLICATION(parallel_compile_tasks_for_eager_toplevel,
+ lazy_compile_dispatcher)
+DEFINE_BOOL(parallel_compile_tasks_for_lazy, false,
+ "spawn parallel compile tasks for all lazily compiled functions")
+DEFINE_IMPLICATION(parallel_compile_tasks_for_lazy, lazy_compile_dispatcher)
// cpu-profiler.cc
DEFINE_INT(cpu_profiler_sampling_interval, 1000,
@@ -1716,9 +1723,6 @@ DEFINE_BOOL(correctness_fuzzer_suppressions, false,
"fuzzing: Abort program when the stack overflows or a string "
"exceeds maximum length (as opposed to throwing RangeError). "
"Use a fixed suppression string for error messages.")
-DEFINE_BOOL(randomize_hashes, true,
- "randomize hashes to avoid predictable hash collisions "
- "(with snapshots this option cannot override the baked-in seed)")
DEFINE_BOOL(rehash_snapshot, true,
"rehash strings from the snapshot to override the baked-in seed")
DEFINE_UINT64(hash_seed, 0,
@@ -1742,12 +1746,9 @@ DEFINE_BOOL(experimental_flush_embedded_blob_icache, true,
"Used in an experiment to evaluate icache flushing on certain CPUs")
// Flags for short builtin calls feature
-#undef FLAG
#if V8_SHORT_BUILTIN_CALLS
-#define FLAG FLAG_FULL
#define V8_SHORT_BUILTIN_CALLS_BOOL true
#else
-#define FLAG FLAG_READONLY
#define V8_SHORT_BUILTIN_CALLS_BOOL false
#endif
@@ -1755,9 +1756,6 @@ DEFINE_BOOL(short_builtin_calls, V8_SHORT_BUILTIN_CALLS_BOOL,
"Put embedded builtins code into the code range for shorter "
"builtin calls/jumps if system has >=4GB memory")
-#undef FLAG
-#define FLAG FLAG_FULL
-
// runtime.cc
DEFINE_BOOL(runtime_call_stats, false, "report runtime call counts and times")
DEFINE_GENERIC_IMPLICATION(
@@ -1772,8 +1770,9 @@ DEFINE_BOOL(rcs_cpu_time, false,
DEFINE_IMPLICATION(rcs_cpu_time, rcs)
// snapshot-common.cc
-DEFINE_BOOL(skip_snapshot_checksum, false,
- "Skip snapshot checksum calculation when deserializing an Isolate.")
+DEFINE_BOOL(verify_snapshot_checksum, true,
+ "Verify snapshot checksums when deserializing snapshots. Enable "
+ "checksum creation and verification for code caches.")
DEFINE_BOOL(profile_deserialization, false,
"Print the time it takes to deserialize the snapshot.")
DEFINE_BOOL(serialization_statistics, false,
@@ -1885,13 +1884,10 @@ DEFINE_BOOL_READONLY(minor_mc, false,
DEFINE_BOOL(help, false, "Print usage message, including flags, on console")
DEFINE_BOOL(print_flag_values, false, "Print all flag values of V8")
-DEFINE_BOOL(dump_counters, false, "Dump counters on exit")
+// Slow histograms are also enabled via --dump-counters in d8.
DEFINE_BOOL(slow_histograms, false,
"Enable slow histograms with more overhead.")
-DEFINE_IMPLICATION(dump_counters, slow_histograms)
-DEFINE_BOOL(dump_counters_nvp, false,
- "Dump counters as name-value pairs on exit")
DEFINE_BOOL(use_external_strings, false, "Use external strings for source code")
DEFINE_STRING(map_counters, "", "Map counters to a file")
DEFINE_BOOL(mock_arraybuffer_allocator, false,
@@ -2179,8 +2175,10 @@ DEFINE_NEG_IMPLICATION(predictable, memory_reducer)
// before. Audit them, and remove any unneeded implications.
DEFINE_IMPLICATION(predictable, single_threaded_gc)
DEFINE_NEG_IMPLICATION(predictable, concurrent_recompilation)
-DEFINE_NEG_IMPLICATION(predictable, lazy_compile_dispatcher)
DEFINE_NEG_IMPLICATION(predictable, stress_concurrent_inlining)
+DEFINE_NEG_IMPLICATION(predictable, lazy_compile_dispatcher)
+DEFINE_NEG_IMPLICATION(predictable, parallel_compile_tasks_for_eager_toplevel)
+DEFINE_NEG_IMPLICATION(predictable, parallel_compile_tasks_for_lazy)
DEFINE_BOOL(predictable_gc_schedule, false,
"Predictable garbage collection schedule. Fixes heap growing, "
@@ -2197,8 +2195,11 @@ DEFINE_NEG_IMPLICATION(predictable_gc_schedule, memory_reducer)
DEFINE_BOOL(single_threaded, false, "disable the use of background tasks")
DEFINE_IMPLICATION(single_threaded, single_threaded_gc)
DEFINE_NEG_IMPLICATION(single_threaded, concurrent_recompilation)
-DEFINE_NEG_IMPLICATION(single_threaded, lazy_compile_dispatcher)
DEFINE_NEG_IMPLICATION(single_threaded, stress_concurrent_inlining)
+DEFINE_NEG_IMPLICATION(single_threaded, lazy_compile_dispatcher)
+DEFINE_NEG_IMPLICATION(single_threaded,
+ parallel_compile_tasks_for_eager_toplevel)
+DEFINE_NEG_IMPLICATION(single_threaded, parallel_compile_tasks_for_lazy)
//
// Parallel and concurrent GC (Orinoco) related flags.
diff --git a/deps/v8/src/handles/handles-inl.h b/deps/v8/src/handles/handles-inl.h
index 4c1817e80d..c0dab51de8 100644
--- a/deps/v8/src/handles/handles-inl.h
+++ b/deps/v8/src/handles/handles-inl.h
@@ -95,7 +95,7 @@ HandleScope::HandleScope(HandleScope&& other) V8_NOEXCEPT
}
HandleScope::~HandleScope() {
- if (isolate_ == nullptr) return;
+ if (V8_UNLIKELY(isolate_ == nullptr)) return;
CloseScope(isolate_, prev_next_, prev_limit_);
}
@@ -123,7 +123,7 @@ void HandleScope::CloseScope(Isolate* isolate, Address* prev_next,
std::swap(current->next, prev_next);
current->level--;
Address* limit = prev_next;
- if (current->limit != prev_limit) {
+ if (V8_UNLIKELY(current->limit != prev_limit)) {
current->limit = prev_limit;
limit = prev_limit;
DeleteExtensions(isolate);
diff --git a/deps/v8/src/handles/handles.h b/deps/v8/src/handles/handles.h
index 166b7ee4ab..3bde90f81f 100644
--- a/deps/v8/src/handles/handles.h
+++ b/deps/v8/src/handles/handles.h
@@ -199,7 +199,7 @@ inline std::ostream& operator<<(std::ostream& os, Handle<T> handle);
// for which the handle scope has been deleted is undefined.
class V8_NODISCARD HandleScope {
public:
- explicit inline HandleScope(Isolate* isolate);
+ explicit V8_INLINE HandleScope(Isolate* isolate);
inline HandleScope(HandleScope&& other) V8_NOEXCEPT;
HandleScope(const HandleScope&) = delete;
HandleScope& operator=(const HandleScope&) = delete;
@@ -213,7 +213,7 @@ class V8_NODISCARD HandleScope {
void* operator new(size_t size) = delete;
void operator delete(void* size_t) = delete;
- inline ~HandleScope();
+ V8_INLINE ~HandleScope();
inline HandleScope& operator=(HandleScope&& other) V8_NOEXCEPT;
@@ -253,8 +253,8 @@ class V8_NODISCARD HandleScope {
Address* prev_limit_;
// Close the handle scope resetting limits to a previous state.
- static inline void CloseScope(Isolate* isolate, Address* prev_next,
- Address* prev_limit);
+ static V8_INLINE void CloseScope(Isolate* isolate, Address* prev_next,
+ Address* prev_limit);
// Extend the handle scope making room for more handles.
V8_EXPORT_PRIVATE static Address* Extend(Isolate* isolate);
diff --git a/deps/v8/src/heap/allocation-observer.cc b/deps/v8/src/heap/allocation-observer.cc
index 94d5a2f833..d25734e349 100644
--- a/deps/v8/src/heap/allocation-observer.cc
+++ b/deps/v8/src/heap/allocation-observer.cc
@@ -60,8 +60,8 @@ void AllocationCounter::RemoveAllocationObserver(AllocationObserver* observer) {
} else {
size_t step_size = 0;
- for (AllocationObserverCounter& observer : observers_) {
- size_t left_in_step = observer.next_counter_ - current_counter_;
+ for (AllocationObserverCounter& observer_counter : observers_) {
+ size_t left_in_step = observer_counter.next_counter_ - current_counter_;
DCHECK_GT(left_in_step, 0);
step_size = step_size ? std::min(step_size, left_in_step) : left_in_step;
}
diff --git a/deps/v8/src/heap/base/worklist.h b/deps/v8/src/heap/base/worklist.h
index e2d33616ad..70c8a4f1ba 100644
--- a/deps/v8/src/heap/base/worklist.h
+++ b/deps/v8/src/heap/base/worklist.h
@@ -52,14 +52,17 @@ class Worklist {
bool Pop(Segment** segment);
// Returns true if the list of segments is empty.
- bool IsEmpty();
+ bool IsEmpty() const;
// Returns the number of segments in the list.
- size_t Size();
+ size_t Size() const;
// Moves the segments of the given marking worklist into this
// marking worklist.
void Merge(Worklist<EntryType, SegmentSize>* other);
+ // Swaps the segments with the given marking worklist.
+ void Swap(Worklist<EntryType, SegmentSize>* other);
+
// These functions are not thread-safe. They should be called only
// if all local marking worklists that use the current worklist have
// been published and are empty.
@@ -100,13 +103,13 @@ bool Worklist<EntryType, SegmentSize>::Pop(Segment** segment) {
}
template <typename EntryType, uint16_t SegmentSize>
-bool Worklist<EntryType, SegmentSize>::IsEmpty() {
+bool Worklist<EntryType, SegmentSize>::IsEmpty() const {
return v8::base::AsAtomicPtr(&top_)->load(std::memory_order_relaxed) ==
nullptr;
}
template <typename EntryType, uint16_t SegmentSize>
-size_t Worklist<EntryType, SegmentSize>::Size() {
+size_t Worklist<EntryType, SegmentSize>::Size() const {
// It is safe to read |size_| without a lock since this variable is
// atomic, keeping in mind that threads may not immediately see the new
// value when it is updated.
@@ -191,6 +194,17 @@ void Worklist<EntryType, SegmentSize>::Merge(
}
template <typename EntryType, uint16_t SegmentSize>
+void Worklist<EntryType, SegmentSize>::Swap(
+ Worklist<EntryType, SegmentSize>* other) {
+ Segment* top = top_;
+ set_top(other->top_);
+ other->set_top(top);
+ size_t other_size = other->size_.exchange(
+ size_.load(std::memory_order_relaxed), std::memory_order_relaxed);
+ size_.store(other_size, std::memory_order_relaxed);
+}
+
+template <typename EntryType, uint16_t SegmentSize>
class Worklist<EntryType, SegmentSize>::Segment : public internal::SegmentBase {
public:
static const uint16_t kSize = SegmentSize;
@@ -214,14 +228,14 @@ class Worklist<EntryType, SegmentSize>::Segment : public internal::SegmentBase {
friend class Worklist<EntryType, SegmentSize>::Local;
- FRIEND_TEST(CppgcWorkListTest, SegmentCreate);
- FRIEND_TEST(CppgcWorkListTest, SegmentPush);
- FRIEND_TEST(CppgcWorkListTest, SegmentPushPop);
- FRIEND_TEST(CppgcWorkListTest, SegmentIsEmpty);
- FRIEND_TEST(CppgcWorkListTest, SegmentIsFull);
- FRIEND_TEST(CppgcWorkListTest, SegmentClear);
- FRIEND_TEST(CppgcWorkListTest, SegmentUpdateFalse);
- FRIEND_TEST(CppgcWorkListTest, SegmentUpdate);
+ FRIEND_TEST(WorkListTest, SegmentCreate);
+ FRIEND_TEST(WorkListTest, SegmentPush);
+ FRIEND_TEST(WorkListTest, SegmentPushPop);
+ FRIEND_TEST(WorkListTest, SegmentIsEmpty);
+ FRIEND_TEST(WorkListTest, SegmentIsFull);
+ FRIEND_TEST(WorkListTest, SegmentClear);
+ FRIEND_TEST(WorkListTest, SegmentUpdateFalse);
+ FRIEND_TEST(WorkListTest, SegmentUpdate);
};
template <typename EntryType, uint16_t SegmentSize>
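
The new Swap() above exchanges the segment list head and then swaps the two
relaxed atomic size counters with an exchange()/store() pair. A minimal
self-contained sketch of that idiom, using a simplified stand-in type rather
than the real Worklist:

  #include <atomic>
  #include <cstddef>
  #include <utility>

  // Toy worklist that only tracks a head pointer and an atomic segment count.
  struct TinyWorklist {
    void* top = nullptr;           // head of the segment list
    std::atomic<size_t> size{0};   // published segment count

    void Swap(TinyWorklist* other) {
      // Swap the (externally synchronized) head first.
      std::swap(top, other->top);
      // Swap the atomic counters: exchange() hands back the other side's old
      // value, which is then stored locally. Relaxed ordering suffices because
      // the size is only used as a scheduling heuristic.
      size_t other_size = other->size.exchange(
          size.load(std::memory_order_relaxed), std::memory_order_relaxed);
      size.store(other_size, std::memory_order_relaxed);
    }
  };
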
diff --git a/deps/v8/src/heap/code-range.cc b/deps/v8/src/heap/code-range.cc
index 5d5f3f3014..5c5911d676 100644
--- a/deps/v8/src/heap/code-range.cc
+++ b/deps/v8/src/heap/code-range.cc
@@ -124,8 +124,16 @@ bool CodeRange::InitReservation(v8::PageAllocator* page_allocator,
: VirtualMemoryCage::ReservationParams::kAnyBaseAlignment;
params.base_bias_size = RoundUp(reserved_area, allocate_page_size);
params.page_size = MemoryChunk::kPageSize;
+ // V8_EXTERNAL_CODE_SPACE imposes an additional alignment requirement on the
+ // base address, so make sure the hint calculation function takes that into
+ // account. Otherwise the allocated reservation might fall outside of the
+ // preferred region (see Isolate::GetShortBuiltinsCallRegion()).
+ const size_t hint_alignment =
+ V8_EXTERNAL_CODE_SPACE_BOOL
+ ? RoundUp(params.base_alignment, allocate_page_size)
+ : allocate_page_size;
params.requested_start_hint =
- GetCodeRangeAddressHint()->GetAddressHint(requested, allocate_page_size);
+ GetCodeRangeAddressHint()->GetAddressHint(requested, hint_alignment);
if (!VirtualMemoryCage::InitReservation(params)) return false;
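
When the external code space is enabled, the hint passed to GetAddressHint()
must honour the cage's base alignment as well as the allocator page size,
hence the RoundUp above. A small self-contained sketch of that alignment
arithmetic, with made-up sizes:

  #include <cassert>
  #include <cstddef>
  #include <cstdio>

  // Round value up to the next multiple of a power-of-two alignment.
  constexpr size_t RoundUp(size_t value, size_t alignment) {
    return (value + alignment - 1) & ~(alignment - 1);
  }

  int main() {
    const size_t allocate_page_size = 64 * 1024;  // hypothetical allocator page
    const size_t base_alignment = 1u << 20;       // hypothetical cage alignment
    // Mirrors the hint_alignment computation: satisfy the stricter requirement
    // by rounding the base alignment up to whole allocator pages.
    const size_t hint_alignment = RoundUp(base_alignment, allocate_page_size);
    assert(hint_alignment % allocate_page_size == 0);
    assert(hint_alignment >= base_alignment);
    std::printf("hint alignment: %zu bytes\n", hint_alignment);
  }
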
diff --git a/deps/v8/src/heap/code-range.h b/deps/v8/src/heap/code-range.h
index 10e0bd5718..4fcea5f26f 100644
--- a/deps/v8/src/heap/code-range.h
+++ b/deps/v8/src/heap/code-range.h
@@ -21,7 +21,7 @@ namespace internal {
class CodeRangeAddressHint {
public:
// When near code range is enabled, an address within
- // kShortBuiltinCallsBoundary to the embedded blob is returned if
+ // kMaxPCRelativeCodeRangeInMB to the embedded blob is returned if
// there is enough space. Otherwise a random address is returned.
// When near code range is disabled, returns the most recently freed code
// range start address for the given size. If there is no such entry, then a
diff --git a/deps/v8/src/heap/code-stats.cc b/deps/v8/src/heap/code-stats.cc
index abca2c75f9..1fff3e9484 100644
--- a/deps/v8/src/heap/code-stats.cc
+++ b/deps/v8/src/heap/code-stats.cc
@@ -17,21 +17,22 @@ namespace internal {
// Record code statistics.
void CodeStatistics::RecordCodeAndMetadataStatistics(HeapObject object,
Isolate* isolate) {
- if (object.IsScript()) {
+ PtrComprCageBase cage_base(isolate);
+ if (object.IsScript(cage_base)) {
Script script = Script::cast(object);
// Log the size of external source code.
- Object source = script.source();
- if (source.IsExternalString()) {
+ Object source = script.source(cage_base);
+ if (source.IsExternalString(cage_base)) {
ExternalString external_source_string = ExternalString::cast(source);
int size = isolate->external_script_source_size();
size += external_source_string.ExternalPayloadSize();
isolate->set_external_script_source_size(size);
}
- } else if (object.IsAbstractCode()) {
+ } else if (object.IsAbstractCode(cage_base)) {
// Record code+metadata statistics.
AbstractCode abstract_code = AbstractCode::cast(object);
int size = abstract_code.SizeIncludingMetadata();
- if (abstract_code.IsCode()) {
+ if (abstract_code.IsCode(cage_base)) {
size += isolate->code_and_metadata_size();
isolate->set_code_and_metadata_size(size);
} else {
@@ -42,7 +43,7 @@ void CodeStatistics::RecordCodeAndMetadataStatistics(HeapObject object,
#ifdef DEBUG
// Record code kind and code comment statistics.
isolate->code_kind_statistics()[static_cast<int>(abstract_code.kind())] +=
- abstract_code.Size();
+ abstract_code.Size(cage_base);
CodeStatistics::CollectCodeCommentStatistics(object, isolate);
#endif
}
diff --git a/deps/v8/src/heap/collection-barrier.cc b/deps/v8/src/heap/collection-barrier.cc
index 3a1a1e5947..3cf8f41c43 100644
--- a/deps/v8/src/heap/collection-barrier.cc
+++ b/deps/v8/src/heap/collection-barrier.cc
@@ -22,14 +22,17 @@ bool CollectionBarrier::WasGCRequested() {
return collection_requested_.load();
}
-void CollectionBarrier::RequestGC() {
+bool CollectionBarrier::TryRequestGC() {
base::MutexGuard guard(&mutex_);
+ if (shutdown_requested_) return false;
bool was_already_requested = collection_requested_.exchange(true);
if (!was_already_requested) {
CHECK(!timer_.IsStarted());
timer_.Start();
}
+
+ return true;
}
class BackgroundCollectionInterruptTask : public CancelableTask {
@@ -59,8 +62,19 @@ void CollectionBarrier::NotifyShutdownRequested() {
void CollectionBarrier::ResumeThreadsAwaitingCollection() {
base::MutexGuard guard(&mutex_);
+ DCHECK(!timer_.IsStarted());
+ collection_requested_.store(false);
+ block_for_collection_ = false;
+ collection_performed_ = true;
+ cv_wakeup_.NotifyAll();
+}
+
+void CollectionBarrier::CancelCollectionAndResumeThreads() {
+ base::MutexGuard guard(&mutex_);
+ if (timer_.IsStarted()) timer_.Stop();
collection_requested_.store(false);
block_for_collection_ = false;
+ collection_performed_ = false;
cv_wakeup_.NotifyAll();
}
@@ -72,6 +86,10 @@ bool CollectionBarrier::AwaitCollectionBackground(LocalHeap* local_heap) {
// set before the next GC.
base::MutexGuard guard(&mutex_);
if (shutdown_requested_) return false;
+
+ // Collection was cancelled by the main thread.
+ if (!collection_requested_.load()) return false;
+
first_thread = !block_for_collection_;
block_for_collection_ = true;
CHECK(timer_.IsStarted());
@@ -88,7 +106,8 @@ bool CollectionBarrier::AwaitCollectionBackground(LocalHeap* local_heap) {
cv_wakeup_.Wait(&mutex_);
}
- return true;
+ // Collection may have been cancelled while blocking for it.
+ return collection_performed_;
}
void CollectionBarrier::ActivateStackGuardAndPostTask() {
diff --git a/deps/v8/src/heap/collection-barrier.h b/deps/v8/src/heap/collection-barrier.h
index ee7fd33ad1..fd894324a6 100644
--- a/deps/v8/src/heap/collection-barrier.h
+++ b/deps/v8/src/heap/collection-barrier.h
@@ -27,8 +27,10 @@ class CollectionBarrier {
// Returns true when collection was requested.
bool WasGCRequested();
- // Requests a GC from the main thread.
- void RequestGC();
+ // Requests a GC from the main thread. Returns whether GC was successfully
+ // requested. Requesting a GC can fail when isolate shutdown was already
+ // initiated.
+ bool TryRequestGC();
// Resumes all threads waiting for GC when tear down starts.
void NotifyShutdownRequested();
@@ -39,7 +41,11 @@ class CollectionBarrier {
// Resumes threads waiting for collection.
void ResumeThreadsAwaitingCollection();
+ // Cancels collection if one was requested and resumes threads waiting for GC.
+ void CancelCollectionAndResumeThreads();
+
// This is the method use by background threads to request and wait for GC.
+ // Returns whether a GC was performed.
bool AwaitCollectionBackground(LocalHeap* local_heap);
private:
@@ -50,8 +56,21 @@ class CollectionBarrier {
base::Mutex mutex_;
base::ConditionVariable cv_wakeup_;
base::ElapsedTimer timer_;
+
+ // Flag that the main thread checks to find out whether a GC was requested by
+ // a background thread.
std::atomic<bool> collection_requested_{false};
+
+ // This flag is used to detect whether to block for the GC. Only set if the
+ // main thread was actually running and is unset when GC resumes background
+ // threads.
bool block_for_collection_ = false;
+
+ // Set to true when a GC was performed, false in case it was canceled because
+ // the main thread parked itself without running the GC.
+ bool collection_performed_ = false;
+
+ // Will be set as soon as Isolate starts tear down.
bool shutdown_requested_ = false;
};
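
Taken together, TryRequestGC(), CancelCollectionAndResumeThreads() and the new
collection_performed_ flag form a small request/cancel protocol between
background threads and the main thread. A simplified, self-contained sketch of
that protocol using std::mutex and std::condition_variable (the class and
method names are stand-ins, not the V8 types):

  #include <condition_variable>
  #include <mutex>

  class MiniCollectionBarrier {
   public:
    // Background thread asks the main thread for a GC; fails after shutdown.
    bool TryRequestGC() {
      std::lock_guard<std::mutex> guard(mutex_);
      if (shutdown_requested_) return false;
      collection_requested_ = true;
      return true;
    }

    // Background thread blocks until the main thread performed or cancelled
    // the GC. Returns whether a GC was actually performed.
    bool AwaitCollection() {
      std::unique_lock<std::mutex> lock(mutex_);
      if (shutdown_requested_ || !collection_requested_) return false;
      block_for_collection_ = true;
      cv_.wait(lock, [this] {
        return !block_for_collection_ || shutdown_requested_;
      });
      return collection_performed_;
    }

    // Main thread: GC finished (performed == true) or was cancelled (false).
    void ResumeThreadsAfterCollection(bool performed) {
      std::lock_guard<std::mutex> guard(mutex_);
      collection_requested_ = false;
      block_for_collection_ = false;
      collection_performed_ = performed;
      cv_.notify_all();
    }

    void NotifyShutdown() {
      std::lock_guard<std::mutex> guard(mutex_);
      shutdown_requested_ = true;
      cv_.notify_all();
    }

   private:
    std::mutex mutex_;
    std::condition_variable cv_;
    bool collection_requested_ = false;
    bool block_for_collection_ = false;
    bool collection_performed_ = false;
    bool shutdown_requested_ = false;
  };

A background thread would call TryRequestGC() and, on success, AwaitCollection();
the main thread either performs the GC and resumes with true, or cancels and
resumes with false.
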
diff --git a/deps/v8/src/heap/concurrent-allocator.cc b/deps/v8/src/heap/concurrent-allocator.cc
index 6f4bd625c6..bfdfaea7fe 100644
--- a/deps/v8/src/heap/concurrent-allocator.cc
+++ b/deps/v8/src/heap/concurrent-allocator.cc
@@ -36,7 +36,7 @@ void StressConcurrentAllocatorTask::RunInternal() {
AllocationResult result = local_heap.AllocateRaw(
kSmallObjectSize, AllocationType::kOld, AllocationOrigin::kRuntime,
- AllocationAlignment::kWordAligned);
+ AllocationAlignment::kTaggedAligned);
if (!result.IsRetry()) {
heap->CreateFillerObjectAtBackground(
result.ToAddress(), kSmallObjectSize,
@@ -47,7 +47,7 @@ void StressConcurrentAllocatorTask::RunInternal() {
result = local_heap.AllocateRaw(kMediumObjectSize, AllocationType::kOld,
AllocationOrigin::kRuntime,
- AllocationAlignment::kWordAligned);
+ AllocationAlignment::kTaggedAligned);
if (!result.IsRetry()) {
heap->CreateFillerObjectAtBackground(
result.ToAddress(), kMediumObjectSize,
@@ -58,7 +58,7 @@ void StressConcurrentAllocatorTask::RunInternal() {
result = local_heap.AllocateRaw(kLargeObjectSize, AllocationType::kOld,
AllocationOrigin::kRuntime,
- AllocationAlignment::kWordAligned);
+ AllocationAlignment::kTaggedAligned);
if (!result.IsRetry()) {
heap->CreateFillerObjectAtBackground(
result.ToAddress(), kLargeObjectSize,
@@ -81,10 +81,22 @@ void StressConcurrentAllocatorTask::Schedule(Isolate* isolate) {
}
void ConcurrentAllocator::FreeLinearAllocationArea() {
+ // The code page of the linear allocation area needs to be unprotected
+ // because we are going to write a filler into that memory area below.
+ base::Optional<CodePageMemoryModificationScope> optional_scope;
+ if (lab_.IsValid() && space_->identity() == CODE_SPACE) {
+ optional_scope.emplace(MemoryChunk::FromAddress(lab_.top()));
+ }
lab_.CloseAndMakeIterable();
}
void ConcurrentAllocator::MakeLinearAllocationAreaIterable() {
+ // The code page of the linear allocation area needs to be unprotected
+ // because we are going to write a filler into that memory area below.
+ base::Optional<CodePageMemoryModificationScope> optional_scope;
+ if (lab_.IsValid() && space_->identity() == CODE_SPACE) {
+ optional_scope.emplace(MemoryChunk::FromAddress(lab_.top()));
+ }
lab_.MakeIterable();
}
@@ -110,7 +122,7 @@ void ConcurrentAllocator::UnmarkLinearAllocationArea() {
AllocationResult ConcurrentAllocator::AllocateInLabSlow(
int object_size, AllocationAlignment alignment, AllocationOrigin origin) {
if (!EnsureLab(origin)) {
- return AllocationResult::Retry(OLD_SPACE);
+ return AllocationResult::Retry(space_->identity());
}
AllocationResult allocation = lab_.AllocateRawAligned(object_size, alignment);
@@ -121,10 +133,10 @@ AllocationResult ConcurrentAllocator::AllocateInLabSlow(
bool ConcurrentAllocator::EnsureLab(AllocationOrigin origin) {
auto result = space_->RawRefillLabBackground(
- local_heap_, kLabSize, kMaxLabSize, kWordAligned, origin);
+ local_heap_, kLabSize, kMaxLabSize, kTaggedAligned, origin);
if (!result) return false;
- if (local_heap_->heap()->incremental_marking()->black_allocation()) {
+ if (IsBlackAllocationEnabled()) {
Address top = result->first;
Address limit = top + result->second;
Page::FromAllocationAreaAddress(top)->CreateBlackAreaBackground(top, limit);
@@ -145,17 +157,23 @@ AllocationResult ConcurrentAllocator::AllocateOutsideLab(
int object_size, AllocationAlignment alignment, AllocationOrigin origin) {
auto result = space_->RawRefillLabBackground(local_heap_, object_size,
object_size, alignment, origin);
- if (!result) return AllocationResult::Retry(OLD_SPACE);
+ if (!result) return AllocationResult::Retry(space_->identity());
HeapObject object = HeapObject::FromAddress(result->first);
- if (local_heap_->heap()->incremental_marking()->black_allocation()) {
- local_heap_->heap()->incremental_marking()->MarkBlackBackground(
- object, object_size);
+ if (IsBlackAllocationEnabled()) {
+ owning_heap()->incremental_marking()->MarkBlackBackground(object,
+ object_size);
}
return AllocationResult(object);
}
+bool ConcurrentAllocator::IsBlackAllocationEnabled() const {
+ return owning_heap()->incremental_marking()->black_allocation();
+}
+
+Heap* ConcurrentAllocator::owning_heap() const { return space_->heap(); }
+
} // namespace internal
} // namespace v8
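
The unprotect-before-write fix above relies on the usual "conditionally entered
RAII scope" idiom: a base::Optional<CodePageMemoryModificationScope> that is
only emplace()d when the LAB actually sits on a code page. A generic,
self-contained sketch of the idiom with std::optional (WriteScope is an
invented stand-in, not a V8 class):

  #include <cstdio>
  #include <optional>

  // Stand-in RAII scope: pretend it makes a memory region writable while alive.
  class WriteScope {
   public:
    explicit WriteScope(const char* what) : what_(what) {
      std::printf("unprotect %s\n", what_);
    }
    ~WriteScope() { std::printf("reprotect %s\n", what_); }

   private:
    const char* what_;
  };

  void WriteFiller(bool is_code_page) {
    // Only pay for the scope when it is actually needed; otherwise the optional
    // stays empty and nothing is constructed or destroyed.
    std::optional<WriteScope> optional_scope;
    if (is_code_page) optional_scope.emplace("code page");
    std::printf("write filler object\n");
  }  // If engaged, optional_scope reprotects here, after the write.

  int main() {
    WriteFiller(false);  // regular page: no scope entered
    WriteFiller(true);   // code page: unprotect, write, reprotect
  }
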
diff --git a/deps/v8/src/heap/concurrent-allocator.h b/deps/v8/src/heap/concurrent-allocator.h
index fe6144eb7e..bf596cf6de 100644
--- a/deps/v8/src/heap/concurrent-allocator.h
+++ b/deps/v8/src/heap/concurrent-allocator.h
@@ -63,6 +63,12 @@ class ConcurrentAllocator {
V8_EXPORT_PRIVATE AllocationResult AllocateOutsideLab(
int object_size, AllocationAlignment alignment, AllocationOrigin origin);
+ bool IsBlackAllocationEnabled() const;
+
+ // Returns the Heap of space_. This might differ from the LocalHeap's Heap for
+ // shared spaces.
+ Heap* owning_heap() const;
+
LocalHeap* const local_heap_;
PagedSpace* const space_;
LocalAllocationBuffer lab_;
diff --git a/deps/v8/src/heap/concurrent-marking.cc b/deps/v8/src/heap/concurrent-marking.cc
index fc19fe3f0d..f806c4eca6 100644
--- a/deps/v8/src/heap/concurrent-marking.cc
+++ b/deps/v8/src/heap/concurrent-marking.cc
@@ -23,7 +23,7 @@
#include "src/heap/memory-measurement.h"
#include "src/heap/objects-visiting-inl.h"
#include "src/heap/objects-visiting.h"
-#include "src/heap/worklist.h"
+#include "src/heap/weak-object-worklists.h"
#include "src/init/v8.h"
#include "src/objects/data-handler-inl.h"
#include "src/objects/embedder-data-array-inl.h"
@@ -41,8 +41,9 @@ namespace internal {
class ConcurrentMarkingState final
: public MarkingStateBase<ConcurrentMarkingState, AccessMode::ATOMIC> {
public:
- explicit ConcurrentMarkingState(MemoryChunkDataMap* memory_chunk_data)
- : memory_chunk_data_(memory_chunk_data) {}
+ ConcurrentMarkingState(PtrComprCageBase cage_base,
+ MemoryChunkDataMap* memory_chunk_data)
+ : MarkingStateBase(cage_base), memory_chunk_data_(memory_chunk_data) {}
ConcurrentBitmap<AccessMode::ATOMIC>* bitmap(const BasicMemoryChunk* chunk) {
return chunk->marking_bitmap<AccessMode::ATOMIC>();
@@ -85,17 +86,17 @@ class ConcurrentMarkingVisitor final
public:
ConcurrentMarkingVisitor(int task_id,
MarkingWorklists::Local* local_marking_worklists,
- WeakObjects* weak_objects, Heap* heap,
+ WeakObjects::Local* local_weak_objects, Heap* heap,
unsigned mark_compact_epoch,
base::EnumSet<CodeFlushMode> code_flush_mode,
bool embedder_tracing_enabled,
bool should_keep_ages_unchanged,
MemoryChunkDataMap* memory_chunk_data)
- : MarkingVisitorBase(task_id, local_marking_worklists, weak_objects, heap,
+ : MarkingVisitorBase(local_marking_worklists, local_weak_objects, heap,
mark_compact_epoch, code_flush_mode,
embedder_tracing_enabled,
should_keep_ages_unchanged),
- marking_state_(memory_chunk_data),
+ marking_state_(heap->isolate(), memory_chunk_data),
memory_chunk_data_(memory_chunk_data) {}
template <typename T>
@@ -119,12 +120,19 @@ class ConcurrentMarkingVisitor final
int VisitWasmInstanceObject(Map map, WasmInstanceObject object) {
return VisitJSObjectSubclass(map, object);
}
+ int VisitWasmSuspenderObject(Map map, WasmSuspenderObject object) {
+ return VisitJSObjectSubclass(map, object);
+ }
#endif // V8_ENABLE_WEBASSEMBLY
int VisitJSWeakCollection(Map map, JSWeakCollection object) {
return VisitJSObjectSubclass(map, object);
}
+ int VisitJSFinalizationRegistry(Map map, JSFinalizationRegistry object) {
+ return VisitJSObjectSubclass(map, object);
+ }
+
int VisitConsString(Map map, ConsString object) {
return VisitFullyWithSnapshot(map, object);
}
@@ -159,7 +167,7 @@ class ConcurrentMarkingVisitor final
}
} else if (marking_state_.IsWhite(value)) {
- weak_objects_->next_ephemerons.Push(task_id_, Ephemeron{key, value});
+ local_weak_objects_->next_ephemerons_local.Push(Ephemeron{key, value});
}
return false;
}
@@ -169,6 +177,10 @@ class ConcurrentMarkingVisitor final
return marking_state_.GreyToBlack(object);
}
+ bool ShouldVisitUnaccounted(HeapObject object) {
+ return marking_state_.GreyToBlackUnaccounted(object);
+ }
+
private:
// Helper class for collecting in-object slot addresses and values.
class SlotSnapshottingVisitor final : public ObjectVisitorWithCageBases {
@@ -204,19 +216,21 @@ class ConcurrentMarkingVisitor final
void VisitCodeTarget(Code host, RelocInfo* rinfo) final {
// This should never happen, because snapshotting is performed only on
- // JSObjects (and derived classes).
+ // some String subclasses.
UNREACHABLE();
}
void VisitEmbeddedPointer(Code host, RelocInfo* rinfo) final {
// This should never happen, because snapshotting is performed only on
- // JSObjects (and derived classes).
+ // some String subclasses.
UNREACHABLE();
}
void VisitCustomWeakPointers(HeapObject host, ObjectSlot start,
ObjectSlot end) override {
- DCHECK(host.IsWeakCell() || host.IsJSWeakRef());
+ // This should never happen, because snapshotting is performed only on
+ // some String subclasses.
+ UNREACHABLE();
}
private:
@@ -248,11 +262,15 @@ class ConcurrentMarkingVisitor final
// The length() function checks that the length is a Smi.
// This is not necessarily the case if the array is being left-trimmed.
Object length = object.unchecked_length(kAcquireLoad);
- if (!ShouldVisit(object)) return 0;
+ // No accounting here to avoid re-reading the length, which could already
+ // contain a non-Smi value when left-trimming happens concurrently.
+ if (!ShouldVisitUnaccounted(object)) return 0;
// The cached length must be the actual length as the array is not black.
// Left trimming marks the array black before over-writing the length.
DCHECK(length.IsSmi());
int size = T::SizeFor(Smi::ToInt(length));
+ marking_state_.IncrementLiveBytes(MemoryChunk::FromHeapObject(object),
+ size);
VisitMapPointer(object);
T::BodyDescriptor::IterateBody(map, object, size, this);
return size;
@@ -419,10 +437,6 @@ ConcurrentMarking::ConcurrentMarking(Heap* heap,
: heap_(heap),
marking_worklists_(marking_worklists),
weak_objects_(weak_objects) {
-#ifndef V8_ATOMIC_MARKING_STATE
- // Concurrent and parallel marking require atomic marking state.
- CHECK(!FLAG_concurrent_marking && !FLAG_parallel_marking);
-#endif
#ifndef V8_ATOMIC_OBJECT_FIELD_WRITES
// Concurrent marking requires atomic object field writes.
CHECK(!FLAG_concurrent_marking);
@@ -438,8 +452,9 @@ void ConcurrentMarking::Run(JobDelegate* delegate,
uint8_t task_id = delegate->GetTaskId() + 1;
TaskState* task_state = &task_state_[task_id];
MarkingWorklists::Local local_marking_worklists(marking_worklists_);
+ WeakObjects::Local local_weak_objects(weak_objects_);
ConcurrentMarkingVisitor visitor(
- task_id, &local_marking_worklists, weak_objects_, heap_,
+ task_id, &local_marking_worklists, &local_weak_objects, heap_,
mark_compact_epoch, code_flush_mode,
heap_->local_embedder_heap_tracer()->InUse(), should_keep_ages_unchanged,
&task_state->memory_chunk_data);
@@ -460,8 +475,7 @@ void ConcurrentMarking::Run(JobDelegate* delegate,
{
Ephemeron ephemeron;
-
- while (weak_objects_->current_ephemerons.Pop(task_id, &ephemeron)) {
+ while (local_weak_objects.current_ephemerons_local.Pop(&ephemeron)) {
if (visitor.ProcessEphemeron(ephemeron.key, ephemeron.value)) {
another_ephemeron_iteration = true;
}
@@ -529,8 +543,7 @@ void ConcurrentMarking::Run(JobDelegate* delegate,
if (done) {
Ephemeron ephemeron;
-
- while (weak_objects_->discovered_ephemerons.Pop(task_id, &ephemeron)) {
+ while (local_weak_objects.discovered_ephemerons_local.Pop(&ephemeron)) {
if (visitor.ProcessEphemeron(ephemeron.key, ephemeron.value)) {
another_ephemeron_iteration = true;
}
@@ -538,18 +551,7 @@ void ConcurrentMarking::Run(JobDelegate* delegate,
}
local_marking_worklists.Publish();
- weak_objects_->transition_arrays.FlushToGlobal(task_id);
- weak_objects_->ephemeron_hash_tables.FlushToGlobal(task_id);
- weak_objects_->current_ephemerons.FlushToGlobal(task_id);
- weak_objects_->next_ephemerons.FlushToGlobal(task_id);
- weak_objects_->discovered_ephemerons.FlushToGlobal(task_id);
- weak_objects_->weak_references.FlushToGlobal(task_id);
- weak_objects_->js_weak_refs.FlushToGlobal(task_id);
- weak_objects_->weak_cells.FlushToGlobal(task_id);
- weak_objects_->weak_objects_in_code.FlushToGlobal(task_id);
- weak_objects_->code_flushing_candidates.FlushToGlobal(task_id);
- weak_objects_->baseline_flushing_candidates.FlushToGlobal(task_id);
- weak_objects_->flushed_js_functions.FlushToGlobal(task_id);
+ local_weak_objects.Publish();
base::AsAtomicWord::Relaxed_Store<size_t>(&task_state->marked_bytes, 0);
total_marked_bytes_ += marked_bytes;
@@ -570,10 +572,10 @@ size_t ConcurrentMarking::GetMaxConcurrency(size_t worker_count) {
marking_items += worklist.worklist->Size();
return std::min<size_t>(
kMaxTasks,
- worker_count + std::max<size_t>(
- {marking_items,
- weak_objects_->discovered_ephemerons.GlobalPoolSize(),
- weak_objects_->current_ephemerons.GlobalPoolSize()}));
+ worker_count +
+ std::max<size_t>({marking_items,
+ weak_objects_->discovered_ephemerons.Size(),
+ weak_objects_->current_ephemerons.Size()}));
}
void ConcurrentMarking::ScheduleJob(TaskPriority priority) {
@@ -594,8 +596,8 @@ void ConcurrentMarking::RescheduleJobIfNeeded(TaskPriority priority) {
if (heap_->IsTearingDown()) return;
if (marking_worklists_->shared()->IsEmpty() &&
- weak_objects_->current_ephemerons.IsGlobalPoolEmpty() &&
- weak_objects_->discovered_ephemerons.IsGlobalPoolEmpty()) {
+ weak_objects_->current_ephemerons.IsEmpty() &&
+ weak_objects_->discovered_ephemerons.IsEmpty()) {
return;
}
if (!job_handle_ || !job_handle_->IsValid()) {
diff --git a/deps/v8/src/heap/concurrent-marking.h b/deps/v8/src/heap/concurrent-marking.h
index 12ee70da56..caba9450b5 100644
--- a/deps/v8/src/heap/concurrent-marking.h
+++ b/deps/v8/src/heap/concurrent-marking.h
@@ -16,7 +16,6 @@
#include "src/heap/memory-measurement.h"
#include "src/heap/slot-set.h"
#include "src/heap/spaces.h"
-#include "src/heap/worklist.h"
#include "src/init/v8.h"
#include "src/tasks/cancelable-task.h"
#include "src/utils/allocation.h"
diff --git a/deps/v8/src/heap/cppgc-js/cpp-heap.cc b/deps/v8/src/heap/cppgc-js/cpp-heap.cc
index 6a7173a478..15737881ef 100644
--- a/deps/v8/src/heap/cppgc-js/cpp-heap.cc
+++ b/deps/v8/src/heap/cppgc-js/cpp-heap.cc
@@ -10,6 +10,7 @@
#include "include/cppgc/heap-consistency.h"
#include "include/cppgc/platform.h"
+#include "include/v8-isolate.h"
#include "include/v8-local-handle.h"
#include "include/v8-platform.h"
#include "src/base/logging.h"
@@ -18,6 +19,7 @@
#include "src/base/platform/time.h"
#include "src/execution/isolate-inl.h"
#include "src/flags/flags.h"
+#include "src/handles/global-handles.h"
#include "src/handles/handles.h"
#include "src/heap/base/stack.h"
#include "src/heap/cppgc-js/cpp-snapshot.h"
@@ -220,10 +222,8 @@ void UnifiedHeapMarker::AddObject(void* object) {
void FatalOutOfMemoryHandlerImpl(const std::string& reason,
const SourceLocation&, HeapBase* heap) {
- FatalProcessOutOfMemory(
- reinterpret_cast<v8::internal::Isolate*>(
- static_cast<v8::internal::CppHeap*>(heap)->isolate()),
- reason.c_str());
+ FatalProcessOutOfMemory(static_cast<v8::internal::CppHeap*>(heap)->isolate(),
+ reason.c_str());
}
} // namespace
@@ -330,7 +330,9 @@ CppHeap::CppHeap(
: cppgc::internal::HeapBase(
std::make_shared<CppgcPlatformAdapter>(platform), custom_spaces,
cppgc::internal::HeapBase::StackSupport::
- kSupportsConservativeStackScan),
+ kSupportsConservativeStackScan,
+ cppgc::internal::HeapBase::MarkingType::kIncrementalAndConcurrent,
+ cppgc::internal::HeapBase::SweepingType::kIncrementalAndConcurrent),
wrapper_descriptor_(wrapper_descriptor) {
CHECK_NE(WrapperDescriptor::kUnknownEmbedderId,
wrapper_descriptor_.embedder_id_for_garbage_collected);
@@ -363,11 +365,8 @@ void CppHeap::AttachIsolate(Isolate* isolate) {
isolate_->heap_profiler()->AddBuildEmbedderGraphCallback(
&CppGraphBuilder::Run, this);
}
- isolate_->heap()->SetEmbedderHeapTracer(this);
- isolate_->heap()->local_embedder_heap_tracer()->SetWrapperDescriptor(
- wrapper_descriptor_);
SetMetricRecorder(std::make_unique<MetricRecorderAdapter>(*this));
- SetStackStart(base::Stack::GetStackStart());
+ isolate_->global_handles()->SetStackStart(base::Stack::GetStackStart());
oom_handler().SetCustomHandler(&FatalOutOfMemoryHandlerImpl);
no_gc_scope_--;
}
@@ -379,17 +378,20 @@ void CppHeap::DetachIsolate() {
// Delegate to existing EmbedderHeapTracer API to finish any ongoing garbage
// collection.
- FinalizeTracing();
+ if (isolate_->heap()->incremental_marking()->IsMarking()) {
+ isolate_->heap()->FinalizeIncrementalMarkingAtomically(
+ i::GarbageCollectionReason::kExternalFinalize);
+ }
sweeper_.FinishIfRunning();
- if (isolate_->heap_profiler()) {
- isolate_->heap_profiler()->RemoveBuildEmbedderGraphCallback(
- &CppGraphBuilder::Run, this);
+ auto* heap_profiler = isolate_->heap_profiler();
+ if (heap_profiler) {
+ heap_profiler->RemoveBuildEmbedderGraphCallback(&CppGraphBuilder::Run,
+ this);
}
SetMetricRecorder(nullptr);
isolate_ = nullptr;
// Any future garbage collections will ignore the V8->C++ references.
- isolate()->SetEmbedderHeapTracer(nullptr);
oom_handler().SetCustomHandler(nullptr);
// Enter no GC scope.
no_gc_scope_++;
@@ -408,36 +410,42 @@ void CppHeap::RegisterV8References(
namespace {
-bool ShouldReduceMemory(CppHeap::TraceFlags flags) {
- return (flags == CppHeap::TraceFlags::kReduceMemory) ||
- (flags == CppHeap::TraceFlags::kForced);
+bool IsMemoryReducingGC(CppHeap::GarbageCollectionFlags flags) {
+ return flags & CppHeap::GarbageCollectionFlagValues::kReduceMemory;
}
-} // namespace
+bool IsForceGC(CppHeap::GarbageCollectionFlags flags) {
+ return flags & CppHeap::GarbageCollectionFlagValues::kForced;
+}
-void CppHeap::TracePrologue(TraceFlags flags) {
+bool ShouldReduceMemory(CppHeap::GarbageCollectionFlags flags) {
+ return IsMemoryReducingGC(flags) || IsForceGC(flags);
+}
+
+} // namespace
+void CppHeap::TracePrologue(GarbageCollectionFlags gc_flags) {
CHECK(!sweeper_.IsSweepingInProgress());
#if defined(CPPGC_YOUNG_GENERATION)
cppgc::internal::SequentialUnmarker unmarker(raw_heap());
#endif // defined(CPPGC_YOUNG_GENERATION)
- current_flags_ = flags;
+ current_gc_flags_ = gc_flags;
+
const UnifiedHeapMarker::MarkingConfig marking_config{
UnifiedHeapMarker::MarkingConfig::CollectionType::kMajor,
cppgc::Heap::StackState::kNoHeapPointers,
- ((current_flags_ & TraceFlags::kForced) &&
- !force_incremental_marking_for_testing_)
+ (IsForceGC(current_gc_flags_) && !force_incremental_marking_for_testing_)
? UnifiedHeapMarker::MarkingConfig::MarkingType::kAtomic
: UnifiedHeapMarker::MarkingConfig::MarkingType::
kIncrementalAndConcurrent,
- flags & TraceFlags::kForced
+ IsForceGC(current_gc_flags_)
? UnifiedHeapMarker::MarkingConfig::IsForcedGC::kForced
: UnifiedHeapMarker::MarkingConfig::IsForcedGC::kNotForced};
DCHECK_IMPLIES(!isolate_, (cppgc::Heap::MarkingType::kAtomic ==
marking_config.marking_type) ||
force_incremental_marking_for_testing_);
- if (ShouldReduceMemory(flags)) {
+ if (ShouldReduceMemory(current_gc_flags_)) {
// Only enable compaction when in a memory reduction garbage collection as
// it may significantly increase the final garbage collection pause.
compactor_.InitializeIfShouldCompact(marking_config.marking_type,
@@ -450,7 +458,7 @@ void CppHeap::TracePrologue(TraceFlags flags) {
marking_done_ = false;
}
-bool CppHeap::AdvanceTracing(double deadline_in_ms) {
+bool CppHeap::AdvanceTracing(double max_duration) {
is_in_v8_marking_step_ = true;
cppgc::internal::StatsCollector::EnabledScope stats_scope(
stats_collector(),
@@ -458,7 +466,7 @@ bool CppHeap::AdvanceTracing(double deadline_in_ms) {
: cppgc::internal::StatsCollector::kIncrementalMark);
const v8::base::TimeDelta deadline =
in_atomic_pause_ ? v8::base::TimeDelta::Max()
- : v8::base::TimeDelta::FromMillisecondsD(deadline_in_ms);
+ : v8::base::TimeDelta::FromMillisecondsD(max_duration);
const size_t marked_bytes_limit = in_atomic_pause_ ? SIZE_MAX : 0;
DCHECK_NOT_NULL(marker_);
// TODO(chromium:1056170): Replace when unified heap transitions to
@@ -472,20 +480,18 @@ bool CppHeap::AdvanceTracing(double deadline_in_ms) {
bool CppHeap::IsTracingDone() { return marking_done_; }
-void CppHeap::EnterFinalPause(EmbedderStackState stack_state) {
+void CppHeap::EnterFinalPause(cppgc::EmbedderStackState stack_state) {
CHECK(!in_disallow_gc_scope());
in_atomic_pause_ = true;
if (override_stack_state_) {
stack_state = *override_stack_state_;
}
marker_->EnterAtomicPause(stack_state);
- if (compactor_.CancelIfShouldNotCompact(cppgc::Heap::MarkingType::kAtomic,
- stack_state)) {
- marker_->NotifyCompactionCancelled();
- }
+ compactor_.CancelIfShouldNotCompact(cppgc::Heap::MarkingType::kAtomic,
+ stack_state);
}
-void CppHeap::TraceEpilogue(TraceSummary* trace_summary) {
+void CppHeap::TraceEpilogue() {
CHECK(in_atomic_pause_);
CHECK(marking_done_);
{
@@ -523,12 +529,12 @@ void CppHeap::TraceEpilogue(TraceSummary* trace_summary) {
compactable_space_handling = compactor_.CompactSpacesIfEnabled();
const cppgc::internal::Sweeper::SweepingConfig sweeping_config{
// In case the GC was forced, also finalize sweeping right away.
- current_flags_ & TraceFlags::kForced
+ IsForceGC(current_gc_flags_)
? cppgc::internal::Sweeper::SweepingConfig::SweepingType::kAtomic
: cppgc::internal::Sweeper::SweepingConfig::SweepingType::
kIncrementalAndConcurrent,
compactable_space_handling,
- ShouldReduceMemory(current_flags_)
+ ShouldReduceMemory(current_gc_flags_)
? cppgc::internal::Sweeper::SweepingConfig::FreeMemoryHandling::
kDiscardWherePossible
: cppgc::internal::Sweeper::SweepingConfig::FreeMemoryHandling::
@@ -539,9 +545,6 @@ void CppHeap::TraceEpilogue(TraceSummary* trace_summary) {
sweeping_config.sweeping_type);
sweeper().Start(sweeping_config);
}
- DCHECK_NOT_NULL(trace_summary);
- trace_summary->allocated_size = SIZE_MAX;
- trace_summary->time = 0;
in_atomic_pause_ = false;
sweeper().NotifyDoneIfNeeded();
}
@@ -561,7 +564,7 @@ void CppHeap::ReportBufferedAllocationSizeIfPossible() {
// finalizations were not allowed.
// - Recursive sweeping.
// - GC forbidden scope.
- if (sweeper().IsSweepingOnMutatorThread() || in_no_gc_scope()) {
+ if (sweeper().IsSweepingOnMutatorThread() || in_no_gc_scope() || !isolate_) {
return;
}
@@ -571,10 +574,12 @@ void CppHeap::ReportBufferedAllocationSizeIfPossible() {
const int64_t bytes_to_report = buffered_allocated_bytes_;
buffered_allocated_bytes_ = 0;
+ auto* const tracer = isolate_->heap()->local_embedder_heap_tracer();
+ DCHECK_NOT_NULL(tracer);
if (bytes_to_report < 0) {
- DecreaseAllocatedSize(static_cast<size_t>(-bytes_to_report));
+ tracer->DecreaseAllocatedSize(static_cast<size_t>(-bytes_to_report));
} else {
- IncreaseAllocatedSize(static_cast<size_t>(bytes_to_report));
+ tracer->IncreaseAllocatedSize(static_cast<size_t>(bytes_to_report));
}
}
@@ -588,17 +593,16 @@ void CppHeap::CollectGarbageForTesting(
SetStackEndOfCurrentGC(v8::base::Stack::GetCurrentStackPosition());
if (isolate_) {
- // Go through EmbedderHeapTracer API and perform a unified heap collection.
- GarbageCollectionForTesting(stack_state);
+ reinterpret_cast<v8::Isolate*>(isolate_)
+ ->RequestGarbageCollectionForTesting(
+ v8::Isolate::kFullGarbageCollection, stack_state);
} else {
// Perform an atomic GC, with starting incremental/concurrent marking and
// immediately finalizing the garbage collection.
- if (!IsMarking()) TracePrologue(TraceFlags::kForced);
+ if (!IsMarking()) TracePrologue(GarbageCollectionFlagValues::kForced);
EnterFinalPause(stack_state);
AdvanceTracing(std::numeric_limits<double>::infinity());
- TraceSummary trace_summary;
- TraceEpilogue(&trace_summary);
- DCHECK_EQ(SIZE_MAX, trace_summary.allocated_size);
+ TraceEpilogue();
}
}
@@ -616,12 +620,12 @@ void CppHeap::StartIncrementalGarbageCollectionForTesting() {
DCHECK_NULL(isolate_);
if (IsMarking()) return;
force_incremental_marking_for_testing_ = true;
- TracePrologue(TraceFlags::kForced);
+ TracePrologue(GarbageCollectionFlagValues::kForced);
force_incremental_marking_for_testing_ = false;
}
void CppHeap::FinalizeIncrementalGarbageCollectionForTesting(
- EmbedderStackState stack_state) {
+ cppgc::EmbedderStackState stack_state) {
DCHECK(!in_no_gc_scope());
DCHECK_NULL(isolate_);
DCHECK(IsMarking());
diff --git a/deps/v8/src/heap/cppgc-js/cpp-heap.h b/deps/v8/src/heap/cppgc-js/cpp-heap.h
index a2d11bcd39..3f9e8d9ec7 100644
--- a/deps/v8/src/heap/cppgc-js/cpp-heap.h
+++ b/deps/v8/src/heap/cppgc-js/cpp-heap.h
@@ -12,8 +12,8 @@ static_assert(
#include "include/v8-callbacks.h"
#include "include/v8-cppgc.h"
-#include "include/v8-embedder-heap.h"
#include "include/v8-metrics.h"
+#include "src/base/flags.h"
#include "src/base/macros.h"
#include "src/heap/cppgc/heap-base.h"
#include "src/heap/cppgc/stats-collector.h"
@@ -29,9 +29,16 @@ namespace internal {
class V8_EXPORT_PRIVATE CppHeap final
: public cppgc::internal::HeapBase,
public v8::CppHeap,
- public v8::EmbedderHeapTracer,
public cppgc::internal::StatsCollector::AllocationObserver {
public:
+ enum GarbageCollectionFlagValues : uint8_t {
+ kNoFlags = 0,
+ kReduceMemory = 1 << 1,
+ kForced = 1 << 2,
+ };
+
+ using GarbageCollectionFlags = base::Flags<GarbageCollectionFlagValues>;
+
class MetricRecorderAdapter final : public cppgc::internal::MetricRecorder {
public:
static constexpr int kMaxBatchedEvents = 16;
@@ -106,14 +113,13 @@ class V8_EXPORT_PRIVATE CppHeap final
void FinishSweepingIfRunning();
- // v8::EmbedderHeapTracer interface.
void RegisterV8References(
- const std::vector<std::pair<void*, void*> >& embedder_fields) final;
- void TracePrologue(TraceFlags flags) final;
- bool AdvanceTracing(double deadline_in_ms) final;
- bool IsTracingDone() final;
- void TraceEpilogue(TraceSummary* trace_summary) final;
- void EnterFinalPause(EmbedderStackState stack_state) final;
+ const std::vector<std::pair<void*, void*>>& embedder_fields);
+ void TracePrologue(GarbageCollectionFlags);
+ bool AdvanceTracing(double max_duration);
+ bool IsTracingDone();
+ void TraceEpilogue();
+ void EnterFinalPause(cppgc::EmbedderStackState stack_state);
// StatsCollector::AllocationObserver interface.
void AllocatedObjectSizeIncreased(size_t) final;
@@ -122,6 +128,12 @@ class V8_EXPORT_PRIVATE CppHeap final
MetricRecorderAdapter* GetMetricRecorder() const;
+ v8::WrapperDescriptor wrapper_descriptor() const {
+ return wrapper_descriptor_;
+ }
+
+ Isolate* isolate() const { return isolate_; }
+
private:
void FinalizeIncrementalGarbageCollectionIfNeeded(
cppgc::Heap::StackState) final {
@@ -132,11 +144,12 @@ class V8_EXPORT_PRIVATE CppHeap final
void ReportBufferedAllocationSizeIfPossible();
void StartIncrementalGarbageCollectionForTesting() final;
- void FinalizeIncrementalGarbageCollectionForTesting(EmbedderStackState) final;
+ void FinalizeIncrementalGarbageCollectionForTesting(
+ cppgc::EmbedderStackState) final;
Isolate* isolate_ = nullptr;
bool marking_done_ = false;
- TraceFlags current_flags_ = TraceFlags::kNoFlags;
+ GarbageCollectionFlags current_gc_flags_;
// Buffered allocated bytes. Reporting allocated bytes to V8 can trigger a GC
// atomic pause. Allocated bytes are buffered in case this is temporarily
@@ -153,6 +166,8 @@ class V8_EXPORT_PRIVATE CppHeap final
friend class MetricRecorderAdapter;
};
+DEFINE_OPERATORS_FOR_FLAGS(CppHeap::GarbageCollectionFlags)
+
} // namespace internal
} // namespace v8
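
GarbageCollectionFlags turns the old single-valued TraceFlags into a real bit
set, so kForced and kReduceMemory can be combined and tested independently (see
IsForceGC()/IsMemoryReducingGC() in cpp-heap.cc above). A self-contained sketch
of the same flag-testing pattern, using a plain integer instead of
v8::base::Flags:

  #include <cstdint>

  enum GCFlagValues : uint8_t {
    kNoGCFlags = 0,
    kReduceMemory = 1 << 1,
    kForced = 1 << 2,
  };
  using GCFlags = uint8_t;  // stand-in for base::Flags<GCFlagValues>

  constexpr bool IsForced(GCFlags flags) { return flags & kForced; }
  constexpr bool IsMemoryReducing(GCFlags flags) { return flags & kReduceMemory; }
  // A forced GC also reduces memory, mirroring ShouldReduceMemory() above.
  constexpr bool ShouldReduceMemory(GCFlags flags) {
    return IsMemoryReducing(flags) || IsForced(flags);
  }

  static_assert(ShouldReduceMemory(kForced), "forced GCs also shrink memory");
  static_assert(ShouldReduceMemory(kForced | kReduceMemory), "flags combine");
  static_assert(!ShouldReduceMemory(kNoGCFlags), "default GCs keep memory");
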
diff --git a/deps/v8/src/heap/cppgc-js/cpp-snapshot.cc b/deps/v8/src/heap/cppgc-js/cpp-snapshot.cc
index 9b20b5c0a7..e1065376ea 100644
--- a/deps/v8/src/heap/cppgc-js/cpp-snapshot.cc
+++ b/deps/v8/src/heap/cppgc-js/cpp-snapshot.cc
@@ -463,7 +463,8 @@ class CppGraphBuilderImpl final {
void AddEdge(State& parent, const TracedReferenceBase& ref,
const std::string& edge_name) {
DCHECK(parent.IsVisibleNotDependent());
- v8::Local<v8::Value> v8_value = ref.Get(cpp_heap_.isolate());
+ v8::Local<v8::Value> v8_value =
+ ref.Get(reinterpret_cast<v8::Isolate*>(cpp_heap_.isolate()));
if (!v8_value.IsEmpty()) {
if (!parent.get_node()) {
parent.set_node(AddNode(*parent.header()));
@@ -836,7 +837,8 @@ void CppGraphBuilderImpl::VisitWeakContainerForVisibility(
void CppGraphBuilderImpl::VisitForVisibility(State& parent,
const TracedReferenceBase& ref) {
- v8::Local<v8::Value> v8_value = ref.Get(cpp_heap_.isolate());
+ v8::Local<v8::Value> v8_value =
+ ref.Get(reinterpret_cast<v8::Isolate*>(cpp_heap_.isolate()));
if (!v8_value.IsEmpty()) {
parent.MarkVisible();
}
diff --git a/deps/v8/src/heap/cppgc/caged-heap.h b/deps/v8/src/heap/cppgc/caged-heap.h
index 89b2f7f112..1d20bbcff3 100644
--- a/deps/v8/src/heap/cppgc/caged-heap.h
+++ b/deps/v8/src/heap/cppgc/caged-heap.h
@@ -5,6 +5,7 @@
#ifndef V8_HEAP_CPPGC_CAGED_HEAP_H_
#define V8_HEAP_CPPGC_CAGED_HEAP_H_
+#include <limits>
#include <memory>
#include "include/cppgc/platform.h"
@@ -22,7 +23,11 @@ class CagedHeap final {
public:
using AllocatorType = v8::base::BoundedPageAllocator;
- static uintptr_t OffsetFromAddress(const void* address) {
+ template <typename RetType = uintptr_t>
+ static RetType OffsetFromAddress(const void* address) {
+ static_assert(
+ std::numeric_limits<RetType>::max() >= (kCagedHeapReservationSize - 1),
+ "The return type should be large enough");
return reinterpret_cast<uintptr_t>(address) &
(kCagedHeapReservationAlignment - 1);
}
@@ -52,6 +57,8 @@ class CagedHeap final {
reserved_area_.address();
}
+ void* base() const { return reserved_area_.address(); }
+
private:
const VirtualMemory reserved_area_;
std::unique_ptr<AllocatorType> bounded_allocator_;
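
The templated OffsetFromAddress() lets callers request a narrower integer type
(uint32_t in the sweeper-metadata change further below) while the static_assert
guarantees the type can represent every in-cage offset. A self-contained sketch
of that pattern, assuming a 64-bit build and illustrative constants in place of
cppgc's kCagedHeapReservationSize/Alignment:

  #include <cstdint>
  #include <limits>

  constexpr uintptr_t kCagedHeapSize = static_cast<uintptr_t>(4) << 30;  // 4 GiB
  constexpr uintptr_t kCagedHeapAlignment = kCagedHeapSize;

  template <typename RetType = uintptr_t>
  RetType OffsetFromAddress(const void* address) {
    static_assert(std::numeric_limits<RetType>::max() >= (kCagedHeapSize - 1),
                  "The return type should be large enough");
    return static_cast<RetType>(reinterpret_cast<uintptr_t>(address) &
                                (kCagedHeapAlignment - 1));
  }

  // OffsetFromAddress<uint32_t>(ptr) compiles because every offset into a
  // 4 GiB cage fits in 32 bits; OffsetFromAddress<uint16_t>(ptr) would trip
  // the static_assert.
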
diff --git a/deps/v8/src/heap/cppgc/compactor.cc b/deps/v8/src/heap/cppgc/compactor.cc
index 91f30445a3..c300793515 100644
--- a/deps/v8/src/heap/cppgc/compactor.cc
+++ b/deps/v8/src/heap/cppgc/compactor.cc
@@ -474,6 +474,7 @@ void Compactor::InitializeIfShouldCompact(
compaction_worklists_ = std::make_unique<CompactionWorklists>();
is_enabled_ = true;
+ is_cancelled_ = false;
}
bool Compactor::CancelIfShouldNotCompact(
@@ -481,15 +482,16 @@ bool Compactor::CancelIfShouldNotCompact(
GarbageCollector::Config::StackState stack_state) {
if (!is_enabled_ || ShouldCompact(marking_type, stack_state)) return false;
- DCHECK_NOT_NULL(compaction_worklists_);
- compaction_worklists_->movable_slots_worklist()->Clear();
- compaction_worklists_.reset();
-
+ is_cancelled_ = true;
is_enabled_ = false;
return true;
}
Compactor::CompactableSpaceHandling Compactor::CompactSpacesIfEnabled() {
+ if (is_cancelled_ && compaction_worklists_) {
+ compaction_worklists_->movable_slots_worklist()->Clear();
+ compaction_worklists_.reset();
+ }
if (!is_enabled_) return CompactableSpaceHandling::kSweep;
StatsCollector::EnabledScope stats_scope(heap_.heap()->stats_collector(),
diff --git a/deps/v8/src/heap/cppgc/compactor.h b/deps/v8/src/heap/cppgc/compactor.h
index 46a8e1ef53..82ce5291bb 100644
--- a/deps/v8/src/heap/cppgc/compactor.h
+++ b/deps/v8/src/heap/cppgc/compactor.h
@@ -48,6 +48,7 @@ class V8_EXPORT_PRIVATE Compactor final {
std::unique_ptr<CompactionWorklists> compaction_worklists_;
bool is_enabled_ = false;
+ bool is_cancelled_ = false;
bool enable_for_next_gc_for_testing_ = false;
};
diff --git a/deps/v8/src/heap/cppgc/heap-base.cc b/deps/v8/src/heap/cppgc/heap-base.cc
index d2657ca417..a5c89b6218 100644
--- a/deps/v8/src/heap/cppgc/heap-base.cc
+++ b/deps/v8/src/heap/cppgc/heap-base.cc
@@ -36,7 +36,7 @@ class ObjectSizeCounter : private HeapVisitor<ObjectSizeCounter> {
private:
static size_t ObjectSize(const HeapObjectHeader& header) {
- return ObjectView(header).Size();
+ return ObjectView<>(header).Size();
}
bool VisitHeapObjectHeader(HeapObjectHeader& header) {
@@ -53,7 +53,8 @@ class ObjectSizeCounter : private HeapVisitor<ObjectSizeCounter> {
HeapBase::HeapBase(
std::shared_ptr<cppgc::Platform> platform,
const std::vector<std::unique_ptr<CustomSpaceBase>>& custom_spaces,
- StackSupport stack_support)
+ StackSupport stack_support, MarkingType marking_support,
+ SweepingType sweeping_support)
: raw_heap_(this, custom_spaces),
platform_(std::move(platform)),
oom_handler_(std::make_unique<FatalOutOfMemoryHandler>(this)),
@@ -81,7 +82,9 @@ HeapBase::HeapBase(
weak_persistent_region_(*oom_handler_.get()),
strong_cross_thread_persistent_region_(*oom_handler_.get()),
weak_cross_thread_persistent_region_(*oom_handler_.get()),
- stack_support_(stack_support) {
+ stack_support_(stack_support),
+ marking_support_(marking_support),
+ sweeping_support_(sweeping_support) {
stats_collector_->RegisterObserver(
&allocation_observer_for_PROCESS_HEAP_STATISTICS_);
}
diff --git a/deps/v8/src/heap/cppgc/heap-base.h b/deps/v8/src/heap/cppgc/heap-base.h
index b0848dc7b7..041f4cf3bd 100644
--- a/deps/v8/src/heap/cppgc/heap-base.h
+++ b/deps/v8/src/heap/cppgc/heap-base.h
@@ -62,10 +62,6 @@ class V8_EXPORT HeapHandle {
namespace internal {
-namespace testing {
-class TestWithHeap;
-} // namespace testing
-
class FatalOutOfMemoryHandler;
class PageBackend;
class PreFinalizerHandler;
@@ -75,6 +71,8 @@ class StatsCollector;
class V8_EXPORT_PRIVATE HeapBase : public cppgc::HeapHandle {
public:
using StackSupport = cppgc::Heap::StackSupport;
+ using MarkingType = cppgc::Heap::MarkingType;
+ using SweepingType = cppgc::Heap::SweepingType;
static HeapBase& From(cppgc::HeapHandle& heap_handle) {
return static_cast<HeapBase&>(heap_handle);
@@ -85,7 +83,8 @@ class V8_EXPORT_PRIVATE HeapBase : public cppgc::HeapHandle {
HeapBase(std::shared_ptr<cppgc::Platform> platform,
const std::vector<std::unique_ptr<CustomSpaceBase>>& custom_spaces,
- StackSupport stack_support);
+ StackSupport stack_support, MarkingType marking_support,
+ SweepingType sweeping_support);
virtual ~HeapBase();
HeapBase(const HeapBase&) = delete;
@@ -125,6 +124,7 @@ class V8_EXPORT_PRIVATE HeapBase : public cppgc::HeapHandle {
}
MarkerBase* marker() const { return marker_.get(); }
+ std::unique_ptr<MarkerBase>& GetMarkerRefForTesting() { return marker_; }
Compactor& compactor() { return compactor_; }
@@ -206,6 +206,8 @@ class V8_EXPORT_PRIVATE HeapBase : public cppgc::HeapHandle {
int GetCreationThreadId() const { return creation_thread_id_; }
+ MarkingType marking_support() const { return marking_support_; }
+
protected:
// Used by the incremental scheduler to finalize a GC if supported.
virtual void FinalizeIncrementalGarbageCollectionIfNeeded(
@@ -276,8 +278,10 @@ class V8_EXPORT_PRIVATE HeapBase : public cppgc::HeapHandle {
int creation_thread_id_ = v8::base::OS::GetCurrentThreadId();
+ const MarkingType marking_support_;
+ const SweepingType sweeping_support_;
+
friend class MarkerBase::IncrementalMarkingTask;
- friend class testing::TestWithHeap;
friend class cppgc::subtle::DisallowGarbageCollectionScope;
friend class cppgc::subtle::NoGarbageCollectionScope;
friend class cppgc::testing::Heap;
diff --git a/deps/v8/src/heap/cppgc/heap-object-header.h b/deps/v8/src/heap/cppgc/heap-object-header.h
index f1d67df8b5..e5a428a5a9 100644
--- a/deps/v8/src/heap/cppgc/heap-object-header.h
+++ b/deps/v8/src/heap/cppgc/heap-object-header.h
@@ -19,6 +19,10 @@
#include "src/heap/cppgc/gc-info-table.h"
#include "src/heap/cppgc/globals.h"
+#if defined(CPPGC_CAGED_HEAP)
+#include "src/heap/cppgc/caged-heap.h"
+#endif // defined(CPPGC_CAGED_HEAP)
+
namespace cppgc {
class Visitor;
@@ -102,6 +106,11 @@ class HeapObjectHeader {
inline bool IsFinalizable() const;
void Finalize();
+#if defined(CPPGC_CAGED_HEAP)
+ inline void SetNextUnfinalized(HeapObjectHeader* next);
+ inline HeapObjectHeader* GetNextUnfinalized(uintptr_t cage_base) const;
+#endif // defined(CPPGC_CAGED_HEAP)
+
V8_EXPORT_PRIVATE HeapObjectName GetName() const;
template <AccessMode = AccessMode::kNonAtomic>
@@ -140,7 +149,13 @@ class HeapObjectHeader {
inline void StoreEncoded(uint16_t bits, uint16_t mask);
#if defined(V8_TARGET_ARCH_64_BIT)
+ // If the cage is enabled, to save on space required by sweeper metadata, we
+ // store the list of to-be-finalized objects inlined in HeapObjectHeader.
+#if defined(CPPGC_CAGED_HEAP)
+ uint32_t next_unfinalized_ = 0;
+#else // !defined(CPPGC_CAGED_HEAP)
uint32_t padding_ = 0;
+#endif // !defined(CPPGC_CAGED_HEAP)
#endif // defined(V8_TARGET_ARCH_64_BIT)
uint16_t encoded_high_;
uint16_t encoded_low_;
@@ -163,9 +178,9 @@ const HeapObjectHeader& HeapObjectHeader::FromObject(const void* object) {
}
HeapObjectHeader::HeapObjectHeader(size_t size, GCInfoIndex gc_info_index) {
-#if defined(V8_TARGET_ARCH_64_BIT)
+#if defined(V8_TARGET_ARCH_64_BIT) && !defined(CPPGC_CAGED_HEAP)
USE(padding_);
-#endif // defined(V8_TARGET_ARCH_64_BIT)
+#endif // defined(V8_TARGET_ARCH_64_BIT) && !defined(CPPGC_CAGED_HEAP)
DCHECK_LT(gc_info_index, GCInfoTable::kMaxIndex);
DCHECK_EQ(0u, size & (sizeof(HeapObjectHeader) - 1));
DCHECK_GE(kMaxSize, size);
@@ -288,6 +303,22 @@ bool HeapObjectHeader::IsFinalizable() const {
return gc_info.finalize;
}
+#if defined(CPPGC_CAGED_HEAP)
+void HeapObjectHeader::SetNextUnfinalized(HeapObjectHeader* next) {
+ next_unfinalized_ = CagedHeap::OffsetFromAddress<uint32_t>(next);
+}
+
+HeapObjectHeader* HeapObjectHeader::GetNextUnfinalized(
+ uintptr_t cage_base) const {
+ DCHECK(cage_base);
+ DCHECK_EQ(0u,
+ CagedHeap::OffsetFromAddress(reinterpret_cast<void*>(cage_base)));
+ return next_unfinalized_ ? reinterpret_cast<HeapObjectHeader*>(
+ cage_base + next_unfinalized_)
+ : nullptr;
+}
+#endif // defined(CPPGC_CAGED_HEAP)
+
template <AccessMode mode>
void HeapObjectHeader::Trace(Visitor* visitor) const {
const GCInfo& gc_info =
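
Storing the next-unfinalized link as a 32-bit offset from the cage base is what
lets the sweeper reuse the header's former padding word. A self-contained
sketch of the compress/decompress step, with an assumed 4 GiB cage and 64-bit
pointers (offset 0 stands for "no next object", as in GetNextUnfinalized()
above):

  #include <cassert>
  #include <cstdint>

  constexpr uintptr_t kCageAlignment = static_cast<uintptr_t>(4) << 30;  // 4 GiB

  // Compress a pointer known to live inside the cage into a 32-bit offset.
  uint32_t Compress(const void* ptr) {
    return static_cast<uint32_t>(reinterpret_cast<uintptr_t>(ptr) &
                                 (kCageAlignment - 1));
  }

  // Expand an offset back into a pointer, given the cage-aligned base address.
  void* Decompress(uintptr_t cage_base, uint32_t offset) {
    assert((cage_base & (kCageAlignment - 1)) == 0);
    return offset ? reinterpret_cast<void*>(cage_base + offset) : nullptr;
  }

SetNextUnfinalized()/GetNextUnfinalized() above apply the same masking via
CagedHeap::OffsetFromAddress<uint32_t>().
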
diff --git a/deps/v8/src/heap/cppgc/heap-page.cc b/deps/v8/src/heap/cppgc/heap-page.cc
index 567f152f94..c7af4e971e 100644
--- a/deps/v8/src/heap/cppgc/heap-page.cc
+++ b/deps/v8/src/heap/cppgc/heap-page.cc
@@ -8,6 +8,7 @@
#include "include/cppgc/internal/api-constants.h"
#include "src/base/logging.h"
+#include "src/base/platform/mutex.h"
#include "src/heap/cppgc/globals.h"
#include "src/heap/cppgc/heap-object-header.h"
#include "src/heap/cppgc/heap-space.h"
@@ -239,8 +240,14 @@ void LargePage::Destroy(LargePage* page) {
DCHECK(page);
#if DEBUG
const BaseSpace& space = page->space();
- DCHECK_EQ(space.end(), std::find(space.begin(), space.end(), page));
-#endif
+ {
+ // Destroy() happens on the mutator thread, but another concurrent sweeper
+ // task may add a live object using `BaseSpace::AddPage()` while iterating
+ // the pages.
+ v8::base::LockGuard<v8::base::Mutex> guard(&space.pages_mutex());
+ DCHECK_EQ(space.end(), std::find(space.begin(), space.end(), page));
+ }
+#endif // DEBUG
page->~LargePage();
PageBackend* backend = page->heap().page_backend();
page->heap().stats_collector()->NotifyFreedMemory(
diff --git a/deps/v8/src/heap/cppgc/heap-space.h b/deps/v8/src/heap/cppgc/heap-space.h
index 0c640e653f..18fe7ba225 100644
--- a/deps/v8/src/heap/cppgc/heap-space.h
+++ b/deps/v8/src/heap/cppgc/heap-space.h
@@ -46,6 +46,7 @@ class V8_EXPORT_PRIVATE BaseSpace {
void AddPage(BasePage*);
void RemovePage(BasePage*);
Pages RemoveAllPages();
+ v8::base::Mutex& pages_mutex() const { return pages_mutex_; }
bool is_compactable() const { return is_compactable_; }
@@ -57,7 +58,7 @@ class V8_EXPORT_PRIVATE BaseSpace {
private:
RawHeap* heap_;
Pages pages_;
- v8::base::Mutex pages_mutex_;
+ mutable v8::base::Mutex pages_mutex_;
const size_t index_;
const PageType type_;
const bool is_compactable_;
diff --git a/deps/v8/src/heap/cppgc/heap-state.cc b/deps/v8/src/heap/cppgc/heap-state.cc
index 364f03c643..756c19aa8f 100644
--- a/deps/v8/src/heap/cppgc/heap-state.cc
+++ b/deps/v8/src/heap/cppgc/heap-state.cc
@@ -22,6 +22,13 @@ bool HeapState::IsSweeping(const HeapHandle& heap_handle) {
}
// static
+bool HeapState::IsSweepingOnOwningThread(const HeapHandle& heap_handle) {
+ return internal::HeapBase::From(heap_handle)
+ .sweeper()
+ .IsSweepingOnMutatorThread();
+}
+
+// static
bool HeapState::IsInAtomicPause(const HeapHandle& heap_handle) {
return internal::HeapBase::From(heap_handle).in_atomic_pause();
}
diff --git a/deps/v8/src/heap/cppgc/heap.cc b/deps/v8/src/heap/cppgc/heap.cc
index a453e847e3..26500a9ca8 100644
--- a/deps/v8/src/heap/cppgc/heap.cc
+++ b/deps/v8/src/heap/cppgc/heap.cc
@@ -62,8 +62,8 @@ namespace internal {
namespace {
-void CheckConfig(Heap::Config config, Heap::MarkingType marking_support,
- Heap::SweepingType sweeping_support) {
+void CheckConfig(Heap::Config config, HeapBase::MarkingType marking_support,
+ HeapBase::SweepingType sweeping_support) {
CHECK_WITH_MSG(
(config.collection_type != Heap::Config::CollectionType::kMinor) ||
(config.stack_state == Heap::Config::StackState::kNoHeapPointers),
@@ -78,23 +78,29 @@ void CheckConfig(Heap::Config config, Heap::MarkingType marking_support,
Heap::Heap(std::shared_ptr<cppgc::Platform> platform,
cppgc::Heap::HeapOptions options)
- : HeapBase(platform, options.custom_spaces, options.stack_support),
+ : HeapBase(platform, options.custom_spaces, options.stack_support,
+ options.marking_support, options.sweeping_support),
gc_invoker_(this, platform_.get(), options.stack_support),
growing_(&gc_invoker_, stats_collector_.get(),
options.resource_constraints, options.marking_support,
- options.sweeping_support),
- marking_support_(options.marking_support),
- sweeping_support_(options.sweeping_support) {
- CHECK_IMPLIES(options.marking_support != MarkingType::kAtomic,
+ options.sweeping_support) {
+ CHECK_IMPLIES(options.marking_support != HeapBase::MarkingType::kAtomic,
platform_->GetForegroundTaskRunner());
- CHECK_IMPLIES(options.sweeping_support != SweepingType::kAtomic,
+ CHECK_IMPLIES(options.sweeping_support != HeapBase::SweepingType::kAtomic,
platform_->GetForegroundTaskRunner());
}
Heap::~Heap() {
- subtle::NoGarbageCollectionScope no_gc(*this);
- // Finish already running GC if any, but don't finalize live objects.
- sweeper_.FinishIfRunning();
+ // Gracefully finish an already running GC, if any, but don't finalize live
+ // objects.
+ FinalizeIncrementalGarbageCollectionIfRunning(
+ {Config::CollectionType::kMajor,
+ Config::StackState::kMayContainHeapPointers,
+ Config::MarkingType::kAtomic, Config::SweepingType::kAtomic});
+ {
+ subtle::NoGarbageCollectionScope no_gc(*this);
+ sweeper_.FinishIfRunning();
+ }
}
void Heap::CollectGarbage(Config config) {
@@ -114,7 +120,7 @@ void Heap::CollectGarbage(Config config) {
void Heap::StartIncrementalGarbageCollection(Config config) {
DCHECK_NE(Config::MarkingType::kAtomic, config.marking_type);
- DCHECK_NE(marking_support_, MarkingType::kAtomic);
+ DCHECK_NE(marking_support_, Config::MarkingType::kAtomic);
CheckConfig(config, marking_support_, sweeping_support_);
if (IsMarking() || in_no_gc_scope()) return;
@@ -125,7 +131,6 @@ void Heap::StartIncrementalGarbageCollection(Config config) {
}
void Heap::FinalizeIncrementalGarbageCollectionIfRunning(Config config) {
- DCHECK_NE(marking_support_, MarkingType::kAtomic);
CheckConfig(config, marking_support_, sweeping_support_);
if (!IsMarking()) return;
diff --git a/deps/v8/src/heap/cppgc/heap.h b/deps/v8/src/heap/cppgc/heap.h
index b57e40b13b..c3504073bc 100644
--- a/deps/v8/src/heap/cppgc/heap.h
+++ b/deps/v8/src/heap/cppgc/heap.h
@@ -53,9 +53,6 @@ class V8_EXPORT_PRIVATE Heap final : public HeapBase,
GCInvoker gc_invoker_;
HeapGrowing growing_;
- const MarkingType marking_support_;
- const SweepingType sweeping_support_;
-
size_t epoch_ = 0;
};
diff --git a/deps/v8/src/heap/cppgc/incremental-marking-schedule.cc b/deps/v8/src/heap/cppgc/incremental-marking-schedule.cc
index 0eae47e59d..ce7d1aadc3 100644
--- a/deps/v8/src/heap/cppgc/incremental-marking-schedule.cc
+++ b/deps/v8/src/heap/cppgc/incremental-marking-schedule.cc
@@ -30,7 +30,6 @@ void IncrementalMarkingSchedule::UpdateMutatorThreadMarkedBytes(
void IncrementalMarkingSchedule::AddConcurrentlyMarkedBytes(
size_t marked_bytes) {
- DCHECK(!incremental_marking_start_time_.IsNull());
concurrently_marked_bytes_.fetch_add(marked_bytes, std::memory_order_relaxed);
}
diff --git a/deps/v8/src/heap/cppgc/marker.cc b/deps/v8/src/heap/cppgc/marker.cc
index 0410a4eaea..e792c4c844 100644
--- a/deps/v8/src/heap/cppgc/marker.cc
+++ b/deps/v8/src/heap/cppgc/marker.cc
@@ -240,6 +240,7 @@ void MarkerBase::StartMarking() {
MarkingConfig::MarkingType::kIncrementalAndConcurrent) {
mutator_marking_state_.Publish();
concurrent_marker_->Start();
+ concurrent_marking_active_ = true;
}
incremental_marking_allocation_observer_ =
std::make_unique<IncrementalMarkingAllocationObserver>(*this);
@@ -255,8 +256,9 @@ void MarkerBase::EnterAtomicPause(MarkingConfig::StackState stack_state) {
StatsCollector::kMarkAtomicPrologue);
if (ExitIncrementalMarkingIfNeeded(config_, heap())) {
- // Cancel remaining concurrent/incremental tasks.
- concurrent_marker_->Cancel();
+ // Cancel remaining incremental tasks. Concurrent marking jobs are left to
+ // run in parallel with the atomic pause until the mutator thread runs out
+ // of work.
incremental_marking_handle_.Cancel();
heap().stats_collector()->UnregisterObserver(
incremental_marking_allocation_observer_.get());
@@ -276,6 +278,17 @@ void MarkerBase::EnterAtomicPause(MarkingConfig::StackState stack_state) {
MarkNotFullyConstructedObjects();
}
}
+ if (heap().marking_support() ==
+ MarkingConfig::MarkingType::kIncrementalAndConcurrent) {
+ // Start parallel marking.
+ mutator_marking_state_.Publish();
+ if (concurrent_marking_active_) {
+ concurrent_marker_->NotifyIncrementalMutatorStepCompleted();
+ } else {
+ concurrent_marker_->Start();
+ concurrent_marking_active_ = true;
+ }
+ }
}
void MarkerBase::LeaveAtomicPause() {
@@ -414,6 +427,16 @@ void MarkerBase::AdvanceMarkingOnAllocation() {
}
}
+bool MarkerBase::CancelConcurrentMarkingIfNeeded() {
+ if (config_.marking_type != MarkingConfig::MarkingType::kAtomic ||
+ !concurrent_marking_active_)
+ return false;
+
+ concurrent_marker_->Cancel();
+ concurrent_marking_active_ = false;
+ return true;
+}
+
bool MarkerBase::AdvanceMarkingWithLimits(v8::base::TimeDelta max_duration,
size_t marked_bytes_limit) {
bool is_done = false;
@@ -433,6 +456,9 @@ bool MarkerBase::AdvanceMarkingWithLimits(v8::base::TimeDelta max_duration,
// adjustment.
is_done = ProcessWorklistsWithDeadline(marked_bytes_limit, deadline);
}
+ if (is_done && CancelConcurrentMarkingIfNeeded()) {
+ is_done = ProcessWorklistsWithDeadline(marked_bytes_limit, deadline);
+ }
schedule_.UpdateMutatorThreadMarkedBytes(
mutator_marking_state_.marked_bytes());
}
@@ -592,13 +618,6 @@ void MarkerBase::WaitForConcurrentMarkingForTesting() {
concurrent_marker_->JoinForTesting();
}
-void MarkerBase::NotifyCompactionCancelled() {
- // Compaction cannot be cancelled while concurrent marking is active.
- DCHECK_EQ(MarkingConfig::MarkingType::kAtomic, config_.marking_type);
- DCHECK_IMPLIES(concurrent_marker_, !concurrent_marker_->IsActive());
- mutator_marking_state_.NotifyCompactionCancelled();
-}
-
Marker::Marker(Key key, HeapBase& heap, cppgc::Platform* platform,
MarkingConfig config)
: MarkerBase(key, heap, platform, config),
diff --git a/deps/v8/src/heap/cppgc/marker.h b/deps/v8/src/heap/cppgc/marker.h
index f1aa37965a..1f76583177 100644
--- a/deps/v8/src/heap/cppgc/marker.h
+++ b/deps/v8/src/heap/cppgc/marker.h
@@ -133,8 +133,6 @@ class V8_EXPORT_PRIVATE MarkerBase {
void WaitForConcurrentMarkingForTesting();
- void NotifyCompactionCancelled();
-
bool IsMarking() const { return is_marking_; }
protected:
@@ -173,6 +171,8 @@ class V8_EXPORT_PRIVATE MarkerBase {
void AdvanceMarkingOnAllocation();
+ bool CancelConcurrentMarkingIfNeeded();
+
HeapBase& heap_;
MarkingConfig config_ = MarkingConfig::Default();
@@ -189,6 +189,7 @@ class V8_EXPORT_PRIVATE MarkerBase {
IncrementalMarkingSchedule schedule_;
std::unique_ptr<ConcurrentMarkerBase> concurrent_marker_{nullptr};
+ bool concurrent_marking_active_ = false;
bool main_marking_disabled_for_testing_{false};
bool visited_cross_thread_persistents_in_atomic_pause_{false};
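
Letting the concurrent markers keep running into the atomic pause means the
mutator has to cancel them and then re-drain the worklists once, because a
cancelled job may have published work the mutator has not yet seen. A
simplified, self-contained sketch of that "finish, cancel, finish again"
control flow (no real threads; all names are invented stand-ins):

  #include <deque>

  struct MiniMarker {
    std::deque<int> worklist;            // items published by mutator + workers
    bool concurrent_marking_active = true;

    bool ProcessWorklistsWithDeadline() {
      // Drain everything currently visible; the real implementation also
      // honours a time/byte budget and may bail out early.
      while (!worklist.empty()) worklist.pop_front();
      return true;  // is_done
    }

    bool CancelConcurrentMarkingIfNeeded() {
      if (!concurrent_marking_active) return false;
      // Cancelling may flush straggler items from the workers into `worklist`.
      worklist.push_back(42);
      concurrent_marking_active = false;
      return true;
    }

    void AdvanceMarkingWithLimits() {
      bool is_done = ProcessWorklistsWithDeadline();
      // Same shape as the patched MarkerBase::AdvanceMarkingWithLimits(): only
      // after the first drain succeeds are the workers cancelled, followed by
      // one more drain to pick up what they left behind.
      if (is_done && CancelConcurrentMarkingIfNeeded()) {
        is_done = ProcessWorklistsWithDeadline();
      }
    }
  };
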
diff --git a/deps/v8/src/heap/cppgc/marking-state.h b/deps/v8/src/heap/cppgc/marking-state.h
index 5f6f0aba37..513c781b96 100644
--- a/deps/v8/src/heap/cppgc/marking-state.h
+++ b/deps/v8/src/heap/cppgc/marking-state.h
@@ -110,12 +110,6 @@ class MarkingStateBase {
return movable_slots_worklist_.get();
}
- void NotifyCompactionCancelled() {
- DCHECK(IsCompactionEnabled());
- movable_slots_worklist_->Clear();
- movable_slots_worklist_.reset();
- }
-
bool DidDiscoverNewEphemeronPairs() const {
return discovered_new_ephemeron_pairs_;
}
@@ -415,15 +409,17 @@ void MutatorMarkingState::InvokeWeakRootsCallbackIfNeeded(
#if DEBUG
const HeapObjectHeader& header =
HeapObjectHeader::FromObject(desc.base_object_payload);
- DCHECK_IMPLIES(header.IsInConstruction(), header.IsMarked());
+ DCHECK_IMPLIES(header.IsInConstruction(),
+ header.IsMarked<AccessMode::kAtomic>());
#endif // DEBUG
weak_callback(LivenessBrokerFactory::Create(), parameter);
}
bool MutatorMarkingState::IsMarkedWeakContainer(HeapObjectHeader& header) {
- const bool result = weak_containers_worklist_.Contains(&header) &&
- !recently_retraced_weak_containers_.Contains(&header);
- DCHECK_IMPLIES(result, header.IsMarked());
+ const bool result =
+ weak_containers_worklist_.Contains<AccessMode::kAtomic>(&header) &&
+ !recently_retraced_weak_containers_.Contains(&header);
+ DCHECK_IMPLIES(result, header.IsMarked<AccessMode::kAtomic>());
DCHECK_IMPLIES(result, !header.IsInConstruction());
return result;
}
@@ -493,7 +489,7 @@ template <AccessMode mode>
void DynamicallyTraceMarkedObject(Visitor& visitor,
const HeapObjectHeader& header) {
DCHECK(!header.IsInConstruction<mode>());
- DCHECK(header.IsMarked<mode>());
+ DCHECK(header.IsMarked<AccessMode::kAtomic>());
header.Trace<mode>(&visitor);
}
diff --git a/deps/v8/src/heap/cppgc/marking-verifier.cc b/deps/v8/src/heap/cppgc/marking-verifier.cc
index 05a6e23df8..feb009708d 100644
--- a/deps/v8/src/heap/cppgc/marking-verifier.cc
+++ b/deps/v8/src/heap/cppgc/marking-verifier.cc
@@ -45,6 +45,13 @@ void MarkingVerifierBase::Run(
Heap::Config::StackState stack_state, uintptr_t stack_end,
v8::base::Optional<size_t> expected_marked_bytes) {
Traverse(heap_.raw_heap());
+// Avoid verifying the stack when running with TSAN as the TSAN runtime changes
+// stack contents when e.g. working with locks. Specifically, the marker uses
+// locks in slow-path operations, which results in stack changes throughout
+// marking. This means that the conservative iteration below may find more
+// objects than the regular marker. The difference is benign, as the delta of
+// objects is not reachable from user code, but it prevents verification.
+#if !defined(THREAD_SANITIZER)
if (stack_state == Heap::Config::StackState::kMayContainHeapPointers) {
in_construction_objects_ = &in_construction_objects_stack_;
heap_.stack()->IteratePointersUnsafe(this, stack_end);
@@ -58,6 +65,7 @@ void MarkingVerifierBase::Run(
in_construction_objects_heap_.find(header));
}
}
+#endif // !defined(THREAD_SANITIZER)
if (expected_marked_bytes && verifier_found_marked_bytes_are_exact_) {
CHECK_EQ(expected_marked_bytes.value(), verifier_found_marked_bytes_);
}
@@ -124,7 +132,7 @@ bool MarkingVerifierBase::VisitHeapObjectHeader(HeapObjectHeader& header) {
}
verifier_found_marked_bytes_ +=
- ObjectView(header).Size() + sizeof(HeapObjectHeader);
+ ObjectView<>(header).Size() + sizeof(HeapObjectHeader);
verification_state_.SetCurrentParent(nullptr);
diff --git a/deps/v8/src/heap/cppgc/marking-visitor.cc b/deps/v8/src/heap/cppgc/marking-visitor.cc
index a740d33a84..f2dff286cd 100644
--- a/deps/v8/src/heap/cppgc/marking-visitor.cc
+++ b/deps/v8/src/heap/cppgc/marking-visitor.cc
@@ -4,6 +4,7 @@
#include "src/heap/cppgc/marking-visitor.h"
+#include "src/heap/cppgc/globals.h"
#include "src/heap/cppgc/heap.h"
#include "src/heap/cppgc/marking-state.h"
@@ -54,7 +55,7 @@ ConservativeMarkingVisitor::ConservativeMarkingVisitor(
void ConservativeMarkingVisitor::VisitFullyConstructedConservatively(
HeapObjectHeader& header) {
- if (header.IsMarked()) {
+ if (header.IsMarked<AccessMode::kAtomic>()) {
if (marking_state_.IsMarkedWeakContainer(header))
marking_state_.ReTraceMarkedWeakContainer(visitor_, header);
return;
diff --git a/deps/v8/src/heap/cppgc/object-poisoner.h b/deps/v8/src/heap/cppgc/object-poisoner.h
index 3b738eaeb6..2bcb3caf94 100644
--- a/deps/v8/src/heap/cppgc/object-poisoner.h
+++ b/deps/v8/src/heap/cppgc/object-poisoner.h
@@ -24,7 +24,8 @@ class UnmarkedObjectsPoisoner : public HeapVisitor<UnmarkedObjectsPoisoner> {
bool VisitHeapObjectHeader(HeapObjectHeader& header) {
if (header.IsFree() || header.IsMarked()) return true;
- ASAN_POISON_MEMORY_REGION(header.ObjectStart(), ObjectView(header).Size());
+ ASAN_POISON_MEMORY_REGION(header.ObjectStart(),
+ ObjectView<>(header).Size());
return true;
}
};
diff --git a/deps/v8/src/heap/cppgc/object-size-trait.cc b/deps/v8/src/heap/cppgc/object-size-trait.cc
index 000b8eef9d..c1713f2402 100644
--- a/deps/v8/src/heap/cppgc/object-size-trait.cc
+++ b/deps/v8/src/heap/cppgc/object-size-trait.cc
@@ -14,8 +14,8 @@ namespace internal {
// static
size_t BaseObjectSizeTrait::GetObjectSizeForGarbageCollected(
const void* object) {
- return ObjectView(HeapObjectHeader::FromObject(object))
- .Size<AccessMode::kAtomic>();
+ return ObjectView<AccessMode::kAtomic>(HeapObjectHeader::FromObject(object))
+ .Size();
}
// static
@@ -26,7 +26,7 @@ size_t BaseObjectSizeTrait::GetObjectSizeForGarbageCollectedMixin(
const auto& header =
BasePage::FromPayload(address)
->ObjectHeaderFromInnerAddress<AccessMode::kAtomic>(address);
- DCHECK(!header.IsLargeObject());
+ DCHECK(!header.IsLargeObject<AccessMode::kAtomic>());
return header.ObjectSize<AccessMode::kAtomic>();
}
diff --git a/deps/v8/src/heap/cppgc/object-view.h b/deps/v8/src/heap/cppgc/object-view.h
index 98b378c5ac..159ee901d1 100644
--- a/deps/v8/src/heap/cppgc/object-view.h
+++ b/deps/v8/src/heap/cppgc/object-view.h
@@ -15,13 +15,13 @@ namespace internal {
// ObjectView allows accessing a header within the bounds of the actual object.
// It is not exposed externally and does not keep the underlying object alive.
+template <AccessMode = AccessMode::kNonAtomic>
class ObjectView final {
public:
V8_INLINE explicit ObjectView(const HeapObjectHeader& header);
V8_INLINE Address Start() const;
V8_INLINE ConstAddress End() const;
- template <AccessMode = AccessMode::kNonAtomic>
V8_INLINE size_t Size() const;
private:
@@ -30,25 +30,30 @@ class ObjectView final {
const bool is_large_object_;
};
-ObjectView::ObjectView(const HeapObjectHeader& header)
+template <AccessMode access_mode>
+ObjectView<access_mode>::ObjectView(const HeapObjectHeader& header)
: header_(header),
base_page_(
BasePage::FromPayload(const_cast<HeapObjectHeader*>(&header_))),
- is_large_object_(header_.IsLargeObject()) {
+ is_large_object_(header_.IsLargeObject<access_mode>()) {
DCHECK_EQ(Start() + Size(), End());
}
-Address ObjectView::Start() const { return header_.ObjectStart(); }
+template <AccessMode access_mode>
+Address ObjectView<access_mode>::Start() const {
+ return header_.ObjectStart();
+}
-ConstAddress ObjectView::End() const {
+template <AccessMode access_mode>
+ConstAddress ObjectView<access_mode>::End() const {
return is_large_object_ ? LargePage::From(base_page_)->PayloadEnd()
: header_.ObjectEnd();
}
-template <AccessMode mode>
-size_t ObjectView::Size() const {
+template <AccessMode access_mode>
+size_t ObjectView<access_mode>::Size() const {
return is_large_object_ ? LargePage::From(base_page_)->ObjectSize()
- : header_.ObjectSize<mode>();
+ : header_.ObjectSize<access_mode>();
}
} // namespace internal
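
The ObjectView change moves the AccessMode from a per-call template parameter on Size() to a class template parameter with a non-atomic default, so call sites pick the access mode once at construction (ObjectView<>(header) versus ObjectView<AccessMode::kAtomic>(header)). A self-contained sketch of the same refactoring pattern on a toy class, not the real cppgc types:

  #include <cstddef>

  enum class AccessMode { kNonAtomic, kAtomic };

  // Stand-in for HeapObjectHeader: exposes a size with both access flavors.
  struct HeaderModel {
    size_t size = 0;
    template <AccessMode mode>
    size_t ObjectSize() const {
      // A real header would use atomic loads for kAtomic; the toy just reads.
      return size;
    }
  };

  // Before: `template <AccessMode> size_t Size()` on a non-template class.
  // After: the class carries the mode and forwards it to every accessor.
  template <AccessMode mode = AccessMode::kNonAtomic>
  class SizeView final {
   public:
    explicit SizeView(const HeaderModel& header) : header_(header) {}
    size_t Size() const { return header_.ObjectSize<mode>(); }
   private:
    const HeaderModel& header_;
  };

  // Usage mirrors the call sites in the diff:
  //   SizeView<>(header).Size();                      // non-atomic default
  //   SizeView<AccessMode::kAtomic>(header).Size();   // concurrent callers
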
diff --git a/deps/v8/src/heap/cppgc/persistent-node.cc b/deps/v8/src/heap/cppgc/persistent-node.cc
index ac6ffb624a..4ff93958d7 100644
--- a/deps/v8/src/heap/cppgc/persistent-node.cc
+++ b/deps/v8/src/heap/cppgc/persistent-node.cc
@@ -63,10 +63,10 @@ size_t PersistentRegionBase::NodesInUse() const {
return nodes_in_use_;
}
-void PersistentRegionBase::EnsureNodeSlots() {
+void PersistentRegionBase::RefillFreeList() {
auto node_slots = std::make_unique<PersistentNodeSlots>();
if (!node_slots.get()) {
- oom_handler_("Oilpan: PersistentRegionBase::EnsureNodeSlots()");
+ oom_handler_("Oilpan: PersistentRegionBase::RefillFreeList()");
}
nodes_.push_back(std::move(node_slots));
for (auto& node : *nodes_.back()) {
@@ -75,6 +75,14 @@ void PersistentRegionBase::EnsureNodeSlots() {
}
}
+PersistentNode* PersistentRegionBase::RefillFreeListAndAllocateNode(
+ void* owner, TraceCallback trace) {
+ RefillFreeList();
+ auto* node = TryAllocateNodeFromFreeList(owner, trace);
+ CPPGC_DCHECK(node);
+ return node;
+}
+
void PersistentRegionBase::Trace(Visitor* visitor) {
free_list_head_ = nullptr;
for (auto& slots : nodes_) {
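
The persistent-node.cc rename splits node allocation into a fast path (pop from the free list) and an explicit slow path that refills the free list and then allocates. A self-contained sketch of that fast/slow split with a hypothetical Node type; the real code stores PersistentNode slots in fixed-size arrays and routes allocation failure through oom_handler_:

  #include <cstddef>
  #include <memory>
  #include <vector>

  struct Node { Node* next_free = nullptr; };

  class RegionModel {
   public:
    Node* TryAllocateNodeFromFreeList() {       // fast path, may fail
      if (!free_list_head_) return nullptr;
      Node* node = free_list_head_;
      free_list_head_ = node->next_free;
      return node;
    }

    Node* RefillFreeListAndAllocateNode() {     // slow path, mirrors the diff
      RefillFreeList();
      // After a refill the free list cannot be empty.
      return TryAllocateNodeFromFreeList();
    }

   private:
    void RefillFreeList() {
      slots_.push_back(std::make_unique<Node[]>(kSlotsPerChunk));
      Node* chunk = slots_.back().get();
      for (size_t i = 0; i < kSlotsPerChunk; ++i) {
        chunk[i].next_free = free_list_head_;
        free_list_head_ = &chunk[i];
      }
    }

    static constexpr size_t kSlotsPerChunk = 256;
    std::vector<std::unique_ptr<Node[]>> slots_;
    Node* free_list_head_ = nullptr;
  };
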
diff --git a/deps/v8/src/heap/cppgc/prefinalizer-handler.cc b/deps/v8/src/heap/cppgc/prefinalizer-handler.cc
index 9f641d6f4b..1a4c60e3a2 100644
--- a/deps/v8/src/heap/cppgc/prefinalizer-handler.cc
+++ b/deps/v8/src/heap/cppgc/prefinalizer-handler.cc
@@ -52,7 +52,9 @@ void PreFinalizerHandler::RegisterPrefinalizer(PreFinalizer pre_finalizer) {
}
void PreFinalizerHandler::InvokePreFinalizers() {
- StatsCollector::DisabledScope stats_scope(
+ StatsCollector::EnabledScope stats_scope(heap_.stats_collector(),
+ StatsCollector::kAtomicSweep);
+ StatsCollector::EnabledScope nested_stats_scope(
heap_.stats_collector(), StatsCollector::kSweepInvokePreFinalizers);
DCHECK(CurrentThreadIsCreationThread());
diff --git a/deps/v8/src/heap/cppgc/sweeper.cc b/deps/v8/src/heap/cppgc/sweeper.cc
index 06f1ffcad0..b063b26f04 100644
--- a/deps/v8/src/heap/cppgc/sweeper.cc
+++ b/deps/v8/src/heap/cppgc/sweeper.cc
@@ -136,11 +136,15 @@ class ThreadSafeStack {
void Push(T t) {
v8::base::LockGuard<v8::base::Mutex> lock(&mutex_);
vector_.push_back(std::move(t));
+ is_empty_.store(false, std::memory_order_relaxed);
}
Optional<T> Pop() {
v8::base::LockGuard<v8::base::Mutex> lock(&mutex_);
- if (vector_.empty()) return v8::base::nullopt;
+ if (vector_.empty()) {
+ is_empty_.store(true, std::memory_order_relaxed);
+ return v8::base::nullopt;
+ }
T top = std::move(vector_.back());
vector_.pop_back();
// std::move is redundant but is needed to avoid the bug in gcc-7.
@@ -151,22 +155,28 @@ class ThreadSafeStack {
void Insert(It begin, It end) {
v8::base::LockGuard<v8::base::Mutex> lock(&mutex_);
vector_.insert(vector_.end(), begin, end);
+ is_empty_.store(false, std::memory_order_relaxed);
}
- bool IsEmpty() const {
- v8::base::LockGuard<v8::base::Mutex> lock(&mutex_);
- return vector_.empty();
- }
+ bool IsEmpty() const { return is_empty_.load(std::memory_order_relaxed); }
private:
std::vector<T> vector_;
mutable v8::base::Mutex mutex_;
+ std::atomic<bool> is_empty_{true};
};
struct SpaceState {
struct SweptPageState {
BasePage* page = nullptr;
+#if defined(CPPGC_CAGED_HEAP)
+ // The list of unfinalized objects may be extremely big. To save on space,
+ // if the cage is enabled, the list of unfinalized objects is stored inline in
+ // HeapObjectHeader.
+ HeapObjectHeader* unfinalized_objects_head = nullptr;
+#else // !defined(CPPGC_CAGED_HEAP)
std::vector<HeapObjectHeader*> unfinalized_objects;
+#endif // !defined(CPPGC_CAGED_HEAP)
FreeList cached_free_list;
std::vector<FreeList::Block> unfinalized_free_list;
bool is_empty = false;
@@ -230,7 +240,18 @@ class DeferredFinalizationBuilder final : public FreeHandler {
void AddFinalizer(HeapObjectHeader* header, size_t size) {
if (header->IsFinalizable()) {
+#if defined(CPPGC_CAGED_HEAP)
+ if (!current_unfinalized_) {
+ DCHECK_NULL(result_.unfinalized_objects_head);
+ current_unfinalized_ = header;
+ result_.unfinalized_objects_head = header;
+ } else {
+ current_unfinalized_->SetNextUnfinalized(header);
+ current_unfinalized_ = header;
+ }
+#else // !defined(CPPGC_CAGED_HEAP)
result_.unfinalized_objects.push_back({header});
+#endif // !defined(CPPGC_CAGED_HEAP)
found_finalizer_ = true;
} else {
SetMemoryInaccessible(header, size);
@@ -254,6 +275,7 @@ class DeferredFinalizationBuilder final : public FreeHandler {
private:
ResultType result_;
+ HeapObjectHeader* current_unfinalized_ = 0;
bool found_finalizer_ = false;
};
@@ -369,11 +391,27 @@ class SweepFinalizer final {
BasePage* page = page_state->page;
// Call finalizers.
- for (HeapObjectHeader* object : page_state->unfinalized_objects) {
- const size_t size = object->AllocatedSize();
- object->Finalize();
- SetMemoryInaccessible(object, size);
+ const auto finalize_header = [](HeapObjectHeader* header) {
+ const size_t size = header->AllocatedSize();
+ header->Finalize();
+ SetMemoryInaccessible(header, size);
+ };
+#if defined(CPPGC_CAGED_HEAP)
+ const uint64_t cage_base =
+ reinterpret_cast<uint64_t>(page->heap().caged_heap().base());
+ HeapObjectHeader* next_unfinalized = 0;
+
+ for (auto* unfinalized_header = page_state->unfinalized_objects_head;
+ unfinalized_header; unfinalized_header = next_unfinalized) {
+ next_unfinalized = unfinalized_header->GetNextUnfinalized(cage_base);
+ finalize_header(unfinalized_header);
+ }
+#else // !defined(CPPGC_CAGED_HEAP)
+ for (HeapObjectHeader* unfinalized_header :
+ page_state->unfinalized_objects) {
+ finalize_header(unfinalized_header);
}
+#endif // !defined(CPPGC_CAGED_HEAP)
// Unmap page if empty.
if (page_state->is_empty) {
@@ -576,10 +614,15 @@ class ConcurrentSweepTask final : public cppgc::JobTask,
page.space().AddPage(&page);
return true;
}
+#if defined(CPPGC_CAGED_HEAP)
+ HeapObjectHeader* const unfinalized_objects =
+ header->IsFinalizable() ? page.ObjectHeader() : nullptr;
+#else // !defined(CPPGC_CAGED_HEAP)
std::vector<HeapObjectHeader*> unfinalized_objects;
if (header->IsFinalizable()) {
unfinalized_objects.push_back(page.ObjectHeader());
}
+#endif // !defined(CPPGC_CAGED_HEAP)
const size_t space_index = page.space().index();
DCHECK_GT(states_->size(), space_index);
SpaceState& state = (*states_)[space_index];
@@ -611,9 +654,15 @@ class PrepareForSweepVisitor final
PrepareForSweepVisitor(SpaceStates* states,
CompactableSpaceHandling compactable_space_handling)
: states_(states),
- compactable_space_handling_(compactable_space_handling) {}
+ compactable_space_handling_(compactable_space_handling) {
+ DCHECK_NOT_NULL(states);
+ }
- void Run(RawHeap& raw_heap) { Traverse(raw_heap); }
+ void Run(RawHeap& raw_heap) {
+ DCHECK(states_->empty());
+ *states_ = SpaceStates(raw_heap.size());
+ Traverse(raw_heap);
+ }
protected:
bool VisitNormalPageSpace(NormalPageSpace& space) {
@@ -655,9 +704,7 @@ class Sweeper::SweeperImpl final {
public:
SweeperImpl(RawHeap& heap, StatsCollector* stats_collector)
- : heap_(heap),
- stats_collector_(stats_collector),
- space_states_(heap.size()) {}
+ : heap_(heap), stats_collector_(stats_collector) {}
~SweeperImpl() { CancelSweepers(); }
@@ -704,14 +751,21 @@ class Sweeper::SweeperImpl final {
// allocate new memory.
if (is_sweeping_on_mutator_thread_) return false;
+ SpaceState& space_state = space_states_[space->index()];
+
+ // Bail out if there are no pages to be processed for the space at this
+ // moment.
+ if (space_state.swept_unfinalized_pages.IsEmpty() &&
+ space_state.unswept_pages.IsEmpty()) {
+ return false;
+ }
+
StatsCollector::EnabledScope stats_scope(stats_collector_,
StatsCollector::kIncrementalSweep);
StatsCollector::EnabledScope inner_scope(
stats_collector_, StatsCollector::kSweepOnAllocation);
MutatorThreadSweepingScope sweeping_in_progresss(*this);
- SpaceState& space_state = space_states_[space->index()];
-
{
// First, process unfinalized pages as finalizing a page is faster than
// sweeping.
@@ -777,6 +831,10 @@ class Sweeper::SweeperImpl final {
void FinalizeSweep() {
// Synchronize with the concurrent sweeper and call remaining finalizers.
SynchronizeAndFinalizeConcurrentSweeping();
+
+ // Clear space taken up by sweeper metadata.
+ space_states_.clear();
+
platform_ = nullptr;
is_in_progress_ = false;
notify_done_pending_ = true;
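
The CPPGC_CAGED_HEAP branch in sweeper.cc replaces the per-page std::vector of unfinalized objects with an intrusive singly-linked list threaded through the object headers (SetNextUnfinalized/GetNextUnfinalized), trading a little pointer chasing for much smaller sweeper metadata. A standalone sketch of that idea with a hypothetical Header type; the real code additionally compresses the link against the cage base:

  struct Header {
    Header* next_unfinalized = nullptr;   // intrusive list link lives in the header
    void Finalize() {}
  };

  struct PageStateModel {
    Header* unfinalized_head = nullptr;
  };

  // Built while sweeping: append at the tail so page order is preserved.
  void AddUnfinalized(PageStateModel& state, Header*& tail, Header* header) {
    if (!tail) {
      state.unfinalized_head = header;
    } else {
      tail->next_unfinalized = header;
    }
    tail = header;
  }

  // Consumed later on the mutator thread.
  void FinalizePage(PageStateModel& state) {
    Header* next = nullptr;
    for (Header* h = state.unfinalized_head; h; h = next) {
      next = h->next_unfinalized;         // read the link before finalizing
      h->Finalize();
    }
    state.unfinalized_head = nullptr;
  }
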
diff --git a/deps/v8/src/heap/cppgc/visitor.cc b/deps/v8/src/heap/cppgc/visitor.cc
index 2f786b99ac..b4cdee7a53 100644
--- a/deps/v8/src/heap/cppgc/visitor.cc
+++ b/deps/v8/src/heap/cppgc/visitor.cc
@@ -32,7 +32,7 @@ namespace {
void TraceConservatively(ConservativeTracingVisitor* conservative_visitor,
const HeapObjectHeader& header) {
- const auto object_view = ObjectView(header);
+ const auto object_view = ObjectView<>(header);
Address* object = reinterpret_cast<Address*>(object_view.Start());
for (size_t i = 0; i < (object_view.Size() / sizeof(Address)); ++i) {
Address maybe_ptr = object[i];
diff --git a/deps/v8/src/heap/embedder-tracing.cc b/deps/v8/src/heap/embedder-tracing.cc
index b18e82d1c0..08738af3f0 100644
--- a/deps/v8/src/heap/embedder-tracing.cc
+++ b/deps/v8/src/heap/embedder-tracing.cc
@@ -15,6 +15,7 @@ namespace v8 {
namespace internal {
void LocalEmbedderHeapTracer::SetRemoteTracer(EmbedderHeapTracer* tracer) {
+ CHECK_NULL(cpp_heap_);
if (remote_tracer_) remote_tracer_->isolate_ = nullptr;
remote_tracer_ = tracer;
@@ -23,21 +24,49 @@ void LocalEmbedderHeapTracer::SetRemoteTracer(EmbedderHeapTracer* tracer) {
remote_tracer_->isolate_ = reinterpret_cast<v8::Isolate*>(isolate_);
}
+void LocalEmbedderHeapTracer::SetCppHeap(CppHeap* cpp_heap) {
+ CHECK_NULL(remote_tracer_);
+ cpp_heap_ = cpp_heap;
+}
+
+namespace {
+CppHeap::GarbageCollectionFlags ConvertTraceFlags(
+ EmbedderHeapTracer::TraceFlags flags) {
+ CppHeap::GarbageCollectionFlags result;
+ if (flags & EmbedderHeapTracer::TraceFlags::kForced)
+ result |= CppHeap::GarbageCollectionFlagValues::kForced;
+ if (flags & EmbedderHeapTracer::TraceFlags::kReduceMemory)
+ result |= CppHeap::GarbageCollectionFlagValues::kReduceMemory;
+ return result;
+}
+} // namespace
+
void LocalEmbedderHeapTracer::TracePrologue(
EmbedderHeapTracer::TraceFlags flags) {
if (!InUse()) return;
embedder_worklist_empty_ = false;
- remote_tracer_->TracePrologue(flags);
+ if (cpp_heap_)
+ cpp_heap()->TracePrologue(ConvertTraceFlags(flags));
+ else
+ remote_tracer_->TracePrologue(flags);
}
void LocalEmbedderHeapTracer::TraceEpilogue() {
if (!InUse()) return;
- EmbedderHeapTracer::TraceSummary summary;
- remote_tracer_->TraceEpilogue(&summary);
- if (summary.allocated_size == SIZE_MAX) return;
- UpdateRemoteStats(summary.allocated_size, summary.time);
+ // Resetting to state unknown as there may be follow-up garbage collections
+ // triggered from callbacks that have a different stack state.
+ embedder_stack_state_ =
+ EmbedderHeapTracer::EmbedderStackState::kMayContainHeapPointers;
+
+ if (cpp_heap_) {
+ cpp_heap()->TraceEpilogue();
+ } else {
+ EmbedderHeapTracer::TraceSummary summary;
+ remote_tracer_->TraceEpilogue(&summary);
+ UpdateRemoteStats(summary.allocated_size, summary.time);
+ }
}
void LocalEmbedderHeapTracer::UpdateRemoteStats(size_t allocated_size,
@@ -55,21 +84,24 @@ void LocalEmbedderHeapTracer::UpdateRemoteStats(size_t allocated_size,
void LocalEmbedderHeapTracer::EnterFinalPause() {
if (!InUse()) return;
- remote_tracer_->EnterFinalPause(embedder_stack_state_);
- // Resetting to state unknown as there may be follow up garbage collections
- // triggered from callbacks that have a different stack state.
- embedder_stack_state_ =
- EmbedderHeapTracer::EmbedderStackState::kMayContainHeapPointers;
+ if (cpp_heap_)
+ cpp_heap()->EnterFinalPause(embedder_stack_state_);
+ else
+ remote_tracer_->EnterFinalPause(embedder_stack_state_);
}
-bool LocalEmbedderHeapTracer::Trace(double deadline) {
+bool LocalEmbedderHeapTracer::Trace(double max_duration) {
if (!InUse()) return true;
- return remote_tracer_->AdvanceTracing(deadline);
+ if (cpp_heap_)
+ return cpp_heap()->AdvanceTracing(max_duration);
+ else
+ return remote_tracer_->AdvanceTracing(max_duration);
}
bool LocalEmbedderHeapTracer::IsRemoteTracingDone() {
- return !InUse() || remote_tracer_->IsTracingDone();
+ return !InUse() || (cpp_heap_ ? cpp_heap()->IsTracingDone()
+ : remote_tracer_->IsTracingDone());
}
void LocalEmbedderHeapTracer::SetEmbedderStackStateForNextFinalization(
@@ -107,13 +139,16 @@ bool ExtractWrappableInfo(Isolate* isolate, JSObject js_object,
LocalEmbedderHeapTracer::ProcessingScope::ProcessingScope(
LocalEmbedderHeapTracer* tracer)
- : tracer_(tracer), wrapper_descriptor_(tracer->wrapper_descriptor_) {
+ : tracer_(tracer), wrapper_descriptor_(tracer->wrapper_descriptor()) {
wrapper_cache_.reserve(kWrapperCacheSize);
}
LocalEmbedderHeapTracer::ProcessingScope::~ProcessingScope() {
if (!wrapper_cache_.empty()) {
- tracer_->remote_tracer()->RegisterV8References(std::move(wrapper_cache_));
+ if (tracer_->cpp_heap_)
+ tracer_->cpp_heap()->RegisterV8References(std::move(wrapper_cache_));
+ else
+ tracer_->remote_tracer_->RegisterV8References(std::move(wrapper_cache_));
}
}
@@ -121,7 +156,7 @@ LocalEmbedderHeapTracer::WrapperInfo
LocalEmbedderHeapTracer::ExtractWrapperInfo(Isolate* isolate,
JSObject js_object) {
WrapperInfo info;
- if (ExtractWrappableInfo(isolate, js_object, wrapper_descriptor_, &info)) {
+ if (ExtractWrappableInfo(isolate, js_object, wrapper_descriptor(), &info)) {
return info;
}
return {nullptr, nullptr};
@@ -140,7 +175,10 @@ void LocalEmbedderHeapTracer::ProcessingScope::TracePossibleWrapper(
void LocalEmbedderHeapTracer::ProcessingScope::FlushWrapperCacheIfFull() {
if (wrapper_cache_.size() == wrapper_cache_.capacity()) {
- tracer_->remote_tracer()->RegisterV8References(std::move(wrapper_cache_));
+ if (tracer_->cpp_heap_)
+ tracer_->cpp_heap()->RegisterV8References(std::move(wrapper_cache_));
+ else
+ tracer_->remote_tracer_->RegisterV8References(std::move(wrapper_cache_));
wrapper_cache_.clear();
wrapper_cache_.reserve(kWrapperCacheSize);
}
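
The embedder-tracing.cc changes make LocalEmbedderHeapTracer dispatch to exactly one backend, either a remote EmbedderHeapTracer or an attached CppHeap, with checks ensuring the two are never set at the same time. A minimal sketch of that mutually exclusive dispatch using hypothetical stand-in interfaces:

  #include <cassert>

  struct RemoteTracerModel { bool AdvanceTracing(double) { return true; } };
  struct CppHeapModel      { bool AdvanceTracing(double) { return true; } };

  class LocalTracerModel {
   public:
    void SetRemoteTracer(RemoteTracerModel* t) { assert(!cpp_heap_); remote_ = t; }
    void SetCppHeap(CppHeapModel* h)           { assert(!remote_);   cpp_heap_ = h; }

    bool InUse() const { return cpp_heap_ || remote_ != nullptr; }

    bool Trace(double max_duration) {
      if (!InUse()) return true;   // nothing to trace, report "done"
      return cpp_heap_ ? cpp_heap_->AdvanceTracing(max_duration)
                       : remote_->AdvanceTracing(max_duration);
    }

   private:
    RemoteTracerModel* remote_ = nullptr;
    CppHeapModel* cpp_heap_ = nullptr;
  };
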
diff --git a/deps/v8/src/heap/embedder-tracing.h b/deps/v8/src/heap/embedder-tracing.h
index 1f15a7e826..6b08488aa6 100644
--- a/deps/v8/src/heap/embedder-tracing.h
+++ b/deps/v8/src/heap/embedder-tracing.h
@@ -9,7 +9,9 @@
#include "include/v8-embedder-heap.h"
#include "include/v8-traced-handle.h"
#include "src/common/globals.h"
+#include "src/execution/isolate.h"
#include "src/flags/flags.h"
+#include "src/heap/cppgc-js/cpp-heap.h"
namespace v8 {
namespace internal {
@@ -76,12 +78,19 @@ class V8_EXPORT_PRIVATE LocalEmbedderHeapTracer final {
~LocalEmbedderHeapTracer() {
if (remote_tracer_) remote_tracer_->isolate_ = nullptr;
+ // CppHeap is not detached from Isolate here. Detaching is done explicitly
+ // on Isolate/Heap/CppHeap destruction.
}
- bool InUse() const { return remote_tracer_ != nullptr; }
- EmbedderHeapTracer* remote_tracer() const { return remote_tracer_; }
+ bool InUse() const { return cpp_heap_ || (remote_tracer_ != nullptr); }
+ // This method doesn't take CppHeap into account.
+ EmbedderHeapTracer* remote_tracer() const {
+ DCHECK_NULL(cpp_heap_);
+ return remote_tracer_;
+ }
void SetRemoteTracer(EmbedderHeapTracer* tracer);
+ void SetCppHeap(CppHeap* cpp_heap);
void TracePrologue(EmbedderHeapTracer::TraceFlags flags);
void TraceEpilogue();
void EnterFinalPause();
@@ -124,6 +133,7 @@ class V8_EXPORT_PRIVATE LocalEmbedderHeapTracer final {
WrapperInfo ExtractWrapperInfo(Isolate* isolate, JSObject js_object);
void SetWrapperDescriptor(const WrapperDescriptor& wrapper_descriptor) {
+ DCHECK_NULL(cpp_heap_);
wrapper_descriptor_ = wrapper_descriptor;
}
@@ -135,6 +145,10 @@ class V8_EXPORT_PRIVATE LocalEmbedderHeapTracer final {
void NotifyEmptyEmbedderStack();
+ EmbedderHeapTracer::EmbedderStackState embedder_stack_state() const {
+ return embedder_stack_state_;
+ }
+
private:
static constexpr size_t kEmbedderAllocatedThreshold = 128 * KB;
@@ -150,8 +164,23 @@ class V8_EXPORT_PRIVATE LocalEmbedderHeapTracer final {
WrapperDescriptor::kUnknownEmbedderId);
}
+ CppHeap* cpp_heap() {
+ DCHECK_NOT_NULL(cpp_heap_);
+ DCHECK_NULL(remote_tracer_);
+ DCHECK_IMPLIES(isolate_, cpp_heap_ == isolate_->heap()->cpp_heap());
+ return cpp_heap_;
+ }
+
+ WrapperDescriptor wrapper_descriptor() {
+ if (cpp_heap_)
+ return cpp_heap()->wrapper_descriptor();
+ else
+ return wrapper_descriptor_;
+ }
+
Isolate* const isolate_;
EmbedderHeapTracer* remote_tracer_ = nullptr;
+ CppHeap* cpp_heap_ = nullptr;
DefaultEmbedderRootsHandler default_embedder_roots_handler_;
EmbedderHeapTracer::EmbedderStackState embedder_stack_state_ =
diff --git a/deps/v8/src/heap/factory-base.cc b/deps/v8/src/heap/factory-base.cc
index 794322e6dd..ff1056ee57 100644
--- a/deps/v8/src/heap/factory-base.cc
+++ b/deps/v8/src/heap/factory-base.cc
@@ -81,7 +81,10 @@ Handle<CodeDataContainer> FactoryBase<Impl>::NewCodeDataContainer(
SKIP_WRITE_BARRIER);
data_container.set_kind_specific_flags(flags, kRelaxedStore);
if (V8_EXTERNAL_CODE_SPACE_BOOL) {
- impl()->SetExternalCodeSpaceInDataContainer(data_container);
+ Isolate* isolate_for_heap_sandbox = impl()->isolate_for_heap_sandbox();
+ data_container.AllocateExternalPointerEntries(isolate_for_heap_sandbox);
+ data_container.set_raw_code(Smi::zero(), SKIP_WRITE_BARRIER);
+ data_container.set_code_entry_point(isolate_for_heap_sandbox, kNullAddress);
}
data_container.clear_padding();
return handle(data_container, isolate());
@@ -306,6 +309,21 @@ Handle<SharedFunctionInfo> FactoryBase<Impl>::NewSharedFunctionInfoForLiteral(
}
template <typename Impl>
+Handle<SharedFunctionInfo> FactoryBase<Impl>::CloneSharedFunctionInfo(
+ Handle<SharedFunctionInfo> other) {
+ Map map = read_only_roots().shared_function_info_map();
+
+ SharedFunctionInfo shared =
+ SharedFunctionInfo::cast(NewWithImmortalMap(map, AllocationType::kOld));
+ DisallowGarbageCollection no_gc;
+
+ shared.CopyFrom(*other);
+ shared.clear_padding();
+
+ return handle(shared, isolate());
+}
+
+template <typename Impl>
Handle<PreparseData> FactoryBase<Impl>::NewPreparseData(int data_length,
int children_length) {
int size = PreparseData::SizeFor(data_length, children_length);
@@ -340,6 +358,29 @@ FactoryBase<Impl>::NewUncompiledDataWithPreparseData(
}
template <typename Impl>
+Handle<UncompiledDataWithoutPreparseDataWithJob>
+FactoryBase<Impl>::NewUncompiledDataWithoutPreparseDataWithJob(
+ Handle<String> inferred_name, int32_t start_position,
+ int32_t end_position) {
+ return TorqueGeneratedFactory<
+ Impl>::NewUncompiledDataWithoutPreparseDataWithJob(inferred_name,
+ start_position,
+ end_position,
+ kNullAddress,
+ AllocationType::kOld);
+}
+
+template <typename Impl>
+Handle<UncompiledDataWithPreparseDataAndJob>
+FactoryBase<Impl>::NewUncompiledDataWithPreparseDataAndJob(
+ Handle<String> inferred_name, int32_t start_position, int32_t end_position,
+ Handle<PreparseData> preparse_data) {
+ return TorqueGeneratedFactory<Impl>::NewUncompiledDataWithPreparseDataAndJob(
+ inferred_name, start_position, end_position, preparse_data, kNullAddress,
+ AllocationType::kOld);
+}
+
+template <typename Impl>
Handle<SharedFunctionInfo> FactoryBase<Impl>::NewSharedFunctionInfo(
MaybeHandle<String> maybe_name, MaybeHandle<HeapObject> maybe_function_data,
Builtin builtin, FunctionKind kind) {
@@ -578,19 +619,22 @@ Handle<SeqTwoByteString> FactoryBase<Impl>::NewTwoByteInternalizedString(
}
template <typename Impl>
-MaybeHandle<SeqOneByteString> FactoryBase<Impl>::NewRawOneByteString(
- int length, AllocationType allocation) {
+template <typename SeqStringT>
+MaybeHandle<SeqStringT> FactoryBase<Impl>::NewRawStringWithMap(
+ int length, Map map, AllocationType allocation) {
+ DCHECK(SeqStringT::IsCompatibleMap(map, read_only_roots()));
+ DCHECK_IMPLIES(!StringShape(map).IsShared(),
+ RefineAllocationTypeForInPlaceInternalizableString(
+ allocation, map) == allocation);
if (length > String::kMaxLength || length < 0) {
- THROW_NEW_ERROR(isolate(), NewInvalidStringLengthError(), SeqOneByteString);
+ THROW_NEW_ERROR(isolate(), NewInvalidStringLengthError(), SeqStringT);
}
DCHECK_GT(length, 0); // Use Factory::empty_string() instead.
- int size = SeqOneByteString::SizeFor(length);
- DCHECK_GE(SeqOneByteString::kMaxSize, size);
+ int size = SeqStringT::SizeFor(length);
+ DCHECK_GE(SeqStringT::kMaxSize, size);
- Map map = read_only_roots().one_byte_string_map();
- SeqOneByteString string = SeqOneByteString::cast(AllocateRawWithImmortalMap(
- size, RefineAllocationTypeForInPlaceInternalizableString(allocation, map),
- map));
+ SeqStringT string =
+ SeqStringT::cast(AllocateRawWithImmortalMap(size, allocation, map));
DisallowGarbageCollection no_gc;
string.set_length(length);
string.set_raw_hash_field(String::kEmptyHashField);
@@ -599,24 +643,37 @@ MaybeHandle<SeqOneByteString> FactoryBase<Impl>::NewRawOneByteString(
}
template <typename Impl>
-MaybeHandle<SeqTwoByteString> FactoryBase<Impl>::NewRawTwoByteString(
+MaybeHandle<SeqOneByteString> FactoryBase<Impl>::NewRawOneByteString(
int length, AllocationType allocation) {
- if (length > String::kMaxLength || length < 0) {
- THROW_NEW_ERROR(isolate(), NewInvalidStringLengthError(), SeqTwoByteString);
- }
- DCHECK_GT(length, 0); // Use Factory::empty_string() instead.
- int size = SeqTwoByteString::SizeFor(length);
- DCHECK_GE(SeqTwoByteString::kMaxSize, size);
+ Map map = read_only_roots().one_byte_string_map();
+ return NewRawStringWithMap<SeqOneByteString>(
+ length, map,
+ RefineAllocationTypeForInPlaceInternalizableString(allocation, map));
+}
+template <typename Impl>
+MaybeHandle<SeqTwoByteString> FactoryBase<Impl>::NewRawTwoByteString(
+ int length, AllocationType allocation) {
Map map = read_only_roots().string_map();
- SeqTwoByteString string = SeqTwoByteString::cast(AllocateRawWithImmortalMap(
- size, RefineAllocationTypeForInPlaceInternalizableString(allocation, map),
- map));
- DisallowGarbageCollection no_gc;
- string.set_length(length);
- string.set_raw_hash_field(String::kEmptyHashField);
- DCHECK_EQ(size, string.Size());
- return handle(string, isolate());
+ return NewRawStringWithMap<SeqTwoByteString>(
+ length, map,
+ RefineAllocationTypeForInPlaceInternalizableString(allocation, map));
+}
+
+template <typename Impl>
+MaybeHandle<SeqOneByteString> FactoryBase<Impl>::NewRawSharedOneByteString(
+ int length) {
+ return NewRawStringWithMap<SeqOneByteString>(
+ length, read_only_roots().shared_one_byte_string_map(),
+ AllocationType::kSharedOld);
+}
+
+template <typename Impl>
+MaybeHandle<SeqTwoByteString> FactoryBase<Impl>::NewRawSharedTwoByteString(
+ int length) {
+ return NewRawStringWithMap<SeqTwoByteString>(
+ length, read_only_roots().shared_string_map(),
+ AllocationType::kSharedOld);
}
template <typename Impl>
@@ -964,9 +1021,11 @@ MaybeHandle<Map> FactoryBase<Impl>::GetInPlaceInternalizedStringMap(
MaybeHandle<Map> map;
switch (instance_type) {
case STRING_TYPE:
+ case SHARED_STRING_TYPE:
map = read_only_roots().internalized_string_map_handle();
break;
case ONE_BYTE_STRING_TYPE:
+ case SHARED_ONE_BYTE_STRING_TYPE:
map = read_only_roots().one_byte_internalized_string_map_handle();
break;
case EXTERNAL_STRING_TYPE:
@@ -984,6 +1043,25 @@ MaybeHandle<Map> FactoryBase<Impl>::GetInPlaceInternalizedStringMap(
}
template <typename Impl>
+Handle<Map> FactoryBase<Impl>::GetStringMigrationSentinelMap(
+ InstanceType from_string_type) {
+ Handle<Map> map;
+ switch (from_string_type) {
+ case SHARED_STRING_TYPE:
+ map = read_only_roots().seq_string_migration_sentinel_map_handle();
+ break;
+ case SHARED_ONE_BYTE_STRING_TYPE:
+ map =
+ read_only_roots().one_byte_seq_string_migration_sentinel_map_handle();
+ break;
+ default:
+ UNREACHABLE();
+ }
+ DCHECK_EQ(map->instance_type(), from_string_type);
+ return map;
+}
+
+template <typename Impl>
AllocationType
FactoryBase<Impl>::RefineAllocationTypeForInPlaceInternalizableString(
AllocationType allocation, Map string_map) {
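
The factory-base.cc string changes fold NewRawOneByteString and NewRawTwoByteString into one NewRawStringWithMap<SeqStringT> template that takes the concrete map and allocation type; the new shared-string constructors then reuse it with the shared maps and AllocationType::kSharedOld. A self-contained sketch of the consolidation pattern with toy string types and heap allocation replaced by a plain buffer:

  #include <cstddef>
  #include <cstdint>
  #include <memory>
  #include <optional>

  struct OneByteStringModel { static constexpr int kCharSize = 1; };
  struct TwoByteStringModel { static constexpr int kCharSize = 2; };

  constexpr int kMaxLengthModel = 1 << 28;   // stands in for String::kMaxLength

  // One templated helper replaces two near-identical allocation routines.
  template <typename StringT>
  std::optional<std::unique_ptr<uint8_t[]>> NewRawStringWithMap(int length) {
    if (length > kMaxLengthModel || length < 0) return std::nullopt;  // real code throws
    const size_t size = static_cast<size_t>(length) * StringT::kCharSize;
    return std::make_unique<uint8_t[]>(size);  // stands in for AllocateRawWithImmortalMap
  }

  auto NewRawOneByteString(int length) {
    return NewRawStringWithMap<OneByteStringModel>(length);
  }
  auto NewRawTwoByteString(int length) {
    return NewRawStringWithMap<TwoByteStringModel>(length);
  }
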
diff --git a/deps/v8/src/heap/factory-base.h b/deps/v8/src/heap/factory-base.h
index ba44404b32..c3aa816d0b 100644
--- a/deps/v8/src/heap/factory-base.h
+++ b/deps/v8/src/heap/factory-base.h
@@ -162,6 +162,11 @@ class EXPORT_TEMPLATE_DECLARE(V8_EXPORT_PRIVATE) FactoryBase
Handle<SharedFunctionInfo> NewSharedFunctionInfoForLiteral(
FunctionLiteral* literal, Handle<Script> script, bool is_toplevel);
+ // Create a copy of a given SharedFunctionInfo for use as a placeholder in
+ // off-thread compilation
+ Handle<SharedFunctionInfo> CloneSharedFunctionInfo(
+ Handle<SharedFunctionInfo> other);
+
Handle<PreparseData> NewPreparseData(int data_length, int children_length);
Handle<UncompiledDataWithoutPreparseData>
@@ -173,6 +178,17 @@ class EXPORT_TEMPLATE_DECLARE(V8_EXPORT_PRIVATE) FactoryBase
Handle<String> inferred_name, int32_t start_position,
int32_t end_position, Handle<PreparseData>);
+ Handle<UncompiledDataWithoutPreparseDataWithJob>
+ NewUncompiledDataWithoutPreparseDataWithJob(Handle<String> inferred_name,
+ int32_t start_position,
+ int32_t end_position);
+
+ Handle<UncompiledDataWithPreparseDataAndJob>
+ NewUncompiledDataWithPreparseDataAndJob(Handle<String> inferred_name,
+ int32_t start_position,
+ int32_t end_position,
+ Handle<PreparseData>);
+
// Allocates a FeedbackMetadata object and zeroes the data section.
Handle<FeedbackMetadata> NewFeedbackMetadata(
int slot_count, int create_closure_slot_count,
@@ -214,6 +230,11 @@ class EXPORT_TEMPLATE_DECLARE(V8_EXPORT_PRIVATE) FactoryBase
Handle<String> left, Handle<String> right, int length, bool one_byte,
AllocationType allocation = AllocationType::kYoung);
+ V8_WARN_UNUSED_RESULT MaybeHandle<SeqOneByteString> NewRawSharedOneByteString(
+ int length);
+ V8_WARN_UNUSED_RESULT MaybeHandle<SeqTwoByteString> NewRawSharedTwoByteString(
+ int length);
+
// Allocates a new BigInt with {length} digits. Only to be used by
// MutableBigInt::New*.
Handle<FreshlyAllocatedBigInt> NewBigInt(
@@ -242,6 +263,8 @@ class EXPORT_TEMPLATE_DECLARE(V8_EXPORT_PRIVATE) FactoryBase
MaybeHandle<Map> GetInPlaceInternalizedStringMap(Map from_string_map);
+ Handle<Map> GetStringMigrationSentinelMap(InstanceType from_string_type);
+
AllocationType RefineAllocationTypeForInPlaceInternalizableString(
AllocationType allocation, Map string_map);
@@ -259,7 +282,7 @@ class EXPORT_TEMPLATE_DECLARE(V8_EXPORT_PRIVATE) FactoryBase
HeapObject AllocateRawWithImmortalMap(
int size, AllocationType allocation, Map map,
- AllocationAlignment alignment = kWordAligned);
+ AllocationAlignment alignment = kTaggedAligned);
HeapObject NewWithImmortalMap(Map map, AllocationType allocation);
Handle<FixedArray> NewFixedArrayWithFiller(Handle<Map> map, int length,
@@ -270,10 +293,14 @@ class EXPORT_TEMPLATE_DECLARE(V8_EXPORT_PRIVATE) FactoryBase
Handle<SharedFunctionInfo> NewSharedFunctionInfo(
MaybeHandle<String> maybe_name,
MaybeHandle<HeapObject> maybe_function_data, Builtin builtin,
- FunctionKind kind = kNormalFunction);
+ FunctionKind kind = FunctionKind::kNormalFunction);
Handle<String> MakeOrFindTwoCharacterString(uint16_t c1, uint16_t c2);
+ template <typename SeqStringT>
+ MaybeHandle<SeqStringT> NewRawStringWithMap(int length, Map map,
+ AllocationType allocation);
+
private:
friend class WebSnapshotDeserializer;
Impl* impl() { return static_cast<Impl*>(this); }
@@ -281,7 +308,7 @@ class EXPORT_TEMPLATE_DECLARE(V8_EXPORT_PRIVATE) FactoryBase
ReadOnlyRoots read_only_roots() { return impl()->read_only_roots(); }
HeapObject AllocateRaw(int size, AllocationType allocation,
- AllocationAlignment alignment = kWordAligned);
+ AllocationAlignment alignment = kTaggedAligned);
friend TorqueGeneratedFactory<Impl>;
};
diff --git a/deps/v8/src/heap/factory.cc b/deps/v8/src/heap/factory.cc
index eddacd32c7..9e05c52472 100644
--- a/deps/v8/src/heap/factory.cc
+++ b/deps/v8/src/heap/factory.cc
@@ -378,10 +378,10 @@ HeapObject Factory::New(Handle<Map> map, AllocationType allocation) {
return result;
}
-Handle<HeapObject> Factory::NewFillerObject(int size, bool double_align,
+Handle<HeapObject> Factory::NewFillerObject(int size,
+ AllocationAlignment alignment,
AllocationType allocation,
AllocationOrigin origin) {
- AllocationAlignment alignment = double_align ? kDoubleAligned : kWordAligned;
Heap* heap = isolate()->heap();
HeapObject result = heap->AllocateRawWith<Heap::kRetryOrFail>(
size, allocation, origin, alignment);
@@ -877,12 +877,12 @@ namespace {
} // namespace
-StringInternalizationStrategy Factory::ComputeInternalizationStrategyForString(
+StringTransitionStrategy Factory::ComputeInternalizationStrategyForString(
Handle<String> string, MaybeHandle<Map>* internalized_map) {
// Do not internalize young strings in-place: This allows us to ignore both
// string table and stub cache on scavenges.
if (Heap::InYoungGeneration(*string)) {
- return StringInternalizationStrategy::kCopy;
+ return StringTransitionStrategy::kCopy;
}
DCHECK_NOT_NULL(internalized_map);
DisallowGarbageCollection no_gc;
@@ -892,12 +892,12 @@ StringInternalizationStrategy Factory::ComputeInternalizationStrategyForString(
Map map = string->map();
*internalized_map = GetInPlaceInternalizedStringMap(map);
if (!internalized_map->is_null()) {
- return StringInternalizationStrategy::kInPlace;
+ return StringTransitionStrategy::kInPlace;
}
if (InstanceTypeChecker::IsInternalizedString(map.instance_type())) {
- return StringInternalizationStrategy::kAlreadyInternalized;
+ return StringTransitionStrategy::kAlreadyTransitioned;
}
- return StringInternalizationStrategy::kCopy;
+ return StringTransitionStrategy::kCopy;
}
template <class StringClass>
@@ -921,6 +921,31 @@ template Handle<ExternalOneByteString>
template Handle<ExternalTwoByteString>
Factory::InternalizeExternalString<ExternalTwoByteString>(Handle<String>);
+StringTransitionStrategy Factory::ComputeSharingStrategyForString(
+ Handle<String> string, MaybeHandle<Map>* shared_map) {
+ DCHECK(FLAG_shared_string_table);
+ // Do not share young strings in-place: there is no shared young space.
+ if (Heap::InYoungGeneration(*string)) {
+ return StringTransitionStrategy::kCopy;
+ }
+ DCHECK_NOT_NULL(shared_map);
+ DisallowGarbageCollection no_gc;
+ InstanceType instance_type = string->map().instance_type();
+ if (StringShape(instance_type).IsShared()) {
+ return StringTransitionStrategy::kAlreadyTransitioned;
+ }
+ switch (instance_type) {
+ case STRING_TYPE:
+ *shared_map = read_only_roots().shared_string_map_handle();
+ return StringTransitionStrategy::kInPlace;
+ case ONE_BYTE_STRING_TYPE:
+ *shared_map = read_only_roots().shared_one_byte_string_map_handle();
+ return StringTransitionStrategy::kInPlace;
+ default:
+ return StringTransitionStrategy::kCopy;
+ }
+}
+
Handle<String> Factory::LookupSingleCharacterStringFromCode(uint16_t code) {
if (code <= unibrow::Latin1::kMaxChar) {
{
@@ -1346,14 +1371,6 @@ void Factory::AddToScriptList(Handle<Script> script) {
isolate()->heap()->set_script_list(*scripts);
}
-void Factory::SetExternalCodeSpaceInDataContainer(
- CodeDataContainer data_container) {
- DCHECK(V8_EXTERNAL_CODE_SPACE_BOOL);
- data_container.AllocateExternalPointerEntries(isolate());
- data_container.set_raw_code(Smi::zero(), SKIP_WRITE_BARRIER);
- data_container.set_code_entry_point(isolate(), kNullAddress);
-}
-
Handle<Script> Factory::CloneScript(Handle<Script> script) {
Heap* heap = isolate()->heap();
int script_id = isolate()->GetNextScriptId();
@@ -1445,20 +1462,36 @@ Handle<Foreign> Factory::NewForeign(Address addr) {
Handle<WasmTypeInfo> Factory::NewWasmTypeInfo(
Address type_address, Handle<Map> opt_parent, int instance_size_bytes,
Handle<WasmInstanceObject> instance) {
- // We pretenure WasmTypeInfo objects because they are refererenced by Maps,
- // which are assumed to be long-lived. The supertypes list is constant
- // after initialization, so we pretenure that too.
- // The subtypes list, however, is expected to grow (and hence be replaced),
- // so we don't pretenure it.
+ // We pretenure WasmTypeInfo objects for two reasons:
+ // (1) They are referenced by Maps, which are assumed to be long-lived,
+ // so pretenuring the WTI is a bit more efficient.
+ // (2) The object visitors need to read the WasmTypeInfo to find tagged
+ // fields in Wasm structs; in the middle of a GC cycle that's only
+ // safe to do if the WTI is in old space.
+ // The supertypes list is constant after initialization, so we pretenure
+ // that too. The subtypes list, however, is expected to grow (and hence be
+ // replaced), so we don't pretenure it.
Handle<ArrayList> subtypes = ArrayList::New(isolate(), 0);
Handle<FixedArray> supertypes;
if (opt_parent.is_null()) {
- supertypes = NewFixedArray(0);
+ supertypes = NewFixedArray(wasm::kMinimumSupertypeArraySize);
+ for (int i = 0; i < supertypes->length(); i++) {
+ supertypes->set(i, *undefined_value());
+ }
} else {
- supertypes = CopyArrayAndGrow(
- handle(opt_parent->wasm_type_info().supertypes(), isolate()), 1,
- AllocationType::kOld);
- supertypes->set(supertypes->length() - 1, *opt_parent);
+ Handle<FixedArray> parent_supertypes =
+ handle(opt_parent->wasm_type_info().supertypes(), isolate());
+ int last_defined_index = parent_supertypes->length() - 1;
+ while (last_defined_index >= 0 &&
+ parent_supertypes->get(last_defined_index).IsUndefined()) {
+ last_defined_index--;
+ }
+ if (last_defined_index == parent_supertypes->length() - 1) {
+ supertypes = CopyArrayAndGrow(parent_supertypes, 1, AllocationType::kOld);
+ } else {
+ supertypes = CopyFixedArray(parent_supertypes);
+ }
+ supertypes->set(last_defined_index + 1, *opt_parent);
}
Map map = *wasm_type_info_map();
WasmTypeInfo result = WasmTypeInfo::cast(AllocateRawWithImmortalMap(
@@ -1466,7 +1499,7 @@ Handle<WasmTypeInfo> Factory::NewWasmTypeInfo(
DisallowGarbageCollection no_gc;
result.AllocateExternalPointerEntries(isolate());
result.set_foreign_address(isolate(), type_address);
- result.set_supertypes(*supertypes, SKIP_WRITE_BARRIER);
+ result.set_supertypes(*supertypes);
result.set_subtypes(*subtypes);
result.set_instance_size(instance_size_bytes);
result.set_instance(*instance);
@@ -1479,7 +1512,7 @@ Handle<WasmApiFunctionRef> Factory::NewWasmApiFunctionRef(
auto result = WasmApiFunctionRef::cast(AllocateRawWithImmortalMap(
map.instance_size(), AllocationType::kOld, map));
DisallowGarbageCollection no_gc;
- result.set_foreign_address(isolate(), isolate()->isolate_root());
+ result.set_isolate_root(isolate()->isolate_root());
result.set_native_context(*isolate()->native_context());
if (!callable.is_null()) {
result.set_callable(*callable);
@@ -1489,43 +1522,55 @@ Handle<WasmApiFunctionRef> Factory::NewWasmApiFunctionRef(
return handle(result, isolate());
}
+Handle<WasmInternalFunction> Factory::NewWasmInternalFunction(
+ Address opt_call_target, Handle<HeapObject> ref, Handle<Map> rtt) {
+ HeapObject raw = AllocateRaw(rtt->instance_size(), AllocationType::kOld);
+ raw.set_map_after_allocation(*rtt);
+ WasmInternalFunction result = WasmInternalFunction::cast(raw);
+ DisallowGarbageCollection no_gc;
+ result.AllocateExternalPointerEntries(isolate());
+ result.set_foreign_address(isolate(), opt_call_target);
+ result.set_ref(*ref);
+ // Default values, will be overwritten by the caller.
+ result.set_code(isolate()->builtins()->code(Builtin::kAbort));
+ result.set_external(*undefined_value());
+ return handle(result, isolate());
+}
+
Handle<WasmJSFunctionData> Factory::NewWasmJSFunctionData(
Address opt_call_target, Handle<JSReceiver> callable, int return_count,
int parameter_count, Handle<PodArray<wasm::ValueType>> serialized_sig,
- Handle<Code> wrapper_code) {
+ Handle<Code> wrapper_code, Handle<Map> rtt) {
Handle<WasmApiFunctionRef> ref = NewWasmApiFunctionRef(callable);
+ Handle<WasmInternalFunction> internal =
+ NewWasmInternalFunction(opt_call_target, ref, rtt);
Map map = *wasm_js_function_data_map();
WasmJSFunctionData result =
WasmJSFunctionData::cast(AllocateRawWithImmortalMap(
map.instance_size(), AllocationType::kOld, map));
DisallowGarbageCollection no_gc;
- result.AllocateExternalPointerEntries(isolate());
- result.set_foreign_address(isolate(), opt_call_target);
- result.set_ref(*ref);
+ result.set_internal(*internal);
result.set_wrapper_code(*wrapper_code);
result.set_serialized_return_count(return_count);
result.set_serialized_parameter_count(parameter_count);
result.set_serialized_signature(*serialized_sig);
- // Default value, will be overwritten by the caller.
- result.set_wasm_to_js_wrapper_code(
- isolate()->builtins()->code(Builtin::kAbort));
return handle(result, isolate());
}
Handle<WasmExportedFunctionData> Factory::NewWasmExportedFunctionData(
Handle<Code> export_wrapper, Handle<WasmInstanceObject> instance,
Address call_target, Handle<Object> ref, int func_index,
- Address sig_address, int wrapper_budget) {
+ Address sig_address, int wrapper_budget, Handle<Map> rtt) {
Handle<Foreign> sig_foreign = NewForeign(sig_address);
+ Handle<WasmInternalFunction> internal =
+ NewWasmInternalFunction(call_target, Handle<HeapObject>::cast(ref), rtt);
Map map = *wasm_exported_function_data_map();
WasmExportedFunctionData result =
WasmExportedFunctionData::cast(AllocateRawWithImmortalMap(
map.instance_size(), AllocationType::kOld, map));
DisallowGarbageCollection no_gc;
- result.AllocateExternalPointerEntries(isolate());
- result.set_foreign_address(isolate(), call_target);
DCHECK(ref->IsWasmInstanceObject() || ref->IsWasmApiFunctionRef());
- result.set_ref(*ref);
+ result.set_internal(*internal);
result.set_wrapper_code(*export_wrapper);
result.set_instance(*instance);
result.set_function_index(func_index);
@@ -1539,17 +1584,17 @@ Handle<WasmExportedFunctionData> Factory::NewWasmExportedFunctionData(
Handle<WasmCapiFunctionData> Factory::NewWasmCapiFunctionData(
Address call_target, Handle<Foreign> embedder_data,
- Handle<Code> wrapper_code,
+ Handle<Code> wrapper_code, Handle<Map> rtt,
Handle<PodArray<wasm::ValueType>> serialized_sig) {
Handle<WasmApiFunctionRef> ref = NewWasmApiFunctionRef(Handle<JSReceiver>());
+ Handle<WasmInternalFunction> internal =
+ NewWasmInternalFunction(call_target, ref, rtt);
Map map = *wasm_capi_function_data_map();
WasmCapiFunctionData result =
WasmCapiFunctionData::cast(AllocateRawWithImmortalMap(
map.instance_size(), AllocationType::kOld, map));
DisallowGarbageCollection no_gc;
- result.AllocateExternalPointerEntries(isolate());
- result.set_foreign_address(isolate(), call_target);
- result.set_ref(*ref);
+ result.set_internal(*internal);
result.set_wrapper_code(*wrapper_code);
result.set_embedder_data(*embedder_data);
result.set_serialized_signature(*serialized_sig);
@@ -1617,7 +1662,8 @@ Handle<SharedFunctionInfo> Factory::NewSharedFunctionInfoForWasmJSFunction(
Handle<SharedFunctionInfo> Factory::NewSharedFunctionInfoForWasmCapiFunction(
Handle<WasmCapiFunctionData> data) {
return NewSharedFunctionInfo(MaybeHandle<String>(), data,
- Builtin::kNoBuiltinId, kConciseMethod);
+ Builtin::kNoBuiltinId,
+ FunctionKind::kConciseMethod);
}
#endif // V8_ENABLE_WEBASSEMBLY
@@ -1672,8 +1718,9 @@ Handle<PropertyCell> Factory::NewPropertyCell(Handle<Name> name,
PropertyCell cell = PropertyCell::cast(AllocateRawWithImmortalMap(
PropertyCell::kSize, allocation, *global_property_cell_map()));
DisallowGarbageCollection no_gc;
- cell.set_dependent_code(DependentCode::cast(*empty_weak_fixed_array()),
- SKIP_WRITE_BARRIER);
+ cell.set_dependent_code(
+ DependentCode::empty_dependent_code(ReadOnlyRoots(isolate())),
+ SKIP_WRITE_BARRIER);
WriteBarrierMode mode = allocation == AllocationType::kYoung
? SKIP_WRITE_BARRIER
: UPDATE_WRITE_BARRIER;
@@ -1765,8 +1812,9 @@ Map Factory::InitializeMap(Map map, InstanceType type, int instance_size,
map.set_prototype_validity_cell(Smi::FromInt(Map::kPrototypeChainValid),
SKIP_WRITE_BARRIER);
}
- map.set_dependent_code(DependentCode::cast(*empty_weak_fixed_array()),
- SKIP_WRITE_BARRIER);
+ map.set_dependent_code(
+ DependentCode::empty_dependent_code(ReadOnlyRoots(isolate())),
+ SKIP_WRITE_BARRIER);
map.set_raw_transitions(MaybeObject::FromSmi(Smi::zero()),
SKIP_WRITE_BARRIER);
map.SetInObjectUnusedPropertyFields(inobject_properties);
@@ -2163,6 +2211,12 @@ Handle<JSObject> Factory::NewExternal(void* value) {
return external;
}
+Handle<DeoptimizationLiteralArray> Factory::NewDeoptimizationLiteralArray(
+ int length) {
+ return Handle<DeoptimizationLiteralArray>::cast(
+ NewWeakFixedArray(length, AllocationType::kOld));
+}
+
Handle<Code> Factory::NewOffHeapTrampolineFor(Handle<Code> code,
Address off_heap_entry) {
CHECK_NOT_NULL(isolate()->embedded_blob_code());
@@ -2309,12 +2363,10 @@ Handle<JSObject> Factory::NewSlowJSObjectWithNullProto() {
}
Handle<JSObject> Factory::NewJSObjectWithNullProto() {
- Handle<JSObject> result = NewJSObject(isolate()->object_function());
- Handle<Map> new_map = Map::Copy(
- isolate(), Handle<Map>(result->map(), isolate()), "ObjectWithNullProto");
- Map::SetPrototype(isolate(), new_map, null_value());
- JSObject::MigrateToMap(isolate(), result, new_map);
- return result;
+ Handle<Map> map(isolate()->object_function()->initial_map(), isolate());
+ Handle<Map> map_with_null_proto =
+ Map::TransitionToPrototype(isolate(), map, null_value());
+ return NewJSObjectFromMap(map_with_null_proto);
}
Handle<JSGlobalObject> Factory::NewJSGlobalObject(
@@ -2350,8 +2402,8 @@ Handle<JSGlobalObject> Factory::NewJSGlobalObject(
for (InternalIndex i : map->IterateOwnDescriptors()) {
PropertyDetails details = descs->GetDetails(i);
// Only accessors are expected.
- DCHECK_EQ(kAccessor, details.kind());
- PropertyDetails d(kAccessor, details.attributes(),
+ DCHECK_EQ(PropertyKind::kAccessor, details.kind());
+ PropertyDetails d(PropertyKind::kAccessor, details.attributes(),
PropertyCellType::kMutable);
Handle<Name> name(descs->GetKey(i), isolate());
Handle<Object> value(descs->GetStrongValue(i), isolate());
@@ -2814,6 +2866,7 @@ Handle<JSArrayBufferView> Factory::NewJSArrayBufferView(
raw.set_buffer(*buffer, SKIP_WRITE_BARRIER);
raw.set_byte_offset(byte_offset);
raw.set_byte_length(byte_length);
+ raw.set_bit_field(0);
ZeroEmbedderFields(raw);
DCHECK_EQ(raw.GetEmbedderFieldCount(),
v8::ArrayBufferView::kEmbedderFieldCount);
@@ -2869,6 +2922,9 @@ Handle<JSDataView> Factory::NewJSDataView(Handle<JSArrayBuffer> buffer,
map, empty_fixed_array(), buffer, byte_offset, byte_length));
obj->set_data_pointer(
isolate(), static_cast<uint8_t*>(buffer->backing_store()) + byte_offset);
+ // TODO(v8:11111): Support creating length tracking DataViews via the API.
+ obj->set_is_length_tracking(false);
+ obj->set_is_backed_by_rab(!buffer->is_shared() && buffer->is_resizable());
return obj;
}
@@ -3763,7 +3819,7 @@ Handle<JSFunction> Factory::JSFunctionBuilder::Build() {
PrepareMap();
PrepareFeedbackCell();
- Handle<Code> code = handle(sfi_->GetCode(), isolate_);
+ Handle<Code> code = handle(FromCodeT(sfi_->GetCode()), isolate_);
Handle<JSFunction> result = BuildRaw(code);
if (code->kind() == CodeKind::BASELINE) {
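
The NewWasmTypeInfo change pre-sizes the supertypes array (padded with undefined), lets a subtype write its parent into the first undefined slot, and copies-and-grows only when the array is already full. A standalone sketch of that append-into-padding scheme, with integers standing in for type objects and -1 standing in for undefined; the minimum-size constant mirrors kMinimumSupertypeArraySize from the diff:

  #include <vector>

  constexpr int kMinimumSupertypeArraySizeModel = 3;
  constexpr int kUndefined = -1;

  std::vector<int> NewRootSupertypes() {
    // Root types start with a padded array so early subtypes can append
    // in place instead of reallocating.
    return std::vector<int>(kMinimumSupertypeArraySizeModel, kUndefined);
  }

  std::vector<int> NewChildSupertypes(const std::vector<int>& parent, int parent_id) {
    int last_defined = static_cast<int>(parent.size()) - 1;
    while (last_defined >= 0 && parent[last_defined] == kUndefined) --last_defined;

    std::vector<int> result = parent;           // copy of the parent's chain
    if (last_defined == static_cast<int>(parent.size()) - 1) {
      result.push_back(kUndefined);             // no padding left: grow by one
    }
    result[last_defined + 1] = parent_id;       // fill the first free slot
    return result;
  }
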
diff --git a/deps/v8/src/heap/factory.h b/deps/v8/src/heap/factory.h
index f620f5eb3c..a5dd9ce5a9 100644
--- a/deps/v8/src/heap/factory.h
+++ b/deps/v8/src/heap/factory.h
@@ -278,15 +278,15 @@ class V8_EXPORT_PRIVATE Factory : public FactoryBase<Factory> {
// Compute the internalization strategy for the input string.
//
- // Old-generation flat strings can be internalized by mutating their map
- // return kInPlace, along with the matching internalized string map for string
- // is stored in internalized_map.
+  // Old-generation sequential strings can be internalized by mutating their map
+  // and return kInPlace. The matching internalized string map for the string is
+  // stored in internalized_map.
//
- // Internalized strings return kAlreadyInternalized.
+ // Internalized strings return kAlreadyTransitioned.
//
// All other strings are internalized by flattening and copying and return
// kCopy.
- V8_WARN_UNUSED_RESULT StringInternalizationStrategy
+ V8_WARN_UNUSED_RESULT StringTransitionStrategy
ComputeInternalizationStrategyForString(Handle<String> string,
MaybeHandle<Map>* internalized_map);
@@ -295,6 +295,20 @@ class V8_EXPORT_PRIVATE Factory : public FactoryBase<Factory> {
template <class StringClass>
Handle<StringClass> InternalizeExternalString(Handle<String> string);
+ // Compute the sharing strategy for the input string.
+ //
+ // Old-generation sequential and thin strings can be shared by mutating their
+  // map and return kInPlace. The matching shared string map for the string is
+  // stored in shared_map.
+ //
+ // Already-shared strings return kAlreadyTransitioned.
+ //
+ // All other strings are shared by flattening and copying into a sequential
+  // string, then sharing that sequential string, and return kCopy.
+ V8_WARN_UNUSED_RESULT StringTransitionStrategy
+ ComputeSharingStrategyForString(Handle<String> string,
+ MaybeHandle<Map>* shared_map);
+
// Creates a single character string where the character has given code.
// A cache is used for Latin1 codes.
Handle<String> LookupSingleCharacterStringFromCode(uint16_t code);
@@ -430,7 +444,7 @@ class V8_EXPORT_PRIVATE Factory : public FactoryBase<Factory> {
// Allocate a block of memory of the given AllocationType (filled with a
// filler). Used as a fall-back for generated code when the space is full.
Handle<HeapObject> NewFillerObject(
- int size, bool double_align, AllocationType allocation,
+ int size, AllocationAlignment alignment, AllocationType allocation,
AllocationOrigin origin = AllocationOrigin::kRuntime);
Handle<JSObject> NewFunctionPrototype(Handle<JSFunction> function);
@@ -570,21 +584,24 @@ class V8_EXPORT_PRIVATE Factory : public FactoryBase<Factory> {
Handle<Map> opt_parent,
int instance_size_bytes,
Handle<WasmInstanceObject> instance);
+ Handle<WasmInternalFunction> NewWasmInternalFunction(Address opt_call_target,
+ Handle<HeapObject> ref,
+ Handle<Map> rtt);
Handle<WasmCapiFunctionData> NewWasmCapiFunctionData(
Address call_target, Handle<Foreign> embedder_data,
- Handle<Code> wrapper_code,
+ Handle<Code> wrapper_code, Handle<Map> rtt,
Handle<PodArray<wasm::ValueType>> serialized_sig);
Handle<WasmExportedFunctionData> NewWasmExportedFunctionData(
Handle<Code> export_wrapper, Handle<WasmInstanceObject> instance,
Address call_target, Handle<Object> ref, int func_index,
- Address sig_address, int wrapper_budget);
+ Address sig_address, int wrapper_budget, Handle<Map> rtt);
Handle<WasmApiFunctionRef> NewWasmApiFunctionRef(Handle<JSReceiver> callable);
// {opt_call_target} is kNullAddress for JavaScript functions, and
// non-null for exported Wasm functions.
Handle<WasmJSFunctionData> NewWasmJSFunctionData(
Address opt_call_target, Handle<JSReceiver> callable, int return_count,
int parameter_count, Handle<PodArray<wasm::ValueType>> serialized_sig,
- Handle<Code> wrapper_code);
+ Handle<Code> wrapper_code, Handle<Map> rtt);
Handle<WasmStruct> NewWasmStruct(const wasm::StructType* type,
wasm::WasmValue* args, Handle<Map> map);
Handle<WasmArray> NewWasmArray(const wasm::ArrayType* type,
@@ -658,6 +675,8 @@ class V8_EXPORT_PRIVATE Factory : public FactoryBase<Factory> {
// Create an External object for V8's external API.
Handle<JSObject> NewExternal(void* value);
+ Handle<DeoptimizationLiteralArray> NewDeoptimizationLiteralArray(int length);
+
// Allocates a new code object and initializes it as the trampoline to the
// given off-heap entry point.
Handle<Code> NewOffHeapTrampolineFor(Handle<Code> code,
@@ -723,7 +742,7 @@ class V8_EXPORT_PRIVATE Factory : public FactoryBase<Factory> {
Handle<SharedFunctionInfo> NewSharedFunctionInfoForBuiltin(
MaybeHandle<String> name, Builtin builtin,
- FunctionKind kind = kNormalFunction);
+ FunctionKind kind = FunctionKind::kNormalFunction);
Handle<SharedFunctionInfo> NewSharedFunctionInfoForWebSnapshot();
@@ -973,7 +992,7 @@ class V8_EXPORT_PRIVATE Factory : public FactoryBase<Factory> {
// ------
// Customization points for FactoryBase
HeapObject AllocateRaw(int size, AllocationType allocation,
- AllocationAlignment alignment = kWordAligned);
+ AllocationAlignment alignment = kTaggedAligned);
Isolate* isolate() const {
// Downcast to the privately inherited sub-class using c-style casts to
@@ -982,12 +1001,22 @@ class V8_EXPORT_PRIVATE Factory : public FactoryBase<Factory> {
// NOLINTNEXTLINE (google-readability-casting)
return (Isolate*)this; // NOLINT(readability/casting)
}
+
+ // This is the real Isolate that will be used for allocating and accessing
+ // external pointer entries when V8_HEAP_SANDBOX is enabled.
+ Isolate* isolate_for_heap_sandbox() const {
+#ifdef V8_HEAP_SANDBOX
+ return isolate();
+#else
+ return nullptr;
+#endif // V8_HEAP_SANDBOX
+ }
+
bool CanAllocateInReadOnlySpace();
bool EmptyStringRootIsInitialized();
AllocationType AllocationTypeForInPlaceInternalizableString();
void AddToScriptList(Handle<Script> shared);
- void SetExternalCodeSpaceInDataContainer(CodeDataContainer data_container);
// ------
HeapObject AllocateRawWithAllocationSite(
diff --git a/deps/v8/src/heap/gc-tracer.cc b/deps/v8/src/heap/gc-tracer.cc
index 8ddd177c6b..655930859a 100644
--- a/deps/v8/src/heap/gc-tracer.cc
+++ b/deps/v8/src/heap/gc-tracer.cc
@@ -93,7 +93,7 @@ GCTracer::Scope::~Scope() {
tracer_->AddScopeSample(scope_, duration_ms);
if (scope_ == ScopeId::MC_INCREMENTAL ||
scope_ == ScopeId::MC_INCREMENTAL_START ||
- scope_ == MC_INCREMENTAL_FINALIZE) {
+ scope_ == ScopeId::MC_INCREMENTAL_FINALIZE) {
auto* long_task_stats =
tracer_->heap_->isolate()->GetCurrentLongTaskStats();
long_task_stats->gc_full_incremental_wall_clock_duration_us +=
@@ -411,10 +411,11 @@ void GCTracer::Stop(GarbageCollector collector) {
heap_->UpdateTotalGCTime(duration);
- if ((current_.type == Event::SCAVENGER ||
- current_.type == Event::MINOR_MARK_COMPACTOR) &&
- FLAG_trace_gc_ignore_scavenger)
- return;
+ if (current_.type == Event::SCAVENGER ||
+ current_.type == Event::MINOR_MARK_COMPACTOR) {
+ ReportYoungCycleToRecorder();
+ if (FLAG_trace_gc_ignore_scavenger) return;
+ }
if (FLAG_trace_gc_nvp) {
PrintNVP();
@@ -562,11 +563,12 @@ void GCTracer::Print() const {
Output(
"[%d:%p] "
"%8.0f ms: "
- "%s%s %.1f (%.1f) -> %.1f (%.1f) MB, "
+ "%s%s%s %.1f (%.1f) -> %.1f (%.1f) MB, "
"%.1f / %.1f ms %s (average mu = %.3f, current mu = %.3f) %s %s\n",
base::OS::GetCurrentProcessId(),
reinterpret_cast<void*>(heap_->isolate()),
- heap_->isolate()->time_millis_since_init(), current_.TypeName(false),
+ heap_->isolate()->time_millis_since_init(),
+ heap_->IsShared() ? "Shared " : "", current_.TypeName(false),
current_.reduce_memory ? " (reduce)" : "",
static_cast<double>(current_.start_object_size) / MB,
static_cast<double>(current_.start_memory_size) / MB,
@@ -1444,5 +1446,39 @@ void GCTracer::ReportIncrementalMarkingStepToRecorder() {
}
}
+void GCTracer::ReportYoungCycleToRecorder() {
+ const std::shared_ptr<metrics::Recorder>& recorder =
+ heap_->isolate()->metrics_recorder();
+ DCHECK_NOT_NULL(recorder);
+ if (!recorder->HasEmbedderRecorder()) return;
+ v8::metrics::GarbageCollectionYoungCycle event;
+ // Total:
+ const double total_wall_clock_duration_in_us =
+ (current_.scopes[Scope::SCAVENGER] +
+ current_.scopes[Scope::SCAVENGER_BACKGROUND_SCAVENGE_PARALLEL]) *
+ base::Time::kMicrosecondsPerMillisecond;
+ event.total_wall_clock_duration_in_us =
+ static_cast<int64_t>(total_wall_clock_duration_in_us);
+ // MainThread:
+ const double main_thread_wall_clock_duration_in_us =
+ current_.scopes[Scope::SCAVENGER] *
+ base::Time::kMicrosecondsPerMillisecond;
+ event.main_thread_wall_clock_duration_in_us =
+ static_cast<int64_t>(main_thread_wall_clock_duration_in_us);
+ // Collection Rate:
+ event.collection_rate_in_percent =
+ static_cast<double>(current_.survived_young_object_size) /
+ current_.young_object_size;
+ // Efficiency:
+ auto freed_bytes =
+ current_.young_object_size - current_.survived_young_object_size;
+ event.efficiency_in_bytes_per_us =
+ freed_bytes / total_wall_clock_duration_in_us;
+ event.main_thread_efficiency_in_bytes_per_us =
+ freed_bytes / main_thread_wall_clock_duration_in_us;
+
+ recorder->AddMainThreadEvent(event, GetContextId(heap_->isolate()));
+}
+
} // namespace internal
} // namespace v8
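
For readers skimming the new ReportYoungCycleToRecorder() above: the reported metrics are plain ratios over the scavenger scope durations, converted from milliseconds to microseconds. A standalone sketch of the same arithmetic with made-up inputs (the sample values and the bare main() are assumptions for illustration, not V8 code):

    // Standalone sketch, not V8 code: the same arithmetic that
    // ReportYoungCycleToRecorder() performs, with made-up sample inputs.
    #include <cstdint>
    #include <cstdio>

    int main() {
      // Assumed inputs: scope durations in ms and object sizes in bytes,
      // mirroring current_.scopes[...] and current_.*_object_size.
      const double scavenger_ms = 1.2;            // main-thread scavenge scope
      const double background_parallel_ms = 0.4;  // parallel background scope
      const double kMicrosecondsPerMillisecond = 1000.0;
      const uint64_t young_object_size = 8 * 1024 * 1024;       // before GC
      const uint64_t survived_young_object_size = 1024 * 1024;  // after GC

      const double total_us =
          (scavenger_ms + background_parallel_ms) * kMicrosecondsPerMillisecond;
      const double main_thread_us = scavenger_ms * kMicrosecondsPerMillisecond;

      // Collection rate: fraction of the young generation that survived.
      const double collection_rate =
          static_cast<double>(survived_young_object_size) / young_object_size;

      // Efficiency: bytes reclaimed per microsecond of wall-clock time.
      const uint64_t freed_bytes =
          young_object_size - survived_young_object_size;
      const double efficiency = freed_bytes / total_us;
      const double main_thread_efficiency = freed_bytes / main_thread_us;

      std::printf("total %.0f us, main thread %.0f us\n", total_us,
                  main_thread_us);
      std::printf("rate %.3f, efficiency %.1f bytes/us (%.1f main thread)\n",
                  collection_rate, efficiency, main_thread_efficiency);
      return 0;
    }
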
diff --git a/deps/v8/src/heap/gc-tracer.h b/deps/v8/src/heap/gc-tracer.h
index 6daeadc94b..2c9b7b01ec 100644
--- a/deps/v8/src/heap/gc-tracer.h
+++ b/deps/v8/src/heap/gc-tracer.h
@@ -75,8 +75,8 @@ class V8_EXPORT_PRIVATE GCTracer {
steps = 0;
}
- double duration;
- double longest_step;
+ double duration; // in ms
+ double longest_step; // in ms
int steps;
};
@@ -183,10 +183,11 @@ class V8_EXPORT_PRIVATE GCTracer {
// Bytes marked incrementally for INCREMENTAL_MARK_COMPACTOR
size_t incremental_marking_bytes;
- // Duration of incremental marking steps for INCREMENTAL_MARK_COMPACTOR.
+ // Duration (in ms) of incremental marking steps for
+ // INCREMENTAL_MARK_COMPACTOR.
double incremental_marking_duration;
- // Amounts of time spent in different scopes during GC.
+ // Amounts of time (in ms) spent in different scopes during GC.
double scopes[Scope::NUMBER_OF_SCOPES];
// Holds details for incremental marking scopes.
@@ -421,6 +422,7 @@ class V8_EXPORT_PRIVATE GCTracer {
void ReportFullCycleToRecorder();
void ReportIncrementalMarkingStepToRecorder();
+ void ReportYoungCycleToRecorder();
// Pointer to the heap that owns this tracer.
Heap* heap_;
@@ -436,8 +438,8 @@ class V8_EXPORT_PRIVATE GCTracer {
// the last mark compact GC.
size_t incremental_marking_bytes_;
- // Duration of incremental marking steps since the end of the last mark-
- // compact event.
+ // Duration (in ms) of incremental marking steps since the end of the last
+ // mark-compact event.
double incremental_marking_duration_;
double incremental_marking_start_time_;
@@ -460,7 +462,7 @@ class V8_EXPORT_PRIVATE GCTracer {
size_t old_generation_allocation_counter_bytes_;
size_t embedder_allocation_counter_bytes_;
- // Accumulated duration and allocated bytes since the last GC.
+ // Accumulated duration (in ms) and allocated bytes since the last GC.
double allocation_duration_since_gc_;
size_t new_space_allocation_in_bytes_since_gc_;
size_t old_generation_allocation_in_bytes_since_gc_;
diff --git a/deps/v8/src/heap/heap-controller.cc b/deps/v8/src/heap/heap-controller.cc
index 0854ceeb91..7651cebb24 100644
--- a/deps/v8/src/heap/heap-controller.cc
+++ b/deps/v8/src/heap/heap-controller.cc
@@ -145,10 +145,6 @@ size_t MemoryController<Trait>::CalculateAllocationLimit(
factor = 1.0 + FLAG_heap_growing_percent / 100.0;
}
- if (FLAG_heap_growing_percent > 0) {
- factor = 1.0 + FLAG_heap_growing_percent / 100.0;
- }
-
CHECK_LT(1.0, factor);
CHECK_LT(0, current_size);
const uint64_t limit =
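
The hunk above drops a verbatim duplicate of the --heap-growing-percent override that already appears a few lines earlier; behaviour is unchanged. For orientation, a minimal sketch of how such a percentage flag turns into a growth factor and limit (the ComputeLimit helper is hypothetical; the real CalculateAllocationLimit applies further clamping not shown here):

    #include <cstdint>
    #include <cstdio>

    // Illustrative only: a growth factor overridden by a percentage flag,
    // then applied to the current heap size. Names are hypothetical.
    uint64_t ComputeLimit(uint64_t current_size, double default_factor,
                          int heap_growing_percent) {
      double factor = default_factor;
      if (heap_growing_percent > 0) {
        factor = 1.0 + heap_growing_percent / 100.0;  // e.g. 20 -> 1.2
      }
      return static_cast<uint64_t>(current_size * factor);
    }

    int main() {
      // 64 MB heap, default factor 1.5, flag set to 20 percent -> ~80.5 MB.
      const uint64_t limit = ComputeLimit(64ull * 1024 * 1024, 1.5, 20);
      std::printf("%llu\n", static_cast<unsigned long long>(limit));
      return 0;
    }
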
diff --git a/deps/v8/src/heap/heap-inl.h b/deps/v8/src/heap/heap-inl.h
index edefe8e55d..68abf816b0 100644
--- a/deps/v8/src/heap/heap-inl.h
+++ b/deps/v8/src/heap/heap-inl.h
@@ -30,6 +30,7 @@
#include "src/heap/paged-spaces-inl.h"
#include "src/heap/read-only-heap.h"
#include "src/heap/read-only-spaces.h"
+#include "src/heap/safepoint.h"
#include "src/heap/spaces-inl.h"
#include "src/heap/third-party/heap-api.h"
#include "src/objects/allocation-site-inl.h"
@@ -206,7 +207,7 @@ AllocationResult Heap::AllocateRaw(int size_in_bytes, AllocationType type,
DCHECK(AllowHandleAllocation::IsAllowed());
DCHECK(AllowHeapAllocation::IsAllowed());
DCHECK_IMPLIES(type == AllocationType::kCode || type == AllocationType::kMap,
- alignment == AllocationAlignment::kWordAligned);
+ alignment == AllocationAlignment::kTaggedAligned);
DCHECK_EQ(gc_state(), NOT_IN_GC);
#ifdef V8_ENABLE_ALLOCATION_TIMEOUT
if (FLAG_random_gc_interval > 0 || FLAG_gc_interval >= 0) {
@@ -320,7 +321,7 @@ HeapObject Heap::AllocateRawWith(int size, AllocationType allocation,
DCHECK_EQ(gc_state(), NOT_IN_GC);
Heap* heap = isolate()->heap();
if (allocation == AllocationType::kYoung &&
- alignment == AllocationAlignment::kWordAligned &&
+ alignment == AllocationAlignment::kTaggedAligned &&
size <= MaxRegularHeapObjectSize(allocation) &&
V8_LIKELY(!FLAG_single_generation && FLAG_inline_new &&
FLAG_gc_interval == -1)) {
@@ -791,13 +792,15 @@ AlwaysAllocateScopeForTesting::AlwaysAllocateScopeForTesting(Heap* heap)
CodeSpaceMemoryModificationScope::CodeSpaceMemoryModificationScope(Heap* heap)
: heap_(heap) {
+ DCHECK_EQ(ThreadId::Current(), heap_->isolate()->thread_id());
+ heap_->safepoint()->AssertActive();
if (heap_->write_protect_code_memory()) {
heap_->increment_code_space_memory_modification_scope_depth();
heap_->code_space()->SetCodeModificationPermissions();
LargePage* page = heap_->code_lo_space()->first_page();
while (page != nullptr) {
DCHECK(page->IsFlagSet(MemoryChunk::IS_EXECUTABLE));
- CHECK(heap_->memory_allocator()->IsMemoryChunkExecutable(page));
+ DCHECK(heap_->memory_allocator()->IsMemoryChunkExecutable(page));
page->SetCodeModificationPermissions();
page = page->next_page();
}
@@ -811,7 +814,7 @@ CodeSpaceMemoryModificationScope::~CodeSpaceMemoryModificationScope() {
LargePage* page = heap_->code_lo_space()->first_page();
while (page != nullptr) {
DCHECK(page->IsFlagSet(MemoryChunk::IS_EXECUTABLE));
- CHECK(heap_->memory_allocator()->IsMemoryChunkExecutable(page));
+ DCHECK(heap_->memory_allocator()->IsMemoryChunkExecutable(page));
page->SetDefaultCodePermissions();
page = page->next_page();
}
@@ -821,21 +824,17 @@ CodeSpaceMemoryModificationScope::~CodeSpaceMemoryModificationScope() {
CodePageCollectionMemoryModificationScope::
CodePageCollectionMemoryModificationScope(Heap* heap)
: heap_(heap) {
- if (heap_->write_protect_code_memory() &&
- !heap_->code_space_memory_modification_scope_depth()) {
- heap_->EnableUnprotectedMemoryChunksRegistry();
+ if (heap_->write_protect_code_memory()) {
heap_->IncrementCodePageCollectionMemoryModificationScopeDepth();
}
}
CodePageCollectionMemoryModificationScope::
~CodePageCollectionMemoryModificationScope() {
- if (heap_->write_protect_code_memory() &&
- !heap_->code_space_memory_modification_scope_depth()) {
+ if (heap_->write_protect_code_memory()) {
heap_->DecrementCodePageCollectionMemoryModificationScopeDepth();
if (heap_->code_page_collection_memory_modification_scope_depth() == 0) {
heap_->ProtectUnprotectedMemoryChunks();
- heap_->DisableUnprotectedMemoryChunksRegistry();
}
}
}
@@ -866,6 +865,16 @@ CodePageMemoryModificationScope::~CodePageMemoryModificationScope() {
}
}
+IgnoreLocalGCRequests::IgnoreLocalGCRequests(Heap* heap) : heap_(heap) {
+ DCHECK_EQ(ThreadId::Current(), heap_->isolate()->thread_id());
+ heap_->ignore_local_gc_requests_depth_++;
+}
+
+IgnoreLocalGCRequests::~IgnoreLocalGCRequests() {
+ DCHECK_GT(heap_->ignore_local_gc_requests_depth_, 0);
+ heap_->ignore_local_gc_requests_depth_--;
+}
+
} // namespace internal
} // namespace v8
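
The IgnoreLocalGCRequests helper added above is the usual depth-counter RAII scope: the constructor increments, the destructor decrements, and a predicate on the counter answers "is at least one scope alive?". A self-contained sketch of that pattern with a stand-in heap type (FakeHeap is a placeholder, and the thread-id assert from the real code is omitted):

    #include <cassert>

    // Simplified stand-in for the owning object; V8's Heap keeps an
    // ignore_local_gc_requests_depth_ counter much like this one.
    struct FakeHeap {
      int ignore_local_gc_requests_depth = 0;
      bool ignore_local_gc_requests() const {
        return ignore_local_gc_requests_depth > 0;
      }
    };

    // RAII scope: increments on entry, decrements on exit, so scopes nest
    // and the predicate stays true while any scope is alive.
    class IgnoreLocalGCRequestsScope {
     public:
      explicit IgnoreLocalGCRequestsScope(FakeHeap* heap) : heap_(heap) {
        heap_->ignore_local_gc_requests_depth++;
      }
      ~IgnoreLocalGCRequestsScope() {
        assert(heap_->ignore_local_gc_requests_depth > 0);
        heap_->ignore_local_gc_requests_depth--;
      }

     private:
      FakeHeap* heap_;
    };

    int main() {
      FakeHeap heap;
      {
        IgnoreLocalGCRequestsScope outer(&heap);
        IgnoreLocalGCRequestsScope inner(&heap);  // nesting is fine
        assert(heap.ignore_local_gc_requests());
      }
      assert(!heap.ignore_local_gc_requests());
      return 0;
    }
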
diff --git a/deps/v8/src/heap/heap-layout-tracer.cc b/deps/v8/src/heap/heap-layout-tracer.cc
new file mode 100644
index 0000000000..53ac5726a7
--- /dev/null
+++ b/deps/v8/src/heap/heap-layout-tracer.cc
@@ -0,0 +1,73 @@
+// Copyright 2021 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/heap/heap-layout-tracer.h"
+
+#include <iostream>
+
+#include "src/heap/new-spaces.h"
+#include "src/heap/paged-spaces.h"
+#include "src/heap/read-only-spaces.h"
+#include "src/heap/spaces-inl.h"
+
+namespace v8 {
+namespace internal {
+
+// static
+void HeapLayoutTracer::GCProloguePrintHeapLayout(v8::Isolate* isolate,
+ v8::GCType gc_type,
+ v8::GCCallbackFlags flags,
+ void* data) {
+ Heap* heap = reinterpret_cast<i::Isolate*>(isolate)->heap();
+ PrintF("Before GC:%d,", heap->gc_count());
+ PrintF("collector_name:%s\n", Heap::CollectorName(gc_type));
+ PrintHeapLayout(std::cout, heap);
+}
+
+// static
+void HeapLayoutTracer::GCEpiloguePrintHeapLayout(v8::Isolate* isolate,
+ v8::GCType gc_type,
+ v8::GCCallbackFlags flags,
+ void* data) {
+ Heap* heap = reinterpret_cast<i::Isolate*>(isolate)->heap();
+ PrintF("After GC:%d,", heap->gc_count());
+ PrintF("collector_name:%s\n", Heap::CollectorName(gc_type));
+ PrintHeapLayout(std::cout, heap);
+}
+
+// static
+void HeapLayoutTracer::PrintBasicMemoryChunk(std::ostream& os,
+ BasicMemoryChunk* chunk,
+ const char* owner_name) {
+ os << "{owner:" << owner_name << ","
+ << "address:" << chunk << ","
+ << "size:" << chunk->size() << ","
+ << "allocated_bytes:" << chunk->allocated_bytes() << ","
+ << "wasted_memory:" << chunk->wasted_memory() << "}" << std::endl;
+}
+
+// static
+void HeapLayoutTracer::PrintHeapLayout(std::ostream& os, Heap* heap) {
+ for (PageIterator it = heap->new_space()->to_space().begin();
+ it != heap->new_space()->to_space().end(); ++it) {
+ PrintBasicMemoryChunk(os, *it, "to_space");
+ }
+
+ for (PageIterator it = heap->new_space()->from_space().begin();
+ it != heap->new_space()->from_space().end(); ++it) {
+ PrintBasicMemoryChunk(os, *it, "from_space");
+ }
+
+ OldGenerationMemoryChunkIterator it(heap);
+ MemoryChunk* chunk;
+ while ((chunk = it.next()) != nullptr) {
+ PrintBasicMemoryChunk(os, chunk, chunk->owner()->name());
+ }
+
+ for (ReadOnlyPage* page : heap->read_only_space()->pages()) {
+ PrintBasicMemoryChunk(os, page, "ro_space");
+ }
+}
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/src/heap/heap-layout-tracer.h b/deps/v8/src/heap/heap-layout-tracer.h
new file mode 100644
index 0000000000..c7d677807e
--- /dev/null
+++ b/deps/v8/src/heap/heap-layout-tracer.h
@@ -0,0 +1,33 @@
+// Copyright 2021 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_HEAP_HEAP_LAYOUT_TRACER_H_
+#define V8_HEAP_HEAP_LAYOUT_TRACER_H_
+
+#include "include/v8-callbacks.h"
+#include "src/common/globals.h"
+
+namespace v8 {
+namespace internal {
+
+class Heap;
+class BasicMemoryChunk;
+
+class HeapLayoutTracer : AllStatic {
+ public:
+ static void GCProloguePrintHeapLayout(v8::Isolate* isolate,
+ v8::GCType gc_type,
+ v8::GCCallbackFlags flags, void* data);
+ static void GCEpiloguePrintHeapLayout(v8::Isolate* isolate,
+ v8::GCType gc_type,
+ v8::GCCallbackFlags flags, void* data);
+
+ private:
+ static void PrintBasicMemoryChunk(std::ostream& os, BasicMemoryChunk* chunk,
+ const char* owner_name);
+ static void PrintHeapLayout(std::ostream& os, Heap* heap);
+};
+} // namespace internal
+} // namespace v8
+#endif // V8_HEAP_HEAP_LAYOUT_TRACER_H_
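
The new HeapLayoutTracer plugs into the existing GC prologue/epilogue callback machinery; the Heap::SetUp() hunk further down registers these two functions with a GCType bitmask that always includes full GCs and optionally the young-generation collectors. A toy sketch of that filter-by-bitmask dispatch (the enum values and the CallbackRegistry type are illustrative, not the V8 API):

    #include <cstdio>
    #include <functional>
    #include <vector>

    // Illustrative GC type bits (values chosen for the sketch only).
    enum GCType : unsigned {
      kGCTypeScavenge = 1u << 0,
      kGCTypeMinorMarkCompact = 1u << 1,
      kGCTypeMarkSweepCompact = 1u << 2,
    };

    struct Callback {
      unsigned gc_type_filter;  // bitmask of GC kinds to listen for
      std::function<void(GCType)> fn;
    };

    class CallbackRegistry {
     public:
      void Add(unsigned filter, std::function<void(GCType)> fn) {
        callbacks_.push_back({filter, std::move(fn)});
      }
      void Fire(GCType type) {
        for (const Callback& cb : callbacks_) {
          if (cb.gc_type_filter & type) cb.fn(type);  // only matching kinds
        }
      }

     private:
      std::vector<Callback> callbacks_;
    };

    int main() {
      CallbackRegistry prologue;
      // Mirrors --trace-gc-heap-layout: always trace full GCs, and also the
      // young-generation GCs unless the "ignore minor GC" flag is set.
      const bool ignore_minor_gc = false;
      unsigned filter = kGCTypeMarkSweepCompact;
      if (!ignore_minor_gc) filter |= kGCTypeScavenge | kGCTypeMinorMarkCompact;

      prologue.Add(filter, [](GCType t) {
        std::printf("Before GC, collector bit: %u\n", static_cast<unsigned>(t));
      });

      prologue.Fire(kGCTypeScavenge);          // traced
      prologue.Fire(kGCTypeMarkSweepCompact);  // traced
      return 0;
    }
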
diff --git a/deps/v8/src/heap/heap-write-barrier-inl.h b/deps/v8/src/heap/heap-write-barrier-inl.h
index d3815617ae..a1b03256af 100644
--- a/deps/v8/src/heap/heap-write-barrier-inl.h
+++ b/deps/v8/src/heap/heap-write-barrier-inl.h
@@ -207,7 +207,7 @@ inline bool IsReadOnlyHeapObject(HeapObject object) {
return chunk->InReadOnlySpace();
}
-inline bool IsCodeObject(HeapObject object) {
+inline bool IsCodeSpaceObject(HeapObject object) {
heap_internals::MemoryChunk* chunk =
heap_internals::MemoryChunk::FromHeapObject(object);
return chunk->InCodeSpace();
@@ -275,6 +275,14 @@ void WriteBarrier::MarkingFromGlobalHandle(Object value) {
MarkingSlowFromGlobalHandle(*heap, heap_value);
}
+// static
+void WriteBarrier::MarkingFromInternalFields(JSObject host) {
+ if (V8_ENABLE_THIRD_PARTY_HEAP_BOOL) return;
+ auto heap = GetHeapIfMarking(host);
+ if (!heap) return;
+ MarkingSlowFromInternalFields(*heap, host);
+}
+
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/heap/heap-write-barrier.cc b/deps/v8/src/heap/heap-write-barrier.cc
index e401df6f09..dce052f00e 100644
--- a/deps/v8/src/heap/heap-write-barrier.cc
+++ b/deps/v8/src/heap/heap-write-barrier.cc
@@ -4,9 +4,11 @@
#include "src/heap/heap-write-barrier.h"
+#include "src/heap/embedder-tracing.h"
#include "src/heap/heap-write-barrier-inl.h"
#include "src/heap/marking-barrier.h"
#include "src/objects/descriptor-array.h"
+#include "src/objects/js-objects.h"
#include "src/objects/maybe-object.h"
#include "src/objects/slots-inl.h"
@@ -45,6 +47,17 @@ void WriteBarrier::MarkingSlowFromGlobalHandle(Heap* heap, HeapObject value) {
heap->marking_barrier()->WriteWithoutHost(value);
}
+// static
+void WriteBarrier::MarkingSlowFromInternalFields(Heap* heap, JSObject host) {
+ // We are not checking the mark bits of host here as (a) there's no
+ // synchronization with the marker and (b) we are writing into a live object
+ // (independent of the mark bits).
+ if (!heap->local_embedder_heap_tracer()->InUse()) return;
+ LocalEmbedderHeapTracer::ProcessingScope scope(
+ heap->local_embedder_heap_tracer());
+ scope.TracePossibleWrapper(host);
+}
+
void WriteBarrier::MarkingSlow(Heap* heap, Code host, RelocInfo* reloc_info,
HeapObject value) {
MarkingBarrier* marking_barrier = current_marking_barrier
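
The MarkingFromInternalFields()/MarkingSlowFromInternalFields() pair added above follows the usual write-barrier split: a cheap inline check that bails out when marking is not active, and an out-of-line slow path that re-traces the host's embedder fields. A schematic sketch with stand-in types (FakeHeap/FakeJSObject are placeholders; the real slow path goes through LocalEmbedderHeapTracer::ProcessingScope):

    #include <cstdio>

    // Illustrative stand-ins; not V8 types.
    struct FakeHeap {
      bool marking_in_progress;
      bool embedder_tracer_in_use;
    };

    struct FakeJSObject {
      FakeHeap* heap;
    };

    // Out-of-line slow path: only reached while incremental marking is on.
    // Conceptually it re-traces the host's embedder fields so the embedder
    // tracer sees freshly written wrapper pointers.
    void MarkingSlowFromInternalFields(FakeHeap* heap, FakeJSObject /*host*/) {
      if (!heap->embedder_tracer_in_use) return;
      std::printf("re-tracing wrapper fields of host object\n");
    }

    // Inline fast path: a couple of cheap checks, then the slow call.
    inline void MarkingFromInternalFields(FakeJSObject host) {
      FakeHeap* heap = host.heap;
      if (!heap->marking_in_progress) return;  // barrier is a no-op otherwise
      MarkingSlowFromInternalFields(heap, host);
    }

    int main() {
      FakeHeap heap{/*marking_in_progress=*/true,
                    /*embedder_tracer_in_use=*/true};
      MarkingFromInternalFields(FakeJSObject{&heap});
      return 0;
    }
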
diff --git a/deps/v8/src/heap/heap-write-barrier.h b/deps/v8/src/heap/heap-write-barrier.h
index e214dba680..b221fae2ed 100644
--- a/deps/v8/src/heap/heap-write-barrier.h
+++ b/deps/v8/src/heap/heap-write-barrier.h
@@ -8,6 +8,7 @@
#include "include/v8-internal.h"
#include "src/base/optional.h"
#include "src/common/globals.h"
+#include "src/objects/heap-object.h"
namespace v8 {
namespace internal {
@@ -57,6 +58,7 @@ class V8_EXPORT_PRIVATE WriteBarrier {
static int MarkingFromCode(Address raw_host, Address raw_slot);
// Invoked from global handles where no host object is available.
static inline void MarkingFromGlobalHandle(Object value);
+ static inline void MarkingFromInternalFields(JSObject host);
static void SetForThread(MarkingBarrier*);
static void ClearForThread(MarkingBarrier*);
@@ -74,6 +76,7 @@ class V8_EXPORT_PRIVATE WriteBarrier {
static void MarkingSlow(Heap* heap, DescriptorArray,
int number_of_own_descriptors);
static void MarkingSlowFromGlobalHandle(Heap* heap, HeapObject value);
+ static void MarkingSlowFromInternalFields(Heap* heap, JSObject host);
};
} // namespace internal
diff --git a/deps/v8/src/heap/heap.cc b/deps/v8/src/heap/heap.cc
index c3e549c29a..5f80f2fd4f 100644
--- a/deps/v8/src/heap/heap.cc
+++ b/deps/v8/src/heap/heap.cc
@@ -11,6 +11,7 @@
#include <unordered_map>
#include <unordered_set>
+#include "include/v8-locker.h"
#include "src/api/api-inl.h"
#include "src/base/bits.h"
#include "src/base/flags.h"
@@ -26,6 +27,7 @@
#include "src/compiler-dispatcher/optimizing-compile-dispatcher.h"
#include "src/debug/debug.h"
#include "src/deoptimizer/deoptimizer.h"
+#include "src/execution/embedder-state.h"
#include "src/execution/isolate-utils-inl.h"
#include "src/execution/microtask-queue.h"
#include "src/execution/runtime-profiler.h"
@@ -49,6 +51,7 @@
#include "src/heap/gc-idle-time-handler.h"
#include "src/heap/gc-tracer.h"
#include "src/heap/heap-controller.h"
+#include "src/heap/heap-layout-tracer.h"
#include "src/heap/heap-write-barrier-inl.h"
#include "src/heap/incremental-marking-inl.h"
#include "src/heap/incremental-marking.h"
@@ -66,6 +69,7 @@
#include "src/heap/objects-visiting-inl.h"
#include "src/heap/objects-visiting.h"
#include "src/heap/paged-spaces-inl.h"
+#include "src/heap/parked-scope.h"
#include "src/heap/read-only-heap.h"
#include "src/heap/remembered-set.h"
#include "src/heap/safepoint.h"
@@ -193,9 +197,6 @@ bool Heap::GCCallbackTuple::operator==(
return other.callback == callback && other.data == data;
}
-Heap::GCCallbackTuple& Heap::GCCallbackTuple::operator=(
- const Heap::GCCallbackTuple& other) V8_NOEXCEPT = default;
-
class ScavengeTaskObserver : public AllocationObserver {
public:
ScavengeTaskObserver(Heap* heap, intptr_t step_size)
@@ -496,6 +497,11 @@ void Heap::SetGCState(HeapState state) {
gc_state_.store(state, std::memory_order_relaxed);
}
+bool Heap::IsGCWithoutStack() const {
+ return local_embedder_heap_tracer()->embedder_stack_state() ==
+ cppgc::EmbedderStackState::kNoHeapPointers;
+}
+
void Heap::PrintShortHeapStatistics() {
if (!FLAG_trace_gc_verbose) return;
PrintIsolate(isolate_,
@@ -932,11 +938,6 @@ void Heap::GarbageCollectionPrologue() {
} else {
maximum_size_scavenges_ = 0;
}
- if (FLAG_track_retaining_path) {
- retainer_.clear();
- ephemeron_retainer_.clear();
- retaining_root_.clear();
- }
memory_allocator()->unmapper()->PrepareForGC();
}
@@ -1331,12 +1332,19 @@ void Heap::GarbageCollectionEpilogueInSafepoint(GarbageCollector collector) {
collection_barrier_->ResumeThreadsAwaitingCollection();
}
-void Heap::GarbageCollectionEpilogue() {
+void Heap::GarbageCollectionEpilogue(GarbageCollector collector) {
TRACE_GC(tracer(), GCTracer::Scope::HEAP_EPILOGUE);
AllowGarbageCollection for_the_rest_of_the_epilogue;
UpdateMaximumCommitted();
+ if (FLAG_track_retaining_path &&
+ collector == GarbageCollector::MARK_COMPACTOR) {
+ retainer_.clear();
+ ephemeron_retainer_.clear();
+ retaining_root_.clear();
+ }
+
isolate_->counters()->alive_after_last_gc()->Set(
static_cast<int>(SizeOfObjects()));
@@ -1389,13 +1397,13 @@ void Heap::HandleGCRequest() {
} else if (CollectionRequested()) {
CheckCollectionRequested();
} else if (incremental_marking()->request_type() ==
- IncrementalMarking::COMPLETE_MARKING) {
+ IncrementalMarking::GCRequestType::COMPLETE_MARKING) {
incremental_marking()->reset_request_type();
CollectAllGarbage(current_gc_flags_,
GarbageCollectionReason::kFinalizeMarkingViaStackGuard,
current_gc_callback_flags_);
} else if (incremental_marking()->request_type() ==
- IncrementalMarking::FINALIZATION &&
+ IncrementalMarking::GCRequestType::FINALIZATION &&
incremental_marking()->IsMarking() &&
!incremental_marking()->finalize_marking_completed()) {
incremental_marking()->reset_request_type();
@@ -1641,20 +1649,6 @@ void Heap::ReportExternalMemoryPressure() {
int64_t Heap::external_memory_limit() { return external_memory_.limit(); }
-void Heap::EnsureFillerObjectAtTop() {
- // There may be an allocation memento behind objects in new space. Upon
- // evacuation of a non-full new space (or if we are on the last page) there
- // may be uninitialized memory behind top. We fill the remainder of the page
- // with a filler.
- if (!new_space_) return;
- Address to_top = new_space_->top();
- Page* page = Page::FromAddress(to_top - kTaggedSize);
- if (page->Contains(to_top)) {
- int remaining_in_page = static_cast<int>(page->area_end() - to_top);
- CreateFillerObjectAt(to_top, remaining_in_page, ClearRecordedSlots::kNo);
- }
-}
-
Heap::DevToolsTraceEventScope::DevToolsTraceEventScope(Heap* heap,
const char* event_name,
const char* event_type)
@@ -1714,7 +1708,11 @@ bool Heap::CollectGarbage(AllocationSpace space,
}
#endif
- EnsureFillerObjectAtTop();
+ // There may be an allocation memento behind objects in new space. Upon
+ // evacuation of a non-full new space (or if we are on the last page) there
+ // may be uninitialized memory behind top. We fill the remainder of the page
+ // with a filler.
+ if (new_space()) new_space()->MakeLinearAllocationAreaIterable();
if (IsYoungGenerationCollector(collector) &&
!incremental_marking()->IsStopped()) {
@@ -1765,9 +1763,22 @@ bool Heap::CollectGarbage(AllocationSpace space,
PROFILE(isolate_, CodeMovingGCEvent());
}
- GCType gc_type = collector == GarbageCollector::MARK_COMPACTOR
- ? kGCTypeMarkSweepCompact
- : kGCTypeScavenge;
+ GCType gc_type;
+
+ switch (collector) {
+ case GarbageCollector::MARK_COMPACTOR:
+ gc_type = kGCTypeMarkSweepCompact;
+ break;
+ case GarbageCollector::SCAVENGER:
+ gc_type = kGCTypeScavenge;
+ break;
+ case GarbageCollector::MINOR_MARK_COMPACTOR:
+ gc_type = kGCTypeMinorMarkCompact;
+ break;
+ default:
+ UNREACHABLE();
+ }
+
{
GCCallbacksScope scope(this);
// Temporary override any embedder stack state as callbacks may create
@@ -1827,7 +1838,7 @@ bool Heap::CollectGarbage(AllocationSpace space,
}
}
- GarbageCollectionEpilogue();
+ GarbageCollectionEpilogue(collector);
if (collector == GarbageCollector::MARK_COMPACTOR &&
FLAG_track_detached_contexts) {
isolate()->CheckDetachedContextsAfterGC();
@@ -1917,7 +1928,13 @@ void Heap::StartIncrementalMarking(int gc_flags,
CppHeap::From(cpp_heap())->FinishSweepingIfRunning();
}
- SafepointScope safepoint(this);
+ base::Optional<SafepointScope> safepoint_scope;
+
+ {
+ AllowGarbageCollection allow_shared_gc;
+ IgnoreLocalGCRequests ignore_gc_requests(this);
+ safepoint_scope.emplace(this);
+ }
#ifdef DEBUG
VerifyCountersAfterSweeping();
@@ -2150,12 +2167,21 @@ size_t Heap::PerformGarbageCollection(
TRACE_GC_EPOCH(tracer(), CollectorScopeId(collector), ThreadKind::kMain);
- SafepointScope safepoint_scope(this);
+ base::Optional<SafepointScope> safepoint_scope;
+
+ {
+ AllowGarbageCollection allow_shared_gc;
+ IgnoreLocalGCRequests ignore_gc_requests(this);
+ safepoint_scope.emplace(this);
+ }
collection_barrier_->StopTimeToCollectionTimer();
#ifdef VERIFY_HEAP
if (FLAG_verify_heap) {
+ // We don't really perform a GC here but need this scope for the nested
+ // SafepointScope inside Verify().
+ AllowGarbageCollection allow_gc;
Verify();
}
#endif
@@ -2226,6 +2252,9 @@ size_t Heap::PerformGarbageCollection(
#ifdef VERIFY_HEAP
if (FLAG_verify_heap) {
+ // We don't really perform a GC here but need this scope for the nested
+ // SafepointScope inside Verify().
+ AllowGarbageCollection allow_gc;
Verify();
}
#endif
@@ -2251,37 +2280,27 @@ void Heap::CollectSharedGarbage(GarbageCollectionReason gc_reason) {
void Heap::PerformSharedGarbageCollection(Isolate* initiator,
GarbageCollectionReason gc_reason) {
DCHECK(IsShared());
- base::MutexGuard guard(isolate()->client_isolate_mutex());
+
+ // Stop all client isolates attached to this isolate
+ GlobalSafepointScope global_safepoint(initiator);
+
+ // Migrate shared isolate to the main thread of the initiator isolate.
+ v8::Locker locker(reinterpret_cast<v8::Isolate*>(isolate()));
+ v8::Isolate::Scope isolate_scope(reinterpret_cast<v8::Isolate*>(isolate()));
const char* collector_reason = nullptr;
GarbageCollector collector = GarbageCollector::MARK_COMPACTOR;
tracer()->Start(collector, gc_reason, collector_reason);
- isolate()->IterateClientIsolates([initiator](Isolate* client) {
- DCHECK_NOT_NULL(client->shared_isolate());
- Heap* client_heap = client->heap();
+ DCHECK_NOT_NULL(isolate()->global_safepoint());
- IsolateSafepoint::StopMainThread stop_main_thread =
- initiator == client ? IsolateSafepoint::StopMainThread::kNo
- : IsolateSafepoint::StopMainThread::kYes;
-
- client_heap->safepoint()->EnterSafepointScope(stop_main_thread);
- DCHECK(client_heap->deserialization_complete());
-
- client_heap->shared_old_allocator_->FreeLinearAllocationArea();
- client_heap->shared_map_allocator_->FreeLinearAllocationArea();
+ isolate()->global_safepoint()->IterateClientIsolates([](Isolate* client) {
+ client->heap()->FreeSharedLinearAllocationAreas();
});
PerformGarbageCollection(GarbageCollector::MARK_COMPACTOR);
- isolate()->IterateClientIsolates([initiator](Isolate* client) {
- IsolateSafepoint::StopMainThread stop_main_thread =
- initiator == client ? IsolateSafepoint::StopMainThread::kNo
- : IsolateSafepoint::StopMainThread::kYes;
- client->heap()->safepoint()->LeaveSafepointScope(stop_main_thread);
- });
-
tracer()->Stop(collector);
}
@@ -2657,14 +2676,14 @@ void Heap::ComputeFastPromotionMode() {
void Heap::UnprotectAndRegisterMemoryChunk(MemoryChunk* chunk,
UnprotectMemoryOrigin origin) {
- if (unprotected_memory_chunks_registry_enabled_) {
- base::Optional<base::MutexGuard> guard;
- if (origin != UnprotectMemoryOrigin::kMainThread) {
- guard.emplace(&unprotected_memory_chunks_mutex_);
- }
+ if (!write_protect_code_memory()) return;
+ if (code_page_collection_memory_modification_scope_depth_ > 0) {
+ base::MutexGuard guard(&unprotected_memory_chunks_mutex_);
if (unprotected_memory_chunks_.insert(chunk).second) {
chunk->SetCodeModificationPermissions();
}
+ } else {
+ DCHECK_GT(code_space_memory_modification_scope_depth_, 0);
}
}
@@ -2678,10 +2697,10 @@ void Heap::UnregisterUnprotectedMemoryChunk(MemoryChunk* chunk) {
}
void Heap::ProtectUnprotectedMemoryChunks() {
- DCHECK(unprotected_memory_chunks_registry_enabled_);
+ base::MutexGuard guard(&unprotected_memory_chunks_mutex_);
for (auto chunk = unprotected_memory_chunks_.begin();
chunk != unprotected_memory_chunks_.end(); chunk++) {
- CHECK(memory_allocator()->IsMemoryChunkExecutable(*chunk));
+ DCHECK(memory_allocator()->IsMemoryChunkExecutable(*chunk));
(*chunk)->SetDefaultCodePermissions();
}
unprotected_memory_chunks_.clear();
@@ -3019,13 +3038,12 @@ STATIC_ASSERT(IsAligned(ByteArray::kHeaderSize, kTaggedSize));
STATIC_ASSERT(IsAligned(ByteArray::kHeaderSize, kDoubleAlignment));
#endif
-#ifdef V8_HOST_ARCH_32_BIT
-STATIC_ASSERT((HeapNumber::kValueOffset & kDoubleAlignmentMask) == kTaggedSize);
-#endif
+STATIC_ASSERT(!USE_ALLOCATION_ALIGNMENT_BOOL ||
+ (HeapNumber::kValueOffset & kDoubleAlignmentMask) == kTaggedSize);
int Heap::GetMaximumFillToAlign(AllocationAlignment alignment) {
switch (alignment) {
- case kWordAligned:
+ case kTaggedAligned:
return 0;
case kDoubleAligned:
case kDoubleUnaligned:
@@ -3320,6 +3338,10 @@ void Heap::OnMoveEvent(HeapObject target, HeapObject source,
LOG_CODE_EVENT(isolate_, SharedFunctionInfoMoveEvent(source.address(),
target.address()));
} else if (target.IsNativeContext()) {
+ if (isolate_->current_embedder_state() != nullptr) {
+ isolate_->current_embedder_state()->OnMoveEvent(source.address(),
+ target.address());
+ }
PROFILE(isolate_,
NativeContextMoveEvent(source.address(), target.address()));
}
@@ -3412,7 +3434,14 @@ FixedArrayBase Heap::LeftTrimFixedArray(FixedArrayBase object,
if (FLAG_enable_slow_asserts) {
// Make sure the stack or other roots (e.g., Handles) don't contain pointers
// to the original FixedArray (which is now the filler object).
- SafepointScope scope(this);
+ base::Optional<SafepointScope> safepoint_scope;
+
+ {
+ AllowGarbageCollection allow_gc;
+ IgnoreLocalGCRequests ignore_gc_requests(this);
+ safepoint_scope.emplace(this);
+ }
+
LeftTrimmerVerifierRootVisitor root_visitor(object);
ReadOnlyRoots(this).Iterate(&root_visitor);
IterateRoots(&root_visitor, {});
@@ -3524,13 +3553,45 @@ void Heap::CreateFillerForArray(T object, int elements_to_trim,
void Heap::MakeHeapIterable() {
mark_compact_collector()->EnsureSweepingCompleted();
- MakeLocalHeapLabsIterable();
+ safepoint()->IterateLocalHeaps([](LocalHeap* local_heap) {
+ local_heap->MakeLinearAllocationAreaIterable();
+ });
+
+ PagedSpaceIterator spaces(this);
+ for (PagedSpace* space = spaces.Next(); space != nullptr;
+ space = spaces.Next()) {
+ space->MakeLinearAllocationAreaIterable();
+ }
+
+ if (new_space()) new_space()->MakeLinearAllocationAreaIterable();
+}
+
+void Heap::FreeLinearAllocationAreas() {
+ safepoint()->IterateLocalHeaps(
+ [](LocalHeap* local_heap) { local_heap->FreeLinearAllocationArea(); });
+
+ PagedSpaceIterator spaces(this);
+ for (PagedSpace* space = spaces.Next(); space != nullptr;
+ space = spaces.Next()) {
+ space->FreeLinearAllocationArea();
+ }
+
+ if (new_space()) new_space()->FreeLinearAllocationArea();
}
-void Heap::MakeLocalHeapLabsIterable() {
+void Heap::FreeSharedLinearAllocationAreas() {
+ if (!isolate()->shared_isolate()) return;
safepoint()->IterateLocalHeaps([](LocalHeap* local_heap) {
- local_heap->MakeLinearAllocationAreaIterable();
+ local_heap->FreeSharedLinearAllocationArea();
});
+ FreeMainThreadSharedLinearAllocationAreas();
+}
+
+void Heap::FreeMainThreadSharedLinearAllocationAreas() {
+ if (!isolate()->shared_isolate()) return;
+ shared_old_allocator_->FreeLinearAllocationArea();
+ shared_map_allocator_->FreeLinearAllocationArea();
+ main_thread_local_heap()->FreeSharedLinearAllocationArea();
}
namespace {
@@ -3748,6 +3809,7 @@ void Heap::FinalizeIncrementalMarkingIncrementally(
TRACE_GC_EPOCH(tracer(), GCTracer::Scope::MC_INCREMENTAL_FINALIZE,
ThreadKind::kMain);
+ IgnoreLocalGCRequests ignore_gc_requests(this);
SafepointScope safepoint(this);
InvokeIncrementalMarkingPrologueCallbacks();
incremental_marking()->FinalizeIncrementally();
@@ -3796,7 +3858,7 @@ class SlotCollectingVisitor final : public ObjectVisitor {
void VisitCodePointer(HeapObject host, CodeObjectSlot slot) override {
CHECK(V8_EXTERNAL_CODE_SPACE_BOOL);
-#if V8_EXTERNAL_CODE_SPACE
+#ifdef V8_EXTERNAL_CODE_SPACE
code_slots_.push_back(slot);
#endif
}
@@ -3812,14 +3874,14 @@ class SlotCollectingVisitor final : public ObjectVisitor {
int number_of_slots() { return static_cast<int>(slots_.size()); }
MaybeObjectSlot slot(int i) { return slots_[i]; }
-#if V8_EXTERNAL_CODE_SPACE
+#ifdef V8_EXTERNAL_CODE_SPACE
CodeObjectSlot code_slot(int i) { return code_slots_[i]; }
int number_of_code_slots() { return static_cast<int>(code_slots_.size()); }
#endif
private:
std::vector<MaybeObjectSlot> slots_;
-#if V8_EXTERNAL_CODE_SPACE
+#ifdef V8_EXTERNAL_CODE_SPACE
std::vector<CodeObjectSlot> code_slots_;
#endif
};
@@ -3827,16 +3889,18 @@ class SlotCollectingVisitor final : public ObjectVisitor {
void Heap::VerifyObjectLayoutChange(HeapObject object, Map new_map) {
if (!FLAG_verify_heap) return;
+ PtrComprCageBase cage_base(isolate());
+
// Check that Heap::NotifyObjectLayoutChange was called for object transitions
// that are not safe for concurrent marking.
// If you see this check triggering for a freshly allocated object,
// use object->set_map_after_allocation() to initialize its map.
if (pending_layout_change_object_.is_null()) {
- if (object.IsJSObject()) {
+ if (object.IsJSObject(cage_base)) {
// Without double unboxing all in-object fields of a JSObject are tagged.
return;
}
- if (object.IsString() &&
+ if (object.IsString(cage_base) &&
(new_map == ReadOnlyRoots(this).thin_string_map() ||
new_map == ReadOnlyRoots(this).thin_one_byte_string_map())) {
// When transitioning a string to ThinString,
@@ -3844,7 +3908,7 @@ void Heap::VerifyObjectLayoutChange(HeapObject object, Map new_map) {
// tagged fields are introduced.
return;
}
- if (FLAG_shared_string_table && object.IsString() &&
+ if (FLAG_shared_string_table && object.IsString(cage_base) &&
InstanceTypeChecker::IsInternalizedString(new_map.instance_type())) {
// In-place internalization does not change a string's fields.
//
@@ -3855,19 +3919,19 @@ void Heap::VerifyObjectLayoutChange(HeapObject object, Map new_map) {
}
// Check that the set of slots before and after the transition match.
SlotCollectingVisitor old_visitor;
- object.IterateFast(&old_visitor);
- MapWord old_map_word = object.map_word(kRelaxedLoad);
+ object.IterateFast(cage_base, &old_visitor);
+ MapWord old_map_word = object.map_word(cage_base, kRelaxedLoad);
// Temporarily set the new map to iterate new slots.
object.set_map_word(MapWord::FromMap(new_map), kRelaxedStore);
SlotCollectingVisitor new_visitor;
- object.IterateFast(&new_visitor);
+ object.IterateFast(cage_base, &new_visitor);
// Restore the old map.
object.set_map_word(old_map_word, kRelaxedStore);
DCHECK_EQ(new_visitor.number_of_slots(), old_visitor.number_of_slots());
for (int i = 0; i < new_visitor.number_of_slots(); i++) {
DCHECK_EQ(new_visitor.slot(i), old_visitor.slot(i));
}
-#if V8_EXTERNAL_CODE_SPACE
+#ifdef V8_EXTERNAL_CODE_SPACE
DCHECK_EQ(new_visitor.number_of_code_slots(),
old_visitor.number_of_code_slots());
for (int i = 0; i < new_visitor.number_of_code_slots(); i++) {
@@ -4172,6 +4236,7 @@ std::unique_ptr<v8::MeasureMemoryDelegate> Heap::MeasureMemoryDelegate(
void Heap::CollectCodeStatistics() {
TRACE_EVENT0("v8", "Heap::CollectCodeStatistics");
+ IgnoreLocalGCRequests ignore_gc_requests(this);
SafepointScope safepoint_scope(this);
MakeHeapIterable();
CodeStatistics::ResetCodeAndMetadataStatistics(isolate());
@@ -4385,6 +4450,7 @@ bool Heap::IsValidAllocationSpace(AllocationSpace space) {
#ifdef VERIFY_HEAP
void Heap::Verify() {
CHECK(HasBeenSetUp());
+ IgnoreLocalGCRequests ignore_gc_requests(this);
SafepointScope safepoint_scope(this);
HandleScope scope(isolate());
@@ -4582,8 +4648,9 @@ void Heap::VerifyRememberedSetFor(HeapObject object) {
// In RO_SPACE chunk->mutex() may be nullptr, so just ignore it.
base::LockGuard<base::Mutex, base::NullBehavior::kIgnoreIfNull> lock_guard(
chunk->mutex());
+ PtrComprCageBase cage_base(isolate());
Address start = object.address();
- Address end = start + object.Size();
+ Address end = start + object.Size(cage_base);
std::set<Address> old_to_new;
std::set<std::pair<SlotType, Address>> typed_old_to_new;
if (!InYoungGeneration(object)) {
@@ -4591,7 +4658,7 @@ void Heap::VerifyRememberedSetFor(HeapObject object) {
OldToNewSlotVerifyingVisitor visitor(isolate(), &old_to_new,
&typed_old_to_new,
&this->ephemeron_remembered_set_);
- object.IterateBody(&visitor);
+ object.IterateBody(cage_base, &visitor);
}
// TODO(v8:11797): Add old to old slot set verification once all weak objects
// have their own instance types and slots are recorded for all weak fields.
@@ -4600,8 +4667,6 @@ void Heap::VerifyRememberedSetFor(HeapObject object) {
#ifdef DEBUG
void Heap::VerifyCountersAfterSweeping() {
- MakeLocalHeapLabsIterable();
-
PagedSpaceIterator spaces(this);
for (PagedSpace* space = spaces.Next(); space != nullptr;
space = spaces.Next()) {
@@ -4685,7 +4750,13 @@ void Heap::IterateSmiRoots(RootVisitor* v) {
// the sweeper might actually free the underlying page).
class ClearStaleLeftTrimmedHandlesVisitor : public RootVisitor {
public:
- explicit ClearStaleLeftTrimmedHandlesVisitor(Heap* heap) : heap_(heap) {
+ explicit ClearStaleLeftTrimmedHandlesVisitor(Heap* heap)
+ : heap_(heap)
+#if V8_COMPRESS_POINTERS
+ ,
+ cage_base_(heap->isolate())
+#endif // V8_COMPRESS_POINTERS
+ {
USE(heap_);
}
@@ -4701,20 +4772,32 @@ class ClearStaleLeftTrimmedHandlesVisitor : public RootVisitor {
}
}
+ // The pointer compression cage base value used for decompression of all
+ // tagged values except references to Code objects.
+ PtrComprCageBase cage_base() const {
+#if V8_COMPRESS_POINTERS
+ return cage_base_;
+#else
+ return PtrComprCageBase{};
+#endif // V8_COMPRESS_POINTERS
+ }
+
private:
inline void FixHandle(FullObjectSlot p) {
if (!(*p).IsHeapObject()) return;
HeapObject current = HeapObject::cast(*p);
- if (!current.map_word(kRelaxedLoad).IsForwardingAddress() &&
- current.IsFreeSpaceOrFiller()) {
+ if (!current.map_word(cage_base(), kRelaxedLoad).IsForwardingAddress() &&
+ current.IsFreeSpaceOrFiller(cage_base())) {
#ifdef DEBUG
// We need to find a FixedArrayBase map after walking the fillers.
- while (!current.map_word(kRelaxedLoad).IsForwardingAddress() &&
- current.IsFreeSpaceOrFiller()) {
+ while (
+ !current.map_word(cage_base(), kRelaxedLoad).IsForwardingAddress() &&
+ current.IsFreeSpaceOrFiller(cage_base())) {
Address next = current.ptr();
- if (current.map() == ReadOnlyRoots(heap_).one_pointer_filler_map()) {
+ if (current.map(cage_base()) ==
+ ReadOnlyRoots(heap_).one_pointer_filler_map()) {
next += kTaggedSize;
- } else if (current.map() ==
+ } else if (current.map(cage_base()) ==
ReadOnlyRoots(heap_).two_pointer_filler_map()) {
next += 2 * kTaggedSize;
} else {
@@ -4722,14 +4805,19 @@ class ClearStaleLeftTrimmedHandlesVisitor : public RootVisitor {
}
current = HeapObject::cast(Object(next));
}
- DCHECK(current.map_word(kRelaxedLoad).IsForwardingAddress() ||
- current.IsFixedArrayBase());
+ DCHECK(
+ current.map_word(cage_base(), kRelaxedLoad).IsForwardingAddress() ||
+ current.IsFixedArrayBase(cage_base()));
#endif // DEBUG
p.store(Smi::zero());
}
}
Heap* heap_;
+
+#if V8_COMPRESS_POINTERS
+ const PtrComprCageBase cage_base_;
+#endif // V8_COMPRESS_POINTERS
};
void Heap::IterateRoots(RootVisitor* v, base::EnumSet<SkipRoot> options) {
@@ -4873,9 +4961,12 @@ void Heap::IterateRootsIncludingClients(RootVisitor* v,
base::EnumSet<SkipRoot> options) {
IterateRoots(v, options);
- isolate()->IterateClientIsolates([v, options](Isolate* client) {
- client->heap()->IterateRoots(v, options);
- });
+ if (isolate()->is_shared()) {
+ isolate()->global_safepoint()->IterateClientIsolates(
+ [v, options](Isolate* client) {
+ client->heap()->IterateRoots(v, options);
+ });
+ }
}
void Heap::IterateWeakGlobalHandles(RootVisitor* v) {
@@ -4886,8 +4977,12 @@ void Heap::IterateBuiltins(RootVisitor* v) {
Builtins* builtins = isolate()->builtins();
for (Builtin builtin = Builtins::kFirst; builtin <= Builtins::kLast;
++builtin) {
- v->VisitRootPointer(Root::kBuiltins, Builtins::name(builtin),
- builtins->builtin_slot(builtin));
+ const char* name = Builtins::name(builtin);
+ v->VisitRootPointer(Root::kBuiltins, name, builtins->builtin_slot(builtin));
+ if (V8_EXTERNAL_CODE_SPACE_BOOL) {
+ v->VisitRootPointer(Root::kBuiltins, name,
+ builtins->builtin_code_data_container_slot(builtin));
+ }
}
for (Builtin builtin = Builtins::kFirst; builtin <= Builtins::kLastTier0;
@@ -5420,7 +5515,7 @@ void Heap::DisableInlineAllocation() {
// Update inline allocation limit for old spaces.
PagedSpaceIterator spaces(this);
- CodeSpaceMemoryModificationScope modification_scope(this);
+ CodePageCollectionMemoryModificationScope modification_scope(this);
for (PagedSpace* space = spaces.Next(); space != nullptr;
space = spaces.Next()) {
base::MutexGuard guard(space->mutex());
@@ -5472,14 +5567,16 @@ HeapObject Heap::AllocateRawWithRetryOrFailSlowPath(
isolate()->counters()->gc_last_resort_from_handles()->Increment();
if (IsSharedAllocationType(allocation)) {
CollectSharedGarbage(GarbageCollectionReason::kLastResort);
+
+ AlwaysAllocateScope scope(isolate()->shared_isolate()->heap());
+ alloc = AllocateRaw(size, allocation, origin, alignment);
} else {
CollectAllAvailableGarbage(GarbageCollectionReason::kLastResort);
- }
- {
AlwaysAllocateScope scope(this);
alloc = AllocateRaw(size, allocation, origin, alignment);
}
+
if (alloc.To(&result)) {
DCHECK(result != ReadOnlyRoots(this).exception());
return result;
@@ -5570,6 +5667,19 @@ void Heap::SetUp(LocalHeap* main_thread_local_heap) {
for (int i = FIRST_SPACE; i <= LAST_SPACE; i++) {
space_[i] = nullptr;
}
+
+ // Set up layout tracing callback.
+ if (V8_UNLIKELY(FLAG_trace_gc_heap_layout)) {
+ v8::GCType gc_type = kGCTypeMarkSweepCompact;
+ if (V8_UNLIKELY(!FLAG_trace_gc_heap_layout_ignore_minor_gc)) {
+ gc_type = static_cast<v8::GCType>(gc_type | kGCTypeScavenge |
+ kGCTypeMinorMarkCompact);
+ }
+ AddGCPrologueCallback(HeapLayoutTracer::GCProloguePrintHeapLayout, gc_type,
+ nullptr);
+ AddGCEpilogueCallback(HeapLayoutTracer::GCEpiloguePrintHeapLayout, gc_type,
+ nullptr);
+ }
}
void Heap::SetUpFromReadOnlyHeap(ReadOnlyHeap* ro_heap) {
@@ -5610,16 +5720,17 @@ class StressConcurrentAllocationObserver : public AllocationObserver {
Heap* heap_;
};
-void Heap::SetUpSpaces() {
+void Heap::SetUpSpaces(LinearAllocationArea* new_allocation_info,
+ LinearAllocationArea* old_allocation_info) {
// Ensure SetUpFromReadOnlySpace has been ran.
DCHECK_NOT_NULL(read_only_space_);
const bool has_young_gen = !FLAG_single_generation && !IsShared();
if (has_young_gen) {
- space_[NEW_SPACE] = new_space_ =
- new NewSpace(this, memory_allocator_->data_page_allocator(),
- initial_semispace_size_, max_semi_space_size_);
+ space_[NEW_SPACE] = new_space_ = new NewSpace(
+ this, memory_allocator_->data_page_allocator(), initial_semispace_size_,
+ max_semi_space_size_, new_allocation_info);
}
- space_[OLD_SPACE] = old_space_ = new OldSpace(this);
+ space_[OLD_SPACE] = old_space_ = new OldSpace(this, old_allocation_info);
space_[CODE_SPACE] = code_space_ = new CodeSpace(this);
space_[MAP_SPACE] = map_space_ = new MapSpace(this);
space_[LO_SPACE] = lo_space_ = new OldLargeObjectSpace(this);
@@ -5818,11 +5929,13 @@ EmbedderHeapTracer* Heap::GetEmbedderHeapTracer() const {
void Heap::AttachCppHeap(v8::CppHeap* cpp_heap) {
CppHeap::From(cpp_heap)->AttachIsolate(isolate());
cpp_heap_ = cpp_heap;
+ local_embedder_heap_tracer()->SetCppHeap(CppHeap::From(cpp_heap));
}
void Heap::DetachCppHeap() {
CppHeap::From(cpp_heap_)->DetachIsolate();
cpp_heap_ = nullptr;
+ local_embedder_heap_tracer()->SetCppHeap(nullptr);
}
EmbedderHeapTracer::TraceFlags Heap::flags_for_embedder_tracer() const {
@@ -5873,11 +5986,18 @@ void Heap::StartTearDown() {
// threads finish.
collection_barrier_->NotifyShutdownRequested();
+ // Main thread isn't going to allocate anymore.
+ main_thread_local_heap()->FreeLinearAllocationArea();
+
+ FreeMainThreadSharedLinearAllocationAreas();
+
#ifdef VERIFY_HEAP
// {StartTearDown} is called fairly early during Isolate teardown, so it's
// a good time to run heap verification (if requested), before starting to
// tear down parts of the Isolate.
if (FLAG_verify_heap) {
+ AllowGarbageCollection allow_gc;
+ IgnoreLocalGCRequests ignore_gc_requests(this);
SafepointScope scope(this);
Verify();
}
@@ -6118,7 +6238,9 @@ void Heap::AddRetainedMap(Handle<NativeContext> context, Handle<Map> map) {
if (map->is_in_retained_map_list()) {
return;
}
- Handle<WeakArrayList> array(context->retained_maps(), isolate());
+
+ Handle<WeakArrayList> array(WeakArrayList::cast(context->retained_maps()),
+ isolate());
if (array->IsFull()) {
CompactRetainedMaps(*array);
}
@@ -6352,7 +6474,7 @@ class UnreachableObjectsFilter : public HeapObjectsFilter {
: ObjectVisitorWithCageBases(filter->heap_), filter_(filter) {}
void VisitMapPointer(HeapObject object) override {
- MarkHeapObject(Map::unchecked_cast(object.map()));
+ MarkHeapObject(Map::unchecked_cast(object.map(cage_base())));
}
void VisitPointers(HeapObject host, ObjectSlot start,
ObjectSlot end) override {
@@ -6392,7 +6514,7 @@ class UnreachableObjectsFilter : public HeapObjectsFilter {
while (!marking_stack_.empty()) {
HeapObject obj = marking_stack_.back();
marking_stack_.pop_back();
- obj.Iterate(this);
+ obj.Iterate(cage_base(), this);
}
}
@@ -6819,7 +6941,7 @@ std::vector<WeakArrayList> Heap::FindAllRetainedMaps() {
Object context = native_contexts_list();
while (!context.IsUndefined(isolate())) {
NativeContext native_context = NativeContext::cast(context);
- result.push_back(native_context.retained_maps());
+ result.push_back(WeakArrayList::cast(native_context.retained_maps()));
context = native_context.next_context_link();
}
return result;
@@ -6846,6 +6968,7 @@ void VerifyPointersVisitor::VisitCodePointer(HeapObject host,
CHECK(V8_EXTERNAL_CODE_SPACE_BOOL);
Object maybe_code = slot.load(code_cage_base());
HeapObject code;
+ // The slot might contain smi during CodeDataContainer creation.
if (maybe_code.GetHeapObject(&code)) {
VerifyCodeObjectImpl(code);
} else {
@@ -6900,7 +7023,7 @@ void VerifyPointersVisitor::VerifyPointers(HeapObject host,
// to one of objects in DATA_ONLY_VISITOR_ID_LIST. You can fix
// this by moving that object to POINTER_VISITOR_ID_LIST.
DCHECK_EQ(ObjectFields::kMaybePointers,
- Map::ObjectFieldsFrom(host.map().visitor_id()));
+ Map::ObjectFieldsFrom(host.map(cage_base()).visitor_id()));
VerifyPointersImpl(start, end);
}
@@ -6975,7 +7098,7 @@ Map Heap::GcSafeMapOfCodeSpaceObject(HeapObject object) {
PtrComprCageBase cage_base(isolate());
MapWord map_word = object.map_word(cage_base, kRelaxedLoad);
if (map_word.IsForwardingAddress()) {
-#if V8_EXTERNAL_CODE_SPACE
+#ifdef V8_EXTERNAL_CODE_SPACE
PtrComprCageBase code_cage_base(isolate()->code_cage_base());
#else
PtrComprCageBase code_cage_base = cage_base;
@@ -6995,7 +7118,8 @@ Code Heap::GcSafeCastToCode(HeapObject object, Address inner_pointer) {
bool Heap::GcSafeCodeContains(Code code, Address addr) {
Map map = GcSafeMapOfCodeSpaceObject(code);
DCHECK(map == ReadOnlyRoots(this).code_map());
- Builtin maybe_builtin = InstructionStream::TryLookupCode(isolate(), addr);
+ Builtin maybe_builtin =
+ OffHeapInstructionStream::TryLookupCode(isolate(), addr);
if (Builtins::IsBuiltinId(maybe_builtin) &&
code.builtin_id() == maybe_builtin) {
return true;
@@ -7007,7 +7131,7 @@ bool Heap::GcSafeCodeContains(Code code, Address addr) {
Code Heap::GcSafeFindCodeForInnerPointer(Address inner_pointer) {
Builtin maybe_builtin =
- InstructionStream::TryLookupCode(isolate(), inner_pointer);
+ OffHeapInstructionStream::TryLookupCode(isolate(), inner_pointer);
if (Builtins::IsBuiltinId(maybe_builtin)) {
return isolate()->builtins()->code(maybe_builtin);
}
@@ -7053,6 +7177,13 @@ Code Heap::GcSafeFindCodeForInnerPointer(Address inner_pointer) {
return code;
}
}
+ // TODO(1241665): Remove once the issue is solved.
+ isolate()->PushParamsAndDie(
+ reinterpret_cast<void*>(inner_pointer),
+ const_cast<uint8_t*>(isolate()->embedded_blob_code()),
+ const_cast<uint8_t*>(Isolate::CurrentEmbeddedBlobCode()),
+ reinterpret_cast<void*>(Isolate::CurrentEmbeddedBlobCodeSize()));
+
UNREACHABLE();
}
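
Several heap.cc hunks above replace a plain SafepointScope local with a base::Optional that is emplace()d inside a nested block holding AllowGarbageCollection and IgnoreLocalGCRequests guards: the guards only need to cover entering the safepoint, while the safepoint itself must outlive them. A generic sketch of that idiom using std::optional and dummy guard types (the names are placeholders, not V8 classes):

    #include <cstdio>
    #include <optional>

    // Dummy guards standing in for AllowGarbageCollection and
    // IgnoreLocalGCRequests.
    struct Guard {
      explicit Guard(const char* name) : name_(name) {
        std::printf("+ %s\n", name_);
      }
      ~Guard() { std::printf("- %s\n", name_); }
      const char* name_;
    };

    // Dummy stand-in for SafepointScope: must stay active for the rest of
    // the enclosing function, not just while the guards above are alive.
    struct FakeSafepointScope {
      FakeSafepointScope() { std::printf("enter safepoint\n"); }
      ~FakeSafepointScope() { std::printf("leave safepoint\n"); }
    };

    void PerformCollectionLikeStep() {
      std::optional<FakeSafepointScope> safepoint_scope;
      {
        // The guards only need to cover safepoint *entry*...
        Guard allow_gc("allow_gc");
        Guard ignore_local("ignore_local_gc_requests");
        safepoint_scope.emplace();
      }
      // ...while the safepoint itself remains active here, after the guards
      // have been destroyed in reverse order.
      std::printf("work done inside the safepoint\n");
    }  // safepoint left when safepoint_scope is destroyed

    int main() {
      PerformCollectionLikeStep();
      return 0;
    }
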
diff --git a/deps/v8/src/heap/heap.h b/deps/v8/src/heap/heap.h
index dbe03936bf..ef8d912bfb 100644
--- a/deps/v8/src/heap/heap.h
+++ b/deps/v8/src/heap/heap.h
@@ -86,6 +86,7 @@ class HeapObjectsFilter;
class HeapStats;
class Isolate;
class JSFinalizationRegistry;
+class LinearAllocationArea;
class LocalEmbedderHeapTracer;
class LocalHeap;
class MarkingBarrier;
@@ -499,6 +500,20 @@ class Heap {
return "Unknown collector";
}
+ static inline const char* CollectorName(v8::GCType gc_type) {
+ switch (gc_type) {
+ case kGCTypeScavenge:
+ return "Scavenger";
+ case kGCTypeMarkSweepCompact:
+ return "Mark-Compact";
+ case kGCTypeMinorMarkCompact:
+ return "Minor Mark-Compact";
+ default:
+ break;
+ }
+ return "Unknown collector";
+ }
+
// Copy block of memory from src to dst. Size of block should be aligned
// by pointer size.
static inline void CopyBlock(Address dst, Address src, int byte_size);
@@ -668,18 +683,6 @@ class Heap {
void UnregisterUnprotectedMemoryChunk(MemoryChunk* chunk);
V8_EXPORT_PRIVATE void ProtectUnprotectedMemoryChunks();
- void EnableUnprotectedMemoryChunksRegistry() {
- unprotected_memory_chunks_registry_enabled_ = true;
- }
-
- void DisableUnprotectedMemoryChunksRegistry() {
- unprotected_memory_chunks_registry_enabled_ = false;
- }
-
- bool unprotected_memory_chunks_registry_enabled() {
- return unprotected_memory_chunks_registry_enabled_;
- }
-
void IncrementCodePageCollectionMemoryModificationScopeDepth() {
code_page_collection_memory_modification_scope_depth_++;
}
@@ -699,8 +702,14 @@ class Heap {
bool IsTearingDown() const { return gc_state() == TEAR_DOWN; }
bool force_oom() const { return force_oom_; }
+ bool ignore_local_gc_requests() const {
+ return ignore_local_gc_requests_depth_ > 0;
+ }
+
inline bool IsInGCPostProcessing() { return gc_post_processing_depth_ > 0; }
+ bool IsGCWithoutStack() const;
+
// If an object has an AllocationMemento trailing it, return it, otherwise
// return a null AllocationMemento.
template <FindMementoMode mode>
@@ -848,7 +857,8 @@ class Heap {
void ReplaceReadOnlySpace(SharedReadOnlySpace* shared_ro_space);
// Sets up the heap memory without creating any objects.
- void SetUpSpaces();
+ void SetUpSpaces(LinearAllocationArea* new_allocation_info,
+ LinearAllocationArea* old_allocation_info);
// Prepares the heap, setting up for deserialization.
void InitializeMainThreadLocalHeap(LocalHeap* main_thread_local_heap);
@@ -880,6 +890,7 @@ class Heap {
NewSpace* new_space() { return new_space_; }
OldSpace* old_space() { return old_space_; }
+ OldSpace* shared_old_space() { return shared_old_space_; }
CodeSpace* code_space() { return code_space_; }
MapSpace* map_space() { return map_space_; }
OldLargeObjectSpace* lo_space() { return lo_space_; }
@@ -902,7 +913,6 @@ class Heap {
}
inline Isolate* isolate();
- inline const Isolate* isolate() const;
MarkCompactCollector* mark_compact_collector() {
return mark_compact_collector_.get();
@@ -1068,7 +1078,7 @@ class Heap {
void IterateStackRoots(RootVisitor* v);
// ===========================================================================
- // Store buffer API. =========================================================
+ // Remembered set API. =======================================================
// ===========================================================================
// Used for query incremental marking status in generated code.
@@ -1078,10 +1088,6 @@ class Heap {
void SetIsMarkingFlag(uint8_t flag) { is_marking_flag_ = flag; }
- V8_EXPORT_PRIVATE Address* store_buffer_top_address();
- static intptr_t store_buffer_mask_constant();
- static Address store_buffer_overflow_function_address();
-
void ClearRecordedSlot(HeapObject object, ObjectSlot slot);
void ClearRecordedSlotRange(Address start, Address end);
static int InsertIntoRememberedSetFromCode(MemoryChunk* chunk, Address slot);
@@ -1666,6 +1672,10 @@ class Heap {
void UpdateEpochFull();
+ // Ensure that we have swept all spaces in such a way that we can iterate
+ // over all objects.
+ void MakeHeapIterable();
+
private:
using ExternalStringTableUpdaterCallback = String (*)(Heap* heap,
FullObjectSlot pointer);
@@ -1735,7 +1745,6 @@ class Heap {
: callback(callback), gc_type(gc_type), data(data) {}
bool operator==(const GCCallbackTuple& other) const;
- GCCallbackTuple& operator=(const GCCallbackTuple& other) V8_NOEXCEPT;
v8::Isolate::GCCallbackWithData callback;
GCType gc_type;
@@ -1790,17 +1799,14 @@ class Heap {
GarbageCollector SelectGarbageCollector(AllocationSpace space,
const char** reason);
- // Make sure there is a filler value behind the top of the new space
- // so that the GC does not confuse some unintialized/stale memory
- // with the allocation memento of the object at the top
- void EnsureFillerObjectAtTop();
+ // Free all LABs in the heap.
+ void FreeLinearAllocationAreas();
- // Ensure that we have swept all spaces in such a way that we can iterate
- // over all objects. May cause a GC.
- void MakeHeapIterable();
+ // Free all shared LABs.
+ void FreeSharedLinearAllocationAreas();
- // Ensure that LABs of local heaps are iterable.
- void MakeLocalHeapLabsIterable();
+ // Free all shared LABs of main thread.
+ void FreeMainThreadSharedLinearAllocationAreas();
// Performs garbage collection in a safepoint.
// Returns the number of freed global handles.
@@ -1943,7 +1949,7 @@ class Heap {
// reporting/verification activities when compiled with DEBUG set.
void GarbageCollectionPrologue();
void GarbageCollectionPrologueInSafepoint();
- void GarbageCollectionEpilogue();
+ void GarbageCollectionEpilogue(GarbageCollector collector);
void GarbageCollectionEpilogueInSafepoint(GarbageCollector collector);
// Performs a major collection in the whole heap.
@@ -2094,7 +2100,7 @@ class Heap {
V8_WARN_UNUSED_RESULT inline AllocationResult AllocateRaw(
int size_in_bytes, AllocationType allocation,
AllocationOrigin origin = AllocationOrigin::kRuntime,
- AllocationAlignment alignment = kWordAligned);
+ AllocationAlignment alignment = kTaggedAligned);
// This method will try to allocate objects quickly (AllocationType::kYoung)
// otherwise it falls back to a slower path indicated by the mode.
@@ -2103,13 +2109,13 @@ class Heap {
V8_WARN_UNUSED_RESULT inline HeapObject AllocateRawWith(
int size, AllocationType allocation,
AllocationOrigin origin = AllocationOrigin::kRuntime,
- AllocationAlignment alignment = kWordAligned);
+ AllocationAlignment alignment = kTaggedAligned);
// Call AllocateRawWith with kRetryOrFail. Matches the method in LocalHeap.
V8_WARN_UNUSED_RESULT inline Address AllocateRawOrFail(
int size, AllocationType allocation,
AllocationOrigin origin = AllocationOrigin::kRuntime,
- AllocationAlignment alignment = kWordAligned);
+ AllocationAlignment alignment = kTaggedAligned);
// This method will try to perform an allocation of a given size of a given
// AllocationType. If the allocation fails, a regular full garbage collection
@@ -2118,7 +2124,7 @@ class Heap {
// returned.
V8_WARN_UNUSED_RESULT HeapObject AllocateRawWithLightRetrySlowPath(
int size, AllocationType allocation, AllocationOrigin origin,
- AllocationAlignment alignment = kWordAligned);
+ AllocationAlignment alignment = kTaggedAligned);
// This method will try to perform an allocation of a given size of a given
// AllocationType. If the allocation fails, a regular full garbage collection
@@ -2128,7 +2134,7 @@ class Heap {
// If the allocation still fails after that a fatal error is thrown.
V8_WARN_UNUSED_RESULT HeapObject AllocateRawWithRetryOrFailSlowPath(
int size, AllocationType allocation, AllocationOrigin origin,
- AllocationAlignment alignment = kWordAligned);
+ AllocationAlignment alignment = kTaggedAligned);
// Allocates a heap object based on the map.
V8_WARN_UNUSED_RESULT AllocationResult Allocate(Handle<Map> map,
@@ -2260,7 +2266,8 @@ class Heap {
uintptr_t code_space_memory_modification_scope_depth_ = 0;
// Holds the number of open CodePageCollectionMemoryModificationScopes.
- uintptr_t code_page_collection_memory_modification_scope_depth_ = 0;
+ std::atomic<uintptr_t> code_page_collection_memory_modification_scope_depth_{
+ 0};
std::atomic<HeapState> gc_state_{NOT_IN_GC};
@@ -2459,6 +2466,8 @@ class Heap {
std::unique_ptr<CollectionBarrier> collection_barrier_;
+ int ignore_local_gc_requests_depth_ = 0;
+
int gc_callbacks_depth_ = 0;
bool deserialization_complete_ = false;
@@ -2476,7 +2485,6 @@ class Heap {
base::Mutex unprotected_memory_chunks_mutex_;
std::unordered_set<MemoryChunk*> unprotected_memory_chunks_;
- bool unprotected_memory_chunks_registry_enabled_ = false;
#ifdef V8_ENABLE_ALLOCATION_TIMEOUT
// If the --gc-interval flag is set to a positive value, this
@@ -2516,6 +2524,7 @@ class Heap {
friend class GCTracer;
friend class HeapObjectIterator;
friend class ScavengeTaskObserver;
+ friend class IgnoreLocalGCRequests;
friend class IncrementalMarking;
friend class IncrementalMarkingJob;
friend class LargeObjectSpace;
@@ -2674,6 +2683,15 @@ class V8_NODISCARD CodePageMemoryModificationScope {
DISALLOW_GARBAGE_COLLECTION(no_heap_allocation_)
};
+class V8_NODISCARD IgnoreLocalGCRequests {
+ public:
+ explicit inline IgnoreLocalGCRequests(Heap* heap);
+ inline ~IgnoreLocalGCRequests();
+
+ private:
+ Heap* heap_;
+};
+
// Visitor class to verify interior pointers in spaces that do not contain
// or care about intergenerational references. All heap object pointers have to
// point into the heap to a location that has a map pointer at its first word.
@@ -2773,8 +2791,6 @@ class V8_EXPORT_PRIVATE HeapObjectIterator {
private:
HeapObject NextObject();
- DISALLOW_GARBAGE_COLLECTION(no_heap_allocation_)
-
Heap* heap_;
std::unique_ptr<SafepointScope> safepoint_scope_;
HeapObjectsFiltering filtering_;
@@ -2783,6 +2799,8 @@ class V8_EXPORT_PRIVATE HeapObjectIterator {
SpaceIterator* space_iterator_;
// Object iterator for the space currently being iterated.
std::unique_ptr<ObjectIterator> object_iterator_;
+
+ DISALLOW_GARBAGE_COLLECTION(no_heap_allocation_)
};
// Abstract base class for checking whether a weak object should be retained.
diff --git a/deps/v8/src/heap/incremental-marking-inl.h b/deps/v8/src/heap/incremental-marking-inl.h
index 92489422d4..2dc1555929 100644
--- a/deps/v8/src/heap/incremental-marking-inl.h
+++ b/deps/v8/src/heap/incremental-marking-inl.h
@@ -41,6 +41,14 @@ bool IncrementalMarking::WhiteToGreyAndPush(HeapObject obj) {
return false;
}
+void IncrementalMarking::MarkRootObject(Root root, HeapObject obj) {
+ if (heap_->incremental_marking()->WhiteToGreyAndPush(obj)) {
+ if (V8_UNLIKELY(FLAG_track_retaining_path)) {
+ heap_->AddRetainingRoot(root, obj);
+ }
+ }
+}
+
void IncrementalMarking::RestartIfNotMarking() {
if (state_ == COMPLETE) {
state_ = MARKING;
diff --git a/deps/v8/src/heap/incremental-marking.cc b/deps/v8/src/heap/incremental-marking.cc
index 55f2d6998c..a653877f40 100644
--- a/deps/v8/src/heap/incremental-marking.cc
+++ b/deps/v8/src/heap/incremental-marking.cc
@@ -47,13 +47,15 @@ void IncrementalMarking::Observer::Step(int bytes_allocated, Address addr,
incremental_marking_->EnsureBlackAllocated(addr, size);
}
-IncrementalMarking::IncrementalMarking(Heap* heap,
- WeakObjects* weak_objects)
+IncrementalMarking::IncrementalMarking(Heap* heap, WeakObjects* weak_objects)
: heap_(heap),
collector_(heap->mark_compact_collector()),
weak_objects_(weak_objects),
new_generation_observer_(this, kYoungGenerationAllocatedThreshold),
- old_generation_observer_(this, kOldGenerationAllocatedThreshold) {
+ old_generation_observer_(this, kOldGenerationAllocatedThreshold),
+ marking_state_(heap->isolate()),
+ atomic_marking_state_(heap->isolate()),
+ non_atomic_marking_state_(heap->isolate()) {
SetState(STOPPED);
}
@@ -109,19 +111,19 @@ class IncrementalMarkingRootMarkingVisitor : public RootVisitor {
void VisitRootPointer(Root root, const char* description,
FullObjectSlot p) override {
DCHECK(!MapWord::IsPacked((*p).ptr()));
- MarkObjectByPointer(p);
+ MarkObjectByPointer(root, p);
}
void VisitRootPointers(Root root, const char* description,
FullObjectSlot start, FullObjectSlot end) override {
for (FullObjectSlot p = start; p < end; ++p) {
DCHECK(!MapWord::IsPacked((*p).ptr()));
- MarkObjectByPointer(p);
+ MarkObjectByPointer(root, p);
}
}
private:
- void MarkObjectByPointer(FullObjectSlot p) {
+ void MarkObjectByPointer(Root root, FullObjectSlot p) {
Object object = *p;
if (!object.IsHeapObject()) return;
DCHECK(!MapWord::IsPacked(object.ptr()));
@@ -129,7 +131,7 @@ class IncrementalMarkingRootMarkingVisitor : public RootVisitor {
BasicMemoryChunk* target_page =
BasicMemoryChunk::FromHeapObject(heap_object);
if (target_page->InSharedHeap()) return;
- heap_->incremental_marking()->WhiteToGreyAndPush(heap_object);
+ heap_->incremental_marking()->MarkRootObject(root, heap_object);
}
Heap* heap_;
@@ -231,7 +233,8 @@ void IncrementalMarking::StartMarking() {
heap_->InvokeIncrementalMarkingPrologueCallbacks();
- is_compacting_ = !FLAG_never_compact && collector_->StartCompaction();
+ is_compacting_ = collector_->StartCompaction(
+ MarkCompactCollector::StartCompactionMode::kIncremental);
collector_->StartMarking();
SetState(MARKING);
@@ -435,6 +438,7 @@ void IncrementalMarking::UpdateMarkingWorklistAfterScavenge() {
collector_->local_marking_worklists()->Publish();
MarkingBarrier::PublishAll(heap());
+ PtrComprCageBase cage_base(heap_->isolate());
collector_->marking_worklists()->Update(
[
#ifdef DEBUG
@@ -444,11 +448,11 @@ void IncrementalMarking::UpdateMarkingWorklistAfterScavenge() {
#ifdef ENABLE_MINOR_MC
minor_marking_state,
#endif
- filler_map](HeapObject obj, HeapObject* out) -> bool {
+ cage_base, filler_map](HeapObject obj, HeapObject* out) -> bool {
DCHECK(obj.IsHeapObject());
// Only pointers to from space have to be updated.
if (Heap::InFromPage(obj)) {
- MapWord map_word = obj.map_word(kRelaxedLoad);
+ MapWord map_word = obj.map_word(cage_base, kRelaxedLoad);
if (!map_word.IsForwardingAddress()) {
// There may be objects on the marking deque that do not exist
// anymore, e.g. left trimmed objects or objects from the root set
@@ -489,10 +493,10 @@ void IncrementalMarking::UpdateMarkingWorklistAfterScavenge() {
return true;
}
DCHECK_IMPLIES(marking_state()->IsWhite(obj),
- obj.IsFreeSpaceOrFiller());
+ obj.IsFreeSpaceOrFiller(cage_base));
// Skip one word filler objects that appear on the
// stack when we perform in place array shift.
- if (obj.map() != filler_map) {
+ if (obj.map(cage_base) != filler_map) {
*out = obj;
return true;
}
@@ -500,6 +504,7 @@ void IncrementalMarking::UpdateMarkingWorklistAfterScavenge() {
}
});
+ collector_->local_weak_objects()->Publish();
weak_objects_->UpdateAfterScavenge();
}
@@ -638,7 +643,7 @@ void IncrementalMarking::FinalizeMarking(CompletionAction action) {
"[IncrementalMarking] requesting finalization of incremental "
"marking.\n");
}
- request_type_ = FINALIZATION;
+ request_type_ = GCRequestType::FINALIZATION;
if (action == GC_VIA_STACK_GUARD) {
heap_->isolate()->stack_guard()->RequestGC();
}
@@ -708,7 +713,7 @@ void IncrementalMarking::MarkingComplete(CompletionAction action) {
heap()->isolate()->PrintWithTimestamp(
"[IncrementalMarking] Complete (normal).\n");
}
- request_type_ = COMPLETE_MARKING;
+ request_type_ = GCRequestType::COMPLETE_MARKING;
if (action == GC_VIA_STACK_GUARD) {
heap_->isolate()->stack_guard()->RequestGC();
}
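The hunks above thread the Root tag from VisitRootPointer(s) down to the new MarkRootObject, so that --track-retaining-path can record which root kept an object alive. A compressed, self-contained sketch of that shape (Marker, HeapObject and the printf output are illustrative stand-ins, not V8 API):

#include <cstdio>
#include <vector>

enum class Root { kStackRoots, kHandleScope, kGlobalHandles };

struct HeapObject { int id; };

// Stand-in for the incremental marker: WhiteToGreyAndPush returns true only
// the first time an object is marked, so each retaining root is recorded once.
struct Marker {
  std::vector<int> marked;
  bool track_retaining_path = true;

  bool WhiteToGreyAndPush(HeapObject obj) {
    for (int id : marked) if (id == obj.id) return false;
    marked.push_back(obj.id);
    return true;
  }

  void MarkRootObject(Root root, HeapObject obj) {
    if (WhiteToGreyAndPush(obj) && track_retaining_path) {
      std::printf("object %d retained by root %d\n", obj.id,
                  static_cast<int>(root));
    }
  }
};

int main() {
  Marker marker;
  marker.MarkRootObject(Root::kStackRoots, HeapObject{1});
  marker.MarkRootObject(Root::kGlobalHandles, HeapObject{1});  // already grey
}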
diff --git a/deps/v8/src/heap/incremental-marking.h b/deps/v8/src/heap/incremental-marking.h
index 40fb9b7dac..5ea92e6bad 100644
--- a/deps/v8/src/heap/incremental-marking.h
+++ b/deps/v8/src/heap/incremental-marking.h
@@ -33,7 +33,7 @@ class V8_EXPORT_PRIVATE IncrementalMarking final {
enum CompletionAction { GC_VIA_STACK_GUARD, NO_GC_VIA_STACK_GUARD };
- enum GCRequestType { NONE, COMPLETE_MARKING, FINALIZATION };
+ enum class GCRequestType { NONE, COMPLETE_MARKING, FINALIZATION };
using MarkingState = MarkCompactCollector::MarkingState;
using AtomicMarkingState = MarkCompactCollector::AtomicMarkingState;
@@ -81,11 +81,7 @@ class V8_EXPORT_PRIVATE IncrementalMarking final {
static constexpr size_t kEmbedderActivationThreshold = 0;
#endif
-#ifdef V8_ATOMIC_MARKING_STATE
static const AccessMode kAtomicity = AccessMode::ATOMIC;
-#else
- static const AccessMode kAtomicity = AccessMode::NON_ATOMIC;
-#endif
IncrementalMarking(Heap* heap, WeakObjects* weak_objects);
@@ -123,17 +119,18 @@ class V8_EXPORT_PRIVATE IncrementalMarking final {
inline bool IsComplete() const { return state() == COMPLETE; }
inline bool IsReadyToOverApproximateWeakClosure() const {
- return request_type_ == FINALIZATION && !finalize_marking_completed_;
+ return request_type_ == GCRequestType::FINALIZATION &&
+ !finalize_marking_completed_;
}
inline bool NeedsFinalization() {
- return IsMarking() &&
- (request_type_ == FINALIZATION || request_type_ == COMPLETE_MARKING);
+ return IsMarking() && (request_type_ == GCRequestType::FINALIZATION ||
+ request_type_ == GCRequestType::COMPLETE_MARKING);
}
GCRequestType request_type() const { return request_type_; }
- void reset_request_type() { request_type_ = NONE; }
+ void reset_request_type() { request_type_ = GCRequestType::NONE; }
bool CanBeActivated();
@@ -181,6 +178,9 @@ class V8_EXPORT_PRIVATE IncrementalMarking final {
// from white to grey.
V8_INLINE bool WhiteToGreyAndPush(HeapObject obj);
+ // Marks object referenced from roots.
+ V8_INLINE void MarkRootObject(Root root, HeapObject obj);
+
// This function is used to color the object black before it undergoes an
// unsafe layout change. This is a part of synchronization protocol with
// the concurrent marker.
@@ -310,7 +310,7 @@ class V8_EXPORT_PRIVATE IncrementalMarking final {
bool finalize_marking_completed_ = false;
IncrementalMarkingJob incremental_marking_job_;
- std::atomic<GCRequestType> request_type_{NONE};
+ std::atomic<GCRequestType> request_type_{GCRequestType::NONE};
Observer new_generation_observer_;
Observer old_generation_observer_;
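Turning GCRequestType into an enum class keeps the enumerators out of the enclosing scope and removes the implicit conversion to int, which is why every call site in this patch now spells GCRequestType::FINALIZATION and friends. A small illustration of the difference (not V8 code):

#include <atomic>

enum class GCRequestType { NONE, COMPLETE_MARKING, FINALIZATION };

// Scoped enumerators: callers must qualify the value, and it no longer
// converts implicitly to int, so comparisons stay type-safe.
std::atomic<GCRequestType> request{GCRequestType::NONE};

bool IsReadyToFinalize() {
  return request.load() == GCRequestType::FINALIZATION;
  // return request.load() == 2;  // would not compile with the scoped enum
}

int main() {
  request = GCRequestType::FINALIZATION;
  return IsReadyToFinalize() ? 0 : 1;
}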
diff --git a/deps/v8/src/heap/invalidated-slots-inl.h b/deps/v8/src/heap/invalidated-slots-inl.h
index 546667b2b2..7d28b750e2 100644
--- a/deps/v8/src/heap/invalidated-slots-inl.h
+++ b/deps/v8/src/heap/invalidated-slots-inl.h
@@ -9,9 +9,7 @@
#include "src/heap/invalidated-slots.h"
#include "src/heap/spaces.h"
-#include "src/objects/objects-body-descriptors-inl.h"
-#include "src/objects/objects-body-descriptors.h"
-#include "src/objects/objects.h"
+#include "src/objects/objects-inl.h"
#include "src/utils/allocation.h"
namespace v8 {
diff --git a/deps/v8/src/heap/large-spaces.cc b/deps/v8/src/heap/large-spaces.cc
index 63dc4b4e12..7d79c5cdd4 100644
--- a/deps/v8/src/heap/large-spaces.cc
+++ b/deps/v8/src/heap/large-spaces.cc
@@ -186,6 +186,9 @@ AllocationResult OldLargeObjectSpace::AllocateRawBackground(
heap()->incremental_marking()->black_allocation(),
heap()->incremental_marking()->marking_state()->IsBlack(object));
page->InitializationMemoryFence();
+ if (identity() == CODE_LO_SPACE) {
+ heap()->isolate()->AddCodeMemoryChunk(page);
+ }
return object;
}
@@ -264,7 +267,8 @@ void OldLargeObjectSpace::PromoteNewLargeObject(LargePage* page) {
DCHECK(page->IsLargePage());
DCHECK(page->IsFlagSet(MemoryChunk::FROM_PAGE));
DCHECK(!page->IsFlagSet(MemoryChunk::TO_PAGE));
- size_t object_size = static_cast<size_t>(page->GetObject().Size());
+ PtrComprCageBase cage_base(heap()->isolate());
+ size_t object_size = static_cast<size_t>(page->GetObject().Size(cage_base));
static_cast<LargeObjectSpace*>(page->owner())->RemovePage(page, object_size);
page->ClearFlag(MemoryChunk::FROM_PAGE);
AddPage(page, object_size);
@@ -297,11 +301,12 @@ void LargeObjectSpace::FreeUnmarkedObjects() {
// Right-trimming does not update the objects_size_ counter. We are lazily
// updating it after every GC.
size_t surviving_object_size = 0;
+ PtrComprCageBase cage_base(heap()->isolate());
while (current) {
LargePage* next_current = current->next_page();
HeapObject object = current->GetObject();
DCHECK(!marking_state->IsGrey(object));
- size_t size = static_cast<size_t>(object.Size());
+ size_t size = static_cast<size_t>(object.Size(cage_base));
if (marking_state->IsBlack(object)) {
Address free_start;
surviving_object_size += size;
@@ -313,7 +318,7 @@ void LargeObjectSpace::FreeUnmarkedObjects() {
current->size() - (free_start - current->address());
heap()->memory_allocator()->PartialFreeMemory(
current, free_start, bytes_to_free,
- current->area_start() + object.Size());
+ current->area_start() + object.Size(cage_base));
size_ -= bytes_to_free;
AccountUncommitted(bytes_to_free);
}
@@ -403,7 +408,7 @@ void LargeObjectSpace::Verify(Isolate* isolate) {
// Byte arrays and strings don't have interior pointers.
if (object.IsAbstractCode(cage_base)) {
VerifyPointersVisitor code_visitor(heap());
- object.IterateBody(map, object.Size(), &code_visitor);
+ object.IterateBody(map, object.Size(cage_base), &code_visitor);
} else if (object.IsFixedArray(cage_base)) {
FixedArray array = FixedArray::cast(object);
for (int j = 0; j < array.length(); j++) {
@@ -517,11 +522,12 @@ void NewLargeObjectSpace::FreeDeadObjects(
bool is_marking = heap()->incremental_marking()->IsMarking();
size_t surviving_object_size = 0;
bool freed_pages = false;
+ PtrComprCageBase cage_base(heap()->isolate());
for (auto it = begin(); it != end();) {
LargePage* page = *it;
it++;
HeapObject object = page->GetObject();
- size_t size = static_cast<size_t>(object.Size());
+ size_t size = static_cast<size_t>(object.Size(cage_base));
if (is_dead(object)) {
freed_pages = true;
RemovePage(page, size);
diff --git a/deps/v8/src/heap/linear-allocation-area.h b/deps/v8/src/heap/linear-allocation-area.h
index a03285c046..93d0c16619 100644
--- a/deps/v8/src/heap/linear-allocation-area.h
+++ b/deps/v8/src/heap/linear-allocation-area.h
@@ -5,6 +5,8 @@
#ifndef V8_HEAP_LINEAR_ALLOCATION_AREA_H_
#define V8_HEAP_LINEAR_ALLOCATION_AREA_H_
+// This header file is included outside of src/heap/.
+// Avoid including src/heap/ internals.
#include "include/v8-internal.h"
#include "src/common/checks.h"
@@ -100,6 +102,8 @@ class LinearAllocationArea final {
#endif // DEBUG
}
+ static constexpr int kSize = 3 * kSystemPointerSize;
+
private:
// The start of the LAB. Initially coincides with `top_`. As top is moved
// ahead, the area [start_, top_[ denotes a range of new objects. This range
@@ -111,6 +115,10 @@ class LinearAllocationArea final {
Address limit_ = kNullAddress;
};
+static_assert(sizeof(LinearAllocationArea) == LinearAllocationArea::kSize,
+ "LinearAllocationArea's size must be small because it "
+ "is included in IsolateData.");
+
} // namespace internal
} // namespace v8
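The new kSize constant plus the static_assert pins LinearAllocationArea at exactly three pointer-sized fields, since the object is embedded in IsolateData. The guard pattern looks like this in isolation (Address and kSystemPointerSize are redefined locally for the sketch):

#include <cstdint>

using Address = uintptr_t;
constexpr int kSystemPointerSize = sizeof(void*);

class LinearAllocationArea {
 public:
  static constexpr int kSize = 3 * kSystemPointerSize;

 private:
  Address start_ = 0;
  Address top_ = 0;
  Address limit_ = 0;
};

// Fails to compile if a field is added without updating kSize (and the
// embedding layout that depends on it).
static_assert(sizeof(LinearAllocationArea) == LinearAllocationArea::kSize,
              "LinearAllocationArea must stay three pointers in size");

int main() { return 0; }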
diff --git a/deps/v8/src/heap/local-allocator-inl.h b/deps/v8/src/heap/local-allocator-inl.h
index d28d1a6464..3d769906a6 100644
--- a/deps/v8/src/heap/local-allocator-inl.h
+++ b/deps/v8/src/heap/local-allocator-inl.h
@@ -85,7 +85,7 @@ AllocationResult EvacuationAllocator::AllocateInLAB(
bool EvacuationAllocator::NewLocalAllocationBuffer() {
if (lab_allocation_will_fail_) return false;
AllocationResult result =
- new_space_->AllocateRawSynchronized(kLabSize, kWordAligned);
+ new_space_->AllocateRawSynchronized(kLabSize, kTaggedAligned);
if (result.IsRetry()) {
lab_allocation_will_fail_ = true;
return false;
diff --git a/deps/v8/src/heap/local-factory.cc b/deps/v8/src/heap/local-factory.cc
index a581cfee60..d8c2ce898a 100644
--- a/deps/v8/src/heap/local-factory.cc
+++ b/deps/v8/src/heap/local-factory.cc
@@ -40,7 +40,8 @@ void LocalFactory::AddToScriptList(Handle<Script> shared) {
HeapObject LocalFactory::AllocateRaw(int size, AllocationType allocation,
AllocationAlignment alignment) {
- DCHECK_EQ(allocation, AllocationType::kOld);
+ DCHECK(allocation == AllocationType::kOld ||
+ allocation == AllocationType::kSharedOld);
return HeapObject::FromAddress(isolate()->heap()->AllocateRawOrFail(
size, allocation, AllocationOrigin::kRuntime, alignment));
}
diff --git a/deps/v8/src/heap/local-factory.h b/deps/v8/src/heap/local-factory.h
index 4423e7ff45..8737e3bfa1 100644
--- a/deps/v8/src/heap/local-factory.h
+++ b/deps/v8/src/heap/local-factory.h
@@ -57,7 +57,7 @@ class V8_EXPORT_PRIVATE LocalFactory : public FactoryBase<LocalFactory> {
// ------
// Customization points for FactoryBase.
HeapObject AllocateRaw(int size, AllocationType allocation,
- AllocationAlignment alignment = kWordAligned);
+ AllocationAlignment alignment = kTaggedAligned);
LocalIsolate* isolate() {
// Downcast to the privately inherited sub-class using c-style casts to
@@ -66,19 +66,29 @@ class V8_EXPORT_PRIVATE LocalFactory : public FactoryBase<LocalFactory> {
// NOLINTNEXTLINE (google-readability-casting)
return (LocalIsolate*)this; // NOLINT(readability/casting)
}
+
+ // This is the real Isolate that will be used for allocating and accessing
+ // external pointer entries when V8_HEAP_SANDBOX is enabled.
+ Isolate* isolate_for_heap_sandbox() {
+#ifdef V8_HEAP_SANDBOX
+ return isolate_for_heap_sandbox_;
+#else
+ return nullptr;
+#endif // V8_HEAP_SANDBOX
+ }
+
inline bool CanAllocateInReadOnlySpace() { return false; }
inline bool EmptyStringRootIsInitialized() { return true; }
inline AllocationType AllocationTypeForInPlaceInternalizableString();
// ------
void AddToScriptList(Handle<Script> shared);
-
- void SetExternalCodeSpaceInDataContainer(CodeDataContainer data_container) {
- UNREACHABLE();
- }
// ------
ReadOnlyRoots roots_;
+#ifdef V8_HEAP_SANDBOX
+ Isolate* isolate_for_heap_sandbox_;
+#endif
#ifdef DEBUG
bool a_script_was_added_to_the_script_list_ = false;
#endif
diff --git a/deps/v8/src/heap/local-heap-inl.h b/deps/v8/src/heap/local-heap-inl.h
index a6dc45a161..030e5b1932 100644
--- a/deps/v8/src/heap/local-heap-inl.h
+++ b/deps/v8/src/heap/local-heap-inl.h
@@ -25,7 +25,7 @@ AllocationResult LocalHeap::AllocateRaw(int size_in_bytes, AllocationType type,
DCHECK(AllowHandleAllocation::IsAllowed());
DCHECK(AllowHeapAllocation::IsAllowed());
DCHECK_IMPLIES(type == AllocationType::kCode || type == AllocationType::kMap,
- alignment == AllocationAlignment::kWordAligned);
+ alignment == AllocationAlignment::kTaggedAligned);
Heap::HeapState state = heap()->gc_state();
DCHECK(state == Heap::TEAR_DOWN || state == Heap::NOT_IN_GC);
DCHECK(IsRunning());
@@ -47,16 +47,24 @@ AllocationResult LocalHeap::AllocateRaw(int size_in_bytes, AllocationType type,
}
HeapObject object;
if (alloc.To(&object) && !V8_ENABLE_THIRD_PARTY_HEAP_BOOL) {
+ heap()->UnprotectAndRegisterMemoryChunk(
+ object, UnprotectMemoryOrigin::kMaybeOffMainThread);
heap()->ZapCodeObject(object.address(), size_in_bytes);
}
return alloc;
}
- CHECK_EQ(type, AllocationType::kOld);
- if (large_object)
- return heap()->lo_space()->AllocateRawBackground(this, size_in_bytes);
- else
- return old_space_allocator()->AllocateRaw(size_in_bytes, alignment, origin);
+ if (type == AllocationType::kOld) {
+ if (large_object)
+ return heap()->lo_space()->AllocateRawBackground(this, size_in_bytes);
+ else
+ return old_space_allocator()->AllocateRaw(size_in_bytes, alignment,
+ origin);
+ }
+
+ DCHECK_EQ(type, AllocationType::kSharedOld);
+ return shared_old_space_allocator()->AllocateRaw(size_in_bytes, alignment,
+ origin);
}
Address LocalHeap::AllocateRawOrFail(int object_size, AllocationType type,
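The rewritten tail of LocalHeap::AllocateRaw dispatches on AllocationType: kOld goes to the old-space (or large-object) path, while kSharedOld goes through the new shared_old_space_allocator_. A reduced sketch of that dispatch, with placeholder allocator types standing in for ConcurrentAllocator:

#include <cassert>
#include <cstdio>

enum class AllocationType { kOld, kSharedOld };

struct Allocator {
  const char* name;
  void* AllocateRaw(int size) {
    std::printf("%d bytes from %s\n", size, name);
    return nullptr;  // placeholder result
  }
};

struct LocalHeapSketch {
  Allocator old_space{"old space"};
  Allocator shared_old_space{"shared old space"};

  void* AllocateRaw(int size, AllocationType type) {
    if (type == AllocationType::kOld) {
      return old_space.AllocateRaw(size);
    }
    // Anything else must be a shared-old allocation in this sketch.
    assert(type == AllocationType::kSharedOld);
    return shared_old_space.AllocateRaw(size);
  }
};

int main() {
  LocalHeapSketch heap;
  heap.AllocateRaw(64, AllocationType::kOld);
  heap.AllocateRaw(64, AllocationType::kSharedOld);
}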
diff --git a/deps/v8/src/heap/local-heap.cc b/deps/v8/src/heap/local-heap.cc
index e5edc993c9..0485158799 100644
--- a/deps/v8/src/heap/local-heap.cc
+++ b/deps/v8/src/heap/local-heap.cc
@@ -8,6 +8,7 @@
#include <memory>
#include "src/base/logging.h"
+#include "src/base/optional.h"
#include "src/base/platform/mutex.h"
#include "src/common/globals.h"
#include "src/execution/isolate.h"
@@ -17,6 +18,7 @@
#include "src/heap/gc-tracer.h"
#include "src/heap/heap-inl.h"
#include "src/heap/heap-write-barrier.h"
+#include "src/heap/heap.h"
#include "src/heap/local-heap-inl.h"
#include "src/heap/marking-barrier.h"
#include "src/heap/parked-scope.h"
@@ -53,7 +55,9 @@ LocalHeap::LocalHeap(Heap* heap, ThreadKind kind,
next_(nullptr),
handles_(new LocalHandles),
persistent_handles_(std::move(persistent_handles)) {
+ DCHECK_IMPLIES(!is_main_thread(), heap_->deserialization_complete());
if (!is_main_thread()) SetUp();
+
heap_->safepoint()->AddLocalHeap(this, [this] {
if (!is_main_thread()) {
WriteBarrier::SetForThread(marking_barrier_.get());
@@ -108,6 +112,12 @@ void LocalHeap::SetUp() {
code_space_allocator_ =
std::make_unique<ConcurrentAllocator>(this, heap_->code_space());
+ DCHECK_NULL(shared_old_space_allocator_);
+ if (heap_->isolate()->shared_isolate()) {
+ shared_old_space_allocator_ =
+ std::make_unique<ConcurrentAllocator>(this, heap_->shared_old_space());
+ }
+
DCHECK_NULL(marking_barrier_);
marking_barrier_ = std::make_unique<MarkingBarrier>(this);
}
@@ -173,13 +183,42 @@ void LocalHeap::ParkSlowPath() {
DCHECK(current_state.IsRunning());
if (is_main_thread()) {
- DCHECK(current_state.IsCollectionRequested());
- heap_->CollectGarbageForBackground(this);
+ DCHECK(current_state.IsSafepointRequested() ||
+ current_state.IsCollectionRequested());
+
+ if (current_state.IsSafepointRequested()) {
+ ThreadState old_state = state_.SetParked();
+ heap_->safepoint()->NotifyPark();
+ if (old_state.IsCollectionRequested())
+ heap_->collection_barrier_->CancelCollectionAndResumeThreads();
+ return;
+ }
+
+ if (current_state.IsCollectionRequested()) {
+ if (!heap()->ignore_local_gc_requests()) {
+ heap_->CollectGarbageForBackground(this);
+ continue;
+ }
+
+ DCHECK(!current_state.IsSafepointRequested());
+
+ if (state_.CompareExchangeStrong(current_state,
+ current_state.SetParked())) {
+ heap_->collection_barrier_->CancelCollectionAndResumeThreads();
+ return;
+ } else {
+ continue;
+ }
+ }
} else {
DCHECK(current_state.IsSafepointRequested());
DCHECK(!current_state.IsCollectionRequested());
- CHECK(state_.CompareExchangeStrong(current_state,
- current_state.SetParked()));
+
+ ThreadState old_state = state_.SetParked();
+ CHECK(old_state.IsRunning());
+ CHECK(old_state.IsSafepointRequested());
+ CHECK(!old_state.IsCollectionRequested());
+
heap_->safepoint()->NotifyPark();
return;
}
@@ -196,52 +235,105 @@ void LocalHeap::UnparkSlowPath() {
DCHECK(current_state.IsParked());
if (is_main_thread()) {
- DCHECK(current_state.IsCollectionRequested());
- CHECK(state_.CompareExchangeStrong(current_state,
- current_state.SetRunning()));
- heap_->CollectGarbageForBackground(this);
- return;
+ DCHECK(current_state.IsSafepointRequested() ||
+ current_state.IsCollectionRequested());
+
+ if (current_state.IsSafepointRequested()) {
+ SleepInUnpark();
+ continue;
+ }
+
+ if (current_state.IsCollectionRequested()) {
+ DCHECK(!current_state.IsSafepointRequested());
+
+ if (!state_.CompareExchangeStrong(current_state,
+ current_state.SetRunning()))
+ continue;
+
+ if (!heap()->ignore_local_gc_requests()) {
+ heap_->CollectGarbageForBackground(this);
+ }
+
+ return;
+ }
} else {
DCHECK(current_state.IsSafepointRequested());
DCHECK(!current_state.IsCollectionRequested());
- TRACE_GC1(heap_->tracer(), GCTracer::Scope::BACKGROUND_UNPARK,
- ThreadKind::kBackground);
- heap_->safepoint()->WaitInUnpark();
+
+ SleepInUnpark();
}
}
}
+void LocalHeap::SleepInUnpark() {
+ GCTracer::Scope::ScopeId scope_id;
+ ThreadKind thread_kind;
+
+ if (is_main_thread()) {
+ scope_id = GCTracer::Scope::UNPARK;
+ thread_kind = ThreadKind::kMain;
+ } else {
+ scope_id = GCTracer::Scope::BACKGROUND_UNPARK;
+ thread_kind = ThreadKind::kBackground;
+ }
+
+ TRACE_GC1(heap_->tracer(), scope_id, thread_kind);
+ heap_->safepoint()->WaitInUnpark();
+}
+
void LocalHeap::EnsureParkedBeforeDestruction() {
DCHECK_IMPLIES(!is_main_thread(), IsParked());
}
void LocalHeap::SafepointSlowPath() {
-#ifdef DEBUG
ThreadState current_state = state_.load_relaxed();
DCHECK(current_state.IsRunning());
-#endif
if (is_main_thread()) {
- DCHECK(current_state.IsCollectionRequested());
- heap_->CollectGarbageForBackground(this);
+ DCHECK(current_state.IsSafepointRequested() ||
+ current_state.IsCollectionRequested());
+
+ if (current_state.IsSafepointRequested()) {
+ SleepInSafepoint();
+ }
+
+ if (current_state.IsCollectionRequested()) {
+ heap_->CollectGarbageForBackground(this);
+ }
} else {
DCHECK(current_state.IsSafepointRequested());
DCHECK(!current_state.IsCollectionRequested());
- TRACE_GC1(heap_->tracer(), GCTracer::Scope::BACKGROUND_SAFEPOINT,
- ThreadKind::kBackground);
-
- // Parking the running thread here is an optimization. We do not need to
- // wake this thread up to reach the next safepoint.
- ThreadState old_state = state_.SetParked();
- CHECK(old_state.IsRunning());
- CHECK(old_state.IsSafepointRequested());
- CHECK(!old_state.IsCollectionRequested());
+ SleepInSafepoint();
+ }
+}
- heap_->safepoint()->WaitInSafepoint();
+void LocalHeap::SleepInSafepoint() {
+ GCTracer::Scope::ScopeId scope_id;
+ ThreadKind thread_kind;
- Unpark();
+ if (is_main_thread()) {
+ scope_id = GCTracer::Scope::SAFEPOINT;
+ thread_kind = ThreadKind::kMain;
+ } else {
+ scope_id = GCTracer::Scope::BACKGROUND_SAFEPOINT;
+ thread_kind = ThreadKind::kBackground;
}
+
+ TRACE_GC1(heap_->tracer(), scope_id, thread_kind);
+
+ // Parking the running thread here is an optimization. We do not need to
+ // wake this thread up to reach the next safepoint.
+ ThreadState old_state = state_.SetParked();
+ CHECK(old_state.IsRunning());
+ CHECK(old_state.IsSafepointRequested());
+ CHECK_IMPLIES(old_state.IsCollectionRequested(), is_main_thread());
+
+ heap_->safepoint()->WaitInSafepoint();
+
+ base::Optional<IgnoreLocalGCRequests> ignore_gc_requests;
+ if (is_main_thread()) ignore_gc_requests.emplace(heap());
+ Unpark();
}
void LocalHeap::FreeLinearAllocationArea() {
@@ -249,6 +341,10 @@ void LocalHeap::FreeLinearAllocationArea() {
code_space_allocator_->FreeLinearAllocationArea();
}
+void LocalHeap::FreeSharedLinearAllocationArea() {
+ shared_old_space_allocator_->FreeLinearAllocationArea();
+}
+
void LocalHeap::MakeLinearAllocationAreaIterable() {
old_space_allocator_->MakeLinearAllocationAreaIterable();
code_space_allocator_->MakeLinearAllocationAreaIterable();
@@ -270,7 +366,7 @@ bool LocalHeap::TryPerformCollection() {
return true;
} else {
DCHECK(IsRunning());
- heap_->collection_barrier_->RequestGC();
+ if (!heap_->collection_barrier_->TryRequestGC()) return false;
LocalHeap* main_thread = heap_->main_thread_local_heap();
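The park/unpark slow paths above loop over an atomic thread-state word: a requested safepoint parks the thread (cancelling any pending collection), while a requested collection either runs the GC or, when local GC requests are being ignored, parks via compare-and-swap and retries on failure. A heavily simplified sketch of that retry loop; the bit layout and helper names are invented for illustration and do not match V8's ThreadState:

#include <atomic>
#include <cstdint>

// Illustrative state bits; the real implementation packs these differently.
constexpr uint32_t kParked = 1 << 0;
constexpr uint32_t kSafepointRequested = 1 << 1;
constexpr uint32_t kCollectionRequested = 1 << 2;

std::atomic<uint32_t> state{0};
bool ignore_local_gc_requests = false;

void CollectGarbage() { /* run the requested GC */ }
void NotifyParked() { /* tell the safepoint the thread is parked */ }

void ParkSlowPath() {
  for (;;) {
    uint32_t current = state.load();
    if (current & kSafepointRequested) {
      // Park unconditionally; a concurrent collection request is cancelled
      // elsewhere once the thread is known to be parked.
      state.fetch_or(kParked);
      NotifyParked();
      return;
    }
    if (current & kCollectionRequested) {
      if (!ignore_local_gc_requests) {
        CollectGarbage();
        continue;  // re-check the state word after the GC
      }
      // GC requests are being ignored: try to park without running the GC.
      if (state.compare_exchange_strong(current, current | kParked)) return;
      continue;  // lost the race; retry with the fresh state
    }
    if (state.compare_exchange_strong(current, current | kParked)) return;
  }
}

int main() { ParkSlowPath(); }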
diff --git a/deps/v8/src/heap/local-heap.h b/deps/v8/src/heap/local-heap.h
index 8ea5a6f336..0b5e96ac1a 100644
--- a/deps/v8/src/heap/local-heap.h
+++ b/deps/v8/src/heap/local-heap.h
@@ -8,6 +8,7 @@
#include <atomic>
#include <memory>
+#include "src/base/logging.h"
#include "src/base/macros.h"
#include "src/base/platform/condition-variable.h"
#include "src/base/platform/mutex.h"
@@ -99,6 +100,9 @@ class V8_EXPORT_PRIVATE LocalHeap {
ConcurrentAllocator* code_space_allocator() {
return code_space_allocator_.get();
}
+ ConcurrentAllocator* shared_old_space_allocator() {
+ return shared_old_space_allocator_.get();
+ }
void RegisterCodeObject(Handle<Code> code) {
heap()->RegisterCodeObject(code);
@@ -111,6 +115,9 @@ class V8_EXPORT_PRIVATE LocalHeap {
// Give up linear allocation areas. Used for mark-compact GC.
void FreeLinearAllocationArea();
+ // Free all shared LABs. Used by the shared mark-compact GC.
+ void FreeSharedLinearAllocationArea();
+
// Create filler object in linear allocation areas. Verifying requires
// iterable heap.
void MakeLinearAllocationAreaIterable();
@@ -130,14 +137,14 @@ class V8_EXPORT_PRIVATE LocalHeap {
V8_WARN_UNUSED_RESULT inline AllocationResult AllocateRaw(
int size_in_bytes, AllocationType allocation,
AllocationOrigin origin = AllocationOrigin::kRuntime,
- AllocationAlignment alignment = kWordAligned);
+ AllocationAlignment alignment = kTaggedAligned);
// Allocates an uninitialized object and crashes when object
// cannot be allocated.
V8_WARN_UNUSED_RESULT inline Address AllocateRawOrFail(
int size_in_bytes, AllocationType allocation,
AllocationOrigin origin = AllocationOrigin::kRuntime,
- AllocationAlignment alignment = kWordAligned);
+ AllocationAlignment alignment = kTaggedAligned);
inline void CreateFillerObjectAt(Address addr, int size,
ClearRecordedSlots clear_slots_mode);
@@ -278,6 +285,8 @@ class V8_EXPORT_PRIVATE LocalHeap {
void UnparkSlowPath();
void EnsureParkedBeforeDestruction();
void SafepointSlowPath();
+ void SleepInSafepoint();
+ void SleepInUnpark();
void EnsurePersistentHandles();
@@ -305,13 +314,16 @@ class V8_EXPORT_PRIVATE LocalHeap {
std::unique_ptr<ConcurrentAllocator> old_space_allocator_;
std::unique_ptr<ConcurrentAllocator> code_space_allocator_;
+ std::unique_ptr<ConcurrentAllocator> shared_old_space_allocator_;
friend class CollectionBarrier;
friend class ConcurrentAllocator;
+ friend class GlobalSafepoint;
friend class IsolateSafepoint;
friend class Heap;
friend class Isolate;
friend class ParkedScope;
+ friend class SafepointScope;
friend class UnparkedScope;
};
diff --git a/deps/v8/src/heap/mark-compact-inl.h b/deps/v8/src/heap/mark-compact-inl.h
index a623360197..e945c34cef 100644
--- a/deps/v8/src/heap/mark-compact-inl.h
+++ b/deps/v8/src/heap/mark-compact-inl.h
@@ -45,7 +45,7 @@ void MarkCompactCollector::MarkRootObject(Root root, HeapObject obj) {
void MinorMarkCompactCollector::MarkRootObject(HeapObject obj) {
if (Heap::InYoungGeneration(obj) &&
non_atomic_marking_state_.WhiteToGrey(obj)) {
- worklist_->Push(kMainThreadTask, obj);
+ main_thread_worklist_local_.Push(obj);
}
}
@@ -89,7 +89,7 @@ void MarkCompactCollector::RecordSlot(MemoryChunk* source_page,
}
void MarkCompactCollector::AddTransitionArray(TransitionArray array) {
- weak_objects_.transition_arrays.Push(kMainThreadTask, array);
+ local_weak_objects()->transition_arrays_local.Push(array);
}
template <typename MarkingState>
diff --git a/deps/v8/src/heap/mark-compact.cc b/deps/v8/src/heap/mark-compact.cc
index 640b127d19..2977b4219d 100644
--- a/deps/v8/src/heap/mark-compact.cc
+++ b/deps/v8/src/heap/mark-compact.cc
@@ -6,6 +6,7 @@
#include <unordered_map>
+#include "src/base/logging.h"
#include "src/base/optional.h"
#include "src/base/utils/random-number-generator.h"
#include "src/codegen/compilation-cache.h"
@@ -20,6 +21,7 @@
#include "src/heap/array-buffer-sweeper.h"
#include "src/heap/code-object-registry.h"
#include "src/heap/gc-tracer.h"
+#include "src/heap/heap.h"
#include "src/heap/incremental-marking-inl.h"
#include "src/heap/index-generator.h"
#include "src/heap/invalidated-slots-inl.h"
@@ -39,7 +41,7 @@
#include "src/heap/safepoint.h"
#include "src/heap/spaces-inl.h"
#include "src/heap/sweeper.h"
-#include "src/heap/worklist.h"
+#include "src/heap/weak-object-worklists.h"
#include "src/ic/stub-cache.h"
#include "src/init/v8.h"
#include "src/logging/tracing-flags.h"
@@ -118,7 +120,9 @@ class MarkingVerifier : public ObjectVisitorWithCageBases, public RootVisitor {
VerifyRootPointers(start, end);
}
- void VisitMapPointer(HeapObject object) override { VerifyMap(object.map()); }
+ void VisitMapPointer(HeapObject object) override {
+ VerifyMap(object.map(cage_base()));
+ }
void VerifyRoots();
void VerifyMarkingOnPage(const Page* page, Address start, Address end);
@@ -147,7 +151,7 @@ void MarkingVerifier::VerifyMarkingOnPage(const Page* page, Address start,
if (current >= end) break;
CHECK(IsMarked(object));
CHECK(current >= next_object_must_be_here_or_later);
- object.Iterate(this);
+ object.Iterate(cage_base(), this);
next_object_must_be_here_or_later = current + size;
// The object is either part of a black area of black allocation or a
// regular black object
@@ -189,7 +193,7 @@ void MarkingVerifier::VerifyMarking(LargeObjectSpace* lo_space) {
LargeObjectSpaceObjectIterator it(lo_space);
for (HeapObject obj = it.Next(); !obj.is_null(); obj = it.Next()) {
if (IsBlackOrGrey(obj)) {
- obj.Iterate(this);
+ obj.Iterate(cage_base(), this);
}
}
}
@@ -240,6 +244,7 @@ class FullMarkingVerifier : public MarkingVerifier {
CHECK(V8_EXTERNAL_CODE_SPACE_BOOL);
Object maybe_code = slot.load(code_cage_base());
HeapObject code;
+ // The slot might contain smi during CodeDataContainer creation, so skip it.
if (maybe_code.GetHeapObject(&code)) {
VerifyHeapObjectImpl(code);
}
@@ -314,7 +319,9 @@ class EvacuationVerifier : public ObjectVisitorWithCageBases,
VerifyRootPointers(start, end);
}
- void VisitMapPointer(HeapObject object) override { VerifyMap(object.map()); }
+ void VisitMapPointer(HeapObject object) override {
+ VerifyMap(object.map(cage_base()));
+ }
protected:
explicit EvacuationVerifier(Heap* heap)
@@ -345,8 +352,10 @@ void EvacuationVerifier::VerifyEvacuationOnPage(Address start, Address end) {
Address current = start;
while (current < end) {
HeapObject object = HeapObject::FromAddress(current);
- if (!object.IsFreeSpaceOrFiller(cage_base())) object.Iterate(this);
- current += object.Size();
+ if (!object.IsFreeSpaceOrFiller(cage_base())) {
+ object.Iterate(cage_base(), this);
+ }
+ current += object.Size(cage_base());
}
}
@@ -419,6 +428,7 @@ class FullEvacuationVerifier : public EvacuationVerifier {
CHECK(V8_EXTERNAL_CODE_SPACE_BOOL);
Object maybe_code = slot.load(code_cage_base());
HeapObject code;
+ // The slot might contain smi during CodeDataContainer creation, so skip it.
if (maybe_code.GetHeapObject(&code)) {
VerifyHeapObjectImpl(code);
}
@@ -466,16 +476,12 @@ int MarkCompactCollectorBase::NumberOfParallelCompactionTasks() {
MarkCompactCollector::MarkCompactCollector(Heap* heap)
: MarkCompactCollectorBase(heap),
- page_parallel_job_semaphore_(0),
#ifdef DEBUG
state_(IDLE),
#endif
is_shared_heap_(heap->IsShared()),
- was_marked_incrementally_(false),
- evacuation_(false),
- compacting_(false),
- black_allocation_(false),
- have_code_to_deoptimize_(false),
+ marking_state_(heap->isolate()),
+ non_atomic_marking_state_(heap->isolate()),
sweeper_(new Sweeper(heap, non_atomic_marking_state())) {
}
@@ -490,12 +496,13 @@ void MarkCompactCollector::SetUp() {
void MarkCompactCollector::TearDown() {
AbortCompaction();
- AbortWeakObjects();
if (heap()->incremental_marking()->IsMarking()) {
local_marking_worklists()->Publish();
heap()->marking_barrier()->Publish();
// Marking barriers of LocalHeaps will be published in their destructors.
marking_worklists()->Clear();
+ local_weak_objects()->Publish();
+ weak_objects()->Clear();
}
sweeper()->TearDown();
}
@@ -522,28 +529,32 @@ static void TraceFragmentation(PagedSpace* space) {
static_cast<int>(free), static_cast<double>(free) * 100 / reserved);
}
-bool MarkCompactCollector::StartCompaction() {
- if (!compacting_) {
- DCHECK(evacuation_candidates_.empty());
-
- if (FLAG_gc_experiment_less_compaction && !heap_->ShouldReduceMemory())
- return false;
+bool MarkCompactCollector::StartCompaction(StartCompactionMode mode) {
+ DCHECK(!compacting_);
+ DCHECK(evacuation_candidates_.empty());
- CollectEvacuationCandidates(heap()->old_space());
+ // Bailouts for completely disabled compaction.
+ if (!FLAG_compact ||
+ (mode == StartCompactionMode::kAtomic && !heap()->IsGCWithoutStack() &&
+ !FLAG_compact_with_stack) ||
+ (FLAG_gc_experiment_less_compaction && !heap_->ShouldReduceMemory())) {
+ return false;
+ }
- if (FLAG_compact_code_space) {
- CollectEvacuationCandidates(heap()->code_space());
- } else if (FLAG_trace_fragmentation) {
- TraceFragmentation(heap()->code_space());
- }
+ CollectEvacuationCandidates(heap()->old_space());
- if (FLAG_trace_fragmentation) {
- TraceFragmentation(heap()->map_space());
- }
+ if (FLAG_compact_code_space &&
+ (heap()->IsGCWithoutStack() || FLAG_compact_code_space_with_stack)) {
+ CollectEvacuationCandidates(heap()->code_space());
+ } else if (FLAG_trace_fragmentation) {
+ TraceFragmentation(heap()->code_space());
+ }
- compacting_ = !evacuation_candidates_.empty();
+ if (FLAG_trace_fragmentation) {
+ TraceFragmentation(heap()->map_space());
}
+ compacting_ = !evacuation_candidates_.empty();
return compacting_;
}
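StartCompaction now takes an explicit mode and bails out up front when compaction is disabled (--no-compact), when an atomic GC runs with a stack but --compact-with-stack is off, or when the less-compaction experiment applies and memory does not need to be reduced. The decision can be sketched like this, with plain bools standing in for the flags and heap predicates:

#include <cstdio>

enum class StartCompactionMode { kIncremental, kAtomic };

// Stand-ins for the relevant flags and heap predicates.
bool flag_compact = true;
bool flag_compact_with_stack = false;
bool flag_less_compaction = false;
bool gc_without_stack = false;
bool should_reduce_memory = false;

bool StartCompaction(StartCompactionMode mode) {
  if (!flag_compact ||
      (mode == StartCompactionMode::kAtomic && !gc_without_stack &&
       !flag_compact_with_stack) ||
      (flag_less_compaction && !should_reduce_memory)) {
    return false;  // compaction skipped for this cycle
  }
  // ...collect evacuation candidates here...
  return true;
}

int main() {
  std::printf("atomic: %d\n", StartCompaction(StartCompactionMode::kAtomic));
  std::printf("incremental: %d\n",
              StartCompaction(StartCompactionMode::kIncremental));
}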
@@ -561,9 +572,11 @@ void MarkCompactCollector::StartMarking() {
marking_worklists()->CreateContextWorklists(contexts);
local_marking_worklists_ =
std::make_unique<MarkingWorklists::Local>(marking_worklists());
+ local_weak_objects_ = std::make_unique<WeakObjects::Local>(weak_objects());
marking_visitor_ = std::make_unique<MarkingVisitor>(
- marking_state(), local_marking_worklists(), weak_objects(), heap_,
- epoch(), code_flush_mode(), heap_->local_embedder_heap_tracer()->InUse(),
+ marking_state(), local_marking_worklists(), local_weak_objects_.get(),
+ heap_, epoch(), code_flush_mode(),
+ heap_->local_embedder_heap_tracer()->InUse(),
heap_->ShouldCurrentGCKeepAgesUnchanged());
// Marking bits are cleared by the sweeper.
#ifdef VERIFY_HEAP
@@ -731,7 +744,7 @@ void MarkCompactCollector::CollectEvacuationCandidates(PagedSpace* space) {
const bool in_standard_path =
!(FLAG_manual_evacuation_candidates_selection ||
FLAG_stress_compaction_random || FLAG_stress_compaction ||
- FLAG_always_compact);
+ FLAG_compact_on_every_full_gc);
// Those variables will only be initialized if |in_standard_path|, and are not
// used otherwise.
size_t max_evacuated_bytes;
@@ -843,7 +856,7 @@ void MarkCompactCollector::CollectEvacuationCandidates(PagedSpace* space) {
for (size_t i = 0; i < pages.size(); i++) {
size_t live_bytes = pages[i].first;
DCHECK_GE(area_size, live_bytes);
- if (FLAG_always_compact ||
+ if (FLAG_compact_on_every_full_gc ||
((total_live_bytes + live_bytes) <= max_evacuated_bytes)) {
candidate_count++;
total_live_bytes += live_bytes;
@@ -866,7 +879,7 @@ void MarkCompactCollector::CollectEvacuationCandidates(PagedSpace* space) {
DCHECK_LE(estimated_new_pages, candidate_count);
int estimated_released_pages = candidate_count - estimated_new_pages;
// Avoid (compact -> expand) cycles.
- if ((estimated_released_pages == 0) && !FLAG_always_compact) {
+ if ((estimated_released_pages == 0) && !FLAG_compact_on_every_full_gc) {
candidate_count = 0;
}
for (int i = 0; i < candidate_count; i++) {
@@ -906,7 +919,6 @@ void MarkCompactCollector::Prepare() {
state_ = PREPARE_GC;
#endif
- DCHECK(!FLAG_never_compact || !FLAG_always_compact);
DCHECK(!sweeping_in_progress());
if (!was_marked_incrementally_) {
@@ -915,22 +927,18 @@ void MarkCompactCollector::Prepare() {
heap_->local_embedder_heap_tracer()->TracePrologue(
heap_->flags_for_embedder_tracer());
}
- if (!FLAG_never_compact) {
- StartCompaction();
- }
+ StartCompaction(StartCompactionMode::kAtomic);
StartMarking();
}
+ heap_->FreeLinearAllocationAreas();
+
PagedSpaceIterator spaces(heap());
for (PagedSpace* space = spaces.Next(); space != nullptr;
space = spaces.Next()) {
space->PrepareForMarkCompact();
}
- // Fill and reset all background thread LABs
- heap_->safepoint()->IterateLocalHeaps(
- [](LocalHeap* local_heap) { local_heap->FreeLinearAllocationArea(); });
-
// All objects are guaranteed to be initialized in atomic pause
if (heap()->new_lo_space()) {
heap()->new_lo_space()->ResetPendingObject();
@@ -987,6 +995,8 @@ void MarkCompactCollector::Finish() {
CHECK(weak_objects_.current_ephemerons.IsEmpty());
CHECK(weak_objects_.discovered_ephemerons.IsEmpty());
+ local_weak_objects_->next_ephemerons_local.Publish();
+ local_weak_objects_.reset();
weak_objects_.next_ephemerons.Clear();
sweeper()->StartSweeperTasks();
@@ -1040,6 +1050,36 @@ class MarkCompactCollector::RootMarkingVisitor final : public RootVisitor {
}
}
+ void VisitRunningCode(FullObjectSlot p) final {
+ Code code = Code::cast(*p);
+
+ // If Code is currently executing, then we must not remove its
+ // deoptimization literals, which it might need in order to successfully
+ // deoptimize.
+ //
+ // Must match behavior in RootsReferencesExtractor::VisitRunningCode, so
+ // that heap snapshots accurately describe the roots.
+ if (code.kind() != CodeKind::BASELINE) {
+ DeoptimizationData deopt_data =
+ DeoptimizationData::cast(code.deoptimization_data());
+ if (deopt_data.length() > 0) {
+ DeoptimizationLiteralArray literals = deopt_data.LiteralArray();
+ int literals_length = literals.length();
+ for (int i = 0; i < literals_length; ++i) {
+ MaybeObject maybe_literal = literals.Get(i);
+ HeapObject heap_literal;
+ if (maybe_literal.GetHeapObject(&heap_literal)) {
+ MarkObjectByPointer(Root::kStackRoots,
+ FullObjectSlot(&heap_literal));
+ }
+ }
+ }
+ }
+
+ // And then mark the Code itself.
+ VisitRootPointer(Root::kStackRoots, nullptr, p);
+ }
+
private:
V8_INLINE void MarkObjectByPointer(Root root, FullObjectSlot p) {
Object object = *p;
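VisitRunningCode, added in the hunk above, keeps the deoptimization literals of currently executing, non-baseline Code alive by marking every heap object in the literal array before marking the Code itself. A standalone sketch of that walk over the literals (all types here are simplified stand-ins):

#include <optional>
#include <vector>

struct HeapObject { int id; };

// Stand-in for DeoptimizationLiteralArray: a literal slot may be cleared.
using Literals = std::vector<std::optional<HeapObject>>;

struct Code {
  bool is_baseline;
  Literals deopt_literals;
};

void Mark(const HeapObject&) { /* push onto a marking worklist */ }

void VisitRunningCode(const Code& code) {
  // Baseline code carries no deoptimization data, so there is nothing to keep.
  if (!code.is_baseline) {
    for (const auto& literal : code.deopt_literals) {
      if (literal.has_value()) Mark(*literal);  // keep literals reachable
    }
  }
  // Then mark the Code object itself as a stack root.
  Mark(HeapObject{/*id=*/0});
}

int main() {
  Code code{false, {HeapObject{1}, std::nullopt, HeapObject{2}}};
  VisitRunningCode(code);
}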
@@ -1090,9 +1130,7 @@ class MarkCompactCollector::CustomRootBodyMarkingVisitor final
void VisitCodePointer(HeapObject host, CodeObjectSlot slot) override {
CHECK(V8_EXTERNAL_CODE_SPACE_BOOL);
- // At the moment, custom roots cannot contain CodeDataContainers - the only
- // objects that can contain Code pointers.
- UNREACHABLE();
+ MarkObject(host, slot.load(code_cage_base()));
}
void VisitPointers(HeapObject host, MaybeObjectSlot start,
@@ -1101,11 +1139,11 @@ class MarkCompactCollector::CustomRootBodyMarkingVisitor final
UNREACHABLE();
}
- // VisitEmbedderPointer is defined by ObjectVisitor to call VisitPointers.
void VisitCodeTarget(Code host, RelocInfo* rinfo) override {
Code target = Code::GetCodeFromTargetAddress(rinfo->target_address());
MarkObject(host, target);
}
+
void VisitEmbeddedPointer(Code host, RelocInfo* rinfo) override {
MarkObject(host, rinfo->target_object(cage_base()));
}
@@ -1119,6 +1157,70 @@ class MarkCompactCollector::CustomRootBodyMarkingVisitor final
MarkCompactCollector* const collector_;
};
+class MarkCompactCollector::SharedHeapObjectVisitor final
+ : public ObjectVisitorWithCageBases {
+ public:
+ explicit SharedHeapObjectVisitor(MarkCompactCollector* collector)
+ : ObjectVisitorWithCageBases(collector->isolate()),
+ collector_(collector) {}
+
+ void VisitPointer(HeapObject host, ObjectSlot p) final {
+ MarkObject(host, p.load(cage_base()));
+ }
+
+ void VisitPointer(HeapObject host, MaybeObjectSlot p) final {
+ MaybeObject object = p.load(cage_base());
+ HeapObject heap_object;
+ if (object.GetHeapObject(&heap_object)) MarkObject(host, heap_object);
+ }
+
+ void VisitMapPointer(HeapObject host) final {
+ MarkObject(host, host.map(cage_base()));
+ }
+
+ void VisitPointers(HeapObject host, ObjectSlot start, ObjectSlot end) final {
+ for (ObjectSlot p = start; p < end; ++p) {
+ // The map slot should be handled in VisitMapPointer.
+ DCHECK_NE(host.map_slot(), p);
+ DCHECK(!HasWeakHeapObjectTag(p.load(cage_base())));
+ MarkObject(host, p.load(cage_base()));
+ }
+ }
+
+ void VisitCodePointer(HeapObject host, CodeObjectSlot slot) override {
+ CHECK(V8_EXTERNAL_CODE_SPACE_BOOL);
+ MarkObject(host, slot.load(code_cage_base()));
+ }
+
+ void VisitPointers(HeapObject host, MaybeObjectSlot start,
+ MaybeObjectSlot end) final {
+ for (MaybeObjectSlot p = start; p < end; ++p) {
+ // The map slot should be handled in VisitMapPointer.
+ DCHECK_NE(host.map_slot(), ObjectSlot(p));
+ VisitPointer(host, p);
+ }
+ }
+
+ void VisitCodeTarget(Code host, RelocInfo* rinfo) override {
+ Code target = Code::GetCodeFromTargetAddress(rinfo->target_address());
+ MarkObject(host, target);
+ }
+ void VisitEmbeddedPointer(Code host, RelocInfo* rinfo) override {
+ MarkObject(host, rinfo->target_object(cage_base()));
+ }
+
+ private:
+ V8_INLINE void MarkObject(HeapObject host, Object object) {
+ DCHECK(!BasicMemoryChunk::FromHeapObject(host)->InSharedHeap());
+ if (!object.IsHeapObject()) return;
+ HeapObject heap_object = HeapObject::cast(object);
+ if (!BasicMemoryChunk::FromHeapObject(heap_object)->InSharedHeap()) return;
+ collector_->MarkObject(host, heap_object);
+ }
+
+ MarkCompactCollector* const collector_;
+};
+
class InternalizedStringTableCleaner : public RootVisitor {
public:
explicit InternalizedStringTableCleaner(Heap* heap)
@@ -1389,6 +1491,14 @@ class EvacuateVisitorBase : public HeapObjectVisitor {
protected:
enum MigrationMode { kFast, kObserved };
+ PtrComprCageBase cage_base() {
+#if V8_COMPRESS_POINTERS
+ return PtrComprCageBase{heap_->isolate()};
+#else
+ return PtrComprCageBase{};
+#endif // V8_COMPRESS_POINTERS
+ }
+
using MigrateFunction = void (*)(EvacuateVisitorBase* base, HeapObject dst,
HeapObject src, int size,
AllocationSpace dest);
@@ -1398,7 +1508,8 @@ class EvacuateVisitorBase : public HeapObjectVisitor {
HeapObject src, int size, AllocationSpace dest) {
Address dst_addr = dst.address();
Address src_addr = src.address();
- DCHECK(base->heap_->AllowedToBeMigrated(src.map(), src, dest));
+ PtrComprCageBase cage_base = base->cage_base();
+ DCHECK(base->heap_->AllowedToBeMigrated(src.map(cage_base), src, dest));
DCHECK_NE(dest, LO_SPACE);
DCHECK_NE(dest, CODE_LO_SPACE);
if (dest == OLD_SPACE) {
@@ -1407,7 +1518,7 @@ class EvacuateVisitorBase : public HeapObjectVisitor {
base->heap_->CopyBlock(dst_addr, src_addr, size);
if (mode != MigrationMode::kFast)
base->ExecuteMigrationObservers(dest, src, dst, size);
- dst.IterateBodyFast(dst.map(), size, base->record_visitor_);
+ dst.IterateBodyFast(dst.map(cage_base), size, base->record_visitor_);
if (V8_UNLIKELY(FLAG_minor_mc)) {
base->record_visitor_->MarkArrayBufferExtensionPromoted(dst);
}
@@ -1418,7 +1529,7 @@ class EvacuateVisitorBase : public HeapObjectVisitor {
code.Relocate(dst_addr - src_addr);
if (mode != MigrationMode::kFast)
base->ExecuteMigrationObservers(dest, src, dst, size);
- dst.IterateBodyFast(dst.map(), size, base->record_visitor_);
+ dst.IterateBodyFast(dst.map(cage_base), size, base->record_visitor_);
} else {
DCHECK_OBJECT_SIZE(size);
DCHECK(dest == NEW_SPACE);
@@ -1447,7 +1558,7 @@ class EvacuateVisitorBase : public HeapObjectVisitor {
if (FLAG_stress_compaction && AbortCompactionForTesting(object))
return false;
#endif // DEBUG
- Map map = object.map();
+ Map map = object.map(cage_base());
AllocationAlignment alignment = HeapObject::RequiredAlignment(map);
AllocationResult allocation;
if (ShouldPromoteIntoSharedHeap(map)) {
@@ -1524,7 +1635,7 @@ class EvacuateNewSpaceVisitor final : public EvacuateVisitorBase {
Heap* heap, EvacuationAllocator* local_allocator,
RecordMigratedSlotVisitor* record_visitor,
Heap::PretenuringFeedbackMap* local_pretenuring_feedback,
- bool always_promote_young)
+ AlwaysPromoteYoung always_promote_young)
: EvacuateVisitorBase(heap, local_allocator, record_visitor),
buffer_(LocalAllocationBuffer::InvalidBuffer()),
promoted_size_(0),
@@ -1537,7 +1648,7 @@ class EvacuateNewSpaceVisitor final : public EvacuateVisitorBase {
if (TryEvacuateWithoutCopy(object)) return true;
HeapObject target_object;
- if (always_promote_young_) {
+ if (always_promote_young_ == AlwaysPromoteYoung::kYes) {
heap_->UpdateAllocationSite(object.map(), object,
local_pretenuring_feedback_);
@@ -1621,7 +1732,7 @@ class EvacuateNewSpaceVisitor final : public EvacuateVisitorBase {
intptr_t semispace_copied_size_;
Heap::PretenuringFeedbackMap* local_pretenuring_feedback_;
bool is_incremental_marking_;
- bool always_promote_young_;
+ AlwaysPromoteYoung always_promote_young_;
};
template <PageEvacuationMode mode>
@@ -1656,7 +1767,9 @@ class EvacuateNewSpacePageVisitor final : public HeapObjectVisitor {
heap_->UpdateAllocationSite(object.map(), object,
local_pretenuring_feedback_);
} else if (mode == NEW_TO_OLD) {
- object.IterateBodyFast(record_visitor_);
+ DCHECK_IMPLIES(V8_EXTERNAL_CODE_SPACE_BOOL, !IsCodeSpaceObject(object));
+ PtrComprCageBase cage_base = GetPtrComprCageBase(object);
+ object.IterateBodyFast(cage_base, record_visitor_);
if (V8_UNLIKELY(FLAG_minor_mc)) {
record_visitor_->MarkArrayBufferExtensionPromoted(object);
}
@@ -1684,7 +1797,8 @@ class EvacuateOldSpaceVisitor final : public EvacuateVisitorBase {
HeapObject target_object;
if (TryEvacuateObject(Page::FromHeapObject(object)->owner_identity(),
object, size, &target_object)) {
- DCHECK(object.map_word(kRelaxedLoad).IsForwardingAddress());
+ DCHECK(object.map_word(heap_->isolate(), kRelaxedLoad)
+ .IsForwardingAddress());
return true;
}
return false;
@@ -1698,7 +1812,9 @@ class EvacuateRecordOnlyVisitor final : public HeapObjectVisitor {
inline bool Visit(HeapObject object, int size) override {
RecordMigratedSlotVisitor visitor(heap_->mark_compact_collector(),
&heap_->ephemeron_remembered_set_);
- object.IterateBodyFast(&visitor);
+ DCHECK_IMPLIES(V8_EXTERNAL_CODE_SPACE_BOOL, !IsCodeSpaceObject(object));
+ PtrComprCageBase cage_base = GetPtrComprCageBase(object);
+ object.IterateBodyFast(cage_base, &visitor);
return true;
}
@@ -1724,9 +1840,28 @@ void MarkCompactCollector::MarkRoots(RootVisitor* root_visitor,
// Custom marking for top optimized frame.
ProcessTopOptimizedFrame(custom_root_body_visitor, isolate());
- isolate()->IterateClientIsolates(
- [this, custom_root_body_visitor](Isolate* client) {
- ProcessTopOptimizedFrame(custom_root_body_visitor, client);
+ if (isolate()->is_shared()) {
+ isolate()->global_safepoint()->IterateClientIsolates(
+ [this, custom_root_body_visitor](Isolate* client) {
+ ProcessTopOptimizedFrame(custom_root_body_visitor, client);
+ });
+ }
+}
+
+void MarkCompactCollector::MarkObjectsFromClientHeaps() {
+ if (!isolate()->is_shared()) return;
+
+ SharedHeapObjectVisitor visitor(this);
+
+ isolate()->global_safepoint()->IterateClientIsolates(
+ [&visitor](Isolate* client) {
+ Heap* heap = client->heap();
+ HeapObjectIterator iterator(heap, HeapObjectIterator::kNoFiltering);
+ PtrComprCageBase cage_base(client);
+ for (HeapObject obj = iterator.Next(); !obj.is_null();
+ obj = iterator.Next()) {
+ obj.IterateFast(cage_base, &visitor);
+ }
});
}
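When the shared isolate collects, MarkObjectsFromClientHeaps walks every client isolate's heap and, via SharedHeapObjectVisitor, marks only those referenced objects that actually live in the shared heap; references between client-local objects are ignored. A schematic version of that filter (types are illustrative, not V8's):

#include <vector>

struct HeapObject {
  bool in_shared_heap;
};

void MarkShared(const HeapObject&) { /* add to the shared GC's worklist */ }

// Visit one slot of a client-heap object: only shared-heap targets matter.
void VisitClientSlot(const HeapObject& host, const HeapObject* target) {
  // Hosts iterated here are client objects, never shared ones.
  if (target == nullptr) return;
  if (!target->in_shared_heap) return;  // client-local reference: ignore
  MarkShared(*target);
}

int main() {
  HeapObject shared{true}, local{false}, host{false};
  std::vector<const HeapObject*> slots = {&shared, &local, nullptr};
  for (const HeapObject* slot : slots) VisitClientSlot(host, slot);
}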
@@ -1739,7 +1874,7 @@ void MarkCompactCollector::RevisitObject(HeapObject obj) {
DCHECK_IMPLIES(MemoryChunk::FromHeapObject(obj)->ProgressBar().IsEnabled(),
0u == MemoryChunk::FromHeapObject(obj)->ProgressBar().Value());
MarkingVisitor::RevisitScope revisit(marking_visitor_.get());
- marking_visitor_->Visit(obj.map(), obj);
+ marking_visitor_->Visit(obj.map(marking_visitor_->cage_base()), obj);
}
bool MarkCompactCollector::ProcessEphemeronsUntilFixpoint() {
@@ -1758,7 +1893,9 @@ bool MarkCompactCollector::ProcessEphemeronsUntilFixpoint() {
// Move ephemerons from next_ephemerons into current_ephemerons to
// drain them in this iteration.
- weak_objects_.current_ephemerons.Swap(weak_objects_.next_ephemerons);
+ DCHECK(
+ local_weak_objects()->current_ephemerons_local.IsLocalAndGlobalEmpty());
+ weak_objects_.current_ephemerons.Swap(&weak_objects_.next_ephemerons);
heap()->concurrent_marking()->set_another_ephemeron_iteration(false);
{
@@ -1774,8 +1911,10 @@ bool MarkCompactCollector::ProcessEphemeronsUntilFixpoint() {
FinishConcurrentMarking();
}
- CHECK(weak_objects_.current_ephemerons.IsEmpty());
- CHECK(weak_objects_.discovered_ephemerons.IsEmpty());
+ CHECK(
+ local_weak_objects()->current_ephemerons_local.IsLocalAndGlobalEmpty());
+ CHECK(local_weak_objects()
+ ->discovered_ephemerons_local.IsLocalAndGlobalEmpty());
++iterations;
} while (another_ephemeron_iteration_main_thread ||
@@ -1785,8 +1924,9 @@ bool MarkCompactCollector::ProcessEphemeronsUntilFixpoint() {
!heap()->local_embedder_heap_tracer()->IsRemoteTracingDone());
CHECK(local_marking_worklists()->IsEmpty());
- CHECK(weak_objects_.current_ephemerons.IsEmpty());
- CHECK(weak_objects_.discovered_ephemerons.IsEmpty());
+ CHECK(local_weak_objects()->current_ephemerons_local.IsLocalAndGlobalEmpty());
+ CHECK(local_weak_objects()
+ ->discovered_ephemerons_local.IsLocalAndGlobalEmpty());
return true;
}
@@ -1796,7 +1936,7 @@ bool MarkCompactCollector::ProcessEphemerons() {
// Drain current_ephemerons and push ephemerons where key and value are still
// unreachable into next_ephemerons.
- while (weak_objects_.current_ephemerons.Pop(kMainThreadTask, &ephemeron)) {
+ while (local_weak_objects()->current_ephemerons_local.Pop(&ephemeron)) {
if (ProcessEphemeron(ephemeron.key, ephemeron.value)) {
another_ephemeron_iteration = true;
}
@@ -1815,15 +1955,15 @@ bool MarkCompactCollector::ProcessEphemerons() {
// Drain discovered_ephemerons (filled in the drain MarkingWorklist-phase
// before) and push ephemerons where key and value are still unreachable into
// next_ephemerons.
- while (weak_objects_.discovered_ephemerons.Pop(kMainThreadTask, &ephemeron)) {
+ while (local_weak_objects()->discovered_ephemerons_local.Pop(&ephemeron)) {
if (ProcessEphemeron(ephemeron.key, ephemeron.value)) {
another_ephemeron_iteration = true;
}
}
// Flush local ephemerons for main task to global pool.
- weak_objects_.ephemeron_hash_tables.FlushToGlobal(kMainThreadTask);
- weak_objects_.next_ephemerons.FlushToGlobal(kMainThreadTask);
+ local_weak_objects()->ephemeron_hash_tables_local.Publish();
+ local_weak_objects()->next_ephemerons_local.Publish();
return another_ephemeron_iteration;
}
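The ephemeron loops above now pop from per-thread local worklist views and Publish() leftovers to the global pool, replacing the old kMainThreadTask segment indexing. The general local/global split can be sketched as follows (a simplified worklist, not V8's actual MarkingWorklists/WeakObjects types):

#include <deque>
#include <mutex>

template <typename T>
class GlobalWorklist {
 public:
  void Merge(std::deque<T>&& items) {
    std::lock_guard<std::mutex> guard(mutex_);
    for (T& item : items) global_.push_back(std::move(item));
    items.clear();
  }

 private:
  std::mutex mutex_;
  std::deque<T> global_;
};

// Each thread drains its own unsynchronized segment and publishes the rest.
template <typename T>
class LocalWorklist {
 public:
  explicit LocalWorklist(GlobalWorklist<T>* global) : global_(global) {}
  void Push(T item) { local_.push_back(std::move(item)); }
  bool Pop(T* out) {
    if (local_.empty()) return false;
    *out = std::move(local_.back());
    local_.pop_back();
    return true;
  }
  void Publish() { global_->Merge(std::move(local_)); }

 private:
  GlobalWorklist<T>* global_;
  std::deque<T> local_;
};

int main() {
  GlobalWorklist<int> global;
  LocalWorklist<int> local(&global);
  local.Push(1);
  int value;
  while (local.Pop(&value)) { /* process one ephemeron */ }
  local.Publish();  // flush anything left over for other workers
}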
@@ -1835,10 +1975,10 @@ void MarkCompactCollector::ProcessEphemeronsLinear() {
std::unordered_multimap<HeapObject, HeapObject, Object::Hasher> key_to_values;
Ephemeron ephemeron;
- DCHECK(weak_objects_.current_ephemerons.IsEmpty());
- weak_objects_.current_ephemerons.Swap(weak_objects_.next_ephemerons);
-
- while (weak_objects_.current_ephemerons.Pop(kMainThreadTask, &ephemeron)) {
+ DCHECK(
+ local_weak_objects()->current_ephemerons_local.IsLocalAndGlobalEmpty());
+ weak_objects_.current_ephemerons.Swap(&weak_objects_.next_ephemerons);
+ while (local_weak_objects()->current_ephemerons_local.Pop(&ephemeron)) {
ProcessEphemeron(ephemeron.key, ephemeron.value);
if (non_atomic_marking_state()->IsWhite(ephemeron.value)) {
@@ -1865,8 +2005,7 @@ void MarkCompactCollector::ProcessEphemeronsLinear() {
kTrackNewlyDiscoveredObjects>(0);
}
- while (
- weak_objects_.discovered_ephemerons.Pop(kMainThreadTask, &ephemeron)) {
+ while (local_weak_objects()->discovered_ephemerons_local.Pop(&ephemeron)) {
ProcessEphemeron(ephemeron.key, ephemeron.value);
if (non_atomic_marking_state()->IsWhite(ephemeron.value)) {
@@ -1877,6 +2016,7 @@ void MarkCompactCollector::ProcessEphemeronsLinear() {
if (ephemeron_marking_.newly_discovered_overflowed) {
// If newly_discovered was overflowed just visit all ephemerons in
// next_ephemerons.
+ local_weak_objects()->next_ephemerons_local.Publish();
weak_objects_.next_ephemerons.Iterate([&](Ephemeron ephemeron) {
if (non_atomic_marking_state()->IsBlackOrGrey(ephemeron.key) &&
non_atomic_marking_state()->WhiteToGrey(ephemeron.value)) {
@@ -1904,7 +2044,8 @@ void MarkCompactCollector::ProcessEphemeronsLinear() {
work_to_do = !local_marking_worklists()->IsEmpty() ||
!local_marking_worklists()->IsEmbedderEmpty() ||
!heap()->local_embedder_heap_tracer()->IsRemoteTracingDone();
- CHECK(weak_objects_.discovered_ephemerons.IsEmpty());
+ CHECK(local_weak_objects()
+ ->discovered_ephemerons_local.IsLocalAndGlobalEmpty());
}
ResetNewlyDiscovered();
@@ -1915,8 +2056,8 @@ void MarkCompactCollector::ProcessEphemeronsLinear() {
CHECK(weak_objects_.discovered_ephemerons.IsEmpty());
// Flush local ephemerons for main task to global pool.
- weak_objects_.ephemeron_hash_tables.FlushToGlobal(kMainThreadTask);
- weak_objects_.next_ephemerons.FlushToGlobal(kMainThreadTask);
+ local_weak_objects()->ephemeron_hash_tables_local.Publish();
+ local_weak_objects()->next_ephemerons_local.Publish();
}
void MarkCompactCollector::PerformWrapperTracing() {
@@ -2007,9 +2148,8 @@ bool MarkCompactCollector::ProcessEphemeron(HeapObject key, HeapObject value) {
}
} else if (marking_state()->IsWhite(value)) {
- weak_objects_.next_ephemerons.Push(kMainThreadTask, Ephemeron{key, value});
+ local_weak_objects()->next_ephemerons_local.Push(Ephemeron{key, value});
}
-
return false;
}
@@ -2018,7 +2158,7 @@ void MarkCompactCollector::ProcessEphemeronMarking() {
// Incremental marking might leave ephemerons in main task's local
// buffer, flush it into global pool.
- weak_objects_.next_ephemerons.FlushToGlobal(kMainThreadTask);
+ local_weak_objects()->next_ephemerons_local.Publish();
if (!ProcessEphemeronsUntilFixpoint()) {
// Fixpoint iteration needed too many iterations and was cancelled. Use the
@@ -2030,9 +2170,10 @@ void MarkCompactCollector::ProcessEphemeronMarking() {
if (FLAG_verify_heap) {
Ephemeron ephemeron;
- weak_objects_.current_ephemerons.Swap(weak_objects_.next_ephemerons);
-
- while (weak_objects_.current_ephemerons.Pop(kMainThreadTask, &ephemeron)) {
+ DCHECK(
+ local_weak_objects()->current_ephemerons_local.IsLocalAndGlobalEmpty());
+ weak_objects_.current_ephemerons.Swap(&weak_objects_.next_ephemerons);
+ while (local_weak_objects()->current_ephemerons_local.Pop(&ephemeron)) {
CHECK(!ProcessEphemeron(ephemeron.key, ephemeron.value));
}
}
@@ -2050,7 +2191,8 @@ void MarkCompactCollector::ProcessTopOptimizedFrame(ObjectVisitor* visitor,
if (it.frame()->type() == StackFrame::OPTIMIZED) {
Code code = it.frame()->LookupCode();
if (!code.CanDeoptAt(isolate, it.frame()->pc())) {
- Code::BodyDescriptor::IterateBody(code.map(), code, visitor);
+ PtrComprCageBase cage_base(isolate);
+ Code::BodyDescriptor::IterateBody(code.map(cage_base), code, visitor);
}
return;
}
@@ -2116,6 +2258,11 @@ void MarkCompactCollector::MarkLiveObjects() {
}
{
+ TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_MARK_CLIENT_HEAPS);
+ MarkObjectsFromClientHeaps();
+ }
+
+ {
TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_MARK_MAIN);
if (FLAG_parallel_marking) {
heap_->concurrent_marking()->RescheduleJobIfNeeded(
@@ -2279,8 +2426,8 @@ void MarkCompactCollector::ClearNonLiveReferences() {
void MarkCompactCollector::MarkDependentCodeForDeoptimization() {
std::pair<HeapObject, Code> weak_object_in_code;
- while (weak_objects_.weak_objects_in_code.Pop(kMainThreadTask,
- &weak_object_in_code)) {
+ while (local_weak_objects()->weak_objects_in_code_local.Pop(
+ &weak_object_in_code)) {
HeapObject object = weak_object_in_code.first;
Code code = weak_object_in_code.second;
if (!non_atomic_marking_state()->IsBlackOrGrey(object) &&
@@ -2395,8 +2542,8 @@ void MarkCompactCollector::ProcessOldCodeCandidates() {
DCHECK(FLAG_flush_bytecode || FLAG_flush_baseline_code ||
weak_objects_.code_flushing_candidates.IsEmpty());
SharedFunctionInfo flushing_candidate;
- while (weak_objects_.code_flushing_candidates.Pop(kMainThreadTask,
- &flushing_candidate)) {
+ while (local_weak_objects()->code_flushing_candidates_local.Pop(
+ &flushing_candidate)) {
bool is_bytecode_live = non_atomic_marking_state()->IsBlackOrGrey(
flushing_candidate.GetBytecodeArray(isolate()));
if (FLAG_flush_baseline_code && flushing_candidate.HasBaselineCode()) {
@@ -2446,8 +2593,8 @@ void MarkCompactCollector::ProcessOldCodeCandidates() {
void MarkCompactCollector::ClearFlushedJsFunctions() {
DCHECK(FLAG_flush_bytecode || weak_objects_.flushed_js_functions.IsEmpty());
JSFunction flushed_js_function;
- while (weak_objects_.flushed_js_functions.Pop(kMainThreadTask,
- &flushed_js_function)) {
+ while (local_weak_objects()->flushed_js_functions_local.Pop(
+ &flushed_js_function)) {
auto gc_notify_updated_slot = [](HeapObject object, ObjectSlot slot,
Object target) {
RecordSlot(object, slot, HeapObject::cast(target));
@@ -2460,8 +2607,8 @@ void MarkCompactCollector::ProcessFlushedBaselineCandidates() {
DCHECK(FLAG_flush_baseline_code ||
weak_objects_.baseline_flushing_candidates.IsEmpty());
JSFunction flushed_js_function;
- while (weak_objects_.baseline_flushing_candidates.Pop(kMainThreadTask,
- &flushed_js_function)) {
+ while (local_weak_objects()->baseline_flushing_candidates_local.Pop(
+ &flushed_js_function)) {
auto gc_notify_updated_slot = [](HeapObject object, ObjectSlot slot,
Object target) {
RecordSlot(object, slot, HeapObject::cast(target));
@@ -2477,7 +2624,7 @@ void MarkCompactCollector::ProcessFlushedBaselineCandidates() {
void MarkCompactCollector::ClearFullMapTransitions() {
TransitionArray array;
- while (weak_objects_.transition_arrays.Pop(kMainThreadTask, &array)) {
+ while (local_weak_objects()->transition_arrays_local.Pop(&array)) {
int num_transitions = array.number_of_entries();
if (num_transitions > 0) {
Map map;
@@ -2655,8 +2802,7 @@ void MarkCompactCollector::TrimEnumCache(Map map, DescriptorArray descriptors) {
void MarkCompactCollector::ClearWeakCollections() {
TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_CLEAR_WEAK_COLLECTIONS);
EphemeronHashTable table;
-
- while (weak_objects_.ephemeron_hash_tables.Pop(kMainThreadTask, &table)) {
+ while (local_weak_objects()->ephemeron_hash_tables_local.Pop(&table)) {
for (InternalIndex i : table.IterateEntries()) {
HeapObject key = HeapObject::cast(table.KeyAt(i));
#ifdef VERIFY_HEAP
@@ -2689,7 +2835,7 @@ void MarkCompactCollector::ClearWeakReferences() {
std::pair<HeapObject, HeapObjectSlot> slot;
HeapObjectReference cleared_weak_ref =
HeapObjectReference::ClearedValue(isolate());
- while (weak_objects_.weak_references.Pop(kMainThreadTask, &slot)) {
+ while (local_weak_objects()->weak_references_local.Pop(&slot)) {
HeapObject value;
// The slot could have been overwritten, so we have to treat it
// as MaybeObjectSlot.
@@ -2712,7 +2858,7 @@ void MarkCompactCollector::ClearWeakReferences() {
void MarkCompactCollector::ClearJSWeakRefs() {
JSWeakRef weak_ref;
- while (weak_objects_.js_weak_refs.Pop(kMainThreadTask, &weak_ref)) {
+ while (local_weak_objects()->js_weak_refs_local.Pop(&weak_ref)) {
HeapObject target = HeapObject::cast(weak_ref.target());
if (!non_atomic_marking_state()->IsBlackOrGrey(target)) {
weak_ref.set_target(ReadOnlyRoots(isolate()).undefined_value());
@@ -2723,7 +2869,7 @@ void MarkCompactCollector::ClearJSWeakRefs() {
}
}
WeakCell weak_cell;
- while (weak_objects_.weak_cells.Pop(kMainThreadTask, &weak_cell)) {
+ while (local_weak_objects()->weak_cells_local.Pop(&weak_cell)) {
auto gc_notify_updated_slot = [](HeapObject object, ObjectSlot slot,
Object target) {
if (target.IsHeapObject()) {
@@ -2777,21 +2923,6 @@ void MarkCompactCollector::ClearJSWeakRefs() {
heap()->PostFinalizationRegistryCleanupTaskIfNeeded();
}
-void MarkCompactCollector::AbortWeakObjects() {
- weak_objects_.transition_arrays.Clear();
- weak_objects_.ephemeron_hash_tables.Clear();
- weak_objects_.current_ephemerons.Clear();
- weak_objects_.next_ephemerons.Clear();
- weak_objects_.discovered_ephemerons.Clear();
- weak_objects_.weak_references.Clear();
- weak_objects_.weak_objects_in_code.Clear();
- weak_objects_.js_weak_refs.Clear();
- weak_objects_.weak_cells.Clear();
- weak_objects_.code_flushing_candidates.Clear();
- weak_objects_.baseline_flushing_candidates.Clear();
- weak_objects_.flushed_js_functions.Clear();
-}
-
bool MarkCompactCollector::IsOnEvacuationCandidate(MaybeObject obj) {
return Page::FromAddress(obj.ptr())->IsEvacuationCandidate();
}
@@ -2834,6 +2965,12 @@ void MarkCompactCollector::RecordRelocSlot(Code host, RelocInfo* rinfo,
HeapObject target) {
RecordRelocSlotInfo info = PrepareRecordRelocSlot(host, rinfo, target);
if (info.should_record) {
+    // Access to TypedSlots needs to be protected, since LocalHeaps might
+    // publish code from a background thread.
+ base::Optional<base::MutexGuard> opt_guard;
+ if (FLAG_concurrent_sparkplug) {
+ opt_guard.emplace(info.memory_chunk->mutex());
+ }
RememberedSet<OLD_TO_OLD>::InsertTyped(info.memory_chunk, info.slot_type,
info.offset);
}
@@ -2978,10 +3115,13 @@ static inline SlotCallbackResult UpdateStrongCodeSlot(
} // namespace
+static constexpr bool kClientHeap = true;
+
// Visitor for updating root pointers and to-space pointers.
// It does not expect to encounter pointers to dead objects.
-class PointersUpdatingVisitor : public ObjectVisitorWithCageBases,
- public RootVisitor {
+template <bool in_client_heap = false>
+class PointersUpdatingVisitor final : public ObjectVisitorWithCageBases,
+ public RootVisitor {
public:
explicit PointersUpdatingVisitor(Heap* heap)
: ObjectVisitorWithCageBases(heap) {}
@@ -3035,14 +3175,34 @@ class PointersUpdatingVisitor : public ObjectVisitorWithCageBases,
}
}
- void VisitEmbeddedPointer(Code host, RelocInfo* rinfo) override {
- // This visitor nevers visits code objects.
- UNREACHABLE();
+ void VisitMapPointer(HeapObject object) override {
+ if (in_client_heap) {
+ UpdateStrongSlotInternal(cage_base(), object.map_slot());
+ } else {
+ UNREACHABLE();
+ }
}
void VisitCodeTarget(Code host, RelocInfo* rinfo) override {
- // This visitor nevers visits code objects.
- UNREACHABLE();
+ if (in_client_heap) {
+ Code target = Code::GetCodeFromTargetAddress(rinfo->target_address());
+ CHECK_WITH_MSG(!target.InSharedHeap(),
+ "refs into shared heap not yet supported here.");
+ } else {
+      // This visitor never visits code objects.
+ UNREACHABLE();
+ }
+ }
+
+ void VisitEmbeddedPointer(Code host, RelocInfo* rinfo) override {
+ if (in_client_heap) {
+ HeapObject target = rinfo->target_object(cage_base());
+ CHECK_WITH_MSG(!target.InSharedHeap(),
+ "refs into shared heap not yet supported here.");
+ } else {
+      // This visitor never visits code objects.
+ UNREACHABLE();
+ }
}
private:
@@ -3120,13 +3280,13 @@ void MarkCompactCollector::EvacuatePrologue() {
}
void MarkCompactCollector::EvacuateEpilogue() {
- aborted_evacuation_candidates_.clear();
+ aborted_evacuation_candidates_due_to_oom_.clear();
+ aborted_evacuation_candidates_due_to_flags_.clear();
// New space.
if (heap()->new_space()) {
heap()->new_space()->set_age_mark(heap()->new_space()->top());
- DCHECK_IMPLIES(FLAG_always_promote_young_mc,
- heap()->new_space()->Size() == 0);
+ DCHECK_EQ(0, heap()->new_space()->Size());
}
// Deallocate unmarked large objects.
@@ -3195,7 +3355,8 @@ class Evacuator : public Malloced {
}
Evacuator(Heap* heap, RecordMigratedSlotVisitor* record_visitor,
- EvacuationAllocator* local_allocator, bool always_promote_young)
+ EvacuationAllocator* local_allocator,
+ AlwaysPromoteYoung always_promote_young)
: heap_(heap),
local_pretenuring_feedback_(kInitialLocalPretenuringFeedbackCapacity),
new_space_visitor_(heap_, local_allocator, record_visitor,
@@ -3307,7 +3468,7 @@ class FullEvacuator : public Evacuator {
public:
explicit FullEvacuator(MarkCompactCollector* collector)
: Evacuator(collector->heap(), &record_visitor_, &local_allocator_,
- FLAG_always_promote_young_mc),
+ AlwaysPromoteYoung::kYes),
record_visitor_(collector, &ephemeron_remembered_set_),
local_allocator_(heap_,
CompactionSpaceKind::kCompactionSpaceForMarkCompact),
@@ -3386,8 +3547,8 @@ void FullEvacuator::RawEvacuatePage(MemoryChunk* chunk, intptr_t* live_bytes) {
} else {
// Aborted compaction page. Actual processing happens on the main
// thread for simplicity reasons.
- collector_->ReportAbortedEvacuationCandidate(failed_object.address(),
- chunk);
+ collector_->ReportAbortedEvacuationCandidateDueToOOM(
+ failed_object.address(), static_cast<Page*>(chunk));
}
}
break;
@@ -3486,13 +3647,14 @@ size_t MarkCompactCollectorBase::CreateAndExecuteEvacuationTasks(
return wanted_num_tasks;
}
-bool MarkCompactCollectorBase::ShouldMovePage(Page* p, intptr_t live_bytes,
- bool always_promote_young) {
+bool MarkCompactCollectorBase::ShouldMovePage(
+ Page* p, intptr_t live_bytes, AlwaysPromoteYoung always_promote_young) {
const bool reduce_memory = heap()->ShouldReduceMemory();
const Address age_mark = heap()->new_space()->age_mark();
return !reduce_memory && !p->NeverEvacuate() &&
(live_bytes > Evacuator::NewSpacePageEvacuationThreshold()) &&
- (always_promote_young || !p->Contains(age_mark)) &&
+ (always_promote_young == AlwaysPromoteYoung::kYes ||
+ !p->Contains(age_mark)) &&
heap()->CanExpandOldGeneration(live_bytes);
}
@@ -3526,24 +3688,33 @@ void MarkCompactCollector::EvacuatePagesInParallel() {
intptr_t live_bytes_on_page = non_atomic_marking_state()->live_bytes(page);
if (live_bytes_on_page == 0) continue;
live_bytes += live_bytes_on_page;
- if (ShouldMovePage(page, live_bytes_on_page,
- FLAG_always_promote_young_mc)) {
- if (page->IsFlagSet(MemoryChunk::NEW_SPACE_BELOW_AGE_MARK) ||
- FLAG_always_promote_young_mc) {
- EvacuateNewSpacePageVisitor<NEW_TO_OLD>::Move(page);
- DCHECK_EQ(heap()->old_space(), page->owner());
- // The move added page->allocated_bytes to the old space, but we are
- // going to sweep the page and add page->live_byte_count.
- heap()->old_space()->DecreaseAllocatedBytes(page->allocated_bytes(),
- page);
- } else {
- EvacuateNewSpacePageVisitor<NEW_TO_NEW>::Move(page);
- }
+ if (ShouldMovePage(page, live_bytes_on_page, AlwaysPromoteYoung::kYes)) {
+ EvacuateNewSpacePageVisitor<NEW_TO_OLD>::Move(page);
+ DCHECK_EQ(heap()->old_space(), page->owner());
+ // The move added page->allocated_bytes to the old space, but we are
+ // going to sweep the page and add page->live_byte_count.
+ heap()->old_space()->DecreaseAllocatedBytes(page->allocated_bytes(),
+ page);
}
evacuation_items.emplace_back(ParallelWorkItem{}, page);
}
+ if (!heap()->IsGCWithoutStack()) {
+ if (!FLAG_compact_with_stack || !FLAG_compact_code_space_with_stack) {
+ for (Page* page : old_space_evacuation_pages_) {
+ if (!FLAG_compact_with_stack || page->owner_identity() == CODE_SPACE) {
+ ReportAbortedEvacuationCandidateDueToFlags(page->area_start(), page);
+ // Set this flag early on in this case to allow filtering such pages
+ // below.
+ page->SetFlag(Page::COMPACTION_WAS_ABORTED);
+ }
+ }
+ }
+ }
+
for (Page* page : old_space_evacuation_pages_) {
+ if (page->IsFlagSet(Page::COMPACTION_WAS_ABORTED)) continue;
+
live_bytes += non_atomic_marking_state()->live_bytes(page);
evacuation_items.emplace_back(ParallelWorkItem{}, page);
}
@@ -3567,16 +3738,16 @@ void MarkCompactCollector::EvacuatePagesInParallel() {
}
}
- if (evacuation_items.empty()) return;
-
- TRACE_EVENT1(TRACE_DISABLED_BY_DEFAULT("v8.gc"),
- "MarkCompactCollector::EvacuatePagesInParallel", "pages",
- evacuation_items.size());
-
const size_t pages_count = evacuation_items.size();
- const size_t wanted_num_tasks =
- CreateAndExecuteEvacuationTasks<FullEvacuator>(
- this, std::move(evacuation_items), nullptr);
+ size_t wanted_num_tasks = 0;
+ if (!evacuation_items.empty()) {
+ TRACE_EVENT1(TRACE_DISABLED_BY_DEFAULT("v8.gc"),
+ "MarkCompactCollector::EvacuatePagesInParallel", "pages",
+ evacuation_items.size());
+
+ wanted_num_tasks = CreateAndExecuteEvacuationTasks<FullEvacuator>(
+ this, std::move(evacuation_items), nullptr);
+ }
// After evacuation there might still be swept pages that weren't
// added to one of the compaction space but still reside in the
@@ -3866,10 +4037,10 @@ class ToSpaceUpdatingItem : public UpdatingItem {
void ProcessVisitAll() {
TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.gc"),
"ToSpaceUpdatingItem::ProcessVisitAll");
- PointersUpdatingVisitor visitor(heap_);
+ PointersUpdatingVisitor<> visitor(heap_);
for (Address cur = start_; cur < end_;) {
HeapObject object = HeapObject::FromAddress(cur);
- Map map = object.map();
+ Map map = object.map(visitor.cage_base());
int size = object.SizeFromMap(map);
object.IterateBodyFast(map, size, &visitor);
cur += size;
@@ -3881,10 +4052,10 @@ class ToSpaceUpdatingItem : public UpdatingItem {
"ToSpaceUpdatingItem::ProcessVisitLive");
// For young generation evacuations we want to visit grey objects, for
// full MC, we need to visit black objects.
- PointersUpdatingVisitor visitor(heap_);
+ PointersUpdatingVisitor<> visitor(heap_);
for (auto object_and_size : LiveObjectRange<kAllLiveObjects>(
chunk_, marking_state_->bitmap(chunk_))) {
- object_and_size.first.IterateBodyFast(&visitor);
+ object_and_size.first.IterateBodyFast(visitor.cage_base(), &visitor);
}
}
@@ -3984,9 +4155,7 @@ class RememberedSetUpdatingItem : public UpdatingItem {
},
SlotSet::FREE_EMPTY_BUCKETS);
- DCHECK_IMPLIES(collector == GarbageCollector::MARK_COMPACTOR &&
- FLAG_always_promote_young_mc,
- slots == 0);
+ DCHECK_IMPLIES(collector == GarbageCollector::MARK_COMPACTOR, slots == 0);
if (slots == 0) {
chunk_->ReleaseSlotSet<OLD_TO_NEW>();
@@ -4010,9 +4179,7 @@ class RememberedSetUpdatingItem : public UpdatingItem {
},
SlotSet::FREE_EMPTY_BUCKETS);
- DCHECK_IMPLIES(collector == GarbageCollector::MARK_COMPACTOR &&
- FLAG_always_promote_young_mc,
- slots == 0);
+ DCHECK_IMPLIES(collector == GarbageCollector::MARK_COMPACTOR, slots == 0);
if (slots == 0) {
chunk_->ReleaseSweepingSlotSet();
@@ -4049,7 +4216,7 @@ class RememberedSetUpdatingItem : public UpdatingItem {
(chunk_->slot_set<OLD_TO_CODE, AccessMode::NON_ATOMIC>() !=
nullptr)) {
PtrComprCageBase cage_base = heap_->isolate();
-#if V8_EXTERNAL_CODE_SPACE
+#ifdef V8_EXTERNAL_CODE_SPACE
PtrComprCageBase code_cage_base(heap_->isolate()->code_cage_base());
#else
PtrComprCageBase code_cage_base = cage_base;
@@ -4110,12 +4277,6 @@ class RememberedSetUpdatingItem : public UpdatingItem {
RememberedSetUpdatingMode updating_mode_;
};
-std::unique_ptr<UpdatingItem> MarkCompactCollector::CreateToSpaceUpdatingItem(
- MemoryChunk* chunk, Address start, Address end) {
- return std::make_unique<ToSpaceUpdatingItem<NonAtomicMarkingState>>(
- heap(), chunk, start, end, non_atomic_marking_state());
-}
-
std::unique_ptr<UpdatingItem>
MarkCompactCollector::CreateRememberedSetUpdatingItem(
MemoryChunk* chunk, RememberedSetUpdatingMode updating_mode) {
@@ -4124,24 +4285,6 @@ MarkCompactCollector::CreateRememberedSetUpdatingItem(
heap(), non_atomic_marking_state(), chunk, updating_mode);
}
-int MarkCompactCollectorBase::CollectToSpaceUpdatingItems(
- std::vector<std::unique_ptr<UpdatingItem>>* items) {
- if (!heap()->new_space()) return 0;
-
- // Seed to space pages.
- const Address space_start = heap()->new_space()->first_allocatable_address();
- const Address space_end = heap()->new_space()->top();
- int pages = 0;
- for (Page* page : PageRange(space_start, space_end)) {
- Address start =
- page->Contains(space_start) ? space_start : page->area_start();
- Address end = page->Contains(space_end) ? space_end : page->area_end();
- items->emplace_back(CreateToSpaceUpdatingItem(page, start, end));
- pages++;
- }
- return pages;
-}
-
template <typename IterateableSpace>
int MarkCompactCollectorBase::CollectRememberedSetUpdatingItems(
std::vector<std::unique_ptr<UpdatingItem>>* items, IterateableSpace* space,
@@ -4233,12 +4376,11 @@ class EphemeronTableUpdatingItem : public UpdatingItem {
void MarkCompactCollector::UpdatePointersAfterEvacuation() {
TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_EVACUATE_UPDATE_POINTERS);
- PointersUpdatingVisitor updating_visitor(heap());
-
{
TRACE_GC(heap()->tracer(),
GCTracer::Scope::MC_EVACUATE_UPDATE_POINTERS_TO_NEW_ROOTS);
// The external string table is updated at the end.
+ PointersUpdatingVisitor<> updating_visitor(heap());
heap_->IterateRootsIncludingClients(
&updating_visitor,
base::EnumSet<SkipRoot>{SkipRoot::kExternalStringTable});
@@ -4246,6 +4388,12 @@ void MarkCompactCollector::UpdatePointersAfterEvacuation() {
{
TRACE_GC(heap()->tracer(),
+ GCTracer::Scope::MC_EVACUATE_UPDATE_POINTERS_CLIENT_HEAPS);
+ UpdatePointersInClientHeaps();
+ }
+
+ {
+ TRACE_GC(heap()->tracer(),
GCTracer::Scope::MC_EVACUATE_UPDATE_POINTERS_SLOTS_MAIN);
std::vector<std::unique_ptr<UpdatingItem>> updating_items;
@@ -4260,7 +4408,11 @@ void MarkCompactCollector::UpdatePointersAfterEvacuation() {
CollectRememberedSetUpdatingItems(&updating_items, heap()->map_space(),
RememberedSetUpdatingMode::ALL);
- CollectToSpaceUpdatingItems(&updating_items);
+ // Iterating to space may require a valid body descriptor for e.g.
+ // WasmStruct which races with updating a slot in Map. Since to space is
+ // empty after a full GC, such races can't happen.
+ DCHECK_IMPLIES(heap()->new_space(), heap()->new_space()->Size() == 0);
+
updating_items.push_back(
std::make_unique<EphemeronTableUpdatingItem>(heap()));
@@ -4285,52 +4437,88 @@ void MarkCompactCollector::UpdatePointersAfterEvacuation() {
}
}
-void MarkCompactCollector::ReportAbortedEvacuationCandidate(
- Address failed_start, MemoryChunk* chunk) {
+void MarkCompactCollector::UpdatePointersInClientHeaps() {
+ if (!isolate()->is_shared()) return;
+
+ PointersUpdatingVisitor<kClientHeap> visitor(heap());
+
+ isolate()->global_safepoint()->IterateClientIsolates(
+ [&visitor](Isolate* client) {
+ Heap* heap = client->heap();
+ HeapObjectIterator iterator(heap, HeapObjectIterator::kNoFiltering);
+ PtrComprCageBase cage_base(client);
+ for (HeapObject obj = iterator.Next(); !obj.is_null();
+ obj = iterator.Next()) {
+ obj.IterateFast(cage_base, &visitor);
+ }
+ });
+}
+
+void MarkCompactCollector::ReportAbortedEvacuationCandidateDueToOOM(
+ Address failed_start, Page* page) {
+ base::MutexGuard guard(&mutex_);
+ aborted_evacuation_candidates_due_to_oom_.push_back(
+ std::make_pair(failed_start, page));
+}
+
+void MarkCompactCollector::ReportAbortedEvacuationCandidateDueToFlags(
+ Address failed_start, Page* page) {
base::MutexGuard guard(&mutex_);
+ aborted_evacuation_candidates_due_to_flags_.push_back(
+ std::make_pair(failed_start, page));
+}
- aborted_evacuation_candidates_.push_back(
- std::make_pair(failed_start, static_cast<Page*>(chunk)));
+namespace {
+
+void ReRecordPage(
+ Heap* heap,
+ v8::internal::MarkCompactCollector::NonAtomicMarkingState* marking_state,
+ Address failed_start, Page* page) {
+ page->SetFlag(Page::COMPACTION_WAS_ABORTED);
+ // Aborted compaction page. We have to record slots here, since we
+  // might not have recorded them in the first place.
+
+ // Remove outdated slots.
+ RememberedSetSweeping::RemoveRange(page, page->address(), failed_start,
+ SlotSet::FREE_EMPTY_BUCKETS);
+ RememberedSet<OLD_TO_NEW>::RemoveRange(page, page->address(), failed_start,
+ SlotSet::FREE_EMPTY_BUCKETS);
+ RememberedSet<OLD_TO_NEW>::RemoveRangeTyped(page, page->address(),
+ failed_start);
+
+ // Remove invalidated slots.
+ if (failed_start > page->area_start()) {
+ InvalidatedSlotsCleanup old_to_new_cleanup =
+ InvalidatedSlotsCleanup::OldToNew(page);
+ old_to_new_cleanup.Free(page->area_start(), failed_start);
+ }
+
+ // Recompute live bytes.
+ LiveObjectVisitor::RecomputeLiveBytes(page, marking_state);
+ // Re-record slots.
+ EvacuateRecordOnlyVisitor record_visitor(heap);
+ LiveObjectVisitor::VisitBlackObjectsNoFail(
+ page, marking_state, &record_visitor, LiveObjectVisitor::kKeepMarking);
+ // Array buffers will be processed during pointer updating.
}
+} // namespace
+
size_t MarkCompactCollector::PostProcessEvacuationCandidates() {
CHECK_IMPLIES(FLAG_crash_on_aborted_evacuation,
- aborted_evacuation_candidates_.empty());
-
- for (auto start_and_page : aborted_evacuation_candidates_) {
- Address failed_start = start_and_page.first;
- Page* page = start_and_page.second;
- page->SetFlag(Page::COMPACTION_WAS_ABORTED);
- // Aborted compaction page. We have to record slots here, since we
- // might not have recorded them in first place.
-
- // Remove outdated slots.
- RememberedSetSweeping::RemoveRange(page, page->address(), failed_start,
- SlotSet::FREE_EMPTY_BUCKETS);
- RememberedSet<OLD_TO_NEW>::RemoveRange(page, page->address(), failed_start,
- SlotSet::FREE_EMPTY_BUCKETS);
- RememberedSet<OLD_TO_NEW>::RemoveRangeTyped(page, page->address(),
- failed_start);
-
- // Remove invalidated slots.
- if (failed_start > page->area_start()) {
- InvalidatedSlotsCleanup old_to_new_cleanup =
- InvalidatedSlotsCleanup::OldToNew(page);
- old_to_new_cleanup.Free(page->area_start(), failed_start);
- }
-
- // Recompute live bytes.
- LiveObjectVisitor::RecomputeLiveBytes(page, non_atomic_marking_state());
- // Re-record slots.
- EvacuateRecordOnlyVisitor record_visitor(heap());
- LiveObjectVisitor::VisitBlackObjectsNoFail(page, non_atomic_marking_state(),
- &record_visitor,
- LiveObjectVisitor::kKeepMarking);
- // Array buffers will be processed during pointer updating.
- }
- const int aborted_pages =
- static_cast<int>(aborted_evacuation_candidates_.size());
- int aborted_pages_verified = 0;
+ aborted_evacuation_candidates_due_to_oom_.empty());
+ for (auto start_and_page : aborted_evacuation_candidates_due_to_oom_) {
+ ReRecordPage(heap(), non_atomic_marking_state(), start_and_page.first,
+ start_and_page.second);
+ }
+ for (auto start_and_page : aborted_evacuation_candidates_due_to_flags_) {
+ ReRecordPage(heap(), non_atomic_marking_state(), start_and_page.first,
+ start_and_page.second);
+ }
+ const size_t aborted_pages =
+ aborted_evacuation_candidates_due_to_oom_.size() +
+ aborted_evacuation_candidates_due_to_flags_.size();
+ size_t aborted_pages_verified = 0;
for (Page* p : old_space_evacuation_pages_) {
if (p->IsFlagSet(Page::COMPACTION_WAS_ABORTED)) {
// After clearing the evacuation candidate flag the page is again in a
@@ -4547,8 +4735,12 @@ class YoungGenerationEvacuationVerifier : public EvacuationVerifier {
}
void VerifyCodePointer(CodeObjectSlot slot) override {
CHECK(V8_EXTERNAL_CODE_SPACE_BOOL);
- Code code = Code::unchecked_cast(slot.load(code_cage_base()));
- VerifyHeapObjectImpl(code);
+ Object maybe_code = slot.load(code_cage_base());
+ HeapObject code;
+    // The slot might contain a Smi during CodeDataContainer creation, so skip it.
+ if (maybe_code.GetHeapObject(&code)) {
+ VerifyHeapObjectImpl(code);
+ }
}
void VisitCodeTarget(Code host, RelocInfo* rinfo) override {
Code target = Code::GetCodeFromTargetAddress(rinfo->target_address());
@@ -4578,9 +4770,9 @@ class YoungGenerationMarkingVisitor final
public:
YoungGenerationMarkingVisitor(
Isolate* isolate, MinorMarkCompactCollector::MarkingState* marking_state,
- MinorMarkCompactCollector::MarkingWorklist* global_worklist, int task_id)
+ MinorMarkCompactCollector::MarkingWorklist::Local* worklist_local)
: NewSpaceVisitor(isolate),
- worklist_(global_worklist, task_id),
+ worklist_local_(worklist_local),
marking_state_(marking_state) {}
V8_INLINE void VisitPointers(HeapObject host, ObjectSlot start,
@@ -4649,11 +4841,11 @@ class YoungGenerationMarkingVisitor final
inline void MarkObjectViaMarkingWorklist(HeapObject object) {
if (marking_state_->WhiteToGrey(object)) {
// Marking deque overflow is unsupported for the young generation.
- CHECK(worklist_.Push(object));
+ worklist_local_->Push(object);
}
}
- MinorMarkCompactCollector::MarkingWorklist::View worklist_;
+ MinorMarkCompactCollector::MarkingWorklist::Local* worklist_local_;
MinorMarkCompactCollector::MarkingState* marking_state_;
};
@@ -4661,16 +4853,18 @@ void MinorMarkCompactCollector::SetUp() {}
void MinorMarkCompactCollector::TearDown() {}
+// static
+constexpr size_t MinorMarkCompactCollector::kMaxParallelTasks;
+
MinorMarkCompactCollector::MinorMarkCompactCollector(Heap* heap)
: MarkCompactCollectorBase(heap),
worklist_(new MinorMarkCompactCollector::MarkingWorklist()),
+ main_thread_worklist_local_(worklist_),
+ marking_state_(heap->isolate()),
+ non_atomic_marking_state_(heap->isolate()),
main_marking_visitor_(new YoungGenerationMarkingVisitor(
- heap->isolate(), marking_state(), worklist_, kMainMarker)),
- page_parallel_job_semaphore_(0) {
- static_assert(
- kNumMarkers <= MinorMarkCompactCollector::MarkingWorklist::kMaxNumTasks,
- "more marker tasks than marking deque can handle");
-}
+ heap->isolate(), marking_state(), &main_thread_worklist_local_)),
+ page_parallel_job_semaphore_(0) {}
MinorMarkCompactCollector::~MinorMarkCompactCollector() {
delete worklist_;
@@ -4768,7 +4962,7 @@ void MinorMarkCompactCollector::UpdatePointersAfterEvacuation() {
TRACE_GC(heap()->tracer(),
GCTracer::Scope::MINOR_MC_EVACUATE_UPDATE_POINTERS);
- PointersUpdatingVisitor updating_visitor(heap());
+ PointersUpdatingVisitor<> updating_visitor(heap());
std::vector<std::unique_ptr<UpdatingItem>> updating_items;
// Create batches of global handles.
@@ -4925,7 +5119,8 @@ void MinorMarkCompactCollector::MakeIterable(
p->heap()->CreateFillerObjectAt(free_start, static_cast<int>(size),
ClearRecordedSlots::kNo);
}
- Map map = object.map(kAcquireLoad);
+ PtrComprCageBase cage_base(p->heap()->isolate());
+ Map map = object.map(cage_base, kAcquireLoad);
int size = object.SizeFromMap(map);
free_start = free_end + size;
}
@@ -5054,6 +5249,22 @@ void MinorMarkCompactCollector::EvacuateEpilogue() {
heap()->memory_allocator()->unmapper()->FreeQueuedChunks();
}
+int MinorMarkCompactCollector::CollectToSpaceUpdatingItems(
+ std::vector<std::unique_ptr<UpdatingItem>>* items) {
+ // Seed to space pages.
+ const Address space_start = heap()->new_space()->first_allocatable_address();
+ const Address space_end = heap()->new_space()->top();
+ int pages = 0;
+ for (Page* page : PageRange(space_start, space_end)) {
+ Address start =
+ page->Contains(space_start) ? space_start : page->area_start();
+ Address end = page->Contains(space_end) ? space_end : page->area_end();
+ items->emplace_back(CreateToSpaceUpdatingItem(page, start, end));
+ pages++;
+ }
+ return pages;
+}
+
std::unique_ptr<UpdatingItem>
MinorMarkCompactCollector::CreateToSpaceUpdatingItem(MemoryChunk* chunk,
Address start,
@@ -5078,10 +5289,10 @@ class YoungGenerationMarkingTask {
public:
YoungGenerationMarkingTask(
Isolate* isolate, MinorMarkCompactCollector* collector,
- MinorMarkCompactCollector::MarkingWorklist* global_worklist, int task_id)
- : marking_worklist_(global_worklist, task_id),
+ MinorMarkCompactCollector::MarkingWorklist* global_worklist)
+ : marking_worklist_local_(global_worklist),
marking_state_(collector->marking_state()),
- visitor_(isolate, marking_state_, global_worklist, task_id) {
+ visitor_(isolate, marking_state_, &marking_worklist_local_) {
local_live_bytes_.reserve(isolate->heap()->new_space()->Capacity() /
Page::kPageSize);
}
@@ -5097,7 +5308,7 @@ class YoungGenerationMarkingTask {
void EmptyMarkingWorklist() {
HeapObject object;
- while (marking_worklist_.Pop(&object)) {
+ while (marking_worklist_local_.Pop(&object)) {
const int size = visitor_.Visit(object);
IncrementLiveBytes(object, size);
}
@@ -5114,7 +5325,7 @@ class YoungGenerationMarkingTask {
}
private:
- MinorMarkCompactCollector::MarkingWorklist::View marking_worklist_;
+ MinorMarkCompactCollector::MarkingWorklist::Local marking_worklist_local_;
MinorMarkCompactCollector::MarkingState* marking_state_;
YoungGenerationMarkingVisitor visitor_;
std::unordered_map<Page*, intptr_t, Page::Hasher> local_live_bytes_;
@@ -5221,13 +5432,13 @@ class YoungGenerationMarkingJob : public v8::JobTask {
// the amount of marking that is required.
const int kPagesPerTask = 2;
size_t items = remaining_marking_items_.load(std::memory_order_relaxed);
- size_t num_tasks = std::max((items + 1) / kPagesPerTask,
- global_worklist_->GlobalPoolSize());
+ size_t num_tasks =
+ std::max((items + 1) / kPagesPerTask, global_worklist_->Size());
if (!FLAG_parallel_marking) {
num_tasks = std::min<size_t>(1, num_tasks);
}
- return std::min<size_t>(
- num_tasks, MinorMarkCompactCollector::MarkingWorklist::kMaxNumTasks);
+ return std::min<size_t>(num_tasks,
+ MinorMarkCompactCollector::kMaxParallelTasks);
}
private:
@@ -5235,8 +5446,7 @@ class YoungGenerationMarkingJob : public v8::JobTask {
double marking_time = 0.0;
{
TimedScope scope(&marking_time);
- YoungGenerationMarkingTask task(isolate_, collector_, global_worklist_,
- delegate->GetTaskId());
+ YoungGenerationMarkingTask task(isolate_, collector_, global_worklist_);
ProcessMarkingItems(&task);
task.EmptyMarkingWorklist();
task.FlushLiveBytes();
@@ -5303,7 +5513,7 @@ void MinorMarkCompactCollector::MarkRootSetInParallel(
// The main thread might hold local items, while GlobalPoolSize() == 0.
// Flush to ensure these items are visible globally and picked up by the
// job.
- worklist()->FlushToGlobal(kMainThreadTask);
+ main_thread_worklist_local_.Publish();
TRACE_GC(heap()->tracer(), GCTracer::Scope::MINOR_MC_MARK_ROOTS);
V8::GetCurrentPlatform()
->PostJob(v8::TaskPriority::kUserBlocking,
@@ -5312,6 +5522,7 @@ void MinorMarkCompactCollector::MarkRootSetInParallel(
->Join();
DCHECK(worklist()->IsEmpty());
+ DCHECK(main_thread_worklist_local_.IsLocalEmpty());
}
}
}
@@ -5348,17 +5559,16 @@ void MinorMarkCompactCollector::MarkLiveObjects() {
}
void MinorMarkCompactCollector::DrainMarkingWorklist() {
- MarkingWorklist::View marking_worklist(worklist(), kMainMarker);
PtrComprCageBase cage_base(isolate());
HeapObject object;
- while (marking_worklist.Pop(&object)) {
+ while (main_thread_worklist_local_.Pop(&object)) {
DCHECK(!object.IsFreeSpaceOrFiller(cage_base));
DCHECK(object.IsHeapObject());
DCHECK(heap()->Contains(object));
DCHECK(non_atomic_marking_state()->IsGrey(object));
main_marking_visitor()->Visit(object);
}
- DCHECK(marking_worklist.IsLocalEmpty());
+ DCHECK(main_thread_worklist_local_.IsLocalEmpty());
}
void MinorMarkCompactCollector::TraceFragmentation() {
@@ -5462,7 +5672,7 @@ class YoungGenerationEvacuator : public Evacuator {
public:
explicit YoungGenerationEvacuator(MinorMarkCompactCollector* collector)
: Evacuator(collector->heap(), &record_visitor_, &local_allocator_,
- false),
+ AlwaysPromoteYoung::kNo),
record_visitor_(collector->heap()->mark_compact_collector()),
local_allocator_(
heap_, CompactionSpaceKind::kCompactionSpaceForMinorMarkCompact),
@@ -5550,7 +5760,7 @@ void MinorMarkCompactCollector::EvacuatePagesInParallel() {
intptr_t live_bytes_on_page = non_atomic_marking_state()->live_bytes(page);
if (live_bytes_on_page == 0) continue;
live_bytes += live_bytes_on_page;
- if (ShouldMovePage(page, live_bytes_on_page, false)) {
+ if (ShouldMovePage(page, live_bytes_on_page, AlwaysPromoteYoung::kNo)) {
if (page->IsFlagSet(MemoryChunk::NEW_SPACE_BELOW_AGE_MARK)) {
EvacuateNewSpacePageVisitor<NEW_TO_OLD>::Move(page);
} else {
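
A recurring change in mark-compact.cc above is the switch from a shared worklist indexed by task id (Pop(kMainThreadTask, ...)) to per-thread Worklist::Local handles that push and pop locally and only make their items globally visible via Publish(). The toy sketch below (standard C++ only; ToyWorklist and its methods are invented for illustration and are not V8's ::heap::base::Worklist or its segment-based implementation) shows that access pattern under those assumptions.

    // Toy sketch, not V8 code: a per-thread Local handle buffers items and only
    // touches the shared list when publishing or refilling.
    #include <mutex>
    #include <vector>

    template <typename T>
    class ToyWorklist {
     public:
      class Local {
       public:
        explicit Local(ToyWorklist* shared) : shared_(shared) {}
        void Push(T value) { buffer_.push_back(std::move(value)); }
        bool Pop(T* out) {
          if (buffer_.empty() && !Refill()) return false;
          *out = std::move(buffer_.back());
          buffer_.pop_back();
          return true;
        }
        void Publish() {  // make locally pushed items visible to other threads
          std::lock_guard<std::mutex> guard(shared_->mutex_);
          for (T& value : buffer_) shared_->items_.push_back(std::move(value));
          buffer_.clear();
        }
        bool IsLocalEmpty() const { return buffer_.empty(); }

       private:
        bool Refill() {  // pull one globally published item back into the buffer
          std::lock_guard<std::mutex> guard(shared_->mutex_);
          if (shared_->items_.empty()) return false;
          buffer_.push_back(std::move(shared_->items_.back()));
          shared_->items_.pop_back();
          return true;
        }
        ToyWorklist* shared_;
        std::vector<T> buffer_;
      };

     private:
      std::mutex mutex_;
      std::vector<T> items_;
    };

Usage mirrors the drain loops above: construct one Local per thread over the shared list, Push while visiting, Publish before handing work to a job, and drain with while (local.Pop(&object)).
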
diff --git a/deps/v8/src/heap/mark-compact.h b/deps/v8/src/heap/mark-compact.h
index 5a7a450e38..ecfb5adc64 100644
--- a/deps/v8/src/heap/mark-compact.h
+++ b/deps/v8/src/heap/mark-compact.h
@@ -8,6 +8,8 @@
#include <atomic>
#include <vector>
+#include "include/v8-internal.h"
+#include "src/heap/base/worklist.h"
#include "src/heap/concurrent-marking.h"
#include "src/heap/marking-visitor.h"
#include "src/heap/marking-worklist.h"
@@ -183,8 +185,9 @@ class LiveObjectVisitor : AllStatic {
static void RecomputeLiveBytes(MemoryChunk* chunk, MarkingState* state);
};
+enum class AlwaysPromoteYoung { kYes, kNo };
enum PageEvacuationMode { NEW_TO_NEW, NEW_TO_OLD };
-enum MarkingTreatmentMode { KEEP, CLEAR };
+enum class MarkingTreatmentMode { KEEP, CLEAR };
enum class RememberedSetUpdatingMode { ALL, OLD_TO_NEW_ONLY };
// Base class for minor and full MC collectors.
@@ -214,8 +217,6 @@ class MarkCompactCollectorBase {
virtual void Evacuate() = 0;
virtual void EvacuatePagesInParallel() = 0;
virtual void UpdatePointersAfterEvacuation() = 0;
- virtual std::unique_ptr<UpdatingItem> CreateToSpaceUpdatingItem(
- MemoryChunk* chunk, Address start, Address end) = 0;
virtual std::unique_ptr<UpdatingItem> CreateRememberedSetUpdatingItem(
MemoryChunk* chunk, RememberedSetUpdatingMode updating_mode) = 0;
@@ -227,10 +228,9 @@ class MarkCompactCollectorBase {
MigrationObserver* migration_observer);
// Returns whether this page should be moved according to heuristics.
- bool ShouldMovePage(Page* p, intptr_t live_bytes, bool promote_young);
+ bool ShouldMovePage(Page* p, intptr_t live_bytes,
+ AlwaysPromoteYoung promote_young);
- int CollectToSpaceUpdatingItems(
- std::vector<std::unique_ptr<UpdatingItem>>* items);
template <typename IterateableSpace>
int CollectRememberedSetUpdatingItems(
std::vector<std::unique_ptr<UpdatingItem>>* items,
@@ -244,6 +244,9 @@ class MarkCompactCollectorBase {
class MinorMarkingState final
: public MarkingStateBase<MinorMarkingState, AccessMode::ATOMIC> {
public:
+ explicit MinorMarkingState(PtrComprCageBase cage_base)
+ : MarkingStateBase(cage_base) {}
+
ConcurrentBitmap<AccessMode::ATOMIC>* bitmap(
const BasicMemoryChunk* chunk) const {
return MemoryChunk::cast(chunk)
@@ -267,6 +270,9 @@ class MinorNonAtomicMarkingState final
: public MarkingStateBase<MinorNonAtomicMarkingState,
AccessMode::NON_ATOMIC> {
public:
+ explicit MinorNonAtomicMarkingState(PtrComprCageBase cage_base)
+ : MarkingStateBase(cage_base) {}
+
ConcurrentBitmap<AccessMode::NON_ATOMIC>* bitmap(
const BasicMemoryChunk* chunk) const {
return MemoryChunk::cast(chunk)
@@ -293,6 +299,9 @@ class MinorNonAtomicMarkingState final
class MajorMarkingState final
: public MarkingStateBase<MajorMarkingState, AccessMode::ATOMIC> {
public:
+ explicit MajorMarkingState(PtrComprCageBase cage_base)
+ : MarkingStateBase(cage_base) {}
+
ConcurrentBitmap<AccessMode::ATOMIC>* bitmap(
const BasicMemoryChunk* chunk) const {
return chunk->marking_bitmap<AccessMode::ATOMIC>();
@@ -318,6 +327,9 @@ class MajorMarkingState final
class MajorAtomicMarkingState final
: public MarkingStateBase<MajorAtomicMarkingState, AccessMode::ATOMIC> {
public:
+ explicit MajorAtomicMarkingState(PtrComprCageBase cage_base)
+ : MarkingStateBase(cage_base) {}
+
ConcurrentBitmap<AccessMode::ATOMIC>* bitmap(
const BasicMemoryChunk* chunk) const {
return chunk->marking_bitmap<AccessMode::ATOMIC>();
@@ -332,6 +344,9 @@ class MajorNonAtomicMarkingState final
: public MarkingStateBase<MajorNonAtomicMarkingState,
AccessMode::NON_ATOMIC> {
public:
+ explicit MajorNonAtomicMarkingState(PtrComprCageBase cage_base)
+ : MarkingStateBase(cage_base) {}
+
ConcurrentBitmap<AccessMode::NON_ATOMIC>* bitmap(
const BasicMemoryChunk* chunk) const {
return chunk->marking_bitmap<AccessMode::NON_ATOMIC>();
@@ -375,13 +390,13 @@ class MainMarkingVisitor final
MainMarkingVisitor(MarkingState* marking_state,
MarkingWorklists::Local* local_marking_worklists,
- WeakObjects* weak_objects, Heap* heap,
+ WeakObjects::Local* local_weak_objects, Heap* heap,
unsigned mark_compact_epoch,
base::EnumSet<CodeFlushMode> code_flush_mode,
bool embedder_tracing_enabled,
bool should_keep_ages_unchanged)
: MarkingVisitorBase<MainMarkingVisitor<MarkingState>, MarkingState>(
- kMainThreadTask, local_marking_worklists, weak_objects, heap,
+ local_marking_worklists, local_weak_objects, heap,
mark_compact_epoch, code_flush_mode, embedder_tracing_enabled,
should_keep_ages_unchanged),
marking_state_(marking_state),
@@ -429,11 +444,7 @@ class MainMarkingVisitor final
// Collector for young and old generation.
class MarkCompactCollector final : public MarkCompactCollectorBase {
public:
-#ifdef V8_ATOMIC_MARKING_STATE
using MarkingState = MajorMarkingState;
-#else
- using MarkingState = MajorNonAtomicMarkingState;
-#endif // V8_ATOMIC_MARKING_STATE
using AtomicMarkingState = MajorAtomicMarkingState;
using NonAtomicMarkingState = MajorNonAtomicMarkingState;
@@ -441,6 +452,7 @@ class MarkCompactCollector final : public MarkCompactCollectorBase {
class RootMarkingVisitor;
class CustomRootBodyMarkingVisitor;
+ class SharedHeapObjectVisitor;
enum IterationMode {
kKeepMarking,
@@ -452,6 +464,11 @@ class MarkCompactCollector final : public MarkCompactCollectorBase {
kTrackNewlyDiscoveredObjects
};
+ enum class StartCompactionMode {
+ kIncremental,
+ kAtomic,
+ };
+
MarkingState* marking_state() { return &marking_state_; }
NonAtomicMarkingState* non_atomic_marking_state() {
@@ -475,7 +492,8 @@ class MarkCompactCollector final : public MarkCompactCollectorBase {
// it to complete as requested by |stop_request|).
void FinishConcurrentMarking();
- bool StartCompaction();
+ // Returns whether compaction is running.
+ bool StartCompaction(StartCompactionMode mode);
void AbortCompaction();
@@ -531,6 +549,8 @@ class MarkCompactCollector final : public MarkCompactCollectorBase {
WeakObjects* weak_objects() { return &weak_objects_; }
+ WeakObjects::Local* local_weak_objects() { return local_weak_objects_.get(); }
+
inline void AddTransitionArray(TransitionArray array);
void AddNewlyDiscovered(HeapObject object) {
@@ -616,6 +636,13 @@ class MarkCompactCollector final : public MarkCompactCollectorBase {
void MarkRoots(RootVisitor* root_visitor,
ObjectVisitor* custom_root_body_visitor);
+  // Mark all objects that are directly referenced from one of the client
+  // heaps.
+ void MarkObjectsFromClientHeaps();
+
+ // Updates pointers to shared objects from client heaps.
+ void UpdatePointersInClientHeaps();
+
// Marks object reachable from harmony weak maps and wrapper tracing.
void ProcessEphemeronMarking();
@@ -698,8 +725,6 @@ class MarkCompactCollector final : public MarkCompactCollectorBase {
// those with dead values.
void ClearJSWeakRefs();
- void AbortWeakObjects();
-
// Starts sweeping of spaces by contributing on the main thread and setting
// up other pages for sweeping. Does not start sweeper tasks.
void StartSweepSpaces();
@@ -711,17 +736,16 @@ class MarkCompactCollector final : public MarkCompactCollectorBase {
void EvacuatePagesInParallel() override;
void UpdatePointersAfterEvacuation() override;
- std::unique_ptr<UpdatingItem> CreateToSpaceUpdatingItem(MemoryChunk* chunk,
- Address start,
- Address end) override;
std::unique_ptr<UpdatingItem> CreateRememberedSetUpdatingItem(
MemoryChunk* chunk, RememberedSetUpdatingMode updating_mode) override;
void ReleaseEvacuationCandidates();
// Returns number of aborted pages.
size_t PostProcessEvacuationCandidates();
- void ReportAbortedEvacuationCandidate(Address failed_start,
- MemoryChunk* chunk);
+ void ReportAbortedEvacuationCandidateDueToOOM(Address failed_start,
+ Page* page);
+ void ReportAbortedEvacuationCandidateDueToFlags(Address failed_start,
+ Page* page);
static const int kEphemeronChunkSize = 8 * KB;
@@ -730,7 +754,7 @@ class MarkCompactCollector final : public MarkCompactCollectorBase {
void RightTrimDescriptorArray(DescriptorArray array, int descriptors_to_trim);
base::Mutex mutex_;
- base::Semaphore page_parallel_job_semaphore_;
+ base::Semaphore page_parallel_job_semaphore_{0};
#ifdef DEBUG
enum CollectorState{IDLE,
@@ -747,17 +771,13 @@ class MarkCompactCollector final : public MarkCompactCollectorBase {
const bool is_shared_heap_;
- bool was_marked_incrementally_;
-
- bool evacuation_;
-
+ bool was_marked_incrementally_ = false;
+ bool evacuation_ = false;
// True if we are collecting slots to perform evacuation from evacuation
// candidates.
- bool compacting_;
-
- bool black_allocation_;
-
- bool have_code_to_deoptimize_;
+ bool compacting_ = false;
+ bool black_allocation_ = false;
+ bool have_code_to_deoptimize_ = false;
MarkingWorklists marking_worklists_;
@@ -766,6 +786,7 @@ class MarkCompactCollector final : public MarkCompactCollectorBase {
std::unique_ptr<MarkingVisitor> marking_visitor_;
std::unique_ptr<MarkingWorklists::Local> local_marking_worklists_;
+ std::unique_ptr<WeakObjects::Local> local_weak_objects_;
NativeContextInferrer native_context_inferrer_;
NativeContextStats native_context_stats_;
@@ -774,13 +795,16 @@ class MarkCompactCollector final : public MarkCompactCollectorBase {
// Pages that are actually processed during evacuation.
std::vector<Page*> old_space_evacuation_pages_;
std::vector<Page*> new_space_evacuation_pages_;
- std::vector<std::pair<Address, Page*>> aborted_evacuation_candidates_;
-
- Sweeper* sweeper_;
+ std::vector<std::pair<Address, Page*>>
+ aborted_evacuation_candidates_due_to_oom_;
+ std::vector<std::pair<Address, Page*>>
+ aborted_evacuation_candidates_due_to_flags_;
MarkingState marking_state_;
NonAtomicMarkingState non_atomic_marking_state_;
+ Sweeper* sweeper_;
+
// Counts the number of major mark-compact collections. The counter is
// incremented right after marking. This is used for:
// - marking descriptor arrays. See NumberOfMarkedDescriptors. Only the lower
@@ -819,6 +843,8 @@ class MinorMarkCompactCollector final : public MarkCompactCollectorBase {
using MarkingState = MinorMarkingState;
using NonAtomicMarkingState = MinorNonAtomicMarkingState;
+ static constexpr size_t kMaxParallelTasks = 8;
+
explicit MinorMarkCompactCollector(Heap* heap);
~MinorMarkCompactCollector() override;
@@ -837,7 +863,8 @@ class MinorMarkCompactCollector final : public MarkCompactCollectorBase {
void CleanupSweepToIteratePages();
private:
- using MarkingWorklist = Worklist<HeapObject, 64 /* segment size */>;
+ using MarkingWorklist =
+ ::heap::base::Worklist<HeapObject, 64 /* segment size */>;
class RootMarkingVisitor;
static const int kNumMarkers = 8;
@@ -864,22 +891,26 @@ class MinorMarkCompactCollector final : public MarkCompactCollectorBase {
std::unique_ptr<UpdatingItem> CreateToSpaceUpdatingItem(MemoryChunk* chunk,
Address start,
- Address end) override;
+ Address end);
std::unique_ptr<UpdatingItem> CreateRememberedSetUpdatingItem(
MemoryChunk* chunk, RememberedSetUpdatingMode updating_mode) override;
+ int CollectToSpaceUpdatingItems(
+ std::vector<std::unique_ptr<UpdatingItem>>* items);
+
void SweepArrayBufferExtensions();
MarkingWorklist* worklist_;
+ MarkingWorklist::Local main_thread_worklist_local_;
+
+ MarkingState marking_state_;
+ NonAtomicMarkingState non_atomic_marking_state_;
YoungGenerationMarkingVisitor* main_marking_visitor_;
base::Semaphore page_parallel_job_semaphore_;
std::vector<Page*> new_space_evacuation_pages_;
std::vector<Page*> sweep_to_iterate_pages_;
- MarkingState marking_state_;
- NonAtomicMarkingState non_atomic_marking_state_;
-
friend class YoungGenerationMarkingTask;
friend class YoungGenerationMarkingJob;
friend class YoungGenerationMarkingVisitor;
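
The header changes above replace boolean parameters with dedicated enum classes (AlwaysPromoteYoung, and MarkingTreatmentMode becoming an enum class). Below is a minimal sketch of the motivation, with a made-up signature and threshold rather than V8's real heuristic.

    // Sketch only: the enum makes the call site self-documenting and prevents
    // silently swapping unrelated boolean arguments.
    enum class AlwaysPromoteYoung { kYes, kNo };

    bool ShouldMovePage(long live_bytes, AlwaysPromoteYoung promote_young) {
      const long kThreshold = 32 * 1024;  // invented threshold for the sketch
      if (live_bytes <= kThreshold) return false;
      return promote_young == AlwaysPromoteYoung::kYes;
    }

    int main() {
      // Reads as intent, unlike ShouldMovePage(bytes, true):
      return ShouldMovePage(64 * 1024, AlwaysPromoteYoung::kYes) ? 0 : 1;
    }
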
diff --git a/deps/v8/src/heap/marking-barrier-inl.h b/deps/v8/src/heap/marking-barrier-inl.h
index 03e89a68e4..656abe5883 100644
--- a/deps/v8/src/heap/marking-barrier-inl.h
+++ b/deps/v8/src/heap/marking-barrier-inl.h
@@ -28,6 +28,8 @@ bool MarkingBarrier::MarkValue(HeapObject host, HeapObject value) {
// visits the host object.
return false;
}
+ BasicMemoryChunk* target_page = BasicMemoryChunk::FromHeapObject(value);
+ if (is_shared_heap_ != target_page->InSharedHeap()) return false;
if (WhiteToGreyAndPush(value)) {
if (is_main_thread_barrier_) {
incremental_marking_->RestartIfNotMarking();
diff --git a/deps/v8/src/heap/marking-barrier.cc b/deps/v8/src/heap/marking-barrier.cc
index 1b9931d2d5..6a7571af79 100644
--- a/deps/v8/src/heap/marking-barrier.cc
+++ b/deps/v8/src/heap/marking-barrier.cc
@@ -27,14 +27,18 @@ MarkingBarrier::MarkingBarrier(Heap* heap)
collector_(heap_->mark_compact_collector()),
incremental_marking_(heap_->incremental_marking()),
worklist_(collector_->marking_worklists()->shared()),
- is_main_thread_barrier_(true) {}
+ marking_state_(heap_->isolate()),
+ is_main_thread_barrier_(true),
+ is_shared_heap_(heap_->IsShared()) {}
MarkingBarrier::MarkingBarrier(LocalHeap* local_heap)
: heap_(local_heap->heap()),
collector_(heap_->mark_compact_collector()),
incremental_marking_(nullptr),
worklist_(collector_->marking_worklists()->shared()),
- is_main_thread_barrier_(false) {}
+ marking_state_(heap_->isolate()),
+ is_main_thread_barrier_(false),
+ is_shared_heap_(heap_->IsShared()) {}
MarkingBarrier::~MarkingBarrier() { DCHECK(worklist_.IsLocalEmpty()); }
@@ -156,6 +160,12 @@ void MarkingBarrier::Publish() {
worklist_.Publish();
for (auto& it : typed_slots_map_) {
MemoryChunk* memory_chunk = it.first;
+    // Access to TypedSlots needs to be protected, since LocalHeaps might
+    // publish code from a background thread.
+ base::Optional<base::MutexGuard> opt_guard;
+ if (FLAG_concurrent_sparkplug) {
+ opt_guard.emplace(memory_chunk->mutex());
+ }
std::unique_ptr<TypedSlots>& typed_slots = it.second;
RememberedSet<OLD_TO_OLD>::MergeTyped(memory_chunk,
std::move(typed_slots));
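
Both RecordRelocSlot and MarkingBarrier::Publish above take the chunk mutex only when FLAG_concurrent_sparkplug is on, using base::Optional<base::MutexGuard> to defer the RAII guard. The same pattern in standard C++ (std::optional plus std::lock_guard; the flag variable here is a stand-in, not a real V8 flag accessor):

    #include <mutex>
    #include <optional>

    std::mutex chunk_mutex;
    bool concurrent_sparkplug_enabled = true;  // stand-in for FLAG_concurrent_sparkplug

    void MergeTypedSlots() {
      std::optional<std::lock_guard<std::mutex>> guard;
      if (concurrent_sparkplug_enabled) {
        guard.emplace(chunk_mutex);  // locked for the remainder of this scope
      }
      // ... merge typed slots; the guard unlocks automatically on destruction ...
    }
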
diff --git a/deps/v8/src/heap/marking-barrier.h b/deps/v8/src/heap/marking-barrier.h
index deb49a46d0..d7cc79315f 100644
--- a/deps/v8/src/heap/marking-barrier.h
+++ b/deps/v8/src/heap/marking-barrier.h
@@ -72,6 +72,7 @@ class MarkingBarrier {
bool is_compacting_ = false;
bool is_activated_ = false;
bool is_main_thread_barrier_;
+ bool is_shared_heap_;
};
} // namespace internal
diff --git a/deps/v8/src/heap/marking-visitor-inl.h b/deps/v8/src/heap/marking-visitor-inl.h
index ab84a32b1a..8f65a61dab 100644
--- a/deps/v8/src/heap/marking-visitor-inl.h
+++ b/deps/v8/src/heap/marking-visitor-inl.h
@@ -63,7 +63,7 @@ void MarkingVisitorBase<ConcreteVisitor, MarkingState>::ProcessWeakHeapObject(
// If we do not know about liveness of the value, we have to process
// the reference when we know the liveness of the whole transitive
// closure.
- weak_objects_->weak_references.Push(task_id_, std::make_pair(host, slot));
+ local_weak_objects_->weak_references_local.Push(std::make_pair(host, slot));
}
}
@@ -114,8 +114,8 @@ void MarkingVisitorBase<ConcreteVisitor, MarkingState>::VisitEmbeddedPointer(
rinfo->target_object(ObjectVisitorWithCageBases::cage_base());
if (!concrete_visitor()->marking_state()->IsBlackOrGrey(object)) {
if (host.IsWeakObject(object)) {
- weak_objects_->weak_objects_in_code.Push(task_id_,
- std::make_pair(object, host));
+ local_weak_objects_->weak_objects_in_code_local.Push(
+ std::make_pair(object, host));
} else {
MarkObject(host, object);
}
@@ -155,7 +155,7 @@ int MarkingVisitorBase<ConcreteVisitor, MarkingState>::VisitJSFunction(
int size = concrete_visitor()->VisitJSObjectSubclass(map, js_function);
if (js_function.ShouldFlushBaselineCode(code_flush_mode_)) {
DCHECK(IsBaselineCodeFlushingEnabled(code_flush_mode_));
- weak_objects_->baseline_flushing_candidates.Push(task_id_, js_function);
+ local_weak_objects_->baseline_flushing_candidates_local.Push(js_function);
} else {
VisitPointer(js_function, js_function.RawField(JSFunction::kCodeOffset));
// TODO(mythria): Consider updating the check for ShouldFlushBaselineCode to
@@ -163,7 +163,7 @@ int MarkingVisitorBase<ConcreteVisitor, MarkingState>::VisitJSFunction(
// baseline code and remove this check here.
if (IsByteCodeFlushingEnabled(code_flush_mode_) &&
js_function.NeedsResetDueToFlushedBytecode()) {
- weak_objects_->flushed_js_functions.Push(task_id_, js_function);
+ local_weak_objects_->flushed_js_functions_local.Push(js_function);
}
}
return size;
@@ -194,11 +194,11 @@ int MarkingVisitorBase<ConcreteVisitor, MarkingState>::VisitSharedFunctionInfo(
VisitPointer(baseline_code,
baseline_code.RawField(
Code::kDeoptimizationDataOrInterpreterDataOffset));
- weak_objects_->code_flushing_candidates.Push(task_id_, shared_info);
+ local_weak_objects_->code_flushing_candidates_local.Push(shared_info);
} else {
// In other cases, record as a flushing candidate since we have old
// bytecode.
- weak_objects_->code_flushing_candidates.Push(task_id_, shared_info);
+ local_weak_objects_->code_flushing_candidates_local.Push(shared_info);
}
return size;
}
@@ -306,7 +306,7 @@ template <typename ConcreteVisitor, typename MarkingState>
int MarkingVisitorBase<ConcreteVisitor, MarkingState>::VisitEphemeronHashTable(
Map map, EphemeronHashTable table) {
if (!concrete_visitor()->ShouldVisit(table)) return 0;
- weak_objects_->ephemeron_hash_tables.Push(task_id_, table);
+ local_weak_objects_->ephemeron_hash_tables_local.Push(table);
for (InternalIndex i : table.IterateEntries()) {
ObjectSlot key_slot =
@@ -332,8 +332,8 @@ int MarkingVisitorBase<ConcreteVisitor, MarkingState>::VisitEphemeronHashTable(
// Revisit ephemerons with both key and value unreachable at end
// of concurrent marking cycle.
if (concrete_visitor()->marking_state()->IsWhite(value)) {
- weak_objects_->discovered_ephemerons.Push(task_id_,
- Ephemeron{key, value});
+ local_weak_objects_->discovered_ephemerons_local.Push(
+ Ephemeron{key, value});
}
}
}
@@ -357,7 +357,7 @@ int MarkingVisitorBase<ConcreteVisitor, MarkingState>::VisitJSWeakRef(
} else {
// JSWeakRef points to a potentially dead object. We have to process
// them when we know the liveness of the whole transitive closure.
- weak_objects_->js_weak_refs.Push(task_id_, weak_ref);
+ local_weak_objects_->js_weak_refs_local.Push(weak_ref);
}
}
return size;
@@ -387,7 +387,7 @@ int MarkingVisitorBase<ConcreteVisitor, MarkingState>::VisitWeakCell(
// WeakCell points to a potentially dead object or a dead unregister
// token. We have to process them when we know the liveness of the whole
// transitive closure.
- weak_objects_->weak_cells.Push(task_id_, weak_cell);
+ local_weak_objects_->weak_cells_local.Push(weak_cell);
}
return size;
}
@@ -505,7 +505,7 @@ int MarkingVisitorBase<ConcreteVisitor, MarkingState>::VisitTransitionArray(
this->VisitMapPointer(array);
int size = TransitionArray::BodyDescriptor::SizeOf(map, array);
TransitionArray::BodyDescriptor::IterateBody(map, array, size, this);
- weak_objects_->transition_arrays.Push(task_id_, array);
+ local_weak_objects_->transition_arrays_local.Push(array);
return size;
}
diff --git a/deps/v8/src/heap/marking-visitor.h b/deps/v8/src/heap/marking-visitor.h
index 8be5ab065b..26ebf5713f 100644
--- a/deps/v8/src/heap/marking-visitor.h
+++ b/deps/v8/src/heap/marking-visitor.h
@@ -12,7 +12,6 @@
#include "src/heap/objects-visiting.h"
#include "src/heap/spaces.h"
#include "src/heap/weak-object-worklists.h"
-#include "src/heap/worklist.h"
namespace v8 {
namespace internal {
@@ -26,6 +25,23 @@ struct EphemeronMarking {
template <typename ConcreteState, AccessMode access_mode>
class MarkingStateBase {
public:
+ explicit MarkingStateBase(PtrComprCageBase cage_base)
+#if V8_COMPRESS_POINTERS
+ : cage_base_(cage_base)
+#endif
+ {
+ }
+
+ // The pointer compression cage base value used for decompression of all
+ // tagged values except references to Code objects.
+ V8_INLINE PtrComprCageBase cage_base() const {
+#if V8_COMPRESS_POINTERS
+ return cage_base_;
+#else
+ return PtrComprCageBase{};
+#endif // V8_COMPRESS_POINTERS
+ }
+
V8_INLINE MarkBit MarkBitFrom(HeapObject obj) {
return MarkBitFrom(BasicMemoryChunk::FromHeapObject(obj), obj.ptr());
}
@@ -73,14 +89,23 @@ class MarkingStateBase {
MarkBit markbit = MarkBitFrom(chunk, obj.address());
if (!Marking::GreyToBlack<access_mode>(markbit)) return false;
static_cast<ConcreteState*>(this)->IncrementLiveBytes(
- MemoryChunk::cast(chunk), obj.Size());
+ MemoryChunk::cast(chunk), obj.Size(cage_base()));
return true;
}
+ V8_INLINE bool GreyToBlackUnaccounted(HeapObject obj) {
+ return Marking::GreyToBlack<access_mode>(MarkBitFrom(obj));
+ }
+
void ClearLiveness(MemoryChunk* chunk) {
static_cast<ConcreteState*>(this)->bitmap(chunk)->Clear();
static_cast<ConcreteState*>(this)->SetLiveBytes(chunk, 0);
}
+
+ private:
+#if V8_COMPRESS_POINTERS
+ const PtrComprCageBase cage_base_;
+#endif // V8_COMPRESS_POINTERS
};
// The base class for all marking visitors. It implements marking logic with
@@ -101,18 +126,17 @@ class MarkingStateBase {
template <typename ConcreteVisitor, typename MarkingState>
class MarkingVisitorBase : public HeapVisitor<int, ConcreteVisitor> {
public:
- MarkingVisitorBase(int task_id,
- MarkingWorklists::Local* local_marking_worklists,
- WeakObjects* weak_objects, Heap* heap,
- unsigned mark_compact_epoch,
+ MarkingVisitorBase(MarkingWorklists::Local* local_marking_worklists,
+ WeakObjects::Local* local_weak_objects,
+ Heap* heap, unsigned mark_compact_epoch,
base::EnumSet<CodeFlushMode> code_flush_mode,
bool is_embedder_tracing_enabled,
bool should_keep_ages_unchanged)
: HeapVisitor<int, ConcreteVisitor>(heap),
local_marking_worklists_(local_marking_worklists),
- weak_objects_(weak_objects),
+ local_weak_objects_(local_weak_objects),
heap_(heap),
- task_id_(task_id),
mark_compact_epoch_(mark_compact_epoch),
code_flush_mode_(code_flush_mode),
is_embedder_tracing_enabled_(is_embedder_tracing_enabled),
@@ -205,9 +229,8 @@ class MarkingVisitorBase : public HeapVisitor<int, ConcreteVisitor> {
V8_INLINE void MarkObject(HeapObject host, HeapObject obj);
MarkingWorklists::Local* const local_marking_worklists_;
- WeakObjects* const weak_objects_;
+ WeakObjects::Local* const local_weak_objects_;
Heap* const heap_;
- const int task_id_;
const unsigned mark_compact_epoch_;
const base::EnumSet<CodeFlushMode> code_flush_mode_;
const bool is_embedder_tracing_enabled_;
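
MarkingStateBase above stores cage_base_ only when V8_COMPRESS_POINTERS is defined but exposes an unconditional cage_base() accessor that falls back to a default value. A self-contained sketch of that pattern follows (CageBase and COMPRESS_POINTERS are stand-ins, not the real V8 type or macro):

    #define COMPRESS_POINTERS 1  // flip to 0 to compile the field away

    struct CageBase {
      unsigned long value = 0;
    };

    class MarkingStateSketch {
     public:
      explicit MarkingStateSketch(CageBase cage_base)
    #if COMPRESS_POINTERS
          : cage_base_(cage_base)
    #endif
      {
        (void)cage_base;  // silences the unused-parameter warning when compression is off
      }

      CageBase cage_base() const {
    #if COMPRESS_POINTERS
        return cage_base_;  // real base when pointer compression is on
    #else
        return CageBase{};  // harmless default otherwise
    #endif
      }

     private:
    #if COMPRESS_POINTERS
      const CageBase cage_base_;
    #endif
    };
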
diff --git a/deps/v8/src/heap/memory-allocator.cc b/deps/v8/src/heap/memory-allocator.cc
index c2cff9fc66..d9552149c2 100644
--- a/deps/v8/src/heap/memory-allocator.cc
+++ b/deps/v8/src/heap/memory-allocator.cc
@@ -409,7 +409,9 @@ MemoryChunk* MemoryAllocator::AllocateChunk(size_t reserve_area_size,
MemoryChunk* chunk =
MemoryChunk::Initialize(basic_chunk, isolate_->heap(), executable);
+#ifdef DEBUG
if (chunk->executable()) RegisterExecutableMemoryChunk(chunk);
+#endif // DEBUG
return chunk;
}
@@ -458,7 +460,11 @@ void MemoryAllocator::UnregisterMemory(BasicMemoryChunk* chunk,
if (executable == EXECUTABLE) {
DCHECK_GE(size_executable_, size);
size_executable_ -= size;
+#ifdef DEBUG
UnregisterExecutableMemoryChunk(static_cast<MemoryChunk*>(chunk));
+#endif // DEBUG
+ chunk->heap()->UnregisterUnprotectedMemoryChunk(
+ static_cast<MemoryChunk*>(chunk));
}
chunk->SetFlag(MemoryChunk::UNREGISTERED);
}
@@ -578,10 +584,8 @@ template EXPORT_TEMPLATE_DEFINE(V8_EXPORT_PRIVATE)
ReadOnlyPage* MemoryAllocator::AllocateReadOnlyPage(size_t size,
ReadOnlySpace* owner) {
- BasicMemoryChunk* chunk = nullptr;
- if (chunk == nullptr) {
- chunk = AllocateBasicChunk(size, size, NOT_EXECUTABLE, owner);
- }
+ BasicMemoryChunk* chunk =
+ AllocateBasicChunk(size, size, NOT_EXECUTABLE, owner);
if (chunk == nullptr) return nullptr;
return owner->InitializePage(chunk);
}
@@ -679,7 +683,7 @@ bool MemoryAllocator::CommitExecutableMemory(VirtualMemory* vm, Address start,
PageAllocator::kNoAccess)) {
// Commit the executable code body.
if (vm->SetPermissions(code_area, commit_size - pre_guard_offset,
- PageAllocator::kReadWrite)) {
+ MemoryChunk::GetCodeModificationPermission())) {
// Create the post-code guard page.
if (vm->SetPermissions(post_guard_page, page_size,
PageAllocator::kNoAccess)) {
diff --git a/deps/v8/src/heap/memory-allocator.h b/deps/v8/src/heap/memory-allocator.h
index d405aefa53..49b5a769cf 100644
--- a/deps/v8/src/heap/memory-allocator.h
+++ b/deps/v8/src/heap/memory-allocator.h
@@ -18,7 +18,6 @@
#include "src/base/platform/mutex.h"
#include "src/base/platform/semaphore.h"
#include "src/heap/code-range.h"
-#include "src/heap/heap.h"
#include "src/heap/memory-chunk.h"
#include "src/heap/spaces.h"
#include "src/tasks/cancelable-task.h"
@@ -226,11 +225,14 @@ class MemoryAllocator {
void PartialFreeMemory(BasicMemoryChunk* chunk, Address start_free,
size_t bytes_to_free, Address new_area_end);
+#ifdef DEBUG
// Checks if an allocated MemoryChunk was intended to be used for executable
// memory.
bool IsMemoryChunkExecutable(MemoryChunk* chunk) {
+ base::MutexGuard guard(&executable_memory_mutex_);
return executable_memory_.find(chunk) != executable_memory_.end();
}
+#endif // DEBUG
// Commit memory region owned by given reservation object. Returns true if
// it succeeded and false otherwise.
@@ -311,6 +313,7 @@ class MemoryAllocator {
}
}
+#ifdef DEBUG
void RegisterExecutableMemoryChunk(MemoryChunk* chunk) {
base::MutexGuard guard(&executable_memory_mutex_);
DCHECK(chunk->IsFlagSet(MemoryChunk::IS_EXECUTABLE));
@@ -322,8 +325,8 @@ class MemoryAllocator {
base::MutexGuard guard(&executable_memory_mutex_);
DCHECK_NE(executable_memory_.find(chunk), executable_memory_.end());
executable_memory_.erase(chunk);
- chunk->heap()->UnregisterUnprotectedMemoryChunk(chunk);
}
+#endif // DEBUG
Isolate* isolate_;
@@ -359,9 +362,12 @@ class MemoryAllocator {
VirtualMemory last_chunk_;
Unmapper unmapper_;
+#ifdef DEBUG
// Data structure to remember allocated executable memory chunks.
+ // This data structure is used only in DCHECKs.
std::unordered_set<MemoryChunk*> executable_memory_;
base::Mutex executable_memory_mutex_;
+#endif // DEBUG
friend class heap::TestCodePageAllocatorScope;
friend class heap::TestMemoryAllocatorScope;
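
The memory-allocator changes above compile the executable_memory_ set, its mutex, and the register/unregister helpers only in DEBUG builds, since the set now backs DCHECKs exclusively. Below is a small sketch of debug-only bookkeeping using the standard NDEBUG macro (V8 itself keys this on its own DEBUG define; ChunkRegistry is an invented name):

    #include <mutex>
    #include <unordered_set>

    class ChunkRegistry {
     public:
      void Register(void* chunk) {
    #ifndef NDEBUG
        std::lock_guard<std::mutex> guard(mutex_);
        chunks_.insert(chunk);
    #else
        (void)chunk;  // bookkeeping is compiled out of release builds
    #endif
      }

      bool Contains(void* chunk) {
    #ifndef NDEBUG
        std::lock_guard<std::mutex> guard(mutex_);
        return chunks_.find(chunk) != chunks_.end();
    #else
        (void)chunk;
        return false;
    #endif
      }

     private:
    #ifndef NDEBUG
      std::mutex mutex_;
      std::unordered_set<void*> chunks_;
    #endif
    };
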
diff --git a/deps/v8/src/heap/memory-chunk.cc b/deps/v8/src/heap/memory-chunk.cc
index 959501724f..d4d1116683 100644
--- a/deps/v8/src/heap/memory-chunk.cc
+++ b/deps/v8/src/heap/memory-chunk.cc
@@ -93,10 +93,9 @@ void MemoryChunk::SetCodeModificationPermissions() {
// We may use RWX pages to write code. Some CPUs have optimisations to push
// updates to code to the icache through a fast path, and they may filter
// updates based on the written memory being executable.
- CHECK(reservation_.SetPermissions(unprotect_start, unprotect_size,
- FLAG_write_code_using_rwx
- ? PageAllocator::kReadWriteExecute
- : PageAllocator::kReadWrite));
+ CHECK(reservation_.SetPermissions(
+ unprotect_start, unprotect_size,
+ MemoryChunk::GetCodeModificationPermission()));
}
}
@@ -390,7 +389,7 @@ void MemoryChunk::InvalidateRecordedSlots(HeapObject object) {
RegisterObjectWithInvalidatedSlots<OLD_TO_OLD>(object);
}
- if (!FLAG_always_promote_young_mc || slot_set_[OLD_TO_NEW] != nullptr)
+ if (slot_set_[OLD_TO_NEW] != nullptr)
RegisterObjectWithInvalidatedSlots<OLD_TO_NEW>(object);
}
diff --git a/deps/v8/src/heap/memory-chunk.h b/deps/v8/src/heap/memory-chunk.h
index 761ea9a83a..de6f09234b 100644
--- a/deps/v8/src/heap/memory-chunk.h
+++ b/deps/v8/src/heap/memory-chunk.h
@@ -189,6 +189,11 @@ class MemoryChunk : public BasicMemoryChunk {
// MemoryChunk::synchronized_heap() to simulate the barrier.
void InitializationMemoryFence();
+ static PageAllocator::Permission GetCodeModificationPermission() {
+ return FLAG_write_code_using_rwx ? PageAllocator::kReadWriteExecute
+ : PageAllocator::kReadWrite;
+ }
+
V8_EXPORT_PRIVATE void SetReadable();
V8_EXPORT_PRIVATE void SetReadAndExecutable();
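
GetCodeModificationPermission() above centralizes the FLAG_write_code_using_rwx decision so memory-chunk.cc and memory-allocator.cc request the same page permissions. A minimal sketch of hoisting a flag-dependent choice into one helper (Permission and the flag holder are invented names, not V8's):

    enum class Permission { kReadWrite, kReadWriteExecute };

    struct Flags {
      static inline bool write_code_using_rwx = true;  // stand-in for the V8 flag
    };

    Permission GetCodeModificationPermission() {
      return Flags::write_code_using_rwx ? Permission::kReadWriteExecute
                                         : Permission::kReadWrite;
    }
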
diff --git a/deps/v8/src/heap/memory-measurement-inl.h b/deps/v8/src/heap/memory-measurement-inl.h
index f6c75b6ca6..6924bbf1b1 100644
--- a/deps/v8/src/heap/memory-measurement-inl.h
+++ b/deps/v8/src/heap/memory-measurement-inl.h
@@ -29,6 +29,7 @@ bool NativeContextInferrer::Infer(Isolate* isolate, Map map, HeapObject object,
native_context);
case kVisitJSApiObject:
case kVisitJSArrayBuffer:
+ case kVisitJSFinalizationRegistry:
case kVisitJSObject:
case kVisitJSObjectFast:
case kVisitJSTypedArray:
diff --git a/deps/v8/src/heap/memory-measurement.cc b/deps/v8/src/heap/memory-measurement.cc
index 0ef5d7550b..0aeef39910 100644
--- a/deps/v8/src/heap/memory-measurement.cc
+++ b/deps/v8/src/heap/memory-measurement.cc
@@ -338,11 +338,12 @@ std::unique_ptr<v8::MeasureMemoryDelegate> MemoryMeasurement::DefaultDelegate(
bool NativeContextInferrer::InferForContext(Isolate* isolate, Context context,
Address* native_context) {
- Map context_map = context.map(kAcquireLoad);
+ PtrComprCageBase cage_base(isolate);
+ Map context_map = context.map(cage_base, kAcquireLoad);
Object maybe_native_context =
TaggedField<Object, Map::kConstructorOrBackPointerOrNativeContextOffset>::
- Acquire_Load(isolate, context_map);
- if (maybe_native_context.IsNativeContext()) {
+ Acquire_Load(cage_base, context_map);
+ if (maybe_native_context.IsNativeContext(cage_base)) {
*native_context = maybe_native_context.ptr();
return true;
}
@@ -401,7 +402,7 @@ void NativeContextStats::IncrementExternalSize(Address context, Map map,
InstanceType instance_type = map.instance_type();
size_t external_size = 0;
if (instance_type == JS_ARRAY_BUFFER_TYPE) {
- external_size = JSArrayBuffer::cast(object).allocation_length();
+ external_size = JSArrayBuffer::cast(object).GetByteLength();
} else {
DCHECK(InstanceTypeChecker::IsExternalString(instance_type));
external_size = ExternalString::cast(object).ExternalPayloadSize();
diff --git a/deps/v8/src/heap/new-spaces-inl.h b/deps/v8/src/heap/new-spaces-inl.h
index c47c949388..72112d2426 100644
--- a/deps/v8/src/heap/new-spaces-inl.h
+++ b/deps/v8/src/heap/new-spaces-inl.h
@@ -96,7 +96,7 @@ AllocationResult NewSpace::AllocateRaw(int size_in_bytes,
AllocationResult result;
- if (alignment != kWordAligned) {
+ if (USE_ALLOCATION_ALIGNMENT_BOOL && alignment != kTaggedAligned) {
result = AllocateFastAligned(size_in_bytes, nullptr, alignment, origin);
} else {
result = AllocateFastUnaligned(size_in_bytes, origin);
@@ -111,11 +111,11 @@ AllocationResult NewSpace::AllocateRaw(int size_in_bytes,
AllocationResult NewSpace::AllocateFastUnaligned(int size_in_bytes,
AllocationOrigin origin) {
- if (!allocation_info_.CanIncrementTop(size_in_bytes)) {
+ if (!allocation_info_->CanIncrementTop(size_in_bytes)) {
return AllocationResult::Retry(NEW_SPACE);
}
HeapObject obj =
- HeapObject::FromAddress(allocation_info_.IncrementTop(size_in_bytes));
+ HeapObject::FromAddress(allocation_info_->IncrementTop(size_in_bytes));
DCHECK_SEMISPACE_ALLOCATION_INFO(allocation_info_, to_space_);
MSAN_ALLOCATED_UNINITIALIZED_MEMORY(obj.address(), size_in_bytes);
@@ -130,15 +130,15 @@ AllocationResult NewSpace::AllocateFastUnaligned(int size_in_bytes,
AllocationResult NewSpace::AllocateFastAligned(
int size_in_bytes, int* result_aligned_size_in_bytes,
AllocationAlignment alignment, AllocationOrigin origin) {
- Address top = allocation_info_.top();
+ Address top = allocation_info_->top();
int filler_size = Heap::GetFillToAlign(top, alignment);
int aligned_size_in_bytes = size_in_bytes + filler_size;
- if (!allocation_info_.CanIncrementTop(aligned_size_in_bytes)) {
+ if (!allocation_info_->CanIncrementTop(aligned_size_in_bytes)) {
return AllocationResult::Retry(NEW_SPACE);
}
HeapObject obj = HeapObject::FromAddress(
- allocation_info_.IncrementTop(aligned_size_in_bytes));
+ allocation_info_->IncrementTop(aligned_size_in_bytes));
if (result_aligned_size_in_bytes)
*result_aligned_size_in_bytes = aligned_size_in_bytes;
DCHECK_SEMISPACE_ALLOCATION_INFO(allocation_info_, to_space_);
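
Throughout this file allocation_info_ changes from a by-value member to a pointer (hence the . to -> rewrites), and the fast path only takes the aligned route when allocation alignment is actually in use. A simplified, self-contained sketch of bump-pointer allocation over such a shared area; LinearAllocationArea and Space here are stripped-down stand-ins, not the V8 classes:

#include <cstddef>
#include <cstdint>

using Address = std::uintptr_t;

class LinearAllocationArea {
 public:
  void Reset(Address top, Address limit) { top_ = top; limit_ = limit; }
  Address top() const { return top_; }
  bool CanIncrementTop(std::size_t bytes) const {
    return top_ + bytes <= limit_;
  }
  Address IncrementTop(std::size_t bytes) {
    Address result = top_;
    top_ += bytes;
    return result;
  }

 private:
  Address top_ = 0;
  Address limit_ = 0;
};

class Space {
 public:
  // The space does not own the area; the heap hands in a pointer so the
  // main-thread allocation areas can live in one place.
  explicit Space(LinearAllocationArea* allocation_info)
      : allocation_info_(allocation_info) {}

  // Fast path: returns 0 (a "retry" signal) if the request does not fit.
  Address AllocateFastUnaligned(std::size_t bytes) {
    if (!allocation_info_->CanIncrementTop(bytes)) return 0;
    return allocation_info_->IncrementTop(bytes);
  }

 private:
  LinearAllocationArea* allocation_info_;
};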
diff --git a/deps/v8/src/heap/new-spaces.cc b/deps/v8/src/heap/new-spaces.cc
index 70cbbe1799..6155a06f77 100644
--- a/deps/v8/src/heap/new-spaces.cc
+++ b/deps/v8/src/heap/new-spaces.cc
@@ -387,7 +387,7 @@ void SemiSpaceObjectIterator::Initialize(Address start, Address end) {
size_t NewSpace::CommittedPhysicalMemory() {
if (!base::OS::HasLazyCommits()) return CommittedMemory();
- BasicMemoryChunk::UpdateHighWaterMark(allocation_info_.top());
+ BasicMemoryChunk::UpdateHighWaterMark(allocation_info_->top());
size_t size = to_space_.CommittedPhysicalMemory();
if (from_space_.IsCommitted()) {
size += from_space_.CommittedPhysicalMemory();
@@ -400,8 +400,9 @@ size_t NewSpace::CommittedPhysicalMemory() {
NewSpace::NewSpace(Heap* heap, v8::PageAllocator* page_allocator,
size_t initial_semispace_capacity,
- size_t max_semispace_capacity)
- : SpaceWithLinearArea(heap, NEW_SPACE, new NoFreeList()),
+ size_t max_semispace_capacity,
+ LinearAllocationArea* allocation_info)
+ : SpaceWithLinearArea(heap, NEW_SPACE, new NoFreeList(), allocation_info),
to_space_(heap, kToSpace),
from_space_(heap, kFromSpace) {
DCHECK(initial_semispace_capacity <= max_semispace_capacity);
@@ -416,7 +417,7 @@ NewSpace::NewSpace(Heap* heap, v8::PageAllocator* page_allocator,
}
void NewSpace::TearDown() {
- allocation_info_.Reset(kNullAddress, kNullAddress);
+ allocation_info_->Reset(kNullAddress, kNullAddress);
to_space_.TearDown();
from_space_.TearDown();
@@ -468,8 +469,8 @@ void NewSpace::UpdateLinearAllocationArea(Address known_top) {
AdvanceAllocationObservers();
Address new_top = known_top == 0 ? to_space_.page_low() : known_top;
- BasicMemoryChunk::UpdateHighWaterMark(allocation_info_.top());
- allocation_info_.Reset(new_top, to_space_.page_high());
+ BasicMemoryChunk::UpdateHighWaterMark(allocation_info_->top());
+ allocation_info_->Reset(new_top, to_space_.page_high());
// The order of the following two stores is important.
// See the corresponding loads in ConcurrentMarking::Run.
{
@@ -499,7 +500,7 @@ void NewSpace::UpdateInlineAllocationLimit(size_t min_size) {
Address new_limit = ComputeLimit(top(), to_space_.page_high(), min_size);
DCHECK_LE(top(), new_limit);
DCHECK_LE(new_limit, to_space_.page_high());
- allocation_info_.SetLimit(new_limit);
+ allocation_info_->SetLimit(new_limit);
DCHECK_SEMISPACE_ALLOCATION_INFO(allocation_info_, to_space_);
#if DEBUG
@@ -508,7 +509,7 @@ void NewSpace::UpdateInlineAllocationLimit(size_t min_size) {
}
bool NewSpace::AddFreshPage() {
- Address top = allocation_info_.top();
+ Address top = allocation_info_->top();
DCHECK(!OldSpace::IsAtPageStart(top));
// Clear remainder of current page.
@@ -566,7 +567,7 @@ bool NewSpace::EnsureAllocation(int size_in_bytes,
AllocationAlignment alignment) {
AdvanceAllocationObservers();
- Address old_top = allocation_info_.top();
+ Address old_top = allocation_info_->top();
Address high = to_space_.page_high();
int filler_size = Heap::GetFillToAlign(old_top, alignment);
int aligned_size_in_bytes = size_in_bytes + filler_size;
@@ -584,7 +585,7 @@ bool NewSpace::EnsureAllocation(int size_in_bytes,
return false;
}
- old_top = allocation_info_.top();
+ old_top = allocation_info_->top();
high = to_space_.page_high();
filler_size = Heap::GetFillToAlign(old_top, alignment);
aligned_size_in_bytes = size_in_bytes + filler_size;
@@ -595,8 +596,8 @@ bool NewSpace::EnsureAllocation(int size_in_bytes,
}
void NewSpace::MaybeFreeUnusedLab(LinearAllocationArea info) {
- if (allocation_info_.MergeIfAdjacent(info)) {
- original_top_.store(allocation_info_.top(), std::memory_order_release);
+ if (allocation_info_->MergeIfAdjacent(info)) {
+ original_top_.store(allocation_info_->top(), std::memory_order_release);
}
#if DEBUG
@@ -611,29 +612,19 @@ std::unique_ptr<ObjectIterator> NewSpace::GetObjectIterator(Heap* heap) {
AllocationResult NewSpace::AllocateRawSlow(int size_in_bytes,
AllocationAlignment alignment,
AllocationOrigin origin) {
-#ifdef V8_HOST_ARCH_32_BIT
- return alignment != kWordAligned
+ return USE_ALLOCATION_ALIGNMENT_BOOL && alignment != kTaggedAligned
? AllocateRawAligned(size_in_bytes, alignment, origin)
: AllocateRawUnaligned(size_in_bytes, origin);
-#else
-#ifdef V8_COMPRESS_POINTERS
- // TODO(ishell, v8:8875): Consider using aligned allocations once the
- // allocation alignment inconsistency is fixed. For now we keep using
- // unaligned access since both x64 and arm64 architectures (where pointer
- // compression is supported) allow unaligned access to doubles and full words.
-#endif // V8_COMPRESS_POINTERS
- return AllocateRawUnaligned(size_in_bytes, origin);
-#endif
}
AllocationResult NewSpace::AllocateRawUnaligned(int size_in_bytes,
AllocationOrigin origin) {
DCHECK(!FLAG_enable_third_party_heap);
- if (!EnsureAllocation(size_in_bytes, kWordAligned)) {
+ if (!EnsureAllocation(size_in_bytes, kTaggedAligned)) {
return AllocationResult::Retry(NEW_SPACE);
}
- DCHECK_EQ(allocation_info_.start(), allocation_info_.top());
+ DCHECK_EQ(allocation_info_->start(), allocation_info_->top());
AllocationResult result = AllocateFastUnaligned(size_in_bytes, origin);
DCHECK(!result.IsRetry());
@@ -652,7 +643,7 @@ AllocationResult NewSpace::AllocateRawAligned(int size_in_bytes,
return AllocationResult::Retry(NEW_SPACE);
}
- DCHECK_EQ(allocation_info_.start(), allocation_info_.top());
+ DCHECK_EQ(allocation_info_->start(), allocation_info_->top());
int aligned_size_in_bytes;
@@ -666,18 +657,33 @@ AllocationResult NewSpace::AllocateRawAligned(int size_in_bytes,
return result;
}
+void NewSpace::MakeLinearAllocationAreaIterable() {
+ Address to_top = top();
+ Page* page = Page::FromAddress(to_top - kTaggedSize);
+ if (page->Contains(to_top)) {
+ int remaining_in_page = static_cast<int>(page->area_end() - to_top);
+ heap_->CreateFillerObjectAt(to_top, remaining_in_page,
+ ClearRecordedSlots::kNo);
+ }
+}
+
+void NewSpace::FreeLinearAllocationArea() {
+ MakeLinearAllocationAreaIterable();
+ UpdateInlineAllocationLimit(0);
+}
+
void NewSpace::VerifyTop() {
// Ensure validity of LAB: start <= top <= limit
- DCHECK_LE(allocation_info_.start(), allocation_info_.top());
- DCHECK_LE(allocation_info_.top(), allocation_info_.limit());
+ DCHECK_LE(allocation_info_->start(), allocation_info_->top());
+ DCHECK_LE(allocation_info_->top(), allocation_info_->limit());
// Ensure that original_top_ always >= LAB start. The delta between start_
// and top_ is still to be processed by allocation observers.
- DCHECK_GE(original_top_, allocation_info_.start());
+ DCHECK_GE(original_top_, allocation_info_->start());
// Ensure that limit() is <= original_limit_, original_limit_ always needs
// to be end of current to space page.
- DCHECK_LE(allocation_info_.limit(), original_limit_);
+ DCHECK_LE(allocation_info_->limit(), original_limit_);
DCHECK_EQ(original_limit_, to_space_.page_high());
}
@@ -698,6 +704,7 @@ void NewSpace::Verify(Isolate* isolate) {
external_space_bytes[static_cast<ExternalBackingStoreType>(i)] = 0;
}
+ PtrComprCageBase cage_base(isolate);
while (current != top()) {
if (!Page::IsAlignedToPageSize(current)) {
// The allocation pointer should not be in the middle of an object.
@@ -708,26 +715,27 @@ void NewSpace::Verify(Isolate* isolate) {
// The first word should be a map, and we expect all map pointers to
// be in map space or read-only space.
- Map map = object.map();
- CHECK(map.IsMap());
+ Map map = object.map(cage_base);
+ CHECK(map.IsMap(cage_base));
CHECK(ReadOnlyHeap::Contains(map) || heap()->map_space()->Contains(map));
// The object should not be code or a map.
- CHECK(!object.IsMap());
- CHECK(!object.IsAbstractCode());
+ CHECK(!object.IsMap(cage_base));
+ CHECK(!object.IsAbstractCode(cage_base));
// The object itself should look OK.
object.ObjectVerify(isolate);
// All the interior pointers should be contained in the heap.
VerifyPointersVisitor visitor(heap());
- int size = object.Size();
+ int size = object.Size(cage_base);
object.IterateBody(map, size, &visitor);
- if (object.IsExternalString()) {
+ if (object.IsExternalString(cage_base)) {
ExternalString external_string = ExternalString::cast(object);
- size_t size = external_string.ExternalPayloadSize();
- external_space_bytes[ExternalBackingStoreType::kExternalString] += size;
+ size_t string_size = external_string.ExternalPayloadSize();
+ external_space_bytes[ExternalBackingStoreType::kExternalString] +=
+ string_size;
}
current += size;
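
NewSpace::MakeLinearAllocationAreaIterable(), added above, pads the unused tail of the current page with a filler object so a heap walker never sees raw, uninitialized memory. A standalone sketch of that padding step; the filler encoding below is invented for illustration only:

#include <cstddef>
#include <cstdint>
#include <cstring>

struct Filler {
  std::uint32_t tag;   // marks "free space" for the iterator
  std::uint32_t size;  // size of the padded region in bytes
};

void MakeIterable(std::uint8_t* top, std::uint8_t* area_end) {
  std::size_t remaining = static_cast<std::size_t>(area_end - top);
  if (remaining < sizeof(Filler)) return;  // nothing worth padding
  Filler filler{0xF111E500u, static_cast<std::uint32_t>(remaining)};
  std::memcpy(top, &filler, sizeof(filler));
}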
diff --git a/deps/v8/src/heap/new-spaces.h b/deps/v8/src/heap/new-spaces.h
index 45129acea1..b1bec1b032 100644
--- a/deps/v8/src/heap/new-spaces.h
+++ b/deps/v8/src/heap/new-spaces.h
@@ -233,7 +233,8 @@ class V8_EXPORT_PRIVATE NewSpace
using const_iterator = ConstPageIterator;
NewSpace(Heap* heap, v8::PageAllocator* page_allocator,
- size_t initial_semispace_capacity, size_t max_semispace_capacity);
+ size_t initial_semispace_capacity, size_t max_semispace_capacity,
+ LinearAllocationArea* allocation_info);
~NewSpace() override { TearDown(); }
@@ -393,6 +394,10 @@ class V8_EXPORT_PRIVATE NewSpace
int size_in_bytes, AllocationAlignment alignment,
AllocationOrigin origin = AllocationOrigin::kRuntime);
+ V8_WARN_UNUSED_RESULT AllocationResult
+ AllocateRawAligned(int size_in_bytes, AllocationAlignment alignment,
+ AllocationOrigin origin = AllocationOrigin::kRuntime);
+
// Reset the allocation pointer to the beginning of the active semispace.
void ResetLinearAllocationArea();
@@ -469,6 +474,12 @@ class V8_EXPORT_PRIVATE NewSpace
return &pending_allocation_mutex_;
}
+ // Creates a filler object in the linear allocation area.
+ void MakeLinearAllocationAreaIterable();
+
+ // Creates a filler object in the linear allocation area and closes it.
+ void FreeLinearAllocationArea();
+
private:
static const int kAllocationBufferParkingThreshold = 4 * KB;
@@ -505,10 +516,6 @@ class V8_EXPORT_PRIVATE NewSpace
AllocateRawSlow(int size_in_bytes, AllocationAlignment alignment,
AllocationOrigin origin);
- V8_WARN_UNUSED_RESULT AllocationResult
- AllocateRawAligned(int size_in_bytes, AllocationAlignment alignment,
- AllocationOrigin origin = AllocationOrigin::kRuntime);
-
V8_WARN_UNUSED_RESULT AllocationResult AllocateRawUnaligned(
int size_in_bytes, AllocationOrigin origin = AllocationOrigin::kRuntime);
@@ -521,9 +528,9 @@ class V8_EXPORT_PRIVATE NewSpace
// For contiguous spaces, top should be in the space (or at the end) and limit
// should be the end of the space.
#define DCHECK_SEMISPACE_ALLOCATION_INFO(info, space) \
- SLOW_DCHECK((space).page_low() <= (info).top() && \
- (info).top() <= (space).page_high() && \
- (info).limit() <= (space).page_high())
+ SLOW_DCHECK((space).page_low() <= (info)->top() && \
+ (info)->top() <= (space).page_high() && \
+ (info)->limit() <= (space).page_high())
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/heap/object-stats.cc b/deps/v8/src/heap/object-stats.cc
index 294bff0e1a..379356a797 100644
--- a/deps/v8/src/heap/object-stats.cc
+++ b/deps/v8/src/heap/object-stats.cc
@@ -23,6 +23,7 @@
#include "src/objects/literal-objects-inl.h"
#include "src/objects/slots.h"
#include "src/objects/templates.h"
+#include "src/objects/visitors.h"
#include "src/utils/memcopy.h"
#include "src/utils/ostreams.h"
@@ -31,14 +32,15 @@ namespace internal {
static base::LazyMutex object_stats_mutex = LAZY_MUTEX_INITIALIZER;
-class FieldStatsCollector : public ObjectVisitor {
+class FieldStatsCollector : public ObjectVisitorWithCageBases {
public:
- FieldStatsCollector(size_t* tagged_fields_count,
+ FieldStatsCollector(Heap* heap, size_t* tagged_fields_count,
size_t* embedder_fields_count,
size_t* inobject_smi_fields_count,
size_t* boxed_double_fields_count,
size_t* string_data_count, size_t* raw_fields_count)
- : tagged_fields_count_(tagged_fields_count),
+ : ObjectVisitorWithCageBases(heap),
+ tagged_fields_count_(tagged_fields_count),
embedder_fields_count_(embedder_fields_count),
inobject_smi_fields_count_(inobject_smi_fields_count),
boxed_double_fields_count_(boxed_double_fields_count),
@@ -47,16 +49,16 @@ class FieldStatsCollector : public ObjectVisitor {
void RecordStats(HeapObject host) {
size_t old_pointer_fields_count = *tagged_fields_count_;
- host.Iterate(this);
+ host.Iterate(cage_base(), this);
size_t tagged_fields_count_in_object =
*tagged_fields_count_ - old_pointer_fields_count;
- int object_size_in_words = host.Size() / kTaggedSize;
+ int object_size_in_words = host.Size(cage_base()) / kTaggedSize;
DCHECK_LE(tagged_fields_count_in_object, object_size_in_words);
size_t raw_fields_count_in_object =
object_size_in_words - tagged_fields_count_in_object;
- if (host.IsJSObject()) {
+ if (host.IsJSObject(cage_base())) {
JSObjectFieldStats field_stats = GetInobjectFieldStats(host.map());
// Embedder fields are already included into pointer words.
DCHECK_LE(field_stats.embedded_fields_count_,
@@ -69,11 +71,11 @@ class FieldStatsCollector : public ObjectVisitor {
tagged_fields_count_in_object -= field_stats.smi_fields_count_;
*tagged_fields_count_ -= field_stats.smi_fields_count_;
*inobject_smi_fields_count_ += field_stats.smi_fields_count_;
- } else if (host.IsHeapNumber()) {
+ } else if (host.IsHeapNumber(cage_base())) {
DCHECK_LE(kDoubleSize / kTaggedSize, raw_fields_count_in_object);
raw_fields_count_in_object -= kDoubleSize / kTaggedSize;
*boxed_double_fields_count_ += 1;
- } else if (host.IsSeqString()) {
+ } else if (host.IsSeqString(cage_base())) {
int string_data = SeqString::cast(host).length(kAcquireLoad) *
(String::cast(host).IsOneByteRepresentation() ? 1 : 2) /
kTaggedSize;
@@ -456,7 +458,7 @@ ObjectStatsCollectorImpl::ObjectStatsCollectorImpl(Heap* heap,
marking_state_(
heap->mark_compact_collector()->non_atomic_marking_state()),
field_stats_collector_(
- &stats->tagged_fields_count_, &stats->embedder_fields_count_,
+ heap_, &stats->tagged_fields_count_, &stats->embedder_fields_count_,
&stats->inobject_smi_fields_count_,
&stats->boxed_double_fields_count_, &stats->string_data_count_,
&stats->raw_fields_count_) {}
@@ -1053,8 +1055,11 @@ void ObjectStatsCollectorImpl::RecordVirtualCodeDetails(Code code) {
void ObjectStatsCollectorImpl::RecordVirtualContext(Context context) {
if (context.IsNativeContext()) {
RecordObjectStats(context, NATIVE_CONTEXT_TYPE, context.Size());
- RecordSimpleVirtualObjectStats(context, context.retained_maps(),
- ObjectStats::RETAINED_MAPS_TYPE);
+ if (context.retained_maps().IsWeakArrayList()) {
+ RecordSimpleVirtualObjectStats(
+ context, WeakArrayList::cast(context.retained_maps()),
+ ObjectStats::RETAINED_MAPS_TYPE);
+ }
} else if (context.IsFunctionContext()) {
RecordObjectStats(context, FUNCTION_CONTEXT_TYPE, context.Size());
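
Several hunks in this file thread an explicit cage base through map(), Size(), and the Is*() predicates. With pointer compression, a 32-bit tagged field is only meaningful relative to the 4 GB cage it belongs to, roughly as in the following sketch (illustrative layout, not V8's exact encoding):

#include <cstdint>

using Address = std::uintptr_t;
using Tagged32 = std::uint32_t;

// Decompress an on-heap 32-bit field into a full pointer inside the cage.
Address Decompress(Address cage_base, Tagged32 compressed) {
  return cage_base + static_cast<Address>(compressed);
}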
diff --git a/deps/v8/src/heap/objects-visiting.h b/deps/v8/src/heap/objects-visiting.h
index cd85ef715c..858e279ec4 100644
--- a/deps/v8/src/heap/objects-visiting.h
+++ b/deps/v8/src/heap/objects-visiting.h
@@ -30,6 +30,7 @@ namespace internal {
V(FixedDoubleArray) \
V(JSArrayBuffer) \
V(JSDataView) \
+ V(JSFinalizationRegistry) \
V(JSFunction) \
V(JSObject) \
V(JSTypedArray) \
@@ -50,15 +51,17 @@ namespace internal {
V(Symbol) \
V(SyntheticModule) \
V(TransitionArray) \
+ IF_WASM(V, WasmApiFunctionRef) \
IF_WASM(V, WasmArray) \
IF_WASM(V, WasmCapiFunctionData) \
IF_WASM(V, WasmExportedFunctionData) \
IF_WASM(V, WasmFunctionData) \
IF_WASM(V, WasmIndirectFunctionTable) \
IF_WASM(V, WasmInstanceObject) \
+ IF_WASM(V, WasmInternalFunction) \
IF_WASM(V, WasmJSFunctionData) \
- IF_WASM(V, WasmApiFunctionRef) \
IF_WASM(V, WasmStruct) \
+ IF_WASM(V, WasmSuspenderObject) \
IF_WASM(V, WasmTypeInfo)
#define FORWARD_DECLARE(TypeName) class TypeName;
diff --git a/deps/v8/src/heap/paged-spaces-inl.h b/deps/v8/src/heap/paged-spaces-inl.h
index d59fd461e0..22b07c7442 100644
--- a/deps/v8/src/heap/paged-spaces-inl.h
+++ b/deps/v8/src/heap/paged-spaces-inl.h
@@ -29,7 +29,7 @@ HeapObject PagedSpaceObjectIterator::Next() {
HeapObject PagedSpaceObjectIterator::FromCurrentPage() {
while (cur_addr_ != cur_end_) {
HeapObject obj = HeapObject::FromAddress(cur_addr_);
- const int obj_size = obj.Size();
+ const int obj_size = obj.Size(cage_base());
cur_addr_ += obj_size;
DCHECK_LE(cur_addr_, cur_end_);
if (!obj.IsFreeSpaceOrFiller(cage_base())) {
@@ -79,38 +79,39 @@ size_t PagedSpace::RelinkFreeListCategories(Page* page) {
}
bool PagedSpace::TryFreeLast(Address object_address, int object_size) {
- if (allocation_info_.top() != kNullAddress) {
- return allocation_info_.DecrementTopIfAdjacent(object_address, object_size);
+ if (allocation_info_->top() != kNullAddress) {
+ return allocation_info_->DecrementTopIfAdjacent(object_address,
+ object_size);
}
return false;
}
bool PagedSpace::EnsureLabMain(int size_in_bytes, AllocationOrigin origin) {
- if (allocation_info_.top() + size_in_bytes <= allocation_info_.limit()) {
+ if (allocation_info_->top() + size_in_bytes <= allocation_info_->limit()) {
return true;
}
return RefillLabMain(size_in_bytes, origin);
}
AllocationResult PagedSpace::AllocateFastUnaligned(int size_in_bytes) {
- if (!allocation_info_.CanIncrementTop(size_in_bytes)) {
+ if (!allocation_info_->CanIncrementTop(size_in_bytes)) {
return AllocationResult::Retry(identity());
}
return AllocationResult(
- HeapObject::FromAddress(allocation_info_.IncrementTop(size_in_bytes)));
+ HeapObject::FromAddress(allocation_info_->IncrementTop(size_in_bytes)));
}
AllocationResult PagedSpace::AllocateFastAligned(
int size_in_bytes, int* aligned_size_in_bytes,
AllocationAlignment alignment) {
- Address current_top = allocation_info_.top();
+ Address current_top = allocation_info_->top();
int filler_size = Heap::GetFillToAlign(current_top, alignment);
int aligned_size = filler_size + size_in_bytes;
- if (!allocation_info_.CanIncrementTop(aligned_size)) {
+ if (!allocation_info_->CanIncrementTop(aligned_size)) {
return AllocationResult::Retry(identity());
}
HeapObject obj =
- HeapObject::FromAddress(allocation_info_.IncrementTop(aligned_size));
+ HeapObject::FromAddress(allocation_info_->IncrementTop(aligned_size));
if (aligned_size_in_bytes) *aligned_size_in_bytes = aligned_size;
if (filler_size > 0) {
obj = heap()->PrecedeWithFiller(obj, filler_size);
@@ -176,7 +177,7 @@ AllocationResult PagedSpace::AllocateRaw(int size_in_bytes,
DCHECK(!FLAG_enable_third_party_heap);
AllocationResult result;
- if (alignment != kWordAligned) {
+ if (USE_ALLOCATION_ALIGNMENT_BOOL && alignment != kTaggedAligned) {
result = AllocateFastAligned(size_in_bytes, nullptr, alignment);
} else {
result = AllocateFastUnaligned(size_in_bytes);
diff --git a/deps/v8/src/heap/paged-spaces.cc b/deps/v8/src/heap/paged-spaces.cc
index c8feac3e65..0db2d5f989 100644
--- a/deps/v8/src/heap/paged-spaces.cc
+++ b/deps/v8/src/heap/paged-spaces.cc
@@ -37,8 +37,8 @@ PagedSpaceObjectIterator::PagedSpaceObjectIterator(Heap* heap,
cage_base_(heap->isolate())
#endif // V8_COMPRESS_POINTERS
{
- space_->MakeLinearAllocationAreaIterable();
- heap->mark_compact_collector()->EnsureSweepingCompleted();
+ heap->MakeHeapIterable();
+ USE(space_);
}
PagedSpaceObjectIterator::PagedSpaceObjectIterator(Heap* heap,
@@ -54,8 +54,7 @@ PagedSpaceObjectIterator::PagedSpaceObjectIterator(Heap* heap,
cage_base_(heap->isolate())
#endif // V8_COMPRESS_POINTERS
{
- space_->MakeLinearAllocationAreaIterable();
- heap->mark_compact_collector()->EnsureSweepingCompleted();
+ heap->MakeHeapIterable();
#ifdef DEBUG
AllocationSpace owner = page->owner_identity();
DCHECK(owner == OLD_SPACE || owner == MAP_SPACE || owner == CODE_SPACE);
@@ -91,8 +90,9 @@ Page* PagedSpace::InitializePage(MemoryChunk* chunk) {
PagedSpace::PagedSpace(Heap* heap, AllocationSpace space,
Executability executable, FreeList* free_list,
+ LinearAllocationArea* allocation_info_,
CompactionSpaceKind compaction_space_kind)
- : SpaceWithLinearArea(heap, space, free_list),
+ : SpaceWithLinearArea(heap, space, free_list, allocation_info_),
executable_(executable),
compaction_space_kind_(compaction_space_kind) {
area_size_ = MemoryChunkLayout::AllocatableMemoryInMemoryChunk(space);
@@ -212,7 +212,7 @@ void PagedSpace::MergeCompactionSpace(CompactionSpace* other) {
size_t PagedSpace::CommittedPhysicalMemory() {
if (!base::OS::HasLazyCommits()) return CommittedMemory();
- BasicMemoryChunk::UpdateHighWaterMark(allocation_info_.top());
+ BasicMemoryChunk::UpdateHighWaterMark(allocation_info_->top());
base::MutexGuard guard(mutex());
size_t size = 0;
for (Page* page : *this) {
@@ -283,8 +283,8 @@ void PagedSpace::RemovePage(Page* page) {
void PagedSpace::SetTopAndLimit(Address top, Address limit) {
DCHECK(top == limit ||
Page::FromAddress(top) == Page::FromAddress(limit - 1));
- BasicMemoryChunk::UpdateHighWaterMark(allocation_info_.top());
- allocation_info_.Reset(top, limit);
+ BasicMemoryChunk::UpdateHighWaterMark(allocation_info_->top());
+ allocation_info_->Reset(top, limit);
base::Optional<base::SharedMutexGuard<base::kExclusive>> optional_guard;
if (!is_compaction_space())
@@ -309,7 +309,7 @@ void PagedSpace::ResetFreeList() {
void PagedSpace::ShrinkImmortalImmovablePages() {
DCHECK(!heap()->deserialization_complete());
- BasicMemoryChunk::UpdateHighWaterMark(allocation_info_.top());
+ BasicMemoryChunk::UpdateHighWaterMark(allocation_info_->top());
FreeLinearAllocationArea();
ResetFreeList();
for (Page* page : *this) {
@@ -483,7 +483,7 @@ void PagedSpace::ReleasePage(Page* page) {
free_list_->EvictFreeListItems(page);
- if (Page::FromAllocationAreaAddress(allocation_info_.top()) == page) {
+ if (Page::FromAllocationAreaAddress(allocation_info_->top()) == page) {
SetTopAndLimit(kNullAddress, kNullAddress);
}
@@ -499,7 +499,7 @@ void PagedSpace::ReleasePage(Page* page) {
void PagedSpace::SetReadable() {
DCHECK(identity() == CODE_SPACE);
for (Page* page : *this) {
- CHECK(heap()->memory_allocator()->IsMemoryChunkExecutable(page));
+ DCHECK(heap()->memory_allocator()->IsMemoryChunkExecutable(page));
page->SetReadable();
}
}
@@ -507,7 +507,7 @@ void PagedSpace::SetReadable() {
void PagedSpace::SetReadAndExecutable() {
DCHECK(identity() == CODE_SPACE);
for (Page* page : *this) {
- CHECK(heap()->memory_allocator()->IsMemoryChunkExecutable(page));
+ DCHECK(heap()->memory_allocator()->IsMemoryChunkExecutable(page));
page->SetReadAndExecutable();
}
}
@@ -515,7 +515,7 @@ void PagedSpace::SetReadAndExecutable() {
void PagedSpace::SetCodeModificationPermissions() {
DCHECK(identity() == CODE_SPACE);
for (Page* page : *this) {
- CHECK(heap()->memory_allocator()->IsMemoryChunkExecutable(page));
+ DCHECK(heap()->memory_allocator()->IsMemoryChunkExecutable(page));
page->SetCodeModificationPermissions();
}
}
@@ -559,7 +559,7 @@ bool PagedSpace::TryAllocationFromFreeListMain(size_t size_in_bytes,
Page* page = Page::FromHeapObject(new_node);
IncreaseAllocatedBytes(new_node_size, page);
- DCHECK_EQ(allocation_info_.start(), allocation_info_.top());
+ DCHECK_EQ(allocation_info_->start(), allocation_info_->top());
Address start = new_node.address();
Address end = new_node.address() + new_node_size;
Address limit = ComputeLimit(start, end, size_in_bytes);
@@ -603,23 +603,26 @@ base::Optional<std::pair<Address, size_t>> PagedSpace::RawRefillLabBackground(
local_heap, min_size_in_bytes, max_size_in_bytes, alignment, origin);
if (result) return result;
- // Now contribute to sweeping from background thread and then try to
- // reallocate.
- Sweeper::FreeSpaceMayContainInvalidatedSlots
- invalidated_slots_in_free_space =
- Sweeper::FreeSpaceMayContainInvalidatedSlots::kNo;
-
- const int kMaxPagesToSweep = 1;
- int max_freed = collector->sweeper()->ParallelSweepSpace(
- identity(), static_cast<int>(min_size_in_bytes), kMaxPagesToSweep,
- invalidated_slots_in_free_space);
-
- RefillFreeList();
-
- if (static_cast<size_t>(max_freed) >= min_size_in_bytes) {
- result = TryAllocationFromFreeListBackground(
- local_heap, min_size_in_bytes, max_size_in_bytes, alignment, origin);
- if (result) return result;
+ if (IsSweepingAllowedOnThread(local_heap)) {
+ // Now contribute to sweeping from background thread and then try to
+ // reallocate.
+ Sweeper::FreeSpaceMayContainInvalidatedSlots
+ invalidated_slots_in_free_space =
+ Sweeper::FreeSpaceMayContainInvalidatedSlots::kNo;
+
+ const int kMaxPagesToSweep = 1;
+ int max_freed = collector->sweeper()->ParallelSweepSpace(
+ identity(), static_cast<int>(min_size_in_bytes), kMaxPagesToSweep,
+ invalidated_slots_in_free_space);
+
+ RefillFreeList();
+
+ if (static_cast<size_t>(max_freed) >= min_size_in_bytes) {
+ result = TryAllocationFromFreeListBackground(
+ local_heap, min_size_in_bytes, max_size_in_bytes, alignment,
+ origin);
+ if (result) return result;
+ }
}
}
@@ -634,7 +637,9 @@ base::Optional<std::pair<Address, size_t>> PagedSpace::RawRefillLabBackground(
if (collector->sweeping_in_progress()) {
// Complete sweeping for this space.
- collector->DrainSweepingWorklistForSpace(identity());
+ if (IsSweepingAllowedOnThread(local_heap)) {
+ collector->DrainSweepingWorklistForSpace(identity());
+ }
RefillFreeList();
@@ -683,12 +688,21 @@ PagedSpace::TryAllocationFromFreeListBackground(LocalHeap* local_heap,
DCHECK_LE(limit, end);
DCHECK_LE(min_size_in_bytes, limit - start);
if (limit != end) {
+ if (identity() == CODE_SPACE) {
+ heap()->UnprotectAndRegisterMemoryChunk(
+ page, UnprotectMemoryOrigin::kMaybeOffMainThread);
+ }
Free(limit, end - limit, SpaceAccountingMode::kSpaceAccounted);
}
return std::make_pair(start, used_size_in_bytes);
}
+bool PagedSpace::IsSweepingAllowedOnThread(LocalHeap* local_heap) {
+ // Code space sweeping is only allowed on main thread.
+ return local_heap->is_main_thread() || identity() != CODE_SPACE;
+}
+
#ifdef DEBUG
void PagedSpace::Print() {}
#endif
@@ -696,7 +710,7 @@ void PagedSpace::Print() {}
#ifdef VERIFY_HEAP
void PagedSpace::Verify(Isolate* isolate, ObjectVisitor* visitor) {
bool allocation_pointer_found_in_space =
- (allocation_info_.top() == allocation_info_.limit());
+ (allocation_info_->top() == allocation_info_->limit());
size_t external_space_bytes[kNumTypes];
size_t external_page_bytes[kNumTypes];
@@ -712,7 +726,7 @@ void PagedSpace::Verify(Isolate* isolate, ObjectVisitor* visitor) {
external_page_bytes[static_cast<ExternalBackingStoreType>(i)] = 0;
}
- if (page == Page::FromAllocationAreaAddress(allocation_info_.top())) {
+ if (page == Page::FromAllocationAreaAddress(allocation_info_->top())) {
allocation_pointer_found_in_space = true;
}
CHECK(page->SweepingDone());
@@ -725,8 +739,8 @@ void PagedSpace::Verify(Isolate* isolate, ObjectVisitor* visitor) {
// The first word should be a map, and we expect all map pointers to
// be in map space.
- Map map = object.map();
- CHECK(map.IsMap());
+ Map map = object.map(cage_base);
+ CHECK(map.IsMap(cage_base));
CHECK(ReadOnlyHeap::Contains(map) ||
isolate->heap()->map_space()->Contains(map));
@@ -741,7 +755,7 @@ void PagedSpace::Verify(Isolate* isolate, ObjectVisitor* visitor) {
}
// All the interior pointers should be contained in the heap.
- int size = object.Size();
+ int size = object.Size(cage_base);
object.IterateBody(map, size, visitor);
CHECK(object.address() + size <= top);
end_of_previous_object = object.address() + size;
@@ -780,6 +794,7 @@ void PagedSpace::Verify(Isolate* isolate, ObjectVisitor* visitor) {
void PagedSpace::VerifyLiveBytes() {
IncrementalMarking::MarkingState* marking_state =
heap()->incremental_marking()->marking_state();
+ PtrComprCageBase cage_base(heap()->isolate());
for (Page* page : *this) {
CHECK(page->SweepingDone());
PagedSpaceObjectIterator it(heap(), this, page);
@@ -787,7 +802,7 @@ void PagedSpace::VerifyLiveBytes() {
for (HeapObject object = it.Next(); !object.is_null(); object = it.Next()) {
// All the interior pointers should be contained in the heap.
if (marking_state->IsBlack(object)) {
- black_size += object.Size();
+ black_size += object.Size(cage_base);
}
}
CHECK_LE(black_size, marking_state->live_bytes(page));
@@ -799,6 +814,7 @@ void PagedSpace::VerifyLiveBytes() {
void PagedSpace::VerifyCountersAfterSweeping(Heap* heap) {
size_t total_capacity = 0;
size_t total_allocated = 0;
+ PtrComprCageBase cage_base(heap->isolate());
for (Page* page : *this) {
DCHECK(page->SweepingDone());
total_capacity += page->area_size();
@@ -806,7 +822,7 @@ void PagedSpace::VerifyCountersAfterSweeping(Heap* heap) {
size_t real_allocated = 0;
for (HeapObject object = it.Next(); !object.is_null(); object = it.Next()) {
if (!object.IsFreeSpaceOrFiller()) {
- real_allocated += object.Size();
+ real_allocated += object.Size(cage_base);
}
}
total_allocated += page->allocated_bytes();
@@ -845,7 +861,7 @@ void PagedSpace::VerifyCountersBeforeConcurrentSweeping() {
void PagedSpace::UpdateInlineAllocationLimit(size_t min_size) {
// Ensure there are no unaccounted allocations.
- DCHECK_EQ(allocation_info_.start(), allocation_info_.top());
+ DCHECK_EQ(allocation_info_->start(), allocation_info_->top());
Address new_limit = ComputeLimit(top(), limit(), min_size);
DCHECK_LE(top(), new_limit);
@@ -857,10 +873,6 @@ void PagedSpace::UpdateInlineAllocationLimit(size_t min_size) {
// OldSpace implementation
void PagedSpace::PrepareForMarkCompact() {
- // We don't have a linear allocation area while sweeping. It will be restored
- // on the first allocation after the sweep.
- FreeLinearAllocationArea();
-
// Clear the free list before a full GC---it will be rebuilt afterward.
free_list_->Reset();
}
@@ -984,14 +996,10 @@ AllocationResult PagedSpace::AllocateRawSlow(int size_in_bytes,
kGCCallbackScheduleIdleGarbageCollection);
}
-#ifdef V8_HOST_ARCH_32_BIT
AllocationResult result =
- alignment != kWordAligned
+ USE_ALLOCATION_ALIGNMENT_BOOL && alignment != kTaggedAligned
? AllocateRawAligned(size_in_bytes, alignment, origin)
: AllocateRawUnaligned(size_in_bytes, origin);
-#else
- AllocationResult result = AllocateRawUnaligned(size_in_bytes, origin);
-#endif
return result;
}
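
The background refill path above now consults IsSweepingAllowedOnThread() before contributing to sweeping, since code-space sweeping stays on the main thread. A one-function sketch of that predicate with illustrative names:

enum class SpaceId { kOldSpace, kCodeSpace, kMapSpace };

// Background threads may help sweep, except for the code space, which the
// patch restricts to the main thread.
bool IsSweepingAllowedOnThread(bool is_main_thread, SpaceId space) {
  return is_main_thread || space != SpaceId::kCodeSpace;
}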
diff --git a/deps/v8/src/heap/paged-spaces.h b/deps/v8/src/heap/paged-spaces.h
index fd101446d6..bdc4dee23f 100644
--- a/deps/v8/src/heap/paged-spaces.h
+++ b/deps/v8/src/heap/paged-spaces.h
@@ -86,7 +86,7 @@ class V8_EXPORT_PRIVATE PagedSpace
// Creates a space with an id.
PagedSpace(
Heap* heap, AllocationSpace id, Executability executable,
- FreeList* free_list,
+ FreeList* free_list, LinearAllocationArea* allocation_info_,
CompactionSpaceKind compaction_space_kind = CompactionSpaceKind::kNone);
~PagedSpace() override { TearDown(); }
@@ -357,6 +357,10 @@ class V8_EXPORT_PRIVATE PagedSpace
bool HasPages() { return first_page() != nullptr; }
+ // Returns whether sweeping of this space is safe on this thread. Code space
+ // sweeping is only allowed on the main thread.
+ bool IsSweepingAllowedOnThread(LocalHeap* local_heap);
+
// Cleans up the space, frees all pages in this space except those belonging
// to the initial chunk, uncommits addresses in the initial chunk.
void TearDown();
@@ -453,12 +457,15 @@ class V8_EXPORT_PRIVATE CompactionSpace : public PagedSpace {
CompactionSpace(Heap* heap, AllocationSpace id, Executability executable,
CompactionSpaceKind compaction_space_kind)
: PagedSpace(heap, id, executable, FreeList::CreateFreeList(),
- compaction_space_kind) {
+ &allocation_info_, compaction_space_kind) {
DCHECK(is_compaction_space());
}
const std::vector<Page*>& GetNewPages() { return new_pages_; }
+ private:
+ LinearAllocationArea allocation_info_;
+
protected:
V8_WARN_UNUSED_RESULT bool RefillLabMain(int size_in_bytes,
AllocationOrigin origin) override;
@@ -505,9 +512,9 @@ class OldSpace : public PagedSpace {
public:
// Creates an old space object. The constructor does not allocate pages
// from OS.
- explicit OldSpace(Heap* heap)
- : PagedSpace(heap, OLD_SPACE, NOT_EXECUTABLE,
- FreeList::CreateFreeList()) {}
+ explicit OldSpace(Heap* heap, LinearAllocationArea* allocation_info)
+ : PagedSpace(heap, OLD_SPACE, NOT_EXECUTABLE, FreeList::CreateFreeList(),
+ allocation_info) {}
static bool IsAtPageStart(Address addr) {
return static_cast<intptr_t>(addr & kPageAlignmentMask) ==
@@ -529,7 +536,11 @@ class CodeSpace : public PagedSpace {
// Creates an old space object. The constructor does not allocate pages
// from OS.
explicit CodeSpace(Heap* heap)
- : PagedSpace(heap, CODE_SPACE, EXECUTABLE, FreeList::CreateFreeList()) {}
+ : PagedSpace(heap, CODE_SPACE, EXECUTABLE, FreeList::CreateFreeList(),
+ &paged_allocation_info_) {}
+
+ private:
+ LinearAllocationArea paged_allocation_info_;
};
// -----------------------------------------------------------------------------
@@ -539,8 +550,8 @@ class MapSpace : public PagedSpace {
public:
// Creates a map space object.
explicit MapSpace(Heap* heap)
- : PagedSpace(heap, MAP_SPACE, NOT_EXECUTABLE,
- FreeList::CreateFreeList()) {}
+ : PagedSpace(heap, MAP_SPACE, NOT_EXECUTABLE, FreeList::CreateFreeList(),
+ &paged_allocation_info_) {}
int RoundSizeDownToObjectAlignment(int size) override {
if (base::bits::IsPowerOfTwo(Map::kSize)) {
@@ -555,6 +566,9 @@ class MapSpace : public PagedSpace {
#ifdef VERIFY_HEAP
void VerifyObject(HeapObject obj) override;
#endif
+
+ private:
+ LinearAllocationArea paged_allocation_info_;
};
// Iterates over the chunks (pages and large object pages) that can contain
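
The constructor changes in this header split LinearAllocationArea ownership: OldSpace borrows an area owned by the heap, while CodeSpace, MapSpace, and CompactionSpace embed their own. A compact sketch of that split, using stand-in types rather than V8's:

struct LinearArea {
  void* top = nullptr;
  void* limit = nullptr;
};

class SpaceBase {
 public:
  explicit SpaceBase(LinearArea* area) : area_(area) {}

 protected:
  LinearArea* area_;  // borrowed; the base never owns it
};

class OldSpaceLike : public SpaceBase {
 public:
  // The heap owns the area and hands it in.
  explicit OldSpaceLike(LinearArea* heap_owned_area)
      : SpaceBase(heap_owned_area) {}
};

class CodeSpaceLike : public SpaceBase {
 public:
  // This space embeds its own area; taking its address in the initializer
  // list is fine because the base constructor only stores the pointer.
  CodeSpaceLike() : SpaceBase(&own_area_) {}

 private:
  LinearArea own_area_;
};

class HeapLike {
 public:
  HeapLike() : old_space_(&main_thread_area_) {}

 private:
  LinearArea main_thread_area_;  // owned here, shared with old_space_
  OldSpaceLike old_space_;
};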
diff --git a/deps/v8/src/heap/parked-scope.h b/deps/v8/src/heap/parked-scope.h
index c7bfa38ce1..76d863215e 100644
--- a/deps/v8/src/heap/parked-scope.h
+++ b/deps/v8/src/heap/parked-scope.h
@@ -44,6 +44,8 @@ class V8_NODISCARD UnparkedScope {
LocalHeap* const local_heap_;
};
+// Scope that automatically parks the thread while blocking on the given
+// base::Mutex.
class V8_NODISCARD ParkedMutexGuard {
public:
explicit ParkedMutexGuard(LocalIsolate* local_isolate, base::Mutex* mutex)
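
The new comment documents ParkedMutexGuard: when the mutex is contended, the thread parks itself first so a concurrent safepoint never waits on it. A standalone sketch of that shape; ParkFlag stands in for the LocalHeap state machinery:

#include <atomic>
#include <mutex>

struct ParkFlag {
  std::atomic<bool> parked{false};
};

class ParkedLockGuard {
 public:
  ParkedLockGuard(ParkFlag* flag, std::mutex* mutex)
      : flag_(flag), mutex_(mutex) {
    if (!mutex_->try_lock()) {
      // Contended: mark ourselves parked before blocking so a safepoint can
      // proceed without this thread.
      flag_->parked.store(true, std::memory_order_release);
      mutex_->lock();
      flag_->parked.store(false, std::memory_order_release);
    }
  }
  ~ParkedLockGuard() { mutex_->unlock(); }

 private:
  ParkFlag* flag_;
  std::mutex* mutex_;
};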
diff --git a/deps/v8/src/heap/read-only-spaces.cc b/deps/v8/src/heap/read-only-spaces.cc
index d88432bbbc..3fa267d26c 100644
--- a/deps/v8/src/heap/read-only-spaces.cc
+++ b/deps/v8/src/heap/read-only-spaces.cc
@@ -692,13 +692,10 @@ AllocationResult ReadOnlySpace::AllocateRawUnaligned(int size_in_bytes) {
AllocationResult ReadOnlySpace::AllocateRaw(int size_in_bytes,
AllocationAlignment alignment) {
-#ifdef V8_HOST_ARCH_32_BIT
- AllocationResult result = alignment != kWordAligned
- ? AllocateRawAligned(size_in_bytes, alignment)
- : AllocateRawUnaligned(size_in_bytes);
-#else
- AllocationResult result = AllocateRawUnaligned(size_in_bytes);
-#endif
+ AllocationResult result =
+ USE_ALLOCATION_ALIGNMENT_BOOL && alignment != kTaggedAligned
+ ? AllocateRawAligned(size_in_bytes, alignment)
+ : AllocateRawUnaligned(size_in_bytes);
HeapObject heap_obj;
if (!result.IsRetry() && result.To(&heap_obj)) {
DCHECK(heap()->incremental_marking()->marking_state()->IsBlack(heap_obj));
diff --git a/deps/v8/src/heap/remembered-set.h b/deps/v8/src/heap/remembered-set.h
index 5eefec989c..13a6fedf47 100644
--- a/deps/v8/src/heap/remembered-set.h
+++ b/deps/v8/src/heap/remembered-set.h
@@ -11,12 +11,12 @@
#include "src/base/memory.h"
#include "src/codegen/reloc-info.h"
#include "src/common/globals.h"
+#include "src/heap/base/worklist.h"
#include "src/heap/heap.h"
#include "src/heap/memory-chunk.h"
#include "src/heap/paged-spaces.h"
#include "src/heap/slot-set.h"
#include "src/heap/spaces.h"
-#include "src/heap/worklist.h"
namespace v8 {
namespace internal {
@@ -180,7 +180,7 @@ class RememberedSet : public AllStatic {
template <typename Callback>
static int IterateAndTrackEmptyBuckets(
MemoryChunk* chunk, Callback callback,
- Worklist<MemoryChunk*, 64>::View empty_chunks) {
+ ::heap::base::Worklist<MemoryChunk*, 64>::Local* empty_chunks) {
SlotSet* slot_set = chunk->slot_set<type>();
int slots = 0;
if (slot_set != nullptr) {
@@ -189,7 +189,7 @@ class RememberedSet : public AllStatic {
slots += slot_set->IterateAndTrackEmptyBuckets(chunk->address(), 0,
chunk->buckets(), callback,
possibly_empty_buckets);
- if (!possibly_empty_buckets->IsEmpty()) empty_chunks.Push(chunk);
+ if (!possibly_empty_buckets->IsEmpty()) empty_chunks->Push(chunk);
}
return slots;
}
diff --git a/deps/v8/src/heap/safepoint.cc b/deps/v8/src/heap/safepoint.cc
index bf3e5eaf95..bd4c610004 100644
--- a/deps/v8/src/heap/safepoint.cc
+++ b/deps/v8/src/heap/safepoint.cc
@@ -7,6 +7,10 @@
#include <atomic>
#include "src/base/logging.h"
+#include "src/base/optional.h"
+#include "src/base/platform/mutex.h"
+#include "src/common/globals.h"
+#include "src/execution/isolate.h"
#include "src/handles/handles.h"
#include "src/handles/local-handles.h"
#include "src/handles/persistent-handles.h"
@@ -14,7 +18,9 @@
#include "src/heap/heap-inl.h"
#include "src/heap/heap.h"
#include "src/heap/local-heap.h"
+#include "src/heap/parked-scope.h"
#include "src/logging/counters-scopes.h"
+#include "src/objects/objects.h"
namespace v8 {
namespace internal {
@@ -22,22 +28,80 @@ namespace internal {
IsolateSafepoint::IsolateSafepoint(Heap* heap)
: heap_(heap), local_heaps_head_(nullptr), active_safepoint_scopes_(0) {}
-void IsolateSafepoint::EnterSafepointScope(StopMainThread stop_main_thread) {
- // Safepoints need to be initiated on the main thread.
- DCHECK_EQ(ThreadId::Current(), heap_->isolate()->thread_id());
+void IsolateSafepoint::EnterLocalSafepointScope() {
+ // Safepoints need to be initiated on some main thread.
DCHECK_NULL(LocalHeap::Current());
+ DCHECK(AllowGarbageCollection::IsAllowed());
+ LockMutex(heap_->isolate()->main_thread_local_heap());
if (++active_safepoint_scopes_ > 1) return;
+ // Local safepoint can only be initiated on the isolate's main thread.
+ DCHECK_EQ(ThreadId::Current(), heap_->isolate()->thread_id());
+
TimedHistogramScope timer(
heap_->isolate()->counters()->gc_time_to_safepoint());
TRACE_GC(heap_->tracer(), GCTracer::Scope::TIME_TO_SAFEPOINT);
- local_heaps_mutex_.Lock();
+ barrier_.Arm();
+ size_t running = SetSafepointRequestedFlags(IncludeMainThread::kNo);
+ barrier_.WaitUntilRunningThreadsInSafepoint(running);
+}
+
+class PerClientSafepointData final {
+ public:
+ explicit PerClientSafepointData(Isolate* isolate) : isolate_(isolate) {}
+
+ void set_locked_and_running(size_t running) {
+ locked_ = true;
+ running_ = running;
+ }
+
+ IsolateSafepoint* safepoint() const { return heap()->safepoint(); }
+ Heap* heap() const { return isolate_->heap(); }
+ Isolate* isolate() const { return isolate_; }
+
+ bool is_locked() const { return locked_; }
+ size_t running() const { return running_; }
+
+ private:
+ Isolate* const isolate_;
+ size_t running_ = 0;
+ bool locked_ = false;
+};
+void IsolateSafepoint::InitiateGlobalSafepointScope(
+ Isolate* initiator, PerClientSafepointData* client_data) {
+ IgnoreLocalGCRequests ignore_gc_requests(initiator->heap());
+ LockMutex(initiator->main_thread_local_heap());
+ InitiateGlobalSafepointScopeRaw(initiator, client_data);
+}
+
+void IsolateSafepoint::TryInitiateGlobalSafepointScope(
+ Isolate* initiator, PerClientSafepointData* client_data) {
+ if (!local_heaps_mutex_.TryLock()) return;
+ InitiateGlobalSafepointScopeRaw(initiator, client_data);
+}
+
+void IsolateSafepoint::InitiateGlobalSafepointScopeRaw(
+ Isolate* initiator, PerClientSafepointData* client_data) {
+ CHECK_EQ(++active_safepoint_scopes_, 1);
barrier_.Arm();
- int running = 0;
+ size_t running =
+ SetSafepointRequestedFlags(IncludeMainThreadUnlessInitiator(initiator));
+ client_data->set_locked_and_running(running);
+}
+
+IsolateSafepoint::IncludeMainThread
+IsolateSafepoint::IncludeMainThreadUnlessInitiator(Isolate* initiator) {
+ const bool is_initiator = heap_->isolate() == initiator;
+ return is_initiator ? IncludeMainThread::kNo : IncludeMainThread::kYes;
+}
+
+size_t IsolateSafepoint::SetSafepointRequestedFlags(
+ IncludeMainThread include_main_thread) {
+ size_t running = 0;
// There needs to be at least one LocalHeap for the main thread.
DCHECK_NOT_NULL(local_heaps_head_);
@@ -45,7 +109,7 @@ void IsolateSafepoint::EnterSafepointScope(StopMainThread stop_main_thread) {
for (LocalHeap* local_heap = local_heaps_head_; local_heap;
local_heap = local_heap->next_) {
if (local_heap->is_main_thread() &&
- stop_main_thread == StopMainThread::kNo) {
+ include_main_thread == IncludeMainThread::kNo) {
continue;
}
@@ -58,21 +122,42 @@ void IsolateSafepoint::EnterSafepointScope(StopMainThread stop_main_thread) {
CHECK(!old_state.IsSafepointRequested());
}
- barrier_.WaitUntilRunningThreadsInSafepoint(running);
+ return running;
}
-void IsolateSafepoint::LeaveSafepointScope(StopMainThread stop_main_thread) {
- // Safepoints need to be initiated on the main thread.
- DCHECK_EQ(ThreadId::Current(), heap_->isolate()->thread_id());
- DCHECK_NULL(LocalHeap::Current());
+void IsolateSafepoint::LockMutex(LocalHeap* local_heap) {
+ if (!local_heaps_mutex_.TryLock()) {
+ ParkedScope parked_scope(local_heap);
+ local_heaps_mutex_.Lock();
+ }
+}
+
+void IsolateSafepoint::LeaveGlobalSafepointScope(Isolate* initiator) {
+ local_heaps_mutex_.AssertHeld();
+ CHECK_EQ(--active_safepoint_scopes_, 0);
+ ClearSafepointRequestedFlags(IncludeMainThreadUnlessInitiator(initiator));
+ barrier_.Disarm();
+ local_heaps_mutex_.Unlock();
+}
+void IsolateSafepoint::LeaveLocalSafepointScope() {
+ local_heaps_mutex_.AssertHeld();
DCHECK_GT(active_safepoint_scopes_, 0);
- if (--active_safepoint_scopes_ > 0) return;
+ if (--active_safepoint_scopes_ == 0) {
+ ClearSafepointRequestedFlags(IncludeMainThread::kNo);
+ barrier_.Disarm();
+ }
+
+ local_heaps_mutex_.Unlock();
+}
+
+void IsolateSafepoint::ClearSafepointRequestedFlags(
+ IncludeMainThread include_main_thread) {
for (LocalHeap* local_heap = local_heaps_head_; local_heap;
local_heap = local_heap->next_) {
if (local_heap->is_main_thread() &&
- stop_main_thread == StopMainThread::kNo) {
+ include_main_thread == IncludeMainThread::kNo) {
continue;
}
@@ -84,10 +169,6 @@ void IsolateSafepoint::LeaveSafepointScope(StopMainThread stop_main_thread) {
CHECK_IMPLIES(old_state.IsCollectionRequested(),
local_heap->is_main_thread());
}
-
- barrier_.Disarm();
-
- local_heaps_mutex_.Unlock();
}
void IsolateSafepoint::WaitInSafepoint() { barrier_.WaitInSafepoint(); }
@@ -96,6 +177,11 @@ void IsolateSafepoint::WaitInUnpark() { barrier_.WaitInUnpark(); }
void IsolateSafepoint::NotifyPark() { barrier_.NotifyPark(); }
+void IsolateSafepoint::WaitUntilRunningThreadsInSafepoint(
+ const PerClientSafepointData* client_data) {
+ barrier_.WaitUntilRunningThreadsInSafepoint(client_data->running());
+}
+
void IsolateSafepoint::Barrier::Arm() {
base::MutexGuard guard(&mutex_);
DCHECK(!IsArmed());
@@ -112,7 +198,7 @@ void IsolateSafepoint::Barrier::Disarm() {
}
void IsolateSafepoint::Barrier::WaitUntilRunningThreadsInSafepoint(
- int running) {
+ size_t running) {
base::MutexGuard guard(&mutex_);
DCHECK(IsArmed());
while (stopped_ < running) {
@@ -147,16 +233,8 @@ void IsolateSafepoint::Barrier::WaitInUnpark() {
}
}
-SafepointScope::SafepointScope(Heap* heap) : safepoint_(heap->safepoint()) {
- safepoint_->EnterSafepointScope(IsolateSafepoint::StopMainThread::kNo);
-}
-
-SafepointScope::~SafepointScope() {
- safepoint_->LeaveSafepointScope(IsolateSafepoint::StopMainThread::kNo);
-}
-
bool IsolateSafepoint::ContainsLocalHeap(LocalHeap* local_heap) {
- base::MutexGuard guard(&local_heaps_mutex_);
+ base::RecursiveMutexGuard guard(&local_heaps_mutex_);
LocalHeap* current = local_heaps_head_;
while (current) {
@@ -168,7 +246,7 @@ bool IsolateSafepoint::ContainsLocalHeap(LocalHeap* local_heap) {
}
bool IsolateSafepoint::ContainsAnyLocalHeap() {
- base::MutexGuard guard(&local_heaps_mutex_);
+ base::RecursiveMutexGuard guard(&local_heaps_mutex_);
return local_heaps_head_ != nullptr;
}
@@ -180,5 +258,138 @@ void IsolateSafepoint::Iterate(RootVisitor* visitor) {
}
}
+void IsolateSafepoint::AssertMainThreadIsOnlyThread() {
+ DCHECK_EQ(local_heaps_head_, heap_->main_thread_local_heap());
+ DCHECK_NULL(heap_->main_thread_local_heap()->next_);
+}
+
+SafepointScope::SafepointScope(Heap* heap) : safepoint_(heap->safepoint()) {
+ safepoint_->EnterLocalSafepointScope();
+}
+
+SafepointScope::~SafepointScope() { safepoint_->LeaveLocalSafepointScope(); }
+
+GlobalSafepoint::GlobalSafepoint(Isolate* isolate)
+ : shared_isolate_(isolate), shared_heap_(isolate->heap()) {}
+
+void GlobalSafepoint::AppendClient(Isolate* client) {
+ clients_mutex_.AssertHeld();
+
+ DCHECK_NULL(client->global_safepoint_prev_client_isolate_);
+ DCHECK_NULL(client->global_safepoint_next_client_isolate_);
+ DCHECK_NE(clients_head_, client);
+
+ if (clients_head_) {
+ clients_head_->global_safepoint_prev_client_isolate_ = client;
+ }
+
+ client->global_safepoint_prev_client_isolate_ = nullptr;
+ client->global_safepoint_next_client_isolate_ = clients_head_;
+
+ clients_head_ = client;
+ client->shared_isolate_ = shared_isolate_;
+}
+
+void GlobalSafepoint::RemoveClient(Isolate* client) {
+ DCHECK_EQ(client->heap()->gc_state(), Heap::TEAR_DOWN);
+
+ // A shared heap may have already acquired the client mutex to perform a
+ // shared GC. We need to park the Isolate here to allow for a shared GC.
+ IgnoreLocalGCRequests ignore_gc_requests(client->heap());
+ ParkedMutexGuard guard(client->main_thread_local_heap(), &clients_mutex_);
+
+ if (client->global_safepoint_next_client_isolate_) {
+ client->global_safepoint_next_client_isolate_
+ ->global_safepoint_prev_client_isolate_ =
+ client->global_safepoint_prev_client_isolate_;
+ }
+
+ if (client->global_safepoint_prev_client_isolate_) {
+ client->global_safepoint_prev_client_isolate_
+ ->global_safepoint_next_client_isolate_ =
+ client->global_safepoint_next_client_isolate_;
+ } else {
+ DCHECK_EQ(clients_head_, client);
+ clients_head_ = client->global_safepoint_next_client_isolate_;
+ }
+
+ client->shared_isolate_ = nullptr;
+}
+
+void GlobalSafepoint::AssertNoClients() { DCHECK_NULL(clients_head_); }
+
+void GlobalSafepoint::EnterGlobalSafepointScope(Isolate* initiator) {
+ // Safepoints need to be initiated on some main thread.
+ DCHECK_NULL(LocalHeap::Current());
+
+ if (!clients_mutex_.TryLock()) {
+ IgnoreLocalGCRequests ignore_gc_requests(initiator->heap());
+ ParkedScope parked_scope(initiator->main_thread_local_heap());
+ clients_mutex_.Lock();
+ }
+
+ TimedHistogramScope timer(
+ initiator->counters()->gc_time_to_global_safepoint());
+ TRACE_GC(initiator->heap()->tracer(),
+ GCTracer::Scope::TIME_TO_GLOBAL_SAFEPOINT);
+
+ std::vector<PerClientSafepointData> clients;
+
+ // Try to initiate safepoint for all clients. Fail immediately when the
+ // local_heaps_mutex_ can't be locked without blocking.
+ IterateClientIsolates([&clients, initiator](Isolate* client) {
+ clients.emplace_back(client);
+ client->heap()->safepoint()->TryInitiateGlobalSafepointScope(
+ initiator, &clients.back());
+ });
+
+ // Iterate all clients again to initiate the safepoint for all of them - even
+ // if that means blocking.
+ for (PerClientSafepointData& client : clients) {
+ if (client.is_locked()) continue;
+ client.safepoint()->InitiateGlobalSafepointScope(initiator, &client);
+ }
+
+#if DEBUG
+ for (const PerClientSafepointData& client : clients) {
+ DCHECK_EQ(client.isolate()->shared_isolate(), shared_isolate_);
+ DCHECK(client.heap()->deserialization_complete());
+ }
+#endif // DEBUG
+
+ // Now that safepoints were initiated for all clients, wait until all threads
+ // of all clients reached a safepoint.
+ for (const PerClientSafepointData& client : clients) {
+ DCHECK(client.is_locked());
+ client.safepoint()->WaitUntilRunningThreadsInSafepoint(&client);
+ }
+}
+
+void GlobalSafepoint::LeaveGlobalSafepointScope(Isolate* initiator) {
+ IterateClientIsolates([initiator](Isolate* client) {
+ Heap* client_heap = client->heap();
+ client_heap->safepoint()->LeaveGlobalSafepointScope(initiator);
+ });
+
+ clients_mutex_.Unlock();
+}
+
+GlobalSafepointScope::GlobalSafepointScope(Isolate* initiator)
+ : initiator_(initiator), shared_isolate_(initiator->shared_isolate()) {
+ if (shared_isolate_) {
+ shared_isolate_->global_safepoint()->EnterGlobalSafepointScope(initiator_);
+ } else {
+ initiator_->heap()->safepoint()->EnterLocalSafepointScope();
+ }
+}
+
+GlobalSafepointScope::~GlobalSafepointScope() {
+ if (shared_isolate_) {
+ shared_isolate_->global_safepoint()->LeaveGlobalSafepointScope(initiator_);
+ } else {
+ initiator_->heap()->safepoint()->LeaveLocalSafepointScope();
+ }
+}
+
} // namespace internal
} // namespace v8
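
EnterGlobalSafepointScope above follows a two-phase protocol: first try to initiate a safepoint in every client isolate without blocking, then go back and block only on the stragglers, and only then wait for all running threads. A schematic, self-contained sketch of that ordering; Client and its methods are stand-ins, not V8's API:

#include <vector>

struct Client {
  bool contended = false;  // pretend another thread holds this client's lock
  bool locked = false;
  bool TryInitiate() {                // phase 1: non-blocking attempt
    if (contended) return false;
    locked = true;
    return true;
  }
  void Initiate() { locked = true; }  // phase 2: blocking fallback
  void WaitForThreads() const {}      // phase 3: barrier wait (elided)
};

void EnterGlobalSafepoint(std::vector<Client>& clients) {
  // Phase 1: opportunistic initiation; never blocks on a client's lock.
  for (Client& c : clients) c.TryInitiate();
  // Phase 2: block only on clients that could not be locked above.
  for (Client& c : clients) {
    if (!c.locked) c.Initiate();
  }
  // Phase 3: with every client initiated, wait for all running threads to
  // reach their safepoint.
  for (const Client& c : clients) c.WaitForThreads();
}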
diff --git a/deps/v8/src/heap/safepoint.h b/deps/v8/src/heap/safepoint.h
index 961bfdf001..8a6823c603 100644
--- a/deps/v8/src/heap/safepoint.h
+++ b/deps/v8/src/heap/safepoint.h
@@ -7,6 +7,7 @@
#include "src/base/platform/condition-variable.h"
#include "src/base/platform/mutex.h"
+#include "src/common/globals.h"
#include "src/handles/persistent-handles.h"
#include "src/heap/local-heap.h"
#include "src/objects/visitors.h"
@@ -16,23 +17,15 @@ namespace internal {
class Heap;
class LocalHeap;
+class PerClientSafepointData;
class RootVisitor;
-// Used to bring all threads with heap access to a safepoint such that e.g. a
-// garbage collection can be performed.
+// Used to bring all threads with heap access in an isolate to a safepoint such
+// that e.g. a garbage collection can be performed.
class IsolateSafepoint final {
public:
explicit IsolateSafepoint(Heap* heap);
- // Wait until unpark operation is safe again
- void WaitInUnpark();
-
- // Enter the safepoint from a running thread
- void WaitInSafepoint();
-
- // Running thread reached a safepoint by parking itself.
- void NotifyPark();
-
V8_EXPORT_PRIVATE bool ContainsLocalHeap(LocalHeap* local_heap);
V8_EXPORT_PRIVATE bool ContainsAnyLocalHeap();
@@ -51,6 +44,8 @@ class IsolateSafepoint final {
void AssertActive() { local_heaps_mutex_.AssertHeld(); }
+ void AssertMainThreadIsOnlyThread();
+
private:
class Barrier {
base::Mutex mutex_;
@@ -58,7 +53,7 @@ class IsolateSafepoint final {
base::ConditionVariable cv_stopped_;
bool armed_;
- int stopped_ = 0;
+ size_t stopped_ = 0;
bool IsArmed() { return armed_; }
@@ -67,23 +62,53 @@ class IsolateSafepoint final {
void Arm();
void Disarm();
- void WaitUntilRunningThreadsInSafepoint(int running);
+ void WaitUntilRunningThreadsInSafepoint(size_t running);
void WaitInSafepoint();
void WaitInUnpark();
void NotifyPark();
};
- enum class StopMainThread { kYes, kNo };
+ enum class IncludeMainThread { kYes, kNo };
+
+ // Wait until unpark operation is safe again.
+ void WaitInUnpark();
+
+ // Enter the safepoint from a running thread.
+ void WaitInSafepoint();
+
+ // Running thread reached a safepoint by parking itself.
+ void NotifyPark();
+
+ // Methods for entering/leaving local safepoint scopes.
+ void EnterLocalSafepointScope();
+ void LeaveLocalSafepointScope();
+
+ // Methods for entering/leaving global safepoint scopes.
+ void TryInitiateGlobalSafepointScope(Isolate* initiator,
+ PerClientSafepointData* client_data);
+ void InitiateGlobalSafepointScope(Isolate* initiator,
+ PerClientSafepointData* client_data);
+ void InitiateGlobalSafepointScopeRaw(Isolate* initiator,
+ PerClientSafepointData* client_data);
+ void LeaveGlobalSafepointScope(Isolate* initiator);
+
+ // Blocks until all running threads reached a safepoint.
+ void WaitUntilRunningThreadsInSafepoint(
+ const PerClientSafepointData* client_data);
+
+ IncludeMainThread IncludeMainThreadUnlessInitiator(Isolate* initiator);
+
+ void LockMutex(LocalHeap* local_heap);
- void EnterSafepointScope(StopMainThread stop_main_thread);
- void LeaveSafepointScope(StopMainThread stop_main_thread);
+ size_t SetSafepointRequestedFlags(IncludeMainThread include_main_thread);
+ void ClearSafepointRequestedFlags(IncludeMainThread include_main_thread);
template <typename Callback>
void AddLocalHeap(LocalHeap* local_heap, Callback callback) {
// Safepoint holds this lock in order to stop threads from starting or
// stopping.
- base::MutexGuard guard(&local_heaps_mutex_);
+ base::RecursiveMutexGuard guard(&local_heaps_mutex_);
// Additional code protected from safepoint
callback();
@@ -97,7 +122,7 @@ class IsolateSafepoint final {
template <typename Callback>
void RemoveLocalHeap(LocalHeap* local_heap, Callback callback) {
- base::MutexGuard guard(&local_heaps_mutex_);
+ base::RecursiveMutexGuard guard(&local_heaps_mutex_);
// Additional code protected from safepoint
callback();
@@ -113,12 +138,16 @@ class IsolateSafepoint final {
Barrier barrier_;
Heap* heap_;
- base::Mutex local_heaps_mutex_;
+ // Mutex is used both for safepointing and adding/removing threads. A
+ // RecursiveMutex is needed since we need to support nested SafepointScopes.
+ base::RecursiveMutex local_heaps_mutex_;
LocalHeap* local_heaps_head_;
int active_safepoint_scopes_;
friend class Heap;
+ friend class GlobalSafepoint;
+ friend class GlobalSafepointScope;
friend class LocalHeap;
friend class PersistentHandles;
friend class SafepointScope;
@@ -133,6 +162,48 @@ class V8_NODISCARD SafepointScope {
IsolateSafepoint* safepoint_;
};
+// Used for reaching a global safepoint, a safepoint across all client isolates
+// of the shared isolate.
+class GlobalSafepoint final {
+ public:
+ explicit GlobalSafepoint(Isolate* isolate);
+
+ void AppendClient(Isolate* client);
+ void RemoveClient(Isolate* client);
+
+ template <typename Callback>
+ void IterateClientIsolates(Callback callback) {
+ for (Isolate* current = clients_head_; current;
+ current = current->global_safepoint_next_client_isolate_) {
+ callback(current);
+ }
+ }
+
+ void AssertNoClients();
+
+ private:
+ void EnterGlobalSafepointScope(Isolate* initiator);
+ void LeaveGlobalSafepointScope(Isolate* initiator);
+
+ Isolate* const shared_isolate_;
+ Heap* const shared_heap_;
+ base::Mutex clients_mutex_;
+ Isolate* clients_head_ = nullptr;
+
+ friend class GlobalSafepointScope;
+ friend class Isolate;
+};
+
+class V8_NODISCARD GlobalSafepointScope {
+ public:
+ V8_EXPORT_PRIVATE explicit GlobalSafepointScope(Isolate* initiator);
+ V8_EXPORT_PRIVATE ~GlobalSafepointScope();
+
+ private:
+ Isolate* const initiator_;
+ Isolate* const shared_isolate_;
+};
+
} // namespace internal
} // namespace v8
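The safepoint.h hunks above switch local_heaps_mutex_ from base::Mutex to base::RecursiveMutex and introduce GlobalSafepoint/GlobalSafepointScope, which implies that safepoint scopes may now nest on the same thread (a global scope around or inside a local one). A minimal, self-contained sketch of that RAII-plus-recursive-mutex pattern in plain C++, not V8's actual classes:

    // Illustrative sketch only, not V8's classes: an RAII scope over a
    // recursive mutex so that scopes may nest on the same thread.
    #include <mutex>

    class SafepointLikeScope {
     public:
      explicit SafepointLikeScope(std::recursive_mutex& mu) : mu_(mu) {
        mu_.lock();  // Re-entrant: a nested scope on the same thread succeeds.
        ++depth_;
      }
      ~SafepointLikeScope() {
        --depth_;
        mu_.unlock();
      }
      static int depth() { return depth_; }

     private:
      std::recursive_mutex& mu_;
      static thread_local int depth_;
    };

    thread_local int SafepointLikeScope::depth_ = 0;

    int main() {
      std::recursive_mutex mu;
      SafepointLikeScope outer(mu);  // e.g. a global safepoint scope
      SafepointLikeScope inner(mu);  // e.g. a nested local safepoint scope
      return SafepointLikeScope::depth() == 2 ? 0 : 1;
    }

With a plain std::mutex in the same position the inner scope would deadlock; the recursive mutex is what makes nested scopes legal, which is what the new comment on local_heaps_mutex_ states.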
diff --git a/deps/v8/src/heap/scavenger-inl.h b/deps/v8/src/heap/scavenger-inl.h
index 152bc03613..8a0a1da96b 100644
--- a/deps/v8/src/heap/scavenger-inl.h
+++ b/deps/v8/src/heap/scavenger-inl.h
@@ -16,93 +16,59 @@
namespace v8 {
namespace internal {
-void Scavenger::PromotionList::View::PushRegularObject(HeapObject object,
- int size) {
- promotion_list_->PushRegularObject(task_id_, object, size);
+void Scavenger::PromotionList::Local::PushRegularObject(HeapObject object,
+ int size) {
+ regular_object_promotion_list_local_.Push({object, size});
}
-void Scavenger::PromotionList::View::PushLargeObject(HeapObject object, Map map,
- int size) {
- promotion_list_->PushLargeObject(task_id_, object, map, size);
+void Scavenger::PromotionList::Local::PushLargeObject(HeapObject object,
+ Map map, int size) {
+ large_object_promotion_list_local_.Push({object, map, size});
}
-bool Scavenger::PromotionList::View::IsEmpty() {
- return promotion_list_->IsEmpty();
+size_t Scavenger::PromotionList::Local::LocalPushSegmentSize() const {
+ return regular_object_promotion_list_local_.PushSegmentSize() +
+ large_object_promotion_list_local_.PushSegmentSize();
}
-size_t Scavenger::PromotionList::View::LocalPushSegmentSize() {
- return promotion_list_->LocalPushSegmentSize(task_id_);
-}
-
-bool Scavenger::PromotionList::View::Pop(struct PromotionListEntry* entry) {
- return promotion_list_->Pop(task_id_, entry);
-}
-
-void Scavenger::PromotionList::View::FlushToGlobal() {
- promotion_list_->FlushToGlobal(task_id_);
-}
-
-bool Scavenger::PromotionList::View::IsGlobalPoolEmpty() {
- return promotion_list_->IsGlobalPoolEmpty();
-}
-
-bool Scavenger::PromotionList::View::ShouldEagerlyProcessPromotionList() {
- return promotion_list_->ShouldEagerlyProcessPromotionList(task_id_);
-}
-
-void Scavenger::PromotionList::PushRegularObject(int task_id, HeapObject object,
- int size) {
- regular_object_promotion_list_.Push(task_id, ObjectAndSize(object, size));
-}
-
-void Scavenger::PromotionList::PushLargeObject(int task_id, HeapObject object,
- Map map, int size) {
- large_object_promotion_list_.Push(task_id, {object, map, size});
-}
-
-bool Scavenger::PromotionList::IsEmpty() {
- return regular_object_promotion_list_.IsEmpty() &&
- large_object_promotion_list_.IsEmpty();
-}
-
-size_t Scavenger::PromotionList::LocalPushSegmentSize(int task_id) {
- return regular_object_promotion_list_.LocalPushSegmentSize(task_id) +
- large_object_promotion_list_.LocalPushSegmentSize(task_id);
-}
-
-bool Scavenger::PromotionList::Pop(int task_id,
- struct PromotionListEntry* entry) {
+bool Scavenger::PromotionList::Local::Pop(struct PromotionListEntry* entry) {
ObjectAndSize regular_object;
- if (regular_object_promotion_list_.Pop(task_id, &regular_object)) {
+ if (regular_object_promotion_list_local_.Pop(&regular_object)) {
entry->heap_object = regular_object.first;
entry->size = regular_object.second;
entry->map = entry->heap_object.map();
return true;
}
- return large_object_promotion_list_.Pop(task_id, entry);
-}
-
-void Scavenger::PromotionList::FlushToGlobal(int task_id) {
- regular_object_promotion_list_.FlushToGlobal(task_id);
- large_object_promotion_list_.FlushToGlobal(task_id);
+ return large_object_promotion_list_local_.Pop(entry);
}
-size_t Scavenger::PromotionList::GlobalPoolSize() const {
- return regular_object_promotion_list_.GlobalPoolSize() +
- large_object_promotion_list_.GlobalPoolSize();
+void Scavenger::PromotionList::Local::Publish() {
+ regular_object_promotion_list_local_.Publish();
+ large_object_promotion_list_local_.Publish();
}
-bool Scavenger::PromotionList::IsGlobalPoolEmpty() {
- return regular_object_promotion_list_.IsGlobalPoolEmpty() &&
- large_object_promotion_list_.IsGlobalPoolEmpty();
+bool Scavenger::PromotionList::Local::IsGlobalPoolEmpty() const {
+ return regular_object_promotion_list_local_.IsGlobalEmpty() &&
+ large_object_promotion_list_local_.IsGlobalEmpty();
}
-bool Scavenger::PromotionList::ShouldEagerlyProcessPromotionList(int task_id) {
+bool Scavenger::PromotionList::Local::ShouldEagerlyProcessPromotionList()
+ const {
// Threshold when to prioritize processing of the promotion list. Right
// now we only look into the regular object list.
const int kProcessPromotionListThreshold =
kRegularObjectPromotionListSegmentSize / 2;
- return LocalPushSegmentSize(task_id) < kProcessPromotionListThreshold;
+ return LocalPushSegmentSize() < kProcessPromotionListThreshold;
+}
+
+bool Scavenger::PromotionList::IsEmpty() const {
+ return regular_object_promotion_list_.IsEmpty() &&
+ large_object_promotion_list_.IsEmpty();
+}
+
+size_t Scavenger::PromotionList::Size() const {
+ return regular_object_promotion_list_.Size() +
+ large_object_promotion_list_.Size();
}
void Scavenger::PageMemoryFence(MaybeObject object) {
@@ -169,7 +135,7 @@ CopyAndForwardResult Scavenger::SemiSpaceCopyObject(
}
HeapObjectReference::Update(slot, target);
if (object_fields == ObjectFields::kMaybePointers) {
- copied_list_.Push(ObjectAndSize(target, object_size));
+ copied_list_local_.Push(ObjectAndSize(target, object_size));
}
copied_size_ += object_size;
return CopyAndForwardResult::SUCCESS_YOUNG_GENERATION;
@@ -217,7 +183,7 @@ CopyAndForwardResult Scavenger::PromoteObject(Map map, THeapObjectSlot slot,
}
HeapObjectReference::Update(slot, target);
if (object_fields == ObjectFields::kMaybePointers) {
- promotion_list_.PushRegularObject(target, object_size);
+ promotion_list_local_.PushRegularObject(target, object_size);
}
promoted_size_ += object_size;
return CopyAndForwardResult::SUCCESS_OLD_GENERATION;
@@ -246,7 +212,7 @@ bool Scavenger::HandleLargeObject(Map map, HeapObject object, int object_size,
surviving_new_large_objects_.insert({object, map});
promoted_size_ += object_size;
if (object_fields == ObjectFields::kMaybePointers) {
- promotion_list_.PushLargeObject(object, map, object_size);
+ promotion_list_local_.PushLargeObject(object, map, object_size);
}
}
return true;
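ShouldEagerlyProcessPromotionList() above compares the local push-segment size against half of the regular-object segment size. A standalone sketch of that check, with the constants taken from the scavenger.h hunk further down and the function name kept only for orientation:

    // Constants as declared in scavenger.h further down in this patch.
    #include <cstddef>

    constexpr size_t kRegularObjectPromotionListSegmentSize = 256;
    constexpr size_t kProcessPromotionListThreshold =
        kRegularObjectPromotionListSegmentSize / 2;  // 128 entries

    // Mirrors PromotionList::Local::ShouldEagerlyProcessPromotionList():
    // keep draining the copied list while less than half a segment of
    // promoted work is pending locally (regular + large sublists).
    bool ShouldEagerlyProcessPromotionList(size_t regular_pending,
                                           size_t large_pending) {
      return regular_pending + large_pending < kProcessPromotionListThreshold;
    }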
diff --git a/deps/v8/src/heap/scavenger.cc b/deps/v8/src/heap/scavenger.cc
index 9faf71f9ee..3e3a67a5e6 100644
--- a/deps/v8/src/heap/scavenger.cc
+++ b/deps/v8/src/heap/scavenger.cc
@@ -212,9 +212,9 @@ size_t ScavengerCollector::JobTask::GetMaxConcurrency(
// GlobalPoolSize() of copied_list_ and promotion_list_.
return std::min<size_t>(
scavengers_->size(),
- std::max<size_t>(remaining_memory_chunks_.load(std::memory_order_relaxed),
- worker_count + copied_list_->GlobalPoolSize() +
- promotion_list_->GlobalPoolSize()));
+ std::max<size_t>(
+ remaining_memory_chunks_.load(std::memory_order_relaxed),
+ worker_count + copied_list_->Size() + promotion_list_->Size()));
}
void ScavengerCollector::JobTask::ProcessItems(JobDelegate* delegate,
@@ -272,11 +272,11 @@ void ScavengerCollector::CollectGarbage() {
DCHECK(surviving_new_large_objects_.empty());
std::vector<std::unique_ptr<Scavenger>> scavengers;
- Worklist<MemoryChunk*, 64> empty_chunks;
+ Scavenger::EmptyChunksList empty_chunks;
const int num_scavenge_tasks = NumberOfScavengeTasks();
- Scavenger::CopiedList copied_list(num_scavenge_tasks);
- Scavenger::PromotionList promotion_list(num_scavenge_tasks);
- EphemeronTableList ephemeron_table_list(num_scavenge_tasks);
+ Scavenger::CopiedList copied_list;
+ Scavenger::PromotionList promotion_list;
+ EphemeronTableList ephemeron_table_list;
{
Sweeper* sweeper = heap_->mark_compact_collector()->sweeper();
@@ -341,7 +341,7 @@ void ScavengerCollector::CollectGarbage() {
heap_->IterateRoots(&root_scavenge_visitor, options);
isolate_->global_handles()->IterateYoungStrongAndDependentRoots(
&root_scavenge_visitor);
- scavengers[kMainThreadId]->Flush();
+ scavengers[kMainThreadId]->Publish();
}
{
// Parallel phase scavenging all copied and promoted objects.
@@ -428,9 +428,9 @@ void ScavengerCollector::CollectGarbage() {
{
TRACE_GC(heap_->tracer(), GCTracer::Scope::SCAVENGER_FREE_REMEMBERED_SET);
+ Scavenger::EmptyChunksList::Local empty_chunks_local(&empty_chunks);
MemoryChunk* chunk;
-
- while (empty_chunks.Pop(kMainThreadId, &chunk)) {
+ while (empty_chunks_local.Pop(&chunk)) {
      // Since sweeping was already restarted, only check chunks that already
      // got swept.
if (chunk->SweepingDone()) {
@@ -534,16 +534,22 @@ int ScavengerCollector::NumberOfScavengeTasks() {
return tasks;
}
+Scavenger::PromotionList::Local::Local(Scavenger::PromotionList* promotion_list)
+ : regular_object_promotion_list_local_(
+ &promotion_list->regular_object_promotion_list_),
+ large_object_promotion_list_local_(
+ &promotion_list->large_object_promotion_list_) {}
+
Scavenger::Scavenger(ScavengerCollector* collector, Heap* heap, bool is_logging,
- Worklist<MemoryChunk*, 64>* empty_chunks,
- CopiedList* copied_list, PromotionList* promotion_list,
+ EmptyChunksList* empty_chunks, CopiedList* copied_list,
+ PromotionList* promotion_list,
EphemeronTableList* ephemeron_table_list, int task_id)
: collector_(collector),
heap_(heap),
- empty_chunks_(empty_chunks, task_id),
- promotion_list_(promotion_list, task_id),
- copied_list_(copied_list, task_id),
- ephemeron_table_list_(ephemeron_table_list, task_id),
+ empty_chunks_local_(empty_chunks),
+ promotion_list_local_(promotion_list),
+ copied_list_local_(copied_list),
+ ephemeron_table_list_local_(ephemeron_table_list),
local_pretenuring_feedback_(kInitialLocalPretenuringFeedbackCapacity),
copied_size_(0),
promoted_size_(0),
@@ -602,7 +608,7 @@ void Scavenger::ScavengePage(MemoryChunk* page) {
if (!filter.IsValid(slot.address())) return REMOVE_SLOT;
return CheckAndScavengeObject(heap_, slot);
},
- empty_chunks_);
+ &empty_chunks_local_);
}
if (page->sweeping_slot_set<AccessMode::NON_ATOMIC>() != nullptr) {
@@ -641,24 +647,24 @@ void Scavenger::Process(JobDelegate* delegate) {
do {
done = true;
ObjectAndSize object_and_size;
- while (promotion_list_.ShouldEagerlyProcessPromotionList() &&
- copied_list_.Pop(&object_and_size)) {
+ while (promotion_list_local_.ShouldEagerlyProcessPromotionList() &&
+ copied_list_local_.Pop(&object_and_size)) {
scavenge_visitor.Visit(object_and_size.first);
done = false;
if (delegate && ((++objects % kInterruptThreshold) == 0)) {
- if (!copied_list_.IsGlobalPoolEmpty()) {
+ if (!copied_list_local_.IsEmpty()) {
delegate->NotifyConcurrencyIncrease();
}
}
}
struct PromotionListEntry entry;
- while (promotion_list_.Pop(&entry)) {
+ while (promotion_list_local_.Pop(&entry)) {
HeapObject target = entry.heap_object;
IterateAndScavengePromotedObject(target, entry.map, entry.size);
done = false;
if (delegate && ((++objects % kInterruptThreshold) == 0)) {
- if (!promotion_list_.IsGlobalPoolEmpty()) {
+ if (!promotion_list_local_.IsGlobalPoolEmpty()) {
delegate->NotifyConcurrencyIncrease();
}
}
@@ -735,8 +741,8 @@ void Scavenger::Finalize() {
heap()->IncrementPromotedObjectsSize(promoted_size_);
collector_->MergeSurvivingNewLargeObjects(surviving_new_large_objects_);
allocator_.Finalize();
- empty_chunks_.FlushToGlobal();
- ephemeron_table_list_.FlushToGlobal();
+ empty_chunks_local_.Publish();
+ ephemeron_table_list_local_.Publish();
for (auto it = ephemeron_remembered_set_.begin();
it != ephemeron_remembered_set_.end(); ++it) {
auto insert_result = heap()->ephemeron_remembered_set_.insert(
@@ -747,13 +753,13 @@ void Scavenger::Finalize() {
}
}
-void Scavenger::Flush() {
- copied_list_.FlushToGlobal();
- promotion_list_.FlushToGlobal();
+void Scavenger::Publish() {
+ copied_list_local_.Publish();
+ promotion_list_local_.Publish();
}
void Scavenger::AddEphemeronHashTable(EphemeronHashTable table) {
- ephemeron_table_list_.Push(table);
+ ephemeron_table_list_local_.Push(table);
}
void RootScavengeVisitor::VisitRootPointer(Root root, const char* description,
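GetMaxConcurrency() above caps the job at the number of scavenger instances while asking for at least one worker per remaining memory chunk or per pending worklist item. The heuristic, reduced to a standalone function (illustrative, not V8 code):

    // Illustrative reduction of ScavengerCollector::JobTask::GetMaxConcurrency().
    #include <algorithm>
    #include <cstddef>

    size_t MaxConcurrency(size_t num_scavengers, size_t remaining_chunks,
                          size_t worker_count, size_t copied_list_size,
                          size_t promotion_list_size) {
      // Never more workers than scavenger instances, but at least one per
      // remaining chunk or per outstanding worklist entry.
      return std::min(num_scavengers,
                      std::max(remaining_chunks,
                               worker_count + copied_list_size +
                                   promotion_list_size));
    }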
diff --git a/deps/v8/src/heap/scavenger.h b/deps/v8/src/heap/scavenger.h
index 63b3f314db..0eb12a5f3d 100644
--- a/deps/v8/src/heap/scavenger.h
+++ b/deps/v8/src/heap/scavenger.h
@@ -6,12 +6,12 @@
#define V8_HEAP_SCAVENGER_H_
#include "src/base/platform/condition-variable.h"
+#include "src/heap/base/worklist.h"
#include "src/heap/index-generator.h"
#include "src/heap/local-allocator.h"
#include "src/heap/objects-visiting.h"
#include "src/heap/parallel-work-item.h"
#include "src/heap/slot-set.h"
-#include "src/heap/worklist.h"
namespace v8 {
namespace internal {
@@ -33,7 +33,7 @@ using SurvivingNewLargeObjectMapEntry = std::pair<HeapObject, Map>;
constexpr int kEphemeronTableListSegmentSize = 128;
using EphemeronTableList =
- Worklist<EphemeronHashTable, kEphemeronTableListSegmentSize>;
+ ::heap::base::Worklist<EphemeronHashTable, kEphemeronTableListSegmentSize>;
class ScavengerCollector;
@@ -47,58 +47,49 @@ class Scavenger {
class PromotionList {
public:
- class View {
+ static constexpr size_t kRegularObjectPromotionListSegmentSize = 256;
+ static constexpr size_t kLargeObjectPromotionListSegmentSize = 4;
+
+ using RegularObjectPromotionList =
+ ::heap::base::Worklist<ObjectAndSize,
+ kRegularObjectPromotionListSegmentSize>;
+ using LargeObjectPromotionList =
+ ::heap::base::Worklist<PromotionListEntry,
+ kLargeObjectPromotionListSegmentSize>;
+
+ class Local {
public:
- View(PromotionList* promotion_list, int task_id)
- : promotion_list_(promotion_list), task_id_(task_id) {}
+ explicit Local(PromotionList* promotion_list);
inline void PushRegularObject(HeapObject object, int size);
inline void PushLargeObject(HeapObject object, Map map, int size);
- inline bool IsEmpty();
- inline size_t LocalPushSegmentSize();
+ inline size_t LocalPushSegmentSize() const;
inline bool Pop(struct PromotionListEntry* entry);
- inline bool IsGlobalPoolEmpty();
- inline bool ShouldEagerlyProcessPromotionList();
- inline void FlushToGlobal();
+ inline bool IsGlobalPoolEmpty() const;
+ inline bool ShouldEagerlyProcessPromotionList() const;
+ inline void Publish();
private:
- PromotionList* promotion_list_;
- int task_id_;
+ RegularObjectPromotionList::Local regular_object_promotion_list_local_;
+ LargeObjectPromotionList::Local large_object_promotion_list_local_;
};
- explicit PromotionList(int num_tasks)
- : regular_object_promotion_list_(num_tasks),
- large_object_promotion_list_(num_tasks) {}
-
- inline void PushRegularObject(int task_id, HeapObject object, int size);
- inline void PushLargeObject(int task_id, HeapObject object, Map map,
- int size);
- inline bool IsEmpty();
- inline size_t GlobalPoolSize() const;
- inline size_t LocalPushSegmentSize(int task_id);
- inline bool Pop(int task_id, struct PromotionListEntry* entry);
- inline bool IsGlobalPoolEmpty();
- inline bool ShouldEagerlyProcessPromotionList(int task_id);
- inline void FlushToGlobal(int task_id);
+ inline bool IsEmpty() const;
+ inline size_t Size() const;
private:
- static const int kRegularObjectPromotionListSegmentSize = 256;
- static const int kLargeObjectPromotionListSegmentSize = 4;
-
- using RegularObjectPromotionList =
- Worklist<ObjectAndSize, kRegularObjectPromotionListSegmentSize>;
- using LargeObjectPromotionList =
- Worklist<PromotionListEntry, kLargeObjectPromotionListSegmentSize>;
-
RegularObjectPromotionList regular_object_promotion_list_;
LargeObjectPromotionList large_object_promotion_list_;
};
static const int kCopiedListSegmentSize = 256;
- using CopiedList = Worklist<ObjectAndSize, kCopiedListSegmentSize>;
+ using CopiedList =
+ ::heap::base::Worklist<ObjectAndSize, kCopiedListSegmentSize>;
+ using EmptyChunksList = ::heap::base::Worklist<MemoryChunk*, 64>;
+
Scavenger(ScavengerCollector* collector, Heap* heap, bool is_logging,
- Worklist<MemoryChunk*, 64>* empty_chunks, CopiedList* copied_list,
+ EmptyChunksList* empty_chunks, CopiedList* copied_list,
PromotionList* promotion_list,
EphemeronTableList* ephemeron_table_list, int task_id);
@@ -112,7 +103,7 @@ class Scavenger {
// Finalize the Scavenger. Needs to be called from the main thread.
void Finalize();
- void Flush();
+ void Publish();
void AddEphemeronHashTable(EphemeronHashTable table);
@@ -198,10 +189,10 @@ class Scavenger {
ScavengerCollector* const collector_;
Heap* const heap_;
- Worklist<MemoryChunk*, 64>::View empty_chunks_;
- PromotionList::View promotion_list_;
- CopiedList::View copied_list_;
- EphemeronTableList::View ephemeron_table_list_;
+ EmptyChunksList::Local empty_chunks_local_;
+ PromotionList::Local promotion_list_local_;
+ CopiedList::Local copied_list_local_;
+ EphemeronTableList::Local ephemeron_table_list_local_;
Heap::PretenuringFeedbackMap local_pretenuring_feedback_;
size_t copied_size_;
size_t promoted_size_;
diff --git a/deps/v8/src/heap/setup-heap-internal.cc b/deps/v8/src/heap/setup-heap-internal.cc
index 011fc5e53a..4e7b2afbdc 100644
--- a/deps/v8/src/heap/setup-heap-internal.cc
+++ b/deps/v8/src/heap/setup-heap-internal.cc
@@ -189,7 +189,7 @@ AllocationResult Heap::AllocatePartialMap(InstanceType instance_type,
void Heap::FinalizePartialMap(Map map) {
ReadOnlyRoots roots(this);
- map.set_dependent_code(DependentCode::cast(roots.empty_weak_fixed_array()));
+ map.set_dependent_code(DependentCode::empty_dependent_code(roots));
map.set_raw_transitions(MaybeObject::FromSmi(Smi::zero()));
map.SetInstanceDescriptors(isolate(), roots.empty_descriptor_array(), 0);
map.set_prototype(roots.null_value());
@@ -407,6 +407,9 @@ bool Heap::CreateInitialMaps() {
if (StringShape(entry.type).IsCons()) map.mark_unstable();
roots_table()[entry.index] = map.ptr();
}
+ ALLOCATE_VARSIZE_MAP(SHARED_STRING_TYPE, seq_string_migration_sentinel);
+ ALLOCATE_VARSIZE_MAP(SHARED_ONE_BYTE_STRING_TYPE,
+ one_byte_seq_string_migration_sentinel);
ALLOCATE_VARSIZE_MAP(FIXED_DOUBLE_ARRAY_TYPE, fixed_double_array)
roots.fixed_double_array_map().set_elements_kind(HOLEY_DOUBLE_ELEMENTS);
@@ -503,12 +506,14 @@ bool Heap::CreateInitialMaps() {
ALLOCATE_MAP(CODE_DATA_CONTAINER_TYPE, CodeDataContainer::kSize,
code_data_container)
+ IF_WASM(ALLOCATE_MAP, WASM_API_FUNCTION_REF_TYPE, WasmApiFunctionRef::kSize,
+ wasm_api_function_ref)
IF_WASM(ALLOCATE_MAP, WASM_CAPI_FUNCTION_DATA_TYPE,
WasmCapiFunctionData::kSize, wasm_capi_function_data)
IF_WASM(ALLOCATE_MAP, WASM_EXPORTED_FUNCTION_DATA_TYPE,
WasmExportedFunctionData::kSize, wasm_exported_function_data)
- IF_WASM(ALLOCATE_MAP, WASM_API_FUNCTION_REF_TYPE, WasmApiFunctionRef::kSize,
- wasm_api_function_ref)
+ IF_WASM(ALLOCATE_MAP, WASM_INTERNAL_FUNCTION_TYPE,
+ WasmInternalFunction::kSize, wasm_internal_function)
IF_WASM(ALLOCATE_MAP, WASM_JS_FUNCTION_DATA_TYPE, WasmJSFunctionData::kSize,
wasm_js_function_data)
IF_WASM(ALLOCATE_MAP, WASM_TYPE_INFO_TYPE, WasmTypeInfo::kSize,
@@ -804,6 +809,9 @@ void Heap::CreateInitialObjects() {
set_feedback_vectors_for_profiling_tools(roots.undefined_value());
set_pending_optimize_for_test_bytecode(roots.undefined_value());
set_shared_wasm_memories(roots.empty_weak_array_list());
+#ifdef V8_ENABLE_WEBASSEMBLY
+ set_active_continuation(roots.undefined_value());
+#endif // V8_ENABLE_WEBASSEMBLY
set_script_list(roots.empty_weak_array_list());
diff --git a/deps/v8/src/heap/slot-set.h b/deps/v8/src/heap/slot-set.h
index c36b02c22d..5e70cbc33d 100644
--- a/deps/v8/src/heap/slot-set.h
+++ b/deps/v8/src/heap/slot-set.h
@@ -12,11 +12,11 @@
#include "src/base/atomic-utils.h"
#include "src/base/bit-field.h"
#include "src/base/bits.h"
-#include "src/heap/worklist.h"
#include "src/objects/compressed-slots.h"
#include "src/objects/slots.h"
#include "src/utils/allocation.h"
#include "src/utils/utils.h"
+#include "testing/gtest/include/gtest/gtest_prod.h" // nogncheck
namespace v8 {
namespace internal {
diff --git a/deps/v8/src/heap/spaces.cc b/deps/v8/src/heap/spaces.cc
index 230d004fe3..43d01f3989 100644
--- a/deps/v8/src/heap/spaces.cc
+++ b/deps/v8/src/heap/spaces.cc
@@ -136,7 +136,7 @@ Address SkipFillers(PtrComprCageBase cage_base, HeapObject filler,
while (addr < end) {
filler = HeapObject::FromAddress(addr);
CHECK(filler.IsFreeSpaceOrFiller(cage_base));
- addr = filler.address() + filler.Size();
+ addr = filler.address() + filler.Size(cage_base);
}
return addr;
}
@@ -184,7 +184,7 @@ size_t Page::ShrinkToHighWaterMark() {
this, address() + size() - unused, unused, area_end() - unused);
if (filler.address() != area_end()) {
CHECK(filler.IsFreeSpaceOrFiller(cage_base));
- CHECK_EQ(filler.address() + filler.Size(), area_end());
+ CHECK_EQ(filler.address() + filler.Size(cage_base), area_end());
}
}
return unused;
@@ -270,7 +270,7 @@ Address SpaceWithLinearArea::ComputeLimit(Address start, Address end,
return start + min_size;
} else if (SupportsAllocationObserver() && allocation_counter_.IsActive()) {
// Ensure there are no unaccounted allocations.
- DCHECK_EQ(allocation_info_.start(), allocation_info_.top());
+ DCHECK_EQ(allocation_info_->start(), allocation_info_->top());
    // Generated code may allocate inline from the linear allocation area.
    // To make sure we can observe these allocations, we use a lower limit.
@@ -325,14 +325,7 @@ void LocalAllocationBuffer::MakeIterable() {
LocalAllocationBuffer::LocalAllocationBuffer(
Heap* heap, LinearAllocationArea allocation_info) V8_NOEXCEPT
: heap_(heap),
- allocation_info_(allocation_info) {
- if (IsValid()) {
- heap_->CreateFillerObjectAtBackground(
- allocation_info_.top(),
- static_cast<int>(allocation_info_.limit() - allocation_info_.top()),
- ClearFreedMemoryMode::kDontClearFreedMemory);
- }
-}
+ allocation_info_(allocation_info) {}
LocalAllocationBuffer::LocalAllocationBuffer(LocalAllocationBuffer&& other)
V8_NOEXCEPT {
@@ -381,16 +374,16 @@ void SpaceWithLinearArea::ResumeAllocationObservers() {
}
void SpaceWithLinearArea::AdvanceAllocationObservers() {
- if (allocation_info_.top() &&
- allocation_info_.start() != allocation_info_.top()) {
- allocation_counter_.AdvanceAllocationObservers(allocation_info_.top() -
- allocation_info_.start());
+ if (allocation_info_->top() &&
+ allocation_info_->start() != allocation_info_->top()) {
+ allocation_counter_.AdvanceAllocationObservers(allocation_info_->top() -
+ allocation_info_->start());
MarkLabStartInitialized();
}
}
void SpaceWithLinearArea::MarkLabStartInitialized() {
- allocation_info_.ResetStart();
+ allocation_info_->ResetStart();
if (identity() == NEW_SPACE) {
heap()->new_space()->MoveOriginalTopForward();
@@ -420,12 +413,12 @@ void SpaceWithLinearArea::InvokeAllocationObservers(
if (allocation_size >= allocation_counter_.NextBytes()) {
// Only the first object in a LAB should reach the next step.
- DCHECK_EQ(soon_object,
- allocation_info_.start() + aligned_size_in_bytes - size_in_bytes);
+ DCHECK_EQ(soon_object, allocation_info_->start() + aligned_size_in_bytes -
+ size_in_bytes);
// Right now the LAB only contains that one object.
- DCHECK_EQ(allocation_info_.top() + allocation_size - aligned_size_in_bytes,
- allocation_info_.limit());
+ DCHECK_EQ(allocation_info_->top() + allocation_size - aligned_size_in_bytes,
+ allocation_info_->limit());
// Ensure that there is a valid object
if (identity() == CODE_SPACE) {
@@ -439,7 +432,7 @@ void SpaceWithLinearArea::InvokeAllocationObservers(
#if DEBUG
// Ensure that allocation_info_ isn't modified during one of the
// AllocationObserver::Step methods.
- LinearAllocationArea saved_allocation_info = allocation_info_;
+ LinearAllocationArea saved_allocation_info = *allocation_info_;
#endif
// Run AllocationObserver::Step through the AllocationCounter.
@@ -447,13 +440,13 @@ void SpaceWithLinearArea::InvokeAllocationObservers(
allocation_size);
// Ensure that start/top/limit didn't change.
- DCHECK_EQ(saved_allocation_info.start(), allocation_info_.start());
- DCHECK_EQ(saved_allocation_info.top(), allocation_info_.top());
- DCHECK_EQ(saved_allocation_info.limit(), allocation_info_.limit());
+ DCHECK_EQ(saved_allocation_info.start(), allocation_info_->start());
+ DCHECK_EQ(saved_allocation_info.top(), allocation_info_->top());
+ DCHECK_EQ(saved_allocation_info.limit(), allocation_info_->limit());
}
DCHECK_IMPLIES(allocation_counter_.IsActive(),
- (allocation_info_.limit() - allocation_info_.start()) <
+ (allocation_info_->limit() - allocation_info_->start()) <
allocation_counter_.NextBytes());
}
diff --git a/deps/v8/src/heap/spaces.h b/deps/v8/src/heap/spaces.h
index eb71467f78..3ac1e00208 100644
--- a/deps/v8/src/heap/spaces.h
+++ b/deps/v8/src/heap/spaces.h
@@ -61,8 +61,7 @@ class SemiSpace;
// collection. The large object space is paged. Pages in large object space
// may be larger than the page size.
//
-// A store-buffer based write barrier is used to keep track of intergenerational
-// references. See heap/store-buffer.h.
+// A remembered set is used to keep track of intergenerational references.
//
// During scavenges and mark-sweep collections we sometimes (after a store
// buffer overflow) iterate intergenerational pointers without decoding heap
@@ -434,23 +433,24 @@ class LocalAllocationBuffer {
class SpaceWithLinearArea : public Space {
public:
- SpaceWithLinearArea(Heap* heap, AllocationSpace id, FreeList* free_list)
- : Space(heap, id, free_list) {
- allocation_info_.Reset(kNullAddress, kNullAddress);
- }
+ SpaceWithLinearArea(Heap* heap, AllocationSpace id, FreeList* free_list,
+ LinearAllocationArea* allocation_info)
+ : Space(heap, id, free_list), allocation_info_(allocation_info) {}
virtual bool SupportsAllocationObserver() = 0;
// Returns the allocation pointer in this space.
- Address top() { return allocation_info_.top(); }
- Address limit() { return allocation_info_.limit(); }
+ Address top() const { return allocation_info_->top(); }
+ Address limit() const { return allocation_info_->limit(); }
// The allocation top address.
- Address* allocation_top_address() { return allocation_info_.top_address(); }
+ Address* allocation_top_address() const {
+ return allocation_info_->top_address();
+ }
// The allocation limit address.
- Address* allocation_limit_address() {
- return allocation_info_.limit_address();
+ Address* allocation_limit_address() const {
+ return allocation_info_->limit_address();
}
// Methods needed for allocation observers.
@@ -484,7 +484,7 @@ class SpaceWithLinearArea : public Space {
protected:
// TODO(ofrobots): make these private after refactoring is complete.
- LinearAllocationArea allocation_info_;
+ LinearAllocationArea* const allocation_info_;
size_t allocations_origins_[static_cast<int>(
AllocationOrigin::kNumberOfAllocationOrigins)] = {0};
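In spaces.h above, allocation_info_ changes from an owned LinearAllocationArea to a pointer passed in by the creator, so the same start/top/limit triple can be shared with whoever owns it. A plain C++ sketch of that ownership shape, using hypothetical names (LinearArea, SpaceWithSharedArea):

    // Hypothetical names; only the ownership shape matches the patch.
    #include <cstdint>

    struct LinearArea {  // stand-in for LinearAllocationArea
      uintptr_t start = 0, top = 0, limit = 0;
    };

    class SpaceWithSharedArea {
     public:
      explicit SpaceWithSharedArea(LinearArea* area) : area_(area) {}
      uintptr_t top() const { return area_->top; }
      uintptr_t limit() const { return area_->limit; }

     private:
      LinearArea* const area_;  // not owned; must outlive the space
    };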
diff --git a/deps/v8/src/heap/sweeper.cc b/deps/v8/src/heap/sweeper.cc
index 7e18fc2895..1b9a9b4eb7 100644
--- a/deps/v8/src/heap/sweeper.cc
+++ b/deps/v8/src/heap/sweeper.cc
@@ -412,8 +412,8 @@ int Sweeper::RawSweep(
CleanupInvalidTypedSlotsOfFreeRanges(p, free_ranges_map);
ClearMarkBitsAndHandleLivenessStatistics(p, live_bytes, free_list_mode);
- p->set_concurrent_sweeping_state(Page::ConcurrentSweepingState::kDone);
if (code_object_registry) code_object_registry->Finalize();
+ p->set_concurrent_sweeping_state(Page::ConcurrentSweepingState::kDone);
if (free_list_mode == IGNORE_FREE_LIST) return 0;
return static_cast<int>(
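The sweeper.cc reorder publishes ConcurrentSweepingState::kDone only after the code object registry is finalized, presumably so a concurrent reader polling SweepingDone() cannot observe the page as done while finalization is still in progress. The general publish-after-finalize shape, as a plain C++ sketch rather than V8 code:

    // Plain C++ sketch of the ordering: the flag that readers poll is
    // stored only after finalization completes.
    #include <atomic>

    std::atomic<bool> sweeping_done{false};

    void FinalizeThenPublish() {
      // ... finalize the page's data structures first ...
      sweeping_done.store(true, std::memory_order_release);
    }

    bool SweepingDone() {
      return sweeping_done.load(std::memory_order_acquire);
    }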
diff --git a/deps/v8/src/heap/weak-object-worklists.cc b/deps/v8/src/heap/weak-object-worklists.cc
index 50e268ab91..951657456d 100644
--- a/deps/v8/src/heap/weak-object-worklists.cc
+++ b/deps/v8/src/heap/weak-object-worklists.cc
@@ -6,7 +6,6 @@
#include "src/heap/heap-inl.h"
#include "src/heap/heap.h"
-#include "src/heap/worklist.h"
#include "src/objects/hash-table.h"
#include "src/objects/heap-object.h"
#include "src/objects/js-function.h"
@@ -19,12 +18,32 @@ namespace v8 {
namespace internal {
+WeakObjects::Local::Local(WeakObjects* weak_objects)
+ : WeakObjects::UnusedBase()
+#define INIT_LOCAL_WORKLIST(_, name, __) , name##_local(&weak_objects->name)
+ WEAK_OBJECT_WORKLISTS(INIT_LOCAL_WORKLIST)
+#undef INIT_LOCAL_WORKLIST
+{
+}
+
+void WeakObjects::Local::Publish() {
+#define INVOKE_PUBLISH(_, name, __) name##_local.Publish();
+ WEAK_OBJECT_WORKLISTS(INVOKE_PUBLISH)
+#undef INVOKE_PUBLISH
+}
+
void WeakObjects::UpdateAfterScavenge() {
#define INVOKE_UPDATE(_, name, Name) Update##Name(name);
WEAK_OBJECT_WORKLISTS(INVOKE_UPDATE)
#undef INVOKE_UPDATE
}
+void WeakObjects::Clear() {
+#define INVOKE_CLEAR(_, name, __) name.Clear();
+ WEAK_OBJECT_WORKLISTS(INVOKE_CLEAR)
+#undef INVOKE_CLEAR
+}
+
// static
void WeakObjects::UpdateTransitionArrays(
WeakObjectWorklist<TransitionArray>& transition_arrays) {
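WeakObjects::Local above is generated with the same X-macro used for the worklists themselves: WEAK_OBJECT_WORKLISTS(F) is invoked once to declare one Local member per worklist and once to publish them all. A minimal standalone illustration of the pattern, with hypothetical entries rather than the real worklist types:

    // Hypothetical entries; the real list is WEAK_OBJECT_WORKLISTS.
    #define DEMO_WORKLISTS(F) \
      F(int, pending_ints)    \
      F(double, pending_doubles)

    struct DemoLocals {
    #define DECLARE_MEMBER(Type, name) Type name{};
      DEMO_WORKLISTS(DECLARE_MEMBER)
    #undef DECLARE_MEMBER
    };
    // Expands to: struct DemoLocals { int pending_ints{}; double pending_doubles{}; };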
diff --git a/deps/v8/src/heap/weak-object-worklists.h b/deps/v8/src/heap/weak-object-worklists.h
index c61b15a0e9..6da8a661fc 100644
--- a/deps/v8/src/heap/weak-object-worklists.h
+++ b/deps/v8/src/heap/weak-object-worklists.h
@@ -6,7 +6,7 @@
#define V8_HEAP_WEAK_OBJECT_WORKLISTS_H_
#include "src/common/globals.h"
-#include "src/heap/worklist.h"
+#include "src/heap/base/worklist.h"
#include "src/objects/heap-object.h"
#include "src/objects/js-weak-refs.h"
@@ -61,16 +61,32 @@ class TransitionArray;
F(JSFunction, baseline_flushing_candidates, BaselineFlushingCandidates) \
F(JSFunction, flushed_js_functions, FlushedJSFunctions)
-class WeakObjects {
+class WeakObjects final {
+ private:
+ class UnusedBase {}; // Base class to allow using macro in initializer list.
+
public:
template <typename Type>
- using WeakObjectWorklist = Worklist<Type, 64>;
+ using WeakObjectWorklist = ::heap::base::Worklist<Type, 64>;
+
+ class Local final : public UnusedBase {
+ public:
+ explicit Local(WeakObjects* weak_objects);
+
+ V8_EXPORT_PRIVATE void Publish();
+
+#define DECLARE_WORKLIST(Type, name, _) \
+ WeakObjectWorklist<Type>::Local name##_local;
+ WEAK_OBJECT_WORKLISTS(DECLARE_WORKLIST)
+#undef DECLARE_WORKLIST
+ };
#define DECLARE_WORKLIST(Type, name, _) WeakObjectWorklist<Type> name;
WEAK_OBJECT_WORKLISTS(DECLARE_WORKLIST)
#undef DECLARE_WORKLIST
void UpdateAfterScavenge();
+ void Clear();
private:
#define DECLARE_UPDATE_METHODS(Type, _, Name) \
diff --git a/deps/v8/src/heap/worklist.h b/deps/v8/src/heap/worklist.h
deleted file mode 100644
index 0f5f13cdf7..0000000000
--- a/deps/v8/src/heap/worklist.h
+++ /dev/null
@@ -1,453 +0,0 @@
-// Copyright 2017 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef V8_HEAP_WORKLIST_H_
-#define V8_HEAP_WORKLIST_H_
-
-#include <cstddef>
-#include <utility>
-
-#include "src/base/atomic-utils.h"
-#include "src/base/logging.h"
-#include "src/base/macros.h"
-#include "src/base/platform/mutex.h"
-#include "testing/gtest/include/gtest/gtest_prod.h" // nogncheck
-
-namespace v8 {
-namespace internal {
-
-// A concurrent worklist based on segments. Each task gets private
-// push and pop segments. Empty pop segments are swapped with their
-// corresponding push segments. Full push segments are published to a global
-// pool of segments and replaced with empty segments.
-//
-// Work stealing is best effort, i.e., there is no way to inform other tasks
-// of the need for items.
-template <typename EntryType, int SEGMENT_SIZE>
-class Worklist {
- public:
- class View {
- public:
- View(Worklist<EntryType, SEGMENT_SIZE>* worklist, int task_id)
- : worklist_(worklist), task_id_(task_id) {}
-
- // Pushes an entry onto the worklist.
- bool Push(EntryType entry) { return worklist_->Push(task_id_, entry); }
-
- // Pops an entry from the worklist.
- bool Pop(EntryType* entry) { return worklist_->Pop(task_id_, entry); }
-
- // Returns true if the local portion of the worklist is empty.
- bool IsLocalEmpty() { return worklist_->IsLocalEmpty(task_id_); }
-
- // Returns true if the worklist is empty. Can only be used from the main
- // thread without concurrent access.
- bool IsEmpty() { return worklist_->IsEmpty(); }
-
- bool IsGlobalPoolEmpty() { return worklist_->IsGlobalPoolEmpty(); }
-
- size_t LocalPushSegmentSize() {
- return worklist_->LocalPushSegmentSize(task_id_);
- }
-
- void FlushToGlobal() { worklist_->FlushToGlobal(task_id_); }
-
- private:
- Worklist<EntryType, SEGMENT_SIZE>* worklist_;
- int task_id_;
- };
-
- static const int kMaxNumTasks = 8;
- static const size_t kSegmentCapacity = SEGMENT_SIZE;
-
- Worklist() : Worklist(kMaxNumTasks) {}
-
- explicit Worklist(int num_tasks) : num_tasks_(num_tasks) {
- DCHECK_LE(num_tasks, kMaxNumTasks);
- for (int i = 0; i < num_tasks_; i++) {
- private_push_segment(i) = NewSegment();
- private_pop_segment(i) = NewSegment();
- }
- }
-
- ~Worklist() {
- CHECK(IsEmpty());
- for (int i = 0; i < num_tasks_; i++) {
- DCHECK_NOT_NULL(private_push_segment(i));
- DCHECK_NOT_NULL(private_pop_segment(i));
- delete private_push_segment(i);
- delete private_pop_segment(i);
- }
- }
-
- // Swaps content with the given worklist. Local buffers need to
- // be empty, not thread safe.
- void Swap(Worklist<EntryType, SEGMENT_SIZE>& other) {
- CHECK(AreLocalsEmpty());
- CHECK(other.AreLocalsEmpty());
-
- global_pool_.Swap(other.global_pool_);
- }
-
- bool Push(int task_id, EntryType entry) {
- DCHECK_LT(task_id, num_tasks_);
- DCHECK_NOT_NULL(private_push_segment(task_id));
- if (!private_push_segment(task_id)->Push(entry)) {
- PublishPushSegmentToGlobal(task_id);
- bool success = private_push_segment(task_id)->Push(entry);
- USE(success);
- DCHECK(success);
- }
- return true;
- }
-
- bool Pop(int task_id, EntryType* entry) {
- DCHECK_LT(task_id, num_tasks_);
- DCHECK_NOT_NULL(private_pop_segment(task_id));
- if (!private_pop_segment(task_id)->Pop(entry)) {
- if (!private_push_segment(task_id)->IsEmpty()) {
- Segment* tmp = private_pop_segment(task_id);
- private_pop_segment(task_id) = private_push_segment(task_id);
- private_push_segment(task_id) = tmp;
- } else if (!StealPopSegmentFromGlobal(task_id)) {
- return false;
- }
- bool success = private_pop_segment(task_id)->Pop(entry);
- USE(success);
- DCHECK(success);
- }
- return true;
- }
-
- size_t LocalPushSegmentSize(int task_id) {
- return private_push_segment(task_id)->Size();
- }
-
- bool IsLocalEmpty(int task_id) {
- return private_pop_segment(task_id)->IsEmpty() &&
- private_push_segment(task_id)->IsEmpty();
- }
-
- bool IsGlobalPoolEmpty() { return global_pool_.IsEmpty(); }
-
- bool IsEmpty() {
- if (!AreLocalsEmpty()) return false;
- return global_pool_.IsEmpty();
- }
-
- bool AreLocalsEmpty() {
- for (int i = 0; i < num_tasks_; i++) {
- if (!IsLocalEmpty(i)) return false;
- }
- return true;
- }
-
- size_t LocalSize(int task_id) {
- return private_pop_segment(task_id)->Size() +
- private_push_segment(task_id)->Size();
- }
-
- // Thread-safe but may return an outdated result.
- size_t GlobalPoolSize() const { return global_pool_.Size(); }
-
- // Clears all segments. Frees the global segment pool.
- //
- // Assumes that no other tasks are running.
- void Clear() {
- for (int i = 0; i < num_tasks_; i++) {
- private_pop_segment(i)->Clear();
- private_push_segment(i)->Clear();
- }
- global_pool_.Clear();
- }
-
- // Calls the specified callback on each element of the deques and replaces
- // the element with the result of the callback.
- // The signature of the callback is
- // bool Callback(EntryType old, EntryType* new).
- // If the callback returns |false| then the element is removed from the
- // worklist. Otherwise the |new| entry is updated.
- //
- // Assumes that no other tasks are running.
- template <typename Callback>
- void Update(Callback callback) {
- for (int i = 0; i < num_tasks_; i++) {
- private_pop_segment(i)->Update(callback);
- private_push_segment(i)->Update(callback);
- }
- global_pool_.Update(callback);
- }
-
- // Calls the specified callback on each element of the deques.
- // The signature of the callback is:
- // void Callback(EntryType entry).
- //
- // Assumes that no other tasks are running.
- template <typename Callback>
- void Iterate(Callback callback) {
- for (int i = 0; i < num_tasks_; i++) {
- private_pop_segment(i)->Iterate(callback);
- private_push_segment(i)->Iterate(callback);
- }
- global_pool_.Iterate(callback);
- }
-
- template <typename Callback>
- void IterateGlobalPool(Callback callback) {
- global_pool_.Iterate(callback);
- }
-
- void FlushToGlobal(int task_id) {
- PublishPushSegmentToGlobal(task_id);
- PublishPopSegmentToGlobal(task_id);
- }
-
- void MergeGlobalPool(Worklist* other) {
- global_pool_.Merge(&other->global_pool_);
- }
-
- private:
- FRIEND_TEST(WorkListTest, SegmentCreate);
- FRIEND_TEST(WorkListTest, SegmentPush);
- FRIEND_TEST(WorkListTest, SegmentPushPop);
- FRIEND_TEST(WorkListTest, SegmentIsEmpty);
- FRIEND_TEST(WorkListTest, SegmentIsFull);
- FRIEND_TEST(WorkListTest, SegmentClear);
- FRIEND_TEST(WorkListTest, SegmentFullPushFails);
- FRIEND_TEST(WorkListTest, SegmentEmptyPopFails);
- FRIEND_TEST(WorkListTest, SegmentUpdateFalse);
- FRIEND_TEST(WorkListTest, SegmentUpdate);
-
- class Segment {
- public:
- static const size_t kCapacity = kSegmentCapacity;
-
- Segment() : index_(0) {}
-
- bool Push(EntryType entry) {
- if (IsFull()) return false;
- entries_[index_++] = entry;
- return true;
- }
-
- bool Pop(EntryType* entry) {
- if (IsEmpty()) return false;
- *entry = entries_[--index_];
- return true;
- }
-
- size_t Size() const { return index_; }
- bool IsEmpty() const { return index_ == 0; }
- bool IsFull() const { return index_ == kCapacity; }
- void Clear() { index_ = 0; }
-
- template <typename Callback>
- void Update(Callback callback) {
- size_t new_index = 0;
- for (size_t i = 0; i < index_; i++) {
- if (callback(entries_[i], &entries_[new_index])) {
- new_index++;
- }
- }
- index_ = new_index;
- }
-
- template <typename Callback>
- void Iterate(Callback callback) const {
- for (size_t i = 0; i < index_; i++) {
- callback(entries_[i]);
- }
- }
-
- Segment* next() const { return next_; }
- void set_next(Segment* segment) { next_ = segment; }
-
- private:
- Segment* next_;
- size_t index_;
- EntryType entries_[kCapacity];
- };
-
- struct PrivateSegmentHolder {
- Segment* private_push_segment;
- Segment* private_pop_segment;
- char cache_line_padding[64];
- };
-
- class GlobalPool {
- public:
- GlobalPool() : top_(nullptr) {}
-
- // Swaps contents, not thread safe.
- void Swap(GlobalPool& other) {
- Segment* temp = top_;
- set_top(other.top_);
- other.set_top(temp);
- size_t other_size = other.size_.exchange(
- size_.load(std::memory_order_relaxed), std::memory_order_relaxed);
- size_.store(other_size, std::memory_order_relaxed);
- }
-
- V8_INLINE void Push(Segment* segment) {
- base::MutexGuard guard(&lock_);
- segment->set_next(top_);
- set_top(segment);
- size_.fetch_add(1, std::memory_order_relaxed);
- }
-
- V8_INLINE bool Pop(Segment** segment) {
- base::MutexGuard guard(&lock_);
- if (top_ != nullptr) {
- DCHECK_LT(0U, size_);
- size_.fetch_sub(1, std::memory_order_relaxed);
- *segment = top_;
- set_top(top_->next());
- return true;
- }
- return false;
- }
-
- V8_INLINE bool IsEmpty() {
- return base::AsAtomicPointer::Relaxed_Load(&top_) == nullptr;
- }
-
- V8_INLINE size_t Size() const {
- // It is safe to read |size_| without a lock since this variable is
- // atomic, keeping in mind that threads may not immediately see the new
- // value when it is updated.
- return size_.load(std::memory_order_relaxed);
- }
-
- void Clear() {
- base::MutexGuard guard(&lock_);
- size_.store(0, std::memory_order_relaxed);
- Segment* current = top_;
- while (current != nullptr) {
- Segment* tmp = current;
- current = current->next();
- delete tmp;
- }
- set_top(nullptr);
- }
-
- // See Worklist::Update.
- template <typename Callback>
- void Update(Callback callback) {
- base::MutexGuard guard(&lock_);
- Segment* prev = nullptr;
- Segment* current = top_;
- size_t num_deleted = 0;
- while (current != nullptr) {
- current->Update(callback);
- if (current->IsEmpty()) {
- DCHECK_LT(0U, size_);
- ++num_deleted;
- if (prev == nullptr) {
- top_ = current->next();
- } else {
- prev->set_next(current->next());
- }
- Segment* tmp = current;
- current = current->next();
- delete tmp;
- } else {
- prev = current;
- current = current->next();
- }
- }
- size_.fetch_sub(num_deleted, std::memory_order_relaxed);
- }
-
- // See Worklist::Iterate.
- template <typename Callback>
- void Iterate(Callback callback) {
- base::MutexGuard guard(&lock_);
- for (Segment* current = top_; current != nullptr;
- current = current->next()) {
- current->Iterate(callback);
- }
- }
-
- void Merge(GlobalPool* other) {
- Segment* top = nullptr;
- size_t other_size = 0;
- {
- base::MutexGuard guard(&other->lock_);
- if (!other->top_) return;
- top = other->top_;
- other_size = other->size_.load(std::memory_order_relaxed);
- other->size_.store(0, std::memory_order_relaxed);
- other->set_top(nullptr);
- }
-
- // It's safe to iterate through these segments because the top was
- // extracted from |other|.
- Segment* end = top;
- while (end->next()) end = end->next();
-
- {
- base::MutexGuard guard(&lock_);
- size_.fetch_add(other_size, std::memory_order_relaxed);
- end->set_next(top_);
- set_top(top);
- }
- }
-
- private:
- void set_top(Segment* segment) {
- base::AsAtomicPointer::Relaxed_Store(&top_, segment);
- }
-
- base::Mutex lock_;
- Segment* top_;
- std::atomic<size_t> size_{0};
- };
-
- V8_INLINE Segment*& private_push_segment(int task_id) {
- return private_segments_[task_id].private_push_segment;
- }
-
- V8_INLINE Segment*& private_pop_segment(int task_id) {
- return private_segments_[task_id].private_pop_segment;
- }
-
- V8_INLINE void PublishPushSegmentToGlobal(int task_id) {
- if (!private_push_segment(task_id)->IsEmpty()) {
- global_pool_.Push(private_push_segment(task_id));
- private_push_segment(task_id) = NewSegment();
- }
- }
-
- V8_INLINE void PublishPopSegmentToGlobal(int task_id) {
- if (!private_pop_segment(task_id)->IsEmpty()) {
- global_pool_.Push(private_pop_segment(task_id));
- private_pop_segment(task_id) = NewSegment();
- }
- }
-
- V8_INLINE bool StealPopSegmentFromGlobal(int task_id) {
- if (global_pool_.IsEmpty()) return false;
- Segment* new_segment = nullptr;
- if (global_pool_.Pop(&new_segment)) {
- delete private_pop_segment(task_id);
- private_pop_segment(task_id) = new_segment;
- return true;
- }
- return false;
- }
-
- V8_INLINE Segment* NewSegment() {
- // Bottleneck for filtering in crash dumps.
- return new Segment();
- }
-
- PrivateSegmentHolder private_segments_[kMaxNumTasks];
- GlobalPool global_pool_;
- int num_tasks_;
-};
-
-} // namespace internal
-} // namespace v8
-
-#endif // V8_HEAP_WORKLIST_H_
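The deleted src/heap/worklist.h indexed private push/pop segments by task_id and bounded workers at kMaxNumTasks; the heap::base::Worklist used throughout this patch instead hands each worker its own Local handle that publishes full segments to a shared pool. A self-contained sketch of that Local/Publish shape (illustrative only, not the heap::base::Worklist API):

    // Illustrative only; not the heap::base::Worklist API.
    #include <cstddef>
    #include <mutex>
    #include <utility>
    #include <vector>

    template <typename T>
    class MiniWorklist {
     public:
      class Local {
       public:
        explicit Local(MiniWorklist* list) : list_(list) {}
        void Push(T value) { segment_.push_back(std::move(value)); }
        size_t PushSegmentSize() const { return segment_.size(); }
        // Move privately accumulated work into the shared pool.
        void Publish() {
          if (segment_.empty()) return;
          std::lock_guard<std::mutex> guard(list_->mutex_);
          list_->pool_.push_back(std::move(segment_));
          segment_.clear();
        }

       private:
        MiniWorklist* list_;
        std::vector<T> segment_;
      };

      // Number of published segments; no task_id anywhere.
      size_t Size() const {
        std::lock_guard<std::mutex> guard(mutex_);
        return pool_.size();
      }

     private:
      mutable std::mutex mutex_;
      std::vector<std::vector<T>> pool_;
    };

Scavenger and WeakObjects now hold such Local members directly and call Publish() where they previously called FlushToGlobal(task_id).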
diff --git a/deps/v8/src/ic/accessor-assembler.cc b/deps/v8/src/ic/accessor-assembler.cc
index 83b296423d..7aff16b9da 100644
--- a/deps/v8/src/ic/accessor-assembler.cc
+++ b/deps/v8/src/ic/accessor-assembler.cc
@@ -225,7 +225,7 @@ void AccessorAssembler::HandleLoadICHandlerCase(
BIND(&call_handler);
{
- // TODO(v8:11880): avoid roundtrips between cdc and code.
+ // TODO(v8:11880): call CodeT directly.
TNode<Code> code_handler = FromCodeT(CAST(handler));
exit_point->ReturnCallStub(LoadWithVectorDescriptor{}, code_handler,
p->context(), p->lookup_start_object(),
@@ -600,7 +600,15 @@ void AccessorAssembler::HandleLoadICSmiHandlerCase(
Return(result);
BIND(&if_oob_string);
- GotoIf(IntPtrLessThan(index, IntPtrConstant(0)), miss);
+ if (Is64()) {
+ // Indices >= 4294967295 are stored as named properties; handle them
+ // in the runtime.
+ GotoIfNot(UintPtrLessThanOrEqual(
+ index, IntPtrConstant(JSObject::kMaxElementIndex)),
+ miss);
+ } else {
+ GotoIf(IntPtrLessThan(index, IntPtrConstant(0)), miss);
+ }
TNode<BoolT> allow_out_of_bounds =
IsSetWord<LoadHandler::AllowOutOfBoundsBits>(handler_word);
GotoIfNot(allow_out_of_bounds, miss);
@@ -988,8 +996,7 @@ TNode<Object> AccessorAssembler::HandleProtoHandler(
if (on_code_handler) {
Label if_smi_handler(this);
GotoIf(TaggedIsSmi(smi_or_code_handler), &if_smi_handler);
- // TODO(v8:11880): avoid roundtrips between cdc and code.
- TNode<Code> code = FromCodeT(CAST(smi_or_code_handler));
+ TNode<CodeT> code = CAST(smi_or_code_handler);
on_code_handler(code);
BIND(&if_smi_handler);
@@ -1161,7 +1168,9 @@ void AccessorAssembler::JumpIfDataProperty(TNode<Uint32T> details,
PropertyDetails::kAttributesReadOnlyMask));
}
TNode<Uint32T> kind = DecodeWord32<PropertyDetails::KindField>(details);
- GotoIf(Word32Equal(kind, Int32Constant(kData)), writable);
+ GotoIf(
+ Word32Equal(kind, Int32Constant(static_cast<int>(PropertyKind::kData))),
+ writable);
// Fall through if it's an accessor property.
}
@@ -1234,7 +1243,7 @@ void AccessorAssembler::HandleStoreICHandlerCase(
// Check that the property is a writable data property (no accessor).
const int kTypeAndReadOnlyMask = PropertyDetails::KindField::kMask |
PropertyDetails::kAttributesReadOnlyMask;
- STATIC_ASSERT(kData == 0);
+ STATIC_ASSERT(static_cast<int>(PropertyKind::kData) == 0);
GotoIf(IsSetWord32(details, kTypeAndReadOnlyMask), miss);
if (V8_DICT_PROPERTY_CONST_TRACKING_BOOL) {
@@ -1326,7 +1335,7 @@ void AccessorAssembler::HandleStoreICHandlerCase(
// |handler| is a heap object. Must be code, call it.
BIND(&call_handler);
{
- // TODO(v8:11880): avoid roundtrips between cdc and code.
+ // TODO(v8:11880): call CodeT directly.
TNode<Code> code_handler = FromCodeT(CAST(strong_handler));
TailCallStub(StoreWithVectorDescriptor{}, code_handler, p->context(),
p->receiver(), p->name(), p->value(), p->slot(),
@@ -1406,7 +1415,7 @@ void AccessorAssembler::HandleStoreICTransitionMapHandlerCase(
PropertyDetails::KindField::kMask |
PropertyDetails::kAttributesDontDeleteMask |
PropertyDetails::kAttributesReadOnlyMask;
- STATIC_ASSERT(kData == 0);
+ STATIC_ASSERT(static_cast<int>(PropertyKind::kData) == 0);
// Both DontDelete and ReadOnly attributes must not be set and it has to be
// a kData property.
GotoIf(IsSetWord32(details, kKindAndAttributesDontDeleteReadOnlyMask),
@@ -1497,7 +1506,7 @@ void AccessorAssembler::OverwriteExistingFastDataProperty(
CSA_DCHECK(this,
Word32Equal(DecodeWord32<PropertyDetails::KindField>(details),
- Int32Constant(kData)));
+ Int32Constant(static_cast<int>(PropertyKind::kData))));
Branch(Word32Equal(
DecodeWord32<PropertyDetails::LocationField>(details),
@@ -1696,16 +1705,17 @@ void AccessorAssembler::HandleStoreICProtoHandler(
OnCodeHandler on_code_handler;
if (support_elements == kSupportElements) {
// Code sub-handlers are expected only in KeyedStoreICs.
- on_code_handler = [=](TNode<Code> code_handler) {
+ on_code_handler = [=](TNode<CodeT> code_handler) {
// This is either element store or transitioning element store.
Label if_element_store(this), if_transitioning_element_store(this);
Branch(IsStoreHandler0Map(LoadMap(handler)), &if_element_store,
&if_transitioning_element_store);
BIND(&if_element_store);
{
- TailCallStub(StoreWithVectorDescriptor{}, code_handler, p->context(),
- p->receiver(), p->name(), p->value(), p->slot(),
- p->vector());
+ // TODO(v8:11880): call CodeT directly.
+ TailCallStub(StoreWithVectorDescriptor{}, FromCodeT(code_handler),
+ p->context(), p->receiver(), p->name(), p->value(),
+ p->slot(), p->vector());
}
BIND(&if_transitioning_element_store);
@@ -1717,9 +1727,10 @@ void AccessorAssembler::HandleStoreICProtoHandler(
GotoIf(IsDeprecatedMap(transition_map), miss);
- TailCallStub(StoreTransitionDescriptor{}, code_handler, p->context(),
- p->receiver(), p->name(), transition_map, p->value(),
- p->slot(), p->vector());
+ // TODO(v8:11880): call CodeT directly.
+ TailCallStub(StoreTransitionDescriptor{}, FromCodeT(code_handler),
+ p->context(), p->receiver(), p->name(), transition_map,
+ p->value(), p->slot(), p->vector());
}
};
}
@@ -1733,7 +1744,7 @@ void AccessorAssembler::HandleStoreICProtoHandler(
const int kTypeAndReadOnlyMask =
PropertyDetails::KindField::kMask |
PropertyDetails::kAttributesReadOnlyMask;
- STATIC_ASSERT(kData == 0);
+ STATIC_ASSERT(static_cast<int>(PropertyKind::kData) == 0);
GotoIf(IsSetWord32(details, kTypeAndReadOnlyMask), miss);
StoreValueByKeyIndex<PropertyDictionary>(properties, name_index,
@@ -3735,7 +3746,7 @@ void AccessorAssembler::StoreGlobalIC_PropertyCellCase(
GotoIf(IsSetWord32(details, PropertyDetails::kAttributesReadOnlyMask), miss);
CSA_DCHECK(this,
Word32Equal(DecodeWord32<PropertyDetails::KindField>(details),
- Int32Constant(kData)));
+ Int32Constant(static_cast<int>(PropertyKind::kData))));
TNode<Uint32T> type =
DecodeWord32<PropertyDetails::PropertyCellTypeField>(details);
@@ -3966,7 +3977,7 @@ void AccessorAssembler::StoreInArrayLiteralIC(const StoreICParameters* p) {
{
// Call the handler.
- // TODO(v8:11880): avoid roundtrips between cdc and code.
+ // TODO(v8:11880): call CodeT directly.
TNode<Code> code_handler = FromCodeT(CAST(handler));
TailCallStub(StoreWithVectorDescriptor{}, code_handler, p->context(),
p->receiver(), p->name(), p->value(), p->slot(),
@@ -3980,7 +3991,7 @@ void AccessorAssembler::StoreInArrayLiteralIC(const StoreICParameters* p) {
TNode<Map> transition_map =
CAST(GetHeapObjectAssumeWeak(maybe_transition_map, &miss));
GotoIf(IsDeprecatedMap(transition_map), &miss);
- // TODO(v8:11880): avoid roundtrips between cdc and code.
+ // TODO(v8:11880): call CodeT directly.
TNode<Code> code = FromCodeT(
CAST(LoadObjectField(handler, StoreHandler::kSmiHandlerOffset)));
TailCallStub(StoreTransitionDescriptor{}, code, p->context(),
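The 64-bit branch added to HandleLoadICSmiHandlerCase treats any string index above JSObject::kMaxElementIndex as a named property and sends it to the runtime instead of the out-of-bounds fast path; per the comment in the hunk that boundary is 4294967295, so kMaxElementIndex would be 4294967294. The check, reduced to a standalone predicate with the constant assumed from that comment:

    // Constant assumed from the comment in the hunk: indices at or above
    // 4294967295 are named properties, so kMaxElementIndex is 4294967294.
    #include <cstdint>

    constexpr uint64_t kMaxElementIndexDemo = 4294967294ull;

    bool TakesElementFastPath(uint64_t index) {
      // Larger indices miss and fall back to the runtime as named lookups.
      return index <= kMaxElementIndexDemo;
    }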
diff --git a/deps/v8/src/ic/accessor-assembler.h b/deps/v8/src/ic/accessor-assembler.h
index 927ec445fb..b9952a9863 100644
--- a/deps/v8/src/ic/accessor-assembler.h
+++ b/deps/v8/src/ic/accessor-assembler.h
@@ -468,7 +468,7 @@ class V8_EXPORT_PRIVATE AccessorAssembler : public CodeStubAssembler {
// Low-level helpers.
- using OnCodeHandler = std::function<void(TNode<Code> code_handler)>;
+ using OnCodeHandler = std::function<void(TNode<CodeT> code_handler)>;
using OnFoundOnLookupStartObject = std::function<void(
TNode<PropertyDictionary> properties, TNode<IntPtrT> name_index)>;
diff --git a/deps/v8/src/ic/binary-op-assembler.cc b/deps/v8/src/ic/binary-op-assembler.cc
index e8214b6a39..4deed77e75 100644
--- a/deps/v8/src/ic/binary-op-assembler.cc
+++ b/deps/v8/src/ic/binary-op-assembler.cc
@@ -674,5 +674,40 @@ TNode<Object> BinaryOpAssembler::Generate_BitwiseBinaryOpWithOptionalFeedback(
return result.value();
}
+TNode<Object>
+BinaryOpAssembler::Generate_BitwiseBinaryOpWithSmiOperandAndOptionalFeedback(
+ Operation bitwise_op, TNode<Object> left, TNode<Object> right,
+ const LazyNode<Context>& context, TVariable<Smi>* feedback) {
+ TNode<Smi> right_smi = CAST(right);
+ TVARIABLE(Object, result);
+ TVARIABLE(Smi, var_left_feedback);
+ TVARIABLE(Word32T, var_left_word32);
+ TVARIABLE(BigInt, var_left_bigint);
+ Label do_smi_op(this), if_bigint_mix(this, Label::kDeferred), done(this);
+
+ TaggedToWord32OrBigIntWithFeedback(context(), left, &do_smi_op,
+ &var_left_word32, &if_bigint_mix,
+ &var_left_bigint, &var_left_feedback);
+ BIND(&do_smi_op);
+ result =
+ BitwiseOp(var_left_word32.value(), SmiToInt32(right_smi), bitwise_op);
+ if (feedback) {
+ TNode<Smi> result_type = SelectSmiConstant(
+ TaggedIsSmi(result.value()), BinaryOperationFeedback::kSignedSmall,
+ BinaryOperationFeedback::kNumber);
+ *feedback = SmiOr(result_type, var_left_feedback.value());
+ }
+ Goto(&done);
+
+ BIND(&if_bigint_mix);
+ if (feedback) {
+ *feedback = var_left_feedback.value();
+ }
+ ThrowTypeError(context(), MessageTemplate::kBigIntMixedTypes);
+
+ BIND(&done);
+ return result.value();
+}
+
} // namespace internal
} // namespace v8
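The new Generate_BitwiseBinaryOpWithSmiOperandAndOptionalFeedback skips the tagged-to-word32 dispatch for the right operand because the caller guarantees it is already a Smi; only the left operand still needs the Number/BigInt split, and a BigInt left operand raises the mixed-types TypeError. The control flow, modelled in plain C++ (illustrative only; the real code is CSA):

    // Illustrative only; the real code is CSA, not C++ control flow.
    #include <cstdint>
    #include <stdexcept>
    #include <variant>

    using Word32 = int32_t;
    struct BigIntTag {};  // stand-in for a BigInt left operand

    Word32 BitwiseOrWithSmiRhs(std::variant<Word32, BigIntTag> left,
                               Word32 right_smi) {
      if (std::holds_alternative<BigIntTag>(left)) {
        // Mirrors the if_bigint_mix label: mixing BigInt and Number throws.
        throw std::runtime_error("Cannot mix BigInt and other types");
      }
      return std::get<Word32>(left) | right_smi;  // the do_smi_op fast path
    }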
diff --git a/deps/v8/src/ic/binary-op-assembler.h b/deps/v8/src/ic/binary-op-assembler.h
index 6dff319736..1f6f353ae3 100644
--- a/deps/v8/src/ic/binary-op-assembler.h
+++ b/deps/v8/src/ic/binary-op-assembler.h
@@ -56,10 +56,10 @@ class BinaryOpAssembler : public CodeStubAssembler {
TNode<Object> Generate_BitwiseOrWithFeedback(
const LazyNode<Context>& context, TNode<Object> left, TNode<Object> right,
TNode<UintPtrT> slot, const LazyNode<HeapObject>& maybe_feedback_vector,
- UpdateFeedbackMode update_feedback_mode, bool /* unused */) {
+ UpdateFeedbackMode update_feedback_mode, bool rhs_known_smi) {
TVARIABLE(Smi, feedback);
TNode<Object> result = Generate_BitwiseBinaryOpWithFeedback(
- Operation::kBitwiseOr, left, right, context, &feedback);
+ Operation::kBitwiseOr, left, right, context, &feedback, rhs_known_smi);
UpdateFeedback(feedback.value(), maybe_feedback_vector(), slot,
update_feedback_mode);
return result;
@@ -68,10 +68,10 @@ class BinaryOpAssembler : public CodeStubAssembler {
TNode<Object> Generate_BitwiseXorWithFeedback(
const LazyNode<Context>& context, TNode<Object> left, TNode<Object> right,
TNode<UintPtrT> slot, const LazyNode<HeapObject>& maybe_feedback_vector,
- UpdateFeedbackMode update_feedback_mode, bool /* unused */) {
+ UpdateFeedbackMode update_feedback_mode, bool rhs_known_smi) {
TVARIABLE(Smi, feedback);
TNode<Object> result = Generate_BitwiseBinaryOpWithFeedback(
- Operation::kBitwiseXor, left, right, context, &feedback);
+ Operation::kBitwiseXor, left, right, context, &feedback, rhs_known_smi);
UpdateFeedback(feedback.value(), maybe_feedback_vector(), slot,
update_feedback_mode);
return result;
@@ -80,10 +80,10 @@ class BinaryOpAssembler : public CodeStubAssembler {
TNode<Object> Generate_BitwiseAndWithFeedback(
const LazyNode<Context>& context, TNode<Object> left, TNode<Object> right,
TNode<UintPtrT> slot, const LazyNode<HeapObject>& maybe_feedback_vector,
- UpdateFeedbackMode update_feedback_mode, bool /* unused */) {
+ UpdateFeedbackMode update_feedback_mode, bool rhs_known_smi) {
TVARIABLE(Smi, feedback);
TNode<Object> result = Generate_BitwiseBinaryOpWithFeedback(
- Operation::kBitwiseAnd, left, right, context, &feedback);
+ Operation::kBitwiseAnd, left, right, context, &feedback, rhs_known_smi);
UpdateFeedback(feedback.value(), maybe_feedback_vector(), slot,
update_feedback_mode);
return result;
@@ -92,10 +92,10 @@ class BinaryOpAssembler : public CodeStubAssembler {
TNode<Object> Generate_ShiftLeftWithFeedback(
const LazyNode<Context>& context, TNode<Object> left, TNode<Object> right,
TNode<UintPtrT> slot, const LazyNode<HeapObject>& maybe_feedback_vector,
- UpdateFeedbackMode update_feedback_mode, bool /* unused */) {
+ UpdateFeedbackMode update_feedback_mode, bool rhs_known_smi) {
TVARIABLE(Smi, feedback);
TNode<Object> result = Generate_BitwiseBinaryOpWithFeedback(
- Operation::kShiftLeft, left, right, context, &feedback);
+ Operation::kShiftLeft, left, right, context, &feedback, rhs_known_smi);
UpdateFeedback(feedback.value(), maybe_feedback_vector(), slot,
update_feedback_mode);
return result;
@@ -104,10 +104,10 @@ class BinaryOpAssembler : public CodeStubAssembler {
TNode<Object> Generate_ShiftRightWithFeedback(
const LazyNode<Context>& context, TNode<Object> left, TNode<Object> right,
TNode<UintPtrT> slot, const LazyNode<HeapObject>& maybe_feedback_vector,
- UpdateFeedbackMode update_feedback_mode, bool /* unused */) {
+ UpdateFeedbackMode update_feedback_mode, bool rhs_known_smi) {
TVARIABLE(Smi, feedback);
TNode<Object> result = Generate_BitwiseBinaryOpWithFeedback(
- Operation::kShiftRight, left, right, context, &feedback);
+ Operation::kShiftRight, left, right, context, &feedback, rhs_known_smi);
UpdateFeedback(feedback.value(), maybe_feedback_vector(), slot,
update_feedback_mode);
return result;
@@ -116,10 +116,11 @@ class BinaryOpAssembler : public CodeStubAssembler {
TNode<Object> Generate_ShiftRightLogicalWithFeedback(
const LazyNode<Context>& context, TNode<Object> left, TNode<Object> right,
TNode<UintPtrT> slot, const LazyNode<HeapObject>& maybe_feedback_vector,
- UpdateFeedbackMode update_feedback_mode, bool /* unused */) {
+ UpdateFeedbackMode update_feedback_mode, bool rhs_known_smi) {
TVARIABLE(Smi, feedback);
TNode<Object> result = Generate_BitwiseBinaryOpWithFeedback(
- Operation::kShiftRightLogical, left, right, context, &feedback);
+ Operation::kShiftRightLogical, left, right, context, &feedback,
+ rhs_known_smi);
UpdateFeedback(feedback.value(), maybe_feedback_vector(), slot,
update_feedback_mode);
return result;
@@ -127,9 +128,13 @@ class BinaryOpAssembler : public CodeStubAssembler {
TNode<Object> Generate_BitwiseBinaryOpWithFeedback(
Operation bitwise_op, TNode<Object> left, TNode<Object> right,
- const LazyNode<Context>& context, TVariable<Smi>* feedback) {
- return Generate_BitwiseBinaryOpWithOptionalFeedback(bitwise_op, left, right,
- context, feedback);
+ const LazyNode<Context>& context, TVariable<Smi>* feedback,
+ bool rhs_known_smi) {
+ return rhs_known_smi
+ ? Generate_BitwiseBinaryOpWithSmiOperandAndOptionalFeedback(
+ bitwise_op, left, right, context, feedback)
+ : Generate_BitwiseBinaryOpWithOptionalFeedback(
+ bitwise_op, left, right, context, feedback);
}
TNode<Object> Generate_BitwiseBinaryOp(Operation bitwise_op,
@@ -156,6 +161,10 @@ class BinaryOpAssembler : public CodeStubAssembler {
TNode<Object> Generate_BitwiseBinaryOpWithOptionalFeedback(
Operation bitwise_op, TNode<Object> left, TNode<Object> right,
const LazyNode<Context>& context, TVariable<Smi>* feedback);
+
+ TNode<Object> Generate_BitwiseBinaryOpWithSmiOperandAndOptionalFeedback(
+ Operation bitwise_op, TNode<Object> left, TNode<Object> right,
+ const LazyNode<Context>& context, TVariable<Smi>* feedback);
};
} // namespace internal
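The `bool /* unused */` parameter already existed in these signatures, so existing call sites keep compiling unchanged; the difference is that a caller which has statically proven the right-hand operand to be a Smi can now request the Smi fast path. A hypothetical call-site sketch (the assembler state and the `context`, `left`, `right_smi`, `slot` and `feedback_vector` nodes are assumed to exist in the enclosing builtin):
// Hypothetical caller that already knows `right_smi` is a Smi.
BinaryOpAssembler binop_asm(state());
TNode<Object> result = binop_asm.Generate_BitwiseAndWithFeedback(
    [=] { return context; }, left, right_smi, slot,
    [=] { return feedback_vector; }, UpdateFeedbackMode::kOptionalFeedback,
    /* rhs_known_smi */ true);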
diff --git a/deps/v8/src/ic/ic-inl.h b/deps/v8/src/ic/ic-inl.h
index b338b427b6..2c1062c562 100644
--- a/deps/v8/src/ic/ic-inl.h
+++ b/deps/v8/src/ic/ic-inl.h
@@ -35,9 +35,9 @@ bool IC::IsHandler(MaybeObject object) {
}
bool IC::vector_needs_update() {
- if (state() == NO_FEEDBACK) return false;
- return (!vector_set_ &&
- (state() != MEGAMORPHIC || nexus()->GetKeyType() != ELEMENT));
+ if (state() == InlineCacheState::NO_FEEDBACK) return false;
+ return (!vector_set_ && (state() != InlineCacheState::MEGAMORPHIC ||
+ nexus()->GetKeyType() != IcCheckType::kElement));
}
} // namespace internal
diff --git a/deps/v8/src/ic/ic.cc b/deps/v8/src/ic/ic.cc
index b9f3d040a5..1eeba58612 100644
--- a/deps/v8/src/ic/ic.cc
+++ b/deps/v8/src/ic/ic.cc
@@ -51,6 +51,18 @@
namespace v8 {
namespace internal {
+// Aliases to avoid having to repeat the class.
+// With C++20 we could use a `using enum` declaration instead.
+constexpr InlineCacheState NO_FEEDBACK = InlineCacheState::NO_FEEDBACK;
+constexpr InlineCacheState UNINITIALIZED = InlineCacheState::UNINITIALIZED;
+constexpr InlineCacheState MONOMORPHIC = InlineCacheState::MONOMORPHIC;
+constexpr InlineCacheState RECOMPUTE_HANDLER =
+ InlineCacheState::RECOMPUTE_HANDLER;
+constexpr InlineCacheState POLYMORPHIC = InlineCacheState::POLYMORPHIC;
+constexpr InlineCacheState MEGAMORPHIC = InlineCacheState::MEGAMORPHIC;
+constexpr InlineCacheState MEGADOM = InlineCacheState::MEGADOM;
+constexpr InlineCacheState GENERIC = InlineCacheState::GENERIC;
+
char IC::TransitionMarkFromState(IC::State state) {
switch (state) {
case NO_FEEDBACK:
@@ -354,8 +366,8 @@ bool IC::ConfigureVectorState(IC::State new_state, Handle<Object> key) {
// Even though we don't change the feedback data, we still want to reset the
// profiler ticks. Real-world observations suggest that optimizing these
// functions doesn't improve performance.
- bool changed =
- nexus()->ConfigureMegamorphic(key->IsName() ? PROPERTY : ELEMENT);
+ bool changed = nexus()->ConfigureMegamorphic(
+ key->IsName() ? IcCheckType::kProperty : IcCheckType::kElement);
OnFeedbackChanged("Megamorphic");
return changed;
}
@@ -461,7 +473,7 @@ MaybeHandle<Object> LoadIC::Load(Handle<Object> object, Handle<Name> name,
(name_string->length() == 0)
? isolate()->factory()->anonymous_string()
: name_string;
- return TypeError(MessageTemplate::kInvalidPrivateBrand, object,
+ return TypeError(MessageTemplate::kInvalidPrivateBrandInstance, object,
class_name);
}
return TypeError(MessageTemplate::kInvalidPrivateMemberRead, object,
@@ -1117,7 +1129,7 @@ Handle<Object> LoadIC::ComputeHandler(LookupIterator* lookup) {
case LookupIterator::DATA: {
Handle<JSReceiver> holder = lookup->GetHolder<JSReceiver>();
- DCHECK_EQ(kData, lookup->property_details().kind());
+ DCHECK_EQ(PropertyKind::kData, lookup->property_details().kind());
Handle<Smi> smi_handler;
if (lookup->is_dictionary_holder()) {
if (holder->IsJSGlobalObject(isolate())) {
@@ -2111,7 +2123,7 @@ MaybeObjectHandle StoreIC::ComputeHandler(LookupIterator* lookup) {
Handle<JSObject> holder = lookup->GetHolder<JSObject>();
DCHECK(!receiver->IsAccessCheckNeeded() || lookup->name()->IsPrivate());
- DCHECK_EQ(kData, lookup->property_details().kind());
+ DCHECK_EQ(PropertyKind::kData, lookup->property_details().kind());
if (lookup->is_dictionary_holder()) {
if (holder->IsJSGlobalObject()) {
TRACE_HANDLER_STATS(isolate(), StoreIC_StoreGlobalDH);
@@ -3139,7 +3151,7 @@ static bool CanFastCloneObject(Handle<Map> map) {
for (InternalIndex i : map->IterateOwnDescriptors()) {
PropertyDetails details = descriptors.GetDetails(i);
Name key = descriptors.GetKey(i);
- if (details.kind() != kData || !details.IsEnumerable() ||
+ if (details.kind() != PropertyKind::kData || !details.IsEnumerable() ||
key.IsPrivateName()) {
return false;
}
diff --git a/deps/v8/src/ic/ic.h b/deps/v8/src/ic/ic.h
index bceb82e48d..e14dd7c17a 100644
--- a/deps/v8/src/ic/ic.h
+++ b/deps/v8/src/ic/ic.h
@@ -45,7 +45,7 @@ class IC {
void MarkRecomputeHandler(Handle<Object> name) {
DCHECK(RecomputeHandlerForName(name));
old_state_ = state_;
- state_ = RECOMPUTE_HANDLER;
+ state_ = InlineCacheState::RECOMPUTE_HANDLER;
}
bool IsAnyHas() const { return IsKeyedHasIC(); }
diff --git a/deps/v8/src/ic/keyed-store-generic.cc b/deps/v8/src/ic/keyed-store-generic.cc
index cbb48ea201..3df5943ddd 100644
--- a/deps/v8/src/ic/keyed-store-generic.cc
+++ b/deps/v8/src/ic/keyed-store-generic.cc
@@ -774,7 +774,7 @@ TNode<Map> KeyedStoreGenericAssembler::FindCandidateStoreICTransitionMapHandler(
// transition array is expected to be the first among the transitions
// with the same name.
// See TransitionArray::CompareDetails() for details.
- STATIC_ASSERT(kData == 0);
+ STATIC_ASSERT(static_cast<int>(PropertyKind::kData) == 0);
STATIC_ASSERT(NONE == 0);
const int kKeyToTargetOffset = (TransitionArray::kEntryTargetIndex -
TransitionArray::kEntryKeyIndex) *
diff --git a/deps/v8/src/init/bootstrapper.cc b/deps/v8/src/init/bootstrapper.cc
index 1c545d2461..947d8381d8 100644
--- a/deps/v8/src/init/bootstrapper.cc
+++ b/deps/v8/src/init/bootstrapper.cc
@@ -22,6 +22,7 @@
#include "src/extensions/statistics-extension.h"
#include "src/extensions/trigger-failure-extension.h"
#include "src/logging/runtime-call-stats-scope.h"
+#include "src/objects/instance-type.h"
#include "src/objects/objects.h"
#ifdef ENABLE_VTUNE_TRACEMARK
#include "src/extensions/vtunedomain-support-extension.h"
@@ -244,7 +245,7 @@ class Genesis {
Handle<JSFunction> InstallTypedArray(const char* name,
ElementsKind elements_kind,
- InstanceType type,
+ InstanceType constructor_type,
int rab_gsab_initial_map_index);
void InitializeMapCaches();
@@ -502,21 +503,30 @@ V8_NOINLINE Handle<JSFunction> InstallFunction(
instance_size, inobject_properties, prototype, call);
}
-// This installs an instance type (|constructor_type|) on the constructor map
-// which will be used for protector cell checks -- this is separate from |type|
-// which is used to set the instance type of the object created by this
-// constructor. If protector cell checks are not required, continue to use the
-// default JS_FUNCTION_TYPE by directly calling InstallFunction.
-V8_NOINLINE Handle<JSFunction> InstallConstructor(
- Isolate* isolate, Handle<JSObject> target, const char* name,
- InstanceType type, int instance_size, int inobject_properties,
- Handle<HeapObject> prototype, Builtin call, InstanceType constructor_type) {
- Handle<JSFunction> function = InstallFunction(
- isolate, target, isolate->factory()->InternalizeUtf8String(name), type,
- instance_size, inobject_properties, prototype, call);
+// This sets a constructor instance type on the constructor map which will be
+// used in IsXxxConstructor() predicates. Having such predicates helps figure
+// out whether a protector cell should be invalidated. If no protector cell
+// checks are required for the constructor, this function must not be used.
+// Note that this function doesn't create a copy of the constructor's map, so
+// it's better to set the constructor instance type after all the properties
+// have been added and the map is therefore already guaranteed to be unique.
+V8_NOINLINE void SetConstructorInstanceType(Isolate* isolate,
+ Handle<JSFunction> constructor,
+ InstanceType constructor_type) {
DCHECK(InstanceTypeChecker::IsJSFunction(constructor_type));
- function->map().set_instance_type(constructor_type);
- return function;
+ DCHECK_NE(constructor_type, JS_FUNCTION_TYPE);
+
+ Map map = constructor->map();
+
+ // Check we don't accidentally change one of the existing maps.
+ DCHECK_NE(map, *isolate->strict_function_map());
+ DCHECK_NE(map, *isolate->strict_function_with_readonly_prototype_map());
+  // A constructor's function map is always a root map, and thus we don't have
+  // to deal with updating the whole transition tree.
+ DCHECK(map.GetBackPointer().IsUndefined(isolate));
+ DCHECK_EQ(JS_FUNCTION_TYPE, map.instance_type());
+
+ map.set_instance_type(constructor_type);
}
V8_NOINLINE Handle<JSFunction> SimpleCreateFunction(Isolate* isolate,
@@ -828,13 +838,15 @@ void Genesis::CreateObjectFunction(Handle<JSFunction> empty_function) {
Handle<JSObject> object_function_prototype =
factory->NewFunctionPrototype(object_fun);
- Handle<Map> map =
- Map::Copy(isolate(), handle(object_function_prototype->map(), isolate()),
- "EmptyObjectPrototype");
- map->set_is_prototype_map(true);
- // Ban re-setting Object.prototype.__proto__ to prevent Proxy security bug
- map->set_is_immutable_proto(true);
- object_function_prototype->set_map(*map);
+ {
+ Handle<Map> map = Map::Copy(
+ isolate(), handle(object_function_prototype->map(), isolate()),
+ "EmptyObjectPrototype");
+ map->set_is_prototype_map(true);
+ // Ban re-setting Object.prototype.__proto__ to prevent Proxy security bug
+ map->set_is_immutable_proto(true);
+ object_function_prototype->set_map(*map);
+ }
// Complete setting up empty function.
{
@@ -1658,10 +1670,9 @@ void Genesis::InitializeGlobal(Handle<JSGlobalObject> global_object,
Handle<JSFunction> array_prototype_to_string_fun;
{ // --- A r r a y ---
- Handle<JSFunction> array_function = InstallConstructor(
+ Handle<JSFunction> array_function = InstallFunction(
isolate_, global, "Array", JS_ARRAY_TYPE, JSArray::kHeaderSize, 0,
- isolate_->initial_object_prototype(), Builtin::kArrayConstructor,
- JS_ARRAY_CONSTRUCTOR_TYPE);
+ isolate_->initial_object_prototype(), Builtin::kArrayConstructor);
array_function->shared().DontAdaptArguments();
// This seems a bit hackish, but we need to make sure Array.length
@@ -1707,6 +1718,8 @@ void Genesis::InitializeGlobal(Handle<JSGlobalObject> global_object,
1, false);
SimpleInstallFunction(isolate_, array_function, "of", Builtin::kArrayOf, 0,
false);
+ SetConstructorInstanceType(isolate_, array_function,
+ JS_ARRAY_CONSTRUCTOR_TYPE);
JSObject::AddProperty(isolate_, proto, factory->constructor_string(),
array_function, DONT_ENUM);
@@ -1898,7 +1911,6 @@ void Genesis::InitializeGlobal(Handle<JSGlobalObject> global_object,
// Install Number constants
const double kMaxValue = 1.7976931348623157e+308;
const double kMinValue = 5e-324;
- const double kMinSafeInteger = -kMaxSafeInteger;
const double kEPS = 2.220446049250313e-16;
InstallConstant(isolate_, number_fun, "MAX_VALUE",
@@ -2346,10 +2358,10 @@ void Genesis::InitializeGlobal(Handle<JSGlobalObject> global_object,
}
{ // -- P r o m i s e
- Handle<JSFunction> promise_fun = InstallConstructor(
+ Handle<JSFunction> promise_fun = InstallFunction(
isolate_, global, "Promise", JS_PROMISE_TYPE,
JSPromise::kSizeWithEmbedderFields, 0, factory->the_hole_value(),
- Builtin::kPromiseConstructor, JS_PROMISE_CONSTRUCTOR_TYPE);
+ Builtin::kPromiseConstructor);
InstallWithIntrinsicDefaultProto(isolate_, promise_fun,
Context::PROMISE_FUNCTION_INDEX);
@@ -2379,6 +2391,9 @@ void Genesis::InitializeGlobal(Handle<JSGlobalObject> global_object,
InstallFunctionWithBuiltinId(isolate_, promise_fun, "reject",
Builtin::kPromiseReject, 1, true);
+ SetConstructorInstanceType(isolate_, promise_fun,
+ JS_PROMISE_CONSTRUCTOR_TYPE);
+
// Setup %PromisePrototype%.
Handle<JSObject> prototype(
JSObject::cast(promise_fun->instance_prototype()), isolate());
@@ -2409,11 +2424,11 @@ void Genesis::InitializeGlobal(Handle<JSGlobalObject> global_object,
{ // -- R e g E x p
// Builtin functions for RegExp.prototype.
- Handle<JSFunction> regexp_fun = InstallConstructor(
+ Handle<JSFunction> regexp_fun = InstallFunction(
isolate_, global, "RegExp", JS_REG_EXP_TYPE,
JSRegExp::kHeaderSize + JSRegExp::kInObjectFieldCount * kTaggedSize,
JSRegExp::kInObjectFieldCount, factory->the_hole_value(),
- Builtin::kRegExpConstructor, JS_REG_EXP_CONSTRUCTOR_TYPE);
+ Builtin::kRegExpConstructor);
InstallWithIntrinsicDefaultProto(isolate_, regexp_fun,
Context::REGEXP_FUNCTION_INDEX);
Handle<SharedFunctionInfo> shared(regexp_fun->shared(), isolate_);
@@ -2574,6 +2589,8 @@ void Genesis::InitializeGlobal(Handle<JSGlobalObject> global_object,
INSTALL_CAPTURE_GETTER(9);
#undef INSTALL_CAPTURE_GETTER
}
+ SetConstructorInstanceType(isolate_, regexp_fun,
+ JS_REG_EXP_CONSTRUCTOR_TYPE);
DCHECK(regexp_fun->has_initial_map());
Handle<Map> initial_map(regexp_fun->initial_map(), isolate());
@@ -4020,7 +4037,7 @@ void Genesis::InitializeGlobal(Handle<JSGlobalObject> global_object,
Handle<JSFunction> Genesis::InstallTypedArray(const char* name,
ElementsKind elements_kind,
- InstanceType type,
+ InstanceType constructor_type,
int rab_gsab_initial_map_index) {
Handle<JSObject> global =
Handle<JSObject>(native_context()->global_object(), isolate());
@@ -4028,10 +4045,10 @@ Handle<JSFunction> Genesis::InstallTypedArray(const char* name,
Handle<JSObject> typed_array_prototype = isolate()->typed_array_prototype();
Handle<JSFunction> typed_array_function = isolate()->typed_array_function();
- Handle<JSFunction> result = InstallConstructor(
+ Handle<JSFunction> result = InstallFunction(
isolate(), global, name, JS_TYPED_ARRAY_TYPE,
JSTypedArray::kSizeWithEmbedderFields, 0, factory()->the_hole_value(),
- Builtin::kTypedArrayConstructor, type);
+ Builtin::kTypedArrayConstructor);
result->initial_map().set_elements_kind(elements_kind);
result->shared().DontAdaptArguments();
@@ -4045,6 +4062,11 @@ Handle<JSFunction> Genesis::InstallTypedArray(const char* name,
InstallConstant(isolate(), result, "BYTES_PER_ELEMENT", bytes_per_element);
+  // TODO(v8:11256, ishell): given the granularity of typed array constructor
+ // protectors, consider creating only one constructor instance type for all
+ // typed array constructors.
+ SetConstructorInstanceType(isolate_, result, constructor_type);
+
// Setup prototype object.
DCHECK(result->prototype().IsJSObject());
Handle<JSObject> prototype(JSObject::cast(result->prototype()), isolate());
@@ -4372,7 +4394,6 @@ void Genesis::InitializeCallSiteBuiltins() {
#define EMPTY_INITIALIZE_GLOBAL_FOR_FEATURE(id) \
void Genesis::InitializeGlobal_##id() {}
-EMPTY_INITIALIZE_GLOBAL_FOR_FEATURE(harmony_top_level_await)
EMPTY_INITIALIZE_GLOBAL_FOR_FEATURE(harmony_import_assertions)
EMPTY_INITIALIZE_GLOBAL_FOR_FEATURE(harmony_private_brand_checks)
EMPTY_INITIALIZE_GLOBAL_FOR_FEATURE(harmony_class_static_blocks)
@@ -4380,9 +4401,6 @@ EMPTY_INITIALIZE_GLOBAL_FOR_FEATURE(harmony_error_cause)
#ifdef V8_INTL_SUPPORT
EMPTY_INITIALIZE_GLOBAL_FOR_FEATURE(harmony_intl_best_fit_matcher)
-EMPTY_INITIALIZE_GLOBAL_FOR_FEATURE(harmony_intl_displaynames_v2)
-EMPTY_INITIALIZE_GLOBAL_FOR_FEATURE(harmony_intl_dateformat_day_period)
-EMPTY_INITIALIZE_GLOBAL_FOR_FEATURE(harmony_intl_more_timezone)
#endif // V8_INTL_SUPPORT
#undef EMPTY_INITIALIZE_GLOBAL_FOR_FEATURE
@@ -5872,7 +5890,7 @@ void Genesis::TransferNamedProperties(Handle<JSObject> from,
for (InternalIndex i : from->map().IterateOwnDescriptors()) {
PropertyDetails details = descs->GetDetails(i);
if (details.location() == PropertyLocation::kField) {
- if (details.kind() == kData) {
+ if (details.kind() == PropertyKind::kData) {
HandleScope inner(isolate());
Handle<Name> key = Handle<Name>(descs->GetKey(i), isolate());
// If the property is already there we skip it.
@@ -5883,13 +5901,13 @@ void Genesis::TransferNamedProperties(Handle<JSObject> from,
JSObject::AddProperty(isolate(), to, key, value,
details.attributes());
} else {
- DCHECK_EQ(kAccessor, details.kind());
+ DCHECK_EQ(PropertyKind::kAccessor, details.kind());
UNREACHABLE();
}
} else {
DCHECK_EQ(PropertyLocation::kDescriptor, details.location());
- DCHECK_EQ(kAccessor, details.kind());
+ DCHECK_EQ(PropertyKind::kAccessor, details.kind());
Handle<Name> key(descs->GetKey(i), isolate());
// If the property is already there we skip it.
if (PropertyAlreadyExists(isolate(), to, key)) continue;
@@ -5897,7 +5915,7 @@ void Genesis::TransferNamedProperties(Handle<JSObject> from,
DCHECK(!to->HasFastProperties());
// Add to dictionary.
Handle<Object> value(descs->GetStrongValue(i), isolate());
- PropertyDetails d(kAccessor, details.attributes(),
+ PropertyDetails d(PropertyKind::kAccessor, details.attributes(),
PropertyCellType::kMutable);
JSObject::SetNormalizedProperty(to, key, value, d);
}
@@ -5918,7 +5936,7 @@ void Genesis::TransferNamedProperties(Handle<JSObject> from,
Handle<Object> value(cell->value(), isolate());
if (value->IsTheHole(isolate())) continue;
PropertyDetails details = cell->property_details();
- if (details.kind() != kData) continue;
+ if (details.kind() != PropertyKind::kData) continue;
JSObject::AddProperty(isolate(), to, key, value, details.attributes());
}
@@ -5941,7 +5959,7 @@ void Genesis::TransferNamedProperties(Handle<JSObject> from,
DCHECK(!value->IsCell());
DCHECK(!value->IsTheHole(isolate()));
PropertyDetails details = properties->DetailsAt(entry);
- DCHECK_EQ(kData, details.kind());
+ DCHECK_EQ(PropertyKind::kData, details.kind());
JSObject::AddProperty(isolate(), to, key, value, details.attributes());
}
} else {
@@ -5965,7 +5983,7 @@ void Genesis::TransferNamedProperties(Handle<JSObject> from,
DCHECK(!value->IsCell());
DCHECK(!value->IsTheHole(isolate()));
PropertyDetails details = properties->DetailsAt(key_index);
- DCHECK_EQ(kData, details.kind());
+ DCHECK_EQ(PropertyKind::kData, details.kind());
JSObject::AddProperty(isolate(), to, key, value, details.attributes());
}
}
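The constructor-installation hunks above (Array, Promise, RegExp and the typed arrays) all switch to the same ordering with the new helper: install the constructor with the default JS_FUNCTION_TYPE map first, populate it, and only then stamp the dedicated constructor instance type. Condensed from the Array hunks, for illustration only:
// Install with the default JS_FUNCTION_TYPE map ...
Handle<JSFunction> array_function = InstallFunction(
    isolate_, global, "Array", JS_ARRAY_TYPE, JSArray::kHeaderSize, 0,
    isolate_->initial_object_prototype(), Builtin::kArrayConstructor);
// ... add the constructor's own properties, so its map becomes unique ...
SimpleInstallFunction(isolate_, array_function, "of", Builtin::kArrayOf, 0,
                      false);
// ... and only then set the instance type consulted by protector checks.
SetConstructorInstanceType(isolate_, array_function,
                           JS_ARRAY_CONSTRUCTOR_TYPE);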
diff --git a/deps/v8/src/init/heap-symbols.h b/deps/v8/src/init/heap-symbols.h
index f8af775712..2476fc5c6a 100644
--- a/deps/v8/src/init/heap-symbols.h
+++ b/deps/v8/src/init/heap-symbols.h
@@ -116,8 +116,7 @@
V(_, useGrouping_string, "useGrouping") \
V(_, unitDisplay_string, "unitDisplay") \
V(_, weekday_string, "weekday") \
- V(_, weekendEnd_string, "weekendEnd") \
- V(_, weekendStart_string, "weekendStart") \
+ V(_, weekend_string, "weekend") \
V(_, weekInfo_string, "weekInfo") \
V(_, yearName_string, "yearName")
#else // V8_INTL_SUPPORT
@@ -197,7 +196,7 @@
V(_, dot_home_object_string, ".home_object") \
V(_, dot_result_string, ".result") \
V(_, dot_repl_result_string, ".repl_result") \
- V(_, dot_static_home_object_string, "._static_home_object") \
+ V(_, dot_static_home_object_string, ".static_home_object") \
V(_, dot_string, ".") \
V(_, dot_switch_tag_string, ".switch_tag") \
V(_, dotAll_string, "dotAll") \
@@ -527,11 +526,13 @@
F(MC_EVACUATE_PROLOGUE) \
F(MC_EVACUATE_REBALANCE) \
F(MC_EVACUATE_UPDATE_POINTERS) \
+ F(MC_EVACUATE_UPDATE_POINTERS_CLIENT_HEAPS) \
F(MC_EVACUATE_UPDATE_POINTERS_PARALLEL) \
F(MC_EVACUATE_UPDATE_POINTERS_SLOTS_MAIN) \
F(MC_EVACUATE_UPDATE_POINTERS_TO_NEW_ROOTS) \
F(MC_EVACUATE_UPDATE_POINTERS_WEAK) \
F(MC_FINISH_SWEEP_ARRAY_BUFFERS) \
+ F(MC_MARK_CLIENT_HEAPS) \
F(MC_MARK_EMBEDDER_PROLOGUE) \
F(MC_MARK_EMBEDDER_TRACING) \
F(MC_MARK_EMBEDDER_TRACING_CLOSURE) \
@@ -575,6 +576,7 @@
F(MINOR_MC_MARKING_DEQUE) \
F(MINOR_MC_RESET_LIVENESS) \
F(MINOR_MC_SWEEPING) \
+ F(SAFEPOINT) \
F(SCAVENGER) \
F(SCAVENGER_COMPLETE_SWEEP_ARRAY_BUFFERS) \
F(SCAVENGER_FAST_PROMOTE) \
@@ -589,8 +591,10 @@
F(SCAVENGER_SCAVENGE_WEAK) \
F(SCAVENGER_SCAVENGE_FINALIZE) \
F(SCAVENGER_SWEEP_ARRAY_BUFFERS) \
+ F(TIME_TO_GLOBAL_SAFEPOINT) \
F(TIME_TO_SAFEPOINT) \
- F(UNMAPPER)
+ F(UNMAPPER) \
+ F(UNPARK)
#define TRACER_BACKGROUND_SCOPES(F) \
F(BACKGROUND_YOUNG_ARRAY_BUFFER_SWEEP) \
diff --git a/deps/v8/src/init/v8.cc b/deps/v8/src/init/v8.cc
index f7e16d369c..5172d5da9a 100644
--- a/deps/v8/src/init/v8.cc
+++ b/deps/v8/src/init/v8.cc
@@ -48,12 +48,9 @@ V8_DECLARE_ONCE(init_snapshot_once);
v8::Platform* V8::platform_ = nullptr;
-bool V8::Initialize() {
- InitializeOncePerProcess();
- return true;
-}
+void V8::Initialize() { base::CallOnce(&init_once, &InitializeOncePerProcess); }
-void V8::TearDown() {
+void V8::Dispose() {
#if V8_ENABLE_WEBASSEMBLY
wasm::WasmEngine::GlobalTearDown();
#endif // V8_ENABLE_WEBASSEMBLY
@@ -73,7 +70,7 @@ void V8::TearDown() {
FLAG_##flag = false; \
}
-void V8::InitializeOncePerProcessImpl() {
+void V8::InitializeOncePerProcess() {
CHECK(platform_);
#ifdef V8_VIRTUAL_MEMORY_CAGE
@@ -206,10 +203,6 @@ void V8::InitializeOncePerProcessImpl() {
ExternalReferenceTable::InitializeOncePerProcess();
}
-void V8::InitializeOncePerProcess() {
- base::CallOnce(&init_once, &InitializeOncePerProcessImpl);
-}
-
void V8::InitializePlatform(v8::Platform* platform) {
CHECK(!platform_);
CHECK(platform);
@@ -228,12 +221,12 @@ void V8::InitializePlatform(v8::Platform* platform) {
bool V8::InitializeVirtualMemoryCage() {
// Platform must have been initialized already.
CHECK(platform_);
- v8::PageAllocator* page_allocator = GetPlatformPageAllocator();
- return GetProcessWideVirtualMemoryCage()->Initialize(page_allocator);
+ v8::VirtualAddressSpace* vas = GetPlatformVirtualAddressSpace();
+ return GetProcessWideVirtualMemoryCage()->Initialize(vas);
}
#endif
-void V8::ShutdownPlatform() {
+void V8::DisposePlatform() {
CHECK(platform_);
#if defined(V8_OS_WIN) && defined(V8_ENABLE_SYSTEM_INSTRUMENTATION)
if (FLAG_enable_system_instrumentation) {
diff --git a/deps/v8/src/init/v8.h b/deps/v8/src/init/v8.h
index bbde9bfd13..edd5be247d 100644
--- a/deps/v8/src/init/v8.h
+++ b/deps/v8/src/init/v8.h
@@ -20,8 +20,8 @@ class V8 : public AllStatic {
public:
// Global actions.
- static bool Initialize();
- static void TearDown();
+ static void Initialize();
+ static void Dispose();
// Report process out of memory. Implementation found in api.cc.
// This function will not return, but will terminate the execution.
@@ -34,7 +34,7 @@ class V8 : public AllStatic {
#endif
static void InitializePlatform(v8::Platform* platform);
- static void ShutdownPlatform();
+ static void DisposePlatform();
V8_EXPORT_PRIVATE static v8::Platform* GetCurrentPlatform();
// Replaces the current platform with the given platform.
// Should be used only for testing.
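Taken together, the renames above leave the process-wide lifecycle of the internal V8 class looking roughly as follows (ordering sketch only; isolate setup and error handling omitted):
// Rough ordering of the renamed internal entry points.
V8::InitializePlatform(platform);  // must precede Initialize(), which CHECKs it
V8::Initialize();                  // now idempotent via base::CallOnce
// ... create isolates, compile and run scripts ...
V8::Dispose();                     // formerly V8::TearDown()
V8::DisposePlatform();             // formerly V8::ShutdownPlatform()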
diff --git a/deps/v8/src/inspector/DEPS b/deps/v8/src/inspector/DEPS
index 08b97ea3e9..711f154947 100644
--- a/deps/v8/src/inspector/DEPS
+++ b/deps/v8/src/inspector/DEPS
@@ -19,6 +19,7 @@ include_rules = [
"+src/debug/debug-interface.h",
"+src/debug/interface-types.h",
"+src/base/vector.h",
+ "+src/base/enum-set.h",
"+third_party/inspector_protocol/crdtp",
"+../../third_party/inspector_protocol/crdtp",
]
diff --git a/deps/v8/src/inspector/string-util.h b/deps/v8/src/inspector/string-util.h
index 8b81d21e0d..7791d9e481 100644
--- a/deps/v8/src/inspector/string-util.h
+++ b/deps/v8/src/inspector/string-util.h
@@ -71,6 +71,10 @@ String16 toProtocolString(v8::Isolate*, v8::Local<v8::String>);
String16 toProtocolStringWithTypeCheck(v8::Isolate*, v8::Local<v8::Value>);
String16 toString16(const StringView&);
StringView toStringView(const String16&);
+template <size_t N>
+StringView toStringView(const char (&str)[N]) {
+  return StringView(reinterpret_cast<const uint8_t*>(str), N - 1);
+}
bool stringViewStartsWith(const StringView&, const char*);
// Creates a string buffer instance which owns |str|, a 16 bit string.
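With the array-reference form of the template above, the literal's size is deduced at compile time and the implicit null terminator is excluded from the view; for illustration:
// "Promise.then" has type const char[13], so N deduces to 13 and the view
// covers the 12 characters without the trailing '\0'.
StringView view = toStringView("Promise.then");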
diff --git a/deps/v8/src/inspector/v8-debugger-agent-impl.cc b/deps/v8/src/inspector/v8-debugger-agent-impl.cc
index 9ee34f5d9e..21c7f5f3c4 100644
--- a/deps/v8/src/inspector/v8-debugger-agent-impl.cc
+++ b/deps/v8/src/inspector/v8-debugger-agent-impl.cc
@@ -351,6 +351,17 @@ Response isValidRangeOfPositions(std::vector<std::pair<int, int>>& positions) {
}
return Response::Success();
}
+
+bool hitBreakReasonEncodedAsOther(v8::debug::BreakReasons breakReasons) {
+ // The listed break reasons are not explicitly encoded in CDP when
+ // reporting the break. They are summarized as 'other'.
+ v8::debug::BreakReasons otherBreakReasons(
+ {v8::debug::BreakReason::kStep,
+ v8::debug::BreakReason::kDebuggerStatement,
+ v8::debug::BreakReason::kScheduled, v8::debug::BreakReason::kAsyncStep,
+ v8::debug::BreakReason::kAlreadyPaused});
+ return breakReasons.contains_any(otherBreakReasons);
+}
} // namespace
V8DebuggerAgentImpl::V8DebuggerAgentImpl(
@@ -382,7 +393,8 @@ void V8DebuggerAgentImpl::enableImpl() {
if (isPaused()) {
didPause(0, v8::Local<v8::Value>(), std::vector<v8::debug::BreakpointId>(),
- v8::debug::kException, false, false, false);
+ v8::debug::kException, false,
+ v8::debug::BreakReasons({v8::debug::BreakReason::kAlreadyPaused}));
}
}
@@ -1128,14 +1140,14 @@ void V8DebuggerAgentImpl::cancelPauseOnNextStatement() {
Response V8DebuggerAgentImpl::pause() {
if (!enabled()) return Response::ServerError(kDebuggerNotEnabled);
if (isPaused()) return Response::Success();
+
+ pushBreakDetails(protocol::Debugger::Paused::ReasonEnum::Other, nullptr);
if (m_debugger->canBreakProgram()) {
m_debugger->interruptAndBreak(m_session->contextGroupId());
} else {
- if (m_breakReason.empty()) {
- m_debugger->setPauseOnNextCall(true, m_session->contextGroupId());
- }
- pushBreakDetails(protocol::Debugger::Paused::ReasonEnum::Other, nullptr);
+ m_debugger->setPauseOnNextCall(true, m_session->contextGroupId());
}
+
return Response::Success();
}
@@ -1744,19 +1756,19 @@ void V8DebuggerAgentImpl::setScriptInstrumentationBreakpointIfNeeded(
void V8DebuggerAgentImpl::didPause(
int contextId, v8::Local<v8::Value> exception,
const std::vector<v8::debug::BreakpointId>& hitBreakpoints,
- v8::debug::ExceptionType exceptionType, bool isUncaught, bool isOOMBreak,
- bool isAssert) {
+ v8::debug::ExceptionType exceptionType, bool isUncaught,
+ v8::debug::BreakReasons breakReasons) {
v8::HandleScope handles(m_isolate);
std::vector<BreakReason> hitReasons;
- if (isOOMBreak) {
+ if (breakReasons.contains(v8::debug::BreakReason::kOOM)) {
hitReasons.push_back(
std::make_pair(protocol::Debugger::Paused::ReasonEnum::OOM, nullptr));
- } else if (isAssert) {
+ } else if (breakReasons.contains(v8::debug::BreakReason::kAssert)) {
hitReasons.push_back(std::make_pair(
protocol::Debugger::Paused::ReasonEnum::Assert, nullptr));
- } else if (!exception.IsEmpty()) {
+ } else if (breakReasons.contains(v8::debug::BreakReason::kException)) {
InjectedScript* injectedScript = nullptr;
m_session->findInjectedScript(contextId, injectedScript);
if (injectedScript) {
@@ -1782,7 +1794,7 @@ void V8DebuggerAgentImpl::didPause(
auto hitBreakpointIds = std::make_unique<Array<String16>>();
bool hitInstrumentationBreakpoint = false;
-
+ bool hitRegularBreakpoint = false;
for (const auto& id : hitBreakpoints) {
auto it = m_breakpointsOnScriptRun.find(id);
if (it != m_breakpointsOnScriptRun.end()) {
@@ -1808,9 +1820,12 @@ void V8DebuggerAgentImpl::didPause(
hitBreakpointIds->emplace_back(breakpointId);
BreakpointType type;
parseBreakpointId(breakpointId, &type);
- if (type != BreakpointType::kDebugCommand) continue;
- hitReasons.push_back(std::make_pair(
- protocol::Debugger::Paused::ReasonEnum::DebugCommand, nullptr));
+ if (type == BreakpointType::kDebugCommand) {
+ hitReasons.push_back(std::make_pair(
+ protocol::Debugger::Paused::ReasonEnum::DebugCommand, nullptr));
+ } else {
+ hitRegularBreakpoint = true;
+ }
}
for (size_t i = 0; i < m_breakReason.size(); ++i) {
@@ -1818,6 +1833,22 @@ void V8DebuggerAgentImpl::didPause(
}
clearBreakDetails();
+ // Make sure that we only include (other: nullptr) once.
+ const BreakReason otherHitReason =
+ std::make_pair(protocol::Debugger::Paused::ReasonEnum::Other, nullptr);
+ const bool otherBreakReasons =
+ hitRegularBreakpoint || hitBreakReasonEncodedAsOther(breakReasons);
+ if (otherBreakReasons && std::find(hitReasons.begin(), hitReasons.end(),
+ otherHitReason) == hitReasons.end()) {
+ hitReasons.push_back(
+ std::make_pair(protocol::Debugger::Paused::ReasonEnum::Other, nullptr));
+ }
+
+  // We should always know why we pause: either the pause relates to this agent
+  // (`hitReasons` is non-empty), or it relates to another agent (a breakpoint
+  // was hit there, or a triggered pause was scheduled by another agent).
+ DCHECK(hitReasons.size() > 0 || !hitBreakpoints.empty() ||
+ breakReasons.contains(v8::debug::BreakReason::kAgent));
String16 breakReason = protocol::Debugger::Paused::ReasonEnum::Other;
std::unique_ptr<protocol::DictionaryValue> breakAuxData;
if (hitReasons.size() == 1) {
diff --git a/deps/v8/src/inspector/v8-debugger-agent-impl.h b/deps/v8/src/inspector/v8-debugger-agent-impl.h
index e54edc39f1..693d2bed91 100644
--- a/deps/v8/src/inspector/v8-debugger-agent-impl.h
+++ b/deps/v8/src/inspector/v8-debugger-agent-impl.h
@@ -10,9 +10,9 @@
#include <unordered_map>
#include <vector>
+#include "src/base/enum-set.h"
#include "src/base/macros.h"
#include "src/debug/debug-interface.h"
-#include "src/debug/interface-types.h"
#include "src/inspector/protocol/Debugger.h"
#include "src/inspector/protocol/Forward.h"
@@ -152,7 +152,7 @@ class V8DebuggerAgentImpl : public protocol::Debugger::Backend {
void didPause(int contextId, v8::Local<v8::Value> exception,
const std::vector<v8::debug::BreakpointId>& hitBreakpoints,
v8::debug::ExceptionType exceptionType, bool isUncaught,
- bool isOOMBreak, bool isAssert);
+ v8::debug::BreakReasons breakReasons);
void didContinue();
void didParseSource(std::unique_ptr<V8DebuggerScript>, bool success);
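v8::debug::BreakReasons is a base::EnumSet over v8::debug::BreakReason (hence the new enum-set.h include), so the operations used in these hunks reduce to bit tests on a small integer; roughly (`reasons` is a local for the sketch, `otherBreakReasons` as built in hitBreakReasonEncodedAsOther above):
// Sketch of the EnumSet operations used above; bitset semantics assumed.
v8::debug::BreakReasons reasons({v8::debug::BreakReason::kStep});
reasons.Add(v8::debug::BreakReason::kAgent);                // set one more bit
bool other = reasons.contains_any(otherBreakReasons);       // any overlap?
bool oom = reasons.contains(v8::debug::BreakReason::kOOM);  // single-bit test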
diff --git a/deps/v8/src/inspector/v8-debugger.cc b/deps/v8/src/inspector/v8-debugger.cc
index 8cd51ea1ba..e3f724b5a2 100644
--- a/deps/v8/src/inspector/v8-debugger.cc
+++ b/deps/v8/src/inspector/v8-debugger.cc
@@ -238,8 +238,8 @@ void V8Debugger::breakProgramOnAssert(int targetContextGroupId) {
if (!canBreakProgram()) return;
DCHECK(targetContextGroupId);
m_targetContextGroupId = targetContextGroupId;
- m_scheduledAssertBreak = true;
- v8::debug::BreakRightNow(m_isolate);
+ v8::debug::BreakRightNow(
+ m_isolate, v8::debug::BreakReasons({v8::debug::BreakReason::kAssert}));
}
void V8Debugger::stepIntoStatement(int targetContextGroupId,
@@ -397,6 +397,7 @@ void V8Debugger::clearContinueToLocation() {
void V8Debugger::handleProgramBreak(
v8::Local<v8::Context> pausedContext, v8::Local<v8::Value> exception,
const std::vector<v8::debug::BreakpointId>& breakpointIds,
+ v8::debug::BreakReasons breakReasons,
v8::debug::ExceptionType exceptionType, bool isUncaught) {
// Don't allow nested breaks.
if (isPaused()) return;
@@ -406,6 +407,16 @@ void V8Debugger::handleProgramBreak(
v8::debug::PrepareStep(m_isolate, v8::debug::StepOut);
return;
}
+
+ DCHECK(hasScheduledBreakOnNextFunctionCall() ==
+ (m_taskWithScheduledBreakPauseRequested ||
+ m_externalAsyncTaskPauseRequested || m_pauseOnNextCallRequested));
+ if (m_taskWithScheduledBreakPauseRequested ||
+ m_externalAsyncTaskPauseRequested)
+ breakReasons.Add(v8::debug::BreakReason::kAsyncStep);
+ if (m_pauseOnNextCallRequested)
+ breakReasons.Add(v8::debug::BreakReason::kAgent);
+
m_targetContextGroupId = 0;
m_pauseOnNextCallRequested = false;
m_pauseOnAsyncCall = false;
@@ -414,8 +425,10 @@ void V8Debugger::handleProgramBreak(
m_taskWithScheduledBreakPauseRequested = false;
bool scheduledOOMBreak = m_scheduledOOMBreak;
- bool scheduledAssertBreak = m_scheduledAssertBreak;
+ DCHECK(scheduledOOMBreak ==
+ breakReasons.contains(v8::debug::BreakReason::kOOM));
bool hasAgents = false;
+
m_inspector->forEachSession(
contextGroupId,
[&scheduledOOMBreak, &hasAgents](V8InspectorSessionImpl* session) {
@@ -434,84 +447,14 @@ void V8Debugger::handleProgramBreak(
DCHECK(contextGroupId);
m_pausedContextGroupId = contextGroupId;
- // Collect all instrumentation breakpoints.
- std::set<v8::debug::BreakpointId> instrumentationBreakpointIdSet;
- m_inspector->forEachSession(
- contextGroupId, [&breakpointIds, &instrumentationBreakpointIdSet](
- V8InspectorSessionImpl* session) {
- if (!session->debuggerAgent()->acceptsPause(false)) return;
-
- const std::vector<v8::debug::BreakpointId>
- sessionInstrumentationBreakpoints =
- session->debuggerAgent()->instrumentationBreakpointIdsMatching(
- breakpointIds);
-
- instrumentationBreakpointIdSet.insert(
- sessionInstrumentationBreakpoints.begin(),
- sessionInstrumentationBreakpoints.end());
- });
- std::vector<v8::debug::BreakpointId> instrumentationBreakpointIds(
- instrumentationBreakpointIdSet.begin(),
- instrumentationBreakpointIdSet.end());
-
- const bool regularBreakpointHit =
- instrumentationBreakpointIds.size() < breakpointIds.size();
- const bool hasNonInstrumentationBreakReason =
- regularBreakpointHit || hasScheduledBreakOnNextFunctionCall() ||
- scheduledAssertBreak || scheduledOOMBreak || !exception.IsEmpty();
-
- std::vector<v8::debug::BreakpointId> regularBreakpointIds = breakpointIds;
- if (hasNonInstrumentationBreakReason &&
- !instrumentationBreakpointIds.empty()) {
- // Send out pause events for instrumentation breakpoints.
- m_inspector->forEachSession(
- contextGroupId, [&pausedContext, &instrumentationBreakpointIds](
- V8InspectorSessionImpl* session) {
- if (!session->debuggerAgent()->acceptsPause(false)) return;
- session->debuggerAgent()->didPause(
- InspectedContext::contextId(pausedContext), {},
- instrumentationBreakpointIds,
- v8::debug::ExceptionType::kException, false, false, false);
- });
- {
- v8::Context::Scope scope(pausedContext);
- m_inspector->client()->runMessageLoopOnPause(contextGroupId);
- }
- m_inspector->forEachSession(contextGroupId,
- [](V8InspectorSessionImpl* session) {
- if (session->debuggerAgent()->enabled()) {
- session->debuggerAgent()->didContinue();
- }
- });
-
- // Remove instrumentation breakpoints from regular breakpoints, as they
- // have already been reported.
- for (const v8::debug::BreakpointId& breakpointId :
- instrumentationBreakpointIds) {
- auto iter = std::find(regularBreakpointIds.begin(),
- regularBreakpointIds.end(), breakpointId);
- if (iter != regularBreakpointIds.end()) {
- regularBreakpointIds.erase(iter);
- }
- }
- }
-
- // If instrumentation breakpoints did coincide with other known reasons, then
- // the remaining reasons are summarized in the following pause event.
- // If we, however, do NOT know whether instrumentation breakpoints coincided
- // with other reasons (hasNonInstrumentationBreakReason == false), then send
- // instrumentation breakpoints here. The reason for this is that we do not
- // want to trigger two pause events if we only break because of an
- // instrumentation.
m_inspector->forEachSession(
- contextGroupId, [&pausedContext, &exception, &regularBreakpointIds,
- &exceptionType, &isUncaught, &scheduledOOMBreak,
- &scheduledAssertBreak](V8InspectorSessionImpl* session) {
+ contextGroupId,
+ [&pausedContext, &exception, &breakpointIds, &exceptionType, &isUncaught,
+ &scheduledOOMBreak, &breakReasons](V8InspectorSessionImpl* session) {
if (session->debuggerAgent()->acceptsPause(scheduledOOMBreak)) {
session->debuggerAgent()->didPause(
InspectedContext::contextId(pausedContext), exception,
- regularBreakpointIds, exceptionType, isUncaught,
- scheduledOOMBreak, scheduledAssertBreak);
+ breakpointIds, exceptionType, isUncaught, breakReasons);
}
});
{
@@ -527,7 +470,6 @@ void V8Debugger::handleProgramBreak(
if (m_scheduledOOMBreak) m_isolate->RestoreOriginalHeapLimit();
m_scheduledOOMBreak = false;
- m_scheduledAssertBreak = false;
}
namespace {
@@ -550,7 +492,14 @@ size_t V8Debugger::nearHeapLimitCallback(void* data, size_t current_heap_limit,
thisPtr->m_targetContextGroupId =
context.IsEmpty() ? 0 : thisPtr->m_inspector->contextGroupId(context);
thisPtr->m_isolate->RequestInterrupt(
- [](v8::Isolate* isolate, void*) { v8::debug::BreakRightNow(isolate); },
+ [](v8::Isolate* isolate, void*) {
+        // There's a redundancy between setting `m_scheduledOOMBreak` and
+        // passing the reason along in `BreakRightNow`: `m_scheduledOOMBreak`
+        // is used elsewhere, so we cannot remove it, but for the sake of
+        // being explicit we still pass the break reason along.
+ v8::debug::BreakRightNow(
+ isolate, v8::debug::BreakReasons({v8::debug::BreakReason::kOOM}));
+ },
nullptr);
return HeapLimitForDebugging(initial_heap_limit);
}
@@ -580,8 +529,10 @@ void V8Debugger::ScriptCompiled(v8::Local<v8::debug::Script> script,
void V8Debugger::BreakProgramRequested(
v8::Local<v8::Context> pausedContext,
- const std::vector<v8::debug::BreakpointId>& break_points_hit) {
- handleProgramBreak(pausedContext, v8::Local<v8::Value>(), break_points_hit);
+ const std::vector<v8::debug::BreakpointId>& break_points_hit,
+ v8::debug::BreakReasons reasons) {
+ handleProgramBreak(pausedContext, v8::Local<v8::Value>(), break_points_hit,
+ reasons);
}
void V8Debugger::ExceptionThrown(v8::Local<v8::Context> pausedContext,
@@ -589,8 +540,10 @@ void V8Debugger::ExceptionThrown(v8::Local<v8::Context> pausedContext,
v8::Local<v8::Value> promise, bool isUncaught,
v8::debug::ExceptionType exceptionType) {
std::vector<v8::debug::BreakpointId> break_points_hit;
- handleProgramBreak(pausedContext, exception, break_points_hit, exceptionType,
- isUncaught);
+ handleProgramBreak(
+ pausedContext, exception, break_points_hit,
+ v8::debug::BreakReasons({v8::debug::BreakReason::kException}),
+ exceptionType, isUncaught);
}
bool V8Debugger::IsFunctionBlackboxed(v8::Local<v8::debug::Script> script,
@@ -641,15 +594,15 @@ void V8Debugger::AsyncEventOccurred(v8::debug::DebugAsyncActionType type,
void* task = reinterpret_cast<void*>(id * 2 + 1);
switch (type) {
case v8::debug::kDebugPromiseThen:
- asyncTaskScheduledForStack("Promise.then", task, false);
+ asyncTaskScheduledForStack(toStringView("Promise.then"), task, false);
if (!isBlackboxed) asyncTaskCandidateForStepping(task);
break;
case v8::debug::kDebugPromiseCatch:
- asyncTaskScheduledForStack("Promise.catch", task, false);
+ asyncTaskScheduledForStack(toStringView("Promise.catch"), task, false);
if (!isBlackboxed) asyncTaskCandidateForStepping(task);
break;
case v8::debug::kDebugPromiseFinally:
- asyncTaskScheduledForStack("Promise.finally", task, false);
+ asyncTaskScheduledForStack(toStringView("Promise.finally"), task, false);
if (!isBlackboxed) asyncTaskCandidateForStepping(task);
break;
case v8::debug::kDebugWillHandle:
@@ -662,7 +615,7 @@ void V8Debugger::AsyncEventOccurred(v8::debug::DebugAsyncActionType type,
break;
case v8::debug::kAsyncFunctionSuspended: {
if (m_asyncTaskStacks.find(task) == m_asyncTaskStacks.end()) {
- asyncTaskScheduledForStack("await", task, true, true);
+ asyncTaskScheduledForStack(toStringView("await"), task, true, true);
}
auto stackIt = m_asyncTaskStacks.find(task);
if (stackIt != m_asyncTaskStacks.end() && !stackIt->second.expired()) {
@@ -961,7 +914,7 @@ void V8Debugger::externalAsyncTaskFinished(const V8StackTraceId& parent) {
void V8Debugger::asyncTaskScheduled(const StringView& taskName, void* task,
bool recurring) {
- asyncTaskScheduledForStack(toString16(taskName), task, recurring);
+ asyncTaskScheduledForStack(taskName, task, recurring);
asyncTaskCandidateForStepping(task);
}
@@ -980,13 +933,13 @@ void V8Debugger::asyncTaskFinished(void* task) {
asyncTaskFinishedForStack(task);
}
-void V8Debugger::asyncTaskScheduledForStack(const String16& taskName,
+void V8Debugger::asyncTaskScheduledForStack(const StringView& taskName,
void* task, bool recurring,
bool skipTopFrame) {
if (!m_maxAsyncCallStackDepth) return;
v8::HandleScope scope(m_isolate);
std::shared_ptr<AsyncStackTrace> asyncStack = AsyncStackTrace::capture(
- this, taskName, V8StackTraceImpl::maxCallStackSizeToCapture,
+ this, toString16(taskName), V8StackTraceImpl::maxCallStackSizeToCapture,
skipTopFrame);
if (asyncStack) {
m_asyncTaskStacks[task] = asyncStack;
@@ -1124,6 +1077,7 @@ void V8Debugger::collectOldAsyncStacksIfNeeded() {
m_allAsyncStacks.pop_front();
}
cleanupExpiredWeakPointers(m_asyncTaskStacks);
+ cleanupExpiredWeakPointers(m_cachedStackFrames);
cleanupExpiredWeakPointers(m_storedStackTraces);
for (auto it = m_recurringTasks.begin(); it != m_recurringTasks.end();) {
if (m_asyncTaskStacks.find(*it) == m_asyncTaskStacks.end()) {
@@ -1136,8 +1090,30 @@ void V8Debugger::collectOldAsyncStacksIfNeeded() {
std::shared_ptr<StackFrame> V8Debugger::symbolize(
v8::Local<v8::StackFrame> v8Frame) {
- CHECK(!v8Frame.IsEmpty());
- return std::make_shared<StackFrame>(isolate(), v8Frame);
+ int scriptId = v8Frame->GetScriptId();
+ int lineNumber = v8Frame->GetLineNumber() - 1;
+ int columnNumber = v8Frame->GetColumn() - 1;
+ CachedStackFrameKey key{scriptId, lineNumber, columnNumber};
+ auto functionName = toProtocolString(isolate(), v8Frame->GetFunctionName());
+ auto it = m_cachedStackFrames.find(key);
+ if (it != m_cachedStackFrames.end() && !it->second.expired()) {
+ auto stackFrame = it->second.lock();
+ if (stackFrame->functionName() == functionName) {
+ DCHECK_EQ(
+ stackFrame->sourceURL(),
+ toProtocolString(isolate(), v8Frame->GetScriptNameOrSourceURL()));
+ return stackFrame;
+ }
+ }
+ auto sourceURL =
+ toProtocolString(isolate(), v8Frame->GetScriptNameOrSourceURL());
+ auto hasSourceURLComment =
+ v8Frame->GetScriptName() != v8Frame->GetScriptNameOrSourceURL();
+ auto stackFrame = std::make_shared<StackFrame>(
+ std::move(functionName), scriptId, std::move(sourceURL), lineNumber,
+ columnNumber, hasSourceURLComment);
+ m_cachedStackFrames.emplace(key, stackFrame);
+ return stackFrame;
}
void V8Debugger::setMaxAsyncTaskStacksForTest(int limit) {
diff --git a/deps/v8/src/inspector/v8-debugger.h b/deps/v8/src/inspector/v8-debugger.h
index c5095b6505..6394cfc63d 100644
--- a/deps/v8/src/inspector/v8-debugger.h
+++ b/deps/v8/src/inspector/v8-debugger.h
@@ -142,6 +142,7 @@ class V8Debugger : public v8::debug::DebugDelegate,
void handleProgramBreak(
v8::Local<v8::Context> pausedContext, v8::Local<v8::Value> exception,
const std::vector<v8::debug::BreakpointId>& hitBreakpoints,
+ v8::debug::BreakReasons break_reasons,
v8::debug::ExceptionType exception_type = v8::debug::kException,
bool isUncaught = false);
@@ -160,7 +161,7 @@ class V8Debugger : public v8::debug::DebugDelegate,
v8::MaybeLocal<v8::Array> collectionsEntries(v8::Local<v8::Context> context,
v8::Local<v8::Value> value);
- void asyncTaskScheduledForStack(const String16& taskName, void* task,
+ void asyncTaskScheduledForStack(const StringView& taskName, void* task,
bool recurring, bool skipTopFrame = false);
void asyncTaskCanceledForStack(void* task);
void asyncTaskStartedForStack(void* task);
@@ -178,7 +179,8 @@ class V8Debugger : public v8::debug::DebugDelegate,
bool has_compile_error) override;
void BreakProgramRequested(
v8::Local<v8::Context> paused_context,
- const std::vector<v8::debug::BreakpointId>& break_points_hit) override;
+ const std::vector<v8::debug::BreakpointId>& break_points_hit,
+ v8::debug::BreakReasons break_reasons) override;
void ExceptionThrown(v8::Local<v8::Context> paused_context,
v8::Local<v8::Value> exception,
v8::Local<v8::Value> promise, bool is_uncaught,
@@ -203,13 +205,42 @@ class V8Debugger : public v8::debug::DebugDelegate,
int m_ignoreScriptParsedEventsCounter;
size_t m_originalHeapLimit = 0;
bool m_scheduledOOMBreak = false;
- bool m_scheduledAssertBreak = false;
int m_targetContextGroupId = 0;
int m_pausedContextGroupId = 0;
int m_continueToLocationBreakpointId;
String16 m_continueToLocationTargetCallFrames;
std::unique_ptr<V8StackTraceImpl> m_continueToLocationStack;
+  // We cache symbolized stack frames by (scriptId, lineNumber, columnNumber)
+  // to reduce memory pressure for huge web apps with lots of deep async
+  // stacks.
+ struct CachedStackFrameKey {
+ int scriptId;
+ int lineNumber;
+ int columnNumber;
+
+ struct Equal {
+ bool operator()(CachedStackFrameKey const& a,
+ CachedStackFrameKey const& b) const {
+ return a.scriptId == b.scriptId && a.lineNumber == b.lineNumber &&
+ a.columnNumber == b.columnNumber;
+ }
+ };
+
+ struct Hash {
+ size_t operator()(CachedStackFrameKey const& key) const {
+ size_t code = 0;
+ code = code * 31 + key.scriptId;
+ code = code * 31 + key.lineNumber;
+ code = code * 31 + key.columnNumber;
+ return code;
+ }
+ };
+ };
+ std::unordered_map<CachedStackFrameKey, std::weak_ptr<StackFrame>,
+ CachedStackFrameKey::Hash, CachedStackFrameKey::Equal>
+ m_cachedStackFrames;
+
using AsyncTaskToStackTrace =
std::unordered_map<void*, std::weak_ptr<AsyncStackTrace>>;
AsyncTaskToStackTrace m_asyncTaskStacks;
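Because the cache above holds weak_ptrs, a cached frame lives only as long as some stack trace still references it; symbolize() in the v8-debugger.cc hunk follows the usual lookup-or-insert pattern, and expired entries are swept in collectOldAsyncStacksIfNeeded(). Condensed, for illustration:
// Reuse a live cached frame, otherwise remember the freshly built one;
// cleanupExpiredWeakPointers(m_cachedStackFrames) later drops dead entries.
auto it = m_cachedStackFrames.find(key);
if (it != m_cachedStackFrames.end() && !it->second.expired()) {
  return it->second.lock();
}
m_cachedStackFrames.emplace(key, stackFrame);
return stackFrame;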
diff --git a/deps/v8/src/inspector/v8-stack-trace-impl.cc b/deps/v8/src/inspector/v8-stack-trace-impl.cc
index b1b584c363..56291b2775 100644
--- a/deps/v8/src/inspector/v8-stack-trace-impl.cc
+++ b/deps/v8/src/inspector/v8-stack-trace-impl.cc
@@ -168,16 +168,15 @@ std::unique_ptr<StringBuffer> V8StackTraceId::ToString() {
return StringBufferFrom(std::move(json));
}
-StackFrame::StackFrame(v8::Isolate* isolate, v8::Local<v8::StackFrame> v8Frame)
- : m_functionName(
- toProtocolString(isolate, v8::debug::GetFunctionDebugName(v8Frame))),
- m_scriptId(v8Frame->GetScriptId()),
- m_sourceURL(
- toProtocolString(isolate, v8Frame->GetScriptNameOrSourceURL())),
- m_lineNumber(v8Frame->GetLineNumber() - 1),
- m_columnNumber(v8Frame->GetColumn() - 1),
- m_hasSourceURLComment(v8Frame->GetScriptName() !=
- v8Frame->GetScriptNameOrSourceURL()) {
+StackFrame::StackFrame(String16&& functionName, int scriptId,
+ String16&& sourceURL, int lineNumber, int columnNumber,
+ bool hasSourceURLComment)
+ : m_functionName(std::move(functionName)),
+ m_scriptId(scriptId),
+ m_sourceURL(std::move(sourceURL)),
+ m_lineNumber(lineNumber),
+ m_columnNumber(columnNumber),
+ m_hasSourceURLComment(hasSourceURLComment) {
DCHECK_NE(v8::Message::kNoLineNumberInfo, m_lineNumber + 1);
DCHECK_NE(v8::Message::kNoColumnInfo, m_columnNumber + 1);
}
@@ -332,11 +331,6 @@ V8StackTraceImpl::buildInspectorObjectImpl(V8Debugger* debugger,
}
std::unique_ptr<protocol::Runtime::API::StackTrace>
-V8StackTraceImpl::buildInspectorObject() const {
- return buildInspectorObjectImpl(nullptr);
-}
-
-std::unique_ptr<protocol::Runtime::API::StackTrace>
V8StackTraceImpl::buildInspectorObject(int maxAsyncDepth) const {
return buildInspectorObjectImpl(nullptr,
std::min(maxAsyncDepth, m_maxAsyncDepth));
diff --git a/deps/v8/src/inspector/v8-stack-trace-impl.h b/deps/v8/src/inspector/v8-stack-trace-impl.h
index 8cefffee12..ec8ee90737 100644
--- a/deps/v8/src/inspector/v8-stack-trace-impl.h
+++ b/deps/v8/src/inspector/v8-stack-trace-impl.h
@@ -27,7 +27,8 @@ struct V8StackTraceId;
class StackFrame {
public:
- explicit StackFrame(v8::Isolate* isolate, v8::Local<v8::StackFrame> frame);
+ StackFrame(String16&& functionName, int scriptId, String16&& sourceURL,
+ int lineNumber, int columnNumber, bool hasSourceURLComment);
~StackFrame() = default;
const String16& functionName() const;
@@ -78,8 +79,6 @@ class V8StackTraceImpl : public V8StackTrace {
int topColumnNumber() const override; // 1-based.
int topScriptId() const override;
StringView topFunctionName() const override;
- std::unique_ptr<protocol::Runtime::API::StackTrace> buildInspectorObject()
- const override;
std::unique_ptr<protocol::Runtime::API::StackTrace> buildInspectorObject(
int maxAsyncDepth) const override;
std::unique_ptr<StringBuffer> toString() const override;
diff --git a/deps/v8/src/inspector/value-mirror.cc b/deps/v8/src/inspector/value-mirror.cc
index 611933026d..3001a56356 100644
--- a/deps/v8/src/inspector/value-mirror.cc
+++ b/deps/v8/src/inspector/value-mirror.cc
@@ -1474,103 +1474,15 @@ std::vector<PrivatePropertyMirror> ValueMirror::getPrivateProperties(
return mirrors;
}
-String16 descriptionForNode(v8::Local<v8::Context> context,
- v8::Local<v8::Value> value) {
- if (!value->IsObject()) return String16();
- v8::Local<v8::Object> object = value.As<v8::Object>();
- v8::Isolate* isolate = context->GetIsolate();
- v8::TryCatch tryCatch(isolate);
- v8::Local<v8::Value> nodeName;
- if (!object->Get(context, toV8String(isolate, "nodeName"))
- .ToLocal(&nodeName)) {
- return String16();
- }
- String16 description;
- v8::Local<v8::Function> toLowerCase =
- v8::debug::GetBuiltin(isolate, v8::debug::kStringToLowerCase);
- if (nodeName->IsString()) {
- if (!toLowerCase->Call(context, nodeName, 0, nullptr).ToLocal(&nodeName))
- return String16();
- if (nodeName->IsString()) {
- description = toProtocolString(isolate, nodeName.As<v8::String>());
- }
- }
- if (!description.length()) {
- v8::Local<v8::Value> constructor;
- if (!object->Get(context, toV8String(isolate, "constructor"))
- .ToLocal(&constructor) ||
- !constructor->IsObject()) {
- return String16();
- }
- if (!value.As<v8::Object>()
- ->Get(context, toV8String(isolate, "name"))
- .ToLocal(&value) ||
- !value->IsString()) {
- return String16();
- }
- description = toProtocolString(isolate, value.As<v8::String>());
- }
- v8::Local<v8::Value> nodeType;
- if (!object->Get(context, toV8String(isolate, "nodeType"))
- .ToLocal(&nodeType) ||
- !nodeType->IsInt32()) {
- return description;
- }
- if (nodeType.As<v8::Int32>()->Value() == 1) {
- v8::Local<v8::Value> idValue;
- if (!object->Get(context, toV8String(isolate, "id")).ToLocal(&idValue)) {
- return description;
- }
- if (idValue->IsString()) {
- String16 id = toProtocolString(isolate, idValue.As<v8::String>());
- if (id.length()) {
- description = String16::concat(description, '#', id);
- }
- }
- v8::Local<v8::Value> classNameValue;
- if (!object->Get(context, toV8String(isolate, "className"))
- .ToLocal(&classNameValue)) {
- return description;
- }
- if (classNameValue->IsString() &&
- classNameValue.As<v8::String>()->Length()) {
- String16 classes =
- toProtocolString(isolate, classNameValue.As<v8::String>());
- String16Builder output;
- bool previousIsDot = false;
- for (size_t i = 0; i < classes.length(); ++i) {
- if (classes[i] == ' ') {
- if (!previousIsDot) {
- output.append('.');
- previousIsDot = true;
- }
- } else {
- output.append(classes[i]);
- previousIsDot = classes[i] == '.';
- }
- }
- description = String16::concat(description, '.', output.toString());
- }
- } else if (nodeType.As<v8::Int32>()->Value() == 1) {
- return String16::concat("<!DOCTYPE ", description, '>');
- }
- return description;
-}
-
std::unique_ptr<ValueMirror> clientMirror(v8::Local<v8::Context> context,
v8::Local<v8::Value> value,
const String16& subtype) {
- // TODO(alph): description and length retrieval should move to embedder.
auto descriptionForValueSubtype =
clientFor(context)->descriptionForValueSubtype(context, value);
if (descriptionForValueSubtype) {
return std::make_unique<ObjectMirror>(
value, subtype, toString16(descriptionForValueSubtype->string()));
}
- if (subtype == "node") {
- return std::make_unique<ObjectMirror>(value, subtype,
- descriptionForNode(context, value));
- }
if (subtype == "error") {
return std::make_unique<ObjectMirror>(
value, RemoteObject::SubtypeEnum::Error,
diff --git a/deps/v8/src/interpreter/bytecode-array-builder.h b/deps/v8/src/interpreter/bytecode-array-builder.h
index 7f5f09d71c..8e90cd7d0d 100644
--- a/deps/v8/src/interpreter/bytecode-array-builder.h
+++ b/deps/v8/src/interpreter/bytecode-array-builder.h
@@ -501,6 +501,17 @@ class V8_EXPORT_PRIVATE BytecodeArrayBuilder final {
SetStatementPosition(stmt->position());
}
+ BytecodeSourceInfo PopSourcePosition() {
+ BytecodeSourceInfo source_info = latest_source_info_;
+ latest_source_info_.set_invalid();
+ return source_info;
+ }
+
+ void PushSourcePosition(BytecodeSourceInfo source_info) {
+ DCHECK(!latest_source_info_.is_valid());
+ latest_source_info_ = source_info;
+ }
+
void SetStatementPosition(int position) {
if (position == kNoSourcePosition) return;
latest_source_info_.MakeStatementPosition(position);
diff --git a/deps/v8/src/interpreter/bytecode-array-writer.cc b/deps/v8/src/interpreter/bytecode-array-writer.cc
index 58c46bedeb..b7da127253 100644
--- a/deps/v8/src/interpreter/bytecode-array-writer.cc
+++ b/deps/v8/src/interpreter/bytecode-array-writer.cc
@@ -164,6 +164,8 @@ void BytecodeArrayWriter::BindLabel(BytecodeLabel* label) {
void BytecodeArrayWriter::BindLoopHeader(BytecodeLoopHeader* loop_header) {
size_t current_offset = bytecodes()->size();
loop_header->bind_to(current_offset);
+ // Don't start a basic block when the entire loop is dead.
+ if (exit_seen_in_block_) return;
StartBasicBlock();
}
diff --git a/deps/v8/src/interpreter/bytecode-generator.cc b/deps/v8/src/interpreter/bytecode-generator.cc
index 2046787788..4acf248c4d 100644
--- a/deps/v8/src/interpreter/bytecode-generator.cc
+++ b/deps/v8/src/interpreter/bytecode-generator.cc
@@ -17,6 +17,8 @@
#include "src/codegen/compiler.h"
#include "src/codegen/unoptimized-compilation-info.h"
#include "src/common/globals.h"
+#include "src/compiler-dispatcher/lazy-compile-dispatcher.h"
+#include "src/heap/parked-scope.h"
#include "src/interpreter/bytecode-flags.h"
#include "src/interpreter/bytecode-jump-table.h"
#include "src/interpreter/bytecode-label.h"
@@ -1130,10 +1132,12 @@ static bool IsInEagerLiterals(
#endif // DEBUG
BytecodeGenerator::BytecodeGenerator(
- Zone* compile_zone, UnoptimizedCompilationInfo* info,
+ LocalIsolate* local_isolate, Zone* compile_zone,
+ UnoptimizedCompilationInfo* info,
const AstStringConstants* ast_string_constants,
- std::vector<FunctionLiteral*>* eager_inner_literals)
- : zone_(compile_zone),
+ std::vector<FunctionLiteral*>* eager_inner_literals, Handle<Script> script)
+ : local_isolate_(local_isolate),
+ zone_(compile_zone),
builder_(zone(), info->num_parameters_including_this(),
info->scope()->num_stack_slots(), info->feedback_vector_spec(),
info->SourcePositionRecordingMode()),
@@ -1142,6 +1146,7 @@ BytecodeGenerator::BytecodeGenerator(
closure_scope_(info->scope()),
current_scope_(info->scope()),
eager_inner_literals_(eager_inner_literals),
+ script_(script),
feedback_slot_cache_(zone()->New<FeedbackSlotCache>(zone())),
top_level_builder_(zone()->New<TopLevelDeclarationsBuilder>()),
block_coverage_builder_(nullptr),
@@ -1358,10 +1363,6 @@ bool NeedsContextInitialization(DeclarationScope* scope) {
} // namespace
void BytecodeGenerator::GenerateBytecode(uintptr_t stack_limit) {
- DisallowGarbageCollection no_gc;
- DisallowHandleAllocation no_handles;
- DisallowHandleDereference no_deref;
-
InitializeAstVisitor(stack_limit);
// Initialize the incoming context.
@@ -2132,6 +2133,8 @@ void BytecodeGenerator::BuildTryCatch(
TryBodyFunc try_body_func, CatchBodyFunc catch_body_func,
HandlerTable::CatchPrediction catch_prediction,
TryCatchStatement* stmt_for_coverage) {
+ if (builder()->RemainderOfBlockIsDead()) return;
+
TryCatchBuilder try_control_builder(
builder(),
stmt_for_coverage == nullptr ? nullptr : block_coverage_builder_,
@@ -2162,6 +2165,8 @@ void BytecodeGenerator::BuildTryFinally(
TryBodyFunc try_body_func, FinallyBodyFunc finally_body_func,
HandlerTable::CatchPrediction catch_prediction,
TryFinallyStatement* stmt_for_coverage) {
+ if (builder()->RemainderOfBlockIsDead()) return;
+
// We can't know whether the finally block will override ("catch") an
// exception thrown in the try block, so we just adopt the outer prediction.
TryFinallyBuilder try_control_builder(
@@ -2514,8 +2519,35 @@ void BytecodeGenerator::VisitFunctionLiteral(FunctionLiteral* expr) {
}
void BytecodeGenerator::AddToEagerLiteralsIfEager(FunctionLiteral* literal) {
- if (eager_inner_literals_ && literal->ShouldEagerCompile()) {
+  // Only compile in parallel when there's a script (not the case for source
+  // position collection).
+ if (!script_.is_null() && literal->should_parallel_compile()) {
+ // If we should normally be eagerly compiling this function, we must be here
+ // because of post_parallel_compile_tasks_for_eager_toplevel.
+ DCHECK_IMPLIES(
+ literal->ShouldEagerCompile(),
+ info()->flags().post_parallel_compile_tasks_for_eager_toplevel());
+ // There exists a lazy compile dispatcher.
+ DCHECK(info()->dispatcher());
+ // There exists a cloneable character stream.
+ DCHECK(info()->character_stream()->can_be_cloned_for_parallel_access());
+
+ UnparkedScope scope(local_isolate_);
+ // If there doesn't already exist a SharedFunctionInfo for this function,
+ // then create one and enqueue it. Otherwise, we're reparsing (e.g. for the
+ // debugger, source position collection, call printing, recompile after
+ // flushing, etc.) and don't want to over-compile.
+ Handle<SharedFunctionInfo> shared_info;
+ if (!Script::FindSharedFunctionInfo(script_, local_isolate_, literal)
+ .ToHandle(&shared_info)) {
+ shared_info =
+ Compiler::GetSharedFunctionInfo(literal, script_, local_isolate_);
+ info()->dispatcher()->Enqueue(local_isolate_, shared_info,
+ info()->character_stream()->Clone());
+ }
+ } else if (eager_inner_literals_ && literal->ShouldEagerCompile()) {
DCHECK(!IsInEagerLiterals(literal, *eager_inner_literals_));
+ DCHECK(!literal->should_parallel_compile());
eager_inner_literals_->push_back(literal);
}
}
@@ -2760,8 +2792,14 @@ void BytecodeGenerator::VisitClassLiteral(ClassLiteral* expr, Register name) {
CurrentScope current_scope(this, expr->scope());
DCHECK_NOT_NULL(expr->scope());
if (expr->scope()->NeedsContext()) {
+ // Make sure to associate the source position for the class
+ // after the block context is created. Otherwise we have a mismatch
+ // between the scope and the context, where we already are in a
+ // block context for the class, but not yet in the class scope.
+ BytecodeSourceInfo source_info = builder()->PopSourcePosition();
BuildNewLocalBlockContext(expr->scope());
ContextScope scope(this, expr->scope());
+ builder()->PushSourcePosition(source_info);
BuildClassLiteral(expr, name);
} else {
BuildClassLiteral(expr, name);
@@ -2775,6 +2813,7 @@ void BytecodeGenerator::BuildClassProperty(ClassLiteral::Property* property) {
// Private methods are not initialized in BuildClassProperty.
DCHECK_IMPLIES(property->is_private(),
property->kind() == ClassLiteral::Property::FIELD);
+ builder()->SetExpressionPosition(property->key());
bool is_literal_store = property->key()->IsPropertyName() &&
!property->is_computed_name() &&
@@ -3835,9 +3874,9 @@ BytecodeGenerator::AssignmentLhsData::NamedSuperProperty(
// static
BytecodeGenerator::AssignmentLhsData
BytecodeGenerator::AssignmentLhsData::PrivateMethodOrAccessor(
- AssignType type, Property* property) {
- return AssignmentLhsData(type, property, RegisterList(), Register(),
- Register(), nullptr, nullptr);
+ AssignType type, Property* property, Register object, Register key) {
+ return AssignmentLhsData(type, property, RegisterList(), object, key, nullptr,
+ nullptr);
}
// static
BytecodeGenerator::AssignmentLhsData
@@ -3875,7 +3914,14 @@ BytecodeGenerator::AssignmentLhsData BytecodeGenerator::PrepareAssignmentLhs(
case PRIVATE_SETTER_ONLY:
case PRIVATE_GETTER_AND_SETTER: {
DCHECK(!property->IsSuperAccess());
- return AssignmentLhsData::PrivateMethodOrAccessor(assign_type, property);
+ AccumulatorPreservingScope scope(this, accumulator_preserving_mode);
+ Register object = VisitForRegisterValue(property->obj());
+ Register key =
+ assign_type == PRIVATE_GETTER_ONLY || assign_type == PRIVATE_METHOD
+ ? Register()
+ : VisitForRegisterValue(property->key());
+ return AssignmentLhsData::PrivateMethodOrAccessor(assign_type, property,
+ object, key);
}
case NAMED_SUPER_PROPERTY: {
AccumulatorPreservingScope scope(this, accumulator_preserving_mode);
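
With this hunk the private-member cases evaluate the receiver (and, when a setter may run, the key) once while preparing the LHS and carry the resulting registers inside AssignmentLhsData; the brand check and the eventual store below reuse them instead of re-visiting the subexpressions. A rough sketch of that evaluate-once-and-carry pattern in plain C++ (LhsData, EvaluateOnce and the int "registers" are illustrative stand-ins):

#include <iostream>

static int evaluations = 0;

int EvaluateOnce(int value) {
  ++evaluations;  // Stands in for emitting bytecode for a subexpression.
  return value;
}

struct LhsData {
  int object;  // Pre-evaluated receiver register.
  int key;     // Pre-evaluated key register (unused for method/getter-only).
};

LhsData PrepareLhs() { return LhsData{EvaluateOnce(1), EvaluateOnce(2)}; }

void BrandCheck(const LhsData& lhs) { (void)lhs.object; }
void StoreValue(const LhsData& lhs) { (void)lhs.key; }

int main() {
  LhsData lhs = PrepareLhs();
  BrandCheck(lhs);  // Reuses lhs.object; the receiver is not evaluated again.
  StoreValue(lhs);  // Reuses lhs.key.
  std::cout << "subexpression evaluations: " << evaluations << "\n";  // 2
}
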
@@ -4431,11 +4477,15 @@ void BytecodeGenerator::BuildAssignment(
break;
}
case PRIVATE_METHOD: {
+ Property* property = lhs_data.expr()->AsProperty();
+ BuildPrivateBrandCheck(property, lhs_data.object());
BuildInvalidPropertyAccess(MessageTemplate::kInvalidPrivateMethodWrite,
lhs_data.expr()->AsProperty());
break;
}
case PRIVATE_GETTER_ONLY: {
+ Property* property = lhs_data.expr()->AsProperty();
+ BuildPrivateBrandCheck(property, lhs_data.object());
BuildInvalidPropertyAccess(MessageTemplate::kInvalidPrivateSetterAccess,
lhs_data.expr()->AsProperty());
break;
@@ -4445,11 +4495,8 @@ void BytecodeGenerator::BuildAssignment(
Register value = register_allocator()->NewRegister();
builder()->StoreAccumulatorInRegister(value);
Property* property = lhs_data.expr()->AsProperty();
- Register object = VisitForRegisterValue(property->obj());
- Register key = VisitForRegisterValue(property->key());
- BuildPrivateBrandCheck(property, object,
- MessageTemplate::kInvalidPrivateMemberWrite);
- BuildPrivateSetterAccess(object, key, value);
+ BuildPrivateBrandCheck(property, lhs_data.object());
+ BuildPrivateSetterAccess(lhs_data.object(), lhs_data.key(), value);
if (!execution_result()->IsEffect()) {
builder()->LoadAccumulatorWithRegister(value);
}
@@ -4504,9 +4551,7 @@ void BytecodeGenerator::VisitCompoundAssignment(CompoundAssignment* expr) {
// The property access is invalid, but if the brand check fails too, we
// need to return the error from the brand check.
Property* property = lhs_data.expr()->AsProperty();
- Register object = VisitForRegisterValue(property->obj());
- BuildPrivateBrandCheck(property, object,
- MessageTemplate::kInvalidPrivateMemberRead);
+ BuildPrivateBrandCheck(property, lhs_data.object());
BuildInvalidPropertyAccess(MessageTemplate::kInvalidPrivateMethodWrite,
lhs_data.expr()->AsProperty());
break;
@@ -4515,9 +4560,7 @@ void BytecodeGenerator::VisitCompoundAssignment(CompoundAssignment* expr) {
// The property access is invalid, but if the brand check fails too, we
// need to return the error from the brand check.
Property* property = lhs_data.expr()->AsProperty();
- Register object = VisitForRegisterValue(property->obj());
- BuildPrivateBrandCheck(property, object,
- MessageTemplate::kInvalidPrivateMemberRead);
+ BuildPrivateBrandCheck(property, lhs_data.object());
BuildInvalidPropertyAccess(MessageTemplate::kInvalidPrivateSetterAccess,
lhs_data.expr()->AsProperty());
@@ -4527,20 +4570,15 @@ void BytecodeGenerator::VisitCompoundAssignment(CompoundAssignment* expr) {
// The property access is invalid, but if the brand check fails too, we
// need to return the error from the brand check.
Property* property = lhs_data.expr()->AsProperty();
- Register object = VisitForRegisterValue(property->obj());
- BuildPrivateBrandCheck(property, object,
- MessageTemplate::kInvalidPrivateMemberRead);
+ BuildPrivateBrandCheck(property, lhs_data.object());
BuildInvalidPropertyAccess(MessageTemplate::kInvalidPrivateGetterAccess,
lhs_data.expr()->AsProperty());
break;
}
case PRIVATE_GETTER_AND_SETTER: {
Property* property = lhs_data.expr()->AsProperty();
- Register object = VisitForRegisterValue(property->obj());
- Register key = VisitForRegisterValue(property->key());
- BuildPrivateBrandCheck(property, object,
- MessageTemplate::kInvalidPrivateMemberRead);
- BuildPrivateGetterAccess(object, key);
+ BuildPrivateBrandCheck(property, lhs_data.object());
+ BuildPrivateGetterAccess(lhs_data.object(), lhs_data.key());
break;
}
}
@@ -5032,6 +5070,7 @@ void BytecodeGenerator::VisitPropertyLoad(Register obj, Property* property) {
VisitKeyedSuperPropertyLoad(property, Register::invalid_value());
break;
case PRIVATE_SETTER_ONLY: {
+ BuildPrivateBrandCheck(property, obj);
BuildInvalidPropertyAccess(MessageTemplate::kInvalidPrivateGetterAccess,
property);
break;
@@ -5039,14 +5078,12 @@ void BytecodeGenerator::VisitPropertyLoad(Register obj, Property* property) {
case PRIVATE_GETTER_ONLY:
case PRIVATE_GETTER_AND_SETTER: {
Register key = VisitForRegisterValue(property->key());
- BuildPrivateBrandCheck(property, obj,
- MessageTemplate::kInvalidPrivateMemberRead);
+ BuildPrivateBrandCheck(property, obj);
BuildPrivateGetterAccess(obj, key);
break;
}
case PRIVATE_METHOD: {
- BuildPrivateBrandCheck(property, obj,
- MessageTemplate::kInvalidPrivateMemberRead);
+ BuildPrivateBrandCheck(property, obj);
// In the case of private methods, property->key() is the function to be
// loaded (stored in a context slot), so load this directly.
VisitForAccumulatorValue(property->key());
@@ -5145,11 +5182,11 @@ void BytecodeGenerator::BuildPrivateMethodIn(Variable* private_name,
}
void BytecodeGenerator::BuildPrivateBrandCheck(Property* property,
- Register object,
- MessageTemplate tmpl) {
+ Register object) {
Variable* private_name = property->key()->AsVariableProxy()->var();
DCHECK(IsPrivateMethodOrAccessorVariableMode(private_name->mode()));
ClassScope* scope = private_name->scope()->AsClassScope();
+ builder()->SetExpressionPosition(property);
if (private_name->is_static()) {
// For static private methods, the only valid receiver is the class.
// Load the class constructor.
@@ -5179,13 +5216,21 @@ void BytecodeGenerator::BuildPrivateBrandCheck(Property* property,
BytecodeLabel return_check;
builder()->CompareReference(object).JumpIfTrue(
ToBooleanMode::kAlreadyBoolean, &return_check);
- BuildInvalidPropertyAccess(tmpl, property);
+ const AstRawString* name = scope->class_variable()->raw_name();
+ RegisterList args = register_allocator()->NewRegisterList(2);
+ builder()
+ ->LoadLiteral(
+ Smi::FromEnum(MessageTemplate::kInvalidPrivateBrandStatic))
+ .StoreAccumulatorInRegister(args[0])
+ .LoadLiteral(name)
+ .StoreAccumulatorInRegister(args[1])
+ .CallRuntime(Runtime::kNewTypeError, args)
+ .Throw();
builder()->Bind(&return_check);
}
} else {
BuildVariableLoadForAccumulatorValue(scope->brand(),
HoleCheckMode::kElided);
- builder()->SetExpressionPosition(property);
builder()->LoadKeyedProperty(
object, feedback_index(feedback_spec()->AddKeyedLoadICSlot()));
}
@@ -5886,16 +5931,22 @@ void BytecodeGenerator::VisitCountOperation(CountOperation* expr) {
break;
}
case PRIVATE_METHOD: {
+ object = VisitForRegisterValue(property->obj());
+ BuildPrivateBrandCheck(property, object);
BuildInvalidPropertyAccess(MessageTemplate::kInvalidPrivateMethodWrite,
property);
return;
}
case PRIVATE_GETTER_ONLY: {
+ object = VisitForRegisterValue(property->obj());
+ BuildPrivateBrandCheck(property, object);
BuildInvalidPropertyAccess(MessageTemplate::kInvalidPrivateSetterAccess,
property);
return;
}
case PRIVATE_SETTER_ONLY: {
+ object = VisitForRegisterValue(property->obj());
+ BuildPrivateBrandCheck(property, object);
BuildInvalidPropertyAccess(MessageTemplate::kInvalidPrivateGetterAccess,
property);
return;
@@ -5903,8 +5954,7 @@ void BytecodeGenerator::VisitCountOperation(CountOperation* expr) {
case PRIVATE_GETTER_AND_SETTER: {
object = VisitForRegisterValue(property->obj());
key = VisitForRegisterValue(property->key());
- BuildPrivateBrandCheck(property, object,
- MessageTemplate::kInvalidPrivateMemberRead);
+ BuildPrivateBrandCheck(property, object);
BuildPrivateGetterAccess(object, key);
break;
}
diff --git a/deps/v8/src/interpreter/bytecode-generator.h b/deps/v8/src/interpreter/bytecode-generator.h
index d3cc86acf5..1c11cbbb50 100644
--- a/deps/v8/src/interpreter/bytecode-generator.h
+++ b/deps/v8/src/interpreter/bytecode-generator.h
@@ -32,9 +32,10 @@ class BytecodeJumpTable;
class BytecodeGenerator final : public AstVisitor<BytecodeGenerator> {
public:
explicit BytecodeGenerator(
- Zone* zone, UnoptimizedCompilationInfo* info,
+ LocalIsolate* local_isolate, Zone* zone, UnoptimizedCompilationInfo* info,
const AstStringConstants* ast_string_constants,
- std::vector<FunctionLiteral*>* eager_inner_literals);
+ std::vector<FunctionLiteral*>* eager_inner_literals,
+ Handle<Script> script);
void GenerateBytecode(uintptr_t stack_limit);
template <typename IsolateT>
@@ -98,7 +99,9 @@ class BytecodeGenerator final : public AstVisitor<BytecodeGenerator> {
const AstRawString* name);
static AssignmentLhsData KeyedProperty(Register object, Register key);
static AssignmentLhsData PrivateMethodOrAccessor(AssignType type,
- Property* property);
+ Property* property,
+ Register object,
+ Register key);
static AssignmentLhsData NamedSuperProperty(
RegisterList super_property_args);
static AssignmentLhsData KeyedSuperProperty(
@@ -117,11 +120,17 @@ class BytecodeGenerator final : public AstVisitor<BytecodeGenerator> {
return object_expr_;
}
Register object() const {
- DCHECK(assign_type_ == NAMED_PROPERTY || assign_type_ == KEYED_PROPERTY);
+ DCHECK(assign_type_ == NAMED_PROPERTY || assign_type_ == KEYED_PROPERTY ||
+ assign_type_ == PRIVATE_METHOD ||
+ assign_type_ == PRIVATE_GETTER_ONLY ||
+ assign_type_ == PRIVATE_SETTER_ONLY ||
+ assign_type_ == PRIVATE_GETTER_AND_SETTER);
return object_;
}
Register key() const {
- DCHECK(assign_type_ == KEYED_PROPERTY);
+ DCHECK(assign_type_ == KEYED_PROPERTY ||
+ assign_type_ == PRIVATE_SETTER_ONLY ||
+ assign_type_ == PRIVATE_GETTER_AND_SETTER);
return key_;
}
const AstRawString* name() const {
@@ -311,8 +320,7 @@ class BytecodeGenerator final : public AstVisitor<BytecodeGenerator> {
void VisitRestArgumentsArray(Variable* rest);
void VisitCallSuper(Call* call);
void BuildInvalidPropertyAccess(MessageTemplate tmpl, Property* property);
- void BuildPrivateBrandCheck(Property* property, Register object,
- MessageTemplate tmpl);
+ void BuildPrivateBrandCheck(Property* property, Register object);
void BuildPrivateMethodIn(Variable* private_name,
Expression* object_expression);
void BuildPrivateGetterAccess(Register obj, Register access_pair);
@@ -492,6 +500,7 @@ class BytecodeGenerator final : public AstVisitor<BytecodeGenerator> {
current_loop_scope_ = loop_scope;
}
+ LocalIsolate* local_isolate_;
Zone* zone_;
BytecodeArrayBuilder builder_;
UnoptimizedCompilationInfo* info_;
@@ -501,6 +510,7 @@ class BytecodeGenerator final : public AstVisitor<BytecodeGenerator> {
// External vector of literals to be eagerly compiled.
std::vector<FunctionLiteral*>* eager_inner_literals_;
+ Handle<Script> script_;
FeedbackSlotCache* feedback_slot_cache_;
diff --git a/deps/v8/src/interpreter/interpreter-assembler.cc b/deps/v8/src/interpreter/interpreter-assembler.cc
index cba90c7893..fe635115f6 100644
--- a/deps/v8/src/interpreter/interpreter-assembler.cc
+++ b/deps/v8/src/interpreter/interpreter-assembler.cc
@@ -1108,18 +1108,67 @@ void InterpreterAssembler::JumpConditional(TNode<BoolT> condition,
Dispatch();
}
+void InterpreterAssembler::JumpConditionalByImmediateOperand(
+ TNode<BoolT> condition, int operand_index) {
+ Label match(this), no_match(this);
+
+ Branch(condition, &match, &no_match);
+ BIND(&match);
+ TNode<IntPtrT> jump_offset = Signed(BytecodeOperandUImmWord(operand_index));
+ Jump(jump_offset);
+ BIND(&no_match);
+ Dispatch();
+}
+
+void InterpreterAssembler::JumpConditionalByConstantOperand(
+ TNode<BoolT> condition, int operand_index) {
+ Label match(this), no_match(this);
+
+ Branch(condition, &match, &no_match);
+ BIND(&match);
+ TNode<IntPtrT> jump_offset =
+ LoadAndUntagConstantPoolEntryAtOperandIndex(operand_index);
+ Jump(jump_offset);
+ BIND(&no_match);
+ Dispatch();
+}
+
void InterpreterAssembler::JumpIfTaggedEqual(TNode<Object> lhs,
TNode<Object> rhs,
TNode<IntPtrT> jump_offset) {
JumpConditional(TaggedEqual(lhs, rhs), jump_offset);
}
+void InterpreterAssembler::JumpIfTaggedEqual(TNode<Object> lhs,
+ TNode<Object> rhs,
+ int operand_index) {
+ JumpConditionalByImmediateOperand(TaggedEqual(lhs, rhs), operand_index);
+}
+
+void InterpreterAssembler::JumpIfTaggedEqualConstant(TNode<Object> lhs,
+ TNode<Object> rhs,
+ int operand_index) {
+ JumpConditionalByConstantOperand(TaggedEqual(lhs, rhs), operand_index);
+}
+
void InterpreterAssembler::JumpIfTaggedNotEqual(TNode<Object> lhs,
TNode<Object> rhs,
TNode<IntPtrT> jump_offset) {
JumpConditional(TaggedNotEqual(lhs, rhs), jump_offset);
}
+void InterpreterAssembler::JumpIfTaggedNotEqual(TNode<Object> lhs,
+ TNode<Object> rhs,
+ int operand_index) {
+ JumpConditionalByImmediateOperand(TaggedNotEqual(lhs, rhs), operand_index);
+}
+
+void InterpreterAssembler::JumpIfTaggedNotEqualConstant(TNode<Object> lhs,
+ TNode<Object> rhs,
+ int operand_index) {
+ JumpConditionalByConstantOperand(TaggedNotEqual(lhs, rhs), operand_index);
+}
+
TNode<WordT> InterpreterAssembler::LoadBytecode(
TNode<IntPtrT> bytecode_offset) {
TNode<Uint8T> bytecode =
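
Both new helpers branch first and decode the jump target only under the taken label, and the handler rewrites further down likewise move BytecodeOperandUImmWord or the constant pool load into the taken branch, presumably to keep the operand decode off the fall-through path that only dispatches to the next bytecode. A minimal plain-C++ sketch of that defer-until-taken shape (DecodeOffset and NextOffset are illustrative stand-ins, not CSA code):

#include <iostream>

static int decode_count = 0;

int DecodeOffset() {
  ++decode_count;  // Stands in for decoding the jump operand.
  return 42;
}

int NextOffset(bool taken, int fallthrough) {
  if (taken) {
    return DecodeOffset();  // Materialize the target only when jumping.
  }
  return fallthrough;  // Plain dispatch; no operand decode needed.
}

int main() {
  NextOffset(false, 1);
  NextOffset(true, 1);
  std::cout << "decodes: " << decode_count << "\n";  // Prints 1, not 2.
}
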
diff --git a/deps/v8/src/interpreter/interpreter-assembler.h b/deps/v8/src/interpreter/interpreter-assembler.h
index d89c05e2d3..9855dedda3 100644
--- a/deps/v8/src/interpreter/interpreter-assembler.h
+++ b/deps/v8/src/interpreter/interpreter-assembler.h
@@ -204,11 +204,33 @@ class V8_EXPORT_PRIVATE InterpreterAssembler : public CodeStubAssembler {
void JumpIfTaggedEqual(TNode<Object> lhs, TNode<Object> rhs,
TNode<IntPtrT> jump_offset);
+ // Jump forward relative to the current bytecode by the offset specified in
+ // operand |operand_index| if the word values |lhs| and |rhs| are equal.
+ void JumpIfTaggedEqual(TNode<Object> lhs, TNode<Object> rhs,
+ int operand_index);
+
+ // Jump forward relative to the current bytecode by the offset loaded from the
+ // constant pool if the word values |lhs| and |rhs| are equal.
+ // The constant's index is specified in operand |operand_index|.
+ void JumpIfTaggedEqualConstant(TNode<Object> lhs, TNode<Object> rhs,
+ int operand_index);
+
// Jump forward relative to the current bytecode by |jump_offset| if the
// word values |lhs| and |rhs| are not equal.
void JumpIfTaggedNotEqual(TNode<Object> lhs, TNode<Object> rhs,
TNode<IntPtrT> jump_offset);
+ // Jump forward relative to the current bytecode by the offset specified in
+ // operand |operand_index| if the word values |lhs| and |rhs| are not equal.
+ void JumpIfTaggedNotEqual(TNode<Object> lhs, TNode<Object> rhs,
+ int operand_index);
+
+ // Jump forward relative to the current bytecode by the offset loaded from the
+ // constant pool if the word values |lhs| and |rhs| are not equal.
+ // The constant's index is specified in operand |operand_index|.
+ void JumpIfTaggedNotEqualConstant(TNode<Object> lhs, TNode<Object> rhs,
+ int operand_index);
+
// Updates the profiler interrupt budget for a return.
void UpdateInterruptBudgetOnReturn();
@@ -345,6 +367,19 @@ class V8_EXPORT_PRIVATE InterpreterAssembler : public CodeStubAssembler {
// JumpIfTaggedNotEqual.
void JumpConditional(TNode<BoolT> condition, TNode<IntPtrT> jump_offset);
+ // Jump forward relative to the current bytecode by the offset specified in
+ // operand |operand_index| if the |condition| is true. Helper function for
+ // JumpIfTaggedEqual and JumpIfTaggedNotEqual.
+ void JumpConditionalByImmediateOperand(TNode<BoolT> condition,
+ int operand_index);
+
+ // Jump forward relative to the current bytecode by the offset loaded from the
+ // constant pool if the |condition| is true. The constant's index is specified
+ // in operand |operand_index|. Helper function for JumpIfTaggedEqualConstant
+ // and JumpIfTaggedNotEqualConstant.
+ void JumpConditionalByConstantOperand(TNode<BoolT> condition,
+ int operand_index);
+
// Save the bytecode offset to the interpreter frame.
void SaveBytecodeOffset();
// Reload the bytecode offset from the interpreter frame.
diff --git a/deps/v8/src/interpreter/interpreter-generator.cc b/deps/v8/src/interpreter/interpreter-generator.cc
index 0054445e83..b32804a6fd 100644
--- a/deps/v8/src/interpreter/interpreter-generator.cc
+++ b/deps/v8/src/interpreter/interpreter-generator.cc
@@ -999,7 +999,7 @@ class InterpreterBitwiseBinaryOpAssembler : public InterpreterAssembler {
BinaryOpAssembler binop_asm(state());
TNode<Object> result = binop_asm.Generate_BitwiseBinaryOpWithFeedback(
- bitwise_op, left, right, [=] { return context; }, &feedback);
+ bitwise_op, left, right, [=] { return context; }, &feedback, false);
MaybeUpdateFeedback(feedback.value(), maybe_feedback_vector, slot_index);
SetAccumulator(result);
@@ -1013,29 +1013,15 @@ class InterpreterBitwiseBinaryOpAssembler : public InterpreterAssembler {
TNode<HeapObject> maybe_feedback_vector = LoadFeedbackVector();
TNode<Context> context = GetContext();
- TVARIABLE(Smi, var_left_feedback);
- TVARIABLE(Word32T, var_left_word32);
- TVARIABLE(BigInt, var_left_bigint);
- Label do_smi_op(this), if_bigint_mix(this);
-
- TaggedToWord32OrBigIntWithFeedback(context, left, &do_smi_op,
- &var_left_word32, &if_bigint_mix,
- &var_left_bigint, &var_left_feedback);
- BIND(&do_smi_op);
- TNode<Number> result =
- BitwiseOp(var_left_word32.value(), SmiToInt32(right), bitwise_op);
- TNode<Smi> result_type = SelectSmiConstant(
- TaggedIsSmi(result), BinaryOperationFeedback::kSignedSmall,
- BinaryOperationFeedback::kNumber);
- MaybeUpdateFeedback(SmiOr(result_type, var_left_feedback.value()),
- maybe_feedback_vector, slot_index);
+ TVARIABLE(Smi, feedback);
+
+ BinaryOpAssembler binop_asm(state());
+ TNode<Object> result = binop_asm.Generate_BitwiseBinaryOpWithFeedback(
+ bitwise_op, left, right, [=] { return context; }, &feedback, true);
+
+ MaybeUpdateFeedback(feedback.value(), maybe_feedback_vector, slot_index);
SetAccumulator(result);
Dispatch();
-
- BIND(&if_bigint_mix);
- MaybeUpdateFeedback(var_left_feedback.value(), maybe_feedback_vector,
- slot_index);
- ThrowTypeError(context, MessageTemplate::kBigIntMixedTypes);
}
};
@@ -1919,9 +1905,8 @@ IGNITION_HANDLER(JumpConstant, InterpreterAssembler) {
// will misbehave if passed arbitrary input values.
IGNITION_HANDLER(JumpIfTrue, InterpreterAssembler) {
TNode<Object> accumulator = GetAccumulator();
- TNode<IntPtrT> relative_jump = Signed(BytecodeOperandUImmWord(0));
CSA_DCHECK(this, IsBoolean(CAST(accumulator)));
- JumpIfTaggedEqual(accumulator, TrueConstant(), relative_jump);
+ JumpIfTaggedEqual(accumulator, TrueConstant(), 0);
}
// JumpIfTrueConstant <idx>
@@ -1931,9 +1916,8 @@ IGNITION_HANDLER(JumpIfTrue, InterpreterAssembler) {
// and will misbehave if passed arbitrary input values.
IGNITION_HANDLER(JumpIfTrueConstant, InterpreterAssembler) {
TNode<Object> accumulator = GetAccumulator();
- TNode<IntPtrT> relative_jump = LoadAndUntagConstantPoolEntryAtOperandIndex(0);
CSA_DCHECK(this, IsBoolean(CAST(accumulator)));
- JumpIfTaggedEqual(accumulator, TrueConstant(), relative_jump);
+ JumpIfTaggedEqualConstant(accumulator, TrueConstant(), 0);
}
// JumpIfFalse <imm>
@@ -1943,9 +1927,8 @@ IGNITION_HANDLER(JumpIfTrueConstant, InterpreterAssembler) {
// will misbehave if passed arbitrary input values.
IGNITION_HANDLER(JumpIfFalse, InterpreterAssembler) {
TNode<Object> accumulator = GetAccumulator();
- TNode<IntPtrT> relative_jump = Signed(BytecodeOperandUImmWord(0));
CSA_DCHECK(this, IsBoolean(CAST(accumulator)));
- JumpIfTaggedEqual(accumulator, FalseConstant(), relative_jump);
+ JumpIfTaggedEqual(accumulator, FalseConstant(), 0);
}
// JumpIfFalseConstant <idx>
@@ -1955,9 +1938,8 @@ IGNITION_HANDLER(JumpIfFalse, InterpreterAssembler) {
// and will misbehave if passed arbitrary input values.
IGNITION_HANDLER(JumpIfFalseConstant, InterpreterAssembler) {
TNode<Object> accumulator = GetAccumulator();
- TNode<IntPtrT> relative_jump = LoadAndUntagConstantPoolEntryAtOperandIndex(0);
CSA_DCHECK(this, IsBoolean(CAST(accumulator)));
- JumpIfTaggedEqual(accumulator, FalseConstant(), relative_jump);
+ JumpIfTaggedEqualConstant(accumulator, FalseConstant(), 0);
}
// JumpIfToBooleanTrue <imm>
@@ -1966,10 +1948,10 @@ IGNITION_HANDLER(JumpIfFalseConstant, InterpreterAssembler) {
// referenced by the accumulator is true when the object is cast to boolean.
IGNITION_HANDLER(JumpIfToBooleanTrue, InterpreterAssembler) {
TNode<Object> value = GetAccumulator();
- TNode<IntPtrT> relative_jump = Signed(BytecodeOperandUImmWord(0));
Label if_true(this), if_false(this);
BranchIfToBooleanIsTrue(value, &if_true, &if_false);
BIND(&if_true);
+ TNode<IntPtrT> relative_jump = Signed(BytecodeOperandUImmWord(0));
Jump(relative_jump);
BIND(&if_false);
Dispatch();
@@ -1982,10 +1964,10 @@ IGNITION_HANDLER(JumpIfToBooleanTrue, InterpreterAssembler) {
// cast to boolean.
IGNITION_HANDLER(JumpIfToBooleanTrueConstant, InterpreterAssembler) {
TNode<Object> value = GetAccumulator();
- TNode<IntPtrT> relative_jump = LoadAndUntagConstantPoolEntryAtOperandIndex(0);
Label if_true(this), if_false(this);
BranchIfToBooleanIsTrue(value, &if_true, &if_false);
BIND(&if_true);
+ TNode<IntPtrT> relative_jump = LoadAndUntagConstantPoolEntryAtOperandIndex(0);
Jump(relative_jump);
BIND(&if_false);
Dispatch();
@@ -1997,12 +1979,12 @@ IGNITION_HANDLER(JumpIfToBooleanTrueConstant, InterpreterAssembler) {
// referenced by the accumulator is false when the object is cast to boolean.
IGNITION_HANDLER(JumpIfToBooleanFalse, InterpreterAssembler) {
TNode<Object> value = GetAccumulator();
- TNode<IntPtrT> relative_jump = Signed(BytecodeOperandUImmWord(0));
Label if_true(this), if_false(this);
BranchIfToBooleanIsTrue(value, &if_true, &if_false);
BIND(&if_true);
Dispatch();
BIND(&if_false);
+ TNode<IntPtrT> relative_jump = Signed(BytecodeOperandUImmWord(0));
Jump(relative_jump);
}
@@ -2013,12 +1995,12 @@ IGNITION_HANDLER(JumpIfToBooleanFalse, InterpreterAssembler) {
// cast to boolean.
IGNITION_HANDLER(JumpIfToBooleanFalseConstant, InterpreterAssembler) {
TNode<Object> value = GetAccumulator();
- TNode<IntPtrT> relative_jump = LoadAndUntagConstantPoolEntryAtOperandIndex(0);
Label if_true(this), if_false(this);
BranchIfToBooleanIsTrue(value, &if_true, &if_false);
BIND(&if_true);
Dispatch();
BIND(&if_false);
+ TNode<IntPtrT> relative_jump = LoadAndUntagConstantPoolEntryAtOperandIndex(0);
Jump(relative_jump);
}
@@ -2028,8 +2010,7 @@ IGNITION_HANDLER(JumpIfToBooleanFalseConstant, InterpreterAssembler) {
// referenced by the accumulator is the null constant.
IGNITION_HANDLER(JumpIfNull, InterpreterAssembler) {
TNode<Object> accumulator = GetAccumulator();
- TNode<IntPtrT> relative_jump = Signed(BytecodeOperandUImmWord(0));
- JumpIfTaggedEqual(accumulator, NullConstant(), relative_jump);
+ JumpIfTaggedEqual(accumulator, NullConstant(), 0);
}
// JumpIfNullConstant <idx>
@@ -2038,8 +2019,7 @@ IGNITION_HANDLER(JumpIfNull, InterpreterAssembler) {
// pool if the object referenced by the accumulator is the null constant.
IGNITION_HANDLER(JumpIfNullConstant, InterpreterAssembler) {
TNode<Object> accumulator = GetAccumulator();
- TNode<IntPtrT> relative_jump = LoadAndUntagConstantPoolEntryAtOperandIndex(0);
- JumpIfTaggedEqual(accumulator, NullConstant(), relative_jump);
+ JumpIfTaggedEqualConstant(accumulator, NullConstant(), 0);
}
// JumpIfNotNull <imm>
@@ -2048,8 +2028,7 @@ IGNITION_HANDLER(JumpIfNullConstant, InterpreterAssembler) {
// referenced by the accumulator is not the null constant.
IGNITION_HANDLER(JumpIfNotNull, InterpreterAssembler) {
TNode<Object> accumulator = GetAccumulator();
- TNode<IntPtrT> relative_jump = Signed(BytecodeOperandUImmWord(0));
- JumpIfTaggedNotEqual(accumulator, NullConstant(), relative_jump);
+ JumpIfTaggedNotEqual(accumulator, NullConstant(), 0);
}
// JumpIfNotNullConstant <idx>
@@ -2058,8 +2037,7 @@ IGNITION_HANDLER(JumpIfNotNull, InterpreterAssembler) {
// pool if the object referenced by the accumulator is not the null constant.
IGNITION_HANDLER(JumpIfNotNullConstant, InterpreterAssembler) {
TNode<Object> accumulator = GetAccumulator();
- TNode<IntPtrT> relative_jump = LoadAndUntagConstantPoolEntryAtOperandIndex(0);
- JumpIfTaggedNotEqual(accumulator, NullConstant(), relative_jump);
+ JumpIfTaggedNotEqualConstant(accumulator, NullConstant(), 0);
}
// JumpIfUndefined <imm>
@@ -2068,8 +2046,7 @@ IGNITION_HANDLER(JumpIfNotNullConstant, InterpreterAssembler) {
// referenced by the accumulator is the undefined constant.
IGNITION_HANDLER(JumpIfUndefined, InterpreterAssembler) {
TNode<Object> accumulator = GetAccumulator();
- TNode<IntPtrT> relative_jump = Signed(BytecodeOperandUImmWord(0));
- JumpIfTaggedEqual(accumulator, UndefinedConstant(), relative_jump);
+ JumpIfTaggedEqual(accumulator, UndefinedConstant(), 0);
}
// JumpIfUndefinedConstant <idx>
@@ -2078,8 +2055,7 @@ IGNITION_HANDLER(JumpIfUndefined, InterpreterAssembler) {
// pool if the object referenced by the accumulator is the undefined constant.
IGNITION_HANDLER(JumpIfUndefinedConstant, InterpreterAssembler) {
TNode<Object> accumulator = GetAccumulator();
- TNode<IntPtrT> relative_jump = LoadAndUntagConstantPoolEntryAtOperandIndex(0);
- JumpIfTaggedEqual(accumulator, UndefinedConstant(), relative_jump);
+ JumpIfTaggedEqualConstant(accumulator, UndefinedConstant(), 0);
}
// JumpIfNotUndefined <imm>
@@ -2088,8 +2064,7 @@ IGNITION_HANDLER(JumpIfUndefinedConstant, InterpreterAssembler) {
// referenced by the accumulator is not the undefined constant.
IGNITION_HANDLER(JumpIfNotUndefined, InterpreterAssembler) {
TNode<Object> accumulator = GetAccumulator();
- TNode<IntPtrT> relative_jump = Signed(BytecodeOperandUImmWord(0));
- JumpIfTaggedNotEqual(accumulator, UndefinedConstant(), relative_jump);
+ JumpIfTaggedNotEqual(accumulator, UndefinedConstant(), 0);
}
// JumpIfNotUndefinedConstant <idx>
@@ -2099,8 +2074,7 @@ IGNITION_HANDLER(JumpIfNotUndefined, InterpreterAssembler) {
// constant.
IGNITION_HANDLER(JumpIfNotUndefinedConstant, InterpreterAssembler) {
TNode<Object> accumulator = GetAccumulator();
- TNode<IntPtrT> relative_jump = LoadAndUntagConstantPoolEntryAtOperandIndex(0);
- JumpIfTaggedNotEqual(accumulator, UndefinedConstant(), relative_jump);
+ JumpIfTaggedNotEqualConstant(accumulator, UndefinedConstant(), 0);
}
// JumpIfUndefinedOrNull <imm>
@@ -2144,7 +2118,6 @@ IGNITION_HANDLER(JumpIfUndefinedOrNullConstant, InterpreterAssembler) {
// referenced by the accumulator is a JSReceiver.
IGNITION_HANDLER(JumpIfJSReceiver, InterpreterAssembler) {
TNode<Object> accumulator = GetAccumulator();
- TNode<IntPtrT> relative_jump = Signed(BytecodeOperandUImmWord(0));
Label if_object(this), if_notobject(this, Label::kDeferred), if_notsmi(this);
Branch(TaggedIsSmi(accumulator), &if_notobject, &if_notsmi);
@@ -2152,6 +2125,7 @@ IGNITION_HANDLER(JumpIfJSReceiver, InterpreterAssembler) {
BIND(&if_notsmi);
Branch(IsJSReceiver(CAST(accumulator)), &if_object, &if_notobject);
BIND(&if_object);
+ TNode<IntPtrT> relative_jump = Signed(BytecodeOperandUImmWord(0));
Jump(relative_jump);
BIND(&if_notobject);
@@ -2164,7 +2138,6 @@ IGNITION_HANDLER(JumpIfJSReceiver, InterpreterAssembler) {
// pool if the object referenced by the accumulator is a JSReceiver.
IGNITION_HANDLER(JumpIfJSReceiverConstant, InterpreterAssembler) {
TNode<Object> accumulator = GetAccumulator();
- TNode<IntPtrT> relative_jump = LoadAndUntagConstantPoolEntryAtOperandIndex(0);
Label if_object(this), if_notobject(this), if_notsmi(this);
Branch(TaggedIsSmi(accumulator), &if_notobject, &if_notsmi);
@@ -2173,6 +2146,7 @@ IGNITION_HANDLER(JumpIfJSReceiverConstant, InterpreterAssembler) {
Branch(IsJSReceiver(CAST(accumulator)), &if_object, &if_notobject);
BIND(&if_object);
+ TNode<IntPtrT> relative_jump = LoadAndUntagConstantPoolEntryAtOperandIndex(0);
Jump(relative_jump);
BIND(&if_notobject);
@@ -2639,12 +2613,8 @@ IGNITION_HANDLER(CreateRestParameter, InterpreterAssembler) {
// Sets the pending message to the value in the accumulator, and returns the
// previous pending message in the accumulator.
IGNITION_HANDLER(SetPendingMessage, InterpreterAssembler) {
- TNode<ExternalReference> pending_message = ExternalConstant(
- ExternalReference::address_of_pending_message(isolate()));
- TNode<HeapObject> previous_message =
- UncheckedCast<HeapObject>(LoadFullTagged(pending_message));
- TNode<Object> new_message = GetAccumulator();
- StoreFullTaggedNoWriteBarrier(pending_message, new_message);
+ TNode<HeapObject> previous_message = GetPendingMessage();
+ SetPendingMessage(CAST(GetAccumulator()));
SetAccumulator(previous_message);
Dispatch();
}
diff --git a/deps/v8/src/interpreter/interpreter.cc b/deps/v8/src/interpreter/interpreter.cc
index 88d7706c72..26fe890914 100644
--- a/deps/v8/src/interpreter/interpreter.cc
+++ b/deps/v8/src/interpreter/interpreter.cc
@@ -35,6 +35,7 @@ namespace interpreter {
class InterpreterCompilationJob final : public UnoptimizedCompilationJob {
public:
InterpreterCompilationJob(ParseInfo* parse_info, FunctionLiteral* literal,
+ Handle<Script> script,
AccountingAllocator* allocator,
std::vector<FunctionLiteral*>* eager_inner_literals,
LocalIsolate* local_isolate);
@@ -170,7 +171,7 @@ bool ShouldPrintBytecode(Handle<SharedFunctionInfo> shared) {
} // namespace
InterpreterCompilationJob::InterpreterCompilationJob(
- ParseInfo* parse_info, FunctionLiteral* literal,
+ ParseInfo* parse_info, FunctionLiteral* literal, Handle<Script> script,
AccountingAllocator* allocator,
std::vector<FunctionLiteral*>* eager_inner_literals,
LocalIsolate* local_isolate)
@@ -179,8 +180,9 @@ InterpreterCompilationJob::InterpreterCompilationJob(
zone_(allocator, ZONE_NAME),
compilation_info_(&zone_, parse_info, literal),
local_isolate_(local_isolate),
- generator_(&zone_, &compilation_info_, parse_info->ast_string_constants(),
- eager_inner_literals) {}
+ generator_(local_isolate, &zone_, &compilation_info_,
+ parse_info->ast_string_constants(), eager_inner_literals,
+ script) {}
InterpreterCompilationJob::Status InterpreterCompilationJob::ExecuteJobImpl() {
RCS_SCOPE(parse_info()->runtime_call_stats(),
@@ -196,8 +198,7 @@ InterpreterCompilationJob::Status InterpreterCompilationJob::ExecuteJobImpl() {
MaybePrintAst(parse_info(), compilation_info());
}
- base::Optional<ParkedScope> parked_scope;
- if (local_isolate_) parked_scope.emplace(local_isolate_);
+ ParkedScope parked_scope(local_isolate_);
generator()->GenerateBytecode(stack_limit());
@@ -303,12 +304,13 @@ InterpreterCompilationJob::Status InterpreterCompilationJob::DoFinalizeJobImpl(
}
std::unique_ptr<UnoptimizedCompilationJob> Interpreter::NewCompilationJob(
- ParseInfo* parse_info, FunctionLiteral* literal,
+ ParseInfo* parse_info, FunctionLiteral* literal, Handle<Script> script,
AccountingAllocator* allocator,
std::vector<FunctionLiteral*>* eager_inner_literals,
LocalIsolate* local_isolate) {
return std::make_unique<InterpreterCompilationJob>(
- parse_info, literal, allocator, eager_inner_literals, local_isolate);
+ parse_info, literal, script, allocator, eager_inner_literals,
+ local_isolate);
}
std::unique_ptr<UnoptimizedCompilationJob>
@@ -317,7 +319,7 @@ Interpreter::NewSourcePositionCollectionJob(
Handle<BytecodeArray> existing_bytecode, AccountingAllocator* allocator,
LocalIsolate* local_isolate) {
auto job = std::make_unique<InterpreterCompilationJob>(
- parse_info, literal, allocator, nullptr, local_isolate);
+ parse_info, literal, Handle<Script>(), allocator, nullptr, local_isolate);
job->compilation_info()->SetBytecodeArray(existing_bytecode);
return job;
}
diff --git a/deps/v8/src/interpreter/interpreter.h b/deps/v8/src/interpreter/interpreter.h
index 9daa886e65..2210f78ee3 100644
--- a/deps/v8/src/interpreter/interpreter.h
+++ b/deps/v8/src/interpreter/interpreter.h
@@ -46,7 +46,7 @@ class Interpreter {
// Additionally, if |eager_inner_literals| is not null, adds any eagerly
// compilable inner FunctionLiterals to this list.
static std::unique_ptr<UnoptimizedCompilationJob> NewCompilationJob(
- ParseInfo* parse_info, FunctionLiteral* literal,
+ ParseInfo* parse_info, FunctionLiteral* literal, Handle<Script> script,
AccountingAllocator* allocator,
std::vector<FunctionLiteral*>* eager_inner_literals,
LocalIsolate* local_isolate);
diff --git a/deps/v8/src/json/json-parser.cc b/deps/v8/src/json/json-parser.cc
index e1086d68ea..74a8046b2e 100644
--- a/deps/v8/src/json/json-parser.cc
+++ b/deps/v8/src/json/json-parser.cc
@@ -965,9 +965,10 @@ Handle<Object> JsonParser<Char>::ParseJsonNumber() {
}
base::Vector<const Char> chars(start, cursor_ - start);
- number = StringToDouble(chars,
- NO_FLAGS, // Hex, octal or trailing junk.
- std::numeric_limits<double>::quiet_NaN());
+ number =
+ StringToDouble(chars,
+ NO_CONVERSION_FLAGS, // Hex, octal or trailing junk.
+ std::numeric_limits<double>::quiet_NaN());
DCHECK(!std::isnan(number));
}
diff --git a/deps/v8/src/json/json-parser.h b/deps/v8/src/json/json-parser.h
index 4819f9d64e..227b01fe74 100644
--- a/deps/v8/src/json/json-parser.h
+++ b/deps/v8/src/json/json-parser.h
@@ -8,6 +8,7 @@
#include "include/v8-callbacks.h"
#include "src/base/small-vector.h"
#include "src/base/strings.h"
+#include "src/common/high-allocation-throughput-scope.h"
#include "src/execution/isolate.h"
#include "src/heap/factory.h"
#include "src/objects/objects.h"
@@ -145,6 +146,8 @@ class JsonParser final {
V8_WARN_UNUSED_RESULT static MaybeHandle<Object> Parse(
Isolate* isolate, Handle<String> source, Handle<Object> reviver) {
+ HighAllocationThroughputScope high_throughput_scope(
+ V8::GetCurrentPlatform());
Handle<Object> result;
ASSIGN_RETURN_ON_EXCEPTION(isolate, result,
JsonParser(isolate, source).ParseJson(), Object);
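
The hunk above wraps the whole JSON parse in a HighAllocationThroughputScope constructed with the current platform; the diff does not show what the scope does internally, but the shape is a standard RAII toggle around an allocation-heavy phase. A rough stand-alone sketch of such a scope (ThroughputMode and HighThroughputScope are invented names for illustration, not the V8 class):

#include <iostream>

struct ThroughputMode {
  bool high = false;
};

class HighThroughputScope {
 public:
  explicit HighThroughputScope(ThroughputMode* mode)
      : mode_(mode), previous_(mode->high) {
    mode_->high = true;  // Entering a bulk-allocation region (e.g. a parse).
  }
  ~HighThroughputScope() { mode_->high = previous_; }  // Restore on exit.

 private:
  ThroughputMode* mode_;
  bool previous_;
};

int main() {
  ThroughputMode mode;
  {
    HighThroughputScope scope(&mode);
    std::cout << "during parse: " << mode.high << "\n";  // 1
  }
  std::cout << "after parse: " << mode.high << "\n";  // 0
}
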
diff --git a/deps/v8/src/json/json-stringifier.cc b/deps/v8/src/json/json-stringifier.cc
index 2a71ccf26f..8dd3118447 100644
--- a/deps/v8/src/json/json-stringifier.cc
+++ b/deps/v8/src/json/json-stringifier.cc
@@ -102,6 +102,7 @@ class JsonStringifier {
V8_INLINE static bool DoNotEscape(Char c);
V8_INLINE void NewLine();
+ V8_NOINLINE void NewLineOutline();
V8_INLINE void Indent() { indent_++; }
V8_INLINE void Unindent() { indent_--; }
V8_INLINE void Separator(bool first);
@@ -383,8 +384,10 @@ JsonStringifier::Result JsonStringifier::StackPush(Handle<Object> object,
{
DisallowGarbageCollection no_gc;
- for (size_t i = 0; i < stack_.size(); ++i) {
- if (*stack_[i].second == *object) {
+ Object raw_obj = *object;
+ size_t size = stack_.size();
+ for (size_t i = 0; i < size; ++i) {
+ if (*stack_[i].second == raw_obj) {
AllowGarbageCollection allow_to_return_error;
Handle<String> circle_description =
ConstructCircularStructureErrorMessage(key, i);
@@ -408,26 +411,26 @@ class CircularStructureMessageBuilder {
void AppendStartLine(Handle<Object> start_object) {
builder_.AppendCString(kStartPrefix);
- builder_.AppendCString("starting at object with constructor ");
+ builder_.AppendCStringLiteral("starting at object with constructor ");
AppendConstructorName(start_object);
}
void AppendNormalLine(Handle<Object> key, Handle<Object> object) {
builder_.AppendCString(kLinePrefix);
AppendKey(key);
- builder_.AppendCString(" -> object with constructor ");
+ builder_.AppendCStringLiteral(" -> object with constructor ");
AppendConstructorName(object);
}
void AppendClosingLine(Handle<Object> closing_key) {
builder_.AppendCString(kEndPrefix);
AppendKey(closing_key);
- builder_.AppendCString(" closes the circle");
+ builder_.AppendCStringLiteral(" closes the circle");
}
void AppendEllipsis() {
builder_.AppendCString(kLinePrefix);
- builder_.AppendCString("...");
+ builder_.AppendCStringLiteral("...");
}
MaybeHandle<String> Finish() { return builder_.Finish(); }
@@ -444,7 +447,7 @@ class CircularStructureMessageBuilder {
// A key can either be a string, the empty string or a Smi.
void AppendKey(Handle<Object> key) {
if (key->IsSmi()) {
- builder_.AppendCString("index ");
+ builder_.AppendCStringLiteral("index ");
AppendSmi(Smi::cast(*key));
return;
}
@@ -452,9 +455,9 @@ class CircularStructureMessageBuilder {
CHECK(key->IsString());
Handle<String> key_as_string = Handle<String>::cast(key);
if (key_as_string->length() == 0) {
- builder_.AppendCString("<anonymous>");
+ builder_.AppendCStringLiteral("<anonymous>");
} else {
- builder_.AppendCString("property '");
+ builder_.AppendCStringLiteral("property '");
builder_.AppendString(key_as_string);
builder_.AppendCharacter('\'');
}
@@ -517,14 +520,21 @@ JsonStringifier::Result JsonStringifier::Serialize_(Handle<Object> object,
bool comma,
Handle<Object> key) {
StackLimitCheck interrupt_check(isolate_);
- Handle<Object> initial_value = object;
if (interrupt_check.InterruptRequested() &&
isolate_->stack_guard()->HandleInterrupts().IsException(isolate_)) {
return EXCEPTION;
}
- if (object->IsJSReceiver() || object->IsBigInt()) {
- ASSIGN_RETURN_ON_EXCEPTION_VALUE(
- isolate_, object, ApplyToJsonFunction(object, key), EXCEPTION);
+
+ Handle<Object> initial_value = object;
+ PtrComprCageBase cage_base(isolate_);
+ if (!object->IsSmi()) {
+ InstanceType instance_type =
+ HeapObject::cast(*object).map(cage_base).instance_type();
+ if (InstanceTypeChecker::IsJSReceiver(instance_type) ||
+ InstanceTypeChecker::IsBigInt(instance_type)) {
+ ASSIGN_RETURN_ON_EXCEPTION_VALUE(
+ isolate_, object, ApplyToJsonFunction(object, key), EXCEPTION);
+ }
}
if (!replacer_function_.is_null()) {
ASSIGN_RETURN_ON_EXCEPTION_VALUE(
@@ -537,7 +547,9 @@ JsonStringifier::Result JsonStringifier::Serialize_(Handle<Object> object,
return SerializeSmi(Smi::cast(*object));
}
- switch (HeapObject::cast(*object).map().instance_type()) {
+ InstanceType instance_type =
+ HeapObject::cast(*object).map(cage_base).instance_type();
+ switch (instance_type) {
case HEAP_NUMBER_TYPE:
if (deferred_string_key) SerializeDeferredKey(comma, key);
return SerializeHeapNumber(Handle<HeapNumber>::cast(object));
@@ -549,15 +561,15 @@ JsonStringifier::Result JsonStringifier::Serialize_(Handle<Object> object,
switch (Oddball::cast(*object).kind()) {
case Oddball::kFalse:
if (deferred_string_key) SerializeDeferredKey(comma, key);
- builder_.AppendCString("false");
+ builder_.AppendCStringLiteral("false");
return SUCCESS;
case Oddball::kTrue:
if (deferred_string_key) SerializeDeferredKey(comma, key);
- builder_.AppendCString("true");
+ builder_.AppendCStringLiteral("true");
return SUCCESS;
case Oddball::kNull:
if (deferred_string_key) SerializeDeferredKey(comma, key);
- builder_.AppendCString("null");
+ builder_.AppendCStringLiteral("null");
return SUCCESS;
default:
return UNCHANGED;
@@ -572,16 +584,16 @@ JsonStringifier::Result JsonStringifier::Serialize_(Handle<Object> object,
case SYMBOL_TYPE:
return UNCHANGED;
default:
- if (object->IsString()) {
+ if (InstanceTypeChecker::IsString(instance_type)) {
if (deferred_string_key) SerializeDeferredKey(comma, key);
SerializeString(Handle<String>::cast(object));
return SUCCESS;
} else {
DCHECK(object->IsJSReceiver());
- if (object->IsCallable()) return UNCHANGED;
+ if (HeapObject::cast(*object).IsCallable(cage_base)) return UNCHANGED;
// Go to slow path for global proxy and objects requiring access checks.
if (deferred_string_key) SerializeDeferredKey(comma, key);
- if (object->IsJSProxy()) {
+ if (InstanceTypeChecker::IsJSProxy(instance_type)) {
return SerializeJSProxy(Handle<JSProxy>::cast(object), key);
}
return SerializeJSObject(Handle<JSObject>::cast(object), key);
@@ -610,7 +622,11 @@ JsonStringifier::Result JsonStringifier::SerializeJSPrimitiveWrapper(
*factory()->NewTypeError(MessageTemplate::kBigIntSerializeJSON));
return EXCEPTION;
} else if (raw.IsBoolean()) {
- builder_.AppendCString(raw.IsTrue(isolate_) ? "true" : "false");
+ if (raw.IsTrue(isolate_)) {
+ builder_.AppendCStringLiteral("true");
+ } else {
+ builder_.AppendCStringLiteral("false");
+ }
} else {
// ES6 24.3.2.1 step 10.c, serialize as an ordinary JSObject.
return SerializeJSObject(object, key);
@@ -628,7 +644,7 @@ JsonStringifier::Result JsonStringifier::SerializeSmi(Smi object) {
JsonStringifier::Result JsonStringifier::SerializeDouble(double number) {
if (std::isinf(number) || std::isnan(number)) {
- builder_.AppendCString("null");
+ builder_.AppendCStringLiteral("null");
return SUCCESS;
}
static const int kBufferSize = 100;
@@ -640,76 +656,90 @@ JsonStringifier::Result JsonStringifier::SerializeDouble(double number) {
JsonStringifier::Result JsonStringifier::SerializeJSArray(
Handle<JSArray> object, Handle<Object> key) {
- HandleScope handle_scope(isolate_);
- Result stack_push = StackPush(object, key);
- if (stack_push != SUCCESS) return stack_push;
uint32_t length = 0;
CHECK(object->length().ToArrayLength(&length));
DCHECK(!object->IsAccessCheckNeeded());
+ if (length == 0) {
+ builder_.AppendCStringLiteral("[]");
+ return SUCCESS;
+ }
+
+ PtrComprCageBase cage_base(isolate_);
+ Result stack_push = StackPush(object, key);
+ if (stack_push != SUCCESS) return stack_push;
+
builder_.AppendCharacter('[');
Indent();
uint32_t i = 0;
if (replacer_function_.is_null()) {
- switch (object->GetElementsKind()) {
+ StackLimitCheck interrupt_check(isolate_);
+ const uint32_t kInterruptLength = 4000;
+ uint32_t limit = std::min(length, kInterruptLength);
+ const uint32_t kMaxAllowedFastPackedLength =
+ std::numeric_limits<uint32_t>::max() - kInterruptLength;
+ STATIC_ASSERT(FixedArray::kMaxLength < kMaxAllowedFastPackedLength);
+ switch (object->GetElementsKind(cage_base)) {
case PACKED_SMI_ELEMENTS: {
- Handle<FixedArray> elements(FixedArray::cast(object->elements()),
- isolate_);
- StackLimitCheck interrupt_check(isolate_);
- while (i < length) {
+ Handle<FixedArray> elements(
+ FixedArray::cast(object->elements(cage_base)), isolate_);
+ while (true) {
+ for (; i < limit; i++) {
+ Separator(i == 0);
+ SerializeSmi(Smi::cast(elements->get(cage_base, i)));
+ }
+ if (i >= length) break;
+ DCHECK_LT(limit, kMaxAllowedFastPackedLength);
+ limit = std::min(length, limit + kInterruptLength);
if (interrupt_check.InterruptRequested() &&
isolate_->stack_guard()->HandleInterrupts().IsException(
isolate_)) {
return EXCEPTION;
}
- Separator(i == 0);
- SerializeSmi(Smi::cast(elements->get(i)));
- i++;
}
break;
}
case PACKED_DOUBLE_ELEMENTS: {
- // Empty array is FixedArray but not FixedDoubleArray.
- if (length == 0) break;
Handle<FixedDoubleArray> elements(
- FixedDoubleArray::cast(object->elements()), isolate_);
- StackLimitCheck interrupt_check(isolate_);
- while (i < length) {
+ FixedDoubleArray::cast(object->elements(cage_base)), isolate_);
+ while (true) {
+ for (; i < limit; i++) {
+ Separator(i == 0);
+ SerializeDouble(elements->get_scalar(i));
+ }
+ if (i >= length) break;
+ DCHECK_LT(limit, kMaxAllowedFastPackedLength);
+ limit = std::min(length, limit + kInterruptLength);
if (interrupt_check.InterruptRequested() &&
isolate_->stack_guard()->HandleInterrupts().IsException(
isolate_)) {
return EXCEPTION;
}
- Separator(i == 0);
- SerializeDouble(elements->get_scalar(i));
- i++;
}
break;
}
case PACKED_ELEMENTS: {
+ HandleScope handle_scope(isolate_);
Handle<Object> old_length(object->length(), isolate_);
- while (i < length) {
+ for (i = 0; i < length; i++) {
if (object->length() != *old_length ||
- object->GetElementsKind() != PACKED_ELEMENTS) {
+ object->GetElementsKind(cage_base) != PACKED_ELEMENTS) {
// Fall back to slow path.
break;
}
Separator(i == 0);
Result result = SerializeElement(
isolate_,
- Handle<Object>(FixedArray::cast(object->elements()).get(i),
- isolate_),
+ handle(FixedArray::cast(object->elements()).get(cage_base, i),
+ isolate_),
i);
if (result == UNCHANGED) {
- builder_.AppendCString("null");
+ builder_.AppendCStringLiteral("null");
} else if (result != SUCCESS) {
return result;
}
- i++;
}
break;
}
- // The FAST_HOLEY_* cases could be handled in a faster way. They resemble
- // the non-holey cases except that a lookup is necessary for holes.
default:
break;
}
@@ -720,7 +750,7 @@ JsonStringifier::Result JsonStringifier::SerializeJSArray(
if (result != SUCCESS) return result;
}
Unindent();
- if (length > 0) NewLine();
+ NewLine();
builder_.AppendCharacter(']');
StackPop();
return SUCCESS;
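
The packed-SMI and packed-double fast paths above now emit elements in slices of kInterruptLength (4000) and consult the stack guard only between slices instead of once per element. A stand-alone sketch of that batched interrupt check in plain C++ (CheckInterrupt, Emit and SerializeBatched are illustrative stand-ins):

#include <algorithm>
#include <cstdint>
#include <iostream>
#include <vector>

constexpr uint32_t kBatch = 4000;

bool CheckInterrupt() { return false; }  // Stand-in for HandleInterrupts().
void Emit(int) {}                        // Stand-in for SerializeSmi/Double.

bool SerializeBatched(const std::vector<int>& elements) {
  const uint32_t length = static_cast<uint32_t>(elements.size());
  uint32_t i = 0;
  uint32_t limit = std::min(length, kBatch);
  while (true) {
    for (; i < limit; i++) Emit(elements[i]);
    if (i >= length) break;                    // Everything emitted.
    limit = std::min(length, limit + kBatch);
    if (CheckInterrupt()) return false;        // Bail out between slices only.
  }
  return true;
}

int main() {
  std::vector<int> elements(10000, 7);
  std::cout << SerializeBatched(elements) << "\n";  // Prints 1.
}
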
@@ -734,6 +764,7 @@ JsonStringifier::Result JsonStringifier::SerializeArrayLikeSlow(
isolate_->Throw(*isolate_->factory()->NewInvalidStringLengthError());
return EXCEPTION;
}
+ HandleScope handle_scope(isolate_);
for (uint32_t i = start; i < length; i++) {
Separator(i == 0);
Handle<Object> element;
@@ -745,7 +776,7 @@ JsonStringifier::Result JsonStringifier::SerializeArrayLikeSlow(
if (result == UNCHANGED) {
// Detect overflow sooner for large sparse arrays.
if (builder_.HasOverflowed()) return EXCEPTION;
- builder_.AppendCString("null");
+ builder_.AppendCStringLiteral("null");
} else {
return result;
}
@@ -753,58 +784,81 @@ JsonStringifier::Result JsonStringifier::SerializeArrayLikeSlow(
return SUCCESS;
}
+namespace {
+V8_INLINE bool CanFastSerializeJSObject(PtrComprCageBase cage_base,
+ JSObject raw_object, Isolate* isolate) {
+ DisallowGarbageCollection no_gc;
+ if (raw_object.map(cage_base).IsCustomElementsReceiverMap()) return false;
+ if (!raw_object.HasFastProperties(cage_base)) return false;
+ auto roots = ReadOnlyRoots(isolate);
+ auto elements = raw_object.elements(cage_base);
+ return elements == roots.empty_fixed_array() ||
+ elements == roots.empty_slow_element_dictionary();
+}
+} // namespace
+
JsonStringifier::Result JsonStringifier::SerializeJSObject(
Handle<JSObject> object, Handle<Object> key) {
+ PtrComprCageBase cage_base(isolate_);
HandleScope handle_scope(isolate_);
+
+ if (!property_list_.is_null() ||
+ !CanFastSerializeJSObject(cage_base, *object, isolate_)) {
+ Result stack_push = StackPush(object, key);
+ if (stack_push != SUCCESS) return stack_push;
+ Result result = SerializeJSReceiverSlow(object);
+ if (result != SUCCESS) return result;
+ StackPop();
+ return SUCCESS;
+ }
+
+ DCHECK(!object->IsJSGlobalProxy());
+ DCHECK(!object->HasIndexedInterceptor());
+ DCHECK(!object->HasNamedInterceptor());
+
+ Handle<Map> map(object->map(cage_base), isolate_);
+ if (map->NumberOfOwnDescriptors() == 0) {
+ builder_.AppendCStringLiteral("{}");
+ return SUCCESS;
+ }
+
Result stack_push = StackPush(object, key);
if (stack_push != SUCCESS) return stack_push;
-
- if (property_list_.is_null() &&
- !object->map().IsCustomElementsReceiverMap() &&
- object->HasFastProperties() &&
- (object->elements() == ReadOnlyRoots(isolate_).empty_fixed_array() ||
- object->elements() ==
- ReadOnlyRoots(isolate_).empty_slow_element_dictionary())) {
- DCHECK(!object->IsJSGlobalProxy());
- DCHECK(!object->HasIndexedInterceptor());
- DCHECK(!object->HasNamedInterceptor());
- Handle<Map> map(object->map(), isolate_);
- builder_.AppendCharacter('{');
- Indent();
- bool comma = false;
- for (InternalIndex i : map->IterateOwnDescriptors()) {
- Handle<Name> name(map->instance_descriptors(isolate_).GetKey(i),
- isolate_);
+ builder_.AppendCharacter('{');
+ Indent();
+ bool comma = false;
+ for (InternalIndex i : map->IterateOwnDescriptors()) {
+ Handle<String> key_name;
+ PropertyDetails details = PropertyDetails::Empty();
+ {
+ DisallowGarbageCollection no_gc;
+ DescriptorArray descriptors = map->instance_descriptors(cage_base);
+ Name name = descriptors.GetKey(i);
// TODO(rossberg): Should this throw?
- if (!name->IsString()) continue;
- Handle<String> key_name = Handle<String>::cast(name);
- PropertyDetails details =
- map->instance_descriptors(isolate_).GetDetails(i);
- if (details.IsDontEnum()) continue;
- Handle<Object> property;
- if (details.location() == PropertyLocation::kField &&
- *map == object->map()) {
- DCHECK_EQ(kData, details.kind());
- FieldIndex field_index = FieldIndex::ForDescriptor(*map, i);
- property = JSObject::FastPropertyAt(object, details.representation(),
- field_index);
- } else {
- ASSIGN_RETURN_ON_EXCEPTION_VALUE(
- isolate_, property,
- Object::GetPropertyOrElement(isolate_, object, key_name),
- EXCEPTION);
- }
- Result result = SerializeProperty(property, comma, key_name);
- if (!comma && result == SUCCESS) comma = true;
- if (result == EXCEPTION) return result;
+ if (!name.IsString(cage_base)) continue;
+ key_name = handle(String::cast(name), isolate_);
+ details = descriptors.GetDetails(i);
}
- Unindent();
- if (comma) NewLine();
- builder_.AppendCharacter('}');
- } else {
- Result result = SerializeJSReceiverSlow(object);
- if (result != SUCCESS) return result;
+ if (details.IsDontEnum()) continue;
+ Handle<Object> property;
+ if (details.location() == PropertyLocation::kField &&
+ *map == object->map(cage_base)) {
+ DCHECK_EQ(PropertyKind::kData, details.kind());
+ FieldIndex field_index = FieldIndex::ForDescriptor(*map, i);
+ property = JSObject::FastPropertyAt(object, details.representation(),
+ field_index);
+ } else {
+ ASSIGN_RETURN_ON_EXCEPTION_VALUE(
+ isolate_, property,
+ Object::GetPropertyOrElement(isolate_, object, key_name), EXCEPTION);
+ }
+ Result result = SerializeProperty(property, comma, key_name);
+ if (!comma && result == SUCCESS) comma = true;
+ if (result == EXCEPTION) return result;
}
+ Unindent();
+ if (comma) NewLine();
+ builder_.AppendCharacter('}');
StackPop();
return SUCCESS;
}
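
The rewritten fast path above reads descriptor keys and details inside a DisallowGarbageCollection block, copies them into a handle and a plain value, and only then performs the property load and serialization, which may allocate and move objects. A stand-alone sketch of that copy-out-before-allocating pattern in plain C++, using vector growth as a stand-in for a moving GC (Property and the loop body are illustrative):

#include <iostream>
#include <string>
#include <vector>

struct Property {
  std::string name;
  int value;
};

int main() {
  std::vector<Property> props = {{"a", 1}, {"b", 2}};
  for (size_t i = 0; i < props.size(); ++i) {
    std::string key;
    int value = 0;
    {
      // "No reallocation" region: raw references into props are valid here.
      const Property& raw = props[i];
      key = raw.name;  // Copy out what later steps need.
      value = raw.value;
    }
    // May reallocate props; the copies above stay valid, a raw reference
    // taken earlier would not.
    if (key == "a") props.push_back({"c", 3});
    std::cout << key << "=" << value << "\n";
  }
}
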
@@ -887,14 +941,17 @@ void JsonStringifier::SerializeStringUnchecked_(
SrcChar c = src[i];
if (DoNotEscape(c)) {
dest->Append(c);
- } else if (c >= 0xD800 && c <= 0xDFFF) {
+ } else if (sizeof(SrcChar) != 1 &&
+ base::IsInRange(c, static_cast<SrcChar>(0xD800),
+ static_cast<SrcChar>(0xDFFF))) {
// The current character is a surrogate.
if (c <= 0xDBFF) {
// The current character is a leading surrogate.
if (i + 1 < src.length()) {
// There is a next character.
SrcChar next = src[i + 1];
- if (next >= 0xDC00 && next <= 0xDFFF) {
+ if (base::IsInRange(next, static_cast<SrcChar>(0xDC00),
+ static_cast<SrcChar>(0xDFFF))) {
// The next character is a trailing surrogate, meaning this is a
// surrogate pair.
dest->Append(c);
@@ -950,14 +1007,17 @@ void JsonStringifier::SerializeString_(Handle<String> string) {
SrcChar c = reader.Get<SrcChar>(i);
if (DoNotEscape(c)) {
builder_.Append<SrcChar, DestChar>(c);
- } else if (c >= 0xD800 && c <= 0xDFFF) {
+ } else if (sizeof(SrcChar) != 1 &&
+ base::IsInRange(c, static_cast<SrcChar>(0xD800),
+ static_cast<SrcChar>(0xDFFF))) {
// The current character is a surrogate.
if (c <= 0xDBFF) {
// The current character is a leading surrogate.
if (i + 1 < reader.length()) {
// There is a next character.
SrcChar next = reader.Get<SrcChar>(i + 1);
- if (next >= 0xDC00 && next <= 0xDFFF) {
+ if (base::IsInRange(next, static_cast<SrcChar>(0xDC00),
+ static_cast<SrcChar>(0xDFFF))) {
// The next character is a trailing surrogate, meaning this is a
// surrogate pair.
builder_.Append<SrcChar, DestChar>(c);
@@ -966,7 +1026,7 @@ void JsonStringifier::SerializeString_(Handle<String> string) {
} else {
// The next character is not a trailing surrogate. Thus, the
// current character is a lone leading surrogate.
- builder_.AppendCString("\\u");
+ builder_.AppendCStringLiteral("\\u");
char* const hex = DoubleToRadixCString(c, 16);
builder_.AppendCString(hex);
DeleteArray(hex);
@@ -974,7 +1034,7 @@ void JsonStringifier::SerializeString_(Handle<String> string) {
} else {
// There is no next character. Thus, the current character is a
// lone leading surrogate.
- builder_.AppendCString("\\u");
+ builder_.AppendCStringLiteral("\\u");
char* const hex = DoubleToRadixCString(c, 16);
builder_.AppendCString(hex);
DeleteArray(hex);
@@ -984,7 +1044,7 @@ void JsonStringifier::SerializeString_(Handle<String> string) {
// been preceded by a leading surrogate, we would've ended up in the
// other branch earlier on, and the current character would've been
// handled as part of the surrogate pair already.)
- builder_.AppendCString("\\u");
+ builder_.AppendCStringLiteral("\\u");
char* const hex = DoubleToRadixCString(c, 16);
builder_.AppendCString(hex);
DeleteArray(hex);
@@ -1000,7 +1060,9 @@ void JsonStringifier::SerializeString_(Handle<String> string) {
template <>
bool JsonStringifier::DoNotEscape(uint8_t c) {
// https://tc39.github.io/ecma262/#table-json-single-character-escapes
- return c >= 0x23 && c <= 0x7E && c != 0x5C;
+ return base::IsInRange(c, static_cast<uint8_t>(0x23),
+ static_cast<uint8_t>(0x7E)) &&
+ c != 0x5C;
}
template <>
@@ -1011,6 +1073,10 @@ bool JsonStringifier::DoNotEscape(uint16_t c) {
void JsonStringifier::NewLine() {
if (gap_ == nullptr) return;
+ NewLineOutline();
+}
+
+void JsonStringifier::NewLineOutline() {
builder_.AppendCharacter('\n');
for (int i = 0; i < indent_; i++) builder_.AppendCString(gap_);
}
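
NewLine keeps only the gap_ == nullptr early-out inline and forwards the rare pretty-printing case to the V8_NOINLINE NewLineOutline, presumably so the many call sites in the compact (no-gap) mode pay just a single test. A small stand-alone sketch of that inline-guard / out-of-line-body split (the Builder type and names are illustrative; V8 uses the V8_INLINE and V8_NOINLINE macros):

#include <iostream>
#include <string>

struct Builder {
  const char* gap = nullptr;  // Compact mode: no gap, NewLine is a no-op.
  int indent = 0;
  std::string out;

  void NewLine() {
    if (gap == nullptr) return;  // Hot path: one test, nothing else inlined.
    NewLineOutline();            // Cold path, kept out of the caller's body.
  }

  void NewLineOutline() {
    out += '\n';
    for (int i = 0; i < indent; i++) out += gap;
  }
};

int main() {
  Builder compact;
  compact.NewLine();  // No output; the guard returns immediately.

  Builder pretty;
  pretty.gap = "  ";
  pretty.indent = 2;
  pretty.NewLine();
  std::cout << pretty.out.size() << "\n";  // Prints 5: '\n' plus two gaps.
}
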
diff --git a/deps/v8/src/libsampler/sampler.cc b/deps/v8/src/libsampler/sampler.cc
index fb94972b85..111ae5e864 100644
--- a/deps/v8/src/libsampler/sampler.cc
+++ b/deps/v8/src/libsampler/sampler.cc
@@ -210,8 +210,8 @@ void SamplerManager::AddSampler(Sampler* sampler) {
sampler_map_.emplace(thread_id, std::move(samplers));
} else {
SamplerList& samplers = it->second;
- auto it = std::find(samplers.begin(), samplers.end(), sampler);
- if (it == samplers.end()) samplers.push_back(sampler);
+ auto sampler_it = std::find(samplers.begin(), samplers.end(), sampler);
+ if (sampler_it == samplers.end()) samplers.push_back(sampler);
}
}
diff --git a/deps/v8/src/logging/counters-definitions.h b/deps/v8/src/logging/counters-definitions.h
index 59978ef428..d3cdccd91a 100644
--- a/deps/v8/src/logging/counters-definitions.h
+++ b/deps/v8/src/logging/counters-definitions.h
@@ -65,8 +65,6 @@ namespace internal {
HR(compile_script_cache_behaviour, V8.CompileScript.CacheBehaviour, 0, 20, \
21) \
HR(wasm_memory_allocation_result, V8.WasmMemoryAllocationResult, 0, 3, 4) \
- HR(wasm_address_space_usage_mb, V8.WasmAddressSpaceUsageMiB, 0, 1 << 20, \
- 128) \
/* committed code size per module, collected on GC */ \
HR(wasm_module_code_size_mb, V8.WasmModuleCodeSizeMiB, 0, 1024, 64) \
/* code size per module after baseline compilation */ \
@@ -104,7 +102,10 @@ namespace internal {
/* The maximum of 100M backtracks takes roughly 2 seconds on my machine. */ \
HR(regexp_backtracks, V8.RegExpBacktracks, 1, 100000000, 50) \
/* See the CagedMemoryAllocationOutcome enum in backing-store.cc */ \
- HR(caged_memory_allocation_outcome, V8.CagedMemoryAllocationOutcome, 0, 2, 3)
+ HR(caged_memory_allocation_outcome, V8.CagedMemoryAllocationOutcome, 0, 2, \
+ 3) \
+ /* number of times a cache event is triggered for a wasm module */ \
+ HR(wasm_cache_count, V8.WasmCacheCount, 0, 100, 101)
#define NESTED_TIMED_HISTOGRAM_LIST(HT) \
/* Timer histograms, not thread safe: HT(name, caption, max, unit) */ \
@@ -135,7 +136,9 @@ namespace internal {
HT(snapshot_decompress, V8.SnapshotDecompress, 10000000, MICROSECOND) \
/* Time to decompress context snapshot. */ \
HT(context_snapshot_decompress, V8.ContextSnapshotDecompress, 10000000, \
- MICROSECOND)
+ MICROSECOND) \
+ HT(wasm_compile_after_deserialize, \
+ V8.WasmCompileAfterDeserializeMilliSeconds, 1000000, MILLISECOND)
#define NESTED_TIMED_HISTOGRAM_LIST_SLOW(HT) \
/* Total V8 time (including JS and runtime calls, excluding callbacks) */ \
@@ -163,6 +166,8 @@ namespace internal {
HT(gc_scavenger_foreground, V8.GCScavengerForeground, 10000, MILLISECOND) \
HT(measure_memory_delay_ms, V8.MeasureMemoryDelayMilliseconds, 100000, \
MILLISECOND) \
+ HT(gc_time_to_global_safepoint, V8.GC.TimeToGlobalSafepoint, 10000000, \
+ MICROSECOND) \
HT(gc_time_to_safepoint, V8.GC.TimeToSafepoint, 10000000, MICROSECOND) \
HT(gc_time_to_collection_on_background, V8.GC.TimeToCollectionOnBackground, \
10000000, MICROSECOND) \
@@ -304,9 +309,7 @@ namespace internal {
SC(contexts_created_by_snapshot, V8.ContextsCreatedBySnapshot) \
/* Number of code objects found from pc. */ \
SC(pc_to_code, V8.PcToCode) \
- SC(pc_to_code_cached, V8.PcToCodeCached) \
- /* The store-buffer implementation of the write barrier. */ \
- SC(store_buffer_overflows, V8.StoreBufferOverflows)
+ SC(pc_to_code_cached, V8.PcToCodeCached)
#define STATS_COUNTER_LIST_2(SC) \
/* Amount of (JS) compiled code. */ \
@@ -346,11 +349,9 @@ namespace internal {
/* Total code size (including metadata) of baseline code or bytecode. */ \
SC(total_baseline_code_size, V8.TotalBaselineCodeSize) \
/* Total count of functions compiled using the baseline compiler. */ \
- SC(total_baseline_compile_count, V8.TotalBaselineCompileCount)
-
-#define STATS_COUNTER_TS_LIST(SC) \
- SC(wasm_generated_code_size, V8.WasmGeneratedCodeBytes) \
- SC(wasm_reloc_size, V8.WasmRelocBytes) \
+ SC(total_baseline_compile_count, V8.TotalBaselineCompileCount) \
+ SC(wasm_generated_code_size, V8.WasmGeneratedCodeBytes) \
+ SC(wasm_reloc_size, V8.WasmRelocBytes) \
SC(wasm_lazily_compiled_functions, V8.WasmLazilyCompiledFunctions)
// List of counters that can be incremented from generated code. We need them in
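
The histogram lists above are X-macros: a single list of HR(name, caption, min, max, num_buckets) entries is expanded several times (member declarations, initialization tables, enum ids), which is why adding one HR line such as wasm_cache_count is enough to register a new histogram. A minimal standalone sketch of the pattern, with quoted captions and a plain spec table standing in for V8's real machinery:

#include <cstdio>

#define MY_HISTOGRAM_LIST(V)                                    \
  V(wasm_cache_count, "V8.WasmCacheCount", 0, 100, 101)         \
  V(regexp_backtracks, "V8.RegExpBacktracks", 1, 100000000, 50)

struct HistogramSpec {
  const char* name;
  const char* caption;
  int min, max, num_buckets;
};

// One expansion of the list: a table of specs.
#define HR(name, caption, min, max, num_buckets) \
  {#name, caption, min, max, num_buckets},
constexpr HistogramSpec kHistograms[] = {MY_HISTOGRAM_LIST(HR)};
#undef HR

int main() {
  for (const auto& h : kHistograms) {
    std::printf("%s -> %s [%d, %d), %d buckets\n", h.name, h.caption, h.min,
                h.max, h.num_buckets);
  }
}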
diff --git a/deps/v8/src/logging/counters.cc b/deps/v8/src/logging/counters.cc
index a333327e93..fb73184c3f 100644
--- a/deps/v8/src/logging/counters.cc
+++ b/deps/v8/src/logging/counters.cc
@@ -24,56 +24,17 @@ void StatsTable::SetCounterFunction(CounterLookupCallback f) {
lookup_function_ = f;
}
-int* StatsCounterBase::FindLocationInStatsTable() const {
+int* StatsCounter::FindLocationInStatsTable() const {
return counters_->FindLocation(name_);
}
-StatsCounterThreadSafe::StatsCounterThreadSafe(Counters* counters,
- const char* name)
- : StatsCounterBase(counters, name) {}
-
-void StatsCounterThreadSafe::Set(int Value) {
- if (ptr_) {
- base::MutexGuard Guard(&mutex_);
- SetLoc(ptr_, Value);
- }
-}
-
-void StatsCounterThreadSafe::Increment() {
- if (ptr_) {
- base::MutexGuard Guard(&mutex_);
- IncrementLoc(ptr_);
- }
-}
-
-void StatsCounterThreadSafe::Increment(int value) {
- if (ptr_) {
- base::MutexGuard Guard(&mutex_);
- IncrementLoc(ptr_, value);
- }
-}
-
-void StatsCounterThreadSafe::Decrement() {
- if (ptr_) {
- base::MutexGuard Guard(&mutex_);
- DecrementLoc(ptr_);
- }
-}
-
-void StatsCounterThreadSafe::Decrement(int value) {
- if (ptr_) {
- base::MutexGuard Guard(&mutex_);
- DecrementLoc(ptr_, value);
- }
-}
-
void Histogram::AddSample(int sample) {
if (Enabled()) {
counters_->AddHistogramSample(histogram_, sample);
}
}
-void* Histogram::CreateHistogram() const {
+V8_EXPORT_PRIVATE void* Histogram::CreateHistogram() const {
return counters_->CreateHistogram(name_, min_, max_, num_buckets_);
}
@@ -121,11 +82,8 @@ bool TimedHistogram::ToggleRunningState(bool expect_to_run) const {
Counters::Counters(Isolate* isolate)
:
-#define SC(name, caption) name##_(this, "c:" #caption),
- STATS_COUNTER_TS_LIST(SC)
-#undef SC
#ifdef V8_RUNTIME_CALL_STATS
- runtime_call_stats_(RuntimeCallStats::kMainIsolateThread),
+ runtime_call_stats_(RuntimeCallStats::kMainIsolateThread),
worker_thread_runtime_call_stats_(),
#endif
isolate_(isolate),
@@ -143,9 +101,9 @@ Counters::Counters(Isolate* isolate)
#undef HR
};
for (const auto& histogram : kHistograms) {
- this->*histogram.member =
- Histogram(histogram.caption, histogram.min, histogram.max,
- histogram.num_buckets, this);
+ (this->*histogram.member)
+ .Initialize(histogram.caption, histogram.min, histogram.max,
+ histogram.num_buckets, this);
}
const int DefaultTimedHistogramNumBuckets = 50;
@@ -162,9 +120,9 @@ Counters::Counters(Isolate* isolate)
#undef HT
};
for (const auto& timer : kNestedTimedHistograms) {
- this->*timer.member =
- NestedTimedHistogram(timer.caption, 0, timer.max, timer.res,
- DefaultTimedHistogramNumBuckets, this);
+ (this->*timer.member)
+ .Initialize(timer.caption, 0, timer.max, timer.res,
+ DefaultTimedHistogramNumBuckets, this);
}
static const struct {
@@ -179,8 +137,9 @@ Counters::Counters(Isolate* isolate)
#undef HT
};
for (const auto& timer : kTimedHistograms) {
- this->*timer.member = TimedHistogram(timer.caption, 0, timer.max, timer.res,
- DefaultTimedHistogramNumBuckets, this);
+ (this->*timer.member)
+ .Initialize(timer.caption, 0, timer.max, timer.res,
+ DefaultTimedHistogramNumBuckets, this);
}
static const struct {
@@ -192,8 +151,9 @@ Counters::Counters(Isolate* isolate)
#undef AHT
};
for (const auto& aht : kAggregatableHistogramTimers) {
- this->*aht.member = AggregatableHistogramTimer(
- aht.caption, 0, 10000000, DefaultTimedHistogramNumBuckets, this);
+ (this->*aht.member)
+ .Initialize(aht.caption, 0, 10000000, DefaultTimedHistogramNumBuckets,
+ this);
}
static const struct {
@@ -205,7 +165,8 @@ Counters::Counters(Isolate* isolate)
#undef HP
};
for (const auto& percentage : kHistogramPercentages) {
- this->*percentage.member = Histogram(percentage.caption, 0, 101, 100, this);
+ (this->*percentage.member)
+ .Initialize(percentage.caption, 0, 101, 100, this);
}
// Exponential histogram assigns bucket limits to points
@@ -223,43 +184,39 @@ Counters::Counters(Isolate* isolate)
#undef HM
};
for (const auto& histogram : kLegacyMemoryHistograms) {
- this->*histogram.member =
- Histogram(histogram.caption, 1000, 500000, 50, this);
+ (this->*histogram.member)
+ .Initialize(histogram.caption, 1000, 500000, 50, this);
}
- // clang-format off
- static const struct {
+ static constexpr struct {
StatsCounter Counters::*member;
const char* caption;
} kStatsCounters[] = {
-#define SC(name, caption) {&Counters::name##_, "c:" #caption},
- STATS_COUNTER_LIST_1(SC)
- STATS_COUNTER_LIST_2(SC)
- STATS_COUNTER_NATIVE_CODE_LIST(SC)
-#undef SC
-#define SC(name) \
- {&Counters::count_of_##name##_, "c:" "V8.CountOf_" #name}, \
- {&Counters::size_of_##name##_, "c:" "V8.SizeOf_" #name},
- INSTANCE_TYPE_LIST(SC)
-#undef SC
-#define SC(name) \
- {&Counters::count_of_CODE_TYPE_##name##_, \
- "c:" "V8.CountOf_CODE_TYPE-" #name}, \
- {&Counters::size_of_CODE_TYPE_##name##_, \
- "c:" "V8.SizeOf_CODE_TYPE-" #name},
- CODE_KIND_LIST(SC)
-#undef SC
-#define SC(name) \
- {&Counters::count_of_FIXED_ARRAY_##name##_, \
- "c:" "V8.CountOf_FIXED_ARRAY-" #name}, \
- {&Counters::size_of_FIXED_ARRAY_##name##_, \
- "c:" "V8.SizeOf_FIXED_ARRAY-" #name},
- FIXED_ARRAY_SUB_INSTANCE_TYPE_LIST(SC)
+#define SC(name, caption) {&Counters::name##_, "c:" caption},
+#define BARE_SC(name, caption) SC(name, #caption)
+#define COUNT_AND_SIZE_SC(name) \
+ SC(count_of_##name, "V8.CountOf_" #name) \
+ SC(size_of_##name, "V8.SizeOf_" #name)
+#define CODE_KIND_SC(name) COUNT_AND_SIZE_SC(CODE_TYPE_##name)
+#define FIXED_ARRAY_INSTANCE_TYPE_SC(name) COUNT_AND_SIZE_SC(FIXED_ARRAY_##name)
+
+ // clang-format off
+ STATS_COUNTER_LIST_1(BARE_SC)
+ STATS_COUNTER_LIST_2(BARE_SC)
+ STATS_COUNTER_NATIVE_CODE_LIST(BARE_SC)
+ INSTANCE_TYPE_LIST(COUNT_AND_SIZE_SC)
+ CODE_KIND_LIST(CODE_KIND_SC)
+ FIXED_ARRAY_SUB_INSTANCE_TYPE_LIST(FIXED_ARRAY_INSTANCE_TYPE_SC)
+ // clang-format on
+
+#undef FIXED_ARRAY_INSTANCE_TYPE_SC
+#undef CODE_KIND_SC
+#undef COUNT_AND_SIZE_SC
+#undef BARE_SC
#undef SC
};
- // clang-format on
for (const auto& counter : kStatsCounters) {
- this->*counter.member = StatsCounter(this, counter.caption);
+ (this->*counter.member).Init(this, counter.caption);
}
}
@@ -269,7 +226,6 @@ void Counters::ResetCounterFunction(CounterLookupCallback f) {
#define SC(name, caption) name##_.Reset();
STATS_COUNTER_LIST_1(SC)
STATS_COUNTER_LIST_2(SC)
- STATS_COUNTER_TS_LIST(SC)
STATS_COUNTER_NATIVE_CODE_LIST(SC)
#undef SC
@@ -303,7 +259,7 @@ void Counters::ResetCreateHistogramFunction(CreateHistogramCallback f) {
NESTED_TIMED_HISTOGRAM_LIST(HT)
#undef HT
-#define HT(name, caption, max, res) name##_.Reset(FLAG_slow_histograms);
+#define HT(name, caption, max, res) name##_.Reset();
NESTED_TIMED_HISTOGRAM_LIST_SLOW(HT)
#undef HT
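
Counters::Counters now initializes its histogram and counter members in place via Initialize()/Init() over pointer-to-member tables, since the members are no longer copy-assignable. A standalone sketch of that pointer-to-data-member initialization pattern, using simplified Gauge/Registry types rather than V8's Counters and Histogram:

#include <cstdio>

class Gauge {
 public:
  Gauge() = default;
  Gauge(const Gauge&) = delete;
  Gauge& operator=(const Gauge&) = delete;
  void Initialize(const char* caption, int min, int max) {
    caption_ = caption;
    min_ = min;
    max_ = max;
  }
  const char* caption() const { return caption_; }

 private:
  const char* caption_ = nullptr;
  int min_ = 0;
  int max_ = 0;
};

struct Registry {
  Gauge heap_size_;
  Gauge code_size_;

  Registry() {
    static constexpr struct {
      Gauge Registry::*member;
      const char* caption;
      int min, max;
    } kGauges[] = {
        {&Registry::heap_size_, "HeapSize", 0, 1 << 20},
        {&Registry::code_size_, "CodeSize", 0, 1 << 16},
    };
    // Non-copyable members cannot be assigned from temporaries, so each one
    // is initialized in place through its pointer to member.
    for (const auto& g : kGauges) {
      (this->*g.member).Initialize(g.caption, g.min, g.max);
    }
  }
};

int main() {
  Registry r;
  std::printf("%s\n", r.heap_size_.caption());
}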
diff --git a/deps/v8/src/logging/counters.h b/deps/v8/src/logging/counters.h
index 08e35352cf..bb662b3e21 100644
--- a/deps/v8/src/logging/counters.h
+++ b/deps/v8/src/logging/counters.h
@@ -91,58 +91,32 @@ class StatsTable {
AddHistogramSampleCallback add_histogram_sample_function_;
};
-// Base class for stats counters.
-class StatsCounterBase {
- protected:
- Counters* counters_;
- const char* name_;
- int* ptr_;
-
- StatsCounterBase() = default;
- StatsCounterBase(Counters* counters, const char* name)
- : counters_(counters), name_(name), ptr_(nullptr) {}
-
- void SetLoc(int* loc, int value) { *loc = value; }
- void IncrementLoc(int* loc) { (*loc)++; }
- void IncrementLoc(int* loc, int value) { (*loc) += value; }
- void DecrementLoc(int* loc) { (*loc)--; }
- void DecrementLoc(int* loc, int value) { (*loc) -= value; }
-
- V8_EXPORT_PRIVATE int* FindLocationInStatsTable() const;
-};
-
-// StatsCounters are dynamically created values which can be tracked in
-// the StatsTable. They are designed to be lightweight to create and
-// easy to use.
+// StatsCounters are dynamically created values which can be tracked in the
+// StatsTable. They are designed to be lightweight to create and easy to use.
//
// Internally, a counter represents a value in a row of a StatsTable.
// The row has a 32bit value for each process/thread in the table and also
-// a name (stored in the table metadata). Since the storage location can be
-// thread-specific, this class cannot be shared across threads. Note: This
-// class is not thread safe.
-class StatsCounter : public StatsCounterBase {
+// a name (stored in the table metadata). Since the storage location can be
+// thread-specific, this class cannot be shared across threads.
+// This class is thread-safe.
+class StatsCounter {
public:
- // Sets the counter to a specific value.
void Set(int value) {
- if (int* loc = GetPtr()) SetLoc(loc, value);
- }
-
- // Increments the counter.
- void Increment() {
- if (int* loc = GetPtr()) IncrementLoc(loc);
- }
-
- void Increment(int value) {
- if (int* loc = GetPtr()) IncrementLoc(loc, value);
+ if (std::atomic<int>* loc = GetPtr()) {
+ loc->store(value, std::memory_order_relaxed);
+ }
}
- // Decrements the counter.
- void Decrement() {
- if (int* loc = GetPtr()) DecrementLoc(loc);
+ void Increment(int value = 1) {
+ if (std::atomic<int>* loc = GetPtr()) {
+ loc->fetch_add(value, std::memory_order_relaxed);
+ }
}
- void Decrement(int value) {
- if (int* loc = GetPtr()) DecrementLoc(loc, value);
+ void Decrement(int value = 1) {
+ if (std::atomic<int>* loc = GetPtr()) {
+ loc->fetch_sub(value, std::memory_order_relaxed);
+ }
}
// Is this counter enabled?
@@ -152,8 +126,8 @@ class StatsCounter : public StatsCounterBase {
// Get the internal pointer to the counter. This is used
// by the code generator to emit code that manipulates a
// given counter without calling the runtime system.
- int* GetInternalPointer() {
- int* loc = GetPtr();
+ std::atomic<int>* GetInternalPointer() {
+ std::atomic<int>* loc = GetPtr();
DCHECK_NOT_NULL(loc);
return loc;
}
@@ -161,47 +135,44 @@ class StatsCounter : public StatsCounterBase {
private:
friend class Counters;
- StatsCounter() = default;
- StatsCounter(Counters* counters, const char* name)
- : StatsCounterBase(counters, name), lookup_done_(false) {}
-
- // Reset the cached internal pointer.
- void Reset() { lookup_done_ = false; }
-
- // Returns the cached address of this counter location.
- int* GetPtr() {
- if (lookup_done_) return ptr_;
- lookup_done_ = true;
- ptr_ = FindLocationInStatsTable();
- return ptr_;
+ void Init(Counters* counters, const char* name) {
+ DCHECK_NULL(counters_);
+ DCHECK_NOT_NULL(counters);
+ // Counter names always start with "c:V8.".
+ DCHECK_EQ(0, memcmp(name, "c:V8.", 5));
+ counters_ = counters;
+ name_ = name;
}
- bool lookup_done_;
-};
+ V8_EXPORT_PRIVATE int* FindLocationInStatsTable() const;
-// Thread safe version of StatsCounter.
-class V8_EXPORT_PRIVATE StatsCounterThreadSafe : public StatsCounterBase {
- public:
- void Set(int Value);
- void Increment();
- void Increment(int value);
- void Decrement();
- void Decrement(int value);
- bool Enabled() { return ptr_ != nullptr; }
- int* GetInternalPointer() {
- DCHECK_NOT_NULL(ptr_);
- return ptr_;
+ // Reset the cached internal pointer.
+ void Reset() {
+ lookup_done_.store(false, std::memory_order_release);
+ ptr_.store(nullptr, std::memory_order_release);
}
- private:
- friend class Counters;
-
- StatsCounterThreadSafe(Counters* counters, const char* name);
- void Reset() { ptr_ = FindLocationInStatsTable(); }
-
- base::Mutex mutex_;
+ // Returns the cached address of this counter location.
+ std::atomic<int>* GetPtr() {
+ // {Init} must have been called.
+ DCHECK_NOT_NULL(counters_);
+ DCHECK_NOT_NULL(name_);
+ auto* ptr = ptr_.load(std::memory_order_acquire);
+ if (V8_LIKELY(ptr)) return ptr;
+ if (!lookup_done_.load(std::memory_order_acquire)) {
+ ptr = base::AsAtomicPtr(FindLocationInStatsTable());
+ ptr_.store(ptr, std::memory_order_release);
+ lookup_done_.store(true, std::memory_order_release);
+ }
+ // Re-load after checking {lookup_done_}.
+ return ptr_.load(std::memory_order_acquire);
+ }
- DISALLOW_IMPLICIT_CONSTRUCTORS(StatsCounterThreadSafe);
+ Counters* counters_ = nullptr;
+ const char* name_ = nullptr;
+ // A pointer to an atomic, set atomically in {GetPtr}.
+ std::atomic<std::atomic<int>*> ptr_{nullptr};
+ std::atomic<bool> lookup_done_{false};
};
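
The rewritten StatsCounter replaces the mutex-guarded StatsCounterThreadSafe with relaxed atomic updates on the embedder-provided slot, plus an acquire/release-published cache of the looked-up pointer. A standalone sketch of that pattern; Lookup() stands in for FindLocationInStatsTable() and the rest is simplified:

#include <atomic>
#include <cstdio>

class Counter {
 public:
  using LookupFn = std::atomic<int>* (*)();
  explicit Counter(LookupFn lookup) : lookup_(lookup) {}

  void Increment(int value = 1) {
    if (std::atomic<int>* loc = GetPtr()) {
      loc->fetch_add(value, std::memory_order_relaxed);
    }
  }

 private:
  std::atomic<int>* GetPtr() {
    std::atomic<int>* ptr = ptr_.load(std::memory_order_acquire);
    if (ptr) return ptr;  // Fast path: pointer already cached.
    if (!lookup_done_.load(std::memory_order_acquire)) {
      ptr_.store(lookup_(), std::memory_order_release);
      lookup_done_.store(true, std::memory_order_release);
    }
    // Re-load: another thread may have published the pointer meanwhile, or
    // the lookup may legitimately return nullptr (counter disabled).
    return ptr_.load(std::memory_order_acquire);
  }

  LookupFn lookup_;
  std::atomic<std::atomic<int>*> ptr_{nullptr};
  std::atomic<bool> lookup_done_{false};
};

std::atomic<int> g_slot{0};
std::atomic<int>* Lookup() { return &g_slot; }

int main() {
  Counter c(&Lookup);
  c.Increment(3);
  std::printf("%d\n", g_slot.load());
}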
// A Histogram represents a dynamically created histogram in the
@@ -228,35 +199,47 @@ class Histogram {
protected:
Histogram() = default;
- Histogram(const char* name, int min, int max, int num_buckets,
- Counters* counters)
- : name_(name),
- min_(min),
- max_(max),
- num_buckets_(num_buckets),
- histogram_(nullptr),
- counters_(counters) {
- DCHECK(counters_);
+ Histogram(const Histogram&) = delete;
+ Histogram& operator=(const Histogram&) = delete;
+
+ void Initialize(const char* name, int min, int max, int num_buckets,
+ Counters* counters) {
+ name_ = name;
+ min_ = min;
+ max_ = max;
+ num_buckets_ = num_buckets;
+ histogram_ = nullptr;
+ counters_ = counters;
+ DCHECK_NOT_NULL(counters_);
}
Counters* counters() const { return counters_; }
- // Reset the cached internal pointer.
- void Reset(bool create_new = true) {
- histogram_ = create_new ? CreateHistogram() : nullptr;
+ // Reset the cached internal pointer to nullptr; the histogram will be
+ // created lazily, the first time it is needed.
+ void Reset() { histogram_ = nullptr; }
+
+ // Lazily create the histogram, if it has not been created yet.
+ void EnsureCreated(bool create_new = true) {
+ if (create_new && histogram_.load(std::memory_order_acquire) == nullptr) {
+ base::MutexGuard Guard(&mutex_);
+ if (histogram_.load(std::memory_order_relaxed) == nullptr)
+ histogram_.store(CreateHistogram(), std::memory_order_release);
+ }
}
private:
friend class Counters;
- void* CreateHistogram() const;
+ V8_EXPORT_PRIVATE void* CreateHistogram() const;
const char* name_;
int min_;
int max_;
int num_buckets_;
- void* histogram_;
+ std::atomic<void*> histogram_;
Counters* counters_;
+ base::Mutex mutex_;
};
enum class TimedHistogramResolution { MILLISECOND, MICROSECOND };
@@ -290,11 +273,15 @@ class TimedHistogram : public Histogram {
TimedHistogramResolution resolution_;
TimedHistogram() = default;
- TimedHistogram(const char* name, int min, int max,
- TimedHistogramResolution resolution, int num_buckets,
- Counters* counters)
- : Histogram(name, min, max, num_buckets, counters),
- resolution_(resolution) {}
+ TimedHistogram(const TimedHistogram&) = delete;
+ TimedHistogram& operator=(const TimedHistogram&) = delete;
+
+ void Initialize(const char* name, int min, int max,
+ TimedHistogramResolution resolution, int num_buckets,
+ Counters* counters) {
+ Histogram::Initialize(name, min, max, num_buckets, counters);
+ resolution_ = resolution;
+ }
};
class NestedTimedHistogramScope;
@@ -307,7 +294,9 @@ class NestedTimedHistogram : public TimedHistogram {
NestedTimedHistogram(const char* name, int min, int max,
TimedHistogramResolution resolution, int num_buckets,
Counters* counters)
- : TimedHistogram(name, min, max, resolution, num_buckets, counters) {}
+ : NestedTimedHistogram() {
+ Initialize(name, min, max, resolution, num_buckets, counters);
+ }
private:
friend class Counters;
@@ -327,6 +316,8 @@ class NestedTimedHistogram : public TimedHistogram {
NestedTimedHistogramScope* current_ = nullptr;
NestedTimedHistogram() = default;
+ NestedTimedHistogram(const NestedTimedHistogram&) = delete;
+ NestedTimedHistogram& operator=(const NestedTimedHistogram&) = delete;
};
// A histogram timer that can aggregate events within a larger scope.
@@ -361,9 +352,9 @@ class AggregatableHistogramTimer : public Histogram {
friend class Counters;
AggregatableHistogramTimer() = default;
- AggregatableHistogramTimer(const char* name, int min, int max,
- int num_buckets, Counters* counters)
- : Histogram(name, min, max, num_buckets, counters) {}
+ AggregatableHistogramTimer(const AggregatableHistogramTimer&) = delete;
+ AggregatableHistogramTimer& operator=(const AggregatableHistogramTimer&) =
+ delete;
base::TimeDelta time_;
};
@@ -539,33 +530,58 @@ class Counters : public std::enable_shared_from_this<Counters> {
}
#define HR(name, caption, min, max, num_buckets) \
- Histogram* name() { return &name##_; }
+ Histogram* name() { \
+ name##_.EnsureCreated(); \
+ return &name##_; \
+ }
HISTOGRAM_RANGE_LIST(HR)
#undef HR
#define HT(name, caption, max, res) \
- NestedTimedHistogram* name() { return &name##_; }
+ NestedTimedHistogram* name() { \
+ name##_.EnsureCreated(); \
+ return &name##_; \
+ }
NESTED_TIMED_HISTOGRAM_LIST(HT)
+#undef HT
+
+#define HT(name, caption, max, res) \
+ NestedTimedHistogram* name() { \
+ name##_.EnsureCreated(FLAG_slow_histograms); \
+ return &name##_; \
+ }
NESTED_TIMED_HISTOGRAM_LIST_SLOW(HT)
#undef HT
#define HT(name, caption, max, res) \
- TimedHistogram* name() { return &name##_; }
+ TimedHistogram* name() { \
+ name##_.EnsureCreated(); \
+ return &name##_; \
+ }
TIMED_HISTOGRAM_LIST(HT)
#undef HT
-#define AHT(name, caption) \
- AggregatableHistogramTimer* name() { return &name##_; }
+#define AHT(name, caption) \
+ AggregatableHistogramTimer* name() { \
+ name##_.EnsureCreated(); \
+ return &name##_; \
+ }
AGGREGATABLE_HISTOGRAM_TIMER_LIST(AHT)
#undef AHT
-#define HP(name, caption) \
- Histogram* name() { return &name##_; }
+#define HP(name, caption) \
+ Histogram* name() { \
+ name##_.EnsureCreated(); \
+ return &name##_; \
+ }
HISTOGRAM_PERCENTAGE_LIST(HP)
#undef HP
-#define HM(name, caption) \
- Histogram* name() { return &name##_; }
+#define HM(name, caption) \
+ Histogram* name() { \
+ name##_.EnsureCreated(); \
+ return &name##_; \
+ }
HISTOGRAM_LEGACY_MEMORY_LIST(HM)
#undef HM
@@ -576,11 +592,6 @@ class Counters : public std::enable_shared_from_this<Counters> {
STATS_COUNTER_NATIVE_CODE_LIST(SC)
#undef SC
-#define SC(name, caption) \
- StatsCounterThreadSafe* name() { return &name##_; }
- STATS_COUNTER_TS_LIST(SC)
-#undef SC
-
// clang-format off
enum Id {
#define RATE_ID(name, caption, max, res) k_##name,
@@ -600,7 +611,6 @@ class Counters : public std::enable_shared_from_this<Counters> {
#define COUNTER_ID(name, caption) k_##name,
STATS_COUNTER_LIST_1(COUNTER_ID)
STATS_COUNTER_LIST_2(COUNTER_ID)
- STATS_COUNTER_TS_LIST(COUNTER_ID)
STATS_COUNTER_NATIVE_CODE_LIST(COUNTER_ID)
#undef COUNTER_ID
#define COUNTER_ID(name) kCountOf##name, kSizeOf##name,
@@ -634,7 +644,7 @@ class Counters : public std::enable_shared_from_this<Counters> {
private:
friend class StatsTable;
- friend class StatsCounterBase;
+ friend class StatsCounter;
friend class Histogram;
friend class NestedTimedHistogramScope;
@@ -683,10 +693,6 @@ class Counters : public std::enable_shared_from_this<Counters> {
STATS_COUNTER_NATIVE_CODE_LIST(SC)
#undef SC
-#define SC(name, caption) StatsCounterThreadSafe name##_;
- STATS_COUNTER_TS_LIST(SC)
-#undef SC
-
#define SC(name) \
StatsCounter size_of_##name##_; \
StatsCounter count_of_##name##_;
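
The Counters accessors now call EnsureCreated() so each histogram backend is created lazily on first use, with an atomic fast path and a mutex-guarded slow path (classic double-checked locking). A standalone sketch of the same idea; CreateBackend() stands in for the embedder's CreateHistogram callback and the types are simplified:

#include <atomic>
#include <mutex>

class LazyHistogram {
 public:
  void EnsureCreated(bool create_new = true) {
    if (create_new && histogram_.load(std::memory_order_acquire) == nullptr) {
      std::lock_guard<std::mutex> guard(mutex_);
      if (histogram_.load(std::memory_order_relaxed) == nullptr) {
        histogram_.store(CreateBackend(), std::memory_order_release);
      }
    }
  }
  bool created() const {
    return histogram_.load(std::memory_order_acquire) != nullptr;
  }

 private:
  void* CreateBackend() { return this; }  // Placeholder for a real backend.

  std::atomic<void*> histogram_{nullptr};
  std::mutex mutex_;
};

// Accessor in the style of the generated Counters getters: ensure the backend
// exists, then hand out the member.
struct MyCounters {
  LazyHistogram* gc_time() {
    gc_time_.EnsureCreated();
    return &gc_time_;
  }
  LazyHistogram gc_time_;
};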
diff --git a/deps/v8/src/logging/log.cc b/deps/v8/src/logging/log.cc
index 1232af1717..28a82bb5b9 100644
--- a/deps/v8/src/logging/log.cc
+++ b/deps/v8/src/logging/log.cc
@@ -1513,7 +1513,7 @@ void Logger::CodeDisableOptEvent(Handle<AbstractCode> code,
MSG_BUILDER();
msg << kLogEventsNames[CodeEventListener::CODE_DISABLE_OPT_EVENT] << kNext
<< shared->DebugNameCStr().get() << kNext
- << GetBailoutReason(shared->disable_optimization_reason());
+ << GetBailoutReason(shared->disabled_optimization_reason());
msg.WriteToLogFile();
}
@@ -1976,7 +1976,6 @@ void Logger::LogAccessorCallbacks() {
}
void Logger::LogAllMaps() {
- DisallowGarbageCollection no_gc;
Heap* heap = isolate_->heap();
CombinedHeapObjectIterator iterator(heap);
for (HeapObject obj = iterator.Next(); !obj.is_null();
@@ -2245,13 +2244,14 @@ void ExistingCodeLogger::LogCompiledFunctions() {
LogExistingFunction(
shared,
Handle<AbstractCode>(
- AbstractCode::cast(shared->InterpreterTrampoline()), isolate_));
+ AbstractCode::cast(FromCodeT(shared->InterpreterTrampoline())),
+ isolate_));
}
if (shared->HasBaselineCode()) {
- LogExistingFunction(
- shared, Handle<AbstractCode>(
- AbstractCode::cast(shared->baseline_code(kAcquireLoad)),
- isolate_));
+ LogExistingFunction(shared, Handle<AbstractCode>(
+ AbstractCode::cast(FromCodeT(
+ shared->baseline_code(kAcquireLoad))),
+ isolate_));
}
if (pair.second.is_identical_to(BUILTIN_CODE(isolate_, CompileLazy)))
continue;
diff --git a/deps/v8/src/logging/runtime-call-stats-scope.h b/deps/v8/src/logging/runtime-call-stats-scope.h
index 6b3db25ae8..ffdf08378d 100644
--- a/deps/v8/src/logging/runtime-call-stats-scope.h
+++ b/deps/v8/src/logging/runtime-call-stats-scope.h
@@ -8,6 +8,7 @@
#include <memory>
#include "src/execution/isolate.h"
+#include "src/execution/local-isolate.h"
#include "src/logging/counters.h"
#include "src/logging/runtime-call-stats.h"
#include "src/logging/tracing-flags.h"
@@ -29,6 +30,14 @@ RuntimeCallTimerScope::RuntimeCallTimerScope(Isolate* isolate,
stats_->Enter(&timer_, counter_id);
}
+RuntimeCallTimerScope::RuntimeCallTimerScope(LocalIsolate* isolate,
+ RuntimeCallCounterId counter_id) {
+ if (V8_LIKELY(!TracingFlags::is_runtime_stats_enabled())) return;
+ DCHECK_NOT_NULL(isolate->runtime_call_stats());
+ stats_ = isolate->runtime_call_stats();
+ stats_->Enter(&timer_, counter_id);
+}
+
#else // RUNTIME_CALL_STATS
#define RCS_SCOPE(...)
diff --git a/deps/v8/src/logging/runtime-call-stats.h b/deps/v8/src/logging/runtime-call-stats.h
index 05ec1fa21c..f1b5be5c3c 100644
--- a/deps/v8/src/logging/runtime-call-stats.h
+++ b/deps/v8/src/logging/runtime-call-stats.h
@@ -388,13 +388,13 @@ class RuntimeCallTimer final {
V(BoundFunctionLengthGetter) \
V(BoundFunctionNameGetter) \
V(CodeGenerationFromStringsCallbacks) \
+ V(CompileBackgroundBaselinePreVisit) \
+ V(CompileBackgroundBaselineVisit) \
V(CompileBackgroundCompileTask) \
V(CompileBaseline) \
+ V(CompileBaselineFinalization) \
V(CompileBaselinePreVisit) \
V(CompileBaselineVisit) \
- V(CompileBackgroundBaselinePreVisit) \
- V(CompileBackgroundBaselineVisit) \
- V(CompileBaselineFinalization) \
V(CompileCollectSourcePositions) \
V(CompileDeserialize) \
V(CompileEnqueueOnDispatcher) \
@@ -469,6 +469,7 @@ class RuntimeCallTimer final {
V(PrototypeMap_TransitionToDataProperty) \
V(PrototypeObject_DeleteProperty) \
V(ReconfigureToDataProperty) \
+ V(SnapshotDecompress) \
V(StringLengthGetter) \
V(TestCounter1) \
V(TestCounter2) \
@@ -476,10 +477,10 @@ class RuntimeCallTimer final {
V(UpdateProtector) \
V(WebSnapshotDeserialize) \
V(WebSnapshotDeserialize_Arrays) \
+ V(WebSnapshotDeserialize_Classes) \
V(WebSnapshotDeserialize_Contexts) \
V(WebSnapshotDeserialize_Exports) \
V(WebSnapshotDeserialize_Functions) \
- V(WebSnapshotDeserialize_Classes) \
V(WebSnapshotDeserialize_Maps) \
V(WebSnapshotDeserialize_Objects) \
V(WebSnapshotDeserialize_Strings)
@@ -711,6 +712,8 @@ class V8_NODISCARD RuntimeCallTimerScope {
public:
inline RuntimeCallTimerScope(Isolate* isolate,
RuntimeCallCounterId counter_id);
+ inline RuntimeCallTimerScope(LocalIsolate* isolate,
+ RuntimeCallCounterId counter_id);
inline RuntimeCallTimerScope(RuntimeCallStats* stats,
RuntimeCallCounterId counter_id,
RuntimeCallStats::CounterMode mode =
diff --git a/deps/v8/src/numbers/conversions.cc b/deps/v8/src/numbers/conversions.cc
index a12a3f1c72..0683402794 100644
--- a/deps/v8/src/numbers/conversions.cc
+++ b/deps/v8/src/numbers/conversions.cc
@@ -1477,7 +1477,7 @@ bool IsSpecialIndex(String string) {
}
// Slow path: test DoubleToString(StringToDouble(string)) == string.
base::Vector<const uint16_t> vector(buffer, length);
- double d = StringToDouble(vector, NO_FLAGS);
+ double d = StringToDouble(vector, NO_CONVERSION_FLAGS);
if (std::isnan(d)) return false;
// Compute reverse string.
char reverse_buffer[kBufferSize + 1]; // Result will be /0 terminated.
diff --git a/deps/v8/src/numbers/conversions.h b/deps/v8/src/numbers/conversions.h
index 9b958e22e6..9232de93ca 100644
--- a/deps/v8/src/numbers/conversions.h
+++ b/deps/v8/src/numbers/conversions.h
@@ -79,7 +79,7 @@ inline uint64_t DoubleToUint64(double x);
// Enumeration for allowing octals and ignoring junk when converting
// strings to numbers.
enum ConversionFlags {
- NO_FLAGS = 0,
+ NO_CONVERSION_FLAGS = 0,
ALLOW_HEX = 1,
ALLOW_OCTAL = 2,
ALLOW_IMPLICIT_OCTAL = 4,
diff --git a/deps/v8/src/numbers/hash-seed-inl.h b/deps/v8/src/numbers/hash-seed-inl.h
index 51ee906be2..0964d55241 100644
--- a/deps/v8/src/numbers/hash-seed-inl.h
+++ b/deps/v8/src/numbers/hash-seed-inl.h
@@ -17,9 +17,11 @@ namespace v8 {
namespace internal {
class Isolate;
+class LocalIsolate;
class ReadOnlyRoots;
inline uint64_t HashSeed(Isolate* isolate);
+inline uint64_t HashSeed(LocalIsolate* isolate);
inline uint64_t HashSeed(ReadOnlyRoots roots);
} // namespace internal
@@ -36,10 +38,13 @@ inline uint64_t HashSeed(Isolate* isolate) {
return HashSeed(ReadOnlyRoots(isolate));
}
+inline uint64_t HashSeed(LocalIsolate* isolate) {
+ return HashSeed(ReadOnlyRoots(isolate));
+}
+
inline uint64_t HashSeed(ReadOnlyRoots roots) {
uint64_t seed;
roots.hash_seed().copy_out(0, reinterpret_cast<byte*>(&seed), kInt64Size);
- DCHECK(FLAG_randomize_hashes || seed == 0);
return seed;
}
diff --git a/deps/v8/src/objects/all-objects-inl.h b/deps/v8/src/objects/all-objects-inl.h
index 57fe58f9aa..07e6fe44f0 100644
--- a/deps/v8/src/objects/all-objects-inl.h
+++ b/deps/v8/src/objects/all-objects-inl.h
@@ -57,7 +57,6 @@
#include "src/objects/microtask-inl.h"
#include "src/objects/module-inl.h"
#include "src/objects/name-inl.h"
-#include "src/objects/objects-body-descriptors-inl.h"
#include "src/objects/objects-inl.h"
#include "src/objects/oddball-inl.h"
#include "src/objects/ordered-hash-table-inl.h"
@@ -87,6 +86,7 @@
#include "src/objects/templates-inl.h"
#include "src/objects/torque-defined-classes-inl.h"
#include "src/objects/transitions-inl.h"
+#include "src/objects/turbofan-types-inl.h"
#ifdef V8_INTL_SUPPORT
#include "src/objects/js-break-iterator-inl.h"
diff --git a/deps/v8/src/objects/allocation-site-inl.h b/deps/v8/src/objects/allocation-site-inl.h
index 1fc6709a5e..44f2c4491b 100644
--- a/deps/v8/src/objects/allocation-site-inl.h
+++ b/deps/v8/src/objects/allocation-site-inl.h
@@ -74,9 +74,8 @@ void AllocationSite::Initialize() {
set_nested_site(Smi::zero());
set_pretenure_data(0, kRelaxedStore);
set_pretenure_create_count(0);
- set_dependent_code(
- DependentCode::cast(GetReadOnlyRoots().empty_weak_fixed_array()),
- SKIP_WRITE_BARRIER);
+ set_dependent_code(DependentCode::empty_dependent_code(GetReadOnlyRoots()),
+ SKIP_WRITE_BARRIER);
}
bool AllocationSite::IsZombie() const {
@@ -241,7 +240,7 @@ bool AllocationSite::DigestTransitionFeedback(Handle<AllocationSite> site,
CHECK_NE(to_kind, DICTIONARY_ELEMENTS);
JSObject::TransitionElementsKind(boilerplate, to_kind);
site->dependent_code().DeoptimizeDependentCodeGroup(
- DependentCode::kAllocationSiteTransitionChangedGroup);
+ isolate, DependentCode::kAllocationSiteTransitionChangedGroup);
result = true;
}
}
@@ -261,7 +260,7 @@ bool AllocationSite::DigestTransitionFeedback(Handle<AllocationSite> site,
}
site->SetElementsKind(to_kind);
site->dependent_code().DeoptimizeDependentCodeGroup(
- DependentCode::kAllocationSiteTransitionChangedGroup);
+ isolate, DependentCode::kAllocationSiteTransitionChangedGroup);
result = true;
}
}
diff --git a/deps/v8/src/objects/allocation-site.h b/deps/v8/src/objects/allocation-site.h
index 4d673b4caf..ea8521fc5d 100644
--- a/deps/v8/src/objects/allocation-site.h
+++ b/deps/v8/src/objects/allocation-site.h
@@ -181,6 +181,8 @@ class AllocationMemento
DECL_PRINTER(AllocationMemento)
+ using BodyDescriptor = StructBodyDescriptor;
+
TQ_OBJECT_CONSTRUCTORS(AllocationMemento)
};
diff --git a/deps/v8/src/objects/api-callbacks.h b/deps/v8/src/objects/api-callbacks.h
index a55ca5eb62..d375de9505 100644
--- a/deps/v8/src/objects/api-callbacks.h
+++ b/deps/v8/src/objects/api-callbacks.h
@@ -14,6 +14,8 @@
namespace v8 {
namespace internal {
+class StructBodyDescriptor;
+
#include "torque-generated/src/objects/api-callbacks-tq.inc"
// An accessor must have a getter, but can have no setter.
@@ -66,6 +68,8 @@ class AccessorInfo : public TorqueGeneratedAccessorInfo<AccessorInfo, Struct> {
DECL_PRINTER(AccessorInfo)
+ using BodyDescriptor = StructBodyDescriptor;
+
private:
inline bool HasExpectedReceiverType();
@@ -80,6 +84,8 @@ class AccessCheckInfo
public:
static AccessCheckInfo Get(Isolate* isolate, Handle<JSObject> receiver);
+ using BodyDescriptor = StructBodyDescriptor;
+
TQ_OBJECT_CONSTRUCTORS(AccessCheckInfo)
};
@@ -94,6 +100,8 @@ class InterceptorInfo
DEFINE_TORQUE_GENERATED_INTERCEPTOR_INFO_FLAGS()
+ using BodyDescriptor = StructBodyDescriptor;
+
TQ_OBJECT_CONSTRUCTORS(InterceptorInfo)
};
@@ -113,6 +121,8 @@ class CallHandlerInfo
Address redirected_callback() const;
+ using BodyDescriptor = StructBodyDescriptor;
+
TQ_OBJECT_CONSTRUCTORS(CallHandlerInfo)
};
diff --git a/deps/v8/src/objects/arguments.h b/deps/v8/src/objects/arguments.h
index 661e0759f6..ddf1ce1489 100644
--- a/deps/v8/src/objects/arguments.h
+++ b/deps/v8/src/objects/arguments.h
@@ -15,6 +15,8 @@
namespace v8 {
namespace internal {
+class StructBodyDescriptor;
+
#include "torque-generated/src/objects/arguments-tq.inc"
// Superclass for all objects with instance type {JS_ARGUMENTS_OBJECT_TYPE}
@@ -66,6 +68,8 @@ class AliasedArgumentsEntry
: public TorqueGeneratedAliasedArgumentsEntry<AliasedArgumentsEntry,
Struct> {
public:
+ using BodyDescriptor = StructBodyDescriptor;
+
TQ_OBJECT_CONSTRUCTORS(AliasedArgumentsEntry)
};
diff --git a/deps/v8/src/objects/backing-store.cc b/deps/v8/src/objects/backing-store.cc
index ca11a89cee..5dca72929a 100644
--- a/deps/v8/src/objects/backing-store.cc
+++ b/deps/v8/src/objects/backing-store.cc
@@ -194,6 +194,8 @@ BackingStore::BackingStore(void* buffer_start, size_t byte_length,
DCHECK_IMPLIES(is_resizable_, free_on_destruct_);
DCHECK_IMPLIES(!is_wasm_memory && !is_resizable_,
byte_length_ == max_byte_length_);
+ DCHECK_GE(max_byte_length_, byte_length_);
+ DCHECK_GE(byte_capacity_, max_byte_length_);
}
BackingStore::~BackingStore() {
@@ -323,10 +325,9 @@ std::unique_ptr<BackingStore> BackingStore::Allocate(
counters->array_buffer_new_size_failures()->AddSample(mb_length);
return {};
}
-
- DCHECK(IsValidBackingStorePointer(buffer_start));
}
+ DCHECK(IsValidBackingStorePointer(buffer_start));
auto result = new BackingStore(buffer_start, // start
byte_length, // length
byte_length, // max length
diff --git a/deps/v8/src/objects/backing-store.h b/deps/v8/src/objects/backing-store.h
index 5ba95a2ba8..beaa9e8f30 100644
--- a/deps/v8/src/objects/backing-store.h
+++ b/deps/v8/src/objects/backing-store.h
@@ -98,6 +98,11 @@ class V8_EXPORT_PRIVATE BackingStore : public BackingStoreBase {
bool has_guard_regions() const { return has_guard_regions_; }
bool free_on_destruct() const { return free_on_destruct_; }
+ bool IsEmpty() const {
+ DCHECK_GE(byte_capacity_, byte_length_);
+ return byte_capacity_ == 0;
+ }
+
enum ResizeOrGrowResult { kSuccess, kFailure, kRace };
ResizeOrGrowResult ResizeInPlace(Isolate* isolate, size_t new_byte_length,
diff --git a/deps/v8/src/objects/bigint.cc b/deps/v8/src/objects/bigint.cc
index 5f323aa4ec..aa9ff9d30b 100644
--- a/deps/v8/src/objects/bigint.cc
+++ b/deps/v8/src/objects/bigint.cc
@@ -1247,12 +1247,8 @@ MaybeHandle<BigInt> MutableBigInt::LeftShiftByAbsolute(Isolate* isolate,
return ThrowBigIntTooBig<BigInt>(isolate);
}
digit_t shift = maybe_shift.FromJust();
- int digit_shift = static_cast<int>(shift / kDigitBits);
- int bits_shift = static_cast<int>(shift % kDigitBits);
- int length = x->length();
- bool grow = bits_shift != 0 &&
- (x->digit(length - 1) >> (kDigitBits - bits_shift)) != 0;
- int result_length = length + digit_shift + grow;
+ const int result_length = bigint::LeftShift_ResultLength(
+ x->length(), x->digit(x->length() - 1), shift);
if (result_length > kMaxLength) {
return ThrowBigIntTooBig<BigInt>(isolate);
}
@@ -1260,26 +1256,7 @@ MaybeHandle<BigInt> MutableBigInt::LeftShiftByAbsolute(Isolate* isolate,
if (!New(isolate, result_length).ToHandle(&result)) {
return MaybeHandle<BigInt>();
}
- if (bits_shift == 0) {
- int i = 0;
- for (; i < digit_shift; i++) result->set_digit(i, 0ul);
- for (; i < result_length; i++) {
- result->set_digit(i, x->digit(i - digit_shift));
- }
- } else {
- digit_t carry = 0;
- for (int i = 0; i < digit_shift; i++) result->set_digit(i, 0ul);
- for (int i = 0; i < length; i++) {
- digit_t d = x->digit(i);
- result->set_digit(i + digit_shift, (d << bits_shift) | carry);
- carry = d >> (kDigitBits - bits_shift);
- }
- if (grow) {
- result->set_digit(length + digit_shift, carry);
- } else {
- DCHECK_EQ(carry, 0);
- }
- }
+ bigint::LeftShift(GetRWDigits(result), GetDigits(x), shift);
result->set_sign(x->sign());
return MakeImmutable(result);
}
@@ -1287,72 +1264,22 @@ MaybeHandle<BigInt> MutableBigInt::LeftShiftByAbsolute(Isolate* isolate,
Handle<BigInt> MutableBigInt::RightShiftByAbsolute(Isolate* isolate,
Handle<BigIntBase> x,
Handle<BigIntBase> y) {
- int length = x->length();
- bool sign = x->sign();
+ const bool sign = x->sign();
Maybe<digit_t> maybe_shift = ToShiftAmount(y);
if (maybe_shift.IsNothing()) {
return RightShiftByMaximum(isolate, sign);
}
- digit_t shift = maybe_shift.FromJust();
- int digit_shift = static_cast<int>(shift / kDigitBits);
- int bits_shift = static_cast<int>(shift % kDigitBits);
- int result_length = length - digit_shift;
+ const digit_t shift = maybe_shift.FromJust();
+ bigint::RightShiftState state;
+ const int result_length =
+ bigint::RightShift_ResultLength(GetDigits(x), sign, shift, &state);
+ DCHECK_LE(result_length, x->length());
if (result_length <= 0) {
return RightShiftByMaximum(isolate, sign);
}
- // For negative numbers, round down if any bit was shifted out (so that e.g.
- // -5n >> 1n == -3n and not -2n). Check now whether this will happen and
- // whether it can cause overflow into a new digit. If we allocate the result
- // large enough up front, it avoids having to do a second allocation later.
- bool must_round_down = false;
- if (sign) {
- const digit_t mask = (static_cast<digit_t>(1) << bits_shift) - 1;
- if ((x->digit(digit_shift) & mask) != 0) {
- must_round_down = true;
- } else {
- for (int i = 0; i < digit_shift; i++) {
- if (x->digit(i) != 0) {
- must_round_down = true;
- break;
- }
- }
- }
- }
- // If bits_shift is non-zero, it frees up bits, preventing overflow.
- if (must_round_down && bits_shift == 0) {
- // Overflow cannot happen if the most significant digit has unset bits.
- digit_t msd = x->digit(length - 1);
- bool rounding_can_overflow = digit_ismax(msd);
- if (rounding_can_overflow) result_length++;
- }
-
- DCHECK_LE(result_length, length);
Handle<MutableBigInt> result = New(isolate, result_length).ToHandleChecked();
- if (bits_shift == 0) {
- // Zero out any overflow digit (see "rounding_can_overflow" above).
- result->set_digit(result_length - 1, 0);
- for (int i = digit_shift; i < length; i++) {
- result->set_digit(i - digit_shift, x->digit(i));
- }
- } else {
- digit_t carry = x->digit(digit_shift) >> bits_shift;
- int last = length - digit_shift - 1;
- for (int i = 0; i < last; i++) {
- digit_t d = x->digit(i + digit_shift + 1);
- result->set_digit(i, (d << (kDigitBits - bits_shift)) | carry);
- carry = d >> bits_shift;
- }
- result->set_digit(last, carry);
- }
-
- if (sign) {
- result->set_sign(true);
- if (must_round_down) {
- // Since the result is negative, rounding down means adding one to
- // its absolute value. This cannot overflow.
- result = AbsoluteAddOne(isolate, result, true, *result).ToHandleChecked();
- }
- }
+ bigint::RightShift(GetRWDigits(result), GetDigits(x), shift, state);
+ if (sign) result->set_sign(true);
return MakeImmutable(result);
}
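
The BigInt shift paths now delegate to the bigint library helpers (LeftShift_ResultLength/LeftShift and RightShift_ResultLength/RightShift) instead of open-coding the digit loops. For reference, the removed left-shift code splits the shift into whole-digit and in-digit parts and propagates a carry; a standalone sketch over std::vector<uint64_t> digits (least significant first, top digit non-zero), not V8's actual digit storage:

#include <cstdint>
#include <vector>

std::vector<uint64_t> LeftShift(const std::vector<uint64_t>& x,
                                uint64_t shift) {
  constexpr int kDigitBits = 64;
  const int digit_shift = static_cast<int>(shift / kDigitBits);
  const int bits_shift = static_cast<int>(shift % kDigitBits);
  const int length = static_cast<int>(x.size());
  // Grow by one digit if bits of the most significant digit spill over.
  const bool grow =
      bits_shift != 0 && (x.back() >> (kDigitBits - bits_shift)) != 0;
  std::vector<uint64_t> result(length + digit_shift + (grow ? 1 : 0), 0);
  if (bits_shift == 0) {
    for (int i = 0; i < length; i++) result[i + digit_shift] = x[i];
  } else {
    uint64_t carry = 0;
    for (int i = 0; i < length; i++) {
      result[i + digit_shift] = (x[i] << bits_shift) | carry;
      carry = x[i] >> (kDigitBits - bits_shift);
    }
    if (grow) result[length + digit_shift] = carry;
  }
  return result;
}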
diff --git a/deps/v8/src/objects/code-inl.h b/deps/v8/src/objects/code-inl.h
index ccca2a3994..a4fc2439f4 100644
--- a/deps/v8/src/objects/code-inl.h
+++ b/deps/v8/src/objects/code-inl.h
@@ -34,7 +34,7 @@ namespace internal {
OBJECT_CONSTRUCTORS_IMPL(DeoptimizationData, FixedArray)
TQ_OBJECT_CONSTRUCTORS_IMPL(BytecodeArray)
OBJECT_CONSTRUCTORS_IMPL(AbstractCode, HeapObject)
-OBJECT_CONSTRUCTORS_IMPL(DependentCode, WeakFixedArray)
+OBJECT_CONSTRUCTORS_IMPL(DependentCode, WeakArrayList)
OBJECT_CONSTRUCTORS_IMPL(CodeDataContainer, HeapObject)
NEVER_READ_ONLY_SPACE_IMPL(AbstractCode)
@@ -44,6 +44,7 @@ CAST_ACCESSOR(Code)
CAST_ACCESSOR(CodeDataContainer)
CAST_ACCESSOR(DependentCode)
CAST_ACCESSOR(DeoptimizationData)
+CAST_ACCESSOR(DeoptimizationLiteralArray)
int AbstractCode::raw_instruction_size() {
if (IsCode()) {
@@ -120,8 +121,14 @@ Address AbstractCode::InstructionEnd() {
}
}
-bool AbstractCode::contains(Address inner_pointer) {
- return (address() <= inner_pointer) && (inner_pointer <= address() + Size());
+bool AbstractCode::contains(Isolate* isolate, Address inner_pointer) {
+ PtrComprCageBase cage_base(isolate);
+ if (IsCode(cage_base)) {
+ return GetCode().contains(isolate, inner_pointer);
+ } else {
+ return (address() <= inner_pointer) &&
+ (inner_pointer <= address() + Size(cage_base));
+ }
}
CodeKind AbstractCode::kind() {
@@ -134,47 +141,6 @@ BytecodeArray AbstractCode::GetBytecodeArray() {
return BytecodeArray::cast(*this);
}
-DependentCode DependentCode::next_link() {
- return DependentCode::cast(Get(kNextLinkIndex)->GetHeapObjectAssumeStrong());
-}
-
-void DependentCode::set_next_link(DependentCode next) {
- Set(kNextLinkIndex, HeapObjectReference::Strong(next));
-}
-
-int DependentCode::flags() { return Smi::ToInt(Get(kFlagsIndex)->ToSmi()); }
-
-void DependentCode::set_flags(int flags) {
- Set(kFlagsIndex, MaybeObject::FromObject(Smi::FromInt(flags)));
-}
-
-int DependentCode::count() { return CountField::decode(flags()); }
-
-void DependentCode::set_count(int value) {
- set_flags(CountField::update(flags(), value));
-}
-
-DependentCode::DependencyGroup DependentCode::group() {
- return static_cast<DependencyGroup>(GroupField::decode(flags()));
-}
-
-void DependentCode::set_object_at(int i, MaybeObject object) {
- Set(kCodesStartIndex + i, object);
-}
-
-MaybeObject DependentCode::object_at(int i) {
- return Get(kCodesStartIndex + i);
-}
-
-void DependentCode::clear_at(int i) {
- Set(kCodesStartIndex + i,
- HeapObjectReference::Strong(GetReadOnlyRoots().undefined_value()));
-}
-
-void DependentCode::copy(int from, int to) {
- Set(kCodesStartIndex + to, Get(kCodesStartIndex + from));
-}
-
OBJECT_CONSTRUCTORS_IMPL(Code, HeapObject)
NEVER_READ_ONLY_SPACE_IMPL(Code)
@@ -292,7 +258,7 @@ CodeDataContainer Code::GCSafeCodeDataContainer(AcquireLoadTag) const {
// Helper functions for converting Code objects to CodeDataContainer and back
// when V8_EXTERNAL_CODE_SPACE is enabled.
inline CodeT ToCodeT(Code code) {
-#if V8_EXTERNAL_CODE_SPACE
+#ifdef V8_EXTERNAL_CODE_SPACE
return code.code_data_container(kAcquireLoad);
#else
return code;
@@ -300,7 +266,7 @@ inline CodeT ToCodeT(Code code) {
}
inline Code FromCodeT(CodeT code) {
-#if V8_EXTERNAL_CODE_SPACE
+#ifdef V8_EXTERNAL_CODE_SPACE
return code.code();
#else
return code;
@@ -308,7 +274,7 @@ inline Code FromCodeT(CodeT code) {
}
inline Code FromCodeT(CodeT code, RelaxedLoadTag) {
-#if V8_EXTERNAL_CODE_SPACE
+#ifdef V8_EXTERNAL_CODE_SPACE
return code.code(kRelaxedLoad);
#else
return code;
@@ -316,7 +282,7 @@ inline Code FromCodeT(CodeT code, RelaxedLoadTag) {
}
inline CodeDataContainer CodeDataContainerFromCodeT(CodeT code) {
-#if V8_EXTERNAL_CODE_SPACE
+#ifdef V8_EXTERNAL_CODE_SPACE
return code;
#else
return code.code_data_container(kAcquireLoad);
@@ -475,7 +441,8 @@ bool Code::contains(Isolate* isolate, Address inner_pointer) {
return true;
}
}
- return (address() <= inner_pointer) && (inner_pointer < address() + Size());
+ return (address() <= inner_pointer) &&
+ (inner_pointer < address() + CodeSize());
}
// static
@@ -488,6 +455,8 @@ void Code::CopyRelocInfoToByteArray(ByteArray dest, const CodeDesc& desc) {
int Code::CodeSize() const { return SizeFor(raw_body_size()); }
+DEF_GETTER(Code, Size, int) { return CodeSize(); }
+
CodeKind Code::kind() const {
STATIC_ASSERT(FIELD_SIZE(kFlagsOffset) == kInt32Size);
const uint32_t flags = RELAXED_READ_UINT32_FIELD(*this, kFlagsOffset);
@@ -855,6 +824,15 @@ bool Code::IsWeakObjectInOptimizedCode(HeapObject object) {
InstanceTypeChecker::IsContext(instance_type);
}
+bool Code::IsWeakObjectInDeoptimizationLiteralArray(Object object) {
+ // Maps must be strong because they can be used as part of the description for
+ // how to materialize an object upon deoptimization, in which case it is
+ // possible to reach the code that requires the Map without anything else
+ // holding a strong pointer to that Map.
+ return object.IsHeapObject() && !object.IsMap() &&
+ Code::IsWeakObjectInOptimizedCode(HeapObject::cast(object));
+}
+
bool Code::IsExecutable() {
return !Builtins::IsBuiltinId(builtin_id()) || !is_off_heap_trampoline() ||
Builtins::CodeObjectIsExecutable(builtin_id());
@@ -994,6 +972,35 @@ void CodeDataContainer::clear_padding() {
kSize - kUnalignedSize);
}
+#ifdef V8_EXTERNAL_CODE_SPACE
+//
+// A collection of getters and predicates that forward queries to associated
+// Code object.
+//
+
+#define DEF_PRIMITIVE_FORWARDING_CDC_GETTER(name, type) \
+ type CodeDataContainer::name() const { return FromCodeT(*this).name(); }
+
+#define DEF_FORWARDING_CDC_GETTER(name, type) \
+ DEF_GETTER(CodeDataContainer, name, type) { \
+ return FromCodeT(*this).name(cage_base); \
+ }
+
+DEF_PRIMITIVE_FORWARDING_CDC_GETTER(kind, CodeKind)
+DEF_PRIMITIVE_FORWARDING_CDC_GETTER(builtin_id, Builtin)
+DEF_PRIMITIVE_FORWARDING_CDC_GETTER(is_builtin, bool)
+DEF_PRIMITIVE_FORWARDING_CDC_GETTER(is_interpreter_trampoline_builtin, bool)
+
+DEF_FORWARDING_CDC_GETTER(deoptimization_data, FixedArray)
+DEF_FORWARDING_CDC_GETTER(bytecode_or_interpreter_data, HeapObject)
+DEF_FORWARDING_CDC_GETTER(source_position_table, ByteArray)
+DEF_FORWARDING_CDC_GETTER(bytecode_offset_table, ByteArray)
+
+#undef DEF_PRIMITIVE_FORWARDING_CDC_GETTER
+#undef DEF_FORWARDING_CDC_GETTER
+
+#endif // V8_EXTERNAL_CODE_SPACE
+
byte BytecodeArray::get(int index) const {
DCHECK(index >= 0 && index < this->length());
return ReadField<byte>(kHeaderSize + index * kCharSize);
@@ -1127,7 +1134,7 @@ int BytecodeArray::SizeIncludingMetadata() {
DEFINE_DEOPT_ELEMENT_ACCESSORS(TranslationByteArray, TranslationArray)
DEFINE_DEOPT_ELEMENT_ACCESSORS(InlinedFunctionCount, Smi)
-DEFINE_DEOPT_ELEMENT_ACCESSORS(LiteralArray, FixedArray)
+DEFINE_DEOPT_ELEMENT_ACCESSORS(LiteralArray, DeoptimizationLiteralArray)
DEFINE_DEOPT_ELEMENT_ACCESSORS(OsrBytecodeOffset, Smi)
DEFINE_DEOPT_ELEMENT_ACCESSORS(OsrPcOffset, Smi)
DEFINE_DEOPT_ELEMENT_ACCESSORS(OptimizationId, Smi)
@@ -1155,6 +1162,41 @@ int DeoptimizationData::DeoptCount() {
return (length() - kFirstDeoptEntryIndex) / kDeoptEntrySize;
}
+inline DeoptimizationLiteralArray::DeoptimizationLiteralArray(Address ptr)
+ : WeakFixedArray(ptr) {
+ // No type check is possible beyond that for WeakFixedArray.
+}
+
+inline Object DeoptimizationLiteralArray::get(int index) const {
+ return get(GetPtrComprCageBase(*this), index);
+}
+
+inline Object DeoptimizationLiteralArray::get(PtrComprCageBase cage_base,
+ int index) const {
+ MaybeObject maybe = Get(cage_base, index);
+
+ // Slots in the DeoptimizationLiteralArray should only be cleared when there
+ // is no possible code path that could need that slot. This works because the
+ // weakly-held deoptimization literals are basically local variables that
+ // TurboFan has decided not to keep on the stack. Thus, if the deoptimization
+ // literal goes away, then whatever code needed it should be unreachable. The
+ // exception is currently running Code: in that case, the deoptimization
+ // literals array might be the only thing keeping the target object alive.
+ // Thus, when a Code is running, we strongly mark all of its deoptimization
+ // literals.
+ CHECK(!maybe.IsCleared());
+
+ return maybe.GetHeapObjectOrSmi();
+}
+
+inline void DeoptimizationLiteralArray::set(int index, Object value) {
+ MaybeObject maybe = MaybeObject::FromObject(value);
+ if (Code::IsWeakObjectInDeoptimizationLiteralArray(value)) {
+ maybe = MaybeObject::MakeWeak(maybe);
+ }
+ Set(index, maybe);
+}
+
} // namespace internal
} // namespace v8
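
With V8_EXTERNAL_CODE_SPACE, CodeDataContainer gains macro-generated getters that simply forward each query to the associated Code object. A minimal sketch of that forwarding-getter macro pattern, using simplified Code/CodeRef types rather than V8's:

#include <cstdio>

struct Code {
  int kind() const { return 1; }
  int builtin_id() const { return 42; }
};

struct CodeRef {
  const Code* code;

// Each line below expands into a getter that forwards to the wrapped object.
#define DEF_FORWARDING_GETTER(name, type) \
  type name() const { return code->name(); }

  DEF_FORWARDING_GETTER(kind, int)
  DEF_FORWARDING_GETTER(builtin_id, int)

#undef DEF_FORWARDING_GETTER
};

int main() {
  Code c;
  CodeRef ref{&c};
  std::printf("%d %d\n", ref.kind(), ref.builtin_id());
}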
diff --git a/deps/v8/src/objects/code.cc b/deps/v8/src/objects/code.cc
index 6917b5c7ef..328b3cae7c 100644
--- a/deps/v8/src/objects/code.cc
+++ b/deps/v8/src/objects/code.cc
@@ -332,7 +332,8 @@ bool Code::IsIsolateIndependent(Isolate* isolate) {
// thus process-independent. See also: FinalizeEmbeddedCodeTargets.
if (RelocInfo::IsCodeTargetMode(it.rinfo()->rmode())) {
Address target_address = it.rinfo()->target_address();
- if (InstructionStream::PcIsOffHeap(isolate, target_address)) continue;
+ if (OffHeapInstructionStream::PcIsOffHeap(isolate, target_address))
+ continue;
Code target = Code::GetCodeFromTargetAddress(target_address);
CHECK(target.IsCode());
@@ -354,7 +355,7 @@ bool Code::Inlines(SharedFunctionInfo sfi) {
DeoptimizationData::cast(deoptimization_data());
if (data.length() == 0) return false;
if (data.SharedFunctionInfo() == sfi) return true;
- FixedArray const literals = data.LiteralArray();
+ DeoptimizationLiteralArray const literals = data.LiteralArray();
int const inlined_count = data.InlinedFunctionCount().value();
for (int i = 0; i < inlined_count; ++i) {
if (SharedFunctionInfo::cast(literals.get(i)) == sfi) return true;
@@ -585,33 +586,16 @@ void Code::Disassemble(const char* name, std::ostream& os, Isolate* isolate,
if (has_safepoint_info()) {
SafepointTable table(isolate, current_pc, *this);
- os << "Safepoints (size = " << table.size() << ")\n";
- for (unsigned i = 0; i < table.length(); i++) {
- unsigned pc_offset = table.GetPcOffset(i);
- os << reinterpret_cast<const void*>(InstructionStart() + pc_offset)
- << " ";
- os << std::setw(6) << std::hex << pc_offset << " " << std::setw(4);
- int trampoline_pc = table.GetTrampolinePcOffset(i);
- print_pc(os, trampoline_pc);
- os << std::dec << " ";
- table.PrintEntry(i, os);
- os << " (sp -> fp) ";
- SafepointEntry entry = table.GetEntry(i);
- if (entry.has_deoptimization_index()) {
- os << std::setw(6) << entry.deoptimization_index();
- } else {
- os << "<none>";
- }
- os << "\n";
- }
+ table.Print(os);
os << "\n";
}
if (has_handler_table()) {
HandlerTable table(*this);
os << "Handler Table (size = " << table.NumberOfReturnEntries() << ")\n";
- if (CodeKindIsOptimizedJSFunction(kind()))
+ if (CodeKindIsOptimizedJSFunction(kind())) {
table.HandlerTableReturnPrint(os);
+ }
os << "\n";
}
@@ -763,152 +747,150 @@ void DependentCode::SetDependentCode(Handle<HeapObject> object,
}
}
+namespace {
+
+void PrintDependencyGroups(DependentCode::DependencyGroups groups) {
+ while (groups != 0) {
+ auto group = static_cast<DependentCode::DependencyGroup>(
+ 1 << base::bits::CountTrailingZeros(static_cast<uint32_t>(groups)));
+ StdoutStream{} << DependentCode::DependencyGroupName(group);
+ groups &= ~group;
+ if (groups != 0) StdoutStream{} << ",";
+ }
+}
+
+} // namespace
+
void DependentCode::InstallDependency(Isolate* isolate, Handle<Code> code,
Handle<HeapObject> object,
- DependencyGroup group) {
+ DependencyGroups groups) {
if (V8_UNLIKELY(FLAG_trace_compilation_dependencies)) {
StdoutStream{} << "Installing dependency of [" << code->GetHeapObject()
- << "] on [" << object << "] in group ["
- << DependencyGroupName(group) << "]\n";
+ << "] on [" << object << "] in groups [";
+ PrintDependencyGroups(groups);
+ StdoutStream{} << "]\n";
}
Handle<DependentCode> old_deps(DependentCode::GetDependentCode(object),
isolate);
Handle<DependentCode> new_deps =
- InsertWeakCode(isolate, old_deps, group, code);
+ InsertWeakCode(isolate, old_deps, groups, code);
+
// Update the list head if necessary.
- if (!new_deps.is_identical_to(old_deps))
+ if (!new_deps.is_identical_to(old_deps)) {
DependentCode::SetDependentCode(object, new_deps);
+ }
}
Handle<DependentCode> DependentCode::InsertWeakCode(
- Isolate* isolate, Handle<DependentCode> entries, DependencyGroup group,
+ Isolate* isolate, Handle<DependentCode> entries, DependencyGroups groups,
Handle<Code> code) {
- if (entries->length() == 0 || entries->group() > group) {
- // There is no such group.
- return DependentCode::New(isolate, group, code, entries);
- }
- if (entries->group() < group) {
- // The group comes later in the list.
- Handle<DependentCode> old_next(entries->next_link(), isolate);
- Handle<DependentCode> new_next =
- InsertWeakCode(isolate, old_next, group, code);
- if (!old_next.is_identical_to(new_next)) {
- entries->set_next_link(*new_next);
- }
- return entries;
+ if (entries->length() == entries->capacity()) {
+ // We'd have to grow - try to compact first.
+ entries->IterateAndCompact([](CodeT, DependencyGroups) { return false; });
}
- DCHECK_EQ(group, entries->group());
- int count = entries->count();
- // Check for existing entry to avoid duplicates.
- {
- DisallowHeapAllocation no_gc;
- HeapObjectReference weak_code_entry =
- HeapObjectReference::Weak(ToCodeT(*code));
- for (int i = 0; i < count; i++) {
- if (entries->object_at(i) == weak_code_entry) return entries;
- }
- }
- if (entries->length() < kCodesStartIndex + count + 1) {
- entries = EnsureSpace(isolate, entries);
- // Count could have changed, reload it.
- count = entries->count();
- }
- DisallowHeapAllocation no_gc;
- HeapObjectReference weak_code_entry =
- HeapObjectReference::Weak(ToCodeT(*code));
- entries->set_object_at(count, weak_code_entry);
- entries->set_count(count + 1);
+ MaybeObjectHandle code_slot(HeapObjectReference::Weak(ToCodeT(*code)),
+ isolate);
+ MaybeObjectHandle group_slot(MaybeObject::FromSmi(Smi::FromInt(groups)),
+ isolate);
+ entries = Handle<DependentCode>::cast(
+ WeakArrayList::AddToEnd(isolate, entries, code_slot, group_slot));
return entries;
}
Handle<DependentCode> DependentCode::New(Isolate* isolate,
- DependencyGroup group,
- Handle<Code> code,
- Handle<DependentCode> next) {
- Handle<DependentCode> result =
- Handle<DependentCode>::cast(isolate->factory()->NewWeakFixedArray(
- kCodesStartIndex + 1, AllocationType::kOld));
- result->set_next_link(*next);
- result->set_flags(GroupField::encode(group) | CountField::encode(1));
-
- HeapObjectReference weak_code_entry =
- HeapObjectReference::Weak(ToCodeT(*code));
- result->set_object_at(0, weak_code_entry);
+ DependencyGroups groups,
+ Handle<Code> code) {
+ Handle<DependentCode> result = Handle<DependentCode>::cast(
+ isolate->factory()->NewWeakArrayList(LengthFor(1), AllocationType::kOld));
+ result->Set(0, HeapObjectReference::Weak(ToCodeT(*code)));
+ result->Set(1, Smi::FromInt(groups));
return result;
}
-Handle<DependentCode> DependentCode::EnsureSpace(
- Isolate* isolate, Handle<DependentCode> entries) {
- if (entries->Compact()) return entries;
- int capacity = kCodesStartIndex + DependentCode::Grow(entries->count());
- int grow_by = capacity - entries->length();
- return Handle<DependentCode>::cast(
- isolate->factory()->CopyWeakFixedArrayAndGrow(entries, grow_by));
-}
+void DependentCode::IterateAndCompact(const IterateAndCompactFn& fn) {
+ DisallowGarbageCollection no_gc;
-bool DependentCode::Compact() {
- int old_count = count();
- int new_count = 0;
- for (int i = 0; i < old_count; i++) {
- MaybeObject obj = object_at(i);
- if (!obj->IsCleared()) {
- if (i != new_count) {
- copy(i, new_count);
- }
- new_count++;
+ int len = length();
+ if (len == 0) return;
+
+ // We compact during traversal, thus use a somewhat custom loop construct:
+ //
+ // - Loop back-to-front s.t. trailing cleared entries can simply drop off
+ // the back of the list.
+ // - Any cleared slots are filled from the back of the list.
+ int i = len - kSlotsPerEntry;
+ while (i >= 0) {
+ MaybeObject obj = Get(i + kCodeSlotOffset);
+ if (obj->IsCleared()) {
+ len = FillEntryFromBack(i, len);
+ i -= kSlotsPerEntry;
+ continue;
}
+
+ if (fn(CodeT::cast(obj->GetHeapObjectAssumeWeak()),
+ static_cast<DependencyGroups>(
+ Get(i + kGroupsSlotOffset).ToSmi().value()))) {
+ len = FillEntryFromBack(i, len);
+ }
+
+ i -= kSlotsPerEntry;
}
- set_count(new_count);
- for (int i = new_count; i < old_count; i++) {
- clear_at(i);
- }
- return new_count < old_count;
+
+ set_length(len);
}
bool DependentCode::MarkCodeForDeoptimization(
- DependentCode::DependencyGroup group) {
- if (this->length() == 0 || this->group() > group) {
- // There is no such group.
- return false;
- }
- if (this->group() < group) {
- // The group comes later in the list.
- return next_link().MarkCodeForDeoptimization(group);
- }
- DCHECK_EQ(group, this->group());
- DisallowGarbageCollection no_gc_scope;
- // Mark all the code that needs to be deoptimized.
- bool marked = false;
- int count = this->count();
- for (int i = 0; i < count; i++) {
- MaybeObject obj = object_at(i);
- if (obj->IsCleared()) continue;
+ DependentCode::DependencyGroups deopt_groups) {
+ DisallowGarbageCollection no_gc;
+
+ bool marked_something = false;
+ IterateAndCompact([&](CodeT codet, DependencyGroups groups) {
+ if ((groups & deopt_groups) == 0) return false;
+
// TODO(v8:11880): avoid roundtrips between cdc and code.
- Code code = FromCodeT(CodeT::cast(obj->GetHeapObjectAssumeWeak()));
+ Code code = FromCodeT(codet);
if (!code.marked_for_deoptimization()) {
- code.SetMarkedForDeoptimization(DependencyGroupName(group));
- marked = true;
+ code.SetMarkedForDeoptimization("code dependencies");
+ marked_something = true;
}
+
+ return true;
+ });
+
+ return marked_something;
+}
+
+int DependentCode::FillEntryFromBack(int index, int length) {
+ DCHECK_EQ(index % 2, 0);
+ DCHECK_EQ(length % 2, 0);
+ for (int i = length - kSlotsPerEntry; i > index; i -= kSlotsPerEntry) {
+ MaybeObject obj = Get(i + kCodeSlotOffset);
+ if (obj->IsCleared()) continue;
+
+ Set(index + kCodeSlotOffset, obj);
+ Set(index + kGroupsSlotOffset, Get(i + kGroupsSlotOffset),
+ SKIP_WRITE_BARRIER);
+ return i;
}
- for (int i = 0; i < count; i++) {
- clear_at(i);
- }
- set_count(0);
- return marked;
+ return index; // No non-cleared entry found.
}
void DependentCode::DeoptimizeDependentCodeGroup(
- DependentCode::DependencyGroup group) {
+ Isolate* isolate, DependentCode::DependencyGroups groups) {
DisallowGarbageCollection no_gc_scope;
- bool marked = MarkCodeForDeoptimization(group);
- if (marked) {
+ bool marked_something = MarkCodeForDeoptimization(groups);
+ if (marked_something) {
DCHECK(AllowCodeDependencyChange::IsAllowed());
- // TODO(11527): pass Isolate as an argument.
- Deoptimizer::DeoptimizeMarkedCode(GetIsolateFromWritableObject(*this));
+ Deoptimizer::DeoptimizeMarkedCode(isolate);
}
}
+// static
+DependentCode DependentCode::empty_dependent_code(const ReadOnlyRoots& roots) {
+ return DependentCode::cast(roots.empty_weak_array_list());
+}
+
void Code::SetMarkedForDeoptimization(const char* reason) {
set_marked_for_deoptimization(true);
Deoptimizer::TraceMarkForDeoptimization(*this, reason);
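
DependentCode::IterateAndCompact walks the {code, groups} entries back to front and fills any cleared or removed entry from the back of the list, so no separate compaction pass is needed. A standalone sketch of the same scheme over a flat vector of entries, with a null code pointer standing in for a cleared weak reference:

#include <cstdint>
#include <functional>
#include <vector>

struct Entry {
  const void* code;  // nullptr plays the role of a cleared weak slot.
  uint32_t groups;
};

// Moves the last live entry into `index`; returns the new logical length.
int FillEntryFromBack(std::vector<Entry>& entries, int index, int length) {
  for (int i = length - 1; i > index; i--) {
    if (entries[i].code == nullptr) continue;  // Drop trailing cleared slots.
    entries[index] = entries[i];
    return i;
  }
  return index;  // Nothing live behind us; truncate at `index`.
}

// `fn` returns true if the entry should be removed (cf. marking for deopt).
void IterateAndCompact(std::vector<Entry>& entries,
                       const std::function<bool(const void*, uint32_t)>& fn) {
  int len = static_cast<int>(entries.size());
  for (int i = len - 1; i >= 0; i--) {
    if (entries[i].code == nullptr || fn(entries[i].code, entries[i].groups)) {
      len = FillEntryFromBack(entries, i, len);
    }
  }
  entries.resize(len);
}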
diff --git a/deps/v8/src/objects/code.h b/deps/v8/src/objects/code.h
index 18d36e2c02..28c483cb1d 100644
--- a/deps/v8/src/objects/code.h
+++ b/deps/v8/src/objects/code.h
@@ -85,6 +85,24 @@ class CodeDataContainer : public HeapObject {
// Alias for code_entry_point to make it API compatible with Code.
inline Address InstructionStart() const;
+#ifdef V8_EXTERNAL_CODE_SPACE
+ //
+ // A collection of getters and predicates that forward queries to associated
+ // Code object.
+ //
+
+ inline CodeKind kind() const;
+ inline Builtin builtin_id() const;
+ inline bool is_builtin() const;
+ inline bool is_interpreter_trampoline_builtin() const;
+
+ DECL_GETTER(deoptimization_data, FixedArray)
+ DECL_GETTER(bytecode_or_interpreter_data, HeapObject)
+ DECL_GETTER(source_position_table, ByteArray)
+ DECL_GETTER(bytecode_offset_table, ByteArray)
+
+#endif // V8_EXTERNAL_CODE_SPACE
+
DECL_CAST(CodeDataContainer)
// Dispatched behavior.
@@ -501,11 +519,14 @@ class Code : public HeapObject {
return RoundUp(kHeaderSize + body_size, kCodeAlignment);
}
+ inline int CodeSize() const;
+
+ // Hides HeapObject::Size(...) and redirects queries to CodeSize().
+ DECL_GETTER(Size, int)
+
DECL_CAST(Code)
// Dispatched behavior.
- inline int CodeSize() const;
-
DECL_PRINTER(Code)
DECL_VERIFIER(Code)
@@ -523,6 +544,8 @@ class Code : public HeapObject {
static inline bool IsWeakObjectInOptimizedCode(HeapObject object);
+ static inline bool IsWeakObjectInDeoptimizationLiteralArray(Object object);
+
// Returns false if this is an embedded builtin Code object that's in
// read_only_space and hence doesn't have execute permissions.
inline bool IsExecutable();
@@ -725,7 +748,7 @@ class AbstractCode : public HeapObject {
inline int SizeIncludingMetadata();
// Returns true if pc is inside this object's instructions.
- inline bool contains(Address pc);
+ inline bool contains(Isolate* isolate, Address pc);
// Returns the kind of the code.
inline CodeKind kind();
@@ -744,34 +767,26 @@ class AbstractCode : public HeapObject {
inline ByteArray SourcePositionTableInternal();
};
-// Dependent code is a singly linked list of weak fixed arrays. Each array
-// contains weak pointers to code objects for one dependent group. The suffix of
-// the array can be filled with the undefined value if the number of codes is
-// less than the length of the array.
+// Dependent code is conceptually the list of {Code, DependencyGroup} tuples
+// associated with an object, where the dependency group is a reason that could
+// lead to a deopt of the corresponding code.
//
-// +------+-----------------+--------+--------+-----+--------+-----------+-----+
-// | next | count & group 1 | code 1 | code 2 | ... | code n | undefined | ... |
-// +------+-----------------+--------+--------+-----+--------+-----------+-----+
-// |
-// V
-// +------+-----------------+--------+--------+-----+--------+-----------+-----+
-// | next | count & group 2 | code 1 | code 2 | ... | code m | undefined | ... |
-// +------+-----------------+--------+--------+-----+--------+-----------+-----+
-// |
-// V
-// empty_weak_fixed_array()
+// Implementation details: DependentCode is a weak array list containing
+// entries, where each entry consists of a (weak) Code object and the
+// DependencyGroups bitset as a Smi.
//
-// The list of weak fixed arrays is ordered by dependency groups.
-
-class DependentCode : public WeakFixedArray {
+// Note the underlying weak array list currently never shrinks physically (the
+// contents may shrink).
+// TODO(jgruber): Consider adding physical shrinking.
+class DependentCode : public WeakArrayList {
public:
DECL_CAST(DependentCode)
enum DependencyGroup {
- // Group of code that embed a transition to this map, and depend on being
- // deoptimized when the transition is replaced by a new version.
- kTransitionGroup,
- // Group of code that omit run-time prototype checks for prototypes
+ // Group of code objects that embed a transition to this map, and depend on
+ // being deoptimized when the transition is replaced by a new version.
+ kTransitionGroup = 1 << 0,
+ // Group of code objects that omit run-time prototype checks for prototypes
// described by this map. The group is deoptimized whenever the following
// conditions hold, possibly invalidating the assumptions embedded in the
// code:
@@ -780,89 +795,82 @@ class DependentCode : public WeakFixedArray {
// b) A dictionary-mode prototype described by this map changes shape, the
// const-ness of one of its properties changes, or its [[Prototype]]
// changes (only the latter causes a transition).
- kPrototypeCheckGroup,
- // Group of code that depends on global property values in property cells
- // not being changed.
- kPropertyCellChangedGroup,
- // Group of code that omit run-time checks for field(s) introduced by
- // this map, i.e. for the field type.
- kFieldTypeGroup,
- kFieldConstGroup,
- kFieldRepresentationGroup,
- // Group of code that omit run-time type checks for initial maps of
+ kPrototypeCheckGroup = 1 << 1,
+ // Group of code objects that depends on global property values in property
+ // cells not being changed.
+ kPropertyCellChangedGroup = 1 << 2,
+ // Group of code objects that omit run-time checks for field(s) introduced
+ // by this map, i.e. for the field type.
+ kFieldTypeGroup = 1 << 3,
+ kFieldConstGroup = 1 << 4,
+ kFieldRepresentationGroup = 1 << 5,
+ // Group of code objects that omit run-time type checks for initial maps of
// constructors.
- kInitialMapChangedGroup,
- // Group of code that depends on tenuring information in AllocationSites
- // not being changed.
- kAllocationSiteTenuringChangedGroup,
- // Group of code that depends on element transition information in
+ kInitialMapChangedGroup = 1 << 6,
+ // Group of code objects that depends on tenuring information in
// AllocationSites not being changed.
- kAllocationSiteTransitionChangedGroup
+ kAllocationSiteTenuringChangedGroup = 1 << 7,
+ // Group of code objects that depends on element transition information in
+ // AllocationSites not being changed.
+ kAllocationSiteTransitionChangedGroup = 1 << 8,
+ // IMPORTANT: The last bit must fit into a Smi, i.e. into 31 bits.
};
+ using DependencyGroups = base::Flags<DependencyGroup, uint32_t>;
+
+ static const char* DependencyGroupName(DependencyGroup group);
- // Register a dependency of {code} on {object}, of the kind given by {group}.
+ // Register a dependency of {code} on {object}, of the kinds given by
+ // {groups}.
V8_EXPORT_PRIVATE static void InstallDependency(Isolate* isolate,
Handle<Code> code,
Handle<HeapObject> object,
- DependencyGroup group);
+ DependencyGroups groups);
- void DeoptimizeDependentCodeGroup(DependencyGroup group);
+ void DeoptimizeDependentCodeGroup(Isolate* isolate, DependencyGroups groups);
- bool MarkCodeForDeoptimization(DependencyGroup group);
+ bool MarkCodeForDeoptimization(DependencyGroups deopt_groups);
- // The following low-level accessors are exposed only for tests.
- inline DependencyGroup group();
- inline MaybeObject object_at(int i);
- inline int count();
- inline DependentCode next_link();
+ V8_EXPORT_PRIVATE static DependentCode empty_dependent_code(
+ const ReadOnlyRoots& roots);
+ static constexpr RootIndex kEmptyDependentCode =
+ RootIndex::kEmptyWeakArrayList;
- private:
- static const char* DependencyGroupName(DependencyGroup group);
+ // Constants exposed for tests.
+ static constexpr int kSlotsPerEntry = 2; // {code: weak Code, groups: Smi}.
+ static constexpr int kCodeSlotOffset = 0;
+ static constexpr int kGroupsSlotOffset = 1;
+ private:
// Get/Set {object}'s {DependentCode}.
static DependentCode GetDependentCode(Handle<HeapObject> object);
static void SetDependentCode(Handle<HeapObject> object,
Handle<DependentCode> dep);
- static Handle<DependentCode> New(Isolate* isolate, DependencyGroup group,
- Handle<Code> code,
- Handle<DependentCode> next);
- static Handle<DependentCode> EnsureSpace(Isolate* isolate,
- Handle<DependentCode> entries);
+ static Handle<DependentCode> New(Isolate* isolate, DependencyGroups groups,
+ Handle<Code> code);
static Handle<DependentCode> InsertWeakCode(Isolate* isolate,
Handle<DependentCode> entries,
- DependencyGroup group,
+ DependencyGroups groups,
Handle<Code> code);
- // Compact by removing cleared weak cells and return true if there was
- // any cleared weak cell.
- bool Compact();
-
- static int Grow(int number_of_entries) {
- if (number_of_entries < 5) return number_of_entries + 1;
- return number_of_entries * 5 / 4;
- }
-
- static const int kGroupCount = kAllocationSiteTransitionChangedGroup + 1;
- static const int kNextLinkIndex = 0;
- static const int kFlagsIndex = 1;
- static const int kCodesStartIndex = 2;
+ // The callback is called for all non-cleared entries, and should return true
+ // iff the current entry should be cleared.
+ using IterateAndCompactFn = std::function<bool(CodeT, DependencyGroups)>;
+ void IterateAndCompact(const IterateAndCompactFn& fn);
- inline void set_next_link(DependentCode next);
- inline void set_count(int value);
- inline void set_object_at(int i, MaybeObject object);
- inline void clear_at(int i);
- inline void copy(int from, int to);
+ // Fills the given entry with the last non-cleared entry in this list, and
+ // returns the new length after the last non-cleared entry has been moved.
+ int FillEntryFromBack(int index, int length);
- inline int flags();
- inline void set_flags(int flags);
- using GroupField = base::BitField<int, 0, 5>;
- using CountField = base::BitField<int, 5, 27>;
- STATIC_ASSERT(kGroupCount <= GroupField::kMax + 1);
+ static constexpr int LengthFor(int number_of_entries) {
+ return number_of_entries * kSlotsPerEntry;
+ }
- OBJECT_CONSTRUCTORS(DependentCode, WeakFixedArray);
+ OBJECT_CONSTRUCTORS(DependentCode, WeakArrayList);
};
+DEFINE_OPERATORS_FOR_FLAGS(DependentCode::DependencyGroups)
+
// BytecodeArray represents a sequence of interpreter bytecodes.
class BytecodeArray
: public TorqueGeneratedBytecodeArray<BytecodeArray, FixedArrayBase> {
@@ -974,6 +982,24 @@ class BytecodeArray
TQ_OBJECT_CONSTRUCTORS(BytecodeArray)
};
+// This class holds data required during deoptimization. It does not have its
+// own instance type.
+class DeoptimizationLiteralArray : public WeakFixedArray {
+ public:
+ // Getters for literals. These include runtime checks that the pointer was not
+ // cleared, if the literal was held weakly.
+ inline Object get(int index) const;
+ inline Object get(PtrComprCageBase cage_base, int index) const;
+
+ // Setter for literals. This will set the object as strong or weak depending
+ // on Code::IsWeakObjectInOptimizedCode.
+ inline void set(int index, Object value);
+
+ DECL_CAST(DeoptimizationLiteralArray)
+
+ OBJECT_CONSTRUCTORS(DeoptimizationLiteralArray, WeakFixedArray);
+};
+
// DeoptimizationData is a fixed array used to hold the deoptimization data for
// optimized code. It also contains information about functions that were
// inlined. If N different functions were inlined then the first N elements of
@@ -1014,7 +1040,7 @@ class DeoptimizationData : public FixedArray {
DECL_ELEMENT_ACCESSORS(TranslationByteArray, TranslationArray)
DECL_ELEMENT_ACCESSORS(InlinedFunctionCount, Smi)
- DECL_ELEMENT_ACCESSORS(LiteralArray, FixedArray)
+ DECL_ELEMENT_ACCESSORS(LiteralArray, DeoptimizationLiteralArray)
DECL_ELEMENT_ACCESSORS(OsrBytecodeOffset, Smi)
DECL_ELEMENT_ACCESSORS(OsrPcOffset, Smi)
DECL_ELEMENT_ACCESSORS(OptimizationId, Smi)
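
Since every dependency group is now its own bit, a single DependentCode entry can record several deopt reasons at once, and DeoptimizeDependentCodeGroup can be asked about a whole set of groups with one bitwise test, which is exactly the (groups & deopt_groups) check in the code.cc hunk further up. A self-contained illustration of that encoding follows; the enumerator values are copied from the hunk, but the plain uint32_t alias and the Intersects helper are simplifications of base::Flags, not V8 API.

    #include <cstdint>

    // Power-of-two encoding as introduced above.
    enum DependencyGroup : uint32_t {
      kTransitionGroup = 1u << 0,
      kPrototypeCheckGroup = 1u << 1,
      kPropertyCellChangedGroup = 1u << 2,
      kFieldTypeGroup = 1u << 3,
      kFieldConstGroup = 1u << 4,
      kFieldRepresentationGroup = 1u << 5,
      kInitialMapChangedGroup = 1u << 6,
      kAllocationSiteTenuringChangedGroup = 1u << 7,
      kAllocationSiteTransitionChangedGroup = 1u << 8,
    };
    using DependencyGroups = uint32_t;  // base::Flags<DependencyGroup> in V8.

    // Mirrors the "last bit must fit into a Smi, i.e. into 31 bits" note above.
    static_assert(kAllocationSiteTransitionChangedGroup < (1u << 31),
                  "group bits must fit into a Smi");

    // One entry can carry several reasons; one AND answers "any of these?".
    constexpr bool Intersects(DependencyGroups entry,
                              DependencyGroups requested) {
      return (entry & requested) != 0;
    }
    static_assert(Intersects(kFieldTypeGroup | kFieldConstGroup,
                             kFieldConstGroup),
                  "a combined entry matches any of its groups");
    static_assert(!Intersects(kTransitionGroup, kPrototypeCheckGroup),
                  "disjoint groups do not match");
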
diff --git a/deps/v8/src/objects/contexts.cc b/deps/v8/src/objects/contexts.cc
index a5cfeaa3f0..cdf9edce24 100644
--- a/deps/v8/src/objects/contexts.cc
+++ b/deps/v8/src/objects/contexts.cc
@@ -218,16 +218,17 @@ Handle<Object> Context::Lookup(Handle<Context> context, Handle<String> name,
context->global_object().native_context().script_context_table();
VariableLookupResult r;
if (ScriptContextTable::Lookup(isolate, script_contexts, *name, &r)) {
- Context context = script_contexts.get_context(r.context_index);
+ Context script_context = script_contexts.get_context(r.context_index);
if (FLAG_trace_contexts) {
PrintF("=> found property in script context %d: %p\n",
- r.context_index, reinterpret_cast<void*>(context.ptr()));
+ r.context_index,
+ reinterpret_cast<void*>(script_context.ptr()));
}
*index = r.slot_index;
*variable_mode = r.mode;
*init_flag = r.init_flag;
*attributes = GetAttributesForMode(r.mode);
- return handle(context, isolate);
+ return handle(script_context, isolate);
}
}
@@ -380,9 +381,9 @@ Handle<Object> Context::Lookup(Handle<Context> context, Handle<String> name,
// Check the original context, but do not follow its context chain.
Object obj = context->get(WRAPPED_CONTEXT_INDEX);
if (obj.IsContext()) {
- Handle<Context> context(Context::cast(obj), isolate);
+ Handle<Context> wrapped_context(Context::cast(obj), isolate);
Handle<Object> result =
- Context::Lookup(context, name, DONT_FOLLOW_CHAINS, index,
+ Context::Lookup(wrapped_context, name, DONT_FOLLOW_CHAINS, index,
attributes, init_flag, variable_mode);
if (!result.is_null()) return result;
}
diff --git a/deps/v8/src/objects/contexts.h b/deps/v8/src/objects/contexts.h
index 36ce5ce895..46228428b9 100644
--- a/deps/v8/src/objects/contexts.h
+++ b/deps/v8/src/objects/contexts.h
@@ -312,6 +312,7 @@ enum ContextLookupFlags {
V(WASM_MEMORY_CONSTRUCTOR_INDEX, JSFunction, wasm_memory_constructor) \
V(WASM_MODULE_CONSTRUCTOR_INDEX, JSFunction, wasm_module_constructor) \
V(WASM_TABLE_CONSTRUCTOR_INDEX, JSFunction, wasm_table_constructor) \
+ V(WASM_SUSPENDER_CONSTRUCTOR_INDEX, JSFunction, wasm_suspender_constructor) \
V(TEMPLATE_WEAKMAP_INDEX, HeapObject, template_weakmap) \
V(TYPED_ARRAY_FUN_INDEX, JSFunction, typed_array_function) \
V(TYPED_ARRAY_PROTOTYPE_INDEX, JSObject, typed_array_prototype) \
@@ -357,7 +358,7 @@ enum ContextLookupFlags {
V(WEAKMAP_GET_INDEX, JSFunction, weakmap_get) \
V(WEAKMAP_DELETE_INDEX, JSFunction, weakmap_delete) \
V(WEAKSET_ADD_INDEX, JSFunction, weakset_add) \
- V(RETAINED_MAPS, WeakArrayList, retained_maps) \
+ V(RETAINED_MAPS, Object, retained_maps) \
V(OSR_CODE_CACHE_INDEX, WeakFixedArray, osr_code_cache)
#include "torque-generated/src/objects/contexts-tq.inc"
diff --git a/deps/v8/src/objects/contexts.tq b/deps/v8/src/objects/contexts.tq
index f7c0b875ef..8ace1c204b 100644
--- a/deps/v8/src/objects/contexts.tq
+++ b/deps/v8/src/objects/contexts.tq
@@ -3,12 +3,10 @@
// found in the LICENSE file.
@abstract
-@export
-@customCppClass
 // We normally don't generate a BodyDescriptor for an abstract class, but here we
// do since all context classes share the same BodyDescriptor.
@generateBodyDescriptor
-class Context extends HeapObject {
+extern class Context extends HeapObject {
macro GetScopeInfo(): ScopeInfo {
return *ContextSlot(this, ContextSlot::SCOPE_INFO_INDEX);
}
diff --git a/deps/v8/src/objects/debug-objects.h b/deps/v8/src/objects/debug-objects.h
index 77c9b80e65..00aae07e46 100644
--- a/deps/v8/src/objects/debug-objects.h
+++ b/deps/v8/src/objects/debug-objects.h
@@ -21,6 +21,7 @@ namespace internal {
class BreakPoint;
class BytecodeArray;
+class StructBodyDescriptor;
#include "torque-generated/src/objects/debug-objects-tq.inc"
@@ -132,6 +133,8 @@ class DebugInfo : public TorqueGeneratedDebugInfo<DebugInfo, Struct> {
static const int kEstimatedNofBreakPointsInFunction = 4;
+ using BodyDescriptor = StructBodyDescriptor;
+
private:
// Get the break point info object for a source position.
Object GetBreakPointInfo(Isolate* isolate, int source_position);
@@ -163,6 +166,8 @@ class BreakPointInfo
int GetStatementPosition(Handle<DebugInfo> debug_info);
+ using BodyDescriptor = StructBodyDescriptor;
+
TQ_OBJECT_CONSTRUCTORS(BreakPointInfo)
};
@@ -193,6 +198,8 @@ class CoverageInfo
// Holds breakpoint related information. This object is used by inspector.
class BreakPoint : public TorqueGeneratedBreakPoint<BreakPoint, Struct> {
public:
+ using BodyDescriptor = StructBodyDescriptor;
+
TQ_OBJECT_CONSTRUCTORS(BreakPoint)
};
diff --git a/deps/v8/src/objects/descriptor-array.h b/deps/v8/src/objects/descriptor-array.h
index 1a9eb7fae1..38a1874102 100644
--- a/deps/v8/src/objects/descriptor-array.h
+++ b/deps/v8/src/objects/descriptor-array.h
@@ -24,6 +24,7 @@ template <typename T>
class Handle;
class Isolate;
+class StructBodyDescriptor;
#include "torque-generated/src/objects/descriptor-array-tq.inc"
@@ -32,6 +33,8 @@ class EnumCache : public TorqueGeneratedEnumCache<EnumCache, Struct> {
public:
DECL_VERIFIER(EnumCache)
+ using BodyDescriptor = StructBodyDescriptor;
+
TQ_OBJECT_CONSTRUCTORS(EnumCache)
};
diff --git a/deps/v8/src/objects/descriptor-array.tq b/deps/v8/src/objects/descriptor-array.tq
index 9e15812cb2..35c1e9a62a 100644
--- a/deps/v8/src/objects/descriptor-array.tq
+++ b/deps/v8/src/objects/descriptor-array.tq
@@ -14,9 +14,9 @@ struct DescriptorEntry {
value: JSAny|Weak<Map>|AccessorInfo|AccessorPair|ClassPositions;
}
-@export
-@customCppClass
-class DescriptorArray extends HeapObject {
+@generateBodyDescriptor
+@generateUniqueMap
+extern class DescriptorArray extends HeapObject {
const number_of_all_descriptors: uint16;
number_of_descriptors: uint16;
raw_number_of_marked_descriptors: uint16;
diff --git a/deps/v8/src/objects/elements.cc b/deps/v8/src/objects/elements.cc
index f4772afb39..22fd0ada9f 100644
--- a/deps/v8/src/objects/elements.cc
+++ b/deps/v8/src/objects/elements.cc
@@ -1081,7 +1081,7 @@ class ElementsAccessorBase : public InternalElementsAccessor {
PropertyDetails details = Subclass::GetDetailsImpl(*object, entry);
Handle<Object> value;
- if (details.kind() == kData) {
+ if (details.kind() == PropertyKind::kData) {
value = Subclass::GetInternalImpl(object, entry);
} else {
// This might modify the elements and/or change the elements kind.
@@ -1364,11 +1364,13 @@ class ElementsAccessorBase : public InternalElementsAccessor {
static PropertyDetails GetDetailsImpl(FixedArrayBase backing_store,
InternalIndex entry) {
- return PropertyDetails(kData, NONE, PropertyCellType::kNoCell);
+ return PropertyDetails(PropertyKind::kData, NONE,
+ PropertyCellType::kNoCell);
}
static PropertyDetails GetDetailsImpl(JSObject holder, InternalIndex entry) {
- return PropertyDetails(kData, NONE, PropertyCellType::kNoCell);
+ return PropertyDetails(PropertyKind::kData, NONE,
+ PropertyCellType::kNoCell);
}
PropertyDetails GetDetails(JSObject holder, InternalIndex entry) final {
@@ -1488,7 +1490,7 @@ class DictionaryElementsAccessor
Object key = dict.KeyAt(cage_base, i);
if (!dict.IsKey(roots, key)) continue;
PropertyDetails details = dict.DetailsAt(i);
- if (details.kind() == kAccessor) return true;
+ if (details.kind() == PropertyKind::kAccessor) return true;
}
return false;
}
@@ -1521,8 +1523,9 @@ class DictionaryElementsAccessor
if (attributes != NONE) object->RequireSlowElements(dictionary);
dictionary.ValueAtPut(entry, *value);
PropertyDetails details = dictionary.DetailsAt(entry);
- details = PropertyDetails(kData, attributes, PropertyCellType::kNoCell,
- details.dictionary_index());
+ details =
+ PropertyDetails(PropertyKind::kData, attributes,
+ PropertyCellType::kNoCell, details.dictionary_index());
dictionary.DetailsAtPut(entry, details);
}
@@ -1531,7 +1534,8 @@ class DictionaryElementsAccessor
Handle<Object> value,
PropertyAttributes attributes,
uint32_t new_capacity) {
- PropertyDetails details(kData, attributes, PropertyCellType::kNoCell);
+ PropertyDetails details(PropertyKind::kData, attributes,
+ PropertyCellType::kNoCell);
Handle<NumberDictionary> dictionary =
object->HasFastElements() || object->HasFastStringWrapperElements()
? JSObject::NormalizeElements(object)
@@ -1695,7 +1699,7 @@ class DictionaryElementsAccessor
continue;
}
- if (dictionary.DetailsAt(i).kind() == kAccessor) {
+ if (dictionary.DetailsAt(i).kind() == PropertyKind::kAccessor) {
// Restart from beginning in slow path, otherwise we may observably
// access getters out of order
return false;
@@ -1740,12 +1744,12 @@ class DictionaryElementsAccessor
PropertyDetails details = GetDetailsImpl(*dictionary, entry);
switch (details.kind()) {
- case kData: {
+ case PropertyKind::kData: {
Object element_k = dictionary->ValueAt(entry);
if (value->SameValueZero(element_k)) return Just(true);
break;
}
- case kAccessor: {
+ case PropertyKind::kAccessor: {
LookupIterator it(isolate, receiver, k,
LookupIterator::OWN_SKIP_INTERCEPTOR);
DCHECK(it.IsFound());
@@ -1812,14 +1816,14 @@ class DictionaryElementsAccessor
PropertyDetails details =
GetDetailsImpl(*dictionary, InternalIndex(entry));
switch (details.kind()) {
- case kData: {
+ case PropertyKind::kData: {
Object element_k = dictionary->ValueAt(entry);
if (value->StrictEquals(element_k)) {
return Just<int64_t>(k);
}
break;
}
- case kAccessor: {
+ case PropertyKind::kAccessor: {
LookupIterator it(isolate, receiver, k,
LookupIterator::OWN_SKIP_INTERCEPTOR);
DCHECK(it.IsFound());
@@ -2186,7 +2190,7 @@ class FastElementsAccessor : public ElementsAccessorBase<Subclass, KindTraits> {
dst_elms = BackingStore::cast(
isolate->heap()->LeftTrimFixedArray(dst_elms, src_index));
// Update all the copies of this backing_store handle.
- *backing_store.location() = dst_elms.ptr();
+ backing_store.PatchValue(dst_elms);
receiver->set_elements(dst_elms);
// Adjust the hole offset as the array has been shrunk.
hole_end -= src_index;
@@ -2306,7 +2310,7 @@ class FastElementsAccessor : public ElementsAccessorBase<Subclass, KindTraits> {
}
} else {
if (!value.IsNaN()) {
- double search_value = value.Number();
+ double search_number = value.Number();
if (IsDoubleElementsKind(Subclass::kind())) {
// Search for non-NaN Number in PACKED_DOUBLE_ELEMENTS or
// HOLEY_DOUBLE_ELEMENTS --- Skip TheHole, and trust UCOMISD or
@@ -2316,7 +2320,7 @@ class FastElementsAccessor : public ElementsAccessorBase<Subclass, KindTraits> {
for (size_t k = start_from; k < length; ++k) {
if (elements.is_the_hole(static_cast<int>(k))) continue;
- if (elements.get_scalar(static_cast<int>(k)) == search_value) {
+ if (elements.get_scalar(static_cast<int>(k)) == search_number) {
return Just(true);
}
}
@@ -2329,7 +2333,7 @@ class FastElementsAccessor : public ElementsAccessorBase<Subclass, KindTraits> {
for (size_t k = start_from; k < length; ++k) {
Object element_k = elements.get(static_cast<int>(k));
- if (element_k.IsNumber() && element_k.Number() == search_value) {
+ if (element_k.IsNumber() && element_k.Number() == search_number) {
return Just(true);
}
}
@@ -3204,12 +3208,14 @@ class TypedElementsAccessor
}
static PropertyDetails GetDetailsImpl(JSObject holder, InternalIndex entry) {
- return PropertyDetails(kData, NONE, PropertyCellType::kNoCell);
+ return PropertyDetails(PropertyKind::kData, NONE,
+ PropertyCellType::kNoCell);
}
static PropertyDetails GetDetailsImpl(FixedArrayBase backing_store,
InternalIndex entry) {
- return PropertyDetails(kData, NONE, PropertyCellType::kNoCell);
+ return PropertyDetails(PropertyKind::kData, NONE,
+ PropertyCellType::kNoCell);
}
static bool HasElementImpl(Isolate* isolate, JSObject holder, size_t index,
@@ -4473,7 +4479,8 @@ class SloppyArgumentsElementsAccessor
SloppyArgumentsElements::cast(holder.elements());
uint32_t length = elements.length();
if (entry.as_uint32() < length) {
- return PropertyDetails(kData, NONE, PropertyCellType::kNoCell);
+ return PropertyDetails(PropertyKind::kData, NONE,
+ PropertyCellType::kNoCell);
}
FixedArray arguments = elements.arguments();
return ArgumentsAccessor::GetDetailsImpl(arguments,
@@ -4687,7 +4694,8 @@ class SlowSloppyArgumentsElementsAccessor
old_arguments->IsNumberDictionary()
? Handle<NumberDictionary>::cast(old_arguments)
: JSObject::NormalizeElements(object);
- PropertyDetails details(kData, attributes, PropertyCellType::kNoCell);
+ PropertyDetails details(PropertyKind::kData, attributes,
+ PropertyCellType::kNoCell);
Handle<NumberDictionary> new_dictionary =
NumberDictionary::Add(isolate, dictionary, index, value, details);
if (attributes != NONE) object->RequireSlowElements(*new_dictionary);
@@ -4721,7 +4729,8 @@ class SlowSloppyArgumentsElementsAccessor
value = isolate->factory()->NewAliasedArgumentsEntry(context_entry);
}
- PropertyDetails details(kData, attributes, PropertyCellType::kNoCell);
+ PropertyDetails details(PropertyKind::kData, attributes,
+ PropertyCellType::kNoCell);
Handle<NumberDictionary> arguments(
NumberDictionary::cast(elements->arguments()), isolate);
arguments = NumberDictionary::Add(isolate, arguments, entry.as_uint32(),
@@ -4899,7 +4908,8 @@ class StringWrapperElementsAccessor
if (entry.as_uint32() < length) {
PropertyAttributes attributes =
static_cast<PropertyAttributes>(READ_ONLY | DONT_DELETE);
- return PropertyDetails(kData, attributes, PropertyCellType::kNoCell);
+ return PropertyDetails(PropertyKind::kData, attributes,
+ PropertyCellType::kNoCell);
}
return BackingStoreAccessor::GetDetailsImpl(holder,
entry.adjust_down(length));
diff --git a/deps/v8/src/objects/feedback-vector.cc b/deps/v8/src/objects/feedback-vector.cc
index 10428d18a5..15a0851c57 100644
--- a/deps/v8/src/objects/feedback-vector.cc
+++ b/deps/v8/src/objects/feedback-vector.cc
@@ -95,7 +95,7 @@ Handle<FeedbackMetadata> FeedbackMetadata::New(IsolateT* isolate,
FeedbackSlotKind kind = spec->GetKind(FeedbackSlot(i));
int entry_size = FeedbackMetadata::GetSlotSize(kind);
for (int j = 1; j < entry_size; j++) {
- FeedbackSlotKind kind = spec->GetKind(FeedbackSlot(i + j));
+ kind = spec->GetKind(FeedbackSlot(i + j));
DCHECK_EQ(FeedbackSlotKind::kInvalid, kind);
}
i += entry_size;
@@ -725,20 +725,20 @@ InlineCacheState FeedbackNexus::ic_state() const {
switch (kind()) {
case FeedbackSlotKind::kLiteral:
- if (feedback->IsSmi()) return UNINITIALIZED;
- return MONOMORPHIC;
+ if (feedback->IsSmi()) return InlineCacheState::UNINITIALIZED;
+ return InlineCacheState::MONOMORPHIC;
case FeedbackSlotKind::kStoreGlobalSloppy:
case FeedbackSlotKind::kStoreGlobalStrict:
case FeedbackSlotKind::kLoadGlobalNotInsideTypeof:
case FeedbackSlotKind::kLoadGlobalInsideTypeof: {
- if (feedback->IsSmi()) return MONOMORPHIC;
+ if (feedback->IsSmi()) return InlineCacheState::MONOMORPHIC;
DCHECK(feedback->IsWeakOrCleared());
if (!feedback->IsCleared() || extra != UninitializedSentinel()) {
- return MONOMORPHIC;
+ return InlineCacheState::MONOMORPHIC;
}
- return UNINITIALIZED;
+ return InlineCacheState::UNINITIALIZED;
}
case FeedbackSlotKind::kStoreNamedSloppy:
@@ -752,32 +752,33 @@ InlineCacheState FeedbackNexus::ic_state() const {
case FeedbackSlotKind::kLoadKeyed:
case FeedbackSlotKind::kHasKeyed: {
if (feedback == UninitializedSentinel()) {
- return UNINITIALIZED;
+ return InlineCacheState::UNINITIALIZED;
}
if (feedback == MegamorphicSentinel()) {
- return MEGAMORPHIC;
+ return InlineCacheState::MEGAMORPHIC;
}
if (feedback == MegaDOMSentinel()) {
DCHECK(IsLoadICKind(kind()));
- return MEGADOM;
+ return InlineCacheState::MEGADOM;
}
if (feedback->IsWeakOrCleared()) {
// Don't check if the map is cleared.
- return MONOMORPHIC;
+ return InlineCacheState::MONOMORPHIC;
}
HeapObject heap_object;
if (feedback->GetHeapObjectIfStrong(&heap_object)) {
if (heap_object.IsWeakFixedArray()) {
// Determine state purely by our structure, don't check if the maps
// are cleared.
- return POLYMORPHIC;
+ return InlineCacheState::POLYMORPHIC;
}
if (heap_object.IsName()) {
DCHECK(IsKeyedLoadICKind(kind()) || IsKeyedStoreICKind(kind()) ||
IsKeyedHasICKind(kind()) || IsDefineOwnICKind(kind()));
Object extra_object = extra->GetHeapObjectAssumeStrong();
WeakFixedArray extra_array = WeakFixedArray::cast(extra_object);
- return extra_array.length() > 2 ? POLYMORPHIC : MONOMORPHIC;
+ return extra_array.length() > 2 ? InlineCacheState::POLYMORPHIC
+ : InlineCacheState::MONOMORPHIC;
}
}
UNREACHABLE();
@@ -785,97 +786,97 @@ InlineCacheState FeedbackNexus::ic_state() const {
case FeedbackSlotKind::kCall: {
HeapObject heap_object;
if (feedback == MegamorphicSentinel()) {
- return GENERIC;
+ return InlineCacheState::GENERIC;
} else if (feedback->IsWeakOrCleared()) {
if (feedback->GetHeapObjectIfWeak(&heap_object)) {
if (heap_object.IsFeedbackCell()) {
- return POLYMORPHIC;
+ return InlineCacheState::POLYMORPHIC;
}
CHECK(heap_object.IsJSFunction() || heap_object.IsJSBoundFunction());
}
- return MONOMORPHIC;
+ return InlineCacheState::MONOMORPHIC;
} else if (feedback->GetHeapObjectIfStrong(&heap_object) &&
heap_object.IsAllocationSite()) {
- return MONOMORPHIC;
+ return InlineCacheState::MONOMORPHIC;
}
CHECK_EQ(feedback, UninitializedSentinel());
- return UNINITIALIZED;
+ return InlineCacheState::UNINITIALIZED;
}
case FeedbackSlotKind::kBinaryOp: {
BinaryOperationHint hint = GetBinaryOperationFeedback();
if (hint == BinaryOperationHint::kNone) {
- return UNINITIALIZED;
+ return InlineCacheState::UNINITIALIZED;
} else if (hint == BinaryOperationHint::kAny) {
- return GENERIC;
+ return InlineCacheState::GENERIC;
}
- return MONOMORPHIC;
+ return InlineCacheState::MONOMORPHIC;
}
case FeedbackSlotKind::kCompareOp: {
CompareOperationHint hint = GetCompareOperationFeedback();
if (hint == CompareOperationHint::kNone) {
- return UNINITIALIZED;
+ return InlineCacheState::UNINITIALIZED;
} else if (hint == CompareOperationHint::kAny) {
- return GENERIC;
+ return InlineCacheState::GENERIC;
}
- return MONOMORPHIC;
+ return InlineCacheState::MONOMORPHIC;
}
case FeedbackSlotKind::kForIn: {
ForInHint hint = GetForInFeedback();
if (hint == ForInHint::kNone) {
- return UNINITIALIZED;
+ return InlineCacheState::UNINITIALIZED;
} else if (hint == ForInHint::kAny) {
- return GENERIC;
+ return InlineCacheState::GENERIC;
}
- return MONOMORPHIC;
+ return InlineCacheState::MONOMORPHIC;
}
case FeedbackSlotKind::kInstanceOf: {
if (feedback == UninitializedSentinel()) {
- return UNINITIALIZED;
+ return InlineCacheState::UNINITIALIZED;
} else if (feedback == MegamorphicSentinel()) {
- return MEGAMORPHIC;
+ return InlineCacheState::MEGAMORPHIC;
}
- return MONOMORPHIC;
+ return InlineCacheState::MONOMORPHIC;
}
case FeedbackSlotKind::kStoreDataPropertyInLiteral: {
if (feedback == UninitializedSentinel()) {
- return UNINITIALIZED;
+ return InlineCacheState::UNINITIALIZED;
} else if (feedback->IsWeakOrCleared()) {
// Don't check if the map is cleared.
- return MONOMORPHIC;
+ return InlineCacheState::MONOMORPHIC;
}
- return MEGAMORPHIC;
+ return InlineCacheState::MEGAMORPHIC;
}
case FeedbackSlotKind::kTypeProfile: {
if (feedback == UninitializedSentinel()) {
- return UNINITIALIZED;
+ return InlineCacheState::UNINITIALIZED;
}
- return MONOMORPHIC;
+ return InlineCacheState::MONOMORPHIC;
}
case FeedbackSlotKind::kCloneObject: {
if (feedback == UninitializedSentinel()) {
- return UNINITIALIZED;
+ return InlineCacheState::UNINITIALIZED;
}
if (feedback == MegamorphicSentinel()) {
- return MEGAMORPHIC;
+ return InlineCacheState::MEGAMORPHIC;
}
if (feedback->IsWeakOrCleared()) {
- return MONOMORPHIC;
+ return InlineCacheState::MONOMORPHIC;
}
DCHECK(feedback->GetHeapObjectAssumeStrong().IsWeakFixedArray());
- return POLYMORPHIC;
+ return InlineCacheState::POLYMORPHIC;
}
case FeedbackSlotKind::kInvalid:
case FeedbackSlotKind::kKindsNumber:
UNREACHABLE();
}
- return UNINITIALIZED;
+ return InlineCacheState::UNINITIALIZED;
}
void FeedbackNexus::ConfigurePropertyCellMode(Handle<PropertyCell> cell) {
@@ -925,12 +926,12 @@ void FeedbackNexus::ConfigureCloneObject(Handle<Map> source_map,
}
}
switch (ic_state()) {
- case UNINITIALIZED:
+ case InlineCacheState::UNINITIALIZED:
// Cache the first map seen which meets the fast case requirements.
SetFeedback(HeapObjectReference::Weak(*source_map), UPDATE_WRITE_BARRIER,
*result_map);
break;
- case MONOMORPHIC:
+ case InlineCacheState::MONOMORPHIC:
if (feedback.is_null() || feedback.is_identical_to(source_map) ||
Map::cast(*feedback).is_deprecated()) {
SetFeedback(HeapObjectReference::Weak(*source_map),
@@ -947,7 +948,7 @@ void FeedbackNexus::ConfigureCloneObject(Handle<Map> source_map,
HeapObjectReference::ClearedValue(isolate));
}
break;
- case POLYMORPHIC: {
+ case InlineCacheState::POLYMORPHIC: {
const int kMaxElements = FLAG_max_valid_polymorphic_map_count *
kCloneObjectPolymorphicEntrySize;
Handle<WeakFixedArray> array = Handle<WeakFixedArray>::cast(feedback);
@@ -1170,7 +1171,7 @@ Name FeedbackNexus::GetName() const {
KeyedAccessLoadMode FeedbackNexus::GetKeyedAccessLoadMode() const {
DCHECK(IsKeyedLoadICKind(kind()) || IsKeyedHasICKind(kind()));
- if (GetKeyType() == PROPERTY) return STANDARD_LOAD;
+ if (GetKeyType() == IcCheckType::kProperty) return STANDARD_LOAD;
std::vector<MapAndHandler> maps_and_handlers;
ExtractMapsAndHandlers(&maps_and_handlers);
@@ -1238,7 +1239,7 @@ KeyedAccessStoreMode FeedbackNexus::GetKeyedAccessStoreMode() const {
IsStoreDataPropertyInLiteralKind(kind()) || IsDefineOwnICKind(kind()));
KeyedAccessStoreMode mode = STANDARD_STORE;
- if (GetKeyType() == PROPERTY) return mode;
+ if (GetKeyType() == IcCheckType::kProperty) return mode;
std::vector<MapAndHandler> maps_and_handlers;
ExtractMapsAndHandlers(&maps_and_handlers);
@@ -1310,7 +1311,8 @@ IcCheckType FeedbackNexus::GetKeyType() const {
IsStoreDataPropertyInLiteralKind(kind()) || IsDefineOwnICKind(kind())
? pair.second
: feedback;
- return IsPropertyNameFeedback(maybe_name) ? PROPERTY : ELEMENT;
+ return IsPropertyNameFeedback(maybe_name) ? IcCheckType::kProperty
+ : IcCheckType::kElement;
}
BinaryOperationHint FeedbackNexus::GetBinaryOperationFeedback() const {
diff --git a/deps/v8/src/objects/feedback-vector.h b/deps/v8/src/objects/feedback-vector.h
index 0fcb97fcfd..bab91fd497 100644
--- a/deps/v8/src/objects/feedback-vector.h
+++ b/deps/v8/src/objects/feedback-vector.h
@@ -747,9 +747,13 @@ class V8_EXPORT_PRIVATE FeedbackNexus final {
}
InlineCacheState ic_state() const;
- bool IsUninitialized() const { return ic_state() == UNINITIALIZED; }
- bool IsMegamorphic() const { return ic_state() == MEGAMORPHIC; }
- bool IsGeneric() const { return ic_state() == GENERIC; }
+ bool IsUninitialized() const {
+ return ic_state() == InlineCacheState::UNINITIALIZED;
+ }
+ bool IsMegamorphic() const {
+ return ic_state() == InlineCacheState::MEGAMORPHIC;
+ }
+ bool IsGeneric() const { return ic_state() == InlineCacheState::GENERIC; }
void Print(std::ostream& os);
@@ -773,7 +777,7 @@ class V8_EXPORT_PRIVATE FeedbackNexus final {
bool IsCleared() const {
InlineCacheState state = ic_state();
- return !FLAG_use_ic || state == UNINITIALIZED;
+ return !FLAG_use_ic || state == InlineCacheState::UNINITIALIZED;
}
// Clear() returns true if the state of the underlying vector was changed.
diff --git a/deps/v8/src/objects/feedback-vector.tq b/deps/v8/src/objects/feedback-vector.tq
index 5c1fbd4e4e..bc2adf0718 100644
--- a/deps/v8/src/objects/feedback-vector.tq
+++ b/deps/v8/src/objects/feedback-vector.tq
@@ -21,9 +21,9 @@ extern class FeedbackVector extends HeapObject {
// Padding was necessary for GCMole.
flags: FeedbackVectorFlags;
shared_function_info: SharedFunctionInfo;
+ closure_feedback_cell_array: ClosureFeedbackCellArray;
@if(V8_EXTERNAL_CODE_SPACE) maybe_optimized_code: Weak<CodeDataContainer>;
@ifnot(V8_EXTERNAL_CODE_SPACE) maybe_optimized_code: Weak<Code>;
- closure_feedback_cell_array: ClosureFeedbackCellArray;
@cppRelaxedLoad raw_feedback_slots[length]: MaybeObject;
}
diff --git a/deps/v8/src/objects/field-index.h b/deps/v8/src/objects/field-index.h
index 7ccf049269..aa1669b032 100644
--- a/deps/v8/src/objects/field-index.h
+++ b/deps/v8/src/objects/field-index.h
@@ -42,6 +42,8 @@ class FieldIndex final {
int offset() const { return OffsetBits::decode(bit_field_); }
+ uint64_t bit_field() const { return bit_field_; }
+
// Zero-indexed from beginning of the object.
int index() const {
DCHECK(IsAligned(offset(), kTaggedSize));
diff --git a/deps/v8/src/objects/fixed-array-inl.h b/deps/v8/src/objects/fixed-array-inl.h
index a6a404c63a..b9e348f7fd 100644
--- a/deps/v8/src/objects/fixed-array-inl.h
+++ b/deps/v8/src/objects/fixed-array-inl.h
@@ -84,7 +84,6 @@ bool FixedArray::is_the_hole(Isolate* isolate, int index) {
return get(isolate, index).IsTheHole(isolate);
}
-#if !defined(_WIN32) || (defined(_WIN64) && _MSC_VER < 1930 && __cplusplus < 201703L)
void FixedArray::set(int index, Smi value) {
DCHECK_NE(map(), GetReadOnlyRoots().fixed_cow_array_map());
DCHECK_LT(static_cast<unsigned>(index), static_cast<unsigned>(length()));
@@ -92,7 +91,6 @@ void FixedArray::set(int index, Smi value) {
int offset = OffsetOfElementAt(index);
RELAXED_WRITE_FIELD(*this, offset, value);
}
-#endif
void FixedArray::set(int index, Object value) {
DCHECK_NE(GetReadOnlyRoots().fixed_cow_array_map(), map());
@@ -491,6 +489,10 @@ void WeakArrayList::Set(int index, MaybeObject value, WriteBarrierMode mode) {
set_objects(index, value, mode);
}
+void WeakArrayList::Set(int index, Smi value) {
+ Set(index, MaybeObject::FromSmi(value), SKIP_WRITE_BARRIER);
+}
+
MaybeObjectSlot WeakArrayList::data_start() {
return RawMaybeWeakField(kObjectsOffset);
}
diff --git a/deps/v8/src/objects/fixed-array.h b/deps/v8/src/objects/fixed-array.h
index 969f8cb23e..c8f2e19044 100644
--- a/deps/v8/src/objects/fixed-array.h
+++ b/deps/v8/src/objects/fixed-array.h
@@ -134,20 +134,7 @@ class FixedArray
inline bool is_the_hole(Isolate* isolate, int index);
// Setter that doesn't need write barrier.
-#if !defined(_WIN32) || (defined(_WIN64) && _MSC_VER < 1930 && __cplusplus < 201703L)
inline void set(int index, Smi value);
-#else
- inline void set(int index, Smi value) {
-#if !defined(_WIN32)
- DCHECK_NE(map(), GetReadOnlyRoots().fixed_cow_array_map());
-#endif
- DCHECK_LT(static_cast<unsigned>(index), static_cast<unsigned>(length()));
- DCHECK(Object(value).IsSmi());
- int offset = OffsetOfElementAt(index);
- RELAXED_WRITE_FIELD(*this, offset, value);
- }
-#endif
-
// Setter with explicit barrier mode.
inline void set(int index, Object value, WriteBarrierMode mode);
@@ -380,6 +367,7 @@ class WeakArrayList
// instead.
inline void Set(int index, MaybeObject value,
WriteBarrierMode mode = UPDATE_WRITE_BARRIER);
+ inline void Set(int index, Smi value);
static constexpr int SizeForCapacity(int capacity) {
return SizeFor(capacity);
diff --git a/deps/v8/src/objects/foreign.tq b/deps/v8/src/objects/foreign.tq
index be7113769f..0aba494737 100644
--- a/deps/v8/src/objects/foreign.tq
+++ b/deps/v8/src/objects/foreign.tq
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-@apiExposedInstanceTypeValue(0x46)
+@apiExposedInstanceTypeValue(0xcc)
extern class Foreign extends HeapObject {
foreign_address: ExternalPointer;
}
diff --git a/deps/v8/src/objects/free-space.h b/deps/v8/src/objects/free-space.h
index d2f569bb33..275fb781de 100644
--- a/deps/v8/src/objects/free-space.h
+++ b/deps/v8/src/objects/free-space.h
@@ -37,6 +37,8 @@ class FreeSpace : public TorqueGeneratedFreeSpace<FreeSpace, HeapObject> {
// Dispatched behavior.
DECL_PRINTER(FreeSpace)
+ class BodyDescriptor;
+
private:
inline bool IsValid();
diff --git a/deps/v8/src/objects/function-kind.h b/deps/v8/src/objects/function-kind.h
index 9115f8b6ce..da8dfe6f95 100644
--- a/deps/v8/src/objects/function-kind.h
+++ b/deps/v8/src/objects/function-kind.h
@@ -11,7 +11,7 @@
namespace v8 {
namespace internal {
-enum FunctionKind : uint8_t {
+enum class FunctionKind : uint8_t {
// BEGIN constructable functions
kNormalFunction,
kModule,
@@ -66,7 +66,8 @@ enum FunctionKind : uint8_t {
};
constexpr int kFunctionKindBitSize = 5;
-STATIC_ASSERT(kLastFunctionKind < (1 << kFunctionKindBitSize));
+STATIC_ASSERT(static_cast<int>(FunctionKind::kLastFunctionKind) <
+ (1 << kFunctionKindBitSize));
inline bool IsArrowFunction(FunctionKind kind) {
return base::IsInRange(kind, FunctionKind::kArrowFunction,
diff --git a/deps/v8/src/objects/heap-number.h b/deps/v8/src/objects/heap-number.h
index d9d8ddb742..cb66f8bad8 100644
--- a/deps/v8/src/objects/heap-number.h
+++ b/deps/v8/src/objects/heap-number.h
@@ -59,6 +59,8 @@ class HeapNumber
DECL_PRINTER(HeapNumber)
V8_EXPORT_PRIVATE void HeapNumberShortPrint(std::ostream& os);
+ class BodyDescriptor;
+
TQ_OBJECT_CONSTRUCTORS(HeapNumber)
};
diff --git a/deps/v8/src/objects/heap-object.h b/deps/v8/src/objects/heap-object.h
index b7f6c411e0..b9eca4cc5b 100644
--- a/deps/v8/src/objects/heap-object.h
+++ b/deps/v8/src/objects/heap-object.h
@@ -121,10 +121,10 @@ class HeapObject : public Object {
// Iterates over pointers contained in the object (including the Map).
// If it's not performance critical iteration use the non-templatized
// version.
- void Iterate(ObjectVisitor* v);
+ void Iterate(PtrComprCageBase cage_base, ObjectVisitor* v);
template <typename ObjectVisitor>
- inline void IterateFast(ObjectVisitor* v);
+ inline void IterateFast(PtrComprCageBase cage_base, ObjectVisitor* v);
// Iterates over all pointers contained in the object except the
// first map pointer. The object type is given in the first
@@ -132,11 +132,11 @@ class HeapObject : public Object {
// object, and so is safe to call while the map pointer is modified.
// If it's not performance critical iteration use the non-templatized
// version.
- void IterateBody(ObjectVisitor* v);
+ void IterateBody(PtrComprCageBase cage_base, ObjectVisitor* v);
void IterateBody(Map map, int object_size, ObjectVisitor* v);
template <typename ObjectVisitor>
- inline void IterateBodyFast(ObjectVisitor* v);
+ inline void IterateBodyFast(PtrComprCageBase cage_base, ObjectVisitor* v);
template <typename ObjectVisitor>
inline void IterateBodyFast(Map map, int object_size, ObjectVisitor* v);
@@ -147,7 +147,7 @@ class HeapObject : public Object {
V8_EXPORT_PRIVATE bool IsValidSlot(Map map, int offset);
// Returns the heap object's size in bytes
- inline int Size() const;
+ DECL_GETTER(Size, int)
// Given a heap object's map pointer, returns the heap size in bytes
// Useful when the map pointer field is used for other purposes.
@@ -196,12 +196,12 @@ class HeapObject : public Object {
// content depends on FLAG_hash_seed. When the object is deserialized into
// a heap with a different hash seed, these objects need to adapt.
bool NeedsRehashing(InstanceType instance_type) const;
- bool NeedsRehashing() const;
+ bool NeedsRehashing(PtrComprCageBase cage_base) const;
// Rehashing support is not implemented for all objects that need rehashing.
// With objects that need rehashing but cannot be rehashed, rehashing has to
// be disabled.
- bool CanBeRehashed() const;
+ bool CanBeRehashed(PtrComprCageBase cage_base) const;
// Rehash the object based on the layout inferred from its map.
template <typename IsolateT>
diff --git a/deps/v8/src/objects/instance-type-inl.h b/deps/v8/src/objects/instance-type-inl.h
index a56bd7ad12..146a8f5006 100644
--- a/deps/v8/src/objects/instance-type-inl.h
+++ b/deps/v8/src/objects/instance-type-inl.h
@@ -102,41 +102,21 @@ HEAP_OBJECT_TYPE_LIST(DECL_TYPE)
} // namespace InstanceTypeTraits
-#define TYPE_CHECKER(type, ...) \
- bool HeapObject::Is##type() const { \
- PtrComprCageBase cage_base = GetPtrComprCageBase(*this); \
- return HeapObject::Is##type(cage_base); \
- } \
- /* The cage_base passed here is supposed to be the base of the pointer */ \
- /* compression cage where the Map space is allocated. */ \
- /* However when external code space is enabled it's not always the case */ \
- /* yet and the predicate has to work if the cage_base corresponds to the */ \
- /* cage containing external code space. */ \
- /* TODO(v8:11880): Ensure that the cage_base value always corresponds to */ \
- /* the main pointer compression cage. */ \
- bool HeapObject::Is##type(PtrComprCageBase cage_base) const { \
- if (V8_EXTERNAL_CODE_SPACE_BOOL) { \
- if (IsCodeObject(*this)) { \
- /* Code space contains only Code objects and free space fillers. */ \
- if (std::is_same<InstanceTypeTraits::type, \
- InstanceTypeTraits::Code>::value || \
- std::is_same<InstanceTypeTraits::type, \
- InstanceTypeTraits::FreeSpace>::value || \
- std::is_same<InstanceTypeTraits::type, \
- InstanceTypeTraits::FreeSpaceOrFiller>::value) { \
- /* Code space objects are never read-only, so it's safe to query */ \
- /* heap value in order to compute proper cage base. */ \
- Heap* heap = GetHeapFromWritableObject(*this); \
- Map map_object = map(Isolate::FromHeap(heap)); \
- return InstanceTypeChecker::Is##type(map_object.instance_type()); \
- } \
- /* For all the other queries we can return false. */ \
- return false; \
- } \
- /* Fallback to checking map instance type. */ \
- } \
- Map map_object = map(cage_base); \
- return InstanceTypeChecker::Is##type(map_object.instance_type()); \
+#define TYPE_CHECKER(type, ...) \
+ bool HeapObject::Is##type() const { \
+ /* In general, parameterless IsBlah() must not be used for objects */ \
+ /* that might be located in external code space. Note that this version */ \
+ /* is still called from Blah::cast() methods but it's fine because in */ \
+ /* production builds these checks are not enabled anyway and debug */ \
+ /* builds are allowed to be a bit slower. */ \
+ PtrComprCageBase cage_base = GetPtrComprCageBaseSlow(*this); \
+ return HeapObject::Is##type(cage_base); \
+ } \
+ /* The cage_base passed here must be the base of the pointer */ \
+ /* compression cage where the Map space is allocated. */ \
+ bool HeapObject::Is##type(PtrComprCageBase cage_base) const { \
+ Map map_object = map(cage_base); \
+ return InstanceTypeChecker::Is##type(map_object.instance_type()); \
}
// TODO(v8:7786): For instance types that have a single map instance on the
diff --git a/deps/v8/src/objects/instance-type.h b/deps/v8/src/objects/instance-type.h
index 7cc27f23d0..7661561cbb 100644
--- a/deps/v8/src/objects/instance-type.h
+++ b/deps/v8/src/objects/instance-type.h
@@ -16,9 +16,9 @@ namespace v8 {
namespace internal {
// We use the full 16 bits of the instance_type field to encode heap object
-// instance types. All the high-order bits (bits 6-15) are cleared if the object
+// instance types. All the high-order bits (bits 7-15) are cleared if the object
// is a string, and contain set bits if it is not a string.
-const uint32_t kIsNotStringMask = ~((1 << 6) - 1);
+const uint32_t kIsNotStringMask = ~((1 << 7) - 1);
const uint32_t kStringTag = 0x0;
// For strings, bits 0-2 indicate the representation of the string. In
@@ -46,6 +46,16 @@ const uint32_t kStringEncodingMask = 1 << 3;
const uint32_t kTwoByteStringTag = 0;
const uint32_t kOneByteStringTag = 1 << 3;
+// Combined tags for convenience (add more if needed).
+constexpr uint32_t kStringRepresentationAndEncodingMask =
+ kStringRepresentationMask | kStringEncodingMask;
+constexpr uint32_t kSeqOneByteStringTag = kSeqStringTag | kOneByteStringTag;
+constexpr uint32_t kSeqTwoByteStringTag = kSeqStringTag | kTwoByteStringTag;
+constexpr uint32_t kExternalOneByteStringTag =
+ kExternalStringTag | kOneByteStringTag;
+constexpr uint32_t kExternalTwoByteStringTag =
+ kExternalStringTag | kTwoByteStringTag;
+
// For strings, bit 4 indicates whether the data pointer of an external string
// is cached. Note that the string representation is expected to be
// kExternalStringTag.
@@ -58,6 +68,26 @@ const uint32_t kIsNotInternalizedMask = 1 << 5;
const uint32_t kNotInternalizedTag = 1 << 5;
const uint32_t kInternalizedTag = 0;
+// For strings, bit 6 indicates that the string is accessible by more than one
+// thread. Note that a string that is allocated in the shared heap is not
+// accessible by more than one thread until it is explicitly shared (e.g. by
+// postMessage).
+//
+// Runtime code that shares strings with other threads directly needs to manually
+// set this bit.
+//
+// TODO(v8:12007): External strings cannot be shared yet.
+//
+// TODO(v8:12007): This bit is currently ignored on internalized strings, which
+// are either always shared or always not shared depending on
+// FLAG_shared_string_table. This will be hardcoded once
+// FLAG_shared_string_table is removed.
+const uint32_t kSharedStringMask = 1 << 6;
+const uint32_t kSharedStringTag = 1 << 6;
+
+constexpr uint32_t kStringRepresentationEncodingAndSharedMask =
+ kStringRepresentationAndEncodingMask | kSharedStringMask;
+
// A ConsString with an empty string as the right side is a candidate
// for being shortcut by the garbage collector. We don't allocate any
// non-flat internalized strings, so we do not shortcut them thereby
@@ -109,6 +139,11 @@ enum InstanceType : uint16_t {
THIN_STRING_TYPE = kTwoByteStringTag | kThinStringTag | kNotInternalizedTag,
THIN_ONE_BYTE_STRING_TYPE =
kOneByteStringTag | kThinStringTag | kNotInternalizedTag,
+ SHARED_STRING_TYPE = STRING_TYPE | kSharedStringTag,
+ SHARED_ONE_BYTE_STRING_TYPE = ONE_BYTE_STRING_TYPE | kSharedStringTag,
+ SHARED_THIN_STRING_TYPE = THIN_STRING_TYPE | kSharedStringTag,
+ SHARED_THIN_ONE_BYTE_STRING_TYPE =
+ THIN_ONE_BYTE_STRING_TYPE | kSharedStringTag,
// Most instance types are defined in Torque, with the exception of the string
// types above. They are ordered by inheritance hierarchy so that we can easily
diff --git a/deps/v8/src/objects/intl-objects.cc b/deps/v8/src/objects/intl-objects.cc
index f28f406be8..cd735d34e9 100644
--- a/deps/v8/src/objects/intl-objects.cc
+++ b/deps/v8/src/objects/intl-objects.cc
@@ -16,6 +16,7 @@
#include "src/api/api-inl.h"
#include "src/base/strings.h"
#include "src/execution/isolate.h"
+#include "src/execution/local-isolate.h"
#include "src/handles/global-handles.h"
#include "src/heap/factory.h"
#include "src/objects/js-collator-inl.h"
@@ -612,27 +613,6 @@ Maybe<std::string> Intl::ToLanguageTag(const icu::Locale& locale) {
return Just(res);
}
-namespace {
-std::string DefaultLocale(Isolate* isolate) {
- if (isolate->default_locale().empty()) {
- icu::Locale default_locale;
- // Translate ICU's fallback locale to a well-known locale.
- if (strcmp(default_locale.getName(), "en_US_POSIX") == 0 ||
- strcmp(default_locale.getName(), "c") == 0) {
- isolate->set_default_locale("en-US");
- } else {
- // Set the locale
- isolate->set_default_locale(
- default_locale.isBogus()
- ? "und"
- : Intl::ToLanguageTag(default_locale).FromJust());
- }
- DCHECK(!isolate->default_locale().empty());
- }
- return isolate->default_locale();
-}
-} // namespace
-
// See ecma402/#legacy-constructor.
MaybeHandle<Object> Intl::LegacyUnwrapReceiver(Isolate* isolate,
Handle<JSReceiver> receiver,
@@ -894,7 +874,7 @@ MaybeHandle<String> Intl::StringLocaleConvertCase(Isolate* isolate,
return MaybeHandle<String>();
}
std::string requested_locale = requested_locales.size() == 0
- ? DefaultLocale(isolate)
+ ? isolate->DefaultLocale()
: requested_locales[0];
size_t dash = requested_locale.find('-');
if (dash != std::string::npos) {
@@ -932,8 +912,9 @@ MaybeHandle<String> Intl::StringLocaleConvertCase(Isolate* isolate,
}
// static
+template <class IsolateT>
Intl::CompareStringsOptions Intl::CompareStringsOptionsFor(
- Isolate* isolate, Handle<Object> locales, Handle<Object> options) {
+ IsolateT* isolate, Handle<Object> locales, Handle<Object> options) {
if (!options->IsUndefined(isolate)) {
return CompareStringsOptions::kNone;
}
@@ -947,34 +928,27 @@ Intl::CompareStringsOptions Intl::CompareStringsOptionsFor(
// The actual conditions are verified in debug builds in
// CollatorAllowsFastComparison.
static const char* const kFastLocales[] = {
- "en-US", "en", "fr", "es", "de", "pt", "it", "ca", "de-AT", "fi",
- "id", "id-ID", "ms", "nl", "pl", "ro", "sl", "sv", "sw", "vi",
+ "en-US", "en", "fr", "es", "de", "pt", "it", "ca",
+ "de-AT", "fi", "id", "id-ID", "ms", "nl", "pl", "ro",
+ "sl", "sv", "sw", "vi", "en-DE", "en-GB",
};
if (locales->IsUndefined(isolate)) {
- static bool default_is_fast = false;
-
- // The default locale is immutable after initialization.
- static base::OnceType once = V8_ONCE_INIT;
- base::CallOnce(&once, [&]() {
- const std::string& default_locale = DefaultLocale(isolate);
- for (const char* fast_locale : kFastLocales) {
- if (strcmp(fast_locale, default_locale.c_str()) == 0) {
- default_is_fast = true;
- return;
- }
+ const std::string& default_locale = isolate->DefaultLocale();
+ for (const char* fast_locale : kFastLocales) {
+ if (strcmp(fast_locale, default_locale.c_str()) == 0) {
+ return CompareStringsOptions::kTryFastPath;
}
- });
+ }
- return default_is_fast ? CompareStringsOptions::kTryFastPath
- : CompareStringsOptions::kNone;
+ return CompareStringsOptions::kNone;
}
if (!locales->IsString()) return CompareStringsOptions::kNone;
Handle<String> locales_string = Handle<String>::cast(locales);
for (const char* fast_locale : kFastLocales) {
- if (locales_string->IsOneByteEqualTo(base::CStrVector(fast_locale))) {
+ if (locales_string->IsEqualTo(base::CStrVector(fast_locale), isolate)) {
return CompareStringsOptions::kTryFastPath;
}
}
@@ -982,6 +956,12 @@ Intl::CompareStringsOptions Intl::CompareStringsOptionsFor(
return CompareStringsOptions::kNone;
}
+// Instantiations.
+template Intl::CompareStringsOptions Intl::CompareStringsOptionsFor(
+ Isolate*, Handle<Object>, Handle<Object>);
+template Intl::CompareStringsOptions Intl::CompareStringsOptionsFor(
+ LocalIsolate*, Handle<Object>, Handle<Object>);
+
base::Optional<int> Intl::StringLocaleCompare(
Isolate* isolate, Handle<String> string1, Handle<String> string2,
Handle<Object> locales, Handle<Object> options, const char* method_name) {
@@ -1049,7 +1029,7 @@ namespace {
// return [weight_map[x] for x in ws]
//
// def print_weight_list(array_name, ws):
-// print("constexpr uint8_t %s[] = {" % array_name, end = "")
+// print("constexpr uint8_t %s[256] = {" % array_name, end = "")
// i = 0
// for w in ws:
// if (i % 16) == 0:
@@ -1069,7 +1049,7 @@ namespace {
// print_weight_list("kCollationWeightsL3", to_ordinal(l3s))
// clang-format off
-constexpr uint8_t kCollationWeightsL1[] = {
+constexpr uint8_t kCollationWeightsL1[256] = {
0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 2, 3, 4, 5, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
6, 12, 16, 28, 38, 29, 27, 15, 17, 18, 24, 32, 9, 8, 14, 25,
@@ -1079,7 +1059,7 @@ constexpr uint8_t kCollationWeightsL1[] = {
30, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63,
64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 21, 36, 22, 37, 0,
};
-constexpr uint8_t kCollationWeightsL3[] = {
+constexpr uint8_t kCollationWeightsL3[256] = {
0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
@@ -1089,9 +1069,7 @@ constexpr uint8_t kCollationWeightsL3[] = {
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0,
};
-constexpr int kCollationWeightsLength =
- arraysize(kCollationWeightsL1);
-STATIC_ASSERT(kCollationWeightsLength == 128);
+constexpr int kCollationWeightsLength = arraysize(kCollationWeightsL1);
STATIC_ASSERT(kCollationWeightsLength == arraysize(kCollationWeightsL3));
// clang-format on
@@ -1198,6 +1176,12 @@ bool CharIsAsciiOrOutOfBounds(const String::FlatContent& string,
return index >= string_length || isascii(string.Get(index));
}
+bool CharCanFastCompareOrOutOfBounds(const String::FlatContent& string,
+ int string_length, int index) {
+ DCHECK_EQ(string.length(), string_length);
+ return index >= string_length || CanFastCompare(string.Get(index));
+}
+
#ifdef DEBUG
bool USetContainsAllAsciiItem(USet* set) {
static constexpr int kBufferSize = 64;
@@ -1394,7 +1378,15 @@ base::Optional<UCollationResult> TryFastCompareStrings(
// Strings are L1-equal up to their common length, length differences win.
UCollationResult length_result = ToUCollationResult(length1 - length2);
- if (length_result != UCollationResult::UCOL_EQUAL) return length_result;
+ if (length_result != UCollationResult::UCOL_EQUAL) {
+ // Strings of different lengths may still compare as equal if the longer
+ // string has a fully ignored suffix, e.g. "a" vs. "a\u{1}".
+ if (!CharCanFastCompareOrOutOfBounds(flat1, length1, common_length) ||
+ !CharCanFastCompareOrOutOfBounds(flat2, length2, common_length)) {
+ return d.FastCompareFailed(processed_until_out);
+ }
+ return length_result;
+ }
// L1-equal and same length, the L3 result wins.
return d.l3_result;
@@ -1402,6 +1394,19 @@ base::Optional<UCollationResult> TryFastCompareStrings(
} // namespace
+// static
+const uint8_t* Intl::AsciiCollationWeightsL1() {
+ return &kCollationWeightsL1[0];
+}
+
+// static
+const uint8_t* Intl::AsciiCollationWeightsL3() {
+ return &kCollationWeightsL3[0];
+}
+
+// static
+const int Intl::kAsciiCollationWeightsLength = kCollationWeightsLength;
+
// ecma402/#sec-collator-comparestrings
int Intl::CompareStrings(Isolate* isolate, const icu::Collator& icu_collator,
Handle<String> string1, Handle<String> string2,
@@ -1582,9 +1587,6 @@ Maybe<Intl::NumberFormatDigitOptions> Intl::SetNumberFormatDigitOptions(
// 15. Else If mnfd is not undefined or mxfd is not undefined, then
if (!mnfd_obj->IsUndefined(isolate) || !mxfd_obj->IsUndefined(isolate)) {
- Handle<String> mxfd_str = factory->maximumFractionDigits_string();
- Handle<String> mnfd_str = factory->minimumFractionDigits_string();
-
int specified_mnfd;
int specified_mxfd;
@@ -1802,7 +1804,7 @@ icu::LocaleMatcher BuildLocaleMatcher(
Isolate* isolate, const std::set<std::string>& available_locales,
UErrorCode* status) {
icu::Locale default_locale =
- icu::Locale::forLanguageTag(DefaultLocale(isolate), *status);
+ icu::Locale::forLanguageTag(isolate->DefaultLocale(), *status);
icu::LocaleMatcher::Builder builder;
if (U_FAILURE(*status)) {
return builder.build(*status);
@@ -2031,7 +2033,6 @@ class ResourceAvailableCurrencies {
// Work around the issue that we do support the following currency codes
// in DisplayNames but the ICU API is not reporting it.
AddIfAvailable("SVC");
- AddIfAvailable("VES");
AddIfAvailable("XDR");
AddIfAvailable("XSU");
AddIfAvailable("ZWL");
@@ -2373,7 +2374,7 @@ std::string LookupMatcher(Isolate* isolate,
// 3. Let defLocale be DefaultLocale();
// 4. Set result.[[locale]] to defLocale.
// 5. Return result.
- return DefaultLocale(isolate);
+ return isolate->DefaultLocale();
}
} // namespace
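
The intl-objects.cc hunks above feed the new Torque fast path further down in this diff: the L1/L3 weight tables are padded to full 256-entry arrays and exported via Intl::AsciiCollationWeightsL1/L3, the default locale moves onto the isolate so the locale check also works from a LocalIsolate, and comparisons of different-length strings now bail out when the extra suffix might be collation-ignorable. The following sketch only illustrates the shape of that L1-then-L3 comparison with toy weight functions; the real tables are generated from ICU, and a weight of 0 likewise means 'leave the fast path and ask the full collator'.

    #include <algorithm>
    #include <cctype>
    #include <cstddef>
    #include <cstdint>
    #include <optional>
    #include <string_view>

    // Toy weights: L1 ranks characters case-insensitively, L3 breaks ties by
    // case. A weight of 0 marks characters the fast path cannot handle.
    uint8_t L1(char c) {
      unsigned char u = static_cast<unsigned char>(c);
      if (std::isalpha(u)) return static_cast<uint8_t>(std::tolower(u) - 'a' + 1);
      if (c == ' ') return 27;
      return 0;
    }
    uint8_t L3(char c) {
      return std::isupper(static_cast<unsigned char>(c)) ? 2 : 1;
    }

    // Returns <0, 0 or >0 like a collator, or nullopt when it must bail out.
    std::optional<int> FastCompare(std::string_view a, std::string_view b) {
      std::size_t common = std::min(a.size(), b.size());
      int l3_result = 0;
      for (std::size_t i = 0; i < common; ++i) {
        uint8_t wa = L1(a[i]), wb = L1(b[i]);
        if (wa == 0 || wb == 0) return std::nullopt;  // Not fast-comparable.
        if (wa != wb) return wa < wb ? -1 : 1;        // A primary difference wins.
        if (l3_result == 0) l3_result = L3(a[i]) - L3(b[i]);
      }
      if (a.size() != b.size()) {
        // Mirror the new bailout above: the longer string could still collate
        // equal if its suffix is fully ignorable, so only trust the length
        // result when the next character is known to carry weight.
        std::string_view longer = a.size() > b.size() ? a : b;
        if (L1(longer[common]) == 0) return std::nullopt;
        return a.size() < b.size() ? -1 : 1;
      }
      return l3_result;  // L1-equal and same length: the L3 result decides.
    }
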
diff --git a/deps/v8/src/objects/intl-objects.h b/deps/v8/src/objects/intl-objects.h
index f7dab09e57..7ac37894ad 100644
--- a/deps/v8/src/objects/intl-objects.h
+++ b/deps/v8/src/objects/intl-objects.h
@@ -100,8 +100,9 @@ class Intl {
kNone,
kTryFastPath,
};
+ template <class IsolateT>
V8_EXPORT_PRIVATE static CompareStringsOptions CompareStringsOptionsFor(
- Isolate* isolate, Handle<Object> locales, Handle<Object> options);
+ IsolateT* isolate, Handle<Object> locales, Handle<Object> options);
V8_EXPORT_PRIVATE V8_WARN_UNUSED_RESULT static int CompareStrings(
Isolate* isolate, const icu::Collator& collator, Handle<String> s1,
Handle<String> s2,
@@ -273,6 +274,10 @@ class Intl {
static const uint8_t* ToLatin1LowerTable();
+ static const uint8_t* AsciiCollationWeightsL1();
+ static const uint8_t* AsciiCollationWeightsL3();
+ static const int kAsciiCollationWeightsLength;
+
static String ConvertOneByteToLower(String src, String dst);
static const std::set<std::string>& GetAvailableLocales();
diff --git a/deps/v8/src/objects/intl-objects.tq b/deps/v8/src/objects/intl-objects.tq
index 61f85facfd..672f419657 100644
--- a/deps/v8/src/objects/intl-objects.tq
+++ b/deps/v8/src/objects/intl-objects.tq
@@ -3,3 +3,128 @@
// found in the LICENSE file.
#include 'src/objects/js-objects.h'
+#include 'src/objects/intl-objects.h'
+
+extern macro IntlAsciiCollationWeightsL1(): RawPtr<uint8>;
+extern macro IntlAsciiCollationWeightsL3(): RawPtr<uint8>;
+const kIntlAsciiCollationWeightsLength:
+ constexpr int31 generates 'Intl::kAsciiCollationWeightsLength';
+
+macro IntlAsciiCollationWeightL1(c: char8): uint8 labels _Bailout {
+ static_assert(kIntlAsciiCollationWeightsLength == 256);
+ return IntlAsciiCollationWeightsL1()[Convert<intptr>(c)];
+}
+macro IntlAsciiCollationWeightL1(c: char16): uint8 labels Bailout {
+ if (Convert<uint32>(c) >= kIntlAsciiCollationWeightsLength) goto Bailout;
+ return IntlAsciiCollationWeightsL1()[Convert<intptr>(c)];
+}
+
+macro IntlAsciiCollationWeightL3(c: char8): uint8 labels _Bailout {
+ static_assert(kIntlAsciiCollationWeightsLength == 256);
+ return IntlAsciiCollationWeightsL3()[Convert<intptr>(c)];
+}
+macro IntlAsciiCollationWeightL3(c: char16): uint8 labels Bailout {
+ if (Convert<uint32>(c) >= kIntlAsciiCollationWeightsLength) goto Bailout;
+ return IntlAsciiCollationWeightsL3()[Convert<intptr>(c)];
+}
+
+macro CheckEmptyOr1Byte(
+ _it: torque_internal::SliceIterator<char8, const &char8>):
+ void labels _Bailout {
+ // char8 values are always <= 0xFF, so there is nothing to check here.
+}
+macro CheckEmptyOr1Byte(
+ it: torque_internal::SliceIterator<char16, const &char16>):
+ void labels Bailout {
+ let it = it;
+ if ((it.Next() otherwise return ) > 0xFF) goto Bailout;
+}
+
+// This fast path works for ASCII-only strings and is based on the assumption
+// that most strings are either bytewise equal or differ on L1 (i.e., not just
+// in capitalization). So we first compare the strings on L1 and only afterwards
+// consider L3. This makes use of the 256-entry L1 and L3 tables defined in
+// src/objects/intl-objects.cc.
+macro LocaleCompareFastPath<T1: type, T2: type>(
+ left: ConstSlice<T1>, right: ConstSlice<T2>): Number labels Bailout {
+ if (EqualContent(left, right)) return 0;
+ let leftIt = left.Iterator();
+ let rightIt = right.Iterator();
+ while (true) {
+ try {
+ const lChar = leftIt.Next() otherwise goto LeftExhausted;
+ const leftWeight = IntlAsciiCollationWeightL1(lChar) otherwise Bailout;
+ if (leftWeight == 0) goto Bailout;
+ // If rightIt is exhausted, we have already checked that the next char of
+ // the left string has a non-zero weight, so it cannot be ignorable or a
+ // combining character.
+ // Return 1 because the right string is shorter and the strings are L1-equal.
+ const rChar = rightIt.Next() otherwise return 1;
+ const rightWeight = IntlAsciiCollationWeightL1(rChar) otherwise Bailout;
+ if (rightWeight == 0) goto Bailout;
+ if (leftWeight == rightWeight) continue;
+ // The result is only valid if the last processed character is not
+ // followed by a unicode combining character (we are overly strict and
+ // restrict to code points up to 0xFF).
+ CheckEmptyOr1Byte(leftIt) otherwise Bailout;
+ CheckEmptyOr1Byte(rightIt) otherwise Bailout;
+ if (leftWeight < rightWeight) return -1;
+ return 1;
+ } label LeftExhausted {
+ const rChar = rightIt.Next() otherwise break;
+ const rightWeight = IntlAsciiCollationWeightL1(rChar) otherwise Bailout;
+ // If the following character might be ignorable or a combining character,
+ // we bail out because the strings might still be considered equal.
+ if (rightWeight == 0) goto Bailout;
+ // Return -1 because the left string is shorter and the strings are L1-equal.
+ return -1;
+ }
+ }
+ leftIt = left.Iterator();
+ rightIt = right.Iterator();
+ while (true) {
+ const lChar = leftIt.Next() otherwise unreachable;
+ const leftWeight = IntlAsciiCollationWeightL3(lChar) otherwise unreachable;
+ dcheck(leftWeight != 0);
+ const rChar = rightIt.Next() otherwise unreachable;
+ const rightWeight = IntlAsciiCollationWeightL3(rChar) otherwise unreachable;
+ dcheck(rightWeight != 0);
+ dcheck(
+ IntlAsciiCollationWeightL1(lChar) otherwise unreachable ==
+ IntlAsciiCollationWeightL1(rChar) otherwise unreachable);
+ if (leftWeight == rightWeight) continue;
+ if (leftWeight < rightWeight) return -1;
+ return 1;
+ }
+ VerifiedUnreachable();
+}
+
+transitioning builtin StringFastLocaleCompare(implicit context: Context)(
+ localeCompareFn: JSFunction, left: JSAny, right: JSAny,
+ locales: JSAny): JSAny {
+ if (TaggedEqual(left, right)) return SmiConstant(0);
+ try {
+ const left = Cast<String>(left) otherwise Bailout;
+ StringToSlice(left) otherwise LeftOneByte, LeftTwoByte;
+ } label LeftOneByte(leftSlice: ConstSlice<char8>) {
+ try {
+ const right = Cast<String>(right) otherwise Bailout;
+ StringToSlice(right) otherwise RightOneByte, RightTwoByte;
+ } label RightOneByte(rightSlice: ConstSlice<char8>) {
+ return LocaleCompareFastPath(leftSlice, rightSlice) otherwise Bailout;
+ } label RightTwoByte(rightSlice: ConstSlice<char16>) {
+ return LocaleCompareFastPath(leftSlice, rightSlice) otherwise Bailout;
+ }
+ } label LeftTwoByte(leftSlice: ConstSlice<char16>) {
+ try {
+ const right = Cast<String>(right) otherwise Bailout;
+ StringToSlice(right) otherwise RightOneByte, RightTwoByte;
+ } label RightOneByte(rightSlice: ConstSlice<char8>) {
+ return LocaleCompareFastPath(leftSlice, rightSlice) otherwise Bailout;
+ } label RightTwoByte(rightSlice: ConstSlice<char16>) {
+ return LocaleCompareFastPath(leftSlice, rightSlice) otherwise Bailout;
+ }
+ } label Bailout deferred {
+ return Call(context, localeCompareFn, left, right, locales);
+ }
+}
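
The Torque code above mirrors V8's 256-entry ASCII collation weight tables: the fast path compares primary (L1) weights first and falls back to tertiary (L3) weights only for L1-equal strings of equal length, bailing out to the full ICU collator whenever a character has no usable weight. A compact C++ sketch of the same two-pass idea, using toy weight functions (ToyL1/ToyL3 are illustrative, not the real tables):

// Hedged sketch (not the generated Torque): the two-pass L1-then-L3 comparison
// in plain C++ with toy weight functions instead of V8's weight tables.
#include <cstdint>
#include <cstdio>
#include <optional>
#include <string>

namespace {

// Hypothetical primary weight: case-insensitive; 0 means "cannot fast-path".
uint8_t ToyL1(unsigned char c) {
  if (c >= 'a' && c <= 'z') return c;
  if (c >= 'A' && c <= 'Z') return c - 'A' + 'a';
  if (c >= '0' && c <= '9') return c;
  return 0;
}

// Hypothetical tertiary weight: lower case sorts before upper case here.
uint8_t ToyL3(unsigned char c) { return (c >= 'A' && c <= 'Z') ? 2 : 1; }

// nullopt means "bail out to the full ICU collator".
std::optional<int> ToyLocaleCompare(const std::string& a, const std::string& b) {
  // Pass 1: primary (L1) weights; bail on any character with weight 0.
  size_t i = 0;
  for (; i < a.size() && i < b.size(); i++) {
    uint8_t wa = ToyL1(a[i]), wb = ToyL1(b[i]);
    if (wa == 0 || wb == 0) return std::nullopt;
    if (wa != wb) return wa < wb ? -1 : 1;
  }
  if (a.size() != b.size()) {
    const std::string& longer = a.size() > b.size() ? a : b;
    if (ToyL1(longer[i]) == 0) return std::nullopt;  // possibly ignorable suffix
    return a.size() < b.size() ? -1 : 1;
  }
  // Pass 2: strings are L1-equal and of equal length; tertiary (L3) decides.
  for (size_t j = 0; j < a.size(); j++) {
    uint8_t wa = ToyL3(a[j]), wb = ToyL3(b[j]);
    if (wa != wb) return wa < wb ? -1 : 1;
  }
  return 0;
}

}  // namespace

int main() {
  std::printf("%d\n", *ToyLocaleCompare("apple", "Banana"));          // -1: L1 decides
  std::printf("%d\n", *ToyLocaleCompare("foo", "Foo"));               // -1: L3 tiebreak
  std::printf("%d\n", ToyLocaleCompare("a*b", "a-b").has_value());    // 0: no weight, bail out
}
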
diff --git a/deps/v8/src/objects/js-array-buffer-inl.h b/deps/v8/src/objects/js-array-buffer-inl.h
index eea3a94a80..0fd66630ca 100644
--- a/deps/v8/src/objects/js-array-buffer-inl.h
+++ b/deps/v8/src/objects/js-array-buffer-inl.h
@@ -36,12 +36,30 @@ void JSArrayBuffer::set_byte_length(size_t value) {
}
DEF_GETTER(JSArrayBuffer, backing_store, void*) {
- return reinterpret_cast<void*>(ReadField<Address>(kBackingStoreOffset));
+ Address value = ReadCagedPointerField(kBackingStoreOffset, cage_base);
+ return reinterpret_cast<void*>(value);
}
-void JSArrayBuffer::set_backing_store(void* value) {
+void JSArrayBuffer::set_backing_store(Isolate* isolate, void* value) {
DCHECK(IsValidBackingStorePointer(value));
- WriteField<Address>(kBackingStoreOffset, reinterpret_cast<Address>(value));
+ Address addr = reinterpret_cast<Address>(value);
+ WriteCagedPointerField(kBackingStoreOffset, isolate, addr);
+}
+
+std::shared_ptr<BackingStore> JSArrayBuffer::GetBackingStore() const {
+ if (!extension()) return nullptr;
+ return extension()->backing_store();
+}
+
+size_t JSArrayBuffer::GetByteLength() const {
+ if V8_UNLIKELY (is_shared() && is_resizable()) {
+ // Invariant: byte_length for GSAB is 0 (it needs to be read from the
+ // BackingStore).
+ DCHECK_EQ(0, byte_length());
+
+ return GetBackingStore()->byte_length(std::memory_order_seq_cst);
+ }
+ return byte_length();
}
uint32_t JSArrayBuffer::GetBackingStoreRefForDeserialization() const {
@@ -114,20 +132,6 @@ uint32_t* JSArrayBuffer::extension_hi() const {
}
#endif
-size_t JSArrayBuffer::allocation_length() const {
- if (backing_store() == nullptr) {
- return 0;
- }
- return byte_length();
-}
-
-void* JSArrayBuffer::allocation_base() const {
- if (backing_store() == nullptr) {
- return nullptr;
- }
- return backing_store();
-}
-
void JSArrayBuffer::clear_padding() {
if (FIELD_SIZE(kOptionalPaddingOffset) != 0) {
DCHECK_EQ(4, FIELD_SIZE(kOptionalPaddingOffset));
@@ -158,6 +162,13 @@ BIT_FIELD_ACCESSORS(JSArrayBuffer, bit_field, is_shared,
BIT_FIELD_ACCESSORS(JSArrayBuffer, bit_field, is_resizable,
JSArrayBuffer::IsResizableBit)
+bool JSArrayBuffer::IsEmpty() const {
+ auto backing_store = GetBackingStore();
+ bool is_empty = !backing_store || backing_store->IsEmpty();
+ DCHECK_IMPLIES(is_empty, byte_length() == 0);
+ return is_empty;
+}
+
size_t JSArrayBufferView::byte_offset() const {
return ReadField<size_t>(kByteOffsetOffset);
}
@@ -178,12 +189,12 @@ bool JSArrayBufferView::WasDetached() const {
return JSArrayBuffer::cast(buffer()).was_detached();
}
-BIT_FIELD_ACCESSORS(JSTypedArray, bit_field, is_length_tracking,
- JSTypedArray::IsLengthTrackingBit)
-BIT_FIELD_ACCESSORS(JSTypedArray, bit_field, is_backed_by_rab,
- JSTypedArray::IsBackedByRabBit)
+BIT_FIELD_ACCESSORS(JSArrayBufferView, bit_field, is_length_tracking,
+ JSArrayBufferView::IsLengthTrackingBit)
+BIT_FIELD_ACCESSORS(JSArrayBufferView, bit_field, is_backed_by_rab,
+ JSArrayBufferView::IsBackedByRabBit)
-bool JSTypedArray::IsVariableLength() const {
+bool JSArrayBufferView::IsVariableLength() const {
return is_length_tracking() || is_backed_by_rab();
}
@@ -240,16 +251,12 @@ void JSTypedArray::set_length(size_t value) {
}
DEF_GETTER(JSTypedArray, external_pointer, Address) {
- return ReadField<Address>(kExternalPointerOffset);
-}
-
-DEF_GETTER(JSTypedArray, external_pointer_raw, Address) {
- return ReadField<Address>(kExternalPointerOffset);
+ return ReadCagedPointerField(kExternalPointerOffset, cage_base);
}
void JSTypedArray::set_external_pointer(Isolate* isolate, Address value) {
DCHECK(IsValidBackingStorePointer(reinterpret_cast<void*>(value)));
- WriteField<Address>(kExternalPointerOffset, value);
+ WriteCagedPointerField(kExternalPointerOffset, isolate, value);
}
Address JSTypedArray::ExternalPointerCompensationForOnHeapArray(
@@ -274,19 +281,17 @@ void JSTypedArray::SetExternalBackingStoreRefForSerialization(uint32_t ref) {
void JSTypedArray::RemoveExternalPointerCompensationForSerialization(
Isolate* isolate) {
DCHECK(is_on_heap());
- // TODO(v8:10391): once we have an external table, avoid the need for
- // compensation by replacing external_pointer and base_pointer fields
- // with one data_pointer field which can point to either external data
- // backing store or into on-heap backing store.
Address offset =
external_pointer() - ExternalPointerCompensationForOnHeapArray(isolate);
-#ifdef V8_HEAP_SANDBOX
- // Write decompensated offset directly to the external pointer field, thus
- // allowing the offset to be propagated through serialization-deserialization.
- WriteField<ExternalPointer_t>(kExternalPointerOffset, offset);
-#else
- set_external_pointer(isolate, offset);
-#endif
+ WriteField<Address>(kExternalPointerOffset, offset);
+}
+
+void JSTypedArray::AddExternalPointerCompensationForDeserialization(
+ Isolate* isolate) {
+ DCHECK(is_on_heap());
+ Address pointer = ReadField<Address>(kExternalPointerOffset) +
+ ExternalPointerCompensationForOnHeapArray(isolate);
+ set_external_pointer(isolate, pointer);
}
void* JSTypedArray::DataPtr() {
@@ -313,14 +318,6 @@ void JSTypedArray::SetOffHeapDataPtr(Isolate* isolate, void* base,
DCHECK_EQ(address, reinterpret_cast<Address>(DataPtr()));
}
-void JSTypedArray::SetOnHeapDataPtr(Isolate* isolate, HeapObject base,
- Address offset) {
- set_base_pointer(base);
- set_external_pointer(
- isolate, offset + ExternalPointerCompensationForOnHeapArray(isolate));
- DCHECK_EQ(base.ptr() + offset, reinterpret_cast<Address>(DataPtr()));
-}
-
bool JSTypedArray::is_on_heap() const {
// Keep synced with `is_on_heap(AcquireLoadTag)`.
DisallowGarbageCollection no_gc;
@@ -369,12 +366,14 @@ MaybeHandle<JSTypedArray> JSTypedArray::Validate(Isolate* isolate,
}
DEF_GETTER(JSDataView, data_pointer, void*) {
- return reinterpret_cast<void*>(ReadField<Address>(kDataPointerOffset));
+ Address value = ReadCagedPointerField(kDataPointerOffset, cage_base);
+ return reinterpret_cast<void*>(value);
}
-void JSDataView::set_data_pointer(Isolate* isolate, void* value) {
- DCHECK(IsValidBackingStorePointer(value));
- WriteField<Address>(kDataPointerOffset, reinterpret_cast<Address>(value));
+void JSDataView::set_data_pointer(Isolate* isolate, void* ptr) {
+ DCHECK(IsValidBackingStorePointer(ptr));
+ Address value = reinterpret_cast<Address>(ptr);
+ WriteCagedPointerField(kDataPointerOffset, isolate, value);
}
} // namespace internal
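
The accessors above switch the backing-store and data-pointer fields from raw Address reads to caged-pointer reads and writes. A hedged, generic sketch of the caging idea (names and encoding are illustrative, not V8's): keep pointers as offsets into one reserved region so that even a corrupted field can only resolve inside that region.

// Hedged illustration of a generic "caged pointer" scheme; not V8's encoding.
#include <cassert>
#include <cstdint>

struct Cage {
  uintptr_t base;  // start of the reserved virtual address space
  uintptr_t size;  // power-of-two size of the cage
};

// Encode: keep only the offset within the cage.
inline uintptr_t EncodeCagedPointer(const Cage& cage, void* ptr) {
  uintptr_t addr = reinterpret_cast<uintptr_t>(ptr);
  assert(addr >= cage.base && addr - cage.base < cage.size);
  return addr - cage.base;
}

// Decode: mask the stored value so a corrupted field still lands in the cage.
inline void* DecodeCagedPointer(const Cage& cage, uintptr_t field) {
  return reinterpret_cast<void*>(cage.base + (field & (cage.size - 1)));
}

int main() {
  alignas(16) static char region[1 << 12];  // stand-in for the reserved cage
  Cage cage{reinterpret_cast<uintptr_t>(region), 1 << 12};
  void* p = region + 128;
  uintptr_t field = EncodeCagedPointer(cage, p);
  assert(DecodeCagedPointer(cage, field) == p);
}
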
diff --git a/deps/v8/src/objects/js-array-buffer.cc b/deps/v8/src/objects/js-array-buffer.cc
index fd9f3133a5..dac3c8b563 100644
--- a/deps/v8/src/objects/js-array-buffer.cc
+++ b/deps/v8/src/objects/js-array-buffer.cc
@@ -56,7 +56,7 @@ void JSArrayBuffer::Setup(SharedFlag shared, ResizableFlag resizable,
}
set_extension(nullptr);
if (!backing_store) {
- set_backing_store(nullptr);
+ set_backing_store(GetIsolate(), EmptyBackingStoreBuffer());
set_byte_length(0);
set_max_byte_length(0);
} else {
@@ -76,13 +76,21 @@ void JSArrayBuffer::Attach(std::shared_ptr<BackingStore> backing_store) {
!backing_store->is_wasm_memory() && !backing_store->is_resizable(),
backing_store->byte_length() == backing_store->max_byte_length());
DCHECK(!was_detached());
- set_backing_store(backing_store->buffer_start());
+ DCHECK(IsValidBackingStorePointer(backing_store->buffer_start()));
+ Isolate* isolate = GetIsolate();
+
+ if (backing_store->IsEmpty()) {
+ set_backing_store(isolate, EmptyBackingStoreBuffer());
+ } else {
+ DCHECK_NE(nullptr, backing_store->buffer_start());
+ set_backing_store(isolate, backing_store->buffer_start());
+ }
+
if (is_shared() && is_resizable()) {
// GSABs need to read their byte_length from the BackingStore. Maintain the
// invariant that their byte_length field is always 0.
set_byte_length(0);
} else {
- CHECK_LE(backing_store->byte_length(), kMaxByteLength);
set_byte_length(backing_store->byte_length());
}
set_max_byte_length(backing_store->max_byte_length());
@@ -92,7 +100,7 @@ void JSArrayBuffer::Attach(std::shared_ptr<BackingStore> backing_store) {
size_t bytes = backing_store->PerIsolateAccountingLength();
extension->set_accounting_length(bytes);
extension->set_backing_store(std::move(backing_store));
- GetIsolate()->heap()->AppendArrayBufferExtension(*this, extension);
+ isolate->heap()->AppendArrayBufferExtension(*this, extension);
}
void JSArrayBuffer::Detach(bool force_for_wasm_memory) {
@@ -121,25 +129,22 @@ void JSArrayBuffer::Detach(bool force_for_wasm_memory) {
DCHECK(!is_shared());
DCHECK(!is_asmjs_memory());
- set_backing_store(nullptr);
+ set_backing_store(isolate, EmptyBackingStoreBuffer());
set_byte_length(0);
set_was_detached(true);
}
-std::shared_ptr<BackingStore> JSArrayBuffer::GetBackingStore() const {
- if (!extension()) return nullptr;
- return extension()->backing_store();
-}
-
-size_t JSArrayBuffer::GetByteLength() const {
- if V8_UNLIKELY (is_shared() && is_resizable()) {
- // Invariant: byte_length for GSAB is 0 (it needs to be read from the
- // BackingStore).
- DCHECK_EQ(0, byte_length());
-
- return GetBackingStore()->byte_length(std::memory_order_seq_cst);
- }
- return byte_length();
+size_t JSArrayBuffer::GsabByteLength(Isolate* isolate,
+ Address raw_array_buffer) {
+ // TODO(v8:11111): Cache the last seen length in JSArrayBuffer and use it
+ // in bounds checks to minimize the need for calling this function.
+ DCHECK(FLAG_harmony_rab_gsab);
+ DisallowGarbageCollection no_gc;
+ DisallowJavascriptExecution no_js(isolate);
+ JSArrayBuffer buffer = JSArrayBuffer::cast(Object(raw_array_buffer));
+ CHECK(buffer.is_resizable());
+ CHECK(buffer.is_shared());
+ return buffer.GetBackingStore()->byte_length(std::memory_order_seq_cst);
}
ArrayBufferExtension* JSArrayBuffer::EnsureExtension() {
@@ -195,7 +200,7 @@ Handle<JSArrayBuffer> JSTypedArray::GetBuffer() {
DCHECK(!array_buffer->is_resizable());
// The existing array buffer should be empty.
- DCHECK_NULL(array_buffer->backing_store());
+ DCHECK(array_buffer->IsEmpty());
// Allocate a new backing store and attach it to the existing array buffer.
size_t byte_length = self->byte_length();
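
GsabByteLength and the inlined GetByteLength rely on the invariant restated in the comments: a growable shared ArrayBuffer keeps its object-side byte_length at 0, and the live length is read from the shared BackingStore with sequentially consistent ordering. A small stand-alone sketch of that dispatch, using toy types rather than V8's:

// Hedged sketch of the GSAB length invariant; plain C++, not V8 types.
#include <atomic>
#include <cassert>
#include <cstddef>
#include <memory>

struct ToyBackingStore {
  std::atomic<size_t> byte_length{0};
  size_t max_byte_length = 0;
};

struct ToyArrayBuffer {
  bool is_shared = false;
  bool is_resizable = false;
  size_t byte_length = 0;  // stays 0 for growable shared buffers
  std::shared_ptr<ToyBackingStore> backing_store;

  size_t GetByteLength() const {
    if (is_shared && is_resizable) {
      assert(byte_length == 0);  // the invariant the DCHECK enforces
      return backing_store->byte_length.load(std::memory_order_seq_cst);
    }
    return byte_length;
  }
};

int main() {
  ToyArrayBuffer gsab;
  gsab.is_shared = gsab.is_resizable = true;
  gsab.backing_store = std::make_shared<ToyBackingStore>();
  gsab.backing_store->byte_length.store(64);
  return gsab.GetByteLength() == 64 ? 0 : 1;
}
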
diff --git a/deps/v8/src/objects/js-array-buffer.h b/deps/v8/src/objects/js-array-buffer.h
index dadc85659b..8e5446c687 100644
--- a/deps/v8/src/objects/js-array-buffer.h
+++ b/deps/v8/src/objects/js-array-buffer.h
@@ -37,17 +37,13 @@ class JSArrayBuffer
DECL_PRIMITIVE_ACCESSORS(byte_length, size_t)
// [backing_store]: backing memory for this array
+ // It should not be assumed that this will be nullptr for empty ArrayBuffers.
DECL_GETTER(backing_store, void*)
- inline void set_backing_store(void* value);
+ inline void set_backing_store(Isolate* isolate, void* value);
// [extension]: extension object used for GC
DECL_PRIMITIVE_ACCESSORS(extension, ArrayBufferExtension*)
- // For non-wasm, allocation_length and allocation_base are byte_length and
- // backing_store, respectively.
- inline size_t allocation_length() const;
- inline void* allocation_base() const;
-
// [bit_field]: boolean flags
DECL_PRIMITIVE_ACCESSORS(bit_field, uint32_t)
@@ -80,6 +76,13 @@ class JSArrayBuffer
// GrowableSharedArrayBuffer.
DECL_BOOLEAN_ACCESSORS(is_resizable)
+ // An ArrayBuffer is empty if its BackingStore is empty or if there is none.
+ // An empty ArrayBuffer will have a byte_length of zero but not necessarily a
+ // nullptr backing_store. Conversely, an ArrayBuffer with a byte_length of
+ // zero is not necessarily empty: it may be a GrowableSharedArrayBuffer.
+ // An ArrayBuffer with a size greater than zero is never empty.
+ DECL_GETTER(IsEmpty, bool)
+
// Initializes the fields of the ArrayBuffer. The provided backing_store can
// be nullptr. If it is not nullptr, then the function registers it with
// src/heap/array-buffer-tracker.h.
@@ -104,9 +107,11 @@ class JSArrayBuffer
// Get a reference to backing store of this array buffer, if there is a
// backing store. Returns nullptr if there is no backing store (e.g. detached
// or a zero-length array buffer).
- std::shared_ptr<BackingStore> GetBackingStore() const;
+ inline std::shared_ptr<BackingStore> GetBackingStore() const;
- size_t GetByteLength() const;
+ inline size_t GetByteLength() const;
+
+ static size_t GsabByteLength(Isolate* isolate, Address raw_array_buffer);
// Allocates an ArrayBufferExtension for this array buffer, unless it is
// already associated with an extension.
@@ -236,8 +241,15 @@ class JSArrayBufferView
DECL_VERIFIER(JSArrayBufferView)
+ // Bit positions for [bit_field].
+ DEFINE_TORQUE_GENERATED_JS_ARRAY_BUFFER_VIEW_FLAGS()
+
inline bool WasDetached() const;
+ DECL_BOOLEAN_ACCESSORS(is_length_tracking)
+ DECL_BOOLEAN_ACCESSORS(is_backed_by_rab)
+ inline bool IsVariableLength() const;
+
static constexpr int kEndOfTaggedFieldsOffset = kByteOffsetOffset;
STATIC_ASSERT(IsAligned(kByteOffsetOffset, kUIntptrSize));
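
With is_length_tracking and is_backed_by_rab now living on JSArrayBufferView, the two bits jointly distinguish the four DataView/TypedArray kinds listed in the .tq comment further down. A hedged sketch of that classification (enum and names are illustrative, not V8's):

// Hedged illustration of how the two view flags classify the four kinds.
#include <cstdio>

enum class ViewKind {
  kFixedOnABorGSAB,       // !is_backed_by_rab && !is_length_tracking
  kFixedOnRAB,            //  is_backed_by_rab && !is_length_tracking (can go OOB)
  kLengthTrackingOnRAB,   //  is_backed_by_rab &&  is_length_tracking
  kLengthTrackingOnGSAB,  // !is_backed_by_rab &&  is_length_tracking
};

constexpr ViewKind Classify(bool is_length_tracking, bool is_backed_by_rab) {
  if (is_backed_by_rab) {
    return is_length_tracking ? ViewKind::kLengthTrackingOnRAB
                              : ViewKind::kFixedOnRAB;
  }
  return is_length_tracking ? ViewKind::kLengthTrackingOnGSAB
                            : ViewKind::kFixedOnABorGSAB;
}

constexpr bool IsVariableLength(bool is_length_tracking, bool is_backed_by_rab) {
  return is_length_tracking || is_backed_by_rab;
}

int main() {
  std::printf("%d\n", static_cast<int>(Classify(false, true)));  // kFixedOnRAB
  std::printf("%d\n", IsVariableLength(true, false));            // 1
}
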
@@ -253,9 +265,6 @@ class JSTypedArray
// eventually.
static constexpr size_t kMaxLength = v8::TypedArray::kMaxLength;
- // Bit positions for [bit_field].
- DEFINE_TORQUE_GENERATED_JS_TYPED_ARRAY_FLAGS()
-
// [length]: length of typed array in elements.
DECL_PRIMITIVE_GETTER(length, size_t)
@@ -280,16 +289,11 @@ class JSTypedArray
inline void* DataPtr();
inline void SetOffHeapDataPtr(Isolate* isolate, void* base, Address offset);
- inline void SetOnHeapDataPtr(Isolate* isolate, HeapObject base,
- Address offset);
// Whether the buffer's backing store is on-heap or off-heap.
inline bool is_on_heap() const;
inline bool is_on_heap(AcquireLoadTag tag) const;
- DECL_BOOLEAN_ACCESSORS(is_length_tracking)
- DECL_BOOLEAN_ACCESSORS(is_backed_by_rab)
- inline bool IsVariableLength() const;
inline size_t GetLengthOrOutOfBounds(bool& out_of_bounds) const;
inline size_t GetLength() const;
@@ -323,6 +327,9 @@ class JSTypedArray
// Subtracts external pointer compensation from the external pointer value.
inline void RemoveExternalPointerCompensationForSerialization(
Isolate* isolate);
+ // Adds external pointer compensation to the external pointer value.
+ inline void AddExternalPointerCompensationForDeserialization(
+ Isolate* isolate);
static inline MaybeHandle<JSTypedArray> Validate(Isolate* isolate,
Handle<Object> receiver,
@@ -332,8 +339,9 @@ class JSTypedArray
DECL_PRINTER(JSTypedArray)
DECL_VERIFIER(JSTypedArray)
- STATIC_ASSERT(IsAligned(kLengthOffset, kUIntptrSize));
- STATIC_ASSERT(IsAligned(kExternalPointerOffset, kSystemPointerSize));
+ // TODO(v8:9287): Re-enable when GCMole stops mixing 32/64 bit configs.
+ // STATIC_ASSERT(IsAligned(kLengthOffset, kTaggedSize));
+ // STATIC_ASSERT(IsAligned(kExternalPointerOffset, kTaggedSize));
static const int kSizeWithEmbedderFields =
kHeaderSize +
@@ -358,7 +366,6 @@ class JSTypedArray
inline size_t LengthUnchecked() const;
DECL_GETTER(external_pointer, Address)
- DECL_GETTER(external_pointer_raw, ExternalPointer_t)
DECL_SETTER(base_pointer, Object)
DECL_RELEASE_SETTER(base_pointer, Object)
@@ -379,7 +386,8 @@ class JSDataView
DECL_PRINTER(JSDataView)
DECL_VERIFIER(JSDataView)
- STATIC_ASSERT(IsAligned(kDataPointerOffset, kUIntptrSize));
+ // TODO(v8:9287): Re-enable when GCMole stops mixing 32/64 bit configs.
+ // STATIC_ASSERT(IsAligned(kDataPointerOffset, kTaggedSize));
static const int kSizeWithEmbedderFields =
kHeaderSize +
diff --git a/deps/v8/src/objects/js-array-buffer.tq b/deps/v8/src/objects/js-array-buffer.tq
index 914720457d..7fb62b0bb2 100644
--- a/deps/v8/src/objects/js-array-buffer.tq
+++ b/deps/v8/src/objects/js-array-buffer.tq
@@ -40,43 +40,50 @@ macro IsResizableArrayBuffer(buffer: JSArrayBuffer): bool {
return buffer.bit_field.is_resizable;
}
-@abstract
-extern class JSArrayBufferView extends JSObject {
- buffer: JSArrayBuffer;
- byte_offset: uintptr;
- byte_length: uintptr;
-}
-
-// We have 4 different TypedArrays:
+// We have 4 different DataViews & TypedArrays:
// 1) Normal (backed by AB / SAB) or non-length tracking backed by GSAB (can't
-// go oob once constructed) 2) Non-length tracking backed by RAB (can go oob
-// once constructed) 3) Length-tracking backed by RAB (JSArrayBuffer stores the
-// length) 4) Length-tracking backed by GSAB (BackingStore stores the length)
-bitfield struct JSTypedArrayFlags extends uint32 {
+// go oob once constructed)
+// 2) Non-length tracking backed by RAB (can go oob once constructed)
+// 3) Length-tracking backed by RAB (JSArrayBuffer stores the length)
+// 4) Length-tracking backed by GSAB (BackingStore stores the length)
+bitfield struct JSArrayBufferViewFlags extends uint32 {
is_length_tracking: bool: 1 bit;
is_backed_by_rab: bool: 1 bit;
}
-extern class JSTypedArray extends JSArrayBufferView {
- length: uintptr;
- external_pointer: ExternalPointer;
- base_pointer: ByteArray|Smi;
- bit_field: JSTypedArrayFlags;
+@abstract
+extern class JSArrayBufferView extends JSObject {
+ buffer: JSArrayBuffer;
+ byte_offset: uintptr;
+ byte_length: uintptr;
+ bit_field: JSArrayBufferViewFlags;
// Pads header size to be a multiple of kTaggedSize.
@if(TAGGED_SIZE_8_BYTES) optional_padding: uint32;
@ifnot(TAGGED_SIZE_8_BYTES) optional_padding: void;
}
@export
-macro IsVariableLengthTypedArray(array: JSTypedArray): bool {
+macro IsVariableLengthJSArrayBufferView(array: JSArrayBufferView): bool {
return array.bit_field.is_length_tracking || array.bit_field.is_backed_by_rab;
}
@export
-macro IsLengthTrackingTypedArray(array: JSTypedArray): bool {
+macro IsLengthTrackingJSArrayBufferView(array: JSArrayBufferView): bool {
return array.bit_field.is_length_tracking;
}
+extern class JSTypedArray extends JSArrayBufferView {
+ length: uintptr;
+ external_pointer: ExternalPointer;
+ base_pointer: ByteArray|Smi;
+}
+
+@export
+macro IsOnHeapTypedArray(array: JSTypedArray): bool {
+ // See JSTypedArray::is_on_heap()
+ return TaggedNotEqual(array.base_pointer, SmiConstant(0));
+}
+
extern class JSDataView extends JSArrayBufferView {
data_pointer: ExternalPointer;
}
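
IsOnHeapTypedArray above mirrors JSTypedArray::is_on_heap(): a base_pointer equal to Smi zero marks an off-heap data pointer, while on-heap arrays derive their data address from base_pointer plus the (compensated) external_pointer. A toy sketch of that layout, with illustrative types rather than V8's tagged values:

// Hedged illustration only; field types and tagging are simplified.
#include <cstdint>

struct ToyTypedArray {
  uintptr_t base_pointer = 0;      // 0 (Smi zero) means "no on-heap base"
  uintptr_t external_pointer = 0;  // offset (on-heap) or absolute address

  bool is_on_heap() const { return base_pointer != 0; }
  void* DataPtr() const {
    return reinterpret_cast<void*>(base_pointer + external_pointer);
  }
};

int main() {
  ToyTypedArray off_heap;
  off_heap.external_pointer = 0x1000;
  return off_heap.is_on_heap() ? 1 : 0;  // 0: off-heap
}
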
diff --git a/deps/v8/src/objects/js-collator.cc b/deps/v8/src/objects/js-collator.cc
index dea0ce0422..5caf8d2ec9 100644
--- a/deps/v8/src/objects/js-collator.cc
+++ b/deps/v8/src/objects/js-collator.cc
@@ -401,9 +401,9 @@ MaybeHandle<JSCollator> JSCollator::New(Isolate* isolate, Handle<Map> map,
// This will need to be filtered out when creating the
// resolvedOptions object.
if (usage == Usage::SEARCH) {
- UErrorCode status = U_ZERO_ERROR;
- icu_locale.setUnicodeKeywordValue("co", "search", status);
- DCHECK(U_SUCCESS(status));
+ UErrorCode set_status = U_ZERO_ERROR;
+ icu_locale.setUnicodeKeywordValue("co", "search", set_status);
+ DCHECK(U_SUCCESS(set_status));
} else {
if (collation_str != nullptr &&
Intl::IsValidCollation(icu_locale, collation_str.get())) {
diff --git a/deps/v8/src/objects/js-date-time-format.cc b/deps/v8/src/objects/js-date-time-format.cc
index 2258a1ffdf..923580e6e6 100644
--- a/deps/v8/src/objects/js-date-time-format.cc
+++ b/deps/v8/src/objects/js-date-time-format.cc
@@ -145,16 +145,14 @@ static std::vector<PatternItem> BuildPatternItems() {
kNarrowLongShort2DigitNumeric));
items.push_back(PatternItem("day", {{"dd", "2-digit"}, {"d", "numeric"}},
k2DigitNumeric));
- if (FLAG_harmony_intl_dateformat_day_period) {
- items.push_back(PatternItem("dayPeriod",
- {{"BBBBB", "narrow"},
- {"bbbbb", "narrow"},
- {"BBBB", "long"},
- {"bbbb", "long"},
- {"B", "short"},
- {"b", "short"}},
- kNarrowLongShort));
- }
+ items.push_back(PatternItem("dayPeriod",
+ {{"BBBBB", "narrow"},
+ {"bbbbb", "narrow"},
+ {"BBBB", "long"},
+ {"bbbb", "long"},
+ {"B", "short"},
+ {"b", "short"}},
+ kNarrowLongShort));
items.push_back(PatternItem("hour",
{{"HH", "2-digit"},
{"H", "numeric"},
@@ -170,7 +168,6 @@ static std::vector<PatternItem> BuildPatternItems() {
items.push_back(PatternItem("second", {{"ss", "2-digit"}, {"s", "numeric"}},
k2DigitNumeric));
- if (FLAG_harmony_intl_more_timezone) {
const std::vector<const char*> kTimezone = {"long", "short",
"longOffset", "shortOffset",
"longGeneric", "shortGeneric"};
@@ -182,11 +179,7 @@ static std::vector<PatternItem> BuildPatternItems() {
{"vvvv", "longGeneric"},
{"v", "shortGeneric"}},
kTimezone));
- } else {
- items.push_back(PatternItem(
- "timeZoneName", {{"zzzz", "long"}, {"z", "short"}}, kLongShort));
- }
- return items;
+ return items;
}
class PatternItems {
@@ -929,9 +922,7 @@ MaybeHandle<JSObject> JSDateTimeFormat::ToDateTimeOptions(
// a. For each of the property names "dayPeriod", "hour", "minute",
// "second", "fractionalSecondDigits", do
std::vector<Handle<String>> list;
- if (FLAG_harmony_intl_dateformat_day_period) {
- list.push_back(factory->dayPeriod_string());
- }
+ list.push_back(factory->dayPeriod_string());
list.push_back(factory->hour_string());
list.push_back(factory->minute_string());
list.push_back(factory->second_string());
@@ -1081,7 +1072,7 @@ class CalendarCache {
icu::GregorianCalendar::getStaticClassID()) {
icu::GregorianCalendar* gc =
static_cast<icu::GregorianCalendar*>(calendar.get());
- UErrorCode status = U_ZERO_ERROR;
+ status = U_ZERO_ERROR;
// The beginning of ECMAScript time, namely -(2**53)
const double start_of_time = -9007199254740992;
gc->setGregorianChange(start_of_time, status);
@@ -1726,7 +1717,6 @@ MaybeHandle<JSDateTimeFormat> JSDateTimeFormat::New(
// 29. Let matcher be ? GetOption(options, "formatMatcher", "string", «
// "basic", "best fit" », "best fit").
- enum FormatMatcherOption { kBestFit, kBasic };
// We implement only best fit algorithm, but still need to check
// if the formatMatcher values are in range.
// c. Let matcher be ? GetOption(options, "formatMatcher", "string",
@@ -1870,7 +1860,7 @@ MaybeHandle<JSDateTimeFormat> JSDateTimeFormat::New(
if (dateTimeFormatHourCycle !=
ToHourCycle(hc_extension_it->second.c_str())) {
// Remove -hc- if it does not agree with what we used.
- UErrorCode status = U_ZERO_ERROR;
+ status = U_ZERO_ERROR;
resolved_locale.setUnicodeKeywordValue("hc", nullptr, status);
DCHECK(U_SUCCESS(status));
}
diff --git a/deps/v8/src/objects/js-display-names.cc b/deps/v8/src/objects/js-display-names.cc
index d2e1064967..ed2b833e39 100644
--- a/deps/v8/src/objects/js-display-names.cc
+++ b/deps/v8/src/objects/js-display-names.cc
@@ -454,24 +454,12 @@ MaybeHandle<JSDisplayNames> JSDisplayNames::New(Isolate* isolate,
// 12. Let type be ? GetOption(options, "type", "string", « "language",
// "region", "script", "currency" , "calendar", "dateTimeField", "unit"»,
// undefined).
- Maybe<Type> maybe_type =
- FLAG_harmony_intl_displaynames_v2
- ? GetStringOption<Type>(
- isolate, options, "type", service,
- {"language", "region", "script", "currency", "calendar",
- "dateTimeField"},
- {Type::kLanguage, Type::kRegion, Type::kScript, Type::kCurrency,
- Type::kCalendar, Type::kDateTimeField},
- Type::kUndefined)
- : GetStringOption<Type>(isolate, options, "type", service,
- {"language", "region", "script", "currency"},
- {
- Type::kLanguage,
- Type::kRegion,
- Type::kScript,
- Type::kCurrency,
- },
- Type::kUndefined);
+ Maybe<Type> maybe_type = GetStringOption<Type>(
+ isolate, options, "type", service,
+ {"language", "region", "script", "currency", "calendar", "dateTimeField"},
+ {Type::kLanguage, Type::kRegion, Type::kScript, Type::kCurrency,
+ Type::kCalendar, Type::kDateTimeField},
+ Type::kUndefined);
MAYBE_RETURN(maybe_type, MaybeHandle<JSDisplayNames>());
Type type_enum = maybe_type.FromJust();
@@ -494,21 +482,18 @@ MaybeHandle<JSDisplayNames> JSDisplayNames::New(Isolate* isolate,
// 16. Set displayNames.[[Fallback]] to fallback.
LanguageDisplay language_display_enum = LanguageDisplay::kDialect;
- if (FLAG_harmony_intl_displaynames_v2) {
- // 24. Let languageDisplay be ? GetOption(options, "languageDisplay",
- // "string", « "dialect", "standard" », "dialect").
- Maybe<LanguageDisplay> maybe_language_display =
- GetStringOption<LanguageDisplay>(
- isolate, options, "languageDisplay", service,
- {"dialect", "standard"},
- {LanguageDisplay::kDialect, LanguageDisplay::kStandard},
- LanguageDisplay::kDialect);
- MAYBE_RETURN(maybe_language_display, MaybeHandle<JSDisplayNames>());
- // 25. If type is "language", then
- if (type_enum == Type::kLanguage) {
- // a. Set displayNames.[[LanguageDisplay]] to languageDisplay.
- language_display_enum = maybe_language_display.FromJust();
- }
+ // 24. Let languageDisplay be ? GetOption(options, "languageDisplay",
+ // "string", « "dialect", "standard" », "dialect").
+ Maybe<LanguageDisplay> maybe_language_display =
+ GetStringOption<LanguageDisplay>(
+ isolate, options, "languageDisplay", service, {"dialect", "standard"},
+ {LanguageDisplay::kDialect, LanguageDisplay::kStandard},
+ LanguageDisplay::kDialect);
+ MAYBE_RETURN(maybe_language_display, MaybeHandle<JSDisplayNames>());
+ // 25. If type is "language", then
+ if (type_enum == Type::kLanguage) {
+ // a. Set displayNames.[[LanguageDisplay]] to languageDisplay.
+ language_display_enum = maybe_language_display.FromJust();
}
// Set displayNames.[[Fallback]] to fallback.
@@ -596,7 +581,6 @@ Handle<JSObject> JSDisplayNames::ResolvedOptions(
DCHECK(maybe_create_fallback.FromJust());
USE(maybe_create_fallback);
- if (FLAG_harmony_intl_displaynames_v2) {
if (std::strcmp("language", internal->type()) == 0) {
Maybe<bool> maybe_create_language_display =
JSReceiver::CreateDataProperty(isolate, options,
@@ -605,7 +589,6 @@ Handle<JSObject> JSDisplayNames::ResolvedOptions(
DCHECK(maybe_create_language_display.FromJust());
USE(maybe_create_language_display);
}
- }
return options;
}
diff --git a/deps/v8/src/objects/js-function-inl.h b/deps/v8/src/objects/js-function-inl.h
index 314e7a85b0..27916a17ce 100644
--- a/deps/v8/src/objects/js-function-inl.h
+++ b/deps/v8/src/objects/js-function-inl.h
@@ -14,6 +14,7 @@
#include "src/ic/ic.h"
#include "src/init/bootstrapper.h"
#include "src/objects/feedback-cell-inl.h"
+#include "src/objects/map-updater.h"
#include "src/objects/shared-function-info-inl.h"
// Has to be the last include (doesn't have include guards):
@@ -123,7 +124,7 @@ bool JSFunction::IsInOptimizationQueue() {
void JSFunction::CompleteInobjectSlackTrackingIfActive() {
if (!has_prototype_slot()) return;
if (has_initial_map() && initial_map().IsInobjectSlackTrackingInProgress()) {
- initial_map().CompleteInobjectSlackTracking(GetIsolate());
+ MapUpdater::CompleteInobjectSlackTracking(GetIsolate(), initial_map());
}
}
@@ -155,6 +156,15 @@ void JSFunction::set_code(Code code, ReleaseStoreTag, WriteBarrierMode mode) {
set_raw_code(ToCodeT(code), kReleaseStore, mode);
}
+#ifdef V8_EXTERNAL_CODE_SPACE
+void JSFunction::set_code(CodeT code, WriteBarrierMode mode) {
+ set_raw_code(code, mode);
+}
+void JSFunction::set_code(CodeT code, ReleaseStoreTag, WriteBarrierMode mode) {
+ set_raw_code(code, kReleaseStore, mode);
+}
+#endif
+
Address JSFunction::code_entry_point() const {
if (V8_EXTERNAL_CODE_SPACE_BOOL) {
return CodeDataContainer::cast(raw_code()).code_entry_point();
diff --git a/deps/v8/src/objects/js-function.cc b/deps/v8/src/objects/js-function.cc
index decc508023..bfb3f7ba96 100644
--- a/deps/v8/src/objects/js-function.cc
+++ b/deps/v8/src/objects/js-function.cc
@@ -408,7 +408,7 @@ void SetInstancePrototype(Isolate* isolate, Handle<JSFunction> function,
// Deoptimize all code that embeds the previous initial map.
initial_map->dependent_code().DeoptimizeDependentCodeGroup(
- DependentCode::kInitialMapChangedGroup);
+ isolate, DependentCode::kInitialMapChangedGroup);
} else {
// Put the value in the initial map field until an initial map is
// needed. At that point, a new initial map is created and the
@@ -937,9 +937,9 @@ Handle<String> NativeCodeFunctionSourceString(
Handle<SharedFunctionInfo> shared_info) {
Isolate* const isolate = shared_info->GetIsolate();
IncrementalStringBuilder builder(isolate);
- builder.AppendCString("function ");
+ builder.AppendCStringLiteral("function ");
builder.AppendString(handle(shared_info->Name(), isolate));
- builder.AppendCString("() { [native code] }");
+ builder.AppendCStringLiteral("() { [native code] }");
return builder.Finish().ToHandleChecked();
}
diff --git a/deps/v8/src/objects/js-function.h b/deps/v8/src/objects/js-function.h
index 7dd35f10ff..866871628f 100644
--- a/deps/v8/src/objects/js-function.h
+++ b/deps/v8/src/objects/js-function.h
@@ -86,6 +86,14 @@ class JSFunction
// are fully initialized.
DECL_ACCESSORS(code, Code)
DECL_RELEASE_ACQUIRE_ACCESSORS(code, Code)
+#ifdef V8_EXTERNAL_CODE_SPACE
+ // Convenient overloads to avoid unnecessary Code <-> CodeT conversions.
+ // TODO(v8:11880): remove once |code| accessors are migrated to CodeT.
+ inline void set_code(CodeT code,
+ WriteBarrierMode mode = UPDATE_WRITE_BARRIER);
+ inline void set_code(CodeT code, ReleaseStoreTag,
+ WriteBarrierMode mode = UPDATE_WRITE_BARRIER);
+#endif
// Returns the address of the function code's instruction start.
inline Address code_entry_point() const;
diff --git a/deps/v8/src/objects/js-generator.h b/deps/v8/src/objects/js-generator.h
index 99f05abcbc..52c33fbd65 100644
--- a/deps/v8/src/objects/js-generator.h
+++ b/deps/v8/src/objects/js-generator.h
@@ -16,6 +16,7 @@ namespace internal {
// Forward declarations.
class JSPromise;
+class StructBodyDescriptor;
#include "torque-generated/src/objects/js-generator-tq.inc"
@@ -71,6 +72,8 @@ class AsyncGeneratorRequest
DECL_PRINTER(AsyncGeneratorRequest)
DECL_VERIFIER(AsyncGeneratorRequest)
+ using BodyDescriptor = StructBodyDescriptor;
+
TQ_OBJECT_CONSTRUCTORS(AsyncGeneratorRequest)
};
diff --git a/deps/v8/src/objects/js-locale.cc b/deps/v8/src/objects/js-locale.cc
index 05f4a7302d..27324f0d42 100644
--- a/deps/v8/src/objects/js-locale.cc
+++ b/deps/v8/src/objects/js-locale.cc
@@ -727,27 +727,29 @@ MaybeHandle<JSObject> JSLocale::WeekInfo(Isolate* isolate,
// Let fd be the weekday value indicating which day of the week is considered
// the 'first' day, for calendar purposes, in the locale.
int32_t fd = weekdayFromEDaysOfWeek(calendar->getFirstDayOfWeek());
- bool thursday_is_weekend =
- (UCAL_WEEKDAY != calendar->getDayOfWeekType(UCAL_THURSDAY, status));
- bool friday_is_weekend =
- (UCAL_WEEKDAY != calendar->getDayOfWeekType(UCAL_FRIDAY, status));
- bool saturday_is_weekend =
- (UCAL_WEEKDAY != calendar->getDayOfWeekType(UCAL_SATURDAY, status));
- bool sunday_is_weekend =
- (UCAL_WEEKDAY != calendar->getDayOfWeekType(UCAL_SUNDAY, status));
+
+ // Let wi be ! WeekInfoOfLocale(loc).
+ // Let we be ! CreateArrayFromList( wi.[[Weekend]] ).
+ Handle<FixedArray> wi = Handle<FixedArray>::cast(factory->NewFixedArray(2));
+ int32_t length = 0;
+ for (int32_t i = 1; i <= 7; i++) {
+ UCalendarDaysOfWeek day =
+ (i == 7) ? UCAL_SUNDAY : static_cast<UCalendarDaysOfWeek>(i + 1);
+ if (UCAL_WEEKDAY != calendar->getDayOfWeekType(day, status)) {
+ wi->set(length++, Smi::FromInt(i));
+ CHECK_LE(length, 2);
+ }
+ }
+ if (length != 2) {
+ wi = wi->ShrinkOrEmpty(isolate, wi, length);
+ }
+ Handle<JSArray> we = factory->NewJSArrayWithElements(wi);
+
if (U_FAILURE(status)) {
THROW_NEW_ERROR(isolate, NewRangeError(MessageTemplate::kIcuError),
JSObject);
}
- // Let ws be the weekday value indicating which day of the week is considered
- // the starting day of the 'weekend', for calendar purposes, in the locale.
- int32_t ws = thursday_is_weekend ? 4 : (friday_is_weekend ? 5 : 6);
-
- // Let we be the weekday value indicating which day of the week is considered
- // the ending day of the 'weekend', for calendar purposes, in the locale.
- int32_t we = sunday_is_weekend ? 7 : (saturday_is_weekend ? 6 : 5);
-
// Let md be the minimal days required in the first week of a month or year,
// for calendar purposes, in the locale.
int32_t md = calendar->getMinimalDaysInFirstWeek();
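
The loop above walks ECMA-style weekdays 1 (Monday) through 7 (Sunday), maps each to ICU's numbering (where UCAL_SUNDAY is 1), and collects the non-weekday ones into the "weekend" list. A sketch of the same mapping with the ICU calendar call replaced by a hypothetical IsWeekendDay predicate:

// Hedged sketch; IsWeekendDay stands in for getDayOfWeekType() != UCAL_WEEKDAY.
#include <cstdio>
#include <vector>

// Hypothetical locale data: Saturday (ICU 7) and Sunday (ICU 1) are weekend.
bool IsWeekendDay(int icu_day) { return icu_day == 1 || icu_day == 7; }

std::vector<int> WeekendList() {
  std::vector<int> weekend;
  for (int i = 1; i <= 7; i++) {
    // i is the ECMA-style weekday (Mon=1..Sun=7); map it to ICU's numbering,
    // where Sunday is 1 and Monday..Saturday are 2..7.
    int icu_day = (i == 7) ? 1 : i + 1;
    if (IsWeekendDay(icu_day)) weekend.push_back(i);
  }
  return weekend;
}

int main() {
  for (int d : WeekendList()) std::printf("%d ", d);  // prints "6 7" (Sat, Sun)
  std::printf("\n");
}
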
@@ -758,16 +760,9 @@ MaybeHandle<JSObject> JSLocale::WeekInfo(Isolate* isolate,
factory->NewNumberFromInt(fd), Just(kDontThrow))
.FromJust());
- // Perform ! CreateDataPropertyOrThrow(info, "weekendStart", ws).
- CHECK(JSReceiver::CreateDataProperty(
- isolate, info, factory->weekendStart_string(),
- factory->NewNumberFromInt(ws), Just(kDontThrow))
- .FromJust());
-
- // Perform ! CreateDataPropertyOrThrow(info, "weekendEnd", we).
- CHECK(JSReceiver::CreateDataProperty(
- isolate, info, factory->weekendEnd_string(),
- factory->NewNumberFromInt(we), Just(kDontThrow))
+ // Perform ! CreateDataPropertyOrThrow(info, "weekend", we).
+ CHECK(JSReceiver::CreateDataProperty(isolate, info, factory->weekend_string(),
+ we, Just(kDontThrow))
.FromJust());
// Perform ! CreateDataPropertyOrThrow(info, "minimalDays", md).
diff --git a/deps/v8/src/objects/js-objects-inl.h b/deps/v8/src/objects/js-objects-inl.h
index 5926721d59..24e86512de 100644
--- a/deps/v8/src/objects/js-objects-inl.h
+++ b/deps/v8/src/objects/js-objects-inl.h
@@ -341,9 +341,8 @@ Object JSObject::RawFastPropertyAt(PtrComprCageBase cage_base,
}
}
-base::Optional<Object> JSObject::RawInobjectPropertyAt(Map original_map,
- FieldIndex index) const {
- PtrComprCageBase cage_base = GetPtrComprCageBase(*this);
+base::Optional<Object> JSObject::RawInobjectPropertyAt(
+ PtrComprCageBase cage_base, Map original_map, FieldIndex index) const {
CHECK(index.is_inobject());
// This method implements a "snapshot" protocol to protect against reading out
@@ -373,7 +372,7 @@ base::Optional<Object> JSObject::RawInobjectPropertyAt(Map original_map,
// given by the map and it will be a valid Smi or object pointer.
Object maybe_tagged_object =
TaggedField<Object>::Acquire_Load(cage_base, *this, index.offset());
- if (original_map != map(kAcquireLoad)) return {};
+ if (original_map != map(cage_base, kAcquireLoad)) return {};
return maybe_tagged_object;
}
@@ -398,7 +397,7 @@ void JSObject::FastPropertyAtPut(FieldIndex index, Object value,
void JSObject::WriteToField(InternalIndex descriptor, PropertyDetails details,
Object value) {
DCHECK_EQ(PropertyLocation::kField, details.location());
- DCHECK_EQ(kData, details.kind());
+ DCHECK_EQ(PropertyKind::kData, details.kind());
DisallowGarbageCollection no_gc;
FieldIndex index = FieldIndex::ForDescriptor(map(), descriptor);
if (details.representation().IsDouble()) {
@@ -470,7 +469,8 @@ void JSObject::InitializeBody(Map map, int start_offset,
}
DEF_GETTER(JSGlobalObject, native_context_unchecked, Object) {
- return TaggedField<Object, kNativeContextOffset>::load(cage_base, *this);
+ return TaggedField<Object, kNativeContextOffset>::Relaxed_Load(cage_base,
+ *this);
}
bool JSMessageObject::DidEnsureSourcePositionsAvailable() const {
diff --git a/deps/v8/src/objects/js-objects.cc b/deps/v8/src/objects/js-objects.cc
index 933c3cda0e..1df13df72c 100644
--- a/deps/v8/src/objects/js-objects.cc
+++ b/deps/v8/src/objects/js-objects.cc
@@ -284,7 +284,7 @@ V8_WARN_UNUSED_RESULT Maybe<bool> FastAssign(
PropertyDetails details = descriptors->GetDetails(i);
if (!details.IsEnumerable()) continue;
- if (details.kind() == kData) {
+ if (details.kind() == PropertyKind::kData) {
if (details.location() == PropertyLocation::kDescriptor) {
prop_value = handle(descriptors->GetStrongValue(i), isolate);
} else {
@@ -2035,7 +2035,7 @@ V8_WARN_UNUSED_RESULT Maybe<bool> FastGetOwnValuesOrEntries(
PropertyDetails details = descriptors->GetDetails(index);
if (!details.IsEnumerable()) continue;
- if (details.kind() == kData) {
+ if (details.kind() == PropertyKind::kData) {
if (details.location() == PropertyLocation::kDescriptor) {
prop_value = handle(descriptors->GetStrongValue(index), isolate);
} else {
@@ -2393,6 +2393,8 @@ int JSObject::GetHeaderSize(InstanceType type,
return WasmMemoryObject::kHeaderSize;
case WASM_MODULE_OBJECT_TYPE:
return WasmModuleObject::kHeaderSize;
+ case WASM_SUSPENDER_OBJECT_TYPE:
+ return WasmSuspenderObject::kHeaderSize;
case WASM_TABLE_OBJECT_TYPE:
return WasmTableObject::kHeaderSize;
case WASM_VALUE_OBJECT_TYPE:
@@ -2929,7 +2931,7 @@ void MigrateFastToFast(Isolate* isolate, Handle<JSObject> object,
value = isolate->factory()->uninitialized_value();
}
DCHECK_EQ(PropertyLocation::kField, details.location());
- DCHECK_EQ(kData, details.kind());
+ DCHECK_EQ(PropertyKind::kData, details.kind());
DCHECK(!index.is_inobject()); // Must be a backing store index.
new_storage->set(index.outobject_array_index(), *value);
@@ -2979,13 +2981,13 @@ void MigrateFastToFast(Isolate* isolate, Handle<JSObject> object,
for (InternalIndex i : InternalIndex::Range(old_nof)) {
PropertyDetails details = new_descriptors->GetDetails(i);
if (details.location() != PropertyLocation::kField) continue;
- DCHECK_EQ(kData, details.kind());
+ DCHECK_EQ(PropertyKind::kData, details.kind());
PropertyDetails old_details = old_descriptors->GetDetails(i);
Representation old_representation = old_details.representation();
Representation representation = details.representation();
Handle<Object> value;
if (old_details.location() == PropertyLocation::kDescriptor) {
- if (old_details.kind() == kAccessor) {
+ if (old_details.kind() == PropertyKind::kAccessor) {
// In case of kAccessor -> kData property reconfiguration, the property
// must already be prepared for data of certain type.
DCHECK(!details.representation().IsNone());
@@ -2995,7 +2997,7 @@ void MigrateFastToFast(Isolate* isolate, Handle<JSObject> object,
value = isolate->factory()->uninitialized_value();
}
} else {
- DCHECK_EQ(kData, old_details.kind());
+ DCHECK_EQ(PropertyKind::kData, old_details.kind());
value = handle(old_descriptors->GetStrongValue(isolate, i), isolate);
DCHECK(!old_representation.IsDouble() && !representation.IsDouble());
}
@@ -3023,7 +3025,7 @@ void MigrateFastToFast(Isolate* isolate, Handle<JSObject> object,
for (InternalIndex i : InternalIndex::Range(old_nof, new_nof)) {
PropertyDetails details = new_descriptors->GetDetails(i);
if (details.location() != PropertyLocation::kField) continue;
- DCHECK_EQ(kData, details.kind());
+ DCHECK_EQ(PropertyKind::kData, details.kind());
Handle<Object> value;
if (details.representation().IsDouble()) {
value = isolate->factory()->NewHeapNumberWithHoleNaN();
@@ -3113,7 +3115,7 @@ void MigrateFastToSlow(Isolate* isolate, Handle<JSObject> object,
Handle<Object> value;
if (details.location() == PropertyLocation::kField) {
FieldIndex index = FieldIndex::ForDescriptor(isolate, *map, i);
- if (details.kind() == kData) {
+ if (details.kind() == PropertyKind::kData) {
value = handle(object->RawFastPropertyAt(isolate, index), isolate);
if (details.representation().IsDouble()) {
DCHECK(value->IsHeapNumber(isolate));
@@ -3121,7 +3123,7 @@ void MigrateFastToSlow(Isolate* isolate, Handle<JSObject> object,
value = isolate->factory()->NewHeapNumber(old_value);
}
} else {
- DCHECK_EQ(kAccessor, details.kind());
+ DCHECK_EQ(PropertyKind::kAccessor, details.kind());
value = handle(object->RawFastPropertyAt(isolate, index), isolate);
}
@@ -3604,7 +3606,7 @@ void JSObject::MigrateSlowToFast(Handle<JSObject> object,
kind = dictionary->DetailsAt(index).kind();
}
- if (kind == kData) {
+ if (kind == PropertyKind::kData) {
number_of_fields += 1;
}
}
@@ -3698,7 +3700,7 @@ void JSObject::MigrateSlowToFast(Handle<JSObject> object,
details.constness() == PropertyConstness::kMutable);
Descriptor d;
- if (details.kind() == kData) {
+ if (details.kind() == PropertyKind::kData) {
// Ensure that we make constant field only when elements kind is not
// transitionable.
PropertyConstness constness = is_transitionable_elements_kind
@@ -3713,7 +3715,7 @@ void JSObject::MigrateSlowToFast(Handle<JSObject> object,
// TODO(verwaest): value->OptimalRepresentation();
Representation::Tagged(), MaybeObjectHandle(FieldType::Any(isolate)));
} else {
- DCHECK_EQ(kAccessor, details.kind());
+ DCHECK_EQ(PropertyKind::kAccessor, details.kind());
d = Descriptor::AccessorConstant(key, handle(value, isolate),
details.attributes());
}
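
The repeated kData/kAccessor → PropertyKind::kData/PropertyKind::kAccessor edits in this file qualify the property-kind constants with their enum scope, as a scoped enum (or an explicitly qualified unscoped enum) requires. A toy illustration of the before/after spelling, with hypothetical helper names:

// Hedged illustration only; the enum below is a stand-in, not V8's definition.
#include <cstdint>

enum class PropertyKind : uint8_t { kData, kAccessor };

// Before: DCHECK_EQ(kData, details.kind());                 // bare constant
// After:  DCHECK_EQ(PropertyKind::kData, details.kind());   // qualified
inline bool IsDataProperty(PropertyKind kind) {
  return kind == PropertyKind::kData;
}

int main() { return IsDataProperty(PropertyKind::kAccessor) ? 1 : 0; }
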
@@ -3897,7 +3899,8 @@ bool TestDictionaryPropertiesIntegrityLevel(Dictionary dict,
if (key.FilterKey(ALL_PROPERTIES)) continue;
PropertyDetails details = dict.DetailsAt(i);
if (details.IsConfigurable()) return false;
- if (level == FROZEN && details.kind() == kData && !details.IsReadOnly()) {
+ if (level == FROZEN && details.kind() == PropertyKind::kData &&
+ !details.IsReadOnly()) {
return false;
}
}
@@ -3914,7 +3917,8 @@ bool TestFastPropertiesIntegrityLevel(Map map, PropertyAttributes level) {
if (descriptors.GetKey(i).IsPrivate()) continue;
PropertyDetails details = descriptors.GetDetails(i);
if (details.IsConfigurable()) return false;
- if (level == FROZEN && details.kind() == kData && !details.IsReadOnly()) {
+ if (level == FROZEN && details.kind() == PropertyKind::kData &&
+ !details.IsReadOnly()) {
return false;
}
}
@@ -4076,7 +4080,7 @@ void JSObject::ApplyAttributesToDictionary(
PropertyDetails details = dictionary->DetailsAt(i);
int attrs = attributes;
// READ_ONLY is an invalid attribute for JS setters/getters.
- if ((attributes & READ_ONLY) && details.kind() == kAccessor) {
+ if ((attributes & READ_ONLY) && details.kind() == PropertyKind::kAccessor) {
Object v = dictionary->ValueAt(i);
if (v.IsAccessorPair()) attrs &= ~READ_ONLY;
}
@@ -4509,7 +4513,7 @@ Object JSObject::SlowReverseLookup(Object value) {
for (InternalIndex i : map().IterateOwnDescriptors()) {
PropertyDetails details = descs.GetDetails(i);
if (details.location() == PropertyLocation::kField) {
- DCHECK_EQ(kData, details.kind());
+ DCHECK_EQ(PropertyKind::kData, details.kind());
FieldIndex field_index = FieldIndex::ForDescriptor(map(), i);
Object property = RawFastPropertyAt(field_index);
if (field_index.is_double()) {
@@ -4522,7 +4526,7 @@ Object JSObject::SlowReverseLookup(Object value) {
}
} else {
DCHECK_EQ(PropertyLocation::kDescriptor, details.location());
- if (details.kind() == kData) {
+ if (details.kind() == PropertyKind::kData) {
if (descs.GetStrongValue(i) == value) {
return descs.GetKey(i);
}
@@ -4775,8 +4779,10 @@ void InvalidateOnePrototypeValidityCellInternal(Map map) {
// of the validity cell is not used. Therefore, we always trigger the de-opt
// here, even if the cell was already invalid.
if (V8_DICT_PROPERTY_CONST_TRACKING_BOOL && map.is_dictionary_map()) {
+ // TODO(11527): pass Isolate as an argument.
+ Isolate* isolate = GetIsolateFromWritableObject(map);
map.dependent_code().DeoptimizeDependentCodeGroup(
- DependentCode::kPrototypeCheckGroup);
+ isolate, DependentCode::kPrototypeCheckGroup);
}
}
@@ -5477,6 +5483,9 @@ void JSMessageObject::EnsureSourcePositionsAvailable(
DCHECK_GE(message->bytecode_offset().value(), kFunctionEntryBytecodeOffset);
Handle<SharedFunctionInfo> shared_info(
SharedFunctionInfo::cast(message->shared_info()), isolate);
+ IsCompiledScope is_compiled_scope;
+ SharedFunctionInfo::EnsureBytecodeArrayAvailable(
+ isolate, shared_info, &is_compiled_scope, CreateSourcePositions::kYes);
SharedFunctionInfo::EnsureSourcePositionsAvailable(isolate, shared_info);
DCHECK(shared_info->HasBytecodeArray());
int position = shared_info->abstract_code(isolate).SourcePosition(
diff --git a/deps/v8/src/objects/js-objects.h b/deps/v8/src/objects/js-objects.h
index 3904144e40..ff7a268470 100644
--- a/deps/v8/src/objects/js-objects.h
+++ b/deps/v8/src/objects/js-objects.h
@@ -683,8 +683,8 @@ class JSObject : public TorqueGeneratedJSObject<JSObject, JSReceiver> {
// in which this method is meant to be used, and what guarantees it
// provides against invalid reads from another thread during object
// mutation.
- inline base::Optional<Object> RawInobjectPropertyAt(Map original_map,
- FieldIndex index) const;
+ inline base::Optional<Object> RawInobjectPropertyAt(
+ PtrComprCageBase cage_base, Map original_map, FieldIndex index) const;
inline void FastPropertyAtPut(FieldIndex index, Object value,
WriteBarrierMode mode = UPDATE_WRITE_BARRIER);
diff --git a/deps/v8/src/objects/keys.cc b/deps/v8/src/objects/keys.cc
index acd94fcf86..77a1268b29 100644
--- a/deps/v8/src/objects/keys.cc
+++ b/deps/v8/src/objects/keys.cc
@@ -421,7 +421,7 @@ Handle<FixedArray> GetFastEnumPropertyKeys(Isolate* isolate,
if (details.IsDontEnum()) continue;
Object key = descriptors->GetKey(i);
if (key.IsSymbol()) continue;
- DCHECK_EQ(kData, details.kind());
+ DCHECK_EQ(PropertyKind::kData, details.kind());
DCHECK_EQ(PropertyLocation::kField, details.location());
FieldIndex field_index = FieldIndex::ForDescriptor(*map, i);
indices->set(index, Smi::FromInt(field_index.GetLoadByFieldIndex()));
@@ -767,7 +767,7 @@ base::Optional<int> CollectOwnPropertyNamesInternal(
}
if (filter & ONLY_ALL_CAN_READ) {
- if (details.kind() != kAccessor) continue;
+ if (details.kind() != PropertyKind::kAccessor) continue;
Object accessors = descs->GetStrongValue(i);
if (!accessors.IsAccessorInfo()) continue;
if (!AccessorInfo::cast(accessors).all_can_read()) continue;
@@ -925,7 +925,7 @@ ExceptionStatus CollectKeysFromDictionary(Handle<Dictionary> dictionary,
continue;
}
if (filter & ONLY_ALL_CAN_READ) {
- if (details.kind() != kAccessor) continue;
+ if (details.kind() != PropertyKind::kAccessor) continue;
Object accessors = raw_dictionary.ValueAt(i);
if (!accessors.IsAccessorInfo()) continue;
if (!AccessorInfo::cast(accessors).all_can_read()) continue;
diff --git a/deps/v8/src/objects/literal-objects.cc b/deps/v8/src/objects/literal-objects.cc
index 7406a9dff1..8a447af789 100644
--- a/deps/v8/src/objects/literal-objects.cc
+++ b/deps/v8/src/objects/literal-objects.cc
@@ -172,8 +172,9 @@ void AddToDictionaryTemplate(IsolateT* isolate, Handle<Dictionary> dictionary,
: ComputeEnumerationIndex(key_index);
Handle<Object> value_handle;
PropertyDetails details(
- value_kind != ClassBoilerplate::kData ? kAccessor : kData, DONT_ENUM,
- PropertyDetails::kConstIfDictConstnessTracking, enum_order);
+ value_kind != ClassBoilerplate::kData ? PropertyKind::kAccessor
+ : PropertyKind::kData,
+ DONT_ENUM, PropertyDetails::kConstIfDictConstnessTracking, enum_order);
if (value_kind == ClassBoilerplate::kData) {
value_handle = handle(value, isolate);
} else {
@@ -222,7 +223,8 @@ void AddToDictionaryTemplate(IsolateT* isolate, Handle<Dictionary> dictionary,
// method or just one of them was defined before while the other one
// was not defined yet, so overwrite property to kData.
PropertyDetails details(
- kData, DONT_ENUM, PropertyDetails::kConstIfDictConstnessTracking,
+ PropertyKind::kData, DONT_ENUM,
+ PropertyDetails::kConstIfDictConstnessTracking,
enum_order_existing);
dictionary->DetailsAtPut(entry, details);
dictionary->ValueAtPut(entry, value);
@@ -283,7 +285,8 @@ void AddToDictionaryTemplate(IsolateT* isolate, Handle<Dictionary> dictionary,
// one (AccessorInfo "length" and "name" properties are always defined
// before).
PropertyDetails details(
- kData, DONT_ENUM, PropertyDetails::kConstIfDictConstnessTracking,
+ PropertyKind::kData, DONT_ENUM,
+ PropertyDetails::kConstIfDictConstnessTracking,
enum_order_existing);
dictionary->DetailsAtPut(entry, details);
dictionary->ValueAtPut(entry, value);
@@ -295,7 +298,7 @@ void AddToDictionaryTemplate(IsolateT* isolate, Handle<Dictionary> dictionary,
// which is why we don't need to update the property details if
// |is_elements_dictionary| holds.
PropertyDetails details(
- kData, DONT_ENUM,
+ PropertyKind::kData, DONT_ENUM,
PropertyDetails::kConstIfDictConstnessTracking,
enum_order_computed);
@@ -323,7 +326,7 @@ void AddToDictionaryTemplate(IsolateT* isolate, Handle<Dictionary> dictionary,
// |is_elements_dictionary| holds.
PropertyDetails details(
- kAccessor, DONT_ENUM,
+ PropertyKind::kAccessor, DONT_ENUM,
PropertyDetails::kConstIfDictConstnessTracking,
enum_order_computed);
dictionary->DetailsAtPut(entry, details);
@@ -340,7 +343,7 @@ void AddToDictionaryTemplate(IsolateT* isolate, Handle<Dictionary> dictionary,
Handle<AccessorPair> pair(isolate->factory()->NewAccessorPair());
pair->set(component, value);
PropertyDetails details(
- kAccessor, DONT_ENUM,
+ PropertyKind::kAccessor, DONT_ENUM,
PropertyDetails::kConstIfDictConstnessTracking,
enum_order_existing);
dictionary->DetailsAtPut(entry, details);
@@ -355,7 +358,7 @@ void AddToDictionaryTemplate(IsolateT* isolate, Handle<Dictionary> dictionary,
// which is why we don't need to update the property details if
// |is_elements_dictionary| holds.
PropertyDetails details(
- kData, DONT_ENUM,
+ PropertyKind::kData, DONT_ENUM,
PropertyDetails::kConstIfDictConstnessTracking,
enum_order_computed);
@@ -447,7 +450,8 @@ class ObjectDescriptor {
bool is_accessor = value->IsAccessorInfo();
DCHECK(!value->IsAccessorPair());
if (HasDictionaryProperties()) {
- PropertyKind kind = is_accessor ? i::kAccessor : i::kData;
+ PropertyKind kind =
+ is_accessor ? i::PropertyKind::kAccessor : i::PropertyKind::kData;
int enum_order = V8_ENABLE_SWISS_NAME_DICTIONARY_BOOL
? kDummyEnumerationIndex
: next_enumeration_index_++;
diff --git a/deps/v8/src/objects/literal-objects.h b/deps/v8/src/objects/literal-objects.h
index 242b3a6469..360c7f6df6 100644
--- a/deps/v8/src/objects/literal-objects.h
+++ b/deps/v8/src/objects/literal-objects.h
@@ -16,6 +16,7 @@ namespace v8 {
namespace internal {
class ClassLiteral;
+class StructBodyDescriptor;
#include "torque-generated/src/objects/literal-objects-tq.inc"
@@ -71,6 +72,8 @@ class ArrayBoilerplateDescription
DECL_PRINTER(ArrayBoilerplateDescription)
void BriefPrintDetails(std::ostream& os);
+ using BodyDescriptor = StructBodyDescriptor;
+
private:
TQ_OBJECT_CONSTRUCTORS(ArrayBoilerplateDescription)
};
@@ -82,6 +85,8 @@ class RegExpBoilerplateDescription
// Dispatched behavior.
void BriefPrintDetails(std::ostream& os);
+ using BodyDescriptor = StructBodyDescriptor;
+
private:
TQ_OBJECT_CONSTRUCTORS(RegExpBoilerplateDescription)
};
diff --git a/deps/v8/src/objects/lookup-inl.h b/deps/v8/src/objects/lookup-inl.h
index 41db47e40a..c5d761aed1 100644
--- a/deps/v8/src/objects/lookup-inl.h
+++ b/deps/v8/src/objects/lookup-inl.h
@@ -101,11 +101,11 @@ LookupIterator::LookupIterator(Isolate* isolate, Handle<Object> receiver,
// is not a typed array, then this means "array index", otherwise we need to
// ensure the full generality so that typed arrays are handled correctly.
if (!check_prototype_chain() && !lookup_start_object->IsJSTypedArray()) {
- uint32_t index;
- DCHECK(!name_->AsArrayIndex(&index));
+ uint32_t array_index;
+ DCHECK(!name_->AsArrayIndex(&array_index));
} else {
- size_t index;
- DCHECK(!name_->AsIntegerIndex(&index));
+ size_t integer_index;
+ DCHECK(!name_->AsIntegerIndex(&integer_index));
}
#endif // DEBUG
Start<false>();
diff --git a/deps/v8/src/objects/lookup.cc b/deps/v8/src/objects/lookup.cc
index 5b10337e41..dd087b49c5 100644
--- a/deps/v8/src/objects/lookup.cc
+++ b/deps/v8/src/objects/lookup.cc
@@ -375,7 +375,7 @@ void LookupIterator::PrepareForDataProperty(Handle<Object> value) {
PropertyConstness new_constness = PropertyConstness::kConst;
if (constness() == PropertyConstness::kConst) {
- DCHECK_EQ(kData, property_details_.kind());
+ DCHECK_EQ(PropertyKind::kData, property_details_.kind());
// Check that current value matches new value otherwise we should make
// the property mutable.
if (holder->HasFastProperties(isolate_)) {
@@ -485,8 +485,8 @@ void LookupIterator::ReconfigureDataProperty(Handle<Object> value,
// Force mutable to avoid changing constant value by reconfiguring
// kData -> kAccessor -> kData.
Handle<Map> new_map = MapUpdater::ReconfigureExistingProperty(
- isolate_, old_map, descriptor_number(), i::kData, attributes,
- PropertyConstness::kMutable);
+ isolate_, old_map, descriptor_number(), i::PropertyKind::kData,
+ attributes, PropertyConstness::kMutable);
if (!new_map->is_dictionary_map()) {
// Make sure that the data property has a compatible representation.
// TODO(leszeks): Do this as part of ReconfigureExistingProperty.
@@ -512,7 +512,8 @@ void LookupIterator::ReconfigureDataProperty(Handle<Object> value,
JSObject::InvalidatePrototypeChains(holder->map(isolate_));
}
if (holder_obj->IsJSGlobalObject(isolate_)) {
- PropertyDetails details(kData, attributes, PropertyCellType::kMutable);
+ PropertyDetails details(PropertyKind::kData, attributes,
+ PropertyCellType::kMutable);
Handle<GlobalDictionary> dictionary(
JSGlobalObject::cast(*holder_obj)
.global_dictionary(isolate_, kAcquireLoad),
@@ -523,7 +524,8 @@ void LookupIterator::ReconfigureDataProperty(Handle<Object> value,
property_details_ = cell->property_details();
DCHECK_EQ(cell->value(), *value);
} else {
- PropertyDetails details(kData, attributes, PropertyConstness::kMutable);
+ PropertyDetails details(PropertyKind::kData, attributes,
+ PropertyConstness::kMutable);
if (V8_ENABLE_SWISS_NAME_DICTIONARY_BOOL) {
Handle<SwissNameDictionary> dictionary(
holder_obj->property_dictionary_swiss(isolate_), isolate());
@@ -583,15 +585,17 @@ void LookupIterator::PrepareTransitionToDataProperty(
if (map->IsJSGlobalObjectMap()) {
DCHECK(!value->IsTheHole(isolate_));
// Don't set enumeration index (it will be set during value store).
- property_details_ = PropertyDetails(
- kData, attributes, PropertyCell::InitialType(isolate_, value));
+ property_details_ =
+ PropertyDetails(PropertyKind::kData, attributes,
+ PropertyCell::InitialType(isolate_, value));
transition_ = isolate_->factory()->NewPropertyCell(
name(), property_details_, value);
has_property_ = true;
} else {
// Don't set enumeration index (it will be set during value store).
- property_details_ = PropertyDetails(
- kData, attributes, PropertyDetails::kConstIfDictConstnessTracking);
+ property_details_ =
+ PropertyDetails(PropertyKind::kData, attributes,
+ PropertyDetails::kConstIfDictConstnessTracking);
transition_ = map;
}
return;
@@ -606,8 +610,9 @@ void LookupIterator::PrepareTransitionToDataProperty(
if (transition->is_dictionary_map()) {
DCHECK(!transition->IsJSGlobalObjectMap());
// Don't set enumeration index (it will be set during value store).
- property_details_ = PropertyDetails(
- kData, attributes, PropertyDetails::kConstIfDictConstnessTracking);
+ property_details_ =
+ PropertyDetails(PropertyKind::kData, attributes,
+ PropertyDetails::kConstIfDictConstnessTracking);
} else {
property_details_ = transition->GetLastDescriptorDetails(isolate_);
has_property_ = true;
@@ -800,7 +805,8 @@ void LookupIterator::TransitionToAccessorPair(Handle<Object> pair,
Handle<JSObject> receiver = GetStoreTarget<JSObject>();
holder_ = receiver;
- PropertyDetails details(kAccessor, attributes, PropertyCellType::kMutable);
+ PropertyDetails details(PropertyKind::kAccessor, attributes,
+ PropertyCellType::kMutable);
if (IsElement(*receiver)) {
// TODO(verwaest): Move code into the element accessor.
@@ -891,7 +897,7 @@ Handle<Object> LookupIterator::FetchValue(
isolate_, dictionary_entry());
}
} else if (property_details_.location() == PropertyLocation::kField) {
- DCHECK_EQ(kData, property_details_.kind());
+ DCHECK_EQ(PropertyKind::kData, property_details_.kind());
#if V8_ENABLE_WEBASSEMBLY
if (V8_UNLIKELY(holder_->IsWasmObject(isolate_))) {
if (allocation_policy == AllocationPolicy::kAllocationDisallowed) {
@@ -1006,7 +1012,7 @@ int LookupIterator::GetFieldDescriptorIndex() const {
DCHECK(has_property_);
DCHECK(holder_->HasFastProperties());
DCHECK_EQ(PropertyLocation::kField, property_details_.location());
- DCHECK_EQ(kData, property_details_.kind());
+ DCHECK_EQ(PropertyKind::kData, property_details_.kind());
// TODO(jkummerow): Propagate InternalIndex further.
return descriptor_number().as_int();
}
@@ -1015,7 +1021,7 @@ int LookupIterator::GetAccessorIndex() const {
DCHECK(has_property_);
DCHECK(holder_->HasFastProperties(isolate_));
DCHECK_EQ(PropertyLocation::kDescriptor, property_details_.location());
- DCHECK_EQ(kAccessor, property_details_.kind());
+ DCHECK_EQ(PropertyKind::kAccessor, property_details_.kind());
return descriptor_number().as_int();
}
@@ -1133,8 +1139,8 @@ void LookupIterator::WriteDataValueToWasmObject(Handle<Object> value) {
} else {
// WasmArrays don't have writable properties.
DCHECK(holder->IsWasmStruct());
- Handle<WasmStruct> holder = GetHolder<WasmStruct>();
- WasmStruct::SetField(isolate_, holder, property_details_.field_index(),
+ Handle<WasmStruct> wasm_holder = GetHolder<WasmStruct>();
+ WasmStruct::SetField(isolate_, wasm_holder, property_details_.field_index(),
value);
}
}
@@ -1237,9 +1243,9 @@ LookupIterator::State LookupIterator::LookupInSpecialHolder(
property_details_ = cell.property_details();
has_property_ = true;
switch (property_details_.kind()) {
- case v8::internal::kData:
+ case v8::internal::PropertyKind::kData:
return DATA;
- case v8::internal::kAccessor:
+ case v8::internal::PropertyKind::kAccessor:
return ACCESSOR;
}
}
@@ -1272,9 +1278,10 @@ LookupIterator::State LookupIterator::LookupInRegularHolder(
number_ = index_ < wasm_array.length() ? InternalIndex(index_)
: InternalIndex::NotFound();
wasm::ArrayType* wasm_array_type = wasm_array.type();
- property_details_ = PropertyDetails(
- kData, wasm_array_type->mutability() ? SEALED : FROZEN,
- PropertyCellType::kNoCell);
+ property_details_ =
+ PropertyDetails(PropertyKind::kData,
+ wasm_array_type->mutability() ? SEALED : FROZEN,
+ PropertyCellType::kNoCell);
} else {
DCHECK(holder.IsWasmStruct(isolate_));
@@ -1320,9 +1327,9 @@ LookupIterator::State LookupIterator::LookupInRegularHolder(
}
has_property_ = true;
switch (property_details_.kind()) {
- case v8::internal::kData:
+ case v8::internal::PropertyKind::kData:
return DATA;
- case v8::internal::kAccessor:
+ case v8::internal::PropertyKind::kAccessor:
return ACCESSOR;
}
@@ -1538,7 +1545,7 @@ base::Optional<PropertyCell> ConcurrentLookupIterator::TryGetPropertyCell(
kRelaxedLoad);
if (!cell.has_value()) return {};
- if (cell->property_details(kAcquireLoad).kind() == kAccessor) {
+ if (cell->property_details(kAcquireLoad).kind() == PropertyKind::kAccessor) {
Object maybe_accessor_pair = cell->value(kAcquireLoad);
if (!maybe_accessor_pair.IsAccessorPair()) return {};
@@ -1552,11 +1559,12 @@ base::Optional<PropertyCell> ConcurrentLookupIterator::TryGetPropertyCell(
isolate, handle(*maybe_cached_property_name, local_isolate),
kRelaxedLoad);
if (!cell.has_value()) return {};
- if (cell->property_details(kAcquireLoad).kind() != kData) return {};
+ if (cell->property_details(kAcquireLoad).kind() != PropertyKind::kData)
+ return {};
}
DCHECK(cell.has_value());
- DCHECK_EQ(cell->property_details(kAcquireLoad).kind(), kData);
+ DCHECK_EQ(cell->property_details(kAcquireLoad).kind(), PropertyKind::kData);
return cell;
}
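The kData → PropertyKind::kData and kAccessor → PropertyKind::kAccessor rewrites that run through lookup.cc (and the other files in this patch) are consistent with PropertyKind being turned into a scoped enum, so the bare enumerator names no longer resolve at namespace scope. A minimal standalone sketch of that effect; the enum definition below is an illustrative stand-in, not the real one from src/objects/property-details.h:

#include <cstdint>

// Stand-in for v8::internal::PropertyKind as a scoped enum.
enum class PropertyKind : uint8_t { kData = 0, kAccessor = 1 };

int main() {
  // With a plain enum, `kData` alone was visible in the enclosing namespace;
  // with an enum class every use has to be qualified, hence the mechanical
  // edits in this diff.
  PropertyKind kind = PropertyKind::kData;
  return kind == PropertyKind::kAccessor ? 1 : 0;
}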
diff --git a/deps/v8/src/objects/map-inl.h b/deps/v8/src/objects/map-inl.h
index c8eb400424..37d189b67a 100644
--- a/deps/v8/src/objects/map-inl.h
+++ b/deps/v8/src/objects/map-inl.h
@@ -12,6 +12,7 @@
#include "src/objects/field-type.h"
#include "src/objects/instance-type-inl.h"
#include "src/objects/js-function-inl.h"
+#include "src/objects/map-updater.h"
#include "src/objects/map.h"
#include "src/objects/objects-inl.h"
#include "src/objects/property.h"
@@ -654,7 +655,7 @@ bool Map::CanBeDeprecated() const {
for (InternalIndex i : IterateOwnDescriptors()) {
PropertyDetails details = instance_descriptors(kRelaxedLoad).GetDetails(i);
if (details.representation().MightCauseMapDeprecation()) return true;
- if (details.kind() == kData &&
+ if (details.kind() == PropertyKind::kData &&
details.location() == PropertyLocation::kDescriptor) {
return true;
}
@@ -666,7 +667,7 @@ void Map::NotifyLeafMapLayoutChange(Isolate* isolate) {
if (is_stable()) {
mark_unstable();
dependent_code().DeoptimizeDependentCodeGroup(
- DependentCode::kPrototypeCheckGroup);
+ isolate, DependentCode::kPrototypeCheckGroup);
}
}
@@ -790,7 +791,8 @@ ACCESSORS_CHECKED(Map, native_context_or_null, Object,
#if V8_ENABLE_WEBASSEMBLY
ACCESSORS_CHECKED(Map, wasm_type_info, WasmTypeInfo,
kConstructorOrBackPointerOrNativeContextOffset,
- IsWasmStructMap() || IsWasmArrayMap())
+ IsWasmStructMap() || IsWasmArrayMap() ||
+ IsWasmInternalFunctionMap())
#endif // V8_ENABLE_WEBASSEMBLY
bool Map::IsPrototypeValidityCellValid() const {
@@ -856,7 +858,7 @@ void Map::InobjectSlackTrackingStep(Isolate* isolate) {
int counter = construction_counter();
set_construction_counter(counter - 1);
if (counter == kSlackTrackingCounterEnd) {
- CompleteInobjectSlackTracking(isolate);
+ MapUpdater::CompleteInobjectSlackTracking(isolate, *this);
}
}
diff --git a/deps/v8/src/objects/map-updater.cc b/deps/v8/src/objects/map-updater.cc
index 3bfd3922a3..6d8b1cf482 100644
--- a/deps/v8/src/objects/map-updater.cc
+++ b/deps/v8/src/objects/map-updater.cc
@@ -186,7 +186,7 @@ Handle<Map> MapUpdater::ReconfigureToDataField(InternalIndex descriptor,
isolate_->map_updater_access());
modified_descriptor_ = descriptor;
- new_kind_ = kData;
+ new_kind_ = PropertyKind::kData;
new_attributes_ = attributes;
new_location_ = PropertyLocation::kField;
@@ -420,21 +420,50 @@ MapUpdater::State MapUpdater::Normalize(const char* reason) {
return state_; // Done.
}
-void MapUpdater::ShrinkInstanceSize(base::SharedMutex* map_updater_access,
- Map map, int slack) {
+// static
+void MapUpdater::CompleteInobjectSlackTracking(Isolate* isolate,
+ Map initial_map) {
+ DisallowGarbageCollection no_gc;
+ // Has to be an initial map.
+ DCHECK(initial_map.GetBackPointer().IsUndefined(isolate));
+
+ const int slack = initial_map.ComputeMinObjectSlack(isolate);
DCHECK_GE(slack, 0);
+
+ TransitionsAccessor transitions(isolate, initial_map, &no_gc);
+ TransitionsAccessor::TraverseCallback callback;
+ if (slack != 0) {
+ // Resize the initial map and all maps in its transition tree.
+ callback = [slack](Map map) {
#ifdef DEBUG
- int old_visitor_id = Map::GetVisitorId(map);
- int new_unused = map.UnusedPropertyFields() - slack;
+ int old_visitor_id = Map::GetVisitorId(map);
+ int new_unused = map.UnusedPropertyFields() - slack;
#endif
+ map.set_instance_size(map.InstanceSizeFromSlack(slack));
+ map.set_construction_counter(Map::kNoSlackTracking);
+ DCHECK_EQ(old_visitor_id, Map::GetVisitorId(map));
+ DCHECK_EQ(new_unused, map.UnusedPropertyFields());
+ };
+ } else {
+ // Stop slack tracking for this map.
+ callback = [](Map map) {
+ map.set_construction_counter(Map::kNoSlackTracking);
+ };
+ }
{
- base::SharedMutexGuard<base::kExclusive> mutex_guard(map_updater_access);
- map.set_instance_size(map.InstanceSizeFromSlack(slack));
+ // The map_updater_access lock is taken here to guarantee atomicity of all
+ // related map changes (instead of guaranteeing only atomicity of each
+ // single map change). This is needed e.g. by InstancesNeedsRewriting,
+ // which expects certain relations between maps to hold.
+ //
+ // Note: Avoid locking the full_transition_array_access lock inside this
+ // call to TraverseTransitionTree to prevent dependencies between the two
+ // locks.
+ base::SharedMutexGuard<base::kExclusive> mutex_guard(
+ isolate->map_updater_access());
+ transitions.TraverseTransitionTree(callback);
}
- map.set_construction_counter(Map::kNoSlackTracking);
- DCHECK_EQ(old_visitor_id, Map::GetVisitorId(map));
- DCHECK_EQ(new_unused, map.UnusedPropertyFields());
}
MapUpdater::State MapUpdater::TryReconfigureToDataFieldInplace() {
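The new CompleteInobjectSlackTracking above chooses one of two callbacks (shrink-and-stop vs. stop-only) and then applies it to every map in the transition tree inside a single exclusive critical section, as the lock comment explains. A reduced, self-contained sketch of that shape; std::shared_mutex and the Node type stand in for V8's SharedMutex and transition tree:

#include <functional>
#include <mutex>
#include <shared_mutex>
#include <vector>

// Stand-in for a map in the transition tree.
struct Node {
  int instance_size = 64;
  bool tracking = true;
  std::vector<Node*> children;
};

void Traverse(Node* root, const std::function<void(Node*)>& callback) {
  callback(root);
  for (Node* child : root->children) Traverse(child, callback);
}

void CompleteSlackTracking(Node* initial, int slack, std::shared_mutex& lock) {
  std::function<void(Node*)> callback;
  if (slack != 0) {
    // Shrink every map in the tree and stop tracking.
    callback = [slack](Node* n) {
      n->instance_size -= slack;
      n->tracking = false;
    };
  } else {
    // Nothing to shrink; just stop tracking.
    callback = [](Node* n) { n->tracking = false; };
  }
  // One exclusive section for the whole tree, so concurrent readers never
  // observe a half-updated transition tree.
  std::unique_lock<std::shared_mutex> guard(lock);
  Traverse(initial, callback);
}

int main() {
  Node child, root;
  root.children.push_back(&child);
  std::shared_mutex lock;
  CompleteSlackTracking(&root, 8, lock);
  return (root.tracking || child.tracking) ? 1 : 0;
}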
@@ -588,8 +617,8 @@ MapUpdater::State MapUpdater::FindRootMap() {
return Normalize("Normalize_RootModification4");
}
- DCHECK_EQ(kData, old_details.kind());
- DCHECK_EQ(kData, new_kind_);
+ DCHECK_EQ(PropertyKind::kData, old_details.kind());
+ DCHECK_EQ(PropertyKind::kData, new_kind_);
DCHECK_EQ(PropertyLocation::kField, new_location_);
// Modify root map in-place. The GeneralizeField method is a no-op
@@ -625,7 +654,7 @@ MapUpdater::State MapUpdater::FindTargetMap() {
PropertyDetails tmp_details = tmp_descriptors->GetDetails(i);
DCHECK_EQ(old_details.kind(), tmp_details.kind());
DCHECK_EQ(old_details.attributes(), tmp_details.attributes());
- if (old_details.kind() == kAccessor &&
+ if (old_details.kind() == PropertyKind::kAccessor &&
!EqualImmutableValues(GetValue(i),
tmp_descriptors->GetStrongValue(i))) {
// TODO(ishell): mutable accessors are not implemented yet.
@@ -723,7 +752,7 @@ MapUpdater::State MapUpdater::FindTargetMap() {
DCHECK_EQ(old_details.kind(), tmp_details.kind());
DCHECK_EQ(old_details.attributes(), tmp_details.attributes());
#endif
- if (old_details.kind() == kAccessor &&
+ if (old_details.kind() == PropertyKind::kAccessor &&
!EqualImmutableValues(GetValue(i),
tmp_descriptors->GetStrongValue(i))) {
return Normalize("Normalize_Incompatible");
@@ -826,7 +855,7 @@ Handle<DescriptorArray> MapUpdater::BuildDescriptorArray() {
MaybeObjectHandle wrapped_type(
Map::WrapFieldType(isolate_, next_field_type));
Descriptor d;
- if (next_kind == kData) {
+ if (next_kind == PropertyKind::kData) {
d = Descriptor::DataField(key, current_offset, next_attributes,
next_constness, next_representation,
wrapped_type);
@@ -841,7 +870,7 @@ Handle<DescriptorArray> MapUpdater::BuildDescriptorArray() {
DCHECK_EQ(PropertyConstness::kConst, next_constness);
Handle<Object> value(GetValue(i), isolate_);
- DCHECK_EQ(kAccessor, next_kind);
+ DCHECK_EQ(PropertyKind::kAccessor, next_kind);
Descriptor d = Descriptor::AccessorConstant(key, value, next_attributes);
new_descriptors->Set(i, &d);
}
@@ -859,7 +888,6 @@ Handle<DescriptorArray> MapUpdater::BuildDescriptorArray() {
PropertyLocation next_location = old_details.location();
Representation next_representation = old_details.representation();
- Descriptor d;
if (next_location == PropertyLocation::kField) {
Handle<FieldType> next_field_type =
GetOrComputeFieldType(i, old_details.location(), next_representation);
@@ -874,7 +902,7 @@ Handle<DescriptorArray> MapUpdater::BuildDescriptorArray() {
MaybeObjectHandle wrapped_type(
Map::WrapFieldType(isolate_, next_field_type));
Descriptor d;
- if (next_kind == kData) {
+ if (next_kind == PropertyKind::kData) {
d = Descriptor::DataField(key, current_offset, next_attributes,
next_constness, next_representation,
wrapped_type);
@@ -889,10 +917,11 @@ Handle<DescriptorArray> MapUpdater::BuildDescriptorArray() {
DCHECK_EQ(PropertyConstness::kConst, next_constness);
Handle<Object> value(GetValue(i), isolate_);
- if (next_kind == kData) {
+ Descriptor d;
+ if (next_kind == PropertyKind::kData) {
d = Descriptor::DataConstant(key, value, next_attributes);
} else {
- DCHECK_EQ(kAccessor, next_kind);
+ DCHECK_EQ(PropertyKind::kAccessor, next_kind);
d = Descriptor::AccessorConstant(key, value, next_attributes);
}
new_descriptors->Set(i, &d);
@@ -1056,7 +1085,8 @@ void PrintReconfiguration(Isolate* isolate, Handle<Map> map, FILE* file,
} else {
os << "{symbol " << reinterpret_cast<void*>(name.ptr()) << "}";
}
- os << ": " << (kind == kData ? "kData" : "ACCESSORS") << ", attrs: ";
+ os << ": " << (kind == PropertyKind::kData ? "kData" : "ACCESSORS")
+ << ", attrs: ";
os << attributes << " [";
JavaScriptFrame::PrintTop(isolate, file, false, true);
os << "]\n";
@@ -1071,7 +1101,7 @@ Handle<Map> MapUpdater::ReconfigureExistingProperty(
PropertyConstness constness) {
// Dictionaries have to be reconfigured in-place.
DCHECK(!map->is_dictionary_map());
- DCHECK_EQ(kData, kind); // Only kData case is supported so far.
+ DCHECK_EQ(PropertyKind::kData, kind); // Only kData case is supported so far.
if (!map->GetBackPointer().IsMap()) {
// There is no benefit from reconstructing transition tree for maps without
@@ -1101,7 +1131,7 @@ void MapUpdater::UpdateFieldType(Isolate* isolate, Handle<Map> map,
PropertyDetails details =
map->instance_descriptors(isolate).GetDetails(descriptor);
if (details.location() != PropertyLocation::kField) return;
- DCHECK_EQ(kData, details.kind());
+ DCHECK_EQ(PropertyKind::kData, details.kind());
if (new_constness != details.constness() && map->is_prototype_map()) {
JSObject::InvalidatePrototypeChains(*map);
@@ -1121,7 +1151,7 @@ void MapUpdater::UpdateFieldType(Isolate* isolate, Handle<Map> map,
backlog.push(target);
}
DescriptorArray descriptors = current.instance_descriptors(isolate);
- PropertyDetails details = descriptors.GetDetails(descriptor);
+ details = descriptors.GetDetails(descriptor);
// It is allowed to change representation here only from None
// to something or from Smi or HeapObject to Tagged.
@@ -1191,21 +1221,20 @@ void MapUpdater::GeneralizeField(Isolate* isolate, Handle<Map> map,
UpdateFieldType(isolate, field_owner, modify_index, name, new_constness,
new_representation, wrapped_type);
+ DependentCode::DependencyGroups dep_groups;
if (new_constness != old_constness) {
- field_owner->dependent_code().DeoptimizeDependentCodeGroup(
- DependentCode::kFieldConstGroup);
+ dep_groups |= DependentCode::kFieldConstGroup;
}
-
if (!new_field_type->Equals(*old_field_type)) {
- field_owner->dependent_code().DeoptimizeDependentCodeGroup(
- DependentCode::kFieldTypeGroup);
+ dep_groups |= DependentCode::kFieldTypeGroup;
}
-
if (!new_representation.Equals(old_representation)) {
- field_owner->dependent_code().DeoptimizeDependentCodeGroup(
- DependentCode::kFieldRepresentationGroup);
+ dep_groups |= DependentCode::kFieldRepresentationGroup;
}
+ field_owner->dependent_code().DeoptimizeDependentCodeGroup(isolate,
+ dep_groups);
+
if (FLAG_trace_generalization) {
PrintGeneralization(
isolate, map, stdout, "field type generalization", modify_index,
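The GeneralizeField change just above replaces three separate DeoptimizeDependentCodeGroup calls with a single call over an accumulated DependencyGroups bit set. A small standalone sketch of that flag-accumulation pattern; the enum values and function below are illustrative, not V8's:

#include <cstdio>

// Illustrative dependency groups as bit flags.
enum DependencyGroup : unsigned {
  kFieldConstGroup = 1u << 0,
  kFieldTypeGroup = 1u << 1,
  kFieldRepresentationGroup = 1u << 2,
};

// One deoptimization pass handles every requested group at once.
void DeoptimizeDependentCode(unsigned groups) {
  if (groups & kFieldConstGroup) std::printf("deopt: constness changed\n");
  if (groups & kFieldTypeGroup) std::printf("deopt: field type changed\n");
  if (groups & kFieldRepresentationGroup)
    std::printf("deopt: representation changed\n");
}

int main() {
  unsigned groups = 0;
  bool constness_changed = true, type_changed = false, repr_changed = true;
  if (constness_changed) groups |= kFieldConstGroup;
  if (type_changed) groups |= kFieldTypeGroup;
  if (repr_changed) groups |= kFieldRepresentationGroup;
  DeoptimizeDependentCode(groups);  // single call instead of three
  return 0;
}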
diff --git a/deps/v8/src/objects/map-updater.h b/deps/v8/src/objects/map-updater.h
index 6f022e1d39..6b241a8602 100644
--- a/deps/v8/src/objects/map-updater.h
+++ b/deps/v8/src/objects/map-updater.h
@@ -86,8 +86,9 @@ class V8_EXPORT_PRIVATE MapUpdater {
Representation new_representation,
Handle<FieldType> new_field_type);
- static void ShrinkInstanceSize(base::SharedMutex* map_updater_access, Map map,
- int slack);
+ // Completes inobject slack tracking for the transition tree starting at the
+ // initial map.
+ static void CompleteInobjectSlackTracking(Isolate* isolate, Map initial_map);
private:
enum State {
@@ -227,7 +228,7 @@ class V8_EXPORT_PRIVATE MapUpdater {
// If |modified_descriptor_.is_found()|, then the fields below form
// an "update" of the |old_map_|'s descriptors.
InternalIndex modified_descriptor_ = InternalIndex::NotFound();
- PropertyKind new_kind_ = kData;
+ PropertyKind new_kind_ = PropertyKind::kData;
PropertyAttributes new_attributes_ = NONE;
PropertyConstness new_constness_ = PropertyConstness::kMutable;
PropertyLocation new_location_ = PropertyLocation::kField;
diff --git a/deps/v8/src/objects/map.cc b/deps/v8/src/objects/map.cc
index e2ef2f8ce5..47a0e36ee3 100644
--- a/deps/v8/src/objects/map.cc
+++ b/deps/v8/src/objects/map.cc
@@ -249,7 +249,6 @@ VisitorId Map::GetVisitorId(Map map) {
case JS_CONTEXT_EXTENSION_OBJECT_TYPE:
case JS_DATE_TYPE:
case JS_ERROR_TYPE:
- case JS_FINALIZATION_REGISTRY_TYPE:
case JS_GENERATOR_OBJECT_TYPE:
case JS_ITERATOR_PROTOTYPE_TYPE:
case JS_MAP_ITERATOR_PROTOTYPE_TYPE:
@@ -324,6 +323,9 @@ VisitorId Map::GetVisitorId(Map map) {
case WEAK_CELL_TYPE:
return kVisitWeakCell;
+ case JS_FINALIZATION_REGISTRY_TYPE:
+ return kVisitJSFinalizationRegistry;
+
case FILLER_TYPE:
case FOREIGN_TYPE:
case HEAP_NUMBER_TYPE:
@@ -366,6 +368,8 @@ VisitorId Map::GetVisitorId(Map map) {
return kVisitWasmStruct;
case WASM_TYPE_INFO_TYPE:
return kVisitWasmTypeInfo;
+ case WASM_INTERNAL_FUNCTION_TYPE:
+ return kVisitWasmInternalFunction;
case WASM_JS_FUNCTION_DATA_TYPE:
return kVisitWasmJSFunctionData;
case WASM_API_FUNCTION_REF_TYPE:
@@ -374,6 +378,8 @@ VisitorId Map::GetVisitorId(Map map) {
return kVisitWasmExportedFunctionData;
case WASM_CAPI_FUNCTION_DATA_TYPE:
return kVisitWasmCapiFunctionData;
+ case WASM_SUSPENDER_OBJECT_TYPE:
+ return kVisitWasmSuspenderObject;
#endif // V8_ENABLE_WEBASSEMBLY
#define MAKE_TQ_CASE(TYPE, Name) \
@@ -560,7 +566,7 @@ void Map::DeprecateTransitionTree(Isolate* isolate) {
LOG(isolate, MapEvent("Deprecate", handle(*this, isolate), Handle<Map>()));
}
dependent_code().DeoptimizeDependentCodeGroup(
- DependentCode::kTransitionGroup);
+ isolate, DependentCode::kTransitionGroup);
NotifyLeafMapLayoutChange(isolate);
}
@@ -596,13 +602,14 @@ void Map::ReplaceDescriptors(Isolate* isolate,
Map Map::FindRootMap(Isolate* isolate) const {
DisallowGarbageCollection no_gc;
Map result = *this;
+ PtrComprCageBase cage_base(isolate);
while (true) {
- Object back = result.GetBackPointer(isolate);
+ Object back = result.GetBackPointer(cage_base);
if (back.IsUndefined(isolate)) {
// Initial map must not contain descriptors in the descriptors array
// that do not belong to the map.
DCHECK_LE(result.NumberOfOwnDescriptors(),
- result.instance_descriptors(isolate, kRelaxedLoad)
+ result.instance_descriptors(cage_base, kRelaxedLoad)
.number_of_descriptors());
return result;
}
@@ -650,7 +657,7 @@ Map SearchMigrationTarget(Isolate* isolate, Map old_map) {
for (InternalIndex i : old_map.IterateOwnDescriptors()) {
PropertyDetails old_details = old_descriptors.GetDetails(i);
if (old_details.location() == PropertyLocation::kField &&
- old_details.kind() == kData) {
+ old_details.kind() == PropertyKind::kData) {
FieldType old_type = old_descriptors.GetFieldType(i);
if (Map::FieldTypeIsCleared(old_details.representation(), old_type)) {
return Map();
@@ -724,14 +731,14 @@ Map Map::TryReplayPropertyTransitions(Isolate* isolate, Map old_map,
return Map();
}
if (new_details.location() == PropertyLocation::kField) {
- if (new_details.kind() == kData) {
+ if (new_details.kind() == PropertyKind::kData) {
FieldType new_type = new_descriptors.GetFieldType(i);
// Cleared field types need special treatment. They represent lost
// knowledge, so we must first generalize the new_type to "Any".
if (FieldTypeIsCleared(new_details.representation(), new_type)) {
return Map();
}
- DCHECK_EQ(kData, old_details.kind());
+ DCHECK_EQ(PropertyKind::kData, old_details.kind());
DCHECK_EQ(PropertyLocation::kField, old_details.location());
FieldType old_type = old_descriptors.GetFieldType(i);
if (FieldTypeIsCleared(old_details.representation(), old_type) ||
@@ -739,7 +746,7 @@ Map Map::TryReplayPropertyTransitions(Isolate* isolate, Map old_map,
return Map();
}
} else {
- DCHECK_EQ(kAccessor, new_details.kind());
+ DCHECK_EQ(PropertyKind::kAccessor, new_details.kind());
#ifdef DEBUG
FieldType new_type = new_descriptors.GetFieldType(i);
DCHECK(new_type.IsAny());
@@ -1728,19 +1735,19 @@ bool CanHoldValue(DescriptorArray descriptors, InternalIndex descriptor,
PropertyConstness constness, Object value) {
PropertyDetails details = descriptors.GetDetails(descriptor);
if (details.location() == PropertyLocation::kField) {
- if (details.kind() == kData) {
+ if (details.kind() == PropertyKind::kData) {
return IsGeneralizableTo(constness, details.constness()) &&
value.FitsRepresentation(details.representation()) &&
descriptors.GetFieldType(descriptor).NowContains(value);
} else {
- DCHECK_EQ(kAccessor, details.kind());
+ DCHECK_EQ(PropertyKind::kAccessor, details.kind());
return false;
}
} else {
DCHECK_EQ(PropertyLocation::kDescriptor, details.location());
DCHECK_EQ(PropertyConstness::kConst, details.constness());
- DCHECK_EQ(kAccessor, details.kind());
+ DCHECK_EQ(PropertyKind::kAccessor, details.kind());
return false;
}
UNREACHABLE();
@@ -1796,8 +1803,9 @@ Handle<Map> Map::TransitionToDataProperty(Isolate* isolate, Handle<Map> map,
// Migrate to the newest map before storing the property.
map = Update(isolate, map);
- Map maybe_transition = TransitionsAccessor(isolate, map)
- .SearchTransition(*name, kData, attributes);
+ Map maybe_transition =
+ TransitionsAccessor(isolate, map)
+ .SearchTransition(*name, PropertyKind::kData, attributes);
if (!maybe_transition.is_null()) {
Handle<Map> transition(maybe_transition, isolate);
InternalIndex descriptor = transition->LastAdded();
@@ -1851,7 +1859,7 @@ Handle<Map> Map::TransitionToDataProperty(Isolate* isolate, Handle<Map> map,
// Deoptimize all code that embeds the previous initial map.
initial_map->dependent_code().DeoptimizeDependentCodeGroup(
- DependentCode::kInitialMapChangedGroup);
+ isolate, DependentCode::kInitialMapChangedGroup);
if (!result->EquivalentToForNormalization(*map,
CLEAR_INOBJECT_PROPERTIES)) {
result =
@@ -1891,18 +1899,21 @@ Handle<Map> Map::TransitionToAccessorProperty(Isolate* isolate, Handle<Map> map,
? KEEP_INOBJECT_PROPERTIES
: CLEAR_INOBJECT_PROPERTIES;
- Map maybe_transition = TransitionsAccessor(isolate, map)
- .SearchTransition(*name, kAccessor, attributes);
+ Map maybe_transition =
+ TransitionsAccessor(isolate, map)
+ .SearchTransition(*name, PropertyKind::kAccessor, attributes);
if (!maybe_transition.is_null()) {
Handle<Map> transition(maybe_transition, isolate);
DescriptorArray descriptors = transition->instance_descriptors(isolate);
- InternalIndex descriptor = transition->LastAdded();
- DCHECK(descriptors.GetKey(descriptor).Equals(*name));
+ InternalIndex last_descriptor = transition->LastAdded();
+ DCHECK(descriptors.GetKey(last_descriptor).Equals(*name));
- DCHECK_EQ(kAccessor, descriptors.GetDetails(descriptor).kind());
- DCHECK_EQ(attributes, descriptors.GetDetails(descriptor).attributes());
+ DCHECK_EQ(PropertyKind::kAccessor,
+ descriptors.GetDetails(last_descriptor).kind());
+ DCHECK_EQ(attributes, descriptors.GetDetails(last_descriptor).attributes());
- Handle<Object> maybe_pair(descriptors.GetStrongValue(descriptor), isolate);
+ Handle<Object> maybe_pair(descriptors.GetStrongValue(last_descriptor),
+ isolate);
if (!maybe_pair->IsAccessorPair()) {
return Map::Normalize(isolate, map, mode,
"TransitionToAccessorFromNonPair");
@@ -1924,7 +1935,7 @@ Handle<Map> Map::TransitionToAccessorProperty(Isolate* isolate, Handle<Map> map,
return Map::Normalize(isolate, map, mode, "AccessorsOverwritingNonLast");
}
PropertyDetails old_details = old_descriptors.GetDetails(descriptor);
- if (old_details.kind() != kAccessor) {
+ if (old_details.kind() != PropertyKind::kAccessor) {
return Map::Normalize(isolate, map, mode,
"AccessorsOverwritingNonAccessors");
}
@@ -2145,28 +2156,6 @@ int Map::ComputeMinObjectSlack(Isolate* isolate) {
return slack;
}
-void Map::CompleteInobjectSlackTracking(Isolate* isolate) {
- DisallowGarbageCollection no_gc;
- // Has to be an initial map.
- DCHECK(GetBackPointer().IsUndefined(isolate));
-
- int slack = ComputeMinObjectSlack(isolate);
- TransitionsAccessor transitions(isolate, *this, &no_gc);
- TransitionsAccessor::TraverseCallback callback;
- if (slack != 0) {
- // Resize the initial map and all maps in its transition tree.
- callback = [&](Map map) {
- MapUpdater::ShrinkInstanceSize(isolate->map_updater_access(), map, slack);
- };
- } else {
- callback = [](Map map) {
- // Stop slack tracking for this map.
- map.set_construction_counter(Map::kNoSlackTracking);
- };
- }
- transitions.TraverseTransitionTree(callback);
-}
-
void Map::SetInstanceDescriptors(Isolate* isolate, DescriptorArray descriptors,
int number_of_own_descriptors) {
set_instance_descriptors(descriptors, kReleaseStore);
diff --git a/deps/v8/src/objects/map.h b/deps/v8/src/objects/map.h
index 4e1991579e..fe2cdf150a 100644
--- a/deps/v8/src/objects/map.h
+++ b/deps/v8/src/objects/map.h
@@ -46,6 +46,7 @@ enum InstanceType : uint16_t;
V(JSApiObject) \
V(JSArrayBuffer) \
V(JSDataView) \
+ V(JSFinalizationRegistry) \
V(JSFunction) \
V(JSObject) \
V(JSObjectFast) \
@@ -68,15 +69,17 @@ enum InstanceType : uint16_t;
V(Symbol) \
V(SyntheticModule) \
V(TransitionArray) \
+ IF_WASM(V, WasmApiFunctionRef) \
IF_WASM(V, WasmArray) \
IF_WASM(V, WasmCapiFunctionData) \
IF_WASM(V, WasmExportedFunctionData) \
IF_WASM(V, WasmFunctionData) \
IF_WASM(V, WasmIndirectFunctionTable) \
IF_WASM(V, WasmInstanceObject) \
+ IF_WASM(V, WasmInternalFunction) \
IF_WASM(V, WasmJSFunctionData) \
- IF_WASM(V, WasmApiFunctionRef) \
IF_WASM(V, WasmStruct) \
+ IF_WASM(V, WasmSuspenderObject) \
IF_WASM(V, WasmTypeInfo) \
V(WeakCell)
@@ -353,10 +356,6 @@ class Map : public TorqueGeneratedMap<Map, HeapObject> {
int ComputeMinObjectSlack(Isolate* isolate);
inline int InstanceSizeFromSlack(int slack) const;
- // Completes inobject slack tracking for the transition tree starting at this
- // initial map.
- V8_EXPORT_PRIVATE void CompleteInobjectSlackTracking(Isolate* isolate);
-
// Tells whether the object in the prototype property will be used
// for instances created from this function. If the prototype
// property is set to a value that is not a JSObject, the prototype
diff --git a/deps/v8/src/objects/map.tq b/deps/v8/src/objects/map.tq
index 27b1197f77..a8b367ff82 100644
--- a/deps/v8/src/objects/map.tq
+++ b/deps/v8/src/objects/map.tq
@@ -73,8 +73,8 @@ extern class Map extends HeapObject {
instance_descriptors: DescriptorArray;
dependent_code: DependentCode;
prototype_validity_cell: Smi|Cell;
- weak transitions_or_prototype_info: Map|Weak<Map>|TransitionArray|
- PrototypeInfo|Smi;
+ transitions_or_prototype_info: Map|Weak<Map>|TransitionArray|PrototypeInfo|
+ Smi;
}
@export
diff --git a/deps/v8/src/objects/microtask.h b/deps/v8/src/objects/microtask.h
index 8b1446373c..bc0cd1b95e 100644
--- a/deps/v8/src/objects/microtask.h
+++ b/deps/v8/src/objects/microtask.h
@@ -14,6 +14,8 @@
namespace v8 {
namespace internal {
+class StructBodyDescriptor;
+
#include "torque-generated/src/objects/microtask-tq.inc"
// Abstract base class for all microtasks that can be scheduled on the
@@ -30,6 +32,8 @@ class Microtask : public TorqueGeneratedMicrotask<Microtask, Struct> {
class CallbackTask
: public TorqueGeneratedCallbackTask<CallbackTask, Microtask> {
public:
+ using BodyDescriptor = StructBodyDescriptor;
+
TQ_OBJECT_CONSTRUCTORS(CallbackTask)
};
@@ -43,6 +47,8 @@ class CallableTask
DECL_VERIFIER(CallableTask)
void BriefPrintDetails(std::ostream& os);
+ using BodyDescriptor = StructBodyDescriptor;
+
TQ_OBJECT_CONSTRUCTORS(CallableTask)
};
diff --git a/deps/v8/src/objects/module.cc b/deps/v8/src/objects/module.cc
index 70d5009fc6..5e9349d401 100644
--- a/deps/v8/src/objects/module.cc
+++ b/deps/v8/src/objects/module.cc
@@ -247,15 +247,7 @@ MaybeHandle<Object> Module::Evaluate(Isolate* isolate, Handle<Module> module) {
PrintStatusMessage(*module, "Evaluating module ");
#endif // DEBUG
STACK_CHECK(isolate, MaybeHandle<Object>());
- if (FLAG_harmony_top_level_await) {
- return Module::EvaluateMaybeAsync(isolate, module);
- } else {
- return Module::InnerEvaluate(isolate, module);
- }
-}
-MaybeHandle<Object> Module::EvaluateMaybeAsync(Isolate* isolate,
- Handle<Module> module) {
// In the event of errored evaluation, return a rejected promise.
if (module->status() == kErrored) {
// If we have a top level capability we assume it has already been
@@ -292,32 +284,6 @@ MaybeHandle<Object> Module::EvaluateMaybeAsync(Isolate* isolate,
DCHECK(module->top_level_capability().IsUndefined());
if (module->IsSourceTextModule()) {
- return SourceTextModule::EvaluateMaybeAsync(
- isolate, Handle<SourceTextModule>::cast(module));
- } else {
- return SyntheticModule::Evaluate(isolate,
- Handle<SyntheticModule>::cast(module));
- }
-}
-
-MaybeHandle<Object> Module::InnerEvaluate(Isolate* isolate,
- Handle<Module> module) {
- if (module->status() == kErrored) {
- isolate->Throw(module->GetException());
- return MaybeHandle<Object>();
- } else if (module->status() == kEvaluated) {
- return isolate->factory()->undefined_value();
- }
-
- // InnerEvaluate can be called both to evaluate top level modules without
- // the harmony_top_level_await flag and recursively to evaluate
- // SyntheticModules in the dependency graphs of SourceTextModules.
- //
- // However, SyntheticModules transition directly to 'Evaluated,' so we should
- // never see an 'Evaluating' module at this point.
- CHECK_EQ(module->status(), kLinked);
-
- if (module->IsSourceTextModule()) {
return SourceTextModule::Evaluate(isolate,
Handle<SourceTextModule>::cast(module));
} else {
@@ -378,11 +344,13 @@ Handle<JSModuleNamespace> Module::GetModuleNamespace(Isolate* isolate,
if (name->AsArrayIndex(&index)) {
JSObject::SetNormalizedElement(
ns, index, Accessors::MakeModuleNamespaceEntryInfo(isolate, name),
- PropertyDetails(kAccessor, attr, PropertyCellType::kMutable));
+ PropertyDetails(PropertyKind::kAccessor, attr,
+ PropertyCellType::kMutable));
} else {
JSObject::SetNormalizedProperty(
ns, name, Accessors::MakeModuleNamespaceEntryInfo(isolate, name),
- PropertyDetails(kAccessor, attr, PropertyCellType::kMutable));
+ PropertyDetails(PropertyKind::kAccessor, attr,
+ PropertyCellType::kMutable));
}
}
JSObject::PreventExtensions(ns, kThrowOnError).ToChecked();
diff --git a/deps/v8/src/objects/module.h b/deps/v8/src/objects/module.h
index af603c6d45..0930dfdf91 100644
--- a/deps/v8/src/objects/module.h
+++ b/deps/v8/src/objects/module.h
@@ -116,12 +116,6 @@ class Module : public TorqueGeneratedModule<Module, HeapObject> {
ZoneForwardList<Handle<SourceTextModule>>* stack, unsigned* dfs_index,
Zone* zone);
- static V8_WARN_UNUSED_RESULT MaybeHandle<Object> EvaluateMaybeAsync(
- Isolate* isolate, Handle<Module> module);
-
- static V8_WARN_UNUSED_RESULT MaybeHandle<Object> InnerEvaluate(
- Isolate* isolate, Handle<Module> module);
-
// Set module's status back to kUnlinked and reset other internal state.
// This is used when instantiation fails.
static void Reset(Isolate* isolate, Handle<Module> module);
@@ -181,6 +175,9 @@ class ScriptOrModule
: public TorqueGeneratedScriptOrModule<ScriptOrModule, Struct> {
public:
DECL_PRINTER(ScriptOrModule)
+
+ using BodyDescriptor = StructBodyDescriptor;
+
TQ_OBJECT_CONSTRUCTORS(ScriptOrModule)
};
diff --git a/deps/v8/src/objects/object-list-macros.h b/deps/v8/src/objects/object-list-macros.h
index 40df12d83d..b9d8733043 100644
--- a/deps/v8/src/objects/object-list-macros.h
+++ b/deps/v8/src/objects/object-list-macros.h
@@ -96,11 +96,13 @@ class ZoneForwardList;
V(CompilationCacheTable) \
V(ConsString) \
V(Constructor) \
+ V(Context) \
V(CoverageInfo) \
V(ClosureFeedbackCellArray) \
V(DataHandler) \
V(DeoptimizationData) \
V(DependentCode) \
+ V(DescriptorArray) \
V(EmbedderDataArray) \
V(EphemeronHashTable) \
V(ExternalOneByteString) \
@@ -187,6 +189,7 @@ class ZoneForwardList;
V(NumberWrapper) \
V(ObjectHashSet) \
V(ObjectHashTable) \
+ V(Oddball) \
V(OrderedHashMap) \
V(OrderedHashSet) \
V(OrderedNameDictionary) \
@@ -203,6 +206,7 @@ class ZoneForwardList;
V(SeqOneByteString) \
V(SeqString) \
V(SeqTwoByteString) \
+ V(SharedFunctionInfo) \
V(SimpleNumberDictionary) \
V(SlicedString) \
V(SmallOrderedHashMap) \
@@ -223,8 +227,14 @@ class ZoneForwardList;
V(TemplateList) \
V(ThinString) \
V(TransitionArray) \
+ V(UncompiledData) \
+ V(UncompiledDataWithPreparseData) \
+ V(UncompiledDataWithoutPreparseData) \
+ V(UncompiledDataWithPreparseDataAndJob) \
+ V(UncompiledDataWithoutPreparseDataWithJob) \
V(Undetectable) \
V(UniqueName) \
+ IF_WASM(V, WasmApiFunctionRef) \
IF_WASM(V, WasmArray) \
IF_WASM(V, WasmCapiFunctionData) \
IF_WASM(V, WasmTagObject) \
@@ -232,9 +242,9 @@ class ZoneForwardList;
IF_WASM(V, WasmExportedFunctionData) \
IF_WASM(V, WasmFunctionData) \
IF_WASM(V, WasmGlobalObject) \
+ IF_WASM(V, WasmInternalFunction) \
IF_WASM(V, WasmInstanceObject) \
IF_WASM(V, WasmJSFunctionData) \
- IF_WASM(V, WasmApiFunctionRef) \
IF_WASM(V, WasmMemoryObject) \
IF_WASM(V, WasmModuleObject) \
IF_WASM(V, WasmObject) \
@@ -242,6 +252,7 @@ class ZoneForwardList;
IF_WASM(V, WasmTypeInfo) \
IF_WASM(V, WasmTableObject) \
IF_WASM(V, WasmValueObject) \
+ IF_WASM(V, WasmSuspenderObject) \
V(WeakFixedArray) \
V(WeakArrayList) \
V(WeakCell) \
diff --git a/deps/v8/src/objects/objects-body-descriptors-inl.h b/deps/v8/src/objects/objects-body-descriptors-inl.h
index 5e2373fbb2..23c15fc4b3 100644
--- a/deps/v8/src/objects/objects-body-descriptors-inl.h
+++ b/deps/v8/src/objects/objects-body-descriptors-inl.h
@@ -8,20 +8,28 @@
#include <algorithm>
#include "src/codegen/reloc-info.h"
+#include "src/ic/handler-configuration.h"
#include "src/objects/arguments-inl.h"
+#include "src/objects/bigint.h"
#include "src/objects/cell.h"
#include "src/objects/data-handler.h"
+#include "src/objects/fixed-array.h"
#include "src/objects/foreign-inl.h"
+#include "src/objects/free-space-inl.h"
#include "src/objects/hash-table.h"
+#include "src/objects/heap-number.h"
#include "src/objects/js-collection.h"
#include "src/objects/js-weak-refs.h"
+#include "src/objects/literal-objects.h"
#include "src/objects/megadom-handler-inl.h"
#include "src/objects/objects-body-descriptors.h"
-#include "src/objects/oddball.h"
#include "src/objects/ordered-hash-table-inl.h"
+#include "src/objects/property-descriptor-object.h"
#include "src/objects/source-text-module.h"
+#include "src/objects/stack-frame-info.h"
#include "src/objects/swiss-name-dictionary-inl.h"
#include "src/objects/synthetic-module.h"
+#include "src/objects/template-objects.h"
#include "src/objects/torque-defined-classes-inl.h"
#include "src/objects/transitions.h"
#include "src/objects/turbofan-types-inl.h"
@@ -156,6 +164,34 @@ void BodyDescriptorBase::IterateCustomWeakPointer(HeapObject obj, int offset,
v->VisitCustomWeakPointer(obj, obj.RawField(offset));
}
+class HeapNumber::BodyDescriptor final : public BodyDescriptorBase {
+ public:
+ static bool IsValidSlot(Map map, HeapObject obj, int offset) { return false; }
+
+ template <typename ObjectVisitor>
+ static inline void IterateBody(Map map, HeapObject obj, int object_size,
+ ObjectVisitor* v) {}
+
+ static inline int SizeOf(Map map, HeapObject object) {
+ return HeapNumber::kSize;
+ }
+};
+
+// This is a descriptor for one/two pointer fillers.
+class FreeSpaceFillerBodyDescriptor final : public DataOnlyBodyDescriptor {
+ public:
+ static inline int SizeOf(Map map, HeapObject raw_object) {
+ return map.instance_size();
+ }
+};
+
+class FreeSpace::BodyDescriptor final : public DataOnlyBodyDescriptor {
+ public:
+ static inline int SizeOf(Map map, HeapObject raw_object) {
+ return FreeSpace::unchecked_cast(raw_object).Size();
+ }
+};
+
class JSObject::BodyDescriptor final : public BodyDescriptorBase {
public:
static const int kStartOffset = JSReceiver::kPropertiesOrHashOffset;
@@ -562,7 +598,7 @@ class PrototypeInfo::BodyDescriptor final : public BodyDescriptorBase {
ObjectVisitor* v) {
IteratePointers(obj, HeapObject::kHeaderSize, kObjectCreateMapOffset, v);
IterateMaybeWeakPointer(obj, kObjectCreateMapOffset, v);
- IteratePointers(obj, kObjectCreateMapOffset + kTaggedSize, object_size, v);
+ STATIC_ASSERT(kObjectCreateMapOffset + kTaggedSize == kHeaderSize);
}
static inline int SizeOf(Map map, HeapObject obj) {
@@ -624,24 +660,6 @@ class WasmTypeInfo::BodyDescriptor final : public BodyDescriptorBase {
static inline int SizeOf(Map map, HeapObject object) { return kSize; }
};
-class WasmJSFunctionData::BodyDescriptor final : public BodyDescriptorBase {
- public:
- static bool IsValidSlot(Map map, HeapObject obj, int offset) {
- UNREACHABLE();
- }
-
- template <typename ObjectVisitor>
- static inline void IterateBody(Map map, HeapObject obj, int object_size,
- ObjectVisitor* v) {
- Foreign::BodyDescriptor::IterateBody<ObjectVisitor>(map, obj, object_size,
- v);
- IteratePointers(obj, WasmFunctionData::kStartOfStrongFieldsOffset,
- kEndOfStrongFieldsOffset, v);
- }
-
- static inline int SizeOf(Map map, HeapObject object) { return kSize; }
-};
-
class WasmApiFunctionRef::BodyDescriptor final : public BodyDescriptorBase {
public:
static bool IsValidSlot(Map map, HeapObject obj, int offset) {
@@ -651,8 +669,6 @@ class WasmApiFunctionRef::BodyDescriptor final : public BodyDescriptorBase {
template <typename ObjectVisitor>
static inline void IterateBody(Map map, HeapObject obj, int object_size,
ObjectVisitor* v) {
- Foreign::BodyDescriptor::IterateBody<ObjectVisitor>(map, obj, object_size,
- v);
IteratePointers(obj, kStartOfStrongFieldsOffset, kEndOfStrongFieldsOffset,
v);
}
@@ -660,26 +676,7 @@ class WasmApiFunctionRef::BodyDescriptor final : public BodyDescriptorBase {
static inline int SizeOf(Map map, HeapObject object) { return kSize; }
};
-class WasmExportedFunctionData::BodyDescriptor final
- : public BodyDescriptorBase {
- public:
- static bool IsValidSlot(Map map, HeapObject obj, int offset) {
- UNREACHABLE();
- }
-
- template <typename ObjectVisitor>
- static inline void IterateBody(Map map, HeapObject obj, int object_size,
- ObjectVisitor* v) {
- Foreign::BodyDescriptor::IterateBody<ObjectVisitor>(map, obj, object_size,
- v);
- IteratePointers(obj, WasmFunctionData::kStartOfStrongFieldsOffset,
- kEndOfStrongFieldsOffset, v);
- }
-
- static inline int SizeOf(Map map, HeapObject object) { return kSize; }
-};
-
-class WasmCapiFunctionData::BodyDescriptor final : public BodyDescriptorBase {
+class WasmInternalFunction::BodyDescriptor final : public BodyDescriptorBase {
public:
static bool IsValidSlot(Map map, HeapObject obj, int offset) {
UNREACHABLE();
@@ -690,8 +687,8 @@ class WasmCapiFunctionData::BodyDescriptor final : public BodyDescriptorBase {
ObjectVisitor* v) {
Foreign::BodyDescriptor::IterateBody<ObjectVisitor>(map, obj, object_size,
v);
- IteratePointers(obj, WasmFunctionData::kStartOfStrongFieldsOffset,
- kEndOfStrongFieldsOffset, v);
+ IteratePointers(obj, kStartOfStrongFieldsOffset, kEndOfStrongFieldsOffset,
+ v);
}
static inline int SizeOf(Map map, HeapObject object) { return kSize; }
@@ -994,38 +991,41 @@ class EmbedderDataArray::BodyDescriptor final : public BodyDescriptorBase {
}
};
-template <typename Op, typename ReturnType, typename T1, typename T2,
- typename T3, typename T4>
-ReturnType BodyDescriptorApply(InstanceType type, T1 p1, T2 p2, T3 p3, T4 p4) {
+template <typename Op, typename... Args>
+auto BodyDescriptorApply(InstanceType type, Args&&... args) {
+#define CALL_APPLY(ClassName) \
+ Op::template apply<ClassName::BodyDescriptor>(std::forward<Args>(args)...)
+
if (type < FIRST_NONSTRING_TYPE) {
switch (type & kStringRepresentationMask) {
case kSeqStringTag:
- return ReturnType();
+ if ((type & kStringEncodingMask) == kOneByteStringTag) {
+ return CALL_APPLY(SeqOneByteString);
+ } else {
+ return CALL_APPLY(SeqTwoByteString);
+ }
case kConsStringTag:
- return Op::template apply<ConsString::BodyDescriptor>(p1, p2, p3, p4);
+ return CALL_APPLY(ConsString);
case kThinStringTag:
- return Op::template apply<ThinString::BodyDescriptor>(p1, p2, p3, p4);
+ return CALL_APPLY(ThinString);
case kSlicedStringTag:
- return Op::template apply<SlicedString::BodyDescriptor>(p1, p2, p3, p4);
+ return CALL_APPLY(SlicedString);
case kExternalStringTag:
if ((type & kStringEncodingMask) == kOneByteStringTag) {
- return Op::template apply<ExternalOneByteString::BodyDescriptor>(
- p1, p2, p3, p4);
+ return CALL_APPLY(ExternalOneByteString);
} else {
- return Op::template apply<ExternalTwoByteString::BodyDescriptor>(
- p1, p2, p3, p4);
+ return CALL_APPLY(ExternalTwoByteString);
}
}
UNREACHABLE();
}
if (InstanceTypeChecker::IsJSApiObject(type)) {
- return Op::template apply<JSObject::BodyDescriptor>(p1, p2, p3, p4);
+ return CALL_APPLY(JSObject);
}
switch (type) {
case EMBEDDER_DATA_ARRAY_TYPE:
- return Op::template apply<EmbedderDataArray::BodyDescriptor>(p1, p2, p3,
- p4);
+ return CALL_APPLY(EmbedderDataArray);
case OBJECT_BOILERPLATE_DESCRIPTION_TYPE:
case CLOSURE_FEEDBACK_CELL_ARRAY_TYPE:
case HASH_TABLE_TYPE:
@@ -1037,10 +1037,9 @@ ReturnType BodyDescriptorApply(InstanceType type, T1 p1, T2 p2, T3 p3, T4 p4) {
case NUMBER_DICTIONARY_TYPE:
case SIMPLE_NUMBER_DICTIONARY_TYPE:
case SCRIPT_CONTEXT_TABLE_TYPE:
- return Op::template apply<FixedArray::BodyDescriptor>(p1, p2, p3, p4);
+ return CALL_APPLY(FixedArray);
case EPHEMERON_HASH_TABLE_TYPE:
- return Op::template apply<EphemeronHashTable::BodyDescriptor>(p1, p2, p3,
- p4);
+ return CALL_APPLY(EphemeronHashTable);
case AWAIT_CONTEXT_TYPE:
case BLOCK_CONTEXT_TYPE:
case CATCH_CONTEXT_TYPE:
@@ -1050,42 +1049,38 @@ ReturnType BodyDescriptorApply(InstanceType type, T1 p1, T2 p2, T3 p3, T4 p4) {
case MODULE_CONTEXT_TYPE:
case SCRIPT_CONTEXT_TYPE:
case WITH_CONTEXT_TYPE:
- return Op::template apply<Context::BodyDescriptor>(p1, p2, p3, p4);
+ return CALL_APPLY(Context);
case NATIVE_CONTEXT_TYPE:
- return Op::template apply<NativeContext::BodyDescriptor>(p1, p2, p3, p4);
+ return CALL_APPLY(NativeContext);
case FIXED_DOUBLE_ARRAY_TYPE:
- return ReturnType();
+ return CALL_APPLY(FixedDoubleArray);
case FEEDBACK_METADATA_TYPE:
- return Op::template apply<FeedbackMetadata::BodyDescriptor>(p1, p2, p3,
- p4);
+ return CALL_APPLY(FeedbackMetadata);
case PROPERTY_ARRAY_TYPE:
- return Op::template apply<PropertyArray::BodyDescriptor>(p1, p2, p3, p4);
+ return CALL_APPLY(PropertyArray);
case TRANSITION_ARRAY_TYPE:
- return Op::template apply<TransitionArray::BodyDescriptor>(p1, p2, p3,
- p4);
+ return CALL_APPLY(TransitionArray);
case FEEDBACK_CELL_TYPE:
- return Op::template apply<FeedbackCell::BodyDescriptor>(p1, p2, p3, p4);
+ return CALL_APPLY(FeedbackCell);
case COVERAGE_INFO_TYPE:
- return Op::template apply<CoverageInfo::BodyDescriptor>(p1, p2, p3, p4);
+ return CALL_APPLY(CoverageInfo);
#if V8_ENABLE_WEBASSEMBLY
+ case WASM_API_FUNCTION_REF_TYPE:
+ return CALL_APPLY(WasmApiFunctionRef);
case WASM_ARRAY_TYPE:
- return Op::template apply<WasmArray::BodyDescriptor>(p1, p2, p3, p4);
+ return CALL_APPLY(WasmArray);
case WASM_CAPI_FUNCTION_DATA_TYPE:
- return Op::template apply<WasmCapiFunctionData::BodyDescriptor>(p1, p2,
- p3, p4);
+ return CALL_APPLY(WasmCapiFunctionData);
case WASM_EXPORTED_FUNCTION_DATA_TYPE:
- return Op::template apply<WasmExportedFunctionData::BodyDescriptor>(
- p1, p2, p3, p4);
+ return CALL_APPLY(WasmExportedFunctionData);
+ case WASM_INTERNAL_FUNCTION_TYPE:
+ return CALL_APPLY(WasmInternalFunction);
case WASM_JS_FUNCTION_DATA_TYPE:
- return Op::template apply<WasmJSFunctionData::BodyDescriptor>(p1, p2, p3,
- p4);
- case WASM_API_FUNCTION_REF_TYPE:
- return Op::template apply<WasmApiFunctionRef::BodyDescriptor>(p1, p2, p3,
- p4);
+ return CALL_APPLY(WasmJSFunctionData);
case WASM_STRUCT_TYPE:
- return Op::template apply<WasmStruct::BodyDescriptor>(p1, p2, p3, p4);
+ return CALL_APPLY(WasmStruct);
case WASM_TYPE_INFO_TYPE:
- return Op::template apply<WasmTypeInfo::BodyDescriptor>(p1, p2, p3, p4);
+ return CALL_APPLY(WasmTypeInfo);
#endif // V8_ENABLE_WEBASSEMBLY
case JS_API_OBJECT_TYPE:
case JS_ARGUMENTS_OBJECT_TYPE:
@@ -1162,129 +1157,116 @@ ReturnType BodyDescriptorApply(InstanceType type, T1 p1, T2 p2, T3 p3, T4 p4) {
case JS_SEGMENTS_TYPE:
#endif // V8_INTL_SUPPORT
#if V8_ENABLE_WEBASSEMBLY
- case WASM_TAG_OBJECT_TYPE:
case WASM_GLOBAL_OBJECT_TYPE:
case WASM_MEMORY_OBJECT_TYPE:
case WASM_MODULE_OBJECT_TYPE:
+ case WASM_SUSPENDER_OBJECT_TYPE:
case WASM_TABLE_OBJECT_TYPE:
+ case WASM_TAG_OBJECT_TYPE:
case WASM_VALUE_OBJECT_TYPE:
#endif // V8_ENABLE_WEBASSEMBLY
- return Op::template apply<JSObject::BodyDescriptor>(p1, p2, p3, p4);
+ return CALL_APPLY(JSObject);
#if V8_ENABLE_WEBASSEMBLY
case WASM_INSTANCE_OBJECT_TYPE:
- return Op::template apply<WasmInstanceObject::BodyDescriptor>(p1, p2, p3,
- p4);
+ return CALL_APPLY(WasmInstanceObject);
#endif // V8_ENABLE_WEBASSEMBLY
case JS_WEAK_MAP_TYPE:
case JS_WEAK_SET_TYPE:
- return Op::template apply<JSWeakCollection::BodyDescriptor>(p1, p2, p3,
- p4);
+ return CALL_APPLY(JSWeakCollection);
case JS_ARRAY_BUFFER_TYPE:
- return Op::template apply<JSArrayBuffer::BodyDescriptor>(p1, p2, p3, p4);
+ return CALL_APPLY(JSArrayBuffer);
case JS_DATA_VIEW_TYPE:
- return Op::template apply<JSDataView::BodyDescriptor>(p1, p2, p3, p4);
+ return CALL_APPLY(JSDataView);
case JS_TYPED_ARRAY_TYPE:
- return Op::template apply<JSTypedArray::BodyDescriptor>(p1, p2, p3, p4);
+ return CALL_APPLY(JSTypedArray);
case WEAK_CELL_TYPE:
- return Op::template apply<WeakCell::BodyDescriptor>(p1, p2, p3, p4);
+ return CALL_APPLY(WeakCell);
case JS_WEAK_REF_TYPE:
- return Op::template apply<JSWeakRef::BodyDescriptor>(p1, p2, p3, p4);
+ return CALL_APPLY(JSWeakRef);
case JS_PROXY_TYPE:
- return Op::template apply<JSProxy::BodyDescriptor>(p1, p2, p3, p4);
+ return CALL_APPLY(JSProxy);
case FOREIGN_TYPE:
- return Op::template apply<Foreign::BodyDescriptor>(p1, p2, p3, p4);
+ return CALL_APPLY(Foreign);
case MAP_TYPE:
- return Op::template apply<Map::BodyDescriptor>(p1, p2, p3, p4);
+ return CALL_APPLY(Map);
case CODE_TYPE:
- return Op::template apply<Code::BodyDescriptor>(p1, p2, p3, p4);
+ return CALL_APPLY(Code);
case CELL_TYPE:
- return Op::template apply<Cell::BodyDescriptor>(p1, p2, p3, p4);
+ return CALL_APPLY(Cell);
case PROPERTY_CELL_TYPE:
- return Op::template apply<PropertyCell::BodyDescriptor>(p1, p2, p3, p4);
+ return CALL_APPLY(PropertyCell);
case SYMBOL_TYPE:
- return Op::template apply<Symbol::BodyDescriptor>(p1, p2, p3, p4);
+ return CALL_APPLY(Symbol);
case BYTECODE_ARRAY_TYPE:
- return Op::template apply<BytecodeArray::BodyDescriptor>(p1, p2, p3, p4);
+ return CALL_APPLY(BytecodeArray);
case SMALL_ORDERED_HASH_SET_TYPE:
- return Op::template apply<
- SmallOrderedHashTable<SmallOrderedHashSet>::BodyDescriptor>(p1, p2,
- p3, p4);
+ return CALL_APPLY(SmallOrderedHashTable<SmallOrderedHashSet>);
case SMALL_ORDERED_HASH_MAP_TYPE:
- return Op::template apply<
- SmallOrderedHashTable<SmallOrderedHashMap>::BodyDescriptor>(p1, p2,
- p3, p4);
+ return CALL_APPLY(SmallOrderedHashTable<SmallOrderedHashMap>);
case SMALL_ORDERED_NAME_DICTIONARY_TYPE:
- return Op::template apply<
- SmallOrderedHashTable<SmallOrderedNameDictionary>::BodyDescriptor>(
- p1, p2, p3, p4);
-
+ return CALL_APPLY(SmallOrderedHashTable<SmallOrderedNameDictionary>);
case SWISS_NAME_DICTIONARY_TYPE:
- return Op::template apply<SwissNameDictionary::BodyDescriptor>(p1, p2, p3,
- p4);
-
+ return CALL_APPLY(SwissNameDictionary);
case CODE_DATA_CONTAINER_TYPE:
- return Op::template apply<CodeDataContainer::BodyDescriptor>(p1, p2, p3,
- p4);
+ return CALL_APPLY(CodeDataContainer);
case PREPARSE_DATA_TYPE:
- return Op::template apply<PreparseData::BodyDescriptor>(p1, p2, p3, p4);
+ return CALL_APPLY(PreparseData);
case HEAP_NUMBER_TYPE:
- case FILLER_TYPE:
+ return CALL_APPLY(HeapNumber);
case BYTE_ARRAY_TYPE:
- case FREE_SPACE_TYPE:
+        return CALL_APPLY(ByteArray);
case BIGINT_TYPE:
- return ReturnType();
+ return CALL_APPLY(BigInt);
case ALLOCATION_SITE_TYPE:
- return Op::template apply<AllocationSite::BodyDescriptor>(p1, p2, p3, p4);
+ return CALL_APPLY(AllocationSite);
-#define MAKE_STRUCT_CASE(TYPE, Name, name) case TYPE:
+#define MAKE_STRUCT_CASE(TYPE, Name, name) \
+ case TYPE: \
+ return CALL_APPLY(Name);
STRUCT_LIST(MAKE_STRUCT_CASE)
#undef MAKE_STRUCT_CASE
- if (type == PROTOTYPE_INFO_TYPE) {
- return Op::template apply<PrototypeInfo::BodyDescriptor>(p1, p2, p3,
- p4);
- }
-#if V8_ENABLE_WEBASSEMBLY
- if (type == WASM_INDIRECT_FUNCTION_TABLE_TYPE) {
- return Op::template apply<WasmIndirectFunctionTable::BodyDescriptor>(
- p1, p2, p3, p4);
- }
-#endif // V8_ENABLE_WEBASSEMBLY
- return Op::template apply<StructBodyDescriptor>(p1, p2, p3, p4);
case CALL_HANDLER_INFO_TYPE:
- return Op::template apply<StructBodyDescriptor>(p1, p2, p3, p4);
+ return CALL_APPLY(CallHandlerInfo);
case LOAD_HANDLER_TYPE:
+ return CALL_APPLY(LoadHandler);
case STORE_HANDLER_TYPE:
- return Op::template apply<DataHandler::BodyDescriptor>(p1, p2, p3, p4);
+ return CALL_APPLY(StoreHandler);
case SOURCE_TEXT_MODULE_TYPE:
- return Op::template apply<SourceTextModule::BodyDescriptor>(p1, p2, p3,
- p4);
+ return CALL_APPLY(SourceTextModule);
case SYNTHETIC_MODULE_TYPE:
- return Op::template apply<SyntheticModule::BodyDescriptor>(p1, p2, p3,
- p4);
+ return CALL_APPLY(SyntheticModule);
// TODO(turbofan): Avoid duplicated cases when the body descriptors are
// identical.
#define MAKE_TORQUE_BODY_DESCRIPTOR_APPLY(TYPE, TypeName) \
case TYPE: \
- return Op::template apply<TypeName::BodyDescriptor>(p1, p2, p3, p4);
+ return CALL_APPLY(TypeName);
TORQUE_INSTANCE_TYPE_TO_BODY_DESCRIPTOR_LIST(
MAKE_TORQUE_BODY_DESCRIPTOR_APPLY)
#undef MAKE_TORQUE_BODY_DESCRIPTOR_APPLY
+ case FILLER_TYPE:
+ return Op::template apply<FreeSpaceFillerBodyDescriptor>(
+ std::forward<Args>(args)...);
+
+ case FREE_SPACE_TYPE:
+ return CALL_APPLY(FreeSpace);
+
default:
PrintF("Unknown type: %d\n", type);
UNREACHABLE();
}
+#undef CALL_APPLY
}
template <typename ObjectVisitor>
-void HeapObject::IterateFast(ObjectVisitor* v) {
+void HeapObject::IterateFast(PtrComprCageBase cage_base, ObjectVisitor* v) {
v->VisitMapPointer(*this);
- IterateBodyFast(v);
+ IterateBodyFast(cage_base, v);
}
template <typename ObjectVisitor>
-void HeapObject::IterateBodyFast(ObjectVisitor* v) {
- Map m = map();
+void HeapObject::IterateBodyFast(PtrComprCageBase cage_base, ObjectVisitor* v) {
+ Map m = map(cage_base);
IterateBodyFast(m, SizeFromMap(m), v);
}
@@ -1298,8 +1280,8 @@ struct CallIterateBody {
template <typename ObjectVisitor>
void HeapObject::IterateBodyFast(Map map, int object_size, ObjectVisitor* v) {
- BodyDescriptorApply<CallIterateBody, void>(map.instance_type(), map, *this,
- object_size, v);
+ BodyDescriptorApply<CallIterateBody>(map.instance_type(), map, *this,
+ object_size, v);
}
class EphemeronHashTable::BodyDescriptor final : public BodyDescriptorBase {
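The BodyDescriptorApply rewrite above swaps the fixed p1..p4 parameters for a perfect-forwarding parameter pack plus a local CALL_APPLY macro, so each case no longer has to spell out the argument list. A compilable miniature of the same refactoring; the descriptor types and the dispatch keys are made up for illustration:

#include <cstdio>
#include <utility>

struct FixedArrayLikeDescriptor {
  static int SizeOf(int length, int tagged_size) { return length * tagged_size; }
};
struct HeapNumberLikeDescriptor {
  static int SizeOf(int /*length*/, int /*tagged_size*/) { return 16; }
};

// The "Op" decides what to do with the chosen descriptor; apply<> forwards
// whatever arguments the caller passed, so adding a parameter later does not
// require editing every case label.
struct GetSizeOp {
  template <typename Descriptor, typename... Args>
  static int apply(Args&&... args) {
    return Descriptor::SizeOf(std::forward<Args>(args)...);
  }
};

template <typename Op, typename... Args>
auto Dispatch(int instance_type, Args&&... args) {
#define CALL_APPLY(Desc) Op::template apply<Desc>(std::forward<Args>(args)...)
  switch (instance_type) {
    case 0:
      return CALL_APPLY(FixedArrayLikeDescriptor);
    default:
      return CALL_APPLY(HeapNumberLikeDescriptor);
  }
#undef CALL_APPLY
}

int main() {
  std::printf("%d %d\n", Dispatch<GetSizeOp>(0, 4, 8),
              Dispatch<GetSizeOp>(1, 0, 0));
  return 0;
}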
diff --git a/deps/v8/src/objects/objects-body-descriptors.h b/deps/v8/src/objects/objects-body-descriptors.h
index 8135e1f170..455578940b 100644
--- a/deps/v8/src/objects/objects-body-descriptors.h
+++ b/deps/v8/src/objects/objects-body-descriptors.h
@@ -152,7 +152,11 @@ class FlexibleBodyDescriptor : public SuffixRangeBodyDescriptor<start_offset> {
static inline int SizeOf(Map map, HeapObject object);
};
-using StructBodyDescriptor = FlexibleBodyDescriptor<HeapObject::kHeaderSize>;
+// A forward-declarable descriptor body alias for most of the Struct successors.
+class StructBodyDescriptor
+ : public FlexibleBodyDescriptor<HeapObject::kHeaderSize> {
+ public:
+};
// This class describes a body of an object in which all pointer fields are
// located in the [start_offset, object_size) interval.
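The hunk above turns StructBodyDescriptor from a `using` alias into an empty derived class. The payoff is forward-declarability: headers elsewhere in this patch (microtask.h, literal-objects.h, module.h) only write `class StructBodyDescriptor;`, which is legal for a class but impossible for an alias of a template instantiation. A minimal illustration with stand-in types:

// A forward declaration is all a lightweight header needs.
class StructBodyDescriptor;

struct SomeStructLikeObject {
  using BodyDescriptor = StructBodyDescriptor;  // fine with an incomplete type
};

// The full definition lives in a heavier header, included only where needed.
template <int kStartOffset>
class FlexibleBodyDescriptor {
 public:
  static constexpr int kStart = kStartOffset;
};

class StructBodyDescriptor : public FlexibleBodyDescriptor<8> {};

int main() { return StructBodyDescriptor::kStart == 8 ? 0 : 1; }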
diff --git a/deps/v8/src/objects/objects-definitions.h b/deps/v8/src/objects/objects-definitions.h
index d524531c32..c31db239c5 100644
--- a/deps/v8/src/objects/objects-definitions.h
+++ b/deps/v8/src/objects/objects-definitions.h
@@ -49,7 +49,11 @@ namespace internal {
V(SLICED_ONE_BYTE_STRING_TYPE) \
V(THIN_ONE_BYTE_STRING_TYPE) \
V(UNCACHED_EXTERNAL_STRING_TYPE) \
- V(UNCACHED_EXTERNAL_ONE_BYTE_STRING_TYPE)
+ V(UNCACHED_EXTERNAL_ONE_BYTE_STRING_TYPE) \
+ V(SHARED_STRING_TYPE) \
+ V(SHARED_THIN_STRING_TYPE) \
+ V(SHARED_ONE_BYTE_STRING_TYPE) \
+ V(SHARED_THIN_ONE_BYTE_STRING_TYPE)
#define INSTANCE_TYPE_LIST(V) \
INSTANCE_TYPE_LIST_BASE(V) \
@@ -94,7 +98,15 @@ namespace internal {
UncachedExternalOneByteInternalizedString) \
V(THIN_STRING_TYPE, ThinString::kSize, thin_string, ThinString) \
V(THIN_ONE_BYTE_STRING_TYPE, ThinString::kSize, thin_one_byte_string, \
- ThinOneByteString)
+ ThinOneByteString) \
+ \
+ V(SHARED_STRING_TYPE, kVariableSizeSentinel, shared_string, SharedString) \
+ V(SHARED_ONE_BYTE_STRING_TYPE, kVariableSizeSentinel, \
+ shared_one_byte_string, SharedOneByteString) \
+ V(SHARED_THIN_STRING_TYPE, ThinString::kSize, shared_thin_string, \
+ SharedThinString) \
+ V(SHARED_THIN_ONE_BYTE_STRING_TYPE, ThinString::kSize, \
+ shared_thin_one_byte_string, SharedThinOneByteString)
// A struct is a simple object with a set of object-valued fields. Including an
// object type in this causes the compiler to generate most of the boilerplate
diff --git a/deps/v8/src/objects/objects-inl.h b/deps/v8/src/objects/objects-inl.h
index e45ac5255b..c079675d11 100644
--- a/deps/v8/src/objects/objects-inl.h
+++ b/deps/v8/src/objects/objects-inl.h
@@ -145,6 +145,20 @@ bool Object::IsNoSharedNameSentinel() const {
return *this == SharedFunctionInfo::kNoSharedNameSentinel;
}
+template <class T,
+ typename std::enable_if<(std::is_arithmetic<T>::value ||
+ std::is_enum<T>::value) &&
+ !std::is_floating_point<T>::value,
+ int>::type>
+T Object::Relaxed_ReadField(size_t offset) const {
+ // Pointer compression causes types larger than kTaggedSize to be
+ // unaligned. Atomic loads must be aligned.
+ DCHECK_IMPLIES(COMPRESS_POINTERS_BOOL, sizeof(T) <= kTaggedSize);
+ using AtomicT = typename base::AtomicTypeFromByteWidth<sizeof(T)>::type;
+ return static_cast<T>(base::AsAtomicImpl<AtomicT>::Relaxed_Load(
+ reinterpret_cast<AtomicT*>(field_address(offset))));
+}
+
bool HeapObject::InSharedHeap() const {
if (IsReadOnlyHeapObject(*this)) return V8_SHARED_RO_HEAP_BOOL;
return InSharedWritableHeap();
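The new Object::Relaxed_ReadField template above loads a small scalar field with relaxed atomic semantics; its DCHECK records that under pointer compression only fields no wider than kTaggedSize are guaranteed to be aligned for such loads. A simplified sketch of the idea, assuming the field is an actual std::atomic member rather than a raw field address reinterpreted through base::AsAtomicImpl as in the real code:

#include <atomic>
#include <cstdint>
#include <type_traits>

// Relaxed load of a small integral/enum field: no ordering guarantees, just a
// tear-free read of a value that another thread may be writing.
template <typename T, typename = std::enable_if_t<std::is_integral_v<T> ||
                                                  std::is_enum_v<T>>>
T RelaxedReadField(const std::atomic<T>& field) {
  static_assert(sizeof(T) <= sizeof(void*), "keep fields word-sized or smaller");
  return field.load(std::memory_order_relaxed);
}

int main() {
  std::atomic<uint16_t> bit_field3{0x2a};
  return RelaxedReadField(bit_field3) == 0x2a ? 0 : 1;
}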
@@ -299,35 +313,30 @@ DEF_GETTER(HeapObject, IsDeoptimizationData, bool) {
}
DEF_GETTER(HeapObject, IsHandlerTable, bool) {
- if (!IsFixedArrayExact(cage_base)) return false;
- // There's actually no way to see the difference between a fixed array and
- // a handler table array.
- return true;
+ return IsFixedArrayExact(cage_base);
}
DEF_GETTER(HeapObject, IsTemplateList, bool) {
if (!IsFixedArrayExact(cage_base)) return false;
- // There's actually no way to see the difference between a fixed array and
- // a template list.
if (FixedArray::cast(*this).length() < 1) return false;
return true;
}
DEF_GETTER(HeapObject, IsDependentCode, bool) {
- if (!IsWeakFixedArray(cage_base)) return false;
- // There's actually no way to see the difference between a weak fixed array
- // and a dependent codes array.
- return true;
+ return IsWeakArrayList(cage_base);
}
DEF_GETTER(HeapObject, IsOSROptimizedCodeCache, bool) {
- if (!IsWeakFixedArray(cage_base)) return false;
- // There's actually no way to see the difference between a weak fixed array
- // and a osr optimized code cache.
- return true;
+ return IsWeakFixedArray(cage_base);
}
-DEF_GETTER(HeapObject, IsAbstractCode, bool) {
+bool HeapObject::IsAbstractCode() const {
+ // TODO(v8:11880): Either make AbstractCode be ByteArray|CodeT or
+ // ensure this version is not called for hot code.
+ PtrComprCageBase cage_base = GetPtrComprCageBaseSlow(*this);
+ return HeapObject::IsAbstractCode(cage_base);
+}
+bool HeapObject::IsAbstractCode(PtrComprCageBase cage_base) const {
return IsBytecodeArray(cage_base) || IsCode(cage_base);
}
@@ -635,7 +644,6 @@ MaybeHandle<Object> Object::SetElement(Isolate* isolate, Handle<Object> object,
return value;
}
-#ifdef V8_CAGED_POINTERS
Address Object::ReadCagedPointerField(size_t offset,
PtrComprCageBase cage_base) const {
return i::ReadCagedPointerField(field_address(offset), cage_base);
@@ -651,7 +659,6 @@ void Object::WriteCagedPointerField(size_t offset, Isolate* isolate,
i::WriteCagedPointerField(field_address(offset), PtrComprCageBase(isolate),
value);
}
-#endif // V8_CAGED_POINTERS
void Object::InitExternalPointerField(size_t offset, Isolate* isolate) {
i::InitExternalPointerField(field_address(offset), isolate);
@@ -763,19 +770,10 @@ ReadOnlyRoots HeapObject::GetReadOnlyRoots(PtrComprCageBase cage_base) const {
}
Map HeapObject::map() const {
- if (V8_EXTERNAL_CODE_SPACE_BOOL) {
- // TODO(v8:11880): Ensure that cage friendly version is used for the cases
- // when this could be a Code object. Replace this with
- // DCHECK_IMPLIES(V8_EXTERNAL_CODE_SPACE_BOOL, !IsCodeObject(*this));
- Isolate* isolate;
- if (GetIsolateFromHeapObject(*this, &isolate)) {
- PtrComprCageBase cage_base(isolate);
- return HeapObject::map(cage_base);
- }
- // If the Isolate can't be obtained then the heap object is a read-only
- // one and therefore not a Code object, so fallback to auto-computing cage
- // base value.
- }
+ // This method is never used for objects located in code space (Code and
+ // free space fillers) and thus it is fine to use auto-computed cage base
+ // value.
+ DCHECK_IMPLIES(V8_EXTERNAL_CODE_SPACE_BOOL, !IsCodeSpaceObject(*this));
PtrComprCageBase cage_base = GetPtrComprCageBase(*this);
return HeapObject::map(cage_base);
}
@@ -856,19 +854,10 @@ ObjectSlot HeapObject::map_slot() const {
}
MapWord HeapObject::map_word(RelaxedLoadTag tag) const {
- if (V8_EXTERNAL_CODE_SPACE_BOOL) {
- // TODO(v8:11880): Ensure that cage friendly version is used for the cases
- // when this could be a Code object. Replace this with
- // DCHECK_IMPLIES(V8_EXTERNAL_CODE_SPACE_BOOL, !IsCodeObject(*this));
- Isolate* isolate;
- if (GetIsolateFromHeapObject(*this, &isolate)) {
- PtrComprCageBase cage_base(isolate);
- return HeapObject::map_word(cage_base, tag);
- }
- // If the Isolate can't be obtained then the heap object is a read-only
- // one and therefore not a Code object, so fallback to auto-computing cage
- // base value.
- }
+ // This method is never used for objects located in code space (Code and
+ // free space fillers) and thus it is fine to use auto-computed cage base
+ // value.
+ DCHECK_IMPLIES(V8_EXTERNAL_CODE_SPACE_BOOL, !IsCodeSpaceObject(*this));
PtrComprCageBase cage_base = GetPtrComprCageBase(*this);
return HeapObject::map_word(cage_base, tag);
}
@@ -882,9 +871,10 @@ void HeapObject::set_map_word(MapWord map_word, RelaxedStoreTag) {
}
MapWord HeapObject::map_word(AcquireLoadTag tag) const {
- // This method is never used for Code objects and thus it is fine to use
- // auto-computed cage base value.
- DCHECK_IMPLIES(V8_EXTERNAL_CODE_SPACE_BOOL, !IsCodeObject(*this));
+ // This method is never used for objects located in code space (Code and
+ // free space fillers) and thus it is fine to use auto-computed cage base
+ // value.
+ DCHECK_IMPLIES(V8_EXTERNAL_CODE_SPACE_BOOL, !IsCodeSpaceObject(*this));
PtrComprCageBase cage_base = GetPtrComprCageBase(*this);
return HeapObject::map_word(cage_base, tag);
}
@@ -904,7 +894,15 @@ bool HeapObject::release_compare_and_swap_map_word(MapWord old_map_word,
return result == static_cast<Tagged_t>(old_map_word.ptr());
}
-int HeapObject::Size() const { return SizeFromMap(map()); }
+// TODO(v8:11880): consider dropping parameterless version.
+int HeapObject::Size() const {
+ DCHECK_IMPLIES(V8_EXTERNAL_CODE_SPACE_BOOL, !IsCodeSpaceObject(*this));
+ PtrComprCageBase cage_base = GetPtrComprCageBase(*this);
+ return HeapObject::Size(cage_base);
+}
+int HeapObject::Size(PtrComprCageBase cage_base) const {
+ return SizeFromMap(map(cage_base));
+}
inline bool IsSpecialReceiverInstanceType(InstanceType instance_type) {
return instance_type <= LAST_SPECIAL_RECEIVER_TYPE;
@@ -967,21 +965,16 @@ WriteBarrierMode HeapObject::GetWriteBarrierMode(
// static
AllocationAlignment HeapObject::RequiredAlignment(Map map) {
- // TODO(bmeurer, v8:4153): We should think about requiring double alignment
+ // TODO(v8:4153): We should think about requiring double alignment
// in general for ByteArray, since they are used as backing store for typed
// arrays now.
-#ifdef V8_COMPRESS_POINTERS
- // TODO(ishell, v8:8875): Consider using aligned allocations once the
- // allocation alignment inconsistency is fixed. For now we keep using
- // unaligned access since both x64 and arm64 architectures (where pointer
- // compression is supported) allow unaligned access to doubles and full words.
-#endif // V8_COMPRESS_POINTERS
-#ifdef V8_HOST_ARCH_32_BIT
- int instance_type = map.instance_type();
- if (instance_type == FIXED_DOUBLE_ARRAY_TYPE) return kDoubleAligned;
- if (instance_type == HEAP_NUMBER_TYPE) return kDoubleUnaligned;
-#endif // V8_HOST_ARCH_32_BIT
- return kWordAligned;
+ // TODO(ishell, v8:8875): Consider using aligned allocations for BigInt.
+ if (USE_ALLOCATION_ALIGNMENT_BOOL) {
+ int instance_type = map.instance_type();
+ if (instance_type == FIXED_DOUBLE_ARRAY_TYPE) return kDoubleAligned;
+ if (instance_type == HEAP_NUMBER_TYPE) return kDoubleUnaligned;
+ }
+ return kTaggedAligned;
}
Address HeapObject::GetFieldAddress(int field_offset) const {
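The Relaxed_ReadField template added earlier in this file's diff picks an atomic type matching sizeof(T) and performs a relaxed load at a raw field address. A minimal sketch of the same idea in portable C++, assuming C++20 std::atomic_ref and a caller-guaranteed alignment; the helper name is illustrative, not V8's API:

#include <atomic>
#include <type_traits>

template <class T>
T RelaxedReadField(T& field) {
  static_assert(std::is_integral<T>::value || std::is_enum<T>::value,
                "only integral or enum fields are read this way");
  // The caller guarantees the field is aligned to
  // std::atomic_ref<T>::required_alignment; the V8 hunk asserts
  // sizeof(T) <= kTaggedSize under pointer compression for the same reason.
  return std::atomic_ref<T>(field).load(std::memory_order_relaxed);
}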
diff --git a/deps/v8/src/objects/objects.cc b/deps/v8/src/objects/objects.cc
index 3d4e6cf399..1d180f7e30 100644
--- a/deps/v8/src/objects/objects.cc
+++ b/deps/v8/src/objects/objects.cc
@@ -452,12 +452,12 @@ Handle<String> NoSideEffectsErrorToString(Isolate* isolate,
IncrementalStringBuilder builder(isolate);
builder.AppendString(name_str);
- builder.AppendCString(": ");
+ builder.AppendCStringLiteral(": ");
if (builder.Length() + msg_str->length() <= String::kMaxLength) {
builder.AppendString(msg_str);
} else {
- builder.AppendCString("<a very large string>");
+ builder.AppendCStringLiteral("<a very large string>");
}
return builder.Finish().ToHandleChecked();
@@ -501,7 +501,7 @@ MaybeHandle<String> Object::NoSideEffectsToMaybeString(Isolate* isolate,
if (fun_str->length() > 128) {
IncrementalStringBuilder builder(isolate);
builder.AppendString(isolate->factory()->NewSubString(fun_str, 0, 111));
- builder.AppendCString("...<omitted>...");
+ builder.AppendCStringLiteral("...<omitted>...");
builder.AppendString(isolate->factory()->NewSubString(
fun_str, fun_str->length() - 2, fun_str->length()));
@@ -517,7 +517,7 @@ MaybeHandle<String> Object::NoSideEffectsToMaybeString(Isolate* isolate,
}
IncrementalStringBuilder builder(isolate);
- builder.AppendCString("Symbol(");
+ builder.AppendCStringLiteral("Symbol(");
if (symbol->description().IsString()) {
builder.AppendString(
handle(String::cast(symbol->description()), isolate));
@@ -555,9 +555,9 @@ MaybeHandle<String> Object::NoSideEffectsToMaybeString(Isolate* isolate,
if (ctor_name->length() != 0) {
IncrementalStringBuilder builder(isolate);
- builder.AppendCString("#<");
+ builder.AppendCStringLiteral("#<");
builder.AppendString(ctor_name);
- builder.AppendCString(">");
+ builder.AppendCharacter('>');
return builder.Finish().ToHandleChecked();
}
@@ -603,9 +603,9 @@ Handle<String> Object::NoSideEffectsToString(Isolate* isolate,
tag_obj->IsString() ? Handle<String>::cast(tag_obj) : builtin_tag;
IncrementalStringBuilder builder(isolate);
- builder.AppendCString("[object ");
+ builder.AppendCStringLiteral("[object ");
builder.AppendString(tag);
- builder.AppendCString("]");
+ builder.AppendCharacter(']');
return builder.Finish().ToHandleChecked();
}
@@ -1838,23 +1838,24 @@ std::ostream& operator<<(std::ostream& os, const Brief& v) {
void Smi::SmiPrint(std::ostream& os) const { os << value(); }
void HeapObject::HeapObjectShortPrint(std::ostream& os) {
+ PtrComprCageBase cage_base = GetPtrComprCageBaseSlow(*this);
os << AsHex::Address(this->ptr()) << " ";
- if (IsString()) {
+ if (IsString(cage_base)) {
HeapStringAllocator allocator;
StringStream accumulator(&allocator);
String::cast(*this).StringShortPrint(&accumulator);
os << accumulator.ToCString().get();
return;
}
- if (IsJSObject()) {
+ if (IsJSObject(cage_base)) {
HeapStringAllocator allocator;
StringStream accumulator(&allocator);
JSObject::cast(*this).JSObjectShortPrint(&accumulator);
os << accumulator.ToCString().get();
return;
}
- switch (map().instance_type()) {
+ switch (map(cage_base).instance_type()) {
case MAP_TYPE: {
os << "<Map";
Map mapInstance = Map::cast(*this);
@@ -1968,8 +1969,6 @@ void HeapObject::HeapObjectShortPrint(std::ostream& os) {
os << "<FeedbackCell[";
if (map() == roots.no_closures_cell_map()) {
os << "no feedback";
- } else if (map() == roots.no_closures_cell_map()) {
- os << "no closures";
} else if (map() == roots.one_closure_cell_map()) {
os << "one closure";
} else if (map() == roots.many_closures_cell_map()) {
@@ -2158,10 +2157,12 @@ void CallableTask::BriefPrintDetails(std::ostream& os) {
os << " callable=" << Brief(callable());
}
-void HeapObject::Iterate(ObjectVisitor* v) { IterateFast<ObjectVisitor>(v); }
+void HeapObject::Iterate(PtrComprCageBase cage_base, ObjectVisitor* v) {
+ IterateFast<ObjectVisitor>(cage_base, v);
+}
-void HeapObject::IterateBody(ObjectVisitor* v) {
- Map m = map();
+void HeapObject::IterateBody(PtrComprCageBase cage_base, ObjectVisitor* v) {
+ Map m = map(cage_base);
IterateBodyFast<ObjectVisitor>(m, SizeFromMap(m), v);
}
@@ -2171,15 +2172,15 @@ void HeapObject::IterateBody(Map map, int object_size, ObjectVisitor* v) {
struct CallIsValidSlot {
template <typename BodyDescriptor>
- static bool apply(Map map, HeapObject obj, int offset, int) {
+ static bool apply(Map map, HeapObject obj, int offset) {
return BodyDescriptor::IsValidSlot(map, obj, offset);
}
};
bool HeapObject::IsValidSlot(Map map, int offset) {
DCHECK_NE(0, offset);
- return BodyDescriptorApply<CallIsValidSlot, bool>(map.instance_type(), map,
- *this, offset, 0);
+ return BodyDescriptorApply<CallIsValidSlot>(map.instance_type(), map, *this,
+ offset);
}
int HeapObject::SizeFromMap(Map map) const {
@@ -2197,7 +2198,8 @@ int HeapObject::SizeFromMap(Map map) const {
return Context::SizeFor(Context::unchecked_cast(*this).length());
}
if (instance_type == ONE_BYTE_STRING_TYPE ||
- instance_type == ONE_BYTE_INTERNALIZED_STRING_TYPE) {
+ instance_type == ONE_BYTE_INTERNALIZED_STRING_TYPE ||
+ instance_type == SHARED_ONE_BYTE_STRING_TYPE) {
     // Strings may get concurrently truncated, so their length must be read
     // in a synchronized way.
return SeqOneByteString::SizeFor(
@@ -2215,7 +2217,8 @@ int HeapObject::SizeFromMap(Map map) const {
return FreeSpace::unchecked_cast(*this).size(kRelaxedLoad);
}
if (instance_type == STRING_TYPE ||
- instance_type == INTERNALIZED_STRING_TYPE) {
+ instance_type == INTERNALIZED_STRING_TYPE ||
+ instance_type == SHARED_STRING_TYPE) {
     // Strings may get concurrently truncated, so their length must be read
     // in a synchronized way.
return SeqTwoByteString::SizeFor(
@@ -2301,12 +2304,18 @@ int HeapObject::SizeFromMap(Map map) const {
EmbedderDataArray::unchecked_cast(*this).length());
}
-bool HeapObject::NeedsRehashing() const {
- return NeedsRehashing(map().instance_type());
+bool HeapObject::NeedsRehashing(PtrComprCageBase cage_base) const {
+ return NeedsRehashing(map(cage_base).instance_type());
}
bool HeapObject::NeedsRehashing(InstanceType instance_type) const {
- DCHECK_EQ(instance_type, map().instance_type());
+ if (V8_EXTERNAL_CODE_SPACE_BOOL) {
+ // Use map() only when it's guaranteed that it's not a Code object.
+ DCHECK_IMPLIES(instance_type != CODE_TYPE,
+ instance_type == map().instance_type());
+ } else {
+ DCHECK_EQ(instance_type, map().instance_type());
+ }
switch (instance_type) {
case DESCRIPTOR_ARRAY_TYPE:
case STRONG_DESCRIPTOR_ARRAY_TYPE:
@@ -2333,9 +2342,9 @@ bool HeapObject::NeedsRehashing(InstanceType instance_type) const {
}
}
-bool HeapObject::CanBeRehashed() const {
- DCHECK(NeedsRehashing());
- switch (map().instance_type()) {
+bool HeapObject::CanBeRehashed(PtrComprCageBase cage_base) const {
+ DCHECK(NeedsRehashing(cage_base));
+ switch (map(cage_base).instance_type()) {
case JS_MAP_TYPE:
case JS_SET_TYPE:
return true;
@@ -2368,7 +2377,7 @@ bool HeapObject::CanBeRehashed() const {
template <typename IsolateT>
void HeapObject::RehashBasedOnMap(IsolateT* isolate) {
- switch (map().instance_type()) {
+ switch (map(isolate).instance_type()) {
case HASH_TABLE_TYPE:
UNREACHABLE();
case NAME_DICTIONARY_TYPE:
@@ -2427,7 +2436,7 @@ template void HeapObject::RehashBasedOnMap(Isolate* isolate);
template void HeapObject::RehashBasedOnMap(LocalIsolate* isolate);
bool HeapObject::IsExternal(Isolate* isolate) const {
- return map().FindRootMap(isolate) == isolate->heap()->external_map();
+ return map(isolate).FindRootMap(isolate) == isolate->heap()->external_map();
}
void DescriptorArray::GeneralizeAllFields() {
@@ -2436,7 +2445,7 @@ void DescriptorArray::GeneralizeAllFields() {
PropertyDetails details = GetDetails(i);
details = details.CopyWithRepresentation(Representation::Tagged());
if (details.location() == PropertyLocation::kField) {
- DCHECK_EQ(kData, details.kind());
+ DCHECK_EQ(PropertyKind::kData, details.kind());
details = details.CopyWithConstness(PropertyConstness::kMutable);
SetValue(i, MaybeObject::FromObject(FieldType::Any()));
}
@@ -2964,7 +2973,7 @@ Maybe<bool> JSProxy::IsArray(Handle<JSProxy> proxy) {
Isolate* isolate = proxy->GetIsolate();
Handle<JSReceiver> object = Handle<JSReceiver>::cast(proxy);
for (int i = 0; i < JSProxy::kMaxIterationLimit; i++) {
- Handle<JSProxy> proxy = Handle<JSProxy>::cast(object);
+ proxy = Handle<JSProxy>::cast(object);
if (proxy->IsRevoked()) {
isolate->Throw(*isolate->factory()->NewTypeError(
MessageTemplate::kProxyRevoked,
@@ -3383,9 +3392,9 @@ Maybe<bool> JSArray::ArraySetLength(Isolate* isolate, Handle<JSArray> a,
if (!new_writable) {
PropertyDescriptor readonly;
readonly.set_writable(false);
- Maybe<bool> success = OrdinaryDefineOwnProperty(
- isolate, a, isolate->factory()->length_string(), &readonly,
- should_throw);
+ success = OrdinaryDefineOwnProperty(isolate, a,
+ isolate->factory()->length_string(),
+ &readonly, should_throw);
DCHECK(success.FromJust());
USE(success);
}
@@ -3560,7 +3569,8 @@ Maybe<bool> JSProxy::SetPrivateSymbol(Isolate* isolate, Handle<JSProxy> proxy,
return Just(true);
}
- PropertyDetails details(kData, DONT_ENUM, PropertyConstness::kMutable);
+ PropertyDetails details(PropertyKind::kData, DONT_ENUM,
+ PropertyConstness::kMutable);
if (V8_ENABLE_SWISS_NAME_DICTIONARY_BOOL) {
Handle<SwissNameDictionary> dict(proxy->property_dictionary_swiss(),
isolate);
@@ -3819,7 +3829,7 @@ Handle<DescriptorArray> DescriptorArray::CopyUpToAddAttributes(
int mask = DONT_DELETE | DONT_ENUM;
// READ_ONLY is an invalid attribute for JS setters/getters.
HeapObject heap_object;
- if (details.kind() != kAccessor ||
+ if (details.kind() != PropertyKind::kAccessor ||
!(value_or_field_type->GetHeapObjectIfStrong(&heap_object) &&
heap_object.IsAccessorPair())) {
mask |= READ_ONLY;
@@ -3860,7 +3870,7 @@ Handle<DescriptorArray> DescriptorArray::CopyForFastObjectClone(
DCHECK(!key.IsPrivateName());
DCHECK(details.IsEnumerable());
- DCHECK_EQ(details.kind(), kData);
+ DCHECK_EQ(details.kind(), PropertyKind::kData);
// If the new representation is an in-place changeable field, make it
// generic as possible (under in-place changes) to avoid type confusion if
// the source representation changes after this feedback has been collected.
@@ -3877,7 +3887,7 @@ Handle<DescriptorArray> DescriptorArray::CopyForFastObjectClone(
// Ensure the ObjectClone property details are NONE, and that all source
// details did not contain DONT_ENUM.
- PropertyDetails new_details(kData, NONE, details.location(),
+ PropertyDetails new_details(PropertyKind::kData, NONE, details.location(),
details.constness(), new_representation,
details.field_index());
@@ -4368,12 +4378,12 @@ void DescriptorArray::CopyFrom(InternalIndex index, DescriptorArray src) {
void DescriptorArray::Sort() {
// In-place heap sort.
- int len = number_of_descriptors();
+ const int len = number_of_descriptors();
// Reset sorting since the descriptor array might contain invalid pointers.
for (int i = 0; i < len; ++i) SetSortedKey(i, i);
// Bottom-up max-heap construction.
// Index of the last node with children.
- const int max_parent_index = (len / 2) - 1;
+ int max_parent_index = (len / 2) - 1;
for (int i = max_parent_index; i >= 0; --i) {
int parent_index = i;
const uint32_t parent_hash = GetSortedKey(i).hash();
@@ -4401,7 +4411,7 @@ void DescriptorArray::Sort() {
// Shift down the new top element.
int parent_index = 0;
const uint32_t parent_hash = GetSortedKey(parent_index).hash();
- const int max_parent_index = (i / 2) - 1;
+ max_parent_index = (i / 2) - 1;
while (parent_index <= max_parent_index) {
int child_index = parent_index * 2 + 1;
uint32_t child_hash = GetSortedKey(child_index).hash();
@@ -5956,15 +5966,15 @@ int BaseNameDictionary<Derived, Shape>::NextEnumerationIndex(
// Iterate over the dictionary using the enumeration order and update
// the dictionary with new enumeration indices.
for (int i = 0; i < length; i++) {
- InternalIndex index(Smi::ToInt(iteration_order->get(i)));
+ InternalIndex internal_index(Smi::ToInt(iteration_order->get(i)));
DCHECK(dictionary->IsKey(dictionary->GetReadOnlyRoots(),
- dictionary->KeyAt(isolate, index)));
+ dictionary->KeyAt(isolate, internal_index)));
int enum_index = PropertyDetails::kInitialIndex + i;
- PropertyDetails details = dictionary->DetailsAt(index);
+ PropertyDetails details = dictionary->DetailsAt(internal_index);
PropertyDetails new_details = details.set_index(enum_index);
- dictionary->DetailsAtPut(index, new_details);
+ dictionary->DetailsAtPut(internal_index, new_details);
}
index = PropertyDetails::kInitialIndex + length;
@@ -6458,8 +6468,10 @@ void PropertyCell::ClearAndInvalidate(ReadOnlyRoots roots) {
PropertyDetails details = property_details();
details = details.set_cell_type(PropertyCellType::kConstant);
Transition(details, roots.the_hole_value_handle());
+ // TODO(11527): pass Isolate as an argument.
+ Isolate* isolate = GetIsolateFromWritableObject(*this);
dependent_code().DeoptimizeDependentCodeGroup(
- DependentCode::kPropertyCellChangedGroup);
+ isolate, DependentCode::kPropertyCellChangedGroup);
}
// static
@@ -6533,8 +6545,8 @@ Handle<PropertyCell> PropertyCell::PrepareForAndSetValue(
CHECK(!cell->value().IsTheHole(isolate));
const PropertyDetails original_details = cell->property_details();
// Data accesses could be cached in ics or optimized code.
- bool invalidate =
- original_details.kind() == kData && details.kind() == kAccessor;
+ bool invalidate = original_details.kind() == PropertyKind::kData &&
+ details.kind() == PropertyKind::kAccessor;
int index = original_details.dictionary_index();
DCHECK_LT(0, index);
details = details.set_index(index);
@@ -6556,7 +6568,7 @@ Handle<PropertyCell> PropertyCell::PrepareForAndSetValue(
if (original_details.cell_type() != new_type ||
(!original_details.IsReadOnly() && details.IsReadOnly())) {
cell->dependent_code().DeoptimizeDependentCodeGroup(
- DependentCode::kPropertyCellChangedGroup);
+ isolate, DependentCode::kPropertyCellChangedGroup);
}
}
return cell;
@@ -6567,8 +6579,10 @@ void PropertyCell::InvalidateProtector() {
if (value() != Smi::FromInt(Protectors::kProtectorInvalid)) {
DCHECK_EQ(value(), Smi::FromInt(Protectors::kProtectorValid));
set_value(Smi::FromInt(Protectors::kProtectorInvalid), kReleaseStore);
+ // TODO(11527): pass Isolate as an argument.
+ Isolate* isolate = GetIsolateFromWritableObject(*this);
dependent_code().DeoptimizeDependentCodeGroup(
- DependentCode::kPropertyCellChangedGroup);
+ isolate, DependentCode::kPropertyCellChangedGroup);
}
}
@@ -6582,7 +6596,7 @@ bool PropertyCell::CheckDataIsCompatible(PropertyDetails details,
CHECK_EQ(cell_type, PropertyCellType::kConstant);
} else {
CHECK_EQ(value.IsAccessorInfo() || value.IsAccessorPair(),
- details.kind() == kAccessor);
+ details.kind() == PropertyKind::kAccessor);
DCHECK_IMPLIES(cell_type == PropertyCellType::kUndefined,
value.IsUndefined());
}
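Several hunks in objects.cc swap AppendCString for AppendCStringLiteral when the argument is a string literal. The general pattern being exploited is that a literal's length is part of its array type, so it can be deduced at compile time instead of calling strlen at runtime. A minimal sketch of that pattern with a hypothetical builder, not V8's IncrementalStringBuilder internals:

#include <cstddef>
#include <cstring>
#include <string>

class BuilderSketch {
 public:
  // Length known only at runtime: must scan for the terminator.
  void AppendCString(const char* str) { out_.append(str, std::strlen(str)); }

  // N is deduced from the literal's array type at compile time; no strlen.
  template <size_t N>
  void AppendCStringLiteral(const char (&literal)[N]) {
    static_assert(N >= 1, "expected a null-terminated literal");
    out_.append(literal, N - 1);
  }

 private:
  std::string out_;
};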
diff --git a/deps/v8/src/objects/objects.h b/deps/v8/src/objects/objects.h
index 9fc636365d..82e1680b47 100644
--- a/deps/v8/src/objects/objects.h
+++ b/deps/v8/src/objects/objects.h
@@ -96,6 +96,7 @@
// - WasmMemoryObject
// - WasmModuleObject
// - WasmTableObject
+// - WasmSuspenderObject
// - JSProxy
// - FixedArrayBase
// - ByteArray
@@ -680,6 +681,15 @@ class Object : public TaggedImpl<HeapObjectReferenceType::STRONG, Address> {
}
}
+ // Atomically reads a field using relaxed memory ordering. Can only be used
+ // with integral types whose size is <= kTaggedSize (to guarantee alignment).
+ template <class T,
+ typename std::enable_if<(std::is_arithmetic<T>::value ||
+ std::is_enum<T>::value) &&
+ !std::is_floating_point<T>::value,
+ int>::type = 0>
+ inline T Relaxed_ReadField(size_t offset) const;
+
template <class T, typename std::enable_if<std::is_arithmetic<T>::value ||
std::is_enum<T>::value,
int>::type = 0>
@@ -699,16 +709,14 @@ class Object : public TaggedImpl<HeapObjectReferenceType::STRONG, Address> {
}
//
- // CagedPointer field accessors.
+ // CagedPointer_t field accessors.
//
-#ifdef V8_CAGED_POINTERS
inline Address ReadCagedPointerField(size_t offset,
PtrComprCageBase cage_base) const;
inline void WriteCagedPointerField(size_t offset, PtrComprCageBase cage_base,
Address value);
inline void WriteCagedPointerField(size_t offset, Isolate* isolate,
Address value);
-#endif // V8_CAGED_POINTERS
//
// ExternalPointer_t field accessors.
diff --git a/deps/v8/src/objects/oddball.tq b/deps/v8/src/objects/oddball.tq
index d111779a31..3edee2dbb9 100644
--- a/deps/v8/src/objects/oddball.tq
+++ b/deps/v8/src/objects/oddball.tq
@@ -2,12 +2,10 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-@export
-@customCppClass
-@customMap // Oddballs have one of multiple maps, depending on the kind.
-@apiExposedInstanceTypeValue(0x43)
+@generateBodyDescriptor
+@apiExposedInstanceTypeValue(0x83)
@highestInstanceTypeWithinParentClassRange
-class Oddball extends PrimitiveHeapObject {
+extern class Oddball extends PrimitiveHeapObject {
to_number_raw: float64;
to_string: String;
to_number: Number;
diff --git a/deps/v8/src/objects/promise.h b/deps/v8/src/objects/promise.h
index 075afbeebc..f449e416c8 100644
--- a/deps/v8/src/objects/promise.h
+++ b/deps/v8/src/objects/promise.h
@@ -14,6 +14,7 @@ namespace v8 {
namespace internal {
class JSPromise;
+class StructBodyDescriptor;
#include "torque-generated/src/objects/promise-tq.inc"
@@ -31,6 +32,9 @@ class PromiseReactionJobTask
Microtask> {
public:
static const int kSizeOfAllPromiseReactionJobTasks = kHeaderSize;
+
+ using BodyDescriptor = StructBodyDescriptor;
+
TQ_OBJECT_CONSTRUCTORS(PromiseReactionJobTask)
};
@@ -41,6 +45,8 @@ class PromiseFulfillReactionJobTask
public:
STATIC_ASSERT(kSize == kSizeOfAllPromiseReactionJobTasks);
+ using BodyDescriptor = StructBodyDescriptor;
+
TQ_OBJECT_CONSTRUCTORS(PromiseFulfillReactionJobTask)
};
@@ -51,6 +57,8 @@ class PromiseRejectReactionJobTask
public:
STATIC_ASSERT(kSize == kSizeOfAllPromiseReactionJobTasks);
+ using BodyDescriptor = StructBodyDescriptor;
+
TQ_OBJECT_CONSTRUCTORS(PromiseRejectReactionJobTask)
};
@@ -59,6 +67,8 @@ class PromiseResolveThenableJobTask
: public TorqueGeneratedPromiseResolveThenableJobTask<
PromiseResolveThenableJobTask, Microtask> {
public:
+ using BodyDescriptor = StructBodyDescriptor;
+
TQ_OBJECT_CONSTRUCTORS(PromiseResolveThenableJobTask)
};
@@ -66,6 +76,8 @@ class PromiseResolveThenableJobTask
class PromiseCapability
: public TorqueGeneratedPromiseCapability<PromiseCapability, Struct> {
public:
+ using BodyDescriptor = StructBodyDescriptor;
+
TQ_OBJECT_CONSTRUCTORS(PromiseCapability)
};
@@ -91,6 +103,8 @@ class PromiseReaction
public:
enum Type { kFulfill, kReject };
+ using BodyDescriptor = StructBodyDescriptor;
+
TQ_OBJECT_CONSTRUCTORS(PromiseReaction)
};
diff --git a/deps/v8/src/objects/property-cell-inl.h b/deps/v8/src/objects/property-cell-inl.h
index ef4fa75463..3f0f4bf307 100644
--- a/deps/v8/src/objects/property-cell-inl.h
+++ b/deps/v8/src/objects/property-cell-inl.h
@@ -47,8 +47,10 @@ void PropertyCell::UpdatePropertyDetailsExceptCellType(
// unless the property is also configurable, in which case it will stay
// read-only forever.
if (!old_details.IsReadOnly() && details.IsReadOnly()) {
+ // TODO(11527): pass Isolate as an argument.
+ Isolate* isolate = GetIsolateFromWritableObject(*this);
dependent_code().DeoptimizeDependentCodeGroup(
- DependentCode::kPropertyCellChangedGroup);
+ isolate, DependentCode::kPropertyCellChangedGroup);
}
}
diff --git a/deps/v8/src/objects/property-descriptor-object.h b/deps/v8/src/objects/property-descriptor-object.h
index c9affb4ff7..a18fe1fcf0 100644
--- a/deps/v8/src/objects/property-descriptor-object.h
+++ b/deps/v8/src/objects/property-descriptor-object.h
@@ -35,6 +35,8 @@ class PropertyDescriptorObject
HasWritableBit::kMask | HasValueBit::kMask |
HasGetBit::kMask | HasSetBit::kMask;
+ using BodyDescriptor = StructBodyDescriptor;
+
TQ_OBJECT_CONSTRUCTORS(PropertyDescriptorObject)
};
diff --git a/deps/v8/src/objects/property-descriptor.cc b/deps/v8/src/objects/property-descriptor.cc
index e33759f6f7..46aac7e0ed 100644
--- a/deps/v8/src/objects/property-descriptor.cc
+++ b/deps/v8/src/objects/property-descriptor.cc
@@ -62,22 +62,22 @@ bool ToPropertyDescriptorFastPath(Isolate* isolate, Handle<JSReceiver> obj,
PropertyDetails details = descs->GetDetails(i);
Handle<Object> value;
if (details.location() == PropertyLocation::kField) {
- if (details.kind() == kData) {
+ if (details.kind() == PropertyKind::kData) {
value = JSObject::FastPropertyAt(Handle<JSObject>::cast(obj),
details.representation(),
FieldIndex::ForDescriptor(*map, i));
} else {
- DCHECK_EQ(kAccessor, details.kind());
+ DCHECK_EQ(PropertyKind::kAccessor, details.kind());
// Bail out to slow path.
return false;
}
} else {
DCHECK_EQ(PropertyLocation::kDescriptor, details.location());
- if (details.kind() == kData) {
+ if (details.kind() == PropertyKind::kData) {
value = handle(descs->GetStrongValue(i), isolate);
} else {
- DCHECK_EQ(kAccessor, details.kind());
+ DCHECK_EQ(PropertyKind::kAccessor, details.kind());
// Bail out to slow path.
return false;
}
diff --git a/deps/v8/src/objects/property-details.h b/deps/v8/src/objects/property-details.h
index f356bcd53a..0837aac7f2 100644
--- a/deps/v8/src/objects/property-details.h
+++ b/deps/v8/src/objects/property-details.h
@@ -79,7 +79,7 @@ class TypeInfo;
// Order of kinds is significant.
// Must fit in the BitField PropertyDetails::KindField.
-enum PropertyKind { kData = 0, kAccessor = 1 };
+enum class PropertyKind { kData = 0, kAccessor = 1 };
// Order of modes is significant.
// Must fit in the BitField PropertyDetails::LocationField.
@@ -104,16 +104,22 @@ class Representation {
kNumRepresentations
};
- Representation() : kind_(kNone) {}
+ constexpr Representation() : kind_(kNone) {}
- static Representation None() { return Representation(kNone); }
- static Representation Tagged() { return Representation(kTagged); }
- static Representation Smi() { return Representation(kSmi); }
- static Representation Double() { return Representation(kDouble); }
- static Representation HeapObject() { return Representation(kHeapObject); }
- static Representation WasmValue() { return Representation(kWasmValue); }
+ static constexpr Representation None() { return Representation(kNone); }
+ static constexpr Representation Tagged() { return Representation(kTagged); }
+ static constexpr Representation Smi() { return Representation(kSmi); }
+ static constexpr Representation Double() { return Representation(kDouble); }
+ static constexpr Representation HeapObject() {
+ return Representation(kHeapObject);
+ }
+ static constexpr Representation WasmValue() {
+ return Representation(kWasmValue);
+ }
- static Representation FromKind(Kind kind) { return Representation(kind); }
+ static constexpr Representation FromKind(Kind kind) {
+ return Representation(kind);
+ }
bool Equals(const Representation& other) const {
return kind_ == other.kind_;
@@ -190,14 +196,14 @@ class Representation {
return kTaggedSize;
}
- Kind kind() const { return static_cast<Kind>(kind_); }
- bool IsNone() const { return kind_ == kNone; }
- bool IsWasmValue() const { return kind_ == kWasmValue; }
- bool IsTagged() const { return kind_ == kTagged; }
- bool IsSmi() const { return kind_ == kSmi; }
- bool IsSmiOrTagged() const { return IsSmi() || IsTagged(); }
- bool IsDouble() const { return kind_ == kDouble; }
- bool IsHeapObject() const { return kind_ == kHeapObject; }
+ constexpr Kind kind() const { return static_cast<Kind>(kind_); }
+ constexpr bool IsNone() const { return kind_ == kNone; }
+ constexpr bool IsWasmValue() const { return kind_ == kWasmValue; }
+ constexpr bool IsTagged() const { return kind_ == kTagged; }
+ constexpr bool IsSmi() const { return kind_ == kSmi; }
+ constexpr bool IsSmiOrTagged() const { return IsSmi() || IsTagged(); }
+ constexpr bool IsDouble() const { return kind_ == kDouble; }
+ constexpr bool IsHeapObject() const { return kind_ == kHeapObject; }
const char* Mnemonic() const {
switch (kind_) {
@@ -218,7 +224,7 @@ class Representation {
}
private:
- explicit Representation(Kind k) : kind_(k) {}
+ explicit constexpr Representation(Kind k) : kind_(k) {}
// Make sure kind fits in int8.
STATIC_ASSERT(kNumRepresentations <= (1 << kBitsPerByte));
@@ -254,44 +260,45 @@ enum class PropertyCellType {
class PropertyDetails {
public:
// Property details for global dictionary properties.
- PropertyDetails(PropertyKind kind, PropertyAttributes attributes,
- PropertyCellType cell_type, int dictionary_index = 0) {
- value_ = KindField::encode(kind) |
- LocationField::encode(PropertyLocation::kField) |
- AttributesField::encode(attributes) |
- // We track PropertyCell constness via PropertyCellTypeField,
- // so we set ConstnessField to kMutable to simplify DCHECKs related
- // to non-global property constness tracking.
- ConstnessField::encode(PropertyConstness::kMutable) |
- DictionaryStorageField::encode(dictionary_index) |
- PropertyCellTypeField::encode(cell_type);
- }
+ constexpr PropertyDetails(PropertyKind kind, PropertyAttributes attributes,
+ PropertyCellType cell_type,
+ int dictionary_index = 0)
+ : value_(KindField::encode(kind) |
+ LocationField::encode(PropertyLocation::kField) |
+ AttributesField::encode(attributes) |
+ // We track PropertyCell constness via PropertyCellTypeField,
+ // so we set ConstnessField to kMutable to simplify DCHECKs
+ // related to non-global property constness tracking.
+ ConstnessField::encode(PropertyConstness::kMutable) |
+ DictionaryStorageField::encode(dictionary_index) |
+ PropertyCellTypeField::encode(cell_type)) {}
// Property details for dictionary mode properties/elements.
- PropertyDetails(PropertyKind kind, PropertyAttributes attributes,
- PropertyConstness constness, int dictionary_index = 0) {
- value_ = KindField::encode(kind) |
- LocationField::encode(PropertyLocation::kField) |
- AttributesField::encode(attributes) |
- ConstnessField::encode(constness) |
- DictionaryStorageField::encode(dictionary_index) |
- PropertyCellTypeField::encode(PropertyCellType::kNoCell);
- }
+ constexpr PropertyDetails(PropertyKind kind, PropertyAttributes attributes,
+ PropertyConstness constness,
+ int dictionary_index = 0)
+ : value_(KindField::encode(kind) |
+ LocationField::encode(PropertyLocation::kField) |
+ AttributesField::encode(attributes) |
+ ConstnessField::encode(constness) |
+ DictionaryStorageField::encode(dictionary_index) |
+ PropertyCellTypeField::encode(PropertyCellType::kNoCell)) {}
// Property details for fast mode properties.
- PropertyDetails(PropertyKind kind, PropertyAttributes attributes,
- PropertyLocation location, PropertyConstness constness,
- Representation representation, int field_index = 0) {
- value_ = KindField::encode(kind) | AttributesField::encode(attributes) |
- LocationField::encode(location) |
- ConstnessField::encode(constness) |
- RepresentationField::encode(EncodeRepresentation(representation)) |
- FieldIndexField::encode(field_index);
- }
-
- static PropertyDetails Empty(
+ constexpr PropertyDetails(PropertyKind kind, PropertyAttributes attributes,
+ PropertyLocation location,
+ PropertyConstness constness,
+ Representation representation, int field_index = 0)
+ : value_(
+ KindField::encode(kind) | AttributesField::encode(attributes) |
+ LocationField::encode(location) |
+ ConstnessField::encode(constness) |
+ RepresentationField::encode(EncodeRepresentation(representation)) |
+ FieldIndexField::encode(field_index)) {}
+
+ static constexpr PropertyDetails Empty(
PropertyCellType cell_type = PropertyCellType::kNoCell) {
- return PropertyDetails(kData, NONE, cell_type);
+ return PropertyDetails(PropertyKind::kData, NONE, cell_type);
}
bool operator==(PropertyDetails const& other) {
@@ -336,7 +343,7 @@ class PropertyDetails {
explicit inline PropertyDetails(Smi smi);
inline Smi AsSmi() const;
- static uint8_t EncodeRepresentation(Representation representation) {
+ static constexpr uint8_t EncodeRepresentation(Representation representation) {
return representation.kind();
}
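The property-details.h hunks make the Representation and PropertyDetails constructors constexpr, which lets details values be encoded (and checked) at compile time rather than at every call site. A minimal sketch of that effect under simplified, hypothetical bit layouts:

#include <cstdint>

enum class KindSketch : uint32_t { kData = 0, kAccessor = 1 };

class DetailsSketch {
 public:
  constexpr DetailsSketch(KindSketch kind, uint32_t attributes)
      : value_((static_cast<uint32_t>(kind) & 0x1u) | (attributes << 1)) {}
  constexpr KindSketch kind() const { return static_cast<KindSketch>(value_ & 0x1u); }

 private:
  uint32_t value_;
};

// Built and verified entirely at compile time.
constexpr DetailsSketch kEmptySketch(KindSketch::kData, /*attributes=*/0);
static_assert(kEmptySketch.kind() == KindSketch::kData, "evaluated at compile time");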
diff --git a/deps/v8/src/objects/property.cc b/deps/v8/src/objects/property.cc
index 4cc29c70ae..42f1ea1276 100644
--- a/deps/v8/src/objects/property.cc
+++ b/deps/v8/src/objects/property.cc
@@ -89,16 +89,18 @@ Descriptor Descriptor::DataField(Handle<Name> key, int field_index,
Representation representation,
const MaybeObjectHandle& wrapped_field_type) {
DCHECK(wrapped_field_type->IsSmi() || wrapped_field_type->IsWeak());
- PropertyDetails details(kData, attributes, PropertyLocation::kField,
- constness, representation, field_index);
+ PropertyDetails details(PropertyKind::kData, attributes,
+ PropertyLocation::kField, constness, representation,
+ field_index);
return Descriptor(key, wrapped_field_type, details);
}
Descriptor Descriptor::DataConstant(Handle<Name> key, Handle<Object> value,
PropertyAttributes attributes) {
PtrComprCageBase cage_base = GetPtrComprCageBase(*key);
- return Descriptor(key, MaybeObjectHandle(value), kData, attributes,
- PropertyLocation::kDescriptor, PropertyConstness::kConst,
+ return Descriptor(key, MaybeObjectHandle(value), PropertyKind::kData,
+ attributes, PropertyLocation::kDescriptor,
+ PropertyConstness::kConst,
value->OptimalRepresentation(cage_base), 0);
}
@@ -113,16 +115,16 @@ Descriptor Descriptor::DataConstant(Isolate* isolate, Handle<Name> key,
Descriptor Descriptor::AccessorConstant(Handle<Name> key,
Handle<Object> foreign,
PropertyAttributes attributes) {
- return Descriptor(key, MaybeObjectHandle(foreign), kAccessor, attributes,
- PropertyLocation::kDescriptor, PropertyConstness::kConst,
- Representation::Tagged(), 0);
+ return Descriptor(key, MaybeObjectHandle(foreign), PropertyKind::kAccessor,
+ attributes, PropertyLocation::kDescriptor,
+ PropertyConstness::kConst, Representation::Tagged(), 0);
}
// Outputs PropertyDetails as a dictionary details.
void PropertyDetails::PrintAsSlowTo(std::ostream& os, bool print_dict_index) {
os << "(";
if (constness() == PropertyConstness::kConst) os << "const ";
- os << (kind() == kData ? "data" : "accessor");
+ os << (kind() == PropertyKind::kData ? "data" : "accessor");
if (print_dict_index) {
os << ", dict_index: " << dictionary_index();
}
@@ -133,7 +135,7 @@ void PropertyDetails::PrintAsSlowTo(std::ostream& os, bool print_dict_index) {
void PropertyDetails::PrintAsFastTo(std::ostream& os, PrintMode mode) {
os << "(";
if (constness() == PropertyConstness::kConst) os << "const ";
- os << (kind() == kData ? "data" : "accessor");
+ os << (kind() == PropertyKind::kData ? "data" : "accessor");
if (location() == PropertyLocation::kField) {
os << " field";
if (mode & kPrintFieldIndex) {
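The widespread kData -> PropertyKind::kData rewrites in this and the previous files follow from turning PropertyKind into a scoped enum: the enumerators no longer leak into the enclosing namespace and no longer convert implicitly to int. A minimal illustration with a hypothetical scoped enum alongside an unscoped one:

enum UnscopedKind { kDataSketch, kAccessorSketch };        // enumerators leak into this scope
enum class PropertyKindSketch { kData, kAccessor };        // must be qualified at each use

int Demo() {
  int implicit = kDataSketch;                              // unscoped enums convert silently
  PropertyKindSketch k = PropertyKindSketch::kData;        // spelled out, as in the hunks above
  return implicit + static_cast<int>(k);                   // scoped enums need an explicit cast
}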
diff --git a/deps/v8/src/objects/prototype-info.tq b/deps/v8/src/objects/prototype-info.tq
index 33248469c3..d7df74bbc9 100644
--- a/deps/v8/src/objects/prototype-info.tq
+++ b/deps/v8/src/objects/prototype-info.tq
@@ -23,8 +23,8 @@ extern class PrototypeInfo extends Struct {
// is stored. Returns UNREGISTERED if this prototype has not been registered.
registry_slot: Smi;
+ bit_field: SmiTagged<PrototypeInfoFlags>;
+
// [object_create_map]: A field caching the map for Object.create(prototype).
object_create_map: Weak<Map>|Undefined;
-
- bit_field: SmiTagged<PrototypeInfoFlags>;
}
diff --git a/deps/v8/src/objects/scope-info.cc b/deps/v8/src/objects/scope-info.cc
index 08b744e4a2..e20493d468 100644
--- a/deps/v8/src/objects/scope-info.cc
+++ b/deps/v8/src/objects/scope-info.cc
@@ -189,7 +189,7 @@ Handle<ScopeInfo> ScopeInfo::Create(IsolateT* isolate, Zone* zone, Scope* scope,
is_asm_module = function_scope->is_asm_module();
#endif // V8_ENABLE_WEBASSEMBLY
}
- FunctionKind function_kind = kNormalFunction;
+ FunctionKind function_kind = FunctionKind::kNormalFunction;
if (scope->is_declaration_scope()) {
function_kind = scope->AsDeclarationScope()->function_kind();
sloppy_eval_can_extend_vars =
@@ -298,8 +298,8 @@ Handle<ScopeInfo> ScopeInfo::Create(IsolateT* isolate, Zone* zone, Scope* scope,
for (int i = 0; i < parameter_count; i++) {
Variable* parameter = scope->AsDeclarationScope()->parameter(i);
if (parameter->location() != VariableLocation::CONTEXT) continue;
- int index = parameter->index() - scope->ContextHeaderLength();
- int info_index = context_local_info_base + index;
+ int param_index = parameter->index() - scope->ContextHeaderLength();
+ int info_index = context_local_info_base + param_index;
int info = Smi::ToInt(scope_info.get(info_index));
info = ParameterNumberBits::update(info, i);
scope_info.set(info_index, Smi::FromInt(info));
@@ -401,7 +401,7 @@ Handle<ScopeInfo> ScopeInfo::CreateForWithScope(
HasNewTargetBit::encode(false) |
FunctionVariableBits::encode(VariableAllocationInfo::NONE) |
IsAsmModuleBit::encode(false) | HasSimpleParametersBit::encode(true) |
- FunctionKindBits::encode(kNormalFunction) |
+ FunctionKindBits::encode(FunctionKind::kNormalFunction) |
HasOuterScopeInfoBit::encode(has_outer_scope_info) |
IsDebugEvaluateScopeBit::encode(false) |
ForceContextAllocationBit::encode(false) |
diff --git a/deps/v8/src/objects/scope-info.h b/deps/v8/src/objects/scope-info.h
index 11c06a76b5..5544efea9d 100644
--- a/deps/v8/src/objects/scope-info.h
+++ b/deps/v8/src/objects/scope-info.h
@@ -274,7 +274,7 @@ class ScopeInfo : public TorqueGeneratedScopeInfo<ScopeInfo, HeapObject> {
};
STATIC_ASSERT(LanguageModeSize == 1 << LanguageModeBit::kSize);
- STATIC_ASSERT(kLastFunctionKind <= FunctionKindBits::kMax);
+ STATIC_ASSERT(FunctionKind::kLastFunctionKind <= FunctionKindBits::kMax);
bool IsEmpty() const;
diff --git a/deps/v8/src/objects/script.h b/deps/v8/src/objects/script.h
index 76b8d92dd8..09a65dbb1b 100644
--- a/deps/v8/src/objects/script.h
+++ b/deps/v8/src/objects/script.h
@@ -22,6 +22,7 @@ namespace v8 {
namespace internal {
class FunctionLiteral;
+class StructBodyDescriptor;
namespace wasm {
class NativeModule;
@@ -230,6 +231,8 @@ class Script : public TorqueGeneratedScript<Script, Struct> {
DECL_PRINTER(Script)
DECL_VERIFIER(Script)
+ using BodyDescriptor = StructBodyDescriptor;
+
private:
// Bit positions in the flags field.
DEFINE_TORQUE_GENERATED_SCRIPT_FLAGS()
diff --git a/deps/v8/src/objects/shared-function-info-inl.h b/deps/v8/src/objects/shared-function-info-inl.h
index 1e793d0219..a54ea4599f 100644
--- a/deps/v8/src/objects/shared-function-info-inl.h
+++ b/deps/v8/src/objects/shared-function-info-inl.h
@@ -7,6 +7,7 @@
#include "src/base/macros.h"
#include "src/base/platform/mutex.h"
+#include "src/codegen/optimized-compilation-info.h"
#include "src/common/globals.h"
#include "src/handles/handles-inl.h"
#include "src/heap/heap-write-barrier-inl.h"
@@ -92,21 +93,10 @@ void PreparseData::set_child(int index, PreparseData value,
TQ_OBJECT_CONSTRUCTORS_IMPL(UncompiledData)
TQ_OBJECT_CONSTRUCTORS_IMPL(UncompiledDataWithoutPreparseData)
TQ_OBJECT_CONSTRUCTORS_IMPL(UncompiledDataWithPreparseData)
+TQ_OBJECT_CONSTRUCTORS_IMPL(UncompiledDataWithoutPreparseDataWithJob)
+TQ_OBJECT_CONSTRUCTORS_IMPL(UncompiledDataWithPreparseDataAndJob)
TQ_OBJECT_CONSTRUCTORS_IMPL(InterpreterData)
-
-ACCESSORS(InterpreterData, raw_interpreter_trampoline, CodeT,
- kInterpreterTrampolineOffset)
-
-DEF_GETTER(InterpreterData, interpreter_trampoline, Code) {
- return FromCodeT(raw_interpreter_trampoline(cage_base));
-}
-
-void InterpreterData::set_interpreter_trampoline(Code code,
- WriteBarrierMode mode) {
- set_raw_interpreter_trampoline(ToCodeT(code), mode);
-}
-
TQ_OBJECT_CONSTRUCTORS_IMPL(SharedFunctionInfo)
NEVER_READ_ONLY_SPACE_IMPL(SharedFunctionInfo)
DEFINE_DEOPT_ELEMENT_ACCESSORS(SharedFunctionInfo, Object)
@@ -211,7 +201,7 @@ AbstractCode SharedFunctionInfo::abstract_code(IsolateT* isolate) {
if (HasBytecodeArray()) {
return AbstractCode::cast(GetBytecodeArray(isolate));
} else {
- return AbstractCode::cast(GetCode());
+ return AbstractCode::cast(FromCodeT(GetCode()));
}
}
@@ -310,10 +300,10 @@ BIT_FIELD_ACCESSORS(SharedFunctionInfo, relaxed_flags,
SharedFunctionInfo::PrivateNameLookupSkipsOuterClassBit)
bool SharedFunctionInfo::optimization_disabled() const {
- return disable_optimization_reason() != BailoutReason::kNoReason;
+ return disabled_optimization_reason() != BailoutReason::kNoReason;
}
-BailoutReason SharedFunctionInfo::disable_optimization_reason() const {
+BailoutReason SharedFunctionInfo::disabled_optimization_reason() const {
return DisabledOptimizationReasonBits::decode(flags(kRelaxedLoad));
}
@@ -414,16 +404,16 @@ bool SharedFunctionInfo::IsDontAdaptArguments() const {
bool SharedFunctionInfo::IsInterpreted() const { return HasBytecodeArray(); }
-ScopeInfo SharedFunctionInfo::scope_info(AcquireLoadTag tag) const {
- Object maybe_scope_info = name_or_scope_info(tag);
- if (maybe_scope_info.IsScopeInfo()) {
+DEF_ACQUIRE_GETTER(SharedFunctionInfo, scope_info, ScopeInfo) {
+ Object maybe_scope_info = name_or_scope_info(cage_base, kAcquireLoad);
+ if (maybe_scope_info.IsScopeInfo(cage_base)) {
return ScopeInfo::cast(maybe_scope_info);
}
return GetReadOnlyRoots().empty_scope_info();
}
-ScopeInfo SharedFunctionInfo::scope_info() const {
- return scope_info(kAcquireLoad);
+DEF_GETTER(SharedFunctionInfo, scope_info, ScopeInfo) {
+ return scope_info(cage_base, kAcquireLoad);
}
void SharedFunctionInfo::SetScopeInfo(ScopeInfo scope_info,
@@ -581,7 +571,7 @@ BytecodeArray SharedFunctionInfo::GetBytecodeArray(IsolateT* isolate) const {
BytecodeArray SharedFunctionInfo::GetActiveBytecodeArray() const {
Object data = function_data(kAcquireLoad);
if (data.IsCodeT()) {
- Code baseline_code = FromCodeT(CodeT::cast(data));
+ CodeT baseline_code = CodeT::cast(data);
data = baseline_code.bytecode_or_interpreter_data();
}
if (data.IsBytecodeArray()) {
@@ -626,7 +616,7 @@ bool SharedFunctionInfo::ShouldFlushCode(
// called by the concurrent marker.
Object data = function_data(kAcquireLoad);
if (data.IsCodeT()) {
- Code baseline_code = FromCodeT(CodeT::cast(data));
+ CodeT baseline_code = CodeT::cast(data);
DCHECK_EQ(baseline_code.kind(), CodeKind::BASELINE);
// If baseline code flushing isn't enabled and we have baseline data on SFI
// we cannot flush baseline / bytecode.
@@ -646,7 +636,7 @@ bool SharedFunctionInfo::ShouldFlushCode(
return bytecode.IsOld();
}
-Code SharedFunctionInfo::InterpreterTrampoline() const {
+CodeT SharedFunctionInfo::InterpreterTrampoline() const {
DCHECK(HasInterpreterData());
return interpreter_data().interpreter_trampoline();
}
@@ -654,7 +644,7 @@ Code SharedFunctionInfo::InterpreterTrampoline() const {
bool SharedFunctionInfo::HasInterpreterData() const {
Object data = function_data(kAcquireLoad);
if (data.IsCodeT()) {
- Code baseline_code = FromCodeT(CodeT::cast(data));
+ CodeT baseline_code = CodeT::cast(data);
DCHECK_EQ(baseline_code.kind(), CodeKind::BASELINE);
data = baseline_code.bytecode_or_interpreter_data();
}
@@ -665,7 +655,7 @@ InterpreterData SharedFunctionInfo::interpreter_data() const {
DCHECK(HasInterpreterData());
Object data = function_data(kAcquireLoad);
if (data.IsCodeT()) {
- Code baseline_code = FromCodeT(CodeT::cast(data));
+ CodeT baseline_code = CodeT::cast(data);
DCHECK_EQ(baseline_code.kind(), CodeKind::BASELINE);
data = baseline_code.bytecode_or_interpreter_data();
}
@@ -682,21 +672,21 @@ void SharedFunctionInfo::set_interpreter_data(
bool SharedFunctionInfo::HasBaselineCode() const {
Object data = function_data(kAcquireLoad);
if (data.IsCodeT()) {
- DCHECK_EQ(FromCodeT(CodeT::cast(data)).kind(), CodeKind::BASELINE);
+ DCHECK_EQ(CodeT::cast(data).kind(), CodeKind::BASELINE);
return true;
}
return false;
}
-Code SharedFunctionInfo::baseline_code(AcquireLoadTag) const {
+CodeT SharedFunctionInfo::baseline_code(AcquireLoadTag) const {
DCHECK(HasBaselineCode());
- return FromCodeT(CodeT::cast(function_data(kAcquireLoad)));
+ return CodeT::cast(function_data(kAcquireLoad));
}
-void SharedFunctionInfo::set_baseline_code(Code baseline_code,
+void SharedFunctionInfo::set_baseline_code(CodeT baseline_code,
ReleaseStoreTag) {
DCHECK_EQ(baseline_code.kind(), CodeKind::BASELINE);
- set_function_data(ToCodeT(baseline_code), kReleaseStore);
+ set_function_data(baseline_code, kReleaseStore);
}
void SharedFunctionInfo::FlushBaselineCode() {
@@ -804,6 +794,17 @@ bool SharedFunctionInfo::HasUncompiledDataWithoutPreparseData() const {
return function_data(kAcquireLoad).IsUncompiledDataWithoutPreparseData();
}
+void SharedFunctionInfo::ClearUncompiledDataJobPointer() {
+ UncompiledData uncompiled_data = this->uncompiled_data();
+ if (uncompiled_data.IsUncompiledDataWithPreparseDataAndJob()) {
+ UncompiledDataWithPreparseDataAndJob::cast(uncompiled_data)
+ .set_job(kNullAddress);
+ } else if (uncompiled_data.IsUncompiledDataWithoutPreparseDataWithJob()) {
+ UncompiledDataWithoutPreparseDataWithJob::cast(uncompiled_data)
+ .set_job(kNullAddress);
+ }
+}
+
void SharedFunctionInfo::ClearPreparseData() {
DCHECK(HasUncompiledDataWithPreparseData());
UncompiledDataWithPreparseData data = uncompiled_data_with_preparse_data();
diff --git a/deps/v8/src/objects/shared-function-info.cc b/deps/v8/src/objects/shared-function-info.cc
index 4354a2af28..a62688c6ee 100644
--- a/deps/v8/src/objects/shared-function-info.cc
+++ b/deps/v8/src/objects/shared-function-info.cc
@@ -10,6 +10,7 @@
#include "src/codegen/compiler.h"
#include "src/common/globals.h"
#include "src/diagnostics/code-tracer.h"
+#include "src/execution/isolate-utils.h"
#include "src/objects/shared-function-info-inl.h"
#include "src/strings/string-builder-inl.h"
@@ -67,7 +68,7 @@ void SharedFunctionInfo::Init(ReadOnlyRoots ro_roots, int unique_id) {
clear_padding();
}
-Code SharedFunctionInfo::GetCode() const {
+CodeT SharedFunctionInfo::GetCode() const {
// ======
// NOTE: This chain of checks MUST be kept in sync with the equivalent CSA
// GetSharedFunctionInfoCode method in code-stub-assembler.cc.
@@ -78,50 +79,50 @@ Code SharedFunctionInfo::GetCode() const {
if (data.IsSmi()) {
// Holding a Smi means we are a builtin.
DCHECK(HasBuiltinId());
- return isolate->builtins()->code(builtin_id());
+ return isolate->builtins()->codet(builtin_id());
}
if (data.IsBytecodeArray()) {
// Having a bytecode array means we are a compiled, interpreted function.
DCHECK(HasBytecodeArray());
- return isolate->builtins()->code(Builtin::kInterpreterEntryTrampoline);
+ return isolate->builtins()->codet(Builtin::kInterpreterEntryTrampoline);
}
if (data.IsCodeT()) {
// Having baseline Code means we are a compiled, baseline function.
DCHECK(HasBaselineCode());
- return FromCodeT(CodeT::cast(data));
+ return CodeT::cast(data);
}
#if V8_ENABLE_WEBASSEMBLY
if (data.IsAsmWasmData()) {
// Having AsmWasmData means we are an asm.js/wasm function.
DCHECK(HasAsmWasmData());
- return isolate->builtins()->code(Builtin::kInstantiateAsmJs);
+ return isolate->builtins()->codet(Builtin::kInstantiateAsmJs);
}
if (data.IsWasmExportedFunctionData()) {
// Having a WasmExportedFunctionData means the code is in there.
DCHECK(HasWasmExportedFunctionData());
- return wasm_exported_function_data().wrapper_code();
+ return ToCodeT(wasm_exported_function_data().wrapper_code());
}
if (data.IsWasmJSFunctionData()) {
- return wasm_js_function_data().wrapper_code();
+ return ToCodeT(wasm_js_function_data().wrapper_code());
}
if (data.IsWasmCapiFunctionData()) {
- return wasm_capi_function_data().wrapper_code();
+ return ToCodeT(wasm_capi_function_data().wrapper_code());
}
#endif // V8_ENABLE_WEBASSEMBLY
if (data.IsUncompiledData()) {
// Having uncompiled data (with or without scope) means we need to compile.
DCHECK(HasUncompiledData());
- return isolate->builtins()->code(Builtin::kCompileLazy);
+ return isolate->builtins()->codet(Builtin::kCompileLazy);
}
if (data.IsFunctionTemplateInfo()) {
// Having a function template info means we are an API function.
DCHECK(IsApiFunction());
- return isolate->builtins()->code(Builtin::kHandleApiCall);
+ return isolate->builtins()->codet(Builtin::kHandleApiCall);
}
if (data.IsInterpreterData()) {
- Code code = InterpreterTrampoline();
- DCHECK(code.IsCode());
- DCHECK(code.is_interpreter_trampoline_builtin());
+ CodeT code = InterpreterTrampoline();
+ DCHECK(code.IsCodeT());
+ DCHECK(FromCodeT(code).is_interpreter_trampoline_builtin());
return code;
}
UNREACHABLE();
@@ -225,6 +226,35 @@ void SharedFunctionInfo::SetScript(ReadOnlyRoots roots,
set_script(script_object);
}
+void SharedFunctionInfo::CopyFrom(SharedFunctionInfo other) {
+ PtrComprCageBase cage_base = GetPtrComprCageBase(*this);
+ set_function_data(other.function_data(cage_base, kAcquireLoad),
+ kReleaseStore);
+ set_name_or_scope_info(other.name_or_scope_info(cage_base, kAcquireLoad),
+ kReleaseStore);
+ set_outer_scope_info_or_feedback_metadata(
+ other.outer_scope_info_or_feedback_metadata(cage_base));
+ set_script_or_debug_info(other.script_or_debug_info(cage_base, kAcquireLoad),
+ kReleaseStore);
+
+ set_length(other.length());
+ set_formal_parameter_count(other.formal_parameter_count());
+ set_function_token_offset(other.function_token_offset());
+ set_expected_nof_properties(other.expected_nof_properties());
+ set_flags2(other.flags2());
+ set_flags(other.flags(kRelaxedLoad), kRelaxedStore);
+ set_function_literal_id(other.function_literal_id());
+#if V8_SFI_HAS_UNIQUE_ID
+ set_unique_id(other.unique_id());
+#endif
+
+ // This should now be byte-for-byte identical to the input.
+ DCHECK_EQ(memcmp(reinterpret_cast<void*>(address()),
+ reinterpret_cast<void*>(other.address()),
+ SharedFunctionInfo::kSize),
+ 0);
+}
+
bool SharedFunctionInfo::HasBreakInfo() const {
if (!HasDebugInfo()) return false;
DebugInfo info = GetDebugInfo();
@@ -382,19 +412,19 @@ Handle<Object> SharedFunctionInfo::GetSourceCodeHarmony(
DCHECK(!shared->name_should_print_as_anonymous());
IncrementalStringBuilder builder(isolate);
- builder.AppendCString("function ");
+ builder.AppendCStringLiteral("function ");
builder.AppendString(Handle<String>(shared->Name(), isolate));
- builder.AppendCString("(");
+ builder.AppendCharacter('(');
Handle<FixedArray> args(Script::cast(shared->script()).wrapped_arguments(),
isolate);
int argc = args->length();
for (int i = 0; i < argc; i++) {
- if (i > 0) builder.AppendCString(", ");
+ if (i > 0) builder.AppendCStringLiteral(", ");
builder.AppendString(Handle<String>(String::cast(args->get(i)), isolate));
}
- builder.AppendCString(") {\n");
+ builder.AppendCStringLiteral(") {\n");
builder.AppendString(source);
- builder.AppendCString("\n}");
+ builder.AppendCStringLiteral("\n}");
return builder.Finish().ToHandleChecked();
}
@@ -499,8 +529,8 @@ void SharedFunctionInfo::InitFromFunctionLiteral(
// For lazy parsed functions, the following flags will be inaccurate since we
// don't have the information yet. They're set later in
- // SetSharedFunctionFlagsFromLiteral (compiler.cc), when the function is
- // really parsed and compiled.
+ // UpdateSharedFunctionFlagsAfterCompilation (compiler.cc), when the function
+ // is really parsed and compiled.
if (lit->ShouldEagerCompile()) {
shared_info->set_has_duplicate_parameters(lit->has_duplicate_parameters());
shared_info->UpdateAndFinalizeExpectedNofPropertiesFromEstimate(lit);
@@ -519,13 +549,25 @@ void SharedFunctionInfo::InitFromFunctionLiteral(
if (scope_data != nullptr) {
Handle<PreparseData> preparse_data = scope_data->Serialize(isolate);
- data = isolate->factory()->NewUncompiledDataWithPreparseData(
- lit->GetInferredName(isolate), lit->start_position(),
- lit->end_position(), preparse_data);
+ if (lit->should_parallel_compile()) {
+ data = isolate->factory()->NewUncompiledDataWithPreparseDataAndJob(
+ lit->GetInferredName(isolate), lit->start_position(),
+ lit->end_position(), preparse_data);
+ } else {
+ data = isolate->factory()->NewUncompiledDataWithPreparseData(
+ lit->GetInferredName(isolate), lit->start_position(),
+ lit->end_position(), preparse_data);
+ }
} else {
- data = isolate->factory()->NewUncompiledDataWithoutPreparseData(
- lit->GetInferredName(isolate), lit->start_position(),
- lit->end_position());
+ if (lit->should_parallel_compile()) {
+ data = isolate->factory()->NewUncompiledDataWithoutPreparseDataWithJob(
+ lit->GetInferredName(isolate), lit->start_position(),
+ lit->end_position());
+ } else {
+ data = isolate->factory()->NewUncompiledDataWithoutPreparseData(
+ lit->GetInferredName(isolate), lit->start_position(),
+ lit->end_position());
+ }
}
shared_info->set_uncompiled_data(*data);
@@ -671,6 +713,19 @@ void SharedFunctionInfo::SetPosition(int start_position, int end_position) {
}
// static
+void SharedFunctionInfo::EnsureBytecodeArrayAvailable(
+ Isolate* isolate, Handle<SharedFunctionInfo> shared_info,
+ IsCompiledScope* is_compiled_scope, CreateSourcePositions flag) {
+ if (!shared_info->HasBytecodeArray()) {
+ if (!Compiler::Compile(isolate, shared_info, Compiler::CLEAR_EXCEPTION,
+ is_compiled_scope, flag)) {
+ FATAL("Failed to compile shared info that was already compiled before");
+ }
+ DCHECK(shared_info->GetBytecodeArray(isolate).HasSourcePositionTable());
+ }
+}
+
+// static
void SharedFunctionInfo::EnsureSourcePositionsAvailable(
Isolate* isolate, Handle<SharedFunctionInfo> shared_info) {
if (shared_info->CanCollectSourcePosition(isolate)) {
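The new SharedFunctionInfo::CopyFrom copies every field explicitly and then uses a debug-only memcmp check to prove nothing was missed. A minimal sketch of that safety net with a hypothetical padding-free struct standing in for SharedFunctionInfo:

#include <cassert>
#include <cstring>

struct InfoSketch {
  int length;
  int flags;

  void CopyFrom(const InfoSketch& other) {
    length = other.length;
    flags = other.flags;
    // Debug-only check that the field-by-field copy covered every byte,
    // mirroring the DCHECK_EQ(memcmp(...), 0) at the end of CopyFrom() above.
    assert(std::memcmp(this, &other, sizeof(InfoSketch)) == 0);
  }
};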
diff --git a/deps/v8/src/objects/shared-function-info.h b/deps/v8/src/objects/shared-function-info.h
index e701587f21..f7c27455e1 100644
--- a/deps/v8/src/objects/shared-function-info.h
+++ b/deps/v8/src/objects/shared-function-info.h
@@ -49,6 +49,10 @@ using FunctionSig = Signature<ValueType>;
#include "torque-generated/src/objects/shared-function-info-tq.inc"
+// Defines whether the source positions should be created during function
+// compilation.
+enum class CreateSourcePositions { kNo, kYes };
+
// Data collected by the pre-parser storing information about scopes and inner
// functions.
//
@@ -141,14 +145,37 @@ class UncompiledDataWithPreparseData
TQ_OBJECT_CONSTRUCTORS(UncompiledDataWithPreparseData)
};
+// Class representing data for an uncompiled function that does not have any
+// data from the pre-parser, either because it's a leaf function or because the
+// pre-parser bailed out, but has a job pointer.
+class UncompiledDataWithoutPreparseDataWithJob
+ : public TorqueGeneratedUncompiledDataWithoutPreparseDataWithJob<
+ UncompiledDataWithoutPreparseDataWithJob,
+ UncompiledDataWithoutPreparseData> {
+ public:
+ class BodyDescriptor;
+
+ TQ_OBJECT_CONSTRUCTORS(UncompiledDataWithoutPreparseDataWithJob)
+};
+
+// Class representing data for an uncompiled function that has pre-parsed scope
+// data and a job pointer.
+class UncompiledDataWithPreparseDataAndJob
+ : public TorqueGeneratedUncompiledDataWithPreparseDataAndJob<
+ UncompiledDataWithPreparseDataAndJob,
+ UncompiledDataWithPreparseData> {
+ public:
+ class BodyDescriptor;
+
+ TQ_OBJECT_CONSTRUCTORS(UncompiledDataWithPreparseDataAndJob)
+};
+
class InterpreterData
: public TorqueGeneratedInterpreterData<InterpreterData, Struct> {
public:
- DECL_ACCESSORS(interpreter_trampoline, Code)
+ using BodyDescriptor = StructBodyDescriptor;
private:
- DECL_ACCESSORS(raw_interpreter_trampoline, CodeT)
-
TQ_OBJECT_CONSTRUCTORS(InterpreterData)
};
@@ -176,7 +203,7 @@ class SharedFunctionInfo
inline void SetName(String name);
// Get the code object which represents the execution of this function.
- V8_EXPORT_PRIVATE Code GetCode() const;
+ V8_EXPORT_PRIVATE CodeT GetCode() const;
// Get the abstract code associated with the function, which will either be
// a Code object or a BytecodeArray.
@@ -194,6 +221,11 @@ class SharedFunctionInfo
int function_literal_id,
bool reset_preparsed_scope_data = true);
+ // Copy the data from another SharedFunctionInfo. Used for copying data into
+ // and out of a placeholder SharedFunctionInfo, for off-thread compilation
+ // which is not allowed to touch a main-thread-visible SharedFunctionInfo.
+ void CopyFrom(SharedFunctionInfo other);
+
// Layout description of the optimized code map.
static const int kEntriesStart = 0;
static const int kContextOffset = 0;
@@ -303,13 +335,13 @@ class SharedFunctionInfo
inline BytecodeArray GetBytecodeArray(IsolateT* isolate) const;
inline void set_bytecode_array(BytecodeArray bytecode);
- inline Code InterpreterTrampoline() const;
+ inline CodeT InterpreterTrampoline() const;
inline bool HasInterpreterData() const;
inline InterpreterData interpreter_data() const;
inline void set_interpreter_data(InterpreterData interpreter_data);
inline bool HasBaselineCode() const;
- inline Code baseline_code(AcquireLoadTag) const;
- inline void set_baseline_code(Code baseline_code, ReleaseStoreTag);
+ inline CodeT baseline_code(AcquireLoadTag) const;
+ inline void set_baseline_code(CodeT baseline_code, ReleaseStoreTag);
inline void FlushBaselineCode();
inline BytecodeArray GetActiveBytecodeArray() const;
inline void SetActiveBytecodeArray(BytecodeArray bytecode);
@@ -344,6 +376,7 @@ class SharedFunctionInfo
inline void set_uncompiled_data_with_preparse_data(
UncompiledDataWithPreparseData data);
inline bool HasUncompiledDataWithoutPreparseData() const;
+ inline void ClearUncompiledDataJobPointer();
// Clear out pre-parsed scope data from UncompiledDataWithPreparseData,
// turning it into UncompiledDataWithoutPreparseData.
@@ -480,7 +513,7 @@ class SharedFunctionInfo
inline bool optimization_disabled() const;
// The reason why optimization was disabled.
- inline BailoutReason disable_optimization_reason() const;
+ inline BailoutReason disabled_optimization_reason() const;
// Disable (further) attempted optimization of all functions sharing this
// shared function info.
@@ -554,7 +587,8 @@ class SharedFunctionInfo
// TODO(caitp): make this a flag set during parsing
inline bool has_simple_parameters();
- // Initialize a SharedFunctionInfo from a parsed function literal.
+ // Initialize a SharedFunctionInfo from a parsed or preparsed function
+ // literal.
template <typename IsolateT>
static void InitFromFunctionLiteral(IsolateT* isolate,
Handle<SharedFunctionInfo> shared_info,
@@ -570,6 +604,11 @@ class SharedFunctionInfo
void SetFunctionTokenPosition(int function_token_position,
int start_position);
+ static void EnsureBytecodeArrayAvailable(
+ Isolate* isolate, Handle<SharedFunctionInfo> shared_info,
+ IsCompiledScope* is_compiled,
+ CreateSourcePositions flag = CreateSourcePositions::kNo);
+
inline bool CanCollectSourcePosition(Isolate* isolate);
static void EnsureSourcePositionsAvailable(
Isolate* isolate, Handle<SharedFunctionInfo> shared_info);
@@ -630,7 +669,7 @@ class SharedFunctionInfo
STATIC_ASSERT(BailoutReason::kLastErrorMessage <=
DisabledOptimizationReasonBits::kMax);
- STATIC_ASSERT(kLastFunctionKind <= FunctionKindBits::kMax);
+ STATIC_ASSERT(FunctionKind::kLastFunctionKind <= FunctionKindBits::kMax);
STATIC_ASSERT(FunctionSyntaxKind::kLastFunctionSyntaxKind <=
FunctionSyntaxKindBits::kMax);
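The CopyFrom declaration above backs the placeholder pattern its comment describes: a background compilation job mutates only a placeholder SharedFunctionInfo, and the result is published with one copy on the main thread. A hypothetical illustration (the helper name and call site are assumptions, not part of this patch):

    // Sketch: publish the fields written by the off-thread job back into the
    // real, main-thread-visible SharedFunctionInfo.
    void PublishBackgroundCompilationResult(SharedFunctionInfo placeholder,
                                            SharedFunctionInfo main_thread_sfi) {
      main_thread_sfi.CopyFrom(placeholder);
    }
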
diff --git a/deps/v8/src/objects/shared-function-info.tq b/deps/v8/src/objects/shared-function-info.tq
index 4f80f568dc..ab6eec747c 100644
--- a/deps/v8/src/objects/shared-function-info.tq
+++ b/deps/v8/src/objects/shared-function-info.tq
@@ -44,15 +44,13 @@ bitfield struct SharedFunctionInfoFlags2 extends uint8 {
has_static_private_methods_or_accessors: bool: 1 bit;
}
-@export
-@customCppClass
-@customMap // Just to place the map at the beginning of the roots array.
-class SharedFunctionInfo extends HeapObject {
+@generateBodyDescriptor
+extern class SharedFunctionInfo extends HeapObject {
// function_data field is treated as a custom weak pointer. We visit this
// field as a weak pointer if there is aged bytecode. If there is no bytecode
// or if the bytecode is young then we treat it as a strong pointer. This is
// done to support flushing of bytecode.
- weak function_data: Object;
+ @customWeakMarking function_data: Object;
name_or_scope_info: String|NoSharedNameSentinel|ScopeInfo;
outer_scope_info_or_feedback_metadata: HeapObject;
script_or_debug_info: Script|DebugInfo|Undefined;
@@ -118,25 +116,43 @@ macro IsSharedFunctionInfoDontAdaptArguments(sfi: SharedFunctionInfo): bool {
}
@abstract
-@export
-@customCppClass
-class UncompiledData extends HeapObject {
+extern class UncompiledData extends HeapObject {
inferred_name: String;
start_position: int32;
end_position: int32;
}
-@export
-@customCppClass
-class UncompiledDataWithoutPreparseData extends UncompiledData {
+@generateBodyDescriptor
+@generateUniqueMap
+@generateFactoryFunction
+extern class UncompiledDataWithoutPreparseData extends UncompiledData {
}
-@export
-@customCppClass
-class UncompiledDataWithPreparseData extends UncompiledData {
+@generateBodyDescriptor
+@generateUniqueMap
+@generateFactoryFunction
+extern class UncompiledDataWithPreparseData extends UncompiledData {
preparse_data: PreparseData;
}
+@generateBodyDescriptor
+@generateUniqueMap
+@generateFactoryFunction
+extern class UncompiledDataWithoutPreparseDataWithJob extends
+ UncompiledDataWithoutPreparseData {
+ // TODO(v8:10391): Define the field as ExternalPointer or move jobs into cage.
+ job: RawPtr;
+}
+
+@generateBodyDescriptor
+@generateUniqueMap
+@generateFactoryFunction
+extern class UncompiledDataWithPreparseDataAndJob extends
+ UncompiledDataWithPreparseData {
+ // TODO(v8:10391): Define the field as ExternalPointer or move jobs into cage.
+ job: RawPtr;
+}
+
@export
class OnHeapBasicBlockProfilerData extends HeapObject {
block_ids: ByteArray; // Stored as 4-byte ints
diff --git a/deps/v8/src/objects/source-text-module.cc b/deps/v8/src/objects/source-text-module.cc
index cf1773f2d6..3d8056bc5f 100644
--- a/deps/v8/src/objects/source-text-module.cc
+++ b/deps/v8/src/objects/source-text-module.cc
@@ -576,15 +576,15 @@ void SourceTextModule::FetchStarExports(Isolate* isolate,
// the name to undefined instead of a Cell.
Handle<ObjectHashTable> requested_exports(requested_module->exports(),
isolate);
- for (InternalIndex i : requested_exports->IterateEntries()) {
+ for (InternalIndex index : requested_exports->IterateEntries()) {
Object key;
- if (!requested_exports->ToKey(roots, i, &key)) continue;
+ if (!requested_exports->ToKey(roots, index, &key)) continue;
Handle<String> name(String::cast(key), isolate);
if (name->Equals(roots.default_string())) continue;
if (!exports->Lookup(name).IsTheHole(roots)) continue;
- Handle<Cell> cell(Cell::cast(requested_exports->ValueAt(i)), isolate);
+ Handle<Cell> cell(Cell::cast(requested_exports->ValueAt(index)), isolate);
auto insert_result = more_exports.insert(std::make_pair(name, cell));
if (!insert_result.second) {
auto it = insert_result.first;
@@ -683,8 +683,15 @@ MaybeHandle<JSObject> SourceTextModule::GetImportMeta(
return Handle<JSObject>::cast(import_meta);
}
-MaybeHandle<Object> SourceTextModule::EvaluateMaybeAsync(
+MaybeHandle<Object> SourceTextModule::Evaluate(
Isolate* isolate, Handle<SourceTextModule> module) {
+ CHECK(module->status() == kLinked || module->status() == kEvaluated);
+
+ // 5. Let stack be a new empty List.
+ Zone zone(isolate->allocator(), ZONE_NAME);
+ ZoneForwardList<Handle<SourceTextModule>> stack(&zone);
+ unsigned dfs_index = 0;
+
// 6. Let capability be ! NewPromiseCapability(%Promise%).
Handle<JSPromise> capability = isolate->factory()->NewJSPromise();
@@ -692,18 +699,30 @@ MaybeHandle<Object> SourceTextModule::EvaluateMaybeAsync(
module->set_top_level_capability(*capability);
DCHECK(module->top_level_capability().IsJSPromise());
+ // 8. Let result be InnerModuleEvaluation(module, stack, 0).
// 9. If result is an abrupt completion, then
Handle<Object> unused_result;
- if (!Evaluate(isolate, module).ToHandle(&unused_result)) {
+ if (!InnerModuleEvaluation(isolate, module, &stack, &dfs_index)
+ .ToHandle(&unused_result)) {
+ // a. For each Cyclic Module Record m in stack, do
+ for (auto& descendant : stack) {
+ // i. Assert: m.[[Status]] is "evaluating".
+ CHECK_EQ(descendant->status(), kEvaluating);
+ // ii. Set m.[[Status]] to "evaluated".
+ // iii. Set m.[[EvaluationError]] to result.
+ Module::RecordErrorUsingPendingException(isolate, descendant);
+ }
+
// If the exception was a termination exception, rejecting the promise
// would resume execution, and our API contract is to return an empty
// handle. The module's status should be set to kErrored and the
// exception field should be set to `null`.
if (!isolate->is_catchable_by_javascript(isolate->pending_exception())) {
- DCHECK_EQ(module->status(), kErrored);
- DCHECK_EQ(module->exception(), *isolate->factory()->null_value());
+ CHECK_EQ(module->status(), kErrored);
+ CHECK_EQ(module->exception(), *isolate->factory()->null_value());
return {};
}
+ CHECK_EQ(module->exception(), isolate->pending_exception());
// d. Perform ! Call(capability.[[Reject]], undefined,
// «result.[[Value]]»).
@@ -721,49 +740,13 @@ MaybeHandle<Object> SourceTextModule::EvaluateMaybeAsync(
JSPromise::Resolve(capability, isolate->factory()->undefined_value())
.ToHandleChecked();
}
- }
-
- // 11. Return capability.[[Promise]].
- return capability;
-}
-
-MaybeHandle<Object> SourceTextModule::Evaluate(
- Isolate* isolate, Handle<SourceTextModule> module) {
- // Evaluate () Concrete Method continued from EvaluateMaybeAsync.
- CHECK(module->status() == kLinked || module->status() == kEvaluated);
-
- // 5. Let stack be a new empty List.
- Zone zone(isolate->allocator(), ZONE_NAME);
- ZoneForwardList<Handle<SourceTextModule>> stack(&zone);
- unsigned dfs_index = 0;
-
- // 8. Let result be InnerModuleEvaluation(module, stack, 0).
- // 9. If result is an abrupt completion, then
- Handle<Object> result;
- if (!InnerModuleEvaluation(isolate, module, &stack, &dfs_index)
- .ToHandle(&result)) {
- // a. For each Cyclic Module Record m in stack, do
- for (auto& descendant : stack) {
- // i. Assert: m.[[Status]] is "evaluating".
- CHECK_EQ(descendant->status(), kEvaluating);
- // ii. Set m.[[Status]] to "evaluated".
- // iii. Set m.[[EvaluationError]] to result.
- Module::RecordErrorUsingPendingException(isolate, descendant);
- }
-#ifdef DEBUG
- if (isolate->is_catchable_by_javascript(isolate->pending_exception())) {
- CHECK_EQ(module->exception(), isolate->pending_exception());
- } else {
- CHECK_EQ(module->exception(), *isolate->factory()->null_value());
- }
-#endif // DEBUG
- } else {
- // 10. Otherwise,
// c. Assert: stack is empty.
DCHECK(stack.empty());
}
- return result;
+
+ // 11. Return capability.[[Promise]].
+ return capability;
}
void SourceTextModule::AsyncModuleExecutionFulfilled(
@@ -1008,20 +991,12 @@ MaybeHandle<Object> SourceTextModule::ExecuteModule(
isolate->native_context()->generator_next_internal(), isolate);
Handle<Object> result;
- // With top_level_await, we need to catch any exceptions and reject
- // the top level capability.
- if (FLAG_harmony_top_level_await) {
- ASSIGN_RETURN_ON_EXCEPTION(
- isolate, result,
- Execution::TryCall(isolate, resume, generator, 0, nullptr,
- Execution::MessageHandling::kKeepPending, nullptr,
- false),
- Object);
- } else {
- ASSIGN_RETURN_ON_EXCEPTION(
- isolate, result,
- Execution::Call(isolate, resume, generator, 0, nullptr), Object);
- }
+ ASSIGN_RETURN_ON_EXCEPTION(
+ isolate, result,
+ Execution::TryCall(isolate, resume, generator, 0, nullptr,
+ Execution::MessageHandling::kKeepPending, nullptr,
+ false),
+ Object);
DCHECK(JSIteratorResult::cast(*result).done().BooleanValue(isolate));
return handle(JSIteratorResult::cast(*result).value(), isolate);
}
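With EvaluateMaybeAsync folded into Evaluate and the FLAG_harmony_top_level_await branch in ExecuteModule removed, evaluation always reports completion through the capability promise created in step 6. A hedged embedder-side sketch of that contract through the public API (not code from this patch; context and module are assumed to be set up already):

    v8::Local<v8::Value> result;
    if (module->Evaluate(context).ToLocal(&result)) {
      // The value is the capability promise; it settles once the module graph,
      // including any async modules, finishes evaluating.
      v8::Local<v8::Promise> promise = result.As<v8::Promise>();
      if (promise->State() == v8::Promise::kRejected) {
        // promise->Result() holds the evaluation error.
      }
    }
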
diff --git a/deps/v8/src/objects/source-text-module.h b/deps/v8/src/objects/source-text-module.h
index bb5bd5d796..9894973d9d 100644
--- a/deps/v8/src/objects/source-text-module.h
+++ b/deps/v8/src/objects/source-text-module.h
@@ -17,6 +17,7 @@ namespace v8 {
namespace internal {
class UnorderedModuleSet;
+class StructBodyDescriptor;
#include "torque-generated/src/objects/source-text-module-tq.inc"
@@ -178,10 +179,6 @@ class SourceTextModule
AsyncParentCompletionSet* exec_list);
// Implementation of spec concrete method Evaluate.
- static V8_WARN_UNUSED_RESULT MaybeHandle<Object> EvaluateMaybeAsync(
- Isolate* isolate, Handle<SourceTextModule> module);
-
- // Continued implementation of spec concrete method Evaluate.
static V8_WARN_UNUSED_RESULT MaybeHandle<Object> Evaluate(
Isolate* isolate, Handle<SourceTextModule> module);
@@ -276,6 +273,8 @@ class ModuleRequest
// a single assertion.
static const size_t kAssertionEntrySize = 3;
+ using BodyDescriptor = StructBodyDescriptor;
+
TQ_OBJECT_CONSTRUCTORS(ModuleRequest)
};
@@ -292,6 +291,8 @@ class SourceTextModuleInfoEntry
Handle<PrimitiveHeapObject> import_name, int module_request,
int cell_index, int beg_pos, int end_pos);
+ using BodyDescriptor = StructBodyDescriptor;
+
TQ_OBJECT_CONSTRUCTORS(SourceTextModuleInfoEntry)
};
diff --git a/deps/v8/src/objects/stack-frame-info.cc b/deps/v8/src/objects/stack-frame-info.cc
index 71357816d7..62f97afd19 100644
--- a/deps/v8/src/objects/stack-frame-info.cc
+++ b/deps/v8/src/objects/stack-frame-info.cc
@@ -195,18 +195,18 @@ MaybeHandle<String> FormatEvalOrigin(Isolate* isolate, Handle<Script> script) {
if (sourceURL->IsString()) return Handle<String>::cast(sourceURL);
IncrementalStringBuilder builder(isolate);
- builder.AppendCString("eval at ");
+ builder.AppendCStringLiteral("eval at ");
if (script->has_eval_from_shared()) {
Handle<SharedFunctionInfo> eval_shared(script->eval_from_shared(), isolate);
auto eval_name = SharedFunctionInfo::DebugName(eval_shared);
if (eval_name->length() != 0) {
builder.AppendString(eval_name);
} else {
- builder.AppendCString("<anonymous>");
+ builder.AppendCStringLiteral("<anonymous>");
}
if (eval_shared->script().IsScript()) {
Handle<Script> eval_script(Script::cast(eval_shared->script()), isolate);
- builder.AppendCString(" (");
+ builder.AppendCStringLiteral(" (");
if (eval_script->compilation_type() == Script::COMPILATION_TYPE_EVAL) {
// Eval script originated from another eval.
Handle<String> str;
@@ -222,19 +222,19 @@ MaybeHandle<String> FormatEvalOrigin(Isolate* isolate, Handle<Script> script) {
if (Script::GetPositionInfo(eval_script,
Script::GetEvalPosition(isolate, script),
&info, Script::NO_OFFSET)) {
- builder.AppendCString(":");
+ builder.AppendCharacter(':');
builder.AppendInt(info.line + 1);
- builder.AppendCString(":");
+ builder.AppendCharacter(':');
builder.AppendInt(info.column + 1);
}
} else {
- builder.AppendCString("unknown source");
+ builder.AppendCStringLiteral("unknown source");
}
}
- builder.AppendCString(")");
+ builder.AppendCharacter(')');
}
} else {
- builder.AppendCString("<anonymous>");
+ builder.AppendCStringLiteral("<anonymous>");
}
return builder.Finish().ToHandleChecked();
}
@@ -581,7 +581,8 @@ void AppendFileLocation(Isolate* isolate, Handle<StackFrameInfo> frame,
if (!script_name_or_source_url->IsString() && frame->IsEval()) {
builder->AppendString(
Handle<String>::cast(StackFrameInfo::GetEvalOrigin(frame)));
- builder->AppendCString(", "); // Expecting source position to follow.
+ // Expecting source position to follow.
+ builder->AppendCStringLiteral(", ");
}
if (IsNonEmptyString(script_name_or_source_url)) {
@@ -590,7 +591,7 @@ void AppendFileLocation(Isolate* isolate, Handle<StackFrameInfo> frame,
// Source code does not originate from a file and is not native, but we
// can still get the source position inside the source string, e.g. in
// an eval string.
- builder->AppendCString("<anonymous>");
+ builder->AppendCStringLiteral("<anonymous>");
}
int line_number = StackFrameInfo::GetLineNumber(frame);
@@ -665,7 +666,7 @@ void AppendMethodCall(Isolate* isolate, Handle<StackFrameInfo> frame,
if (IsNonEmptyString(method_name)) {
Handle<String> method_string = Handle<String>::cast(method_name);
if (!StringEndsWithMethodName(isolate, function_string, method_string)) {
- builder->AppendCString(" [as ");
+ builder->AppendCStringLiteral(" [as ");
builder->AppendString(method_string);
builder->AppendCharacter(']');
}
@@ -678,7 +679,7 @@ void AppendMethodCall(Isolate* isolate, Handle<StackFrameInfo> frame,
if (IsNonEmptyString(method_name)) {
builder->AppendString(Handle<String>::cast(method_name));
} else {
- builder->AppendCString("<anonymous>");
+ builder->AppendCStringLiteral("<anonymous>");
}
}
}
@@ -687,24 +688,24 @@ void SerializeJSStackFrame(Isolate* isolate, Handle<StackFrameInfo> frame,
IncrementalStringBuilder* builder) {
Handle<Object> function_name = StackFrameInfo::GetFunctionName(frame);
if (frame->IsAsync()) {
- builder->AppendCString("async ");
+ builder->AppendCStringLiteral("async ");
if (frame->IsPromiseAll() || frame->IsPromiseAny()) {
- builder->AppendCString("Promise.");
+ builder->AppendCStringLiteral("Promise.");
builder->AppendString(Handle<String>::cast(function_name));
- builder->AppendCString(" (index ");
+ builder->AppendCStringLiteral(" (index ");
builder->AppendInt(StackFrameInfo::GetSourcePosition(frame));
- builder->AppendCString(")");
+ builder->AppendCharacter(')');
return;
}
}
if (frame->IsMethodCall()) {
AppendMethodCall(isolate, frame, builder);
} else if (frame->IsConstructor()) {
- builder->AppendCString("new ");
+ builder->AppendCStringLiteral("new ");
if (IsNonEmptyString(function_name)) {
builder->AppendString(Handle<String>::cast(function_name));
} else {
- builder->AppendCString("<anonymous>");
+ builder->AppendCStringLiteral("<anonymous>");
}
} else if (IsNonEmptyString(function_name)) {
builder->AppendString(Handle<String>::cast(function_name));
@@ -712,9 +713,9 @@ void SerializeJSStackFrame(Isolate* isolate, Handle<StackFrameInfo> frame,
AppendFileLocation(isolate, frame, builder);
return;
}
- builder->AppendCString(" (");
+ builder->AppendCStringLiteral(" (");
AppendFileLocation(isolate, frame, builder);
- builder->AppendCString(")");
+ builder->AppendCharacter(')');
}
#if V8_ENABLE_WEBASSEMBLY
@@ -729,32 +730,32 @@ void SerializeWasmStackFrame(Isolate* isolate, Handle<StackFrameInfo> frame,
} else {
builder->AppendString(Handle<String>::cast(module_name));
if (!function_name->IsNull()) {
- builder->AppendCString(".");
+ builder->AppendCharacter('.');
builder->AppendString(Handle<String>::cast(function_name));
}
}
- builder->AppendCString(" (");
+ builder->AppendCStringLiteral(" (");
}
Handle<Object> url(frame->GetScriptNameOrSourceURL(), isolate);
if (IsNonEmptyString(url)) {
builder->AppendString(Handle<String>::cast(url));
} else {
- builder->AppendCString("<anonymous>");
+ builder->AppendCStringLiteral("<anonymous>");
}
- builder->AppendCString(":");
+ builder->AppendCharacter(':');
const int wasm_func_index = frame->GetWasmFunctionIndex();
- builder->AppendCString("wasm-function[");
+ builder->AppendCStringLiteral("wasm-function[");
builder->AppendInt(wasm_func_index);
- builder->AppendCString("]:");
+ builder->AppendCStringLiteral("]:");
char buffer[16];
SNPrintF(base::ArrayVector(buffer), "0x%x",
StackFrameInfo::GetColumnNumber(frame) - 1);
builder->AppendCString(buffer);
- if (has_name) builder->AppendCString(")");
+ if (has_name) builder->AppendCharacter(')');
}
#endif // V8_ENABLE_WEBASSEMBLY
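The switch from AppendCString to AppendCStringLiteral throughout this file trades a per-call strlen for a compile-time array length. A self-contained sketch of the idea (this is not V8's IncrementalStringBuilder; the class and members here are illustrative):

    #include <cstring>
    #include <string>

    class SketchBuilder {
     public:
      // Runtime-length append: needs a strlen() call on every invocation.
      void AppendCString(const char* str) {
        buffer_.append(str, std::strlen(str));
      }

      // Literal append: the array length N is a compile-time constant, and
      // N - 1 excludes the terminating '\0'.
      template <std::size_t N>
      void AppendCStringLiteral(const char (&literal)[N]) {
        static_assert(N >= 1, "string literals include a terminating NUL");
        buffer_.append(literal, N - 1);
      }

      const std::string& result() const { return buffer_; }

     private:
      std::string buffer_;
    };

Note how the diff keeps plain AppendCString only where the length is genuinely dynamic, e.g. the SNPrintF-formatted buffer in SerializeWasmStackFrame.
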
diff --git a/deps/v8/src/objects/stack-frame-info.h b/deps/v8/src/objects/stack-frame-info.h
index ce23de26d4..dad792bee1 100644
--- a/deps/v8/src/objects/stack-frame-info.h
+++ b/deps/v8/src/objects/stack-frame-info.h
@@ -16,6 +16,7 @@ namespace internal {
class MessageLocation;
class WasmInstanceObject;
+class StructBodyDescriptor;
#include "torque-generated/src/objects/stack-frame-info-tq.inc"
@@ -49,8 +50,8 @@ class StackFrameInfo
// Used to signal that the requested field is unknown.
static constexpr int kUnknown = kNoSourcePosition;
- static int GetLineNumber(Handle<StackFrameInfo> info);
- static int GetColumnNumber(Handle<StackFrameInfo> info);
+ V8_EXPORT_PRIVATE static int GetLineNumber(Handle<StackFrameInfo> info);
+ V8_EXPORT_PRIVATE static int GetColumnNumber(Handle<StackFrameInfo> info);
static int GetEnclosingLineNumber(Handle<StackFrameInfo> info);
static int GetEnclosingColumnNumber(Handle<StackFrameInfo> info);
@@ -64,7 +65,8 @@ class StackFrameInfo
Object GetScriptSourceMappingURL() const;
static Handle<PrimitiveHeapObject> GetEvalOrigin(Handle<StackFrameInfo> info);
- static Handle<Object> GetFunctionName(Handle<StackFrameInfo> info);
+ V8_EXPORT_PRIVATE static Handle<Object> GetFunctionName(
+ Handle<StackFrameInfo> info);
static Handle<Object> GetMethodName(Handle<StackFrameInfo> info);
static Handle<Object> GetTypeName(Handle<StackFrameInfo> info);
@@ -86,6 +88,8 @@ class StackFrameInfo
static bool ComputeLocation(Handle<StackFrameInfo> info,
MessageLocation* location);
+ using BodyDescriptor = StructBodyDescriptor;
+
private:
// Bit position in the flag, from least significant bit position.
DEFINE_TORQUE_GENERATED_STACK_FRAME_INFO_FLAGS()
diff --git a/deps/v8/src/objects/string-inl.h b/deps/v8/src/objects/string-inl.h
index 70b63a9446..9a75dd2d06 100644
--- a/deps/v8/src/objects/string-inl.h
+++ b/deps/v8/src/objects/string-inl.h
@@ -158,6 +158,8 @@ bool StringShape::IsIndirect() const {
return (type_ & kIsIndirectStringMask) == kIsIndirectStringTag;
}
+bool StringShape::IsDirect() const { return !IsIndirect(); }
+
bool StringShape::IsExternal() const {
return (type_ & kStringRepresentationMask) == kExternalStringTag;
}
@@ -170,6 +172,31 @@ bool StringShape::IsUncachedExternal() const {
return (type_ & kUncachedExternalStringMask) == kUncachedExternalStringTag;
}
+bool StringShape::IsShared() const {
+  // TODO(v8:12007): Set is_shared to true on internalized strings when
+  // FLAG_shared_string_table is removed.
+ return (type_ & kSharedStringMask) == kSharedStringTag ||
+ (FLAG_shared_string_table && IsInternalized());
+}
+
+bool StringShape::CanMigrateInParallel() const {
+ switch (representation_encoding_and_shared_tag()) {
+ case kSeqOneByteStringTag | kSharedStringTag:
+ case kSeqTwoByteStringTag | kSharedStringTag:
+ // Shared SeqStrings can migrate to ThinStrings.
+ return true;
+ case kThinStringTag | kOneByteStringTag | kSharedStringTag:
+ case kThinStringTag | kTwoByteStringTag | kSharedStringTag:
+ // Shared ThinStrings do not migrate.
+ return false;
+ default:
+ // If you crashed here, you probably added a new shared string
+ // type. Explicitly handle all shared string cases above.
+ DCHECK(!IsShared());
+ return false;
+ }
+}
+
StringRepresentationTag StringShape::representation_tag() const {
uint32_t tag = (type_ & kStringRepresentationMask);
return static_cast<StringRepresentationTag>(tag);
@@ -179,45 +206,49 @@ uint32_t StringShape::encoding_tag() const {
return type_ & kStringEncodingMask;
}
-uint32_t StringShape::full_representation_tag() const {
- return (type_ & (kStringRepresentationMask | kStringEncodingMask));
+uint32_t StringShape::representation_and_encoding_tag() const {
+ return (type_ & (kStringRepresentationAndEncodingMask));
+}
+
+uint32_t StringShape::representation_encoding_and_shared_tag() const {
+ return (type_ & (kStringRepresentationEncodingAndSharedMask));
}
-STATIC_ASSERT((kStringRepresentationMask | kStringEncodingMask) ==
- Internals::kFullStringRepresentationMask);
+STATIC_ASSERT((kStringRepresentationAndEncodingMask) ==
+ Internals::kStringRepresentationAndEncodingMask);
STATIC_ASSERT(static_cast<uint32_t>(kStringEncodingMask) ==
Internals::kStringEncodingMask);
bool StringShape::IsSequentialOneByte() const {
- return full_representation_tag() == (kSeqStringTag | kOneByteStringTag);
+ return representation_and_encoding_tag() == kSeqOneByteStringTag;
}
bool StringShape::IsSequentialTwoByte() const {
- return full_representation_tag() == (kSeqStringTag | kTwoByteStringTag);
+ return representation_and_encoding_tag() == kSeqTwoByteStringTag;
}
bool StringShape::IsExternalOneByte() const {
- return full_representation_tag() == (kExternalStringTag | kOneByteStringTag);
+ return representation_and_encoding_tag() == kExternalOneByteStringTag;
}
-STATIC_ASSERT((kExternalStringTag | kOneByteStringTag) ==
+STATIC_ASSERT(kExternalOneByteStringTag ==
Internals::kExternalOneByteRepresentationTag);
STATIC_ASSERT(v8::String::ONE_BYTE_ENCODING == kOneByteStringTag);
bool StringShape::IsExternalTwoByte() const {
- return full_representation_tag() == (kExternalStringTag | kTwoByteStringTag);
+ return representation_and_encoding_tag() == kExternalTwoByteStringTag;
}
-STATIC_ASSERT((kExternalStringTag | kTwoByteStringTag) ==
+STATIC_ASSERT(kExternalTwoByteStringTag ==
Internals::kExternalTwoByteRepresentationTag);
STATIC_ASSERT(v8::String::TWO_BYTE_ENCODING == kTwoByteStringTag);
template <typename TDispatcher, typename TResult, typename... TArgs>
inline TResult StringShape::DispatchToSpecificTypeWithoutCast(TArgs&&... args) {
- switch (full_representation_tag()) {
+ switch (representation_and_encoding_tag()) {
case kSeqStringTag | kOneByteStringTag:
return TDispatcher::HandleSeqOneByteString(std::forward<TArgs>(args)...);
case kSeqStringTag | kTwoByteStringTag:
@@ -499,23 +530,23 @@ bool String::IsEqualToImpl(
const Char* data = str.data();
while (true) {
int32_t type = string.map(cage_base).instance_type();
- switch (type & (kStringRepresentationMask | kStringEncodingMask)) {
- case kSeqStringTag | kOneByteStringTag:
+ switch (type & kStringRepresentationAndEncodingMask) {
+ case kSeqOneByteStringTag:
return CompareCharsEqual(
SeqOneByteString::cast(string).GetChars(no_gc, access_guard) +
slice_offset,
data, len);
- case kSeqStringTag | kTwoByteStringTag:
+ case kSeqTwoByteStringTag:
return CompareCharsEqual(
SeqTwoByteString::cast(string).GetChars(no_gc, access_guard) +
slice_offset,
data, len);
- case kExternalStringTag | kOneByteStringTag:
+ case kExternalOneByteStringTag:
return CompareCharsEqual(
ExternalOneByteString::cast(string).GetChars(cage_base) +
slice_offset,
data, len);
- case kExternalStringTag | kTwoByteStringTag:
+ case kExternalTwoByteStringTag:
return CompareCharsEqual(
ExternalTwoByteString::cast(string).GetChars(cage_base) +
slice_offset,
@@ -604,20 +635,31 @@ const Char* String::GetChars(
Handle<String> String::Flatten(Isolate* isolate, Handle<String> string,
AllocationType allocation) {
- if (string->IsConsString()) {
- DCHECK(!string->InSharedHeap());
- Handle<ConsString> cons = Handle<ConsString>::cast(string);
- if (cons->IsFlat()) {
- string = handle(cons->first(), isolate);
- } else {
- return SlowFlatten(isolate, cons, allocation);
+ DisallowGarbageCollection no_gc; // Unhandlified code.
+ PtrComprCageBase cage_base(isolate);
+ String s = *string;
+ StringShape shape(s, cage_base);
+
+ // Shortcut already-flat strings.
+ if (V8_LIKELY(shape.IsDirect())) return string;
+
+ if (shape.IsCons()) {
+ DCHECK(!s.InSharedHeap());
+ ConsString cons = ConsString::cast(s);
+ if (!cons.IsFlat(isolate)) {
+ AllowGarbageCollection yes_gc;
+ return SlowFlatten(isolate, handle(cons, isolate), allocation);
}
+ s = cons.first(cage_base);
+ shape = StringShape(s, cage_base);
}
- if (string->IsThinString()) {
- string = handle(Handle<ThinString>::cast(string)->actual(), isolate);
- DCHECK(!string->IsConsString());
+
+ if (shape.IsThin()) {
+ s = ThinString::cast(s).actual(cage_base);
+ DCHECK(!s.IsConsString());
}
- return string;
+
+ return handle(s, isolate);
}
Handle<String> String::Flatten(LocalIsolate* isolate, Handle<String> string,
@@ -627,6 +669,80 @@ Handle<String> String::Flatten(LocalIsolate* isolate, Handle<String> string,
return string;
}
+// static
+base::Optional<String::FlatContent> String::TryGetFlatContentFromDirectString(
+ PtrComprCageBase cage_base, const DisallowGarbageCollection& no_gc,
+ String string, int offset, int length,
+ const SharedStringAccessGuardIfNeeded& access_guard) {
+ DCHECK_GE(offset, 0);
+ DCHECK_GE(length, 0);
+ DCHECK_LE(offset + length, string.length());
+ switch (StringShape{string, cage_base}.representation_and_encoding_tag()) {
+ case kSeqOneByteStringTag:
+ return FlatContent(
+ SeqOneByteString::cast(string).GetChars(no_gc, access_guard) + offset,
+ length, no_gc);
+ case kSeqTwoByteStringTag:
+ return FlatContent(
+ SeqTwoByteString::cast(string).GetChars(no_gc, access_guard) + offset,
+ length, no_gc);
+ case kExternalOneByteStringTag:
+ return FlatContent(
+ ExternalOneByteString::cast(string).GetChars(cage_base) + offset,
+ length, no_gc);
+ case kExternalTwoByteStringTag:
+ return FlatContent(
+ ExternalTwoByteString::cast(string).GetChars(cage_base) + offset,
+ length, no_gc);
+ default:
+ return {};
+ }
+ UNREACHABLE();
+}
+
+String::FlatContent String::GetFlatContent(
+ const DisallowGarbageCollection& no_gc) {
+#if DEBUG
+ // Check that this method is called only from the main thread.
+ {
+ Isolate* isolate;
+ // We don't have to check read only strings as those won't move.
+ DCHECK_IMPLIES(GetIsolateFromHeapObject(*this, &isolate),
+ ThreadId::Current() == isolate->thread_id());
+ }
+#endif
+
+ return GetFlatContent(no_gc, SharedStringAccessGuardIfNeeded::NotNeeded());
+}
+
+String::FlatContent String::GetFlatContent(
+ const DisallowGarbageCollection& no_gc,
+ const SharedStringAccessGuardIfNeeded& access_guard) {
+ PtrComprCageBase cage_base = GetPtrComprCageBase(*this);
+ base::Optional<FlatContent> flat_content = TryGetFlatContentFromDirectString(
+ cage_base, no_gc, *this, 0, length(), access_guard);
+ if (flat_content.has_value()) return flat_content.value();
+ return SlowGetFlatContent(no_gc, access_guard);
+}
+
+Handle<String> String::Share(Isolate* isolate, Handle<String> string) {
+ DCHECK(FLAG_shared_string_table);
+ MaybeHandle<Map> new_map;
+ switch (
+ isolate->factory()->ComputeSharingStrategyForString(string, &new_map)) {
+ case StringTransitionStrategy::kCopy:
+ return SlowShare(isolate, string);
+ case StringTransitionStrategy::kInPlace:
+ // A relaxed write is sufficient here, because at this point the string
+ // has not yet escaped the current thread.
+ DCHECK(string->InSharedHeap());
+ string->set_map_no_write_barrier(*new_map.ToHandleChecked());
+ return string;
+ case StringTransitionStrategy::kAlreadyTransitioned:
+ return string;
+ }
+}
+
uint16_t String::Get(int index) const {
DCHECK(!SharedStringAccessGuardIfNeeded::IsNeeded(*this));
return GetImpl(index, GetPtrComprCageBase(*this),
@@ -680,7 +796,7 @@ void String::Set(int index, uint16_t value) {
DCHECK(index >= 0 && index < length());
DCHECK(StringShape(*this).IsSequential());
- return this->IsOneByteRepresentation()
+ return IsOneByteRepresentation()
? SeqOneByteString::cast(*this).SeqOneByteStringSet(index, value)
: SeqTwoByteString::cast(*this).SeqTwoByteStringSet(index, value);
}
@@ -689,13 +805,21 @@ bool String::IsFlat() const { return IsFlat(GetPtrComprCageBase(*this)); }
bool String::IsFlat(PtrComprCageBase cage_base) const {
if (!StringShape(*this, cage_base).IsCons()) return true;
- return ConsString::cast(*this).second(cage_base).length() == 0;
+ return ConsString::cast(*this).IsFlat(cage_base);
+}
+
+bool String::IsShared() const { return IsShared(GetPtrComprCageBase(*this)); }
+
+bool String::IsShared(PtrComprCageBase cage_base) const {
+ const bool result = StringShape(*this, cage_base).IsShared();
+ DCHECK_IMPLIES(result, InSharedHeap());
+ return result;
}
String String::GetUnderlying() const {
// Giving direct access to underlying string only makes sense if the
// wrapping string is already flattened.
- DCHECK(this->IsFlat());
+ DCHECK(IsFlat());
DCHECK(StringShape(*this).IsIndirect());
STATIC_ASSERT(static_cast<int>(ConsString::kFirstOffset) ==
static_cast<int>(SlicedString::kParentOffset));
@@ -723,30 +847,31 @@ ConsString String::VisitFlat(
DCHECK(offset <= length);
PtrComprCageBase cage_base = GetPtrComprCageBase(string);
while (true) {
- int32_t tag = StringShape(string, cage_base).full_representation_tag();
+ int32_t tag =
+ StringShape(string, cage_base).representation_and_encoding_tag();
switch (tag) {
- case kSeqStringTag | kOneByteStringTag:
+ case kSeqOneByteStringTag:
visitor->VisitOneByteString(
SeqOneByteString::cast(string).GetChars(no_gc, access_guard) +
slice_offset,
length - offset);
return ConsString();
- case kSeqStringTag | kTwoByteStringTag:
+ case kSeqTwoByteStringTag:
visitor->VisitTwoByteString(
SeqTwoByteString::cast(string).GetChars(no_gc, access_guard) +
slice_offset,
length - offset);
return ConsString();
- case kExternalStringTag | kOneByteStringTag:
+ case kExternalOneByteStringTag:
visitor->VisitOneByteString(
ExternalOneByteString::cast(string).GetChars(cage_base) +
slice_offset,
length - offset);
return ConsString();
- case kExternalStringTag | kTwoByteStringTag:
+ case kExternalTwoByteStringTag:
visitor->VisitTwoByteString(
ExternalTwoByteString::cast(string).GetChars(cage_base) +
slice_offset,
@@ -807,10 +932,22 @@ uint8_t SeqOneByteString::Get(
}
void SeqOneByteString::SeqOneByteStringSet(int index, uint16_t value) {
- DCHECK(index >= 0 && index < length() && value <= kMaxOneByteCharCode);
+ DCHECK_GE(index, 0);
+ DCHECK_LT(index, length());
+ DCHECK_LE(value, kMaxOneByteCharCode);
WriteField<byte>(kHeaderSize + index * kCharSize, static_cast<byte>(value));
}
+void SeqOneByteString::SeqOneByteStringSetChars(int index,
+ const uint8_t* string,
+ int string_length) {
+ DCHECK_LE(0, index);
+ DCHECK_LT(index + string_length, length());
+ void* address =
+ reinterpret_cast<void*>(field_address(kHeaderSize + index * kCharSize));
+ memcpy(address, string, string_length);
+}
+
Address SeqOneByteString::GetCharsAddress() const {
return field_address(kHeaderSize);
}
@@ -871,19 +1008,34 @@ inline int SeqTwoByteString::AllocatedSize() {
return SizeFor(length(kAcquireLoad));
}
+// static
+bool SeqOneByteString::IsCompatibleMap(Map map, ReadOnlyRoots roots) {
+ return map == roots.one_byte_string_map() ||
+ map == roots.shared_one_byte_string_map();
+}
+
+// static
+bool SeqTwoByteString::IsCompatibleMap(Map map, ReadOnlyRoots roots) {
+ return map == roots.string_map() || map == roots.shared_string_map();
+}
+
void SlicedString::set_parent(String parent, WriteBarrierMode mode) {
DCHECK(parent.IsSeqString() || parent.IsExternalString());
TorqueGeneratedSlicedString<SlicedString, Super>::set_parent(parent, mode);
}
-Object ConsString::unchecked_first() {
+Object ConsString::unchecked_first() const {
return TaggedField<Object, kFirstOffset>::load(*this);
}
-Object ConsString::unchecked_second() {
+Object ConsString::unchecked_second() const {
return RELAXED_READ_FIELD(*this, kSecondOffset);
}
+bool ConsString::IsFlat(PtrComprCageBase cage_base) const {
+ return second(cage_base).length() == 0;
+}
+
DEF_GETTER(ThinString, unchecked_actual, HeapObject) {
return TaggedField<HeapObject, kActualOffset>::load(cage_base, *this);
}
@@ -1272,6 +1424,8 @@ bool String::IsInPlaceInternalizable(InstanceType instance_type) {
switch (instance_type) {
case STRING_TYPE:
case ONE_BYTE_STRING_TYPE:
+ case SHARED_STRING_TYPE:
+ case SHARED_ONE_BYTE_STRING_TYPE:
case EXTERNAL_STRING_TYPE:
case EXTERNAL_ONE_BYTE_STRING_TYPE:
return true;
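The string-inl.h changes above replace ad-hoc (representation | encoding) combinations with pre-combined constants and a single mask-and-compare. A small sketch of that tag algebra; the numeric values are illustrative only, the real constants live in instance-type.h:

    #include <cstdint>

    // Illustrative layout: low bits hold the representation, one bit holds the
    // character width, one bit marks shared strings.
    constexpr uint32_t kSeqTag = 0x0, kExternalTag = 0x2;
    constexpr uint32_t kTwoByteTag = 0x0, kOneByteTag = 0x8;
    constexpr uint32_t kSharedTag = 0x10;

    constexpr uint32_t kRepresentationAndEncodingMask = 0xF;
    constexpr uint32_t kRepresentationEncodingAndSharedMask = 0x1F;

    // Pre-combined constant, mirroring kSeqOneByteStringTag and friends.
    constexpr uint32_t kSeqOneByte = kSeqTag | kOneByteTag;

    constexpr bool IsSequentialOneByte(uint32_t type) {
      return (type & kRepresentationAndEncodingMask) == kSeqOneByte;
    }
    constexpr bool IsSharedSequentialOneByte(uint32_t type) {
      return (type & kRepresentationEncodingAndSharedMask) ==
             (kSeqOneByte | kSharedTag);
    }
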
diff --git a/deps/v8/src/objects/string-table.cc b/deps/v8/src/objects/string-table.cc
index 28aa58276c..7d9e9d898d 100644
--- a/deps/v8/src/objects/string-table.cc
+++ b/deps/v8/src/objects/string-table.cc
@@ -364,13 +364,13 @@ class InternalizedStringKey final : public StringTableKey {
Handle<String> AsHandle(Isolate* isolate) {
// Internalize the string in-place if possible.
MaybeHandle<Map> maybe_internalized_map;
- StringInternalizationStrategy strategy =
+ StringTransitionStrategy strategy =
isolate->factory()->ComputeInternalizationStrategyForString(
string_, &maybe_internalized_map);
switch (strategy) {
- case StringInternalizationStrategy::kCopy:
+ case StringTransitionStrategy::kCopy:
break;
- case StringInternalizationStrategy::kInPlace:
+ case StringTransitionStrategy::kInPlace:
// A relaxed write is sufficient here even with concurrent
// internalization. Though it is not synchronizing, a thread that does
// not see the relaxed write will wait on the string table write
@@ -381,7 +381,7 @@ class InternalizedStringKey final : public StringTableKey {
*maybe_internalized_map.ToHandleChecked());
DCHECK(string_->IsInternalizedString());
return string_;
- case StringInternalizationStrategy::kAlreadyInternalized:
+ case StringTransitionStrategy::kAlreadyTransitioned:
// We can see already internalized strings here only when sharing the
// string table and allowing concurrent internalization.
DCHECK(FLAG_shared_string_table);
@@ -437,13 +437,11 @@ Handle<String> StringTable::LookupString(Isolate* isolate,
// correctly ordered by LookupKey's write mutex and see the updated map
// during the re-lookup.
//
- // For lookup misses, the internalized string map is the same map in RO
- // space regardless of which thread is doing the lookup.
+ // For lookup misses, the internalized string map is the same map in RO space
+ // regardless of which thread is doing the lookup.
//
- // For lookup hits, String::MakeThin is not threadsafe but is currently
- // only called on strings that are not accessible from multiple threads,
- // even if in the shared heap. TODO(v8:12007) Make String::MakeThin
- // threadsafe so old- generation flat strings can be shared across threads.
+  // For lookup hits, String::MakeThin is threadsafe and uses a spinlock
+  // while migrating the string into a ThinString.
string = String::Flatten(isolate, string);
if (string->IsInternalizedString()) return string;
@@ -454,6 +452,7 @@ Handle<String> StringTable::LookupString(Isolate* isolate,
if (!string->IsInternalizedString()) {
string->MakeThin(isolate, *result);
}
+
return result;
}
@@ -495,16 +494,17 @@ Handle<String> StringTable::LookupKey(IsolateT* isolate, StringTableKey* key) {
// Load the current string table data, in case another thread updates the
// data while we're reading.
- const Data* data = data_.load(std::memory_order_acquire);
+ const Data* current_data = data_.load(std::memory_order_acquire);
// First try to find the string in the table. This is safe to do even if the
// table is now reallocated; we won't find a stale entry in the old table
// because the new table won't delete its corresponding entry until the
// string is dead, in which case it will die in this table too and worst
// case we'll have a false miss.
- InternalIndex entry = data->FindEntry(isolate, key, key->hash());
+ InternalIndex entry = current_data->FindEntry(isolate, key, key->hash());
if (entry.is_found()) {
- Handle<String> result(String::cast(data->Get(isolate, entry)), isolate);
+ Handle<String> result(String::cast(current_data->Get(isolate, entry)),
+ isolate);
DCHECK_IMPLIES(FLAG_shared_string_table, result->InSharedHeap());
return result;
}
@@ -516,7 +516,7 @@ Handle<String> StringTable::LookupKey(IsolateT* isolate, StringTableKey* key) {
// allocates the same string, the insert will fail, the lookup above will
// succeed, and this string will be discarded.
Handle<String> new_string = key->AsHandle(isolate);
- DCHECK_IMPLIES(FLAG_shared_string_table, new_string->InSharedHeap());
+ DCHECK_IMPLIES(FLAG_shared_string_table, new_string->IsShared());
{
base::MutexGuard table_write_guard(&write_mutex_);
diff --git a/deps/v8/src/objects/string.cc b/deps/v8/src/objects/string.cc
index f488781fd5..aea42741d2 100644
--- a/deps/v8/src/objects/string.cc
+++ b/deps/v8/src/objects/string.cc
@@ -4,6 +4,7 @@
#include "src/objects/string.h"
+#include "src/base/platform/yield-processor.h"
#include "src/common/assert-scope.h"
#include "src/common/globals.h"
#include "src/execution/isolate-utils.h"
@@ -79,28 +80,39 @@ Handle<String> String::SlowFlatten(Isolate* isolate, Handle<ConsString> cons,
return result;
}
-Handle<String> String::SlowCopy(Isolate* isolate, Handle<SeqString> source,
- AllocationType allocation) {
- int length = source->length();
- Handle<String> copy;
- if (source->IsOneByteRepresentation()) {
- copy = isolate->factory()
- ->NewRawOneByteString(length, allocation)
- .ToHandleChecked();
+Handle<String> String::SlowShare(Isolate* isolate, Handle<String> source) {
+ DCHECK(FLAG_shared_string_table);
+ Handle<String> flat = Flatten(isolate, source, AllocationType::kSharedOld);
+
+  // Do not recursively call Share; instead, directly compute the sharing
+  // strategy for the flat string, which may already be a copy or an existing
+  // string obtained from e.g. a shortcut ConsString.
+ MaybeHandle<Map> new_map;
+ switch (isolate->factory()->ComputeSharingStrategyForString(flat, &new_map)) {
+ case StringTransitionStrategy::kCopy:
+ break;
+ case StringTransitionStrategy::kInPlace:
+ // A relaxed write is sufficient here, because at this point the string
+ // has not yet escaped the current thread.
+ DCHECK(flat->InSharedHeap());
+ flat->set_map_no_write_barrier(*new_map.ToHandleChecked());
+ return flat;
+ case StringTransitionStrategy::kAlreadyTransitioned:
+ return flat;
+ }
+
+ int length = flat->length();
+ if (flat->IsOneByteRepresentation()) {
+ Handle<SeqOneByteString> copy =
+ isolate->factory()->NewRawSharedOneByteString(length).ToHandleChecked();
DisallowGarbageCollection no_gc;
- String::FlatContent content = source->GetFlatContent(no_gc);
- CopyChars(SeqOneByteString::cast(*copy).GetChars(no_gc),
- content.ToOneByteVector().begin(), length);
+ WriteToFlat(*flat, copy->GetChars(no_gc), 0, length);
return copy;
- } else {
- copy = isolate->factory()
- ->NewRawTwoByteString(length, allocation)
- .ToHandleChecked();
- DisallowGarbageCollection no_gc;
- String::FlatContent content = source->GetFlatContent(no_gc);
- CopyChars(SeqTwoByteString::cast(*copy).GetChars(no_gc),
- content.ToUC16Vector().begin(), length);
}
+ Handle<SeqTwoByteString> copy =
+ isolate->factory()->NewRawSharedTwoByteString(length).ToHandleChecked();
+ DisallowGarbageCollection no_gc;
+ WriteToFlat(*flat, copy->GetChars(no_gc), 0, length);
return copy;
}
@@ -141,6 +153,147 @@ void MigrateExternalString(Isolate* isolate, String string,
}
}
+template <typename IsolateT>
+Map ComputeThinStringMap(IsolateT* isolate, StringShape from_string_shape,
+ bool one_byte) {
+ ReadOnlyRoots roots(isolate);
+ if (from_string_shape.IsShared()) {
+ return one_byte ? roots.shared_thin_one_byte_string_map()
+ : roots.shared_thin_string_map();
+ }
+ return one_byte ? roots.thin_one_byte_string_map() : roots.thin_string_map();
+}
+
+enum class StringMigrationResult {
+ kThisThreadMigrated,
+ kAnotherThreadMigrated
+};
+
+// This function must be used when migrating strings whose
+// StringShape::CanMigrateInParallel() is true. It encapsulates the
+// synchronization needed for parallel migrations from multiple threads. The
+// user passes a lambda that performs the representation update.
+//
+// Returns whether this thread successfully migrated the string or another
+// thread did so.
+//
+// The locking algorithm to migrate a String uses its map word as a migration
+// lock:
+//
+// map = string.map(kAcquireLoad);
+// if (map != SENTINEL_MAP &&
+// string.compare_and_swap_map(map, SENTINEL_MAP)) {
+// // Lock acquired, i.e. the string's map is SENTINEL_MAP.
+// } else {
+// // Lock not acquired. Another thread set the sentinel. Spin until the
+// // map is no longer the sentinel, i.e. until the other thread
+// // releases the lock.
+// Map reloaded_map;
+// do {
+// reloaded_map = string.map(kAcquireLoad);
+// } while (reloaded_map == SENTINEL_MAP);
+// }
+//
+// Some notes on usage:
+// - The initial map must be loaded with kAcquireLoad for synchronization.
+// - Avoid loading the map multiple times. Load the map once and branch
+// on that.
+// - The lambda is passed the string and its initial (pre-migration)
+// StringShape.
+// - The lambda may be executed under a spinlock, so it should be as short
+// as possible.
+// - Currently only SeqString -> ThinString migrations can happen in
+// parallel. If kAnotherThreadMigrated is returned, then the caller doesn't
+// need to do any other work. In the future, if additional migrations can
+// happen in parallel, then restarts may be needed if the parallel migration
+// was to a different type (e.g. SeqString -> External).
+//
+// Example:
+//
+// DisallowGarbageCollection no_gc;
+// Map initial_map = string.map(kAcquireLoad);
+// switch (MigrateStringMapUnderLockIfNeeded(
+// isolate, string, initial_map, target_map,
+// [](Isolate* isolate, String string, StringShape initial_shape) {
+// auto t = TargetStringType::unchecked_cast(string);
+// t.set_field(foo);
+// t.set_another_field(bar);
+//       }, no_gc)) {
+//   case StringMigrationResult::kThisThreadMigrated:
+//     // This thread performed the migration.
+//     break;
+//   case StringMigrationResult::kAnotherThreadMigrated:
+//     // Another thread already migrated the string; nothing more to do.
+//     break;
+// }
+//
+template <typename IsolateT, typename Callback>
+StringMigrationResult MigrateStringMapUnderLockIfNeeded(
+ IsolateT* isolate, String string, Map initial_map, Map target_map,
+ Callback update_representation, const DisallowGarbageCollection& no_gc) {
+ USE(no_gc);
+
+ InstanceType initial_type = initial_map.instance_type();
+ StringShape initial_shape(initial_type);
+
+ if (initial_shape.CanMigrateInParallel()) {
+ // A string whose map is a sentinel map means that it is in the critical
+ // section for being migrated to a different map. There are multiple
+ // sentinel maps: one for each InstanceType that may be migrated from.
+ Map sentinel_map =
+ *isolate->factory()->GetStringMigrationSentinelMap(initial_type);
+
+ // Try to acquire the migration lock by setting the string's map to the
+ // sentinel map. Note that it's possible that we've already witnessed a
+ // sentinel map.
+ if (initial_map == sentinel_map ||
+ !string.release_compare_and_swap_map_word(
+ MapWord::FromMap(initial_map), MapWord::FromMap(sentinel_map))) {
+ // If the lock couldn't be acquired, another thread must be migrating this
+ // string. The string's map will be the sentinel map until the migration
+ // is finished. Spin until the map is no longer the sentinel map.
+ //
+ // TODO(v8:12007): Replace this spin lock with a ParkingLot-like
+ // primitive.
+ Map reloaded_map = string.map(kAcquireLoad);
+ while (reloaded_map == sentinel_map) {
+ YIELD_PROCESSOR;
+ reloaded_map = string.map(kAcquireLoad);
+ }
+
+ // Another thread must have migrated once the map is no longer the
+ // sentinel map.
+ //
+ // TODO(v8:12007): At time of writing there is only a single kind of
+ // migration that can happen in parallel: SeqString -> ThinString. If
+ // other parallel migrations are added, this DCHECK will fail, and users
+ // of MigrateStringMapUnderLockIfNeeded would need to restart if the
+ // string was migrated to a different map than target_map.
+ DCHECK_EQ(reloaded_map, target_map);
+ return StringMigrationResult::kAnotherThreadMigrated;
+ }
+ }
+
+ // With the lock held for cases where it's needed, do the work to update the
+ // representation before storing the map word. In addition to parallel
+ // migrations, this also ensures that the concurrent marker will read the
+ // updated representation when visiting migrated strings.
+ update_representation(isolate, string, initial_shape);
+
+ // Do the store on the map word.
+ //
+ // In debug mode, do a compare-and-swap that is checked to succeed, to check
+ // that all string map migrations are using this function, since to be in the
+ // migration critical section, the string's current map must be the sentinel
+ // map.
+ //
+ // Otherwise do a normal release store.
+ if (DEBUG_BOOL && initial_shape.CanMigrateInParallel()) {
+ DCHECK_NE(initial_map, target_map);
+ Map sentinel_map =
+ *isolate->factory()->GetStringMigrationSentinelMap(initial_type);
+ CHECK(string.release_compare_and_swap_map_word(
+ MapWord::FromMap(sentinel_map), MapWord::FromMap(target_map)));
+ } else {
+ string.set_map(target_map, kReleaseStore);
+ }
+
+ return StringMigrationResult::kThisThreadMigrated;
+}
+
} // namespace
template <typename IsolateT>
@@ -148,28 +301,45 @@ void String::MakeThin(IsolateT* isolate, String internalized) {
DisallowGarbageCollection no_gc;
DCHECK_NE(*this, internalized);
DCHECK(internalized.IsInternalizedString());
- // TODO(v8:12007): Make this method threadsafe.
- DCHECK_IMPLIES(
- InSharedWritableHeap(),
- ThreadId::Current() == GetIsolateFromWritableObject(*this)->thread_id());
- if (this->IsExternalString()) {
- MigrateExternalString(isolate->AsIsolate(), *this, internalized);
+  // Load the map once at the beginning and use it to query the string's
+  // shape, so the map is not reloaded mid-way through a parallel migration.
+  // See the comment above MigrateStringMapUnderLockIfNeeded.
+ Map initial_map = this->map(kAcquireLoad);
+ StringShape initial_shape(initial_map);
+
+ // Another thread may have already migrated the string.
+ if (initial_shape.IsThin()) {
+ DCHECK(initial_shape.IsShared());
+ return;
}
- bool has_pointers = StringShape(*this).IsIndirect();
+ bool has_pointers = initial_shape.IsIndirect();
+ int old_size = this->SizeFromMap(initial_map);
+ Map target_map = ComputeThinStringMap(isolate, initial_shape,
+ internalized.IsOneByteRepresentation());
+ switch (MigrateStringMapUnderLockIfNeeded(
+ isolate, *this, initial_map, target_map,
+ [=](IsolateT* isolate, String string, StringShape initial_shape) {
+ if (initial_shape.IsExternal()) {
+ // TODO(v8:12007): Support external strings.
+ DCHECK(!initial_shape.IsShared());
+ MigrateExternalString(isolate->AsIsolate(), string, internalized);
+ }
+
+ ThinString::unchecked_cast(string).set_actual(internalized);
+ DCHECK_GE(old_size, ThinString::kSize);
+ },
+ no_gc)) {
+ case StringMigrationResult::kThisThreadMigrated:
+ // Overwrite character data with the filler below.
+ break;
+ case StringMigrationResult::kAnotherThreadMigrated:
+ // Nothing to do.
+ return;
+ }
- int old_size = this->Size();
- bool one_byte = internalized.IsOneByteRepresentation();
- Handle<Map> map = one_byte ? isolate->factory()->thin_one_byte_string_map()
- : isolate->factory()->thin_string_map();
- // Update actual first and then do release store on the map word. This ensures
- // that the concurrent marker will read the pointer when visiting a
- // ThinString.
- ThinString thin = ThinString::unchecked_cast(*this);
- thin.set_actual(internalized);
- DCHECK_GE(old_size, ThinString::kSize);
- this->set_map(*map, kReleaseStore);
+ ThinString thin = ThinString::cast(*this);
Address thin_end = thin.address() + ThinString::kSize;
int size_delta = old_size - ThinString::kSize;
if (size_delta != 0) {
@@ -185,8 +355,10 @@ void String::MakeThin(IsolateT* isolate, String internalized) {
}
}
-template void String::MakeThin(Isolate* isolate, String internalized);
-template void String::MakeThin(LocalIsolate* isolate, String internalized);
+template EXPORT_TEMPLATE_DEFINE(V8_EXPORT_PRIVATE) void String::MakeThin(
+ Isolate* isolate, String internalized);
+template EXPORT_TEMPLATE_DEFINE(V8_EXPORT_PRIVATE) void String::MakeThin(
+ LocalIsolate* isolate, String internalized);
bool String::MakeExternal(v8::String::ExternalStringResource* resource) {
// Disallow garbage collection to avoid possible GC vs string access deadlock.
@@ -380,7 +552,6 @@ bool String::SupportsExternalization() {
const char* String::PrefixForDebugPrint() const {
StringShape shape(*this);
if (IsTwoByteRepresentation()) {
- StringShape shape(*this);
if (shape.IsInternalized()) {
return "u#";
} else if (shape.IsCons()) {
@@ -393,7 +564,6 @@ const char* String::PrefixForDebugPrint() const {
return "u\"";
}
} else {
- StringShape shape(*this);
if (shape.IsInternalized()) {
return "#";
} else if (shape.IsCons()) {
@@ -568,63 +738,42 @@ Handle<Object> String::ToNumber(Isolate* isolate, Handle<String> subject) {
return isolate->factory()->NewNumber(StringToDouble(isolate, subject, flags));
}
-String::FlatContent String::GetFlatContent(
- const DisallowGarbageCollection& no_gc) {
-#if DEBUG
- // Check that this method is called only from the main thread.
- {
- Isolate* isolate;
- // We don't have to check read only strings as those won't move.
- DCHECK_IMPLIES(GetIsolateFromHeapObject(*this, &isolate),
- ThreadId::Current() == isolate->thread_id());
- }
-#endif
+String::FlatContent String::SlowGetFlatContent(
+ const DisallowGarbageCollection& no_gc,
+ const SharedStringAccessGuardIfNeeded& access_guard) {
USE(no_gc);
PtrComprCageBase cage_base = GetPtrComprCageBase(*this);
- int length = this->length();
- StringShape shape(*this, cage_base);
String string = *this;
+ StringShape shape(string, cage_base);
int offset = 0;
- if (shape.representation_tag() == kConsStringTag) {
+
+ // Extract cons- and sliced strings.
+ if (shape.IsCons()) {
ConsString cons = ConsString::cast(string);
- if (cons.second(cage_base).length() != 0) {
- return FlatContent(no_gc);
- }
+ if (!cons.IsFlat(cage_base)) return FlatContent(no_gc);
string = cons.first(cage_base);
shape = StringShape(string, cage_base);
- } else if (shape.representation_tag() == kSlicedStringTag) {
+ } else if (shape.IsSliced()) {
SlicedString slice = SlicedString::cast(string);
offset = slice.offset();
string = slice.parent(cage_base);
shape = StringShape(string, cage_base);
- DCHECK(shape.representation_tag() != kConsStringTag &&
- shape.representation_tag() != kSlicedStringTag);
}
- if (shape.representation_tag() == kThinStringTag) {
+
+ DCHECK(!shape.IsCons());
+ DCHECK(!shape.IsSliced());
+
+ // Extract thin strings.
+ if (shape.IsThin()) {
ThinString thin = ThinString::cast(string);
string = thin.actual(cage_base);
shape = StringShape(string, cage_base);
- DCHECK(!shape.IsCons());
- DCHECK(!shape.IsSliced());
- }
- if (shape.encoding_tag() == kOneByteStringTag) {
- const uint8_t* start;
- if (shape.representation_tag() == kSeqStringTag) {
- start = SeqOneByteString::cast(string).GetChars(no_gc);
- } else {
- start = ExternalOneByteString::cast(string).GetChars(cage_base);
- }
- return FlatContent(start + offset, length, no_gc);
- } else {
- DCHECK_EQ(shape.encoding_tag(), kTwoByteStringTag);
- const base::uc16* start;
- if (shape.representation_tag() == kSeqStringTag) {
- start = SeqTwoByteString::cast(string).GetChars(no_gc);
- } else {
- start = ExternalTwoByteString::cast(string).GetChars(cage_base);
- }
- return FlatContent(start + offset, length, no_gc);
}
+
+ DCHECK(shape.IsDirect());
+ return TryGetFlatContentFromDirectString(cage_base, no_gc, string, offset,
+ length(), access_guard)
+ .value();
}
std::unique_ptr<char[]> String::ToCString(AllowNullsFlag allow_nulls,
@@ -697,7 +846,7 @@ void String::WriteToFlat(String source, sinkchar* sink, int start, int length,
DCHECK_LT(0, length);
DCHECK_LE(0, start);
DCHECK_LE(length, source.length());
- switch (StringShape(source, cage_base).full_representation_tag()) {
+ switch (StringShape(source, cage_base).representation_and_encoding_tag()) {
case kOneByteStringTag | kExternalStringTag:
CopyChars(
sink,
@@ -919,17 +1068,19 @@ bool String::SlowEquals(
bool String::SlowEquals(Isolate* isolate, Handle<String> one,
Handle<String> two) {
// Fast check: negative check with lengths.
- int one_length = one->length();
+ const int one_length = one->length();
if (one_length != two->length()) return false;
if (one_length == 0) return true;
// Fast check: if at least one ThinString is involved, dereference it/them
// and restart.
if (one->IsThinString() || two->IsThinString()) {
- if (one->IsThinString())
+ if (one->IsThinString()) {
one = handle(ThinString::cast(*one).actual(), isolate);
- if (two->IsThinString())
+ }
+ if (two->IsThinString()) {
two = handle(ThinString::cast(*two).actual(), isolate);
+ }
return String::Equals(isolate, one, two);
}
@@ -967,12 +1118,17 @@ bool String::SlowEquals(Isolate* isolate, Handle<String> one,
if (flat1.IsOneByte() && flat2.IsOneByte()) {
return CompareCharsEqual(flat1.ToOneByteVector().begin(),
flat2.ToOneByteVector().begin(), one_length);
- } else {
- for (int i = 0; i < one_length; i++) {
- if (flat1.Get(i) != flat2.Get(i)) return false;
- }
- return true;
+ } else if (flat1.IsTwoByte() && flat2.IsTwoByte()) {
+ return CompareCharsEqual(flat1.ToUC16Vector().begin(),
+ flat2.ToUC16Vector().begin(), one_length);
+ } else if (flat1.IsOneByte() && flat2.IsTwoByte()) {
+ return CompareCharsEqual(flat1.ToOneByteVector().begin(),
+ flat2.ToUC16Vector().begin(), one_length);
+ } else if (flat1.IsTwoByte() && flat2.IsOneByte()) {
+ return CompareCharsEqual(flat1.ToUC16Vector().begin(),
+ flat2.ToOneByteVector().begin(), one_length);
}
+ UNREACHABLE();
}
// static
@@ -1794,7 +1950,7 @@ const byte* String::AddressOfCharacterAt(
}
CHECK_LE(0, start_index);
CHECK_LE(start_index, subject.length());
- switch (shape.full_representation_tag()) {
+ switch (shape.representation_and_encoding_tag()) {
case kOneByteStringTag | kSeqStringTag:
return reinterpret_cast<const byte*>(
SeqOneByteString::cast(subject).GetChars(no_gc) + start_index);
diff --git a/deps/v8/src/objects/string.h b/deps/v8/src/objects/string.h
index 635da7cd53..092e5e707b 100644
--- a/deps/v8/src/objects/string.h
+++ b/deps/v8/src/objects/string.h
@@ -12,6 +12,7 @@
#include "src/base/strings.h"
#include "src/common/globals.h"
#include "src/objects/instance-type.h"
+#include "src/objects/map.h"
#include "src/objects/name.h"
#include "src/objects/smi.h"
#include "src/strings/unicode-decoder.h"
@@ -42,25 +43,29 @@ enum RobustnessFlag { ROBUST_STRING_TRAVERSAL, FAST_STRING_TRAVERSAL };
// concrete performance benefit at that particular point in the code.
class StringShape {
public:
- inline explicit StringShape(const String s);
- inline explicit StringShape(const String s, PtrComprCageBase cage_base);
- inline explicit StringShape(Map s);
- inline explicit StringShape(InstanceType t);
- inline bool IsSequential() const;
- inline bool IsExternal() const;
- inline bool IsCons() const;
- inline bool IsSliced() const;
- inline bool IsThin() const;
- inline bool IsIndirect() const;
- inline bool IsUncachedExternal() const;
- inline bool IsExternalOneByte() const;
- inline bool IsExternalTwoByte() const;
- inline bool IsSequentialOneByte() const;
- inline bool IsSequentialTwoByte() const;
- inline bool IsInternalized() const;
- inline StringRepresentationTag representation_tag() const;
- inline uint32_t encoding_tag() const;
- inline uint32_t full_representation_tag() const;
+ V8_INLINE explicit StringShape(const String s);
+ V8_INLINE explicit StringShape(const String s, PtrComprCageBase cage_base);
+ V8_INLINE explicit StringShape(Map s);
+ V8_INLINE explicit StringShape(InstanceType t);
+ V8_INLINE bool IsSequential() const;
+ V8_INLINE bool IsExternal() const;
+ V8_INLINE bool IsCons() const;
+ V8_INLINE bool IsSliced() const;
+ V8_INLINE bool IsThin() const;
+ V8_INLINE bool IsDirect() const;
+ V8_INLINE bool IsIndirect() const;
+ V8_INLINE bool IsUncachedExternal() const;
+ V8_INLINE bool IsExternalOneByte() const;
+ V8_INLINE bool IsExternalTwoByte() const;
+ V8_INLINE bool IsSequentialOneByte() const;
+ V8_INLINE bool IsSequentialTwoByte() const;
+ V8_INLINE bool IsInternalized() const;
+ V8_INLINE bool IsShared() const;
+ V8_INLINE bool CanMigrateInParallel() const;
+ V8_INLINE StringRepresentationTag representation_tag() const;
+ V8_INLINE uint32_t encoding_tag() const;
+ V8_INLINE uint32_t representation_and_encoding_tag() const;
+ V8_INLINE uint32_t representation_encoding_and_shared_tag() const;
#ifdef DEBUG
inline uint32_t type() const { return type_; }
inline void invalidate() { valid_ = false; }
@@ -176,6 +181,7 @@ class String : public TorqueGeneratedString<String, Name> {
};
template <typename IsolateT>
+ EXPORT_TEMPLATE_DECLARE(V8_EXPORT_PRIVATE)
void MakeThin(IsolateT* isolate, String canonical);
template <typename Char>
@@ -250,10 +256,10 @@ class String : public TorqueGeneratedString<String, Name> {
// Degenerate cons strings are handled specially by the garbage
// collector (see IsShortcutCandidate).
- static inline Handle<String> Flatten(
+ static V8_INLINE Handle<String> Flatten(
Isolate* isolate, Handle<String> string,
AllocationType allocation = AllocationType::kYoung);
- static inline Handle<String> Flatten(
+ static V8_INLINE Handle<String> Flatten(
LocalIsolate* isolate, Handle<String> string,
AllocationType allocation = AllocationType::kYoung);
@@ -262,13 +268,23 @@ class String : public TorqueGeneratedString<String, Name> {
// If the string isn't flat, and therefore doesn't have flat content, the
// returned structure will report so, and can't provide a vector of either
// kind.
- V8_EXPORT_PRIVATE FlatContent
+  // When using a SharedStringAccessGuard, the guard must outlive the
+  // returned FlatContent.
+ V8_EXPORT_PRIVATE V8_INLINE FlatContent
GetFlatContent(const DisallowGarbageCollection& no_gc);
+ V8_EXPORT_PRIVATE V8_INLINE FlatContent
+ GetFlatContent(const DisallowGarbageCollection& no_gc,
+ const SharedStringAccessGuardIfNeeded&);
// Returns the parent of a sliced string or first part of a flat cons string.
// Requires: StringShape(this).IsIndirect() && this->IsFlat()
inline String GetUnderlying() const;
+ // Shares the string. Checks inline if the string is already shared or can be
+ // shared by transitioning its map in-place. If neither is possible, flattens
+ // and copies into a new shared sequential string.
+ static inline Handle<String> Share(Isolate* isolate, Handle<String> string);
+
// String relational comparison, implemented according to ES6 section 7.2.11
// Abstract Relational Comparison (step 5): The comparison of Strings uses a
// simple lexicographic ordering on sequences of code unit values. There is no
@@ -437,6 +453,9 @@ class String : public TorqueGeneratedString<String, Name> {
inline bool IsFlat() const;
inline bool IsFlat(PtrComprCageBase cage_base) const;
+ inline bool IsShared() const;
+ inline bool IsShared(PtrComprCageBase cage_base) const;
+
// Max char codes.
static const int32_t kMaxOneByteCharCode = unibrow::Latin1::kMaxChar;
static const uint32_t kMaxOneByteCharCodeU = unibrow::Latin1::kMaxChar;
@@ -585,8 +604,17 @@ class String : public TorqueGeneratedString<String, Name> {
V8_EXPORT_PRIVATE static Handle<String> SlowFlatten(
Isolate* isolate, Handle<ConsString> cons, AllocationType allocation);
- static Handle<String> SlowCopy(Isolate* isolate, Handle<SeqString> source,
- AllocationType allocation);
+ V8_EXPORT_PRIVATE V8_INLINE static base::Optional<FlatContent>
+ TryGetFlatContentFromDirectString(PtrComprCageBase cage_base,
+ const DisallowGarbageCollection& no_gc,
+ String string, int offset, int length,
+ const SharedStringAccessGuardIfNeeded&);
+ V8_EXPORT_PRIVATE FlatContent
+ SlowGetFlatContent(const DisallowGarbageCollection& no_gc,
+ const SharedStringAccessGuardIfNeeded&);
+
+ V8_EXPORT_PRIVATE static Handle<String> SlowShare(Isolate* isolate,
+ Handle<String> source);
// Slow case of String::Equals. This implementation works on any strings
// but it is most efficient on strings that are almost flat.
@@ -674,6 +702,8 @@ class SeqOneByteString
inline uint8_t Get(int index, PtrComprCageBase cage_base,
const SharedStringAccessGuardIfNeeded& access_guard) const;
inline void SeqOneByteStringSet(int index, uint16_t value);
+ inline void SeqOneByteStringSetChars(int index, const uint8_t* string,
+ int length);
// Get the address of the characters in this string.
inline Address GetCharsAddress() const;
@@ -692,11 +722,6 @@ class SeqOneByteString
// is deterministic.
void clear_padding();
- // Garbage collection support. This method is called by the
- // garbage collector to compute the actual size of an OneByteString
- // instance.
- inline int SeqOneByteStringSize(InstanceType instance_type);
-
// Maximal memory usage for a single sequential one-byte string.
static const int kMaxCharsSize = kMaxLength;
static const int kMaxSize = OBJECT_POINTER_ALIGN(kMaxCharsSize + kHeaderSize);
@@ -704,6 +729,9 @@ class SeqOneByteString
int AllocatedSize();
+  // A SeqOneByteString has different maps depending on whether it is shared.
+ static inline bool IsCompatibleMap(Map map, ReadOnlyRoots roots);
+
class BodyDescriptor;
TQ_OBJECT_CONSTRUCTORS(SeqOneByteString)
@@ -740,11 +768,6 @@ class SeqTwoByteString
// is deterministic.
void clear_padding();
- // Garbage collection support. This method is called by the
- // garbage collector to compute the actual size of a TwoByteString
- // instance.
- inline int SeqTwoByteStringSize(InstanceType instance_type);
-
// Maximal memory usage for a single sequential two-byte string.
static const int kMaxCharsSize = kMaxLength * 2;
static const int kMaxSize = OBJECT_POINTER_ALIGN(kMaxCharsSize + kHeaderSize);
@@ -753,6 +776,9 @@ class SeqTwoByteString
int AllocatedSize();
+  // A SeqTwoByteString has different maps depending on whether it is shared.
+ static inline bool IsCompatibleMap(Map map, ReadOnlyRoots roots);
+
class BodyDescriptor;
TQ_OBJECT_CONSTRUCTORS(SeqTwoByteString)
@@ -770,11 +796,13 @@ class ConsString : public TorqueGeneratedConsString<ConsString, String> {
public:
// Doesn't check that the result is a string, even in debug mode. This is
// useful during GC where the mark bits confuse the checks.
- inline Object unchecked_first();
+ inline Object unchecked_first() const;
// Doesn't check that the result is a string, even in debug mode. This is
// useful during GC where the mark bits confuse the checks.
- inline Object unchecked_second();
+ inline Object unchecked_second() const;
+
+ V8_INLINE bool IsFlat(PtrComprCageBase cage_base) const;
// Dispatched behavior.
V8_EXPORT_PRIVATE uint16_t
@@ -988,12 +1016,12 @@ class V8_EXPORT_PRIVATE FlatStringReader : public Relocatable {
inline base::uc32 Get(int index) const;
template <typename Char>
inline Char Get(int index) const;
- int length() { return length_; }
+ int length() const { return length_; }
private:
Handle<String> str_;
bool is_one_byte_;
- int length_;
+ int const length_;
const void* start_;
};
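The String::Share comment added above describes a three-way decision. A rough, self-contained sketch of that order, using a hypothetical stand-in type instead of the real V8 API:

// Illustrative only: the decision order described by the Share() comment,
// modeled on a hypothetical stand-in type, not the real V8 classes.
enum class ShareOutcome { kAlreadyShared, kSharedInPlace, kCopiedToSharedSeq };

struct ShareableString {
  bool is_shared = false;
  bool can_share_in_place = false;  // e.g. map can be transitioned in place
};

ShareOutcome Share(ShareableString& s) {
  if (s.is_shared) return ShareOutcome::kAlreadyShared;  // fast path
  if (s.can_share_in_place) {
    s.is_shared = true;                                  // in-place map transition
    return ShareOutcome::kSharedInPlace;
  }
  // Otherwise: flatten and copy into a new shared sequential string.
  return ShareOutcome::kCopiedToSharedSeq;
}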
diff --git a/deps/v8/src/objects/string.tq b/deps/v8/src/objects/string.tq
index 4894147003..e202199e36 100644
--- a/deps/v8/src/objects/string.tq
+++ b/deps/v8/src/objects/string.tq
@@ -5,7 +5,7 @@
#include 'src/builtins/builtins-string-gen.h'
@abstract
-@reserveBitsInInstanceType(6)
+@reserveBitsInInstanceType(7)
extern class String extends Name {
macro StringInstanceType(): StringInstanceType {
return %RawDownCast<StringInstanceType>(
@@ -32,6 +32,7 @@ bitfield struct StringInstanceType extends uint16 {
is_one_byte: bool: 1 bit;
is_uncached: bool: 1 bit;
is_not_internalized: bool: 1 bit;
+ is_shared: bool: 1 bit;
}
@generateBodyDescriptor
diff --git a/deps/v8/src/objects/struct.h b/deps/v8/src/objects/struct.h
index 41a4b2b481..70c9377dff 100644
--- a/deps/v8/src/objects/struct.h
+++ b/deps/v8/src/objects/struct.h
@@ -14,6 +14,8 @@
namespace v8 {
namespace internal {
+class StructBodyDescriptor;
+
#include "torque-generated/src/objects/struct-tq.inc"
// An abstract superclass, a marker class really, for simple structure classes.
@@ -31,6 +33,8 @@ class Tuple2 : public TorqueGeneratedTuple2<Tuple2, Struct> {
public:
void BriefPrintDetails(std::ostream& os);
+ using BodyDescriptor = StructBodyDescriptor;
+
TQ_OBJECT_CONSTRUCTORS(Tuple2)
};
@@ -69,6 +73,8 @@ class AccessorPair : public TorqueGeneratedAccessorPair<AccessorPair, Struct> {
inline bool Equals(Object getter_value, Object setter_value);
+ using BodyDescriptor = StructBodyDescriptor;
+
TQ_OBJECT_CONSTRUCTORS(AccessorPair)
};
@@ -78,6 +84,8 @@ class ClassPositions
// Dispatched behavior.
void BriefPrintDetails(std::ostream& os);
+ using BodyDescriptor = StructBodyDescriptor;
+
TQ_OBJECT_CONSTRUCTORS(ClassPositions)
};
diff --git a/deps/v8/src/objects/synthetic-module.cc b/deps/v8/src/objects/synthetic-module.cc
index 0322ca9b8a..d2687692f0 100644
--- a/deps/v8/src/objects/synthetic-module.cc
+++ b/deps/v8/src/objects/synthetic-module.cc
@@ -119,23 +119,21 @@ MaybeHandle<Object> SyntheticModule::Evaluate(Isolate* isolate,
Handle<Object> result_from_callback = Utils::OpenHandle(*result);
- if (FLAG_harmony_top_level_await) {
- Handle<JSPromise> capability;
- if (result_from_callback->IsJSPromise()) {
- capability = Handle<JSPromise>::cast(result_from_callback);
- } else {
- // The host's evaluation steps should have returned a resolved Promise,
- // but as an allowance to hosts that have not yet finished the migration
- // to top-level await, create a Promise if the callback result didn't give
- // us one.
- capability = isolate->factory()->NewJSPromise();
- JSPromise::Resolve(capability, isolate->factory()->undefined_value())
- .ToHandleChecked();
- }
-
- module->set_top_level_capability(*capability);
+ Handle<JSPromise> capability;
+ if (result_from_callback->IsJSPromise()) {
+ capability = Handle<JSPromise>::cast(result_from_callback);
+ } else {
+ // The host's evaluation steps should have returned a resolved Promise,
+ // but as an allowance to hosts that have not yet finished the migration
+ // to top-level await, create a Promise if the callback result didn't give
+ // us one.
+ capability = isolate->factory()->NewJSPromise();
+ JSPromise::Resolve(capability, isolate->factory()->undefined_value())
+ .ToHandleChecked();
}
+ module->set_top_level_capability(*capability);
+
return result_from_callback;
}
diff --git a/deps/v8/src/objects/tagged-field.h b/deps/v8/src/objects/tagged-field.h
index 7faf9e9ac9..d9fc0bb102 100644
--- a/deps/v8/src/objects/tagged-field.h
+++ b/deps/v8/src/objects/tagged-field.h
@@ -49,7 +49,7 @@ class TaggedField : public AllStatic {
int offset = 0);
static inline void Relaxed_Store(HeapObject host, T value);
- static void Relaxed_Store(HeapObject host, int offset, T value);
+ static inline void Relaxed_Store(HeapObject host, int offset, T value);
static inline T Acquire_Load(HeapObject host, int offset = 0);
static inline T Acquire_Load_No_Unpack(PtrComprCageBase cage_base,
diff --git a/deps/v8/src/objects/template-objects.h b/deps/v8/src/objects/template-objects.h
index 24c5b49d76..a02b1d3651 100644
--- a/deps/v8/src/objects/template-objects.h
+++ b/deps/v8/src/objects/template-objects.h
@@ -15,6 +15,8 @@
namespace v8 {
namespace internal {
+class StructBodyDescriptor;
+
#include "torque-generated/src/objects/template-objects-tq.inc"
// CachedTemplateObject is a tuple used to cache a TemplateObject that has been
@@ -27,6 +29,8 @@ class CachedTemplateObject final
Handle<JSArray> template_object,
Handle<HeapObject> next);
+ using BodyDescriptor = StructBodyDescriptor;
+
TQ_OBJECT_CONSTRUCTORS(CachedTemplateObject)
};
@@ -42,6 +46,8 @@ class TemplateObjectDescription final
Handle<TemplateObjectDescription> description,
Handle<SharedFunctionInfo> shared_info, int slot_id);
+ using BodyDescriptor = StructBodyDescriptor;
+
TQ_OBJECT_CONSTRUCTORS(TemplateObjectDescription)
};
diff --git a/deps/v8/src/objects/templates.cc b/deps/v8/src/objects/templates.cc
index 91306861e6..93c0220878 100644
--- a/deps/v8/src/objects/templates.cc
+++ b/deps/v8/src/objects/templates.cc
@@ -40,9 +40,9 @@ Handle<SharedFunctionInfo> FunctionTemplateInfo::GetOrCreateSharedFunctionInfo(
}
FunctionKind function_kind;
if (info->remove_prototype()) {
- function_kind = kConciseMethod;
+ function_kind = FunctionKind::kConciseMethod;
} else {
- function_kind = kNormalFunction;
+ function_kind = FunctionKind::kNormalFunction;
}
Handle<SharedFunctionInfo> result =
isolate->factory()->NewSharedFunctionInfoForApiFunction(name_string, info,
diff --git a/deps/v8/src/objects/templates.h b/deps/v8/src/objects/templates.h
index 0b6de3d832..8ab006ab97 100644
--- a/deps/v8/src/objects/templates.h
+++ b/deps/v8/src/objects/templates.h
@@ -14,6 +14,7 @@
namespace v8 {
class CFunctionInfo;
+class StructBodyDescriptor;
namespace internal {
@@ -40,6 +41,8 @@ class TemplateInfo : public TorqueGeneratedTemplateInfo<TemplateInfo, Struct> {
inline bool should_cache() const;
inline bool is_cached() const;
+ using BodyDescriptor = StructBodyDescriptor;
+
TQ_OBJECT_CONSTRUCTORS(TemplateInfo)
};
@@ -49,6 +52,9 @@ class FunctionTemplateRareData
Struct> {
public:
DECL_VERIFIER(FunctionTemplateRareData)
+
+ using BodyDescriptor = StructBodyDescriptor;
+
TQ_OBJECT_CONSTRUCTORS(FunctionTemplateRareData)
};
@@ -176,6 +182,8 @@ class FunctionTemplateInfo
// Bit position in the flag, from least significant bit position.
DEFINE_TORQUE_GENERATED_FUNCTION_TEMPLATE_INFO_FLAGS()
+ using BodyDescriptor = StructBodyDescriptor;
+
private:
static constexpr int kNoJSApiObjectType = 0;
static inline FunctionTemplateRareData EnsureFunctionTemplateRareData(
@@ -202,6 +210,8 @@ class ObjectTemplateInfo
// chain till a function template that has an instance template is found.
inline ObjectTemplateInfo GetParent(Isolate* isolate);
+ using BodyDescriptor = StructBodyDescriptor;
+
private:
DEFINE_TORQUE_GENERATED_OBJECT_TEMPLATE_INFO_FLAGS()
diff --git a/deps/v8/src/objects/transitions-inl.h b/deps/v8/src/objects/transitions-inl.h
index 91cc906013..16f50969ae 100644
--- a/deps/v8/src/objects/transitions-inl.h
+++ b/deps/v8/src/objects/transitions-inl.h
@@ -328,7 +328,7 @@ Handle<String> TransitionsAccessor::ExpectedTransitionKey() {
PropertyDetails details = GetSimpleTargetDetails(target);
if (details.location() != PropertyLocation::kField)
return Handle<String>::null();
- DCHECK_EQ(kData, details.kind());
+ DCHECK_EQ(PropertyKind::kData, details.kind());
if (details.attributes() != NONE) return Handle<String>::null();
Name name = GetSimpleTransitionKey(target);
if (!name.IsString()) return Handle<String>::null();
diff --git a/deps/v8/src/objects/transitions.cc b/deps/v8/src/objects/transitions.cc
index 0e76dc4e1b..7e83392c86 100644
--- a/deps/v8/src/objects/transitions.cc
+++ b/deps/v8/src/objects/transitions.cc
@@ -265,11 +265,11 @@ MaybeHandle<Map> TransitionsAccessor::FindTransitionToDataProperty(
DCHECK(name->IsUniqueName());
DisallowGarbageCollection no_gc;
PropertyAttributes attributes = name->IsPrivate() ? DONT_ENUM : NONE;
- Map target = SearchTransition(*name, kData, attributes);
+ Map target = SearchTransition(*name, PropertyKind::kData, attributes);
if (target.is_null()) return MaybeHandle<Map>();
PropertyDetails details = target.GetLastDescriptorDetails(isolate_);
DCHECK_EQ(attributes, details.attributes());
- DCHECK_EQ(kData, details.kind());
+ DCHECK_EQ(PropertyKind::kData, details.kind());
if (requested_location == kFieldOnly &&
details.location() != PropertyLocation::kField) {
return MaybeHandle<Map>();
@@ -689,7 +689,7 @@ void TransitionArray::Sort() {
for (int i = 1; i < length; i++) {
Name key = GetKey(i);
MaybeObject target = GetRawTarget(i);
- PropertyKind kind = kData;
+ PropertyKind kind = PropertyKind::kData;
PropertyAttributes attributes = NONE;
if (!TransitionsAccessor::IsSpecialTransition(roots, key)) {
Map target_map = TransitionsAccessor::GetTargetFromRaw(target);
@@ -702,7 +702,7 @@ void TransitionArray::Sort() {
for (j = i - 1; j >= 0; j--) {
Name temp_key = GetKey(j);
MaybeObject temp_target = GetRawTarget(j);
- PropertyKind temp_kind = kData;
+ PropertyKind temp_kind = PropertyKind::kData;
PropertyAttributes temp_attributes = NONE;
if (!TransitionsAccessor::IsSpecialTransition(roots, temp_key)) {
Map temp_target_map =
diff --git a/deps/v8/src/objects/turbofan-types.h b/deps/v8/src/objects/turbofan-types.h
index 953b6950ab..409645a2d2 100644
--- a/deps/v8/src/objects/turbofan-types.h
+++ b/deps/v8/src/objects/turbofan-types.h
@@ -17,9 +17,14 @@ namespace internal {
#include "torque-generated/src/objects/turbofan-types-tq.inc"
-class TurbofanTypeBits {
+class TurbofanTypeLowBits {
public:
- DEFINE_TORQUE_GENERATED_TURBOFAN_TYPE_BITS()
+ DEFINE_TORQUE_GENERATED_TURBOFAN_TYPE_LOW_BITS()
+};
+
+class TurbofanTypeHighBits {
+ public:
+ DEFINE_TORQUE_GENERATED_TURBOFAN_TYPE_HIGH_BITS()
};
} // namespace internal
diff --git a/deps/v8/src/objects/turbofan-types.tq b/deps/v8/src/objects/turbofan-types.tq
index 035b6f8829..05e93918a0 100644
--- a/deps/v8/src/objects/turbofan-types.tq
+++ b/deps/v8/src/objects/turbofan-types.tq
@@ -9,7 +9,10 @@
class TurbofanType extends HeapObject {
}
-bitfield struct TurbofanTypeBits extends uint32 {
+// TurbofanBitsetType is 64 bit.
+// We use two separate 32 bit bitsets in Torque, due to limited support
+// for 64 bit bitsets.
+bitfield struct TurbofanTypeLowBits extends uint32 {
_unused_padding_field_1: bool: 1 bit;
other_unsigned31: bool: 1 bit;
other_unsigned32: bool: 1 bit;
@@ -44,9 +47,14 @@ bitfield struct TurbofanTypeBits extends uint32 {
caged_pointer: bool: 1 bit;
}
+bitfield struct TurbofanTypeHighBits extends uint32 {
+ wasm_object: bool: 1 bit;
+}
+
@export
class TurbofanBitsetType extends TurbofanType {
- bitset: TurbofanTypeBits;
+ bitset_low: TurbofanTypeLowBits;
+ bitset_high: TurbofanTypeHighBits;
}
@export
@@ -75,83 +83,92 @@ macro IsMinusZero(x: float64): bool {
return x == 0 && 1 / x < 0;
}
-macro TestTurbofanBitsetType(value: Object, bitset: TurbofanTypeBits): bool {
+macro TestTurbofanBitsetType(
+ value: Object, bitsetLow: TurbofanTypeLowBits,
+ bitsetHigh: TurbofanTypeHighBits): bool {
+ // Silence unused warnings on builds that don't need {bitsetHigh}.
+ const _unused = bitsetHigh;
typeswitch (value) {
case (value: Number): {
const valueF = Convert<float64>(value);
if (IsInteger(value)) {
if (IsMinusZero(valueF)) {
- return bitset.minus_zero;
+ return bitsetLow.minus_zero;
} else if (valueF < Convert<float64>(-0x80000000)) {
- return bitset.other_number;
+ return bitsetLow.other_number;
} else if (valueF < -0x40000000) {
- return bitset.other_signed32;
+ return bitsetLow.other_signed32;
} else if (valueF < 0) {
- return bitset.negative31;
+ return bitsetLow.negative31;
} else if (valueF < Convert<float64>(0x40000000)) {
- return bitset.unsigned30;
+ return bitsetLow.unsigned30;
} else if (valueF < 0x80000000) {
- return bitset.other_unsigned31;
+ return bitsetLow.other_unsigned31;
} else if (valueF <= 0xffffffff) {
- return bitset.other_unsigned32;
+ return bitsetLow.other_unsigned32;
} else {
- return bitset.other_number;
+ return bitsetLow.other_number;
}
} else if (Float64IsNaN(valueF)) {
- return bitset.naN;
+ return bitsetLow.naN;
} else {
- return bitset.other_number;
+ return bitsetLow.other_number;
}
}
case (Null): {
- return bitset.null;
+ return bitsetLow.null;
}
case (Undefined): {
- return bitset.undefined;
+ return bitsetLow.undefined;
}
case (Boolean): {
- return bitset.boolean;
+ return bitsetLow.boolean;
}
case (Symbol): {
- return bitset.symbol;
+ return bitsetLow.symbol;
}
case (s: String): {
if (s.IsNotInternalized()) {
- return bitset.other_string;
+ return bitsetLow.other_string;
} else {
- return bitset.internalized_string;
+ return bitsetLow.internalized_string;
}
}
case (proxy: JSProxy): {
- return Is<Callable>(proxy) ? bitset.callable_proxy : bitset.other_proxy;
+ return Is<Callable>(proxy) ? bitsetLow.callable_proxy :
+ bitsetLow.other_proxy;
}
case (JSFunction): {
- return bitset.function;
+ return bitsetLow.function;
}
case (JSBoundFunction): {
- return bitset.bound_function;
+ return bitsetLow.bound_function;
}
case (TheHole): {
- return bitset.hole;
+ return bitsetLow.hole;
}
case (JSArray): {
- return bitset.array;
+ return bitsetLow.array;
}
case (BigInt): {
// TODO (tebbi): Distinguish different BigInt types.
- return bitset.unsigned_big_int_63 | bitset.other_unsigned_big_int_64 |
- bitset.negative_big_int_63 | bitset.other_big_int;
- }
- case (Callable): {
- return bitset.other_callable;
+ return bitsetLow.unsigned_big_int_63 |
+ bitsetLow.other_unsigned_big_int_64 | bitsetLow.negative_big_int_63 |
+ bitsetLow.other_big_int;
}
case (object: JSObject): {
if (object.map.IsUndetectable()) {
- return bitset.other_undetectable;
+ return bitsetLow.other_undetectable;
+ } else if (Is<Callable>(object)) {
+ return bitsetLow.other_callable;
} else {
- return bitset.other_object;
+ return bitsetLow.other_object;
}
}
+ @if(V8_ENABLE_WEBASSEMBLY)
+ case (WasmObject): {
+ return bitsetHigh.wasm_object;
+ }
case (Object): {
return false;
}
@@ -162,7 +179,8 @@ builtin TestTurbofanType(implicit context: Context)(
value: Object, expectedType: TurbofanType): Boolean {
typeswitch (expectedType) {
case (t: TurbofanBitsetType): {
- return Convert<Boolean>(TestTurbofanBitsetType(value, t.bitset));
+ return Convert<Boolean>(
+ TestTurbofanBitsetType(value, t.bitset_low, t.bitset_high));
}
case (t: TurbofanUnionType): {
return Convert<Boolean>(
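Because Torque's support for 64 bit bitsets is limited, the bitset above is carried as low/high 32 bit halves. A small C++ sketch of how such halves recombine into one logical 64 bit value (bit positions here are illustrative, not V8's):

#include <cstdint>

// Illustrative only: recombining two 32-bit halves into one logical
// 64-bit bitset; bit assignments are hypothetical, not V8's.
struct BitsetPair {
  uint32_t low;
  uint32_t high;
};

constexpr uint64_t Combine(BitsetPair p) {
  return (static_cast<uint64_t>(p.high) << 32) | p.low;
}

constexpr bool TestBit(BitsetPair p, unsigned bit) {
  return (Combine(p) >> bit) & 1u;
}

static_assert(TestBit({0u, 1u}, 32), "bit 0 of the high word is logical bit 32");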
diff --git a/deps/v8/src/objects/value-serializer.cc b/deps/v8/src/objects/value-serializer.cc
index a8c78404c4..a82582a48d 100644
--- a/deps/v8/src/objects/value-serializer.cc
+++ b/deps/v8/src/objects/value-serializer.cc
@@ -413,7 +413,8 @@ Maybe<bool> ValueSerializer::WriteObject(Handle<Object> object) {
}
DCHECK(object->IsHeapObject());
- switch (HeapObject::cast(*object).map().instance_type()) {
+ InstanceType instance_type = HeapObject::cast(*object).map().instance_type();
+ switch (instance_type) {
case ODDBALL_TYPE:
WriteOddball(Oddball::cast(*object));
return ThrowIfOutOfMemory();
@@ -433,7 +434,7 @@ Maybe<bool> ValueSerializer::WriteObject(Handle<Object> object) {
Handle<JSArrayBufferView> view = Handle<JSArrayBufferView>::cast(object);
if (!id_map_.Find(view) && !treat_array_buffer_views_as_host_objects_) {
Handle<JSArrayBuffer> buffer(
- view->IsJSTypedArray()
+ InstanceTypeChecker::IsJSTypedArray(instance_type)
? Handle<JSTypedArray>::cast(view)->GetBuffer()
: handle(JSArrayBuffer::cast(view->buffer()), isolate_));
if (!WriteJSReceiver(buffer).FromMaybe(false)) return Nothing<bool>();
@@ -441,10 +442,10 @@ Maybe<bool> ValueSerializer::WriteObject(Handle<Object> object) {
return WriteJSReceiver(view);
}
default:
- if (object->IsString()) {
+ if (InstanceTypeChecker::IsString(instance_type)) {
WriteString(Handle<String>::cast(object));
return ThrowIfOutOfMemory();
- } else if (object->IsJSReceiver()) {
+ } else if (InstanceTypeChecker::IsJSReceiver(instance_type)) {
return WriteJSReceiver(Handle<JSReceiver>::cast(object));
} else {
ThrowDataCloneError(MessageTemplate::kDataCloneError, object);
@@ -623,7 +624,7 @@ Maybe<bool> ValueSerializer::WriteJSObject(Handle<JSObject> object) {
if (V8_LIKELY(!map_changed)) map_changed = *map != object->map();
if (V8_LIKELY(!map_changed &&
details.location() == PropertyLocation::kField)) {
- DCHECK_EQ(kData, details.kind());
+ DCHECK_EQ(PropertyKind::kData, details.kind());
FieldIndex field_index = FieldIndex::ForDescriptor(*map, i);
value = JSObject::FastPropertyAt(object, details.representation(),
field_index);
@@ -664,6 +665,7 @@ Maybe<bool> ValueSerializer::WriteJSObjectSlow(Handle<JSObject> object) {
}
Maybe<bool> ValueSerializer::WriteJSArray(Handle<JSArray> array) {
+ PtrComprCageBase cage_base(isolate_);
uint32_t length = 0;
bool valid_length = array->length().ToArrayLength(&length);
DCHECK(valid_length);
@@ -675,7 +677,7 @@ Maybe<bool> ValueSerializer::WriteJSArray(Handle<JSArray> array) {
// existed (as only indices which were enumerable own properties at this point
// should be serialized).
const bool should_serialize_densely =
- array->HasFastElements() && !array->HasHoleyElements();
+ array->HasFastElements(cage_base) && !array->HasHoleyElements(cage_base);
if (should_serialize_densely) {
DCHECK_LE(length, static_cast<uint32_t>(FixedArray::kMaxLength));
@@ -685,35 +687,36 @@ Maybe<bool> ValueSerializer::WriteJSArray(Handle<JSArray> array) {
// Fast paths. Note that PACKED_ELEMENTS in particular can bail due to the
// structure of the elements changing.
- switch (array->GetElementsKind()) {
+ switch (array->GetElementsKind(cage_base)) {
case PACKED_SMI_ELEMENTS: {
- Handle<FixedArray> elements(FixedArray::cast(array->elements()),
- isolate_);
- for (; i < length; i++) WriteSmi(Smi::cast(elements->get(i)));
+ DisallowGarbageCollection no_gc;
+ FixedArray elements = FixedArray::cast(array->elements());
+ for (i = 0; i < length; i++)
+ WriteSmi(Smi::cast(elements.get(cage_base, i)));
break;
}
case PACKED_DOUBLE_ELEMENTS: {
// Elements are empty_fixed_array, not a FixedDoubleArray, if the array
// is empty. No elements to encode in this case anyhow.
if (length == 0) break;
- Handle<FixedDoubleArray> elements(
- FixedDoubleArray::cast(array->elements()), isolate_);
- for (; i < length; i++) {
+ DisallowGarbageCollection no_gc;
+ FixedDoubleArray elements = FixedDoubleArray::cast(array->elements());
+ for (i = 0; i < length; i++) {
WriteTag(SerializationTag::kDouble);
- WriteDouble(elements->get_scalar(i));
+ WriteDouble(elements.get_scalar(i));
}
break;
}
case PACKED_ELEMENTS: {
- Handle<Object> old_length(array->length(), isolate_);
+ Handle<Object> old_length(array->length(cage_base), isolate_);
for (; i < length; i++) {
- if (array->length() != *old_length ||
- array->GetElementsKind() != PACKED_ELEMENTS) {
+ if (array->length(cage_base) != *old_length ||
+ array->GetElementsKind(cage_base) != PACKED_ELEMENTS) {
// Fall back to slow path.
break;
}
- Handle<Object> element(FixedArray::cast(array->elements()).get(i),
- isolate_);
+ Handle<Object> element(
+ FixedArray::cast(array->elements()).get(cage_base, i), isolate_);
if (!WriteObject(element).FromMaybe(false)) return Nothing<bool>();
}
break;
@@ -935,6 +938,11 @@ Maybe<bool> ValueSerializer::WriteJSArrayBufferView(JSArrayBufferView view) {
WriteVarint(static_cast<uint8_t>(tag));
WriteVarint(static_cast<uint32_t>(view.byte_offset()));
WriteVarint(static_cast<uint32_t>(view.byte_length()));
+ // TODO(crbug.com/v8/12532): Re-enable the flags serialization logic below.
+ // Bump the serialization format version number when doing so, and preserve
+ // logic and tests for reading from the old format.
+ //
+ // WriteVarint(static_cast<uint32_t>(view.bit_field()));
return ThrowIfOutOfMemory();
}
@@ -1859,6 +1867,9 @@ MaybeHandle<JSArrayBufferView> ValueDeserializer::ReadJSArrayBufferView(
uint8_t tag = 0;
uint32_t byte_offset = 0;
uint32_t byte_length = 0;
+ uint32_t flags = 0;
+ // TODO(crbug.com/v8/12532): Read `flags` from the serialized value, when we
+ // restore the logic for serializing them.
if (!ReadVarint<uint8_t>().To(&tag) ||
!ReadVarint<uint32_t>().To(&byte_offset) ||
!ReadVarint<uint32_t>().To(&byte_length) ||
@@ -1875,6 +1886,7 @@ MaybeHandle<JSArrayBufferView> ValueDeserializer::ReadJSArrayBufferView(
Handle<JSDataView> data_view =
isolate_->factory()->NewJSDataView(buffer, byte_offset, byte_length);
AddObjectWithID(id, data_view);
+ data_view->set_bit_field(flags);
return data_view;
}
#define TYPED_ARRAY_CASE(Type, type, TYPE, ctype) \
@@ -1891,6 +1903,7 @@ MaybeHandle<JSArrayBufferView> ValueDeserializer::ReadJSArrayBufferView(
}
Handle<JSTypedArray> typed_array = isolate_->factory()->NewJSTypedArray(
external_array_type, buffer, byte_offset, byte_length / element_size);
+ typed_array->set_bit_field(flags);
AddObjectWithID(id, typed_array);
return typed_array;
}
diff --git a/deps/v8/src/objects/visitors.h b/deps/v8/src/objects/visitors.h
index d10c395270..f8a98e7e12 100644
--- a/deps/v8/src/objects/visitors.h
+++ b/deps/v8/src/objects/visitors.h
@@ -89,6 +89,13 @@ class RootVisitor {
UNREACHABLE();
}
+  // Visits a single pointer to a Code object found on the execution stack.
+ virtual void VisitRunningCode(FullObjectSlot p) {
+ // For most visitors, currently running Code is no different than any other
+ // on-stack pointer.
+ VisitRootPointer(Root::kStackRoots, nullptr, p);
+ }
+
// Intended for serialization/deserialization checking: insert, or
// check for the presence of, a tag at this position in the stream.
// Also used for marking up GC roots in heap snapshots.
@@ -197,7 +204,7 @@ class ObjectVisitorWithCageBases : public ObjectVisitor {
// The pointer compression cage base value used for decompression of
// references to Code objects.
PtrComprCageBase code_cage_base() const {
-#if V8_EXTERNAL_CODE_SPACE
+#ifdef V8_EXTERNAL_CODE_SPACE
return code_cage_base_;
#else
return cage_base();
diff --git a/deps/v8/src/parsing/expression-scope.h b/deps/v8/src/parsing/expression-scope.h
index d6c117f73b..c49fae7519 100644
--- a/deps/v8/src/parsing/expression-scope.h
+++ b/deps/v8/src/parsing/expression-scope.h
@@ -63,16 +63,16 @@ class ExpressionScope {
if (scope->is_with_scope()) {
passed_through_with = true;
} else if (scope->is_catch_scope()) {
- Variable* var = scope->LookupLocal(name);
+ Variable* masking_var = scope->LookupLocal(name);
// If a variable is declared in a catch scope with a masking
// catch-declared variable, the initializing assignment is an
// assignment to the catch-declared variable instead.
// https://tc39.es/ecma262/#sec-variablestatements-in-catch-blocks
- if (var != nullptr) {
+ if (masking_var != nullptr) {
result->set_is_assigned();
if (passed_through_with) break;
- result->BindTo(var);
- var->SetMaybeAssigned();
+ result->BindTo(masking_var);
+ masking_var->SetMaybeAssigned();
return result;
}
}
diff --git a/deps/v8/src/parsing/parse-info.cc b/deps/v8/src/parsing/parse-info.cc
index 41055e8024..048948ed3c 100644
--- a/deps/v8/src/parsing/parse-info.cc
+++ b/deps/v8/src/parsing/parse-info.cc
@@ -36,7 +36,10 @@ UnoptimizedCompileFlags::UnoptimizedCompileFlags(Isolate* isolate,
set_allow_lazy_compile(true);
set_collect_source_positions(!FLAG_enable_lazy_source_positions ||
isolate->NeedsDetailedOptimizedCodeLineInfo());
- set_allow_harmony_top_level_await(FLAG_harmony_top_level_await);
+ set_post_parallel_compile_tasks_for_eager_toplevel(
+ FLAG_parallel_compile_tasks_for_eager_toplevel);
+ set_post_parallel_compile_tasks_for_lazy(
+ FLAG_parallel_compile_tasks_for_lazy);
}
// static
@@ -133,6 +136,8 @@ void UnoptimizedCompileFlags::SetFlagsFromFunction(T function) {
set_class_scope_has_private_brand(function->class_scope_has_private_brand());
set_has_static_private_methods_or_accessors(
function->has_static_private_methods_or_accessors());
+ set_private_name_lookup_skips_outer_class(
+ function->private_name_lookup_skips_outer_class());
set_is_toplevel(function->is_toplevel());
}
@@ -164,39 +169,46 @@ void UnoptimizedCompileFlags::SetFlagsForFunctionFromScript(Script script) {
script.IsUserJavaScript());
}
-UnoptimizedCompileState::UnoptimizedCompileState(Isolate* isolate)
+ReusableUnoptimizedCompileState::ReusableUnoptimizedCompileState(
+ Isolate* isolate)
: hash_seed_(HashSeed(isolate)),
allocator_(isolate->allocator()),
- ast_string_constants_(isolate->ast_string_constants()),
logger_(isolate->logger()),
- parallel_tasks_(
- isolate->lazy_compile_dispatcher()
- ? new ParallelTasks(isolate->lazy_compile_dispatcher())
- : nullptr) {}
-
-UnoptimizedCompileState::UnoptimizedCompileState(
- const UnoptimizedCompileState& other) V8_NOEXCEPT
- : hash_seed_(other.hash_seed()),
- allocator_(other.allocator()),
- ast_string_constants_(other.ast_string_constants()),
- logger_(other.logger()),
- // TODO(leszeks): Should this create a new ParallelTasks instance?
- parallel_tasks_(nullptr) {}
+ dispatcher_(isolate->lazy_compile_dispatcher()),
+ ast_string_constants_(isolate->ast_string_constants()),
+ zone_(allocator_, "unoptimized-compile-zone"),
+ ast_value_factory_(
+ new AstValueFactory(zone(), ast_string_constants(), hash_seed())) {}
+
+ReusableUnoptimizedCompileState::ReusableUnoptimizedCompileState(
+ LocalIsolate* isolate)
+ : hash_seed_(HashSeed(isolate)),
+ allocator_(isolate->allocator()),
+ logger_(isolate->main_thread_logger()),
+ dispatcher_(isolate->lazy_compile_dispatcher()),
+ ast_string_constants_(isolate->ast_string_constants()),
+ zone_(allocator_, "unoptimized-compile-zone"),
+ ast_value_factory_(
+ new AstValueFactory(zone(), ast_string_constants(), hash_seed())) {}
+
+ReusableUnoptimizedCompileState::~ReusableUnoptimizedCompileState() = default;
ParseInfo::ParseInfo(const UnoptimizedCompileFlags flags,
- UnoptimizedCompileState* state)
+ UnoptimizedCompileState* state,
+ ReusableUnoptimizedCompileState* reusable_state,
+ uintptr_t stack_limit,
+ RuntimeCallStats* runtime_call_stats)
: flags_(flags),
state_(state),
- zone_(std::make_unique<Zone>(state->allocator(), "parser-zone")),
+ reusable_state_(reusable_state),
extension_(nullptr),
script_scope_(nullptr),
- stack_limit_(0),
+ stack_limit_(stack_limit),
parameters_end_pos_(kNoSourcePosition),
max_function_literal_id_(kFunctionLiteralIdInvalid),
character_stream_(nullptr),
- ast_value_factory_(nullptr),
function_name_(nullptr),
- runtime_call_stats_(nullptr),
+ runtime_call_stats_(runtime_call_stats),
source_range_map_(nullptr),
literal_(nullptr),
allow_eval_cache_(false),
@@ -210,30 +222,18 @@ ParseInfo::ParseInfo(const UnoptimizedCompileFlags flags,
}
ParseInfo::ParseInfo(Isolate* isolate, const UnoptimizedCompileFlags flags,
- UnoptimizedCompileState* state)
- : ParseInfo(flags, state) {
- SetPerThreadState(isolate->stack_guard()->real_climit(),
- isolate->counters()->runtime_call_stats());
-}
-
-// static
-std::unique_ptr<ParseInfo> ParseInfo::ForToplevelFunction(
- const UnoptimizedCompileFlags flags, UnoptimizedCompileState* compile_state,
- const FunctionLiteral* literal, const AstRawString* function_name) {
- std::unique_ptr<ParseInfo> result(new ParseInfo(flags, compile_state));
-
- // Clone the function_name AstRawString into the ParseInfo's own
- // AstValueFactory.
- const AstRawString* cloned_function_name =
- result->GetOrCreateAstValueFactory()->CloneFromOtherFactory(
- function_name);
-
- // Setup function specific details.
- DCHECK(!literal->is_toplevel());
- result->set_function_name(cloned_function_name);
-
- return result;
-}
+ UnoptimizedCompileState* state,
+ ReusableUnoptimizedCompileState* reusable_state)
+ : ParseInfo(flags, state, reusable_state,
+ isolate->stack_guard()->real_climit(),
+ isolate->counters()->runtime_call_stats()) {}
+
+ParseInfo::ParseInfo(LocalIsolate* isolate, const UnoptimizedCompileFlags flags,
+ UnoptimizedCompileState* state,
+ ReusableUnoptimizedCompileState* reusable_state,
+ uintptr_t stack_limit)
+ : ParseInfo(flags, state, reusable_state, stack_limit,
+ isolate->runtime_call_stats()) {}
ParseInfo::~ParseInfo() = default;
@@ -285,14 +285,6 @@ template EXPORT_TEMPLATE_DEFINE(V8_EXPORT_PRIVATE)
MaybeHandle<FixedArray> maybe_wrapped_arguments,
ScriptOriginOptions origin_options, NativesFlag natives);
-AstValueFactory* ParseInfo::GetOrCreateAstValueFactory() {
- if (!ast_value_factory_.get()) {
- ast_value_factory_.reset(
- new AstValueFactory(zone(), ast_string_constants(), hash_seed()));
- }
- return ast_value_factory();
-}
-
void ParseInfo::AllocateSourceRangeMap() {
DCHECK(flags().block_coverage_enabled());
DCHECK_NULL(source_range_map());
@@ -331,15 +323,5 @@ void ParseInfo::CheckFlagsForFunctionFromScript(Script script) {
source_range_map() != nullptr);
}
-void UnoptimizedCompileState::ParallelTasks::Enqueue(
- ParseInfo* outer_parse_info, const AstRawString* function_name,
- FunctionLiteral* literal) {
- base::Optional<LazyCompileDispatcher::JobId> job_id =
- dispatcher_->Enqueue(outer_parse_info, function_name, literal);
- if (job_id) {
- enqueued_jobs_.emplace_front(std::make_pair(literal, *job_id));
- }
-}
-
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/parsing/parse-info.h b/deps/v8/src/parsing/parse-info.h
index b882423dbb..8ceccdd514 100644
--- a/deps/v8/src/parsing/parse-info.h
+++ b/deps/v8/src/parsing/parse-info.h
@@ -40,27 +40,29 @@ class Utf16CharacterStream;
class Zone;
// The flags for a parse + unoptimized compile operation.
-#define FLAG_FIELDS(V, _) \
- V(is_toplevel, bool, 1, _) \
- V(is_eager, bool, 1, _) \
- V(is_eval, bool, 1, _) \
- V(outer_language_mode, LanguageMode, 1, _) \
- V(parse_restriction, ParseRestriction, 1, _) \
- V(is_module, bool, 1, _) \
- V(allow_lazy_parsing, bool, 1, _) \
- V(is_lazy_compile, bool, 1, _) \
- V(collect_type_profile, bool, 1, _) \
- V(coverage_enabled, bool, 1, _) \
- V(block_coverage_enabled, bool, 1, _) \
- V(is_asm_wasm_broken, bool, 1, _) \
- V(class_scope_has_private_brand, bool, 1, _) \
- V(requires_instance_members_initializer, bool, 1, _) \
- V(has_static_private_methods_or_accessors, bool, 1, _) \
- V(might_always_opt, bool, 1, _) \
- V(allow_natives_syntax, bool, 1, _) \
- V(allow_lazy_compile, bool, 1, _) \
- V(collect_source_positions, bool, 1, _) \
- V(allow_harmony_top_level_await, bool, 1, _) \
+#define FLAG_FIELDS(V, _) \
+ V(is_toplevel, bool, 1, _) \
+ V(is_eager, bool, 1, _) \
+ V(is_eval, bool, 1, _) \
+ V(outer_language_mode, LanguageMode, 1, _) \
+ V(parse_restriction, ParseRestriction, 1, _) \
+ V(is_module, bool, 1, _) \
+ V(allow_lazy_parsing, bool, 1, _) \
+ V(is_lazy_compile, bool, 1, _) \
+ V(collect_type_profile, bool, 1, _) \
+ V(coverage_enabled, bool, 1, _) \
+ V(block_coverage_enabled, bool, 1, _) \
+ V(is_asm_wasm_broken, bool, 1, _) \
+ V(class_scope_has_private_brand, bool, 1, _) \
+ V(private_name_lookup_skips_outer_class, bool, 1, _) \
+ V(requires_instance_members_initializer, bool, 1, _) \
+ V(has_static_private_methods_or_accessors, bool, 1, _) \
+ V(might_always_opt, bool, 1, _) \
+ V(allow_natives_syntax, bool, 1, _) \
+ V(allow_lazy_compile, bool, 1, _) \
+ V(post_parallel_compile_tasks_for_eager_toplevel, bool, 1, _) \
+ V(post_parallel_compile_tasks_for_lazy, bool, 1, _) \
+ V(collect_source_positions, bool, 1, _) \
V(is_repl_mode, bool, 1, _)
class V8_EXPORT_PRIVATE UnoptimizedCompileFlags {
@@ -159,67 +161,61 @@ class ParseInfo;
// The mutable state for a parse + unoptimized compile operation.
class V8_EXPORT_PRIVATE UnoptimizedCompileState {
public:
- explicit UnoptimizedCompileState(Isolate*);
- UnoptimizedCompileState(const UnoptimizedCompileState& other) V8_NOEXCEPT;
-
- class ParallelTasks {
- public:
- explicit ParallelTasks(LazyCompileDispatcher* lazy_compile_dispatcher)
- : dispatcher_(lazy_compile_dispatcher) {
- DCHECK_NOT_NULL(dispatcher_);
- }
-
- void Enqueue(ParseInfo* outer_parse_info, const AstRawString* function_name,
- FunctionLiteral* literal);
-
- using EnqueuedJobsIterator =
- std::forward_list<std::pair<FunctionLiteral*, uintptr_t>>::iterator;
-
- EnqueuedJobsIterator begin() { return enqueued_jobs_.begin(); }
- EnqueuedJobsIterator end() { return enqueued_jobs_.end(); }
+ const PendingCompilationErrorHandler* pending_error_handler() const {
+ return &pending_error_handler_;
+ }
+ PendingCompilationErrorHandler* pending_error_handler() {
+ return &pending_error_handler_;
+ }
- LazyCompileDispatcher* dispatcher() { return dispatcher_; }
+ private:
+ PendingCompilationErrorHandler pending_error_handler_;
+};
- private:
- LazyCompileDispatcher* dispatcher_;
- std::forward_list<std::pair<FunctionLiteral*, uintptr_t>> enqueued_jobs_;
- };
+// A container for ParseInfo fields that are reusable across multiple parses and
+// unoptimized compiles.
+//
+// Note that this is different from UnoptimizedCompileState, which has mutable
+// state for a single compilation that is not reusable across multiple
+// compilations.
+class V8_EXPORT_PRIVATE ReusableUnoptimizedCompileState {
+ public:
+ explicit ReusableUnoptimizedCompileState(Isolate* isolate);
+ explicit ReusableUnoptimizedCompileState(LocalIsolate* isolate);
+ ~ReusableUnoptimizedCompileState();
+ Zone* zone() { return &zone_; }
+ AstValueFactory* ast_value_factory() const {
+ return ast_value_factory_.get();
+ }
uint64_t hash_seed() const { return hash_seed_; }
AccountingAllocator* allocator() const { return allocator_; }
const AstStringConstants* ast_string_constants() const {
return ast_string_constants_;
}
Logger* logger() const { return logger_; }
- PendingCompilationErrorHandler* pending_error_handler() {
- return &pending_error_handler_;
- }
- const PendingCompilationErrorHandler* pending_error_handler() const {
- return &pending_error_handler_;
- }
- ParallelTasks* parallel_tasks() const { return parallel_tasks_.get(); }
+ LazyCompileDispatcher* dispatcher() const { return dispatcher_; }
private:
uint64_t hash_seed_;
AccountingAllocator* allocator_;
- const AstStringConstants* ast_string_constants_;
- PendingCompilationErrorHandler pending_error_handler_;
Logger* logger_;
- std::unique_ptr<ParallelTasks> parallel_tasks_;
+ LazyCompileDispatcher* dispatcher_;
+ const AstStringConstants* ast_string_constants_;
+ Zone zone_;
+ std::unique_ptr<AstValueFactory> ast_value_factory_;
};
// A container for the inputs, configuration options, and outputs of parsing.
class V8_EXPORT_PRIVATE ParseInfo {
public:
ParseInfo(Isolate* isolate, const UnoptimizedCompileFlags flags,
- UnoptimizedCompileState* state);
-
- // Creates a new parse info based on parent top-level |outer_parse_info| for
- // function |literal|.
- static std::unique_ptr<ParseInfo> ForToplevelFunction(
- const UnoptimizedCompileFlags flags,
- UnoptimizedCompileState* compile_state, const FunctionLiteral* literal,
- const AstRawString* function_name);
+ UnoptimizedCompileState* state,
+ ReusableUnoptimizedCompileState* reusable_state);
+ ParseInfo(LocalIsolate* isolate, const UnoptimizedCompileFlags flags,
+ UnoptimizedCompileState* state,
+ ReusableUnoptimizedCompileState* reusable_state,
+ uintptr_t stack_limit);
~ParseInfo();
@@ -230,37 +226,32 @@ class V8_EXPORT_PRIVATE ParseInfo {
ScriptOriginOptions origin_options,
NativesFlag natives = NOT_NATIVES_CODE);
- // Either returns the ast-value-factory associcated with this ParseInfo, or
- // creates and returns a new factory if none exists.
- AstValueFactory* GetOrCreateAstValueFactory();
-
- Zone* zone() const { return zone_.get(); }
+ Zone* zone() const { return reusable_state_->zone(); }
const UnoptimizedCompileFlags& flags() const { return flags_; }
- // Getters for state.
- uint64_t hash_seed() const { return state_->hash_seed(); }
- AccountingAllocator* allocator() const { return state_->allocator(); }
+ // Getters for reusable state.
+ uint64_t hash_seed() const { return reusable_state_->hash_seed(); }
+ AccountingAllocator* allocator() const {
+ return reusable_state_->allocator();
+ }
const AstStringConstants* ast_string_constants() const {
- return state_->ast_string_constants();
+ return reusable_state_->ast_string_constants();
+ }
+ Logger* logger() const { return reusable_state_->logger(); }
+ LazyCompileDispatcher* dispatcher() const {
+ return reusable_state_->dispatcher();
}
- Logger* logger() const { return state_->logger(); }
+ const UnoptimizedCompileState* state() const { return state_; }
+
+ // Getters for state.
PendingCompilationErrorHandler* pending_error_handler() {
return state_->pending_error_handler();
}
- UnoptimizedCompileState::ParallelTasks* parallel_tasks() const {
- return state_->parallel_tasks();
- }
- const UnoptimizedCompileState* state() const { return state_; }
// Accessors for per-thread state.
uintptr_t stack_limit() const { return stack_limit_; }
RuntimeCallStats* runtime_call_stats() const { return runtime_call_stats_; }
- void SetPerThreadState(uintptr_t stack_limit,
- RuntimeCallStats* runtime_call_stats) {
- stack_limit_ = stack_limit;
- runtime_call_stats_ = runtime_call_stats;
- }
// Accessor methods for output flags.
bool allow_eval_cache() const { return allow_eval_cache_; }
@@ -297,8 +288,7 @@ class V8_EXPORT_PRIVATE ParseInfo {
}
AstValueFactory* ast_value_factory() const {
- DCHECK(ast_value_factory_.get());
- return ast_value_factory_.get();
+ return reusable_state_->ast_value_factory();
}
const AstRawString* function_name() const { return function_name_; }
@@ -334,8 +324,9 @@ class V8_EXPORT_PRIVATE ParseInfo {
void CheckFlagsForFunctionFromScript(Script script);
private:
- ParseInfo(const UnoptimizedCompileFlags flags,
- UnoptimizedCompileState* state);
+ ParseInfo(const UnoptimizedCompileFlags flags, UnoptimizedCompileState* state,
+ ReusableUnoptimizedCompileState* reusable_state,
+ uintptr_t stack_limit, RuntimeCallStats* runtime_call_stats);
void CheckFlagsForToplevelCompileFromScript(Script script,
bool is_collecting_type_profile);
@@ -343,8 +334,8 @@ class V8_EXPORT_PRIVATE ParseInfo {
//------------- Inputs to parsing and scope analysis -----------------------
const UnoptimizedCompileFlags flags_;
UnoptimizedCompileState* state_;
+ ReusableUnoptimizedCompileState* reusable_state_;
- std::unique_ptr<Zone> zone_;
v8::Extension* extension_;
DeclarationScope* script_scope_;
uintptr_t stack_limit_;
@@ -354,7 +345,6 @@ class V8_EXPORT_PRIVATE ParseInfo {
//----------- Inputs+Outputs of parsing and scope analysis -----------------
std::unique_ptr<Utf16CharacterStream> character_stream_;
std::unique_ptr<ConsumedPreparseData> consumed_preparse_data_;
- std::unique_ptr<AstValueFactory> ast_value_factory_;
const AstRawString* function_name_;
RuntimeCallStats* runtime_call_stats_;
SourceRangeMap* source_range_map_; // Used when block coverage is enabled.
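The ReusableUnoptimizedCompileState introduced above separates long-lived parser state (zone, AstValueFactory, dispatcher) from per-compilation state (pending errors). A toy sketch of that ownership split, with stand-in types rather than the V8 classes:

#include <string>
#include <vector>

// Illustrative only: stand-in types, not the V8 classes.
struct ReusableState {                        // survives across many parses
  std::vector<std::string> interned_strings;  // stands in for the AstValueFactory
};

struct PerCompileState {                      // fresh for each compilation
  std::vector<std::string> pending_errors;
};

struct ParseJob {
  ReusableState* reusable;   // borrowed, shared by all jobs
  PerCompileState* state;    // borrowed, unique to this job
};

void RunTwoCompiles() {
  ReusableState reusable;                // allocated once, reused below
  PerCompileState first, second;
  ParseJob a{&reusable, &first};
  ParseJob b{&reusable, &second};        // same reusable state, new per-compile state
  (void)a;
  (void)b;
}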
diff --git a/deps/v8/src/parsing/parser-base.h b/deps/v8/src/parsing/parser-base.h
index 9a0f4e580f..7240e64777 100644
--- a/deps/v8/src/parsing/parser-base.h
+++ b/deps/v8/src/parsing/parser-base.h
@@ -923,12 +923,9 @@ class ParserBase {
if (flags().parsing_while_debugging() == ParsingWhileDebugging::kYes) {
ReportMessageAt(scanner()->location(),
MessageTemplate::kAwaitNotInDebugEvaluate);
- } else if (flags().allow_harmony_top_level_await()) {
- ReportMessageAt(scanner()->location(),
- MessageTemplate::kAwaitNotInAsyncContext);
} else {
ReportMessageAt(scanner()->location(),
- MessageTemplate::kAwaitNotInAsyncFunction);
+ MessageTemplate::kAwaitNotInAsyncContext);
}
return;
}
@@ -1068,8 +1065,7 @@ class ParserBase {
return IsResumableFunction(function_state_->kind());
}
bool is_await_allowed() const {
- return is_async_function() || (flags().allow_harmony_top_level_await() &&
- IsModule(function_state_->kind()));
+ return is_async_function() || IsModule(function_state_->kind());
}
bool is_await_as_identifier_disallowed() {
return flags().is_module() ||
@@ -1242,6 +1238,10 @@ class ParserBase {
Scanner::Location class_name_location,
bool name_is_strict_reserved,
int class_token_pos);
+ ExpressionT DoParseClassLiteral(ClassScope* class_scope, IdentifierT name,
+ Scanner::Location class_name_location,
+ bool is_anonymous, int class_token_pos);
+
ExpressionT ParseTemplateLiteral(ExpressionT tag, int start, bool tagged);
ExpressionT ParseSuperExpression();
ExpressionT ParseImportExpressions();
@@ -2526,8 +2526,6 @@ typename ParserBase<Impl>::ExpressionT ParserBase<Impl>::ParseMemberInitializer(
if (initializer_scope == nullptr) {
initializer_scope = NewFunctionScope(function_kind);
- // TODO(gsathya): Make scopes be non contiguous.
- initializer_scope->set_start_position(beg_pos);
initializer_scope->SetLanguageMode(LanguageMode::kStrict);
}
@@ -2542,8 +2540,13 @@ typename ParserBase<Impl>::ExpressionT ParserBase<Impl>::ParseMemberInitializer(
initializer = factory()->NewUndefinedLiteral(kNoSourcePosition);
}
- initializer_scope->set_end_position(end_position());
if (is_static) {
+    // For the instance initializer, we set the positions later, using the
+    // positions of the class body, so that we can reparse it.
+ // TODO(joyee): Make scopes be non contiguous.
+ initializer_scope->set_start_position(beg_pos);
+ initializer_scope->set_end_position(end_position());
class_info->static_elements_scope = initializer_scope;
class_info->has_static_elements = true;
} else {
@@ -3446,7 +3449,7 @@ ParserBase<Impl>::ParseLeftHandSideContinuation(ExpressionT result) {
// async () => ...
if (!args.length()) return factory()->NewEmptyParentheses(pos);
// async ( Arguments ) => ...
- ExpressionT result = impl()->ExpressionListToExpression(args);
+ result = impl()->ExpressionListToExpression(args);
result->mark_parenthesized();
return result;
}
@@ -4559,7 +4562,7 @@ ParserBase<Impl>::ParseArrowFunctionLiteral(
// parameters.
int dummy_num_parameters = -1;
int dummy_function_length = -1;
- DCHECK_NE(kind & FunctionKind::kArrowFunction, 0);
+ DCHECK(IsArrowFunction(kind));
bool did_preparse_successfully = impl()->SkipFunction(
nullptr, kind, FunctionSyntaxKind::kAnonymousExpression,
formal_parameters.scope, &dummy_num_parameters,
@@ -4581,8 +4584,8 @@ ParserBase<Impl>::ParseArrowFunctionLiteral(
if (has_error()) return impl()->FailureExpression();
DeclarationScope* function_scope = next_arrow_function_info_.scope;
- FunctionState function_state(&function_state_, &scope_,
- function_scope);
+ FunctionState inner_function_state(&function_state_, &scope_,
+ function_scope);
Scanner::Location loc(function_scope->start_position(),
end_position());
FormalParametersT parameters(function_scope);
@@ -4685,6 +4688,15 @@ typename ParserBase<Impl>::ExpressionT ParserBase<Impl>::ParseClassLiteral(
}
ClassScope* class_scope = NewClassScope(scope(), is_anonymous);
+ return DoParseClassLiteral(class_scope, name, class_name_location,
+ is_anonymous, class_token_pos);
+}
+
+template <typename Impl>
+typename ParserBase<Impl>::ExpressionT ParserBase<Impl>::DoParseClassLiteral(
+ ClassScope* class_scope, IdentifierT name,
+ Scanner::Location class_name_location, bool is_anonymous,
+ int class_token_pos) {
BlockState block_state(&scope_, class_scope);
RaiseLanguageMode(LanguageMode::kStrict);
@@ -4771,6 +4783,12 @@ typename ParserBase<Impl>::ExpressionT ParserBase<Impl>::ParseClassLiteral(
Expect(Token::RBRACE);
int end_pos = end_position();
class_scope->set_end_position(end_pos);
+ if (class_info.instance_members_scope != nullptr) {
+ // Use the positions of the class body for the instance initializer
+ // function so that we can reparse it later.
+ class_info.instance_members_scope->set_start_position(class_token_pos);
+ class_info.instance_members_scope->set_end_position(end_pos);
+ }
VariableProxy* unresolvable = class_scope->ResolvePrivateNamesPartially();
if (unresolvable != nullptr) {
@@ -4934,7 +4952,7 @@ typename ParserBase<Impl>::ExpressionT ParserBase<Impl>::ParseTemplateLiteral(
Next();
pos = position();
- bool is_valid = CheckTemplateEscapes(forbid_illegal_escapes);
+ is_valid = CheckTemplateEscapes(forbid_illegal_escapes);
impl()->AddTemplateSpan(&ts, is_valid, next == Token::TEMPLATE_TAIL);
} while (next == Token::TEMPLATE_SPAN);
@@ -5618,7 +5636,8 @@ typename ParserBase<Impl>::StatementT ParserBase<Impl>::ParseReturnStatement() {
case BLOCK_SCOPE:
// Class static blocks disallow return. They are their own var scopes and
// have a varblock scope.
- if (function_state_->kind() == kClassStaticInitializerFunction) {
+ if (function_state_->kind() ==
+ FunctionKind::kClassStaticInitializerFunction) {
impl()->ReportMessageAt(loc, MessageTemplate::kIllegalReturn);
return impl()->NullStatement();
}
diff --git a/deps/v8/src/parsing/parser.cc b/deps/v8/src/parsing/parser.cc
index 9c088570a7..21a1f695cc 100644
--- a/deps/v8/src/parsing/parser.cc
+++ b/deps/v8/src/parsing/parser.cc
@@ -18,6 +18,7 @@
#include "src/common/globals.h"
#include "src/common/message-template.h"
#include "src/compiler-dispatcher/lazy-compile-dispatcher.h"
+#include "src/heap/parked-scope.h"
#include "src/logging/counters.h"
#include "src/logging/log.h"
#include "src/logging/runtime-call-stats-scope.h"
@@ -419,12 +420,15 @@ Expression* Parser::NewV8RuntimeFunctionForFuzzing(
return factory()->NewCallRuntime(function, permissive_args, pos);
}
-Parser::Parser(ParseInfo* info)
+Parser::Parser(LocalIsolate* local_isolate, ParseInfo* info,
+ Handle<Script> script)
: ParserBase<Parser>(
info->zone(), &scanner_, info->stack_limit(),
- info->GetOrCreateAstValueFactory(), info->pending_error_handler(),
+ info->ast_value_factory(), info->pending_error_handler(),
info->runtime_call_stats(), info->logger(), info->flags(), true),
+ local_isolate_(local_isolate),
info_(info),
+ script_(script),
scanner_(info->character_stream(), flags()),
preparser_zone_(info->zone()->allocator(), "pre-parser-zone"),
reusable_preparser_(nullptr),
@@ -470,8 +474,9 @@ void Parser::InitializeEmptyScopeChain(ParseInfo* info) {
original_scope_ = script_scope;
}
+template <typename IsolateT>
void Parser::DeserializeScopeChain(
- Isolate* isolate, ParseInfo* info,
+ IsolateT* isolate, ParseInfo* info,
MaybeHandle<ScopeInfo> maybe_outer_scope_info,
Scope::DeserializationMode mode) {
InitializeEmptyScopeChain(info);
@@ -488,19 +493,16 @@ void Parser::DeserializeScopeChain(
}
}
-namespace {
+template void Parser::DeserializeScopeChain(
+ Isolate* isolate, ParseInfo* info,
+ MaybeHandle<ScopeInfo> maybe_outer_scope_info,
+ Scope::DeserializationMode mode);
+template void Parser::DeserializeScopeChain(
+ LocalIsolate* isolate, ParseInfo* info,
+ MaybeHandle<ScopeInfo> maybe_outer_scope_info,
+ Scope::DeserializationMode mode);
-void MaybeResetCharacterStream(ParseInfo* info, FunctionLiteral* literal) {
-#if V8_ENABLE_WEBASSEMBLY
- // Don't reset the character stream if there is an asm.js module since it will
- // be used again by the asm-parser.
- if (info->contains_asm_module()) {
- if (FLAG_stress_validate_asm) return;
- if (literal != nullptr && literal->scope()->ContainsAsmModule()) return;
- }
-#endif // V8_ENABLE_WEBASSEMBLY
- info->ResetCharacterStream();
-}
+namespace {
void MaybeProcessSourceRanges(ParseInfo* parse_info, Expression* root,
uintptr_t stack_limit_) {
@@ -539,7 +541,6 @@ void Parser::ParseProgram(Isolate* isolate, Handle<Script> script,
scanner_.Initialize();
FunctionLiteral* result = DoParseProgram(isolate, info);
- MaybeResetCharacterStream(info, result);
MaybeProcessSourceRanges(info, result, stack_limit_);
PostProcessParseResult(isolate, info, result);
@@ -592,35 +593,31 @@ FunctionLiteral* Parser::DoParseProgram(Isolate* isolate, ParseInfo* info) {
DCHECK(flags().is_module());
PrepareGeneratorVariables();
- Expression* initial_yield =
- BuildInitialYield(kNoSourcePosition, kGeneratorFunction);
+ Expression* initial_yield = BuildInitialYield(
+ kNoSourcePosition, FunctionKind::kGeneratorFunction);
body.Add(
factory()->NewExpressionStatement(initial_yield, kNoSourcePosition));
- if (flags().allow_harmony_top_level_await()) {
- // First parse statements into a buffer. Then, if there was a
- // top level await, create an inner block and rewrite the body of the
- // module as an async function. Otherwise merge the statements back
- // into the main body.
- BlockT block = impl()->NullBlock();
- {
- StatementListT statements(pointer_buffer());
- ParseModuleItemList(&statements);
- // Modules will always have an initial yield. If there are any
- // additional suspends, i.e. awaits, then we treat the module as an
- // AsyncModule.
- if (function_state.suspend_count() > 1) {
- scope->set_is_async_module();
- block = factory()->NewBlock(true, statements);
- } else {
- statements.MergeInto(&body);
- }
- }
- if (IsAsyncModule(scope->function_kind())) {
- impl()->RewriteAsyncFunctionBody(
- &body, block, factory()->NewUndefinedLiteral(kNoSourcePosition));
+ // First parse statements into a buffer. Then, if there was a
+ // top level await, create an inner block and rewrite the body of the
+ // module as an async function. Otherwise merge the statements back
+ // into the main body.
+ BlockT block = impl()->NullBlock();
+ {
+ StatementListT statements(pointer_buffer());
+ ParseModuleItemList(&statements);
+ // Modules will always have an initial yield. If there are any
+ // additional suspends, i.e. awaits, then we treat the module as an
+ // AsyncModule.
+ if (function_state.suspend_count() > 1) {
+ scope->set_is_async_module();
+ block = factory()->NewBlock(true, statements);
+ } else {
+ statements.MergeInto(&body);
}
- } else {
- ParseModuleItemList(&body);
+ }
+ if (IsAsyncModule(scope->function_kind())) {
+ impl()->RewriteAsyncFunctionBody(
+ &body, block, factory()->NewUndefinedLiteral(kNoSourcePosition));
}
if (!has_error() &&
!module()->Validate(this->scope()->AsModuleScope(),
@@ -657,6 +654,7 @@ FunctionLiteral* Parser::DoParseProgram(Isolate* isolate, ParseInfo* info) {
// conflicting var declarations with outer scope-info-backed scopes.
if (flags().is_eval()) {
DCHECK(parsing_on_main_thread_);
+ DCHECK(!overall_parse_is_parked_);
info->ast_value_factory()->Internalize(isolate);
}
CheckConflictingVarDeclarations(scope);
@@ -686,7 +684,8 @@ FunctionLiteral* Parser::DoParseProgram(Isolate* isolate, ParseInfo* info) {
return result;
}
-void Parser::PostProcessParseResult(Isolate* isolate, ParseInfo* info,
+template <typename IsolateT>
+void Parser::PostProcessParseResult(IsolateT* isolate, ParseInfo* info,
FunctionLiteral* literal) {
if (literal == nullptr) return;
@@ -696,10 +695,7 @@ void Parser::PostProcessParseResult(Isolate* isolate, ParseInfo* info,
info->set_allow_eval_cache(allow_eval_cache());
}
- // We cannot internalize on a background thread; a foreground task will take
- // care of calling AstValueFactory::Internalize just before compilation.
- DCHECK_EQ(isolate != nullptr, parsing_on_main_thread_);
- if (isolate) info->ast_value_factory()->Internalize(isolate);
+ info->ast_value_factory()->Internalize(isolate);
{
RCS_SCOPE(info->runtime_call_stats(), RuntimeCallCounterId::kCompileAnalyse,
@@ -712,6 +708,12 @@ void Parser::PostProcessParseResult(Isolate* isolate, ParseInfo* info,
}
}
+template void Parser::PostProcessParseResult(Isolate* isolate, ParseInfo* info,
+ FunctionLiteral* literal);
+template void Parser::PostProcessParseResult(LocalIsolate* isolate,
+ ParseInfo* info,
+ FunctionLiteral* literal);
+
ZonePtrList<const AstRawString>* Parser::PrepareWrappedArguments(
Isolate* isolate, ParseInfo* info, Zone* zone) {
DCHECK(parsing_on_main_thread_);
@@ -722,7 +724,8 @@ ZonePtrList<const AstRawString>* Parser::PrepareWrappedArguments(
zone->New<ZonePtrList<const AstRawString>>(arguments_length, zone);
for (int i = 0; i < arguments_length; i++) {
const AstRawString* argument_string = ast_value_factory()->GetString(
- Handle<String>(String::cast(arguments->get(i)), isolate));
+ String::cast(arguments->get(i)),
+ SharedStringAccessGuardIfNeeded(isolate));
arguments_for_wrapped_function->Add(argument_string, zone);
}
return arguments_for_wrapped_function;
@@ -745,10 +748,11 @@ void Parser::ParseWrapped(Isolate* isolate, ParseInfo* info,
ZonePtrList<const AstRawString>* arguments_for_wrapped_function =
PrepareWrappedArguments(isolate, info, zone);
- FunctionLiteral* function_literal = ParseFunctionLiteral(
- function_name, location, kSkipFunctionNameCheck, kNormalFunction,
- kNoSourcePosition, FunctionSyntaxKind::kWrapped, LanguageMode::kSloppy,
- arguments_for_wrapped_function);
+ FunctionLiteral* function_literal =
+ ParseFunctionLiteral(function_name, location, kSkipFunctionNameCheck,
+ FunctionKind::kNormalFunction, kNoSourcePosition,
+ FunctionSyntaxKind::kWrapped, LanguageMode::kSloppy,
+ arguments_for_wrapped_function);
Statement* return_statement =
factory()->NewReturnStatement(function_literal, kNoSourcePosition);
@@ -848,8 +852,8 @@ void Parser::ParseFunction(Isolate* isolate, ParseInfo* info,
}
// Initialize parser state.
- Handle<String> name(shared_info->Name(), isolate);
- info->set_function_name(ast_value_factory()->GetString(name));
+ info->set_function_name(ast_value_factory()->GetString(
+ shared_info->Name(), SharedStringAccessGuardIfNeeded(isolate)));
scanner_.Initialize();
FunctionLiteral* result;
@@ -859,13 +863,14 @@ void Parser::ParseFunction(Isolate* isolate, ParseInfo* info,
// function is in heritage position. Otherwise the function scope's skip bit
// will be correctly inherited from the outer scope.
ClassScope::HeritageParsingScope heritage(original_scope_->AsClassScope());
- result = DoParseFunction(isolate, info, start_position, end_position,
- function_literal_id, info->function_name());
+ result = DoParseDeserializedFunction(
+ isolate, shared_info, info, start_position, end_position,
+ function_literal_id, info->function_name());
} else {
- result = DoParseFunction(isolate, info, start_position, end_position,
- function_literal_id, info->function_name());
+ result = DoParseDeserializedFunction(
+ isolate, shared_info, info, start_position, end_position,
+ function_literal_id, info->function_name());
}
- MaybeResetCharacterStream(info, result);
MaybeProcessSourceRanges(info, result, stack_limit_);
if (result != nullptr) {
Handle<String> inferred_name(shared_info->inferred_name(), isolate);
@@ -950,14 +955,14 @@ FunctionLiteral* Parser::DoParseFunction(Isolate* isolate, ParseInfo* info,
// Parsing patterns as variable reference expression creates
// NewUnresolved references in current scope. Enter arrow function
// scope for formal parameter parsing.
- BlockState block_state(&scope_, scope);
+ BlockState inner_block_state(&scope_, scope);
if (Check(Token::LPAREN)) {
// '(' StrictFormalParameters ')'
ParseFormalParameterList(&formals);
Expect(Token::RPAREN);
} else {
// BindingIdentifier
- ParameterParsingScope scope(impl(), &formals);
+ ParameterParsingScope parameter_parsing_scope(impl(), &formals);
ParseFormalParameter(&formals);
DeclareFormalParameters(&formals);
}
@@ -1027,6 +1032,103 @@ FunctionLiteral* Parser::DoParseFunction(Isolate* isolate, ParseInfo* info,
return result;
}
+FunctionLiteral* Parser::DoParseDeserializedFunction(
+ Isolate* isolate, Handle<SharedFunctionInfo> shared_info, ParseInfo* info,
+ int start_position, int end_position, int function_literal_id,
+ const AstRawString* raw_name) {
+ if (flags().function_kind() !=
+ FunctionKind::kClassMembersInitializerFunction) {
+ return DoParseFunction(isolate, info, start_position, end_position,
+ function_literal_id, raw_name);
+ }
+
+ // Reparse the outer class while skipping the non-fields to get a list of
+ // ClassLiteralProperty and create an InitializeClassMembersStatement for
+ // the synthetic instance initializer function.
+ FunctionLiteral* result = ParseClassForInstanceMemberInitialization(
+ isolate, original_scope_->AsClassScope(), start_position,
+ function_literal_id);
+ DCHECK_EQ(result->kind(), FunctionKind::kClassMembersInitializerFunction);
+ DCHECK_EQ(result->function_literal_id(), function_literal_id);
+ DCHECK_EQ(result->end_position(), shared_info->EndPosition());
+
+ // The private_name_lookup_skips_outer_class bit should be set by
+ // PostProcessParseResult() during scope analysis later.
+ return result;
+}
+
+FunctionLiteral* Parser::ParseClassForInstanceMemberInitialization(
+ Isolate* isolate, ClassScope* original_scope, int initializer_pos,
+ int initializer_id) {
+ int class_token_pos = initializer_pos;
+
+ // Insert a FunctionState with the closest outer Declaration scope
+ DeclarationScope* nearest_decl_scope = original_scope->GetDeclarationScope();
+ DCHECK_NOT_NULL(nearest_decl_scope);
+ FunctionState function_state(&function_state_, &scope_, nearest_decl_scope);
+ // We will reindex the function literals later.
+ ResetFunctionLiteralId();
+
+ // We preparse the class members that are not fields with initializers
+ // in order to collect the function literal ids.
+ ParsingModeScope mode(this, PARSE_LAZILY);
+
+ ExpressionParsingScope no_expression_scope(impl());
+
+ // We will reparse the entire class because we want to know if
+ // the class is anonymous.
+ // When the function is a kClassMembersInitializerFunction, we record the
+ // source range of the entire class as its positions in its SFI, so at this
+ // point the scanner should be rewound to the position of the class token.
+ DCHECK_EQ(peek(), Token::CLASS);
+ Expect(Token::CLASS);
+
+ const AstRawString* class_name = NullIdentifier();
+ const AstRawString* variable_name = NullIdentifier();
+ // It's a reparse so we don't need to check for default export or
+ // whether the names are reserved.
+ if (peek() == Token::EXTENDS || peek() == Token::LBRACE) {
+ GetDefaultStrings(&class_name, &variable_name);
+ } else {
+ class_name = ParseIdentifier();
+ variable_name = class_name;
+ }
+ bool is_anonymous = class_name == nullptr || class_name->IsEmpty();
+
+ // Create a new ClassScope for the parser to create the inner scopes,
+ // the variable resolution would be done in the original scope, however.
+ // TODO(joyee): see if we can reset the original scope to a state that
+ // can be reused directly and avoid creating this temporary scope.
+ ClassScope* reparsed_scope =
+ NewClassScope(original_scope->outer_scope(), is_anonymous);
+
+#ifdef DEBUG
+ original_scope->SetScopeName(class_name);
+#endif
+
+ Expression* expr =
+ DoParseClassLiteral(reparsed_scope, class_name, scanner()->location(),
+ is_anonymous, class_token_pos);
+ DCHECK(expr->IsClassLiteral());
+ ClassLiteral* literal = expr->AsClassLiteral();
+ FunctionLiteral* initializer =
+ literal->instance_members_initializer_function();
+
+ // Reindex so that the function literal ids match.
+ AstFunctionLiteralIdReindexer reindexer(
+ stack_limit_, initializer_id - initializer->function_literal_id());
+ reindexer.Reindex(expr);
+
+ no_expression_scope.ValidateExpression();
+
+ // Fix up the scope chain and the references used by the instance member
+ // initializer.
+ reparsed_scope->ReplaceReparsedClassScope(isolate, ast_value_factory(),
+ original_scope);
+ original_scope_ = reparsed_scope;
+ return initializer;
+}
+
Statement* Parser::ParseModuleItem() {
// ecma262/#prod-ModuleItem
// ModuleItem :
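
DoParseDeserializedFunction above reparses the enclosing class and then shifts every function literal id by a fixed delta (initializer_id minus the id assigned during the reparse) so that the synthetic instance-member initializer ends up with the id recorded in the SharedFunctionInfo. A toy sketch of that delta-reindexing idea; the flattened id list here is a hypothetical stand-in for the AST walk done by AstFunctionLiteralIdReindexer:

    #include <vector>

    // Hypothetical stand-in for a reparsed class: just its literal ids.
    struct ReparsedClass {
      std::vector<int> function_literal_ids;
    };

    // Shift every id by the difference between the id the initializer should
    // have (from the SFI) and the id it received during the reparse.
    void ReindexFunctionLiterals(ReparsedClass* cls, int expected_initializer_id,
                                 int actual_initializer_id) {
      const int delta = expected_initializer_id - actual_initializer_id;
      for (int& id : cls->function_literal_ids) id += delta;
    }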
@@ -2563,44 +2665,38 @@ FunctionLiteral* Parser::ParseFunctionLiteral(
eager_compile_hint == FunctionLiteral::kShouldLazyCompile;
const bool is_top_level = AllowsLazyParsingWithoutUnresolvedVariables();
const bool is_eager_top_level_function = !is_lazy && is_top_level;
- const bool is_lazy_top_level_function = is_lazy && is_top_level;
- const bool is_lazy_inner_function = is_lazy && !is_top_level;
RCS_SCOPE(runtime_call_stats_, RuntimeCallCounterId::kParseFunctionLiteral,
RuntimeCallStats::kThreadSpecific);
base::ElapsedTimer timer;
if (V8_UNLIKELY(FLAG_log_function_events)) timer.Start();
- // Determine whether we can still lazy parse the inner function.
- // The preconditions are:
- // - Lazy compilation has to be enabled.
- // - Neither V8 natives nor native function declarations can be allowed,
- // since parsing one would retroactively force the function to be
- // eagerly compiled.
- // - The invoker of this parser can't depend on the AST being eagerly
- // built (either because the function is about to be compiled, or
- // because the AST is going to be inspected for some reason).
- // - Because of the above, we can't be attempting to parse a
- // FunctionExpression; even without enclosing parentheses it might be
- // immediately invoked.
- // - The function literal shouldn't be hinted to eagerly compile.
-
- // Inner functions will be parsed using a temporary Zone. After parsing, we
- // will migrate unresolved variable into a Scope in the main Zone.
-
- const bool should_preparse_inner = parse_lazily() && is_lazy_inner_function;
+ // Determine whether we can lazy parse the inner function. Lazy compilation
+ // has to be enabled, which is either forced by overall parse flags or via a
+ // ParsingModeScope.
+ const bool can_preparse = parse_lazily();
- // If parallel compile tasks are enabled, and the function is an eager
- // top level function, then we can pre-parse the function and parse / compile
- // in a parallel task on a worker thread.
- bool should_post_parallel_task =
- parse_lazily() && is_eager_top_level_function &&
- FLAG_parallel_compile_tasks && info()->parallel_tasks() &&
+ // Determine whether we can post any parallel compile tasks. Preparsing must
+ // be possible, there has to be a dispatcher, and the character stream must be
+ // cloneable.
+ const bool can_post_parallel_task =
+ can_preparse && info()->dispatcher() &&
scanner()->stream()->can_be_cloned_for_parallel_access();
- // This may be modified later to reflect preparsing decision taken
- bool should_preparse = (parse_lazily() && is_lazy_top_level_function) ||
- should_preparse_inner || should_post_parallel_task;
+ // If parallel compile tasks are enabled, enable parallel compile for the
+ // subset of functions as defined by flags.
+ bool should_post_parallel_task =
+ can_post_parallel_task &&
+ ((is_eager_top_level_function &&
+ flags().post_parallel_compile_tasks_for_eager_toplevel()) ||
+ (is_lazy && flags().post_parallel_compile_tasks_for_lazy()));
+
+ // Determine whether we should lazy parse the inner function. This will be
+ // when either the function is lazy by inspection, or when we force it to be
+ // preparsed now so that we can then post a parallel full parse & compile task
+ // for it.
+ const bool should_preparse =
+ can_preparse && (is_lazy || should_post_parallel_task);
ScopedPtrList<Statement> body(pointer_buffer());
int expected_property_count = 0;
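
The rewritten block above collapses the previous ad-hoc booleans into three questions: can we preparse at all, can we post parallel compile tasks, and should we. A condensed sketch of the same decision flow, with the flag accessors, dispatcher check and stream check reduced to plain booleans:

    struct LazyParseDecision {
      bool should_post_parallel_task;
      bool should_preparse;
    };

    // Inputs mirror the conditions used in ParseFunctionLiteral above; the
    // real code reads them from the parse flags, the dispatcher and the
    // scanner's character stream.
    LazyParseDecision Decide(bool parse_lazily, bool is_lazy, bool is_top_level,
                             bool has_dispatcher, bool stream_cloneable,
                             bool parallel_eager_toplevel, bool parallel_lazy) {
      const bool can_preparse = parse_lazily;
      const bool can_post_parallel_task =
          can_preparse && has_dispatcher && stream_cloneable;
      const bool is_eager_top_level = !is_lazy && is_top_level;
      const bool should_post_parallel_task =
          can_post_parallel_task &&
          ((is_eager_top_level && parallel_eager_toplevel) ||
           (is_lazy && parallel_lazy));
      // Preparse when the function is lazy anyway, or when we preparse it now
      // so a parallel full parse & compile task can be posted for it.
      const bool should_preparse =
          can_preparse && (is_lazy || should_post_parallel_task);
      return {should_post_parallel_task, should_preparse};
    }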
@@ -2611,8 +2707,10 @@ FunctionLiteral* Parser::ParseFunctionLiteral(
int function_literal_id = GetNextFunctionLiteralId();
ProducedPreparseData* produced_preparse_data = nullptr;
- // This Scope lives in the main zone. We'll migrate data into that zone later.
+ // Inner functions will be parsed using a temporary Zone. After parsing, we
+ // will migrate unresolved variable into a Scope in the main Zone.
Zone* parse_zone = should_preparse ? &preparser_zone_ : zone();
+ // This Scope lives in the main zone. We'll migrate data into that zone later.
DeclarationScope* scope = NewFunctionScope(kind, parse_zone);
SetLanguageMode(scope, language_mode);
#ifdef DEBUG
@@ -2692,9 +2790,8 @@ FunctionLiteral* Parser::ParseFunctionLiteral(
RecordFunctionLiteralSourceRange(function_literal);
- if (should_post_parallel_task) {
- // Start a parallel parse / compile task on the compiler dispatcher.
- info()->parallel_tasks()->Enqueue(info(), function_name, function_literal);
+ if (should_post_parallel_task && !has_error()) {
+ function_literal->set_should_parallel_compile();
}
if (should_infer_name) {
@@ -2724,11 +2821,17 @@ bool Parser::SkipFunction(const AstRawString* function_name, FunctionKind kind,
int num_inner_functions;
bool uses_super_property;
if (stack_overflow()) return true;
- *produced_preparse_data =
- consumed_preparse_data_->GetDataForSkippableFunction(
- main_zone(), function_scope->start_position(), &end_position,
- num_parameters, function_length, &num_inner_functions,
- &uses_super_property, &language_mode);
+ {
+ base::Optional<UnparkedScope> unparked_scope;
+ if (overall_parse_is_parked_) {
+ unparked_scope.emplace(local_isolate_);
+ }
+ *produced_preparse_data =
+ consumed_preparse_data_->GetDataForSkippableFunction(
+ main_zone(), function_scope->start_position(), &end_position,
+ num_parameters, function_length, &num_inner_functions,
+ &uses_super_property, &language_mode);
+ }
function_scope->outer_scope()->SetMustUsePreparseData();
function_scope->set_is_skipped_function(true);
@@ -3118,7 +3221,9 @@ FunctionLiteral* Parser::CreateInitializerFunction(
FunctionSyntaxKind::kAccessorOrMethod,
FunctionLiteral::kShouldEagerCompile, scope->start_position(), false,
GetNextFunctionLiteralId());
-
+#ifdef DEBUG
+ scope->SetScopeName(ast_value_factory()->GetOneByteString(name));
+#endif
RecordFunctionLiteralSourceRange(result);
return result;
@@ -3265,55 +3370,76 @@ void Parser::UpdateStatistics(Isolate* isolate, Handle<Script> script) {
total_preparse_skipped_);
}
-void Parser::UpdateStatistics(Handle<Script> script, int* use_counts,
- int* preparse_skipped) {
+void Parser::UpdateStatistics(
+ Handle<Script> script,
+ base::SmallVector<v8::Isolate::UseCounterFeature, 8>* use_counts,
+ int* preparse_skipped) {
// Move statistics to Isolate.
for (int feature = 0; feature < v8::Isolate::kUseCounterFeatureCount;
++feature) {
if (use_counts_[feature] > 0) {
- use_counts[feature]++;
+ use_counts->emplace_back(v8::Isolate::UseCounterFeature(feature));
}
}
if (scanner_.FoundHtmlComment()) {
- use_counts[v8::Isolate::kHtmlComment]++;
+ use_counts->emplace_back(v8::Isolate::kHtmlComment);
if (script->line_offset() == 0 && script->column_offset() == 0) {
- use_counts[v8::Isolate::kHtmlCommentInExternalScript]++;
+ use_counts->emplace_back(v8::Isolate::kHtmlCommentInExternalScript);
}
}
*preparse_skipped = total_preparse_skipped_;
}
-void Parser::ParseOnBackground(ParseInfo* info, int start_position,
- int end_position, int function_literal_id) {
+void Parser::ParseOnBackground(LocalIsolate* isolate, ParseInfo* info,
+ int start_position, int end_position,
+ int function_literal_id) {
RCS_SCOPE(runtime_call_stats_, RuntimeCallCounterId::kParseBackgroundProgram);
parsing_on_main_thread_ = false;
DCHECK_NULL(info->literal());
FunctionLiteral* result = nullptr;
-
- scanner_.Initialize();
-
- DCHECK(original_scope_);
-
- // When streaming, we don't know the length of the source until we have parsed
- // it. The raw data can be UTF-8, so we wouldn't know the source length until
- // we have decoded it anyway even if we knew the raw data length (which we
- // don't). We work around this by storing all the scopes which need their end
- // position set at the end of the script (the top scope and possible eval
- // scopes) and set their end position after we know the script length.
+ {
+ // We can park the isolate while parsing, it doesn't need to allocate or
+ // access the main thread.
+ ParkedScope parked_scope(isolate);
+ overall_parse_is_parked_ = true;
+
+ scanner_.Initialize();
+
+ DCHECK(original_scope_);
+
+ // When streaming, we don't know the length of the source until we have
+ // parsed it. The raw data can be UTF-8, so we wouldn't know the source
+ // length until we have decoded it anyway even if we knew the raw data
+ // length (which we don't). We work around this by storing all the scopes
+ // which need their end position set at the end of the script (the top scope
+ // and possible eval scopes) and set their end position after we know the
+ // script length.
+ if (flags().is_toplevel()) {
+ DCHECK_EQ(start_position, 0);
+ DCHECK_EQ(end_position, 0);
+ DCHECK_EQ(function_literal_id, kFunctionLiteralIdTopLevel);
+ result = DoParseProgram(/* isolate = */ nullptr, info);
+ } else {
+ base::Optional<ClassScope::HeritageParsingScope> heritage;
+ if (V8_UNLIKELY(flags().private_name_lookup_skips_outer_class() &&
+ original_scope_->is_class_scope())) {
+ // If the function skips the outer class and the outer scope is a class,
+ // the function is in heritage position. Otherwise the function scope's
+ // skip bit will be correctly inherited from the outer scope.
+ heritage.emplace(original_scope_->AsClassScope());
+ }
+ result = DoParseFunction(/* isolate = */ nullptr, info, start_position,
+ end_position, function_literal_id,
+ info->function_name());
+ }
+ MaybeProcessSourceRanges(info, result, stack_limit_);
+ }
+ // We need to unpark by now though, to be able to internalize.
+ PostProcessParseResult(isolate, info, result);
if (flags().is_toplevel()) {
- DCHECK_EQ(start_position, 0);
- DCHECK_EQ(end_position, 0);
- DCHECK_EQ(function_literal_id, kFunctionLiteralIdTopLevel);
- result = DoParseProgram(/* isolate = */ nullptr, info);
- } else {
- result = DoParseFunction(/* isolate = */ nullptr, info, start_position,
- end_position, function_literal_id,
- info->function_name());
+ HandleSourceURLComments(isolate, script_);
}
- MaybeResetCharacterStream(info, result);
- MaybeProcessSourceRanges(info, result, stack_limit_);
- PostProcessParseResult(/* isolate = */ nullptr, info, result);
}
Parser::TemplateLiteralState Parser::OpenTemplateLiteral(int pos) {
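
ParseOnBackground above wraps the whole parse in a ParkedScope, since the background parse does not need heap access, and SkipFunction briefly re-enters an UnparkedScope when it has to read on-heap preparse data while overall_parse_is_parked_ is set. A schematic of that park/unpark nesting, with LocalIsolate and the RAII scopes reduced to a single boolean (the real classes come from src/heap/parked-scope.h, included by this patch):

    #include <cassert>

    // Reduced stand-ins; only the parked flag is modeled here.
    struct LocalIsolate {
      bool parked = false;
    };

    class ParkedScope {
     public:
      explicit ParkedScope(LocalIsolate* isolate) : isolate_(isolate) {
        isolate_->parked = true;  // give up heap access for the duration
      }
      ~ParkedScope() { isolate_->parked = false; }

     private:
      LocalIsolate* isolate_;
    };

    class UnparkedScope {
     public:
      explicit UnparkedScope(LocalIsolate* isolate) : isolate_(isolate) {
        isolate_->parked = false;  // temporarily regain heap access
      }
      ~UnparkedScope() { isolate_->parked = true; }

     private:
      LocalIsolate* isolate_;
    };

    void ParseOnBackgroundSketch(LocalIsolate* isolate) {
      {
        ParkedScope parked(isolate);  // scan and parse without heap access
        {
          UnparkedScope unparked(isolate);  // e.g. reading on-heap preparse data
          assert(!isolate->parked);
        }
        assert(isolate->parked);
      }
      // Unparked again here: internalization and post-processing may allocate.
    }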
diff --git a/deps/v8/src/parsing/parser.h b/deps/v8/src/parsing/parser.h
index f294e64f0a..c85f2afad7 100644
--- a/deps/v8/src/parsing/parser.h
+++ b/deps/v8/src/parsing/parser.h
@@ -12,6 +12,7 @@
#include "src/ast/ast.h"
#include "src/ast/scopes.h"
#include "src/base/compiler-specific.h"
+#include "src/base/small-vector.h"
#include "src/base/threaded-list.h"
#include "src/common/globals.h"
#include "src/parsing/import-assertions.h"
@@ -130,7 +131,7 @@ struct ParserTypes<Parser> {
class V8_EXPORT_PRIVATE Parser : public NON_EXPORTED_BASE(ParserBase<Parser>) {
public:
- explicit Parser(ParseInfo* info);
+ Parser(LocalIsolate* local_isolate, ParseInfo* info, Handle<Script> script);
~Parser() {
delete reusable_preparser_;
reusable_preparser_ = nullptr;
@@ -139,7 +140,8 @@ class V8_EXPORT_PRIVATE Parser : public NON_EXPORTED_BASE(ParserBase<Parser>) {
static bool IsPreParser() { return false; }
// Sets the literal on |info| if parsing succeeded.
- void ParseOnBackground(ParseInfo* info, int start_position, int end_position,
+ void ParseOnBackground(LocalIsolate* isolate, ParseInfo* info,
+ int start_position, int end_position,
int function_literal_id);
// Initializes an empty scope chain for top-level scripts, or scopes which
@@ -154,15 +156,18 @@ class V8_EXPORT_PRIVATE Parser : public NON_EXPORTED_BASE(ParserBase<Parser>) {
// This only deserializes the scope chain, but doesn't connect the scopes to
// their corresponding scope infos. Therefore, looking up variables in the
// deserialized scopes is not possible.
- void DeserializeScopeChain(Isolate* isolate, ParseInfo* info,
+ template <typename IsolateT>
+ void DeserializeScopeChain(IsolateT* isolate, ParseInfo* info,
MaybeHandle<ScopeInfo> maybe_outer_scope_info,
Scope::DeserializationMode mode =
Scope::DeserializationMode::kScopesOnly);
// Move statistics to Isolate
void UpdateStatistics(Isolate* isolate, Handle<Script> script);
- void UpdateStatistics(Handle<Script> script, int* use_counters,
- int* preparse_skipped);
+ void UpdateStatistics(
+ Handle<Script> script,
+ base::SmallVector<v8::Isolate::UseCounterFeature, 8>* use_counters,
+ int* preparse_skipped);
template <typename IsolateT>
void HandleSourceURLComments(IsolateT* isolate, Handle<Script> script);
@@ -223,7 +228,8 @@ class V8_EXPORT_PRIVATE Parser : public NON_EXPORTED_BASE(ParserBase<Parser>) {
void ParseFunction(Isolate* isolate, ParseInfo* info,
Handle<SharedFunctionInfo> shared_info);
- void PostProcessParseResult(Isolate* isolate, ParseInfo* info,
+ template <typename IsolateT>
+ void PostProcessParseResult(IsolateT* isolate, ParseInfo* info,
FunctionLiteral* literal);
FunctionLiteral* DoParseFunction(Isolate* isolate, ParseInfo* info,
@@ -231,6 +237,15 @@ class V8_EXPORT_PRIVATE Parser : public NON_EXPORTED_BASE(ParserBase<Parser>) {
int function_literal_id,
const AstRawString* raw_name);
+ FunctionLiteral* DoParseDeserializedFunction(
+ Isolate* isolate, Handle<SharedFunctionInfo> shared_info, ParseInfo* info,
+ int start_position, int end_position, int function_literal_id,
+ const AstRawString* raw_name);
+
+ FunctionLiteral* ParseClassForInstanceMemberInitialization(
+ Isolate* isolate, ClassScope* scope, int initializer_pos,
+ int initializer_id);
+
// Called by ParseProgram after setting up the scanner.
FunctionLiteral* DoParseProgram(Isolate* isolate, ParseInfo* info);
@@ -1056,11 +1071,14 @@ class V8_EXPORT_PRIVATE Parser : public NON_EXPORTED_BASE(ParserBase<Parser>) {
friend class PreParserZoneScope; // Uses reusable_preparser().
friend class PreparseDataBuilder; // Uses preparse_data_buffer()
+ LocalIsolate* local_isolate_;
ParseInfo* info_;
+ Handle<Script> script_;
Scanner scanner_;
Zone preparser_zone_;
PreParser* reusable_preparser_;
Mode mode_;
+ bool overall_parse_is_parked_ = false;
MaybeHandle<FixedArray> maybe_wrapped_arguments_;
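
The UpdateStatistics overload declared above now reports which use-counter features fired as a list of feature ids in a base::SmallVector<v8::Isolate::UseCounterFeature, 8>, instead of incrementing a caller-owned counts array, so a background parse can hand the list to the main thread later. A reduced sketch of the collection side, with std::vector standing in for base::SmallVector and a placeholder feature enum:

    #include <vector>

    // Placeholder for v8::Isolate::UseCounterFeature.
    enum UseCounterFeature : int { kFeatureCount = 8 };

    // Collect every feature whose per-parser counter is non-zero. The real
    // code uses base::SmallVector<UseCounterFeature, 8> to stay on the stack
    // in the common case of few features.
    void CollectUseCounts(const int (&use_counts)[kFeatureCount],
                          std::vector<UseCounterFeature>* out) {
      for (int feature = 0; feature < kFeatureCount; ++feature) {
        if (use_counts[feature] > 0) {
          out->push_back(static_cast<UseCounterFeature>(feature));
        }
      }
    }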
diff --git a/deps/v8/src/parsing/parsing.cc b/deps/v8/src/parsing/parsing.cc
index 53f6cf045b..add1f203f0 100644
--- a/deps/v8/src/parsing/parsing.cc
+++ b/deps/v8/src/parsing/parsing.cc
@@ -23,9 +23,9 @@ namespace parsing {
namespace {
-void MaybeReportErrorsAndStatistics(ParseInfo* info, Handle<Script> script,
- Isolate* isolate, Parser* parser,
- ReportStatisticsMode mode) {
+void MaybeReportStatistics(ParseInfo* info, Handle<Script> script,
+ Isolate* isolate, Parser* parser,
+ ReportStatisticsMode mode) {
switch (mode) {
case ReportStatisticsMode::kYes:
parser->UpdateStatistics(isolate, script);
@@ -52,12 +52,12 @@ bool ParseProgram(ParseInfo* info, Handle<Script> script,
ScannerStream::For(isolate, source));
info->set_character_stream(std::move(stream));
- Parser parser(info);
+ Parser parser(isolate->main_thread_local_isolate(), info, script);
// Ok to use Isolate here; this function is only called in the main thread.
DCHECK(parser.parsing_on_main_thread_);
parser.ParseProgram(isolate, script, info, maybe_outer_scope_info);
- MaybeReportErrorsAndStatistics(info, script, isolate, &parser, mode);
+ MaybeReportStatistics(info, script, isolate, &parser, mode);
return info->literal() != nullptr;
}
@@ -83,12 +83,12 @@ bool ParseFunction(ParseInfo* info, Handle<SharedFunctionInfo> shared_info,
shared_info->EndPosition()));
info->set_character_stream(std::move(stream));
- Parser parser(info);
+ Parser parser(isolate->main_thread_local_isolate(), info, script);
// Ok to use Isolate here; this function is only called in the main thread.
DCHECK(parser.parsing_on_main_thread_);
parser.ParseFunction(isolate, info, shared_info);
- MaybeReportErrorsAndStatistics(info, script, isolate, &parser, mode);
+ MaybeReportStatistics(info, script, isolate, &parser, mode);
return info->literal() != nullptr;
}
diff --git a/deps/v8/src/parsing/preparse-data-impl.h b/deps/v8/src/parsing/preparse-data-impl.h
index eb528fa645..3e271844a5 100644
--- a/deps/v8/src/parsing/preparse-data-impl.h
+++ b/deps/v8/src/parsing/preparse-data-impl.h
@@ -182,13 +182,13 @@ class BaseConsumedPreparseData : public ConsumedPreparseData {
class OnHeapConsumedPreparseData final
: public BaseConsumedPreparseData<PreparseData> {
public:
- OnHeapConsumedPreparseData(Isolate* isolate, Handle<PreparseData> data);
+ OnHeapConsumedPreparseData(LocalIsolate* isolate, Handle<PreparseData> data);
PreparseData GetScopeData() final;
ProducedPreparseData* GetChildData(Zone* zone, int child_index) final;
private:
- Isolate* isolate_;
+ LocalIsolate* isolate_;
Handle<PreparseData> data_;
};
diff --git a/deps/v8/src/parsing/preparse-data.cc b/deps/v8/src/parsing/preparse-data.cc
index f368a11f9a..3ad36e20c1 100644
--- a/deps/v8/src/parsing/preparse-data.cc
+++ b/deps/v8/src/parsing/preparse-data.cc
@@ -528,8 +528,9 @@ class OnHeapProducedPreparseData final : public ProducedPreparseData {
}
Handle<PreparseData> Serialize(LocalIsolate* isolate) final {
- // Not required.
- UNREACHABLE();
+ DCHECK(!data_->is_null());
+ DCHECK(isolate->heap()->ContainsLocalHandle(data_.location()));
+ return data_;
}
ZonePreparseData* Serialize(Zone* zone) final {
@@ -553,7 +554,11 @@ class ZoneProducedPreparseData final : public ProducedPreparseData {
return data_->Serialize(isolate);
}
- ZonePreparseData* Serialize(Zone* zone) final { return data_; }
+ ZonePreparseData* Serialize(Zone* zone) final {
+ base::Vector<uint8_t> data(data_->byte_data()->data(),
+ data_->byte_data()->size());
+ return zone->New<ZonePreparseData>(zone, &data, data_->children_length());
+ }
private:
ZonePreparseData* data_;
@@ -765,7 +770,7 @@ ProducedPreparseData* OnHeapConsumedPreparseData::GetChildData(Zone* zone,
}
OnHeapConsumedPreparseData::OnHeapConsumedPreparseData(
- Isolate* isolate, Handle<PreparseData> data)
+ LocalIsolate* isolate, Handle<PreparseData> data)
: BaseConsumedPreparseData<PreparseData>(), isolate_(isolate), data_(data) {
DCHECK_NOT_NULL(isolate);
DCHECK(data->IsPreparseData());
@@ -829,6 +834,11 @@ ProducedPreparseData* ZoneConsumedPreparseData::GetChildData(Zone* zone,
std::unique_ptr<ConsumedPreparseData> ConsumedPreparseData::For(
Isolate* isolate, Handle<PreparseData> data) {
+ return ConsumedPreparseData::For(isolate->main_thread_local_isolate(), data);
+}
+
+std::unique_ptr<ConsumedPreparseData> ConsumedPreparseData::For(
+ LocalIsolate* isolate, Handle<PreparseData> data) {
DCHECK(!data.is_null());
return std::make_unique<OnHeapConsumedPreparseData>(isolate, data);
}
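
The new ConsumedPreparseData::For(Isolate*, ...) body above simply forwards to the LocalIsolate overload via main_thread_local_isolate(), so only one code path constructs OnHeapConsumedPreparseData. A minimal sketch of that forwarding pattern with stand-in types:

    #include <memory>

    // Stand-ins: a main-thread Isolate owns a LocalIsolate view of itself.
    struct LocalIsolate {};
    struct Isolate {
      LocalIsolate local;
      LocalIsolate* main_thread_local_isolate() { return &local; }
    };

    struct ConsumedData {
      explicit ConsumedData(LocalIsolate* isolate) : isolate_(isolate) {}
      LocalIsolate* isolate_;
    };

    // Single "real" factory, parameterized on LocalIsolate.
    std::unique_ptr<ConsumedData> ForLocal(LocalIsolate* isolate) {
      return std::make_unique<ConsumedData>(isolate);
    }

    // The Isolate overload just delegates, mirroring the change above.
    std::unique_ptr<ConsumedData> For(Isolate* isolate) {
      return ForLocal(isolate->main_thread_local_isolate());
    }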
diff --git a/deps/v8/src/parsing/preparse-data.h b/deps/v8/src/parsing/preparse-data.h
index 8132ac1dc5..ba081c736d 100644
--- a/deps/v8/src/parsing/preparse-data.h
+++ b/deps/v8/src/parsing/preparse-data.h
@@ -281,6 +281,8 @@ class ConsumedPreparseData {
// PreparseData |data|.
V8_EXPORT_PRIVATE static std::unique_ptr<ConsumedPreparseData> For(
Isolate* isolate, Handle<PreparseData> data);
+ V8_EXPORT_PRIVATE static std::unique_ptr<ConsumedPreparseData> For(
+ LocalIsolate* isolate, Handle<PreparseData> data);
// Creates a ConsumedPreparseData representing the data of an off-heap
// ZonePreparseData |data|.
diff --git a/deps/v8/src/parsing/scanner-character-streams.cc b/deps/v8/src/parsing/scanner-character-streams.cc
index 5ecd85f2f2..34a8788c57 100644
--- a/deps/v8/src/parsing/scanner-character-streams.cc
+++ b/deps/v8/src/parsing/scanner-character-streams.cc
@@ -153,52 +153,47 @@ template <typename Char>
class ChunkedStream {
public:
explicit ChunkedStream(ScriptCompiler::ExternalSourceStream* source)
- : source_(source) {}
+ : source_(source), chunks_(std::make_shared<std::vector<Chunk>>()) {}
- ChunkedStream(const ChunkedStream&) V8_NOEXCEPT {
- // TODO(rmcilroy): Implement cloning for chunked streams.
- UNREACHABLE();
- }
+ ChunkedStream(const ChunkedStream& other) V8_NOEXCEPT
+ : source_(nullptr),
+ chunks_(other.chunks_) {}
// The no_gc argument is only here because of the templated way this class
// is used along with other implementations that require V8 heap access.
Range<Char> GetDataAt(size_t pos, RuntimeCallStats* stats,
DisallowGarbageCollection* no_gc = nullptr) {
- Chunk chunk = FindChunk(pos, stats);
+ Chunk& chunk = FindChunk(pos, stats);
size_t buffer_end = chunk.length;
size_t buffer_pos = std::min(buffer_end, pos - chunk.position);
- return {&chunk.data[buffer_pos], &chunk.data[buffer_end]};
- }
-
- ~ChunkedStream() {
- for (Chunk& chunk : chunks_) delete[] chunk.data;
+ return {&chunk.data.get()[buffer_pos], &chunk.data.get()[buffer_end]};
}
- static const bool kCanBeCloned = false;
+ static const bool kCanBeCloned = true;
static const bool kCanAccessHeap = false;
private:
struct Chunk {
Chunk(const Char* const data, size_t position, size_t length)
: data(data), position(position), length(length) {}
- const Char* const data;
+ std::unique_ptr<const Char[]> data;
// The logical position of data.
const size_t position;
const size_t length;
size_t end_position() const { return position + length; }
};
- Chunk FindChunk(size_t position, RuntimeCallStats* stats) {
- while (V8_UNLIKELY(chunks_.empty())) FetchChunk(size_t{0}, stats);
+ Chunk& FindChunk(size_t position, RuntimeCallStats* stats) {
+ while (V8_UNLIKELY(chunks_->empty())) FetchChunk(size_t{0}, stats);
// Walk forwards while the position is in front of the current chunk.
- while (position >= chunks_.back().end_position() &&
- chunks_.back().length > 0) {
- FetchChunk(chunks_.back().end_position(), stats);
+ while (position >= chunks_->back().end_position() &&
+ chunks_->back().length > 0) {
+ FetchChunk(chunks_->back().end_position(), stats);
}
// Walk backwards.
- for (auto reverse_it = chunks_.rbegin(); reverse_it != chunks_.rend();
+ for (auto reverse_it = chunks_->rbegin(); reverse_it != chunks_->rend();
++reverse_it) {
if (reverse_it->position <= position) return *reverse_it;
}
@@ -210,11 +205,15 @@ class ChunkedStream {
size_t length) {
// Incoming data has to be aligned to Char size.
DCHECK_EQ(0, length % sizeof(Char));
- chunks_.emplace_back(reinterpret_cast<const Char*>(data), position,
- length / sizeof(Char));
+ chunks_->emplace_back(reinterpret_cast<const Char*>(data), position,
+ length / sizeof(Char));
}
void FetchChunk(size_t position, RuntimeCallStats* stats) {
+ // Cloned ChunkedStreams have a null source, and therefore can't fetch any
+ // new data.
+ DCHECK_NOT_NULL(source_);
+
const uint8_t* data = nullptr;
size_t length;
{
@@ -227,7 +226,7 @@ class ChunkedStream {
ScriptCompiler::ExternalSourceStream* source_;
protected:
- std::vector<struct Chunk> chunks_;
+ std::shared_ptr<std::vector<struct Chunk>> chunks_;
};
// Provides a buffered utf-16 view on the bytes from the underlying ByteStream.
@@ -522,18 +521,18 @@ class Utf8ExternalStreamingStream final : public BufferedUtf16CharacterStream {
public:
Utf8ExternalStreamingStream(
ScriptCompiler::ExternalSourceStream* source_stream)
- : current_({0, {0, 0, 0, unibrow::Utf8::State::kAccept}}),
+ : chunks_(std::make_shared<std::vector<Chunk>>()),
+ current_({0, {0, 0, 0, unibrow::Utf8::State::kAccept}}),
source_stream_(source_stream) {}
- ~Utf8ExternalStreamingStream() final {
- for (const Chunk& chunk : chunks_) delete[] chunk.data;
- }
+ ~Utf8ExternalStreamingStream() final = default;
bool can_access_heap() const final { return false; }
- bool can_be_cloned() const final { return false; }
+ bool can_be_cloned() const final { return true; }
std::unique_ptr<Utf16CharacterStream> Clone() const override {
- UNREACHABLE();
+ return std::unique_ptr<Utf16CharacterStream>(
+ new Utf8ExternalStreamingStream(*this));
}
protected:
@@ -563,11 +562,18 @@ class Utf8ExternalStreamingStream final : public BufferedUtf16CharacterStream {
// - The chunk data (data pointer and length), and
// - the position at the first byte of the chunk.
struct Chunk {
- const uint8_t* data;
+ Chunk(const uint8_t* data, size_t length, StreamPosition start)
+ : data(data), length(length), start(start) {}
+ std::unique_ptr<const uint8_t[]> data;
size_t length;
StreamPosition start;
};
+ Utf8ExternalStreamingStream(const Utf8ExternalStreamingStream& source_stream)
+ V8_NOEXCEPT : chunks_(source_stream.chunks_),
+ current_({0, {0, 0, 0, unibrow::Utf8::State::kAccept}}),
+ source_stream_(nullptr) {}
+
// Within the current chunk, skip forward from current_ towards position.
bool SkipToPosition(size_t position);
// Within the current chunk, fill the buffer_ (while it has capacity).
@@ -578,7 +584,9 @@ class Utf8ExternalStreamingStream final : public BufferedUtf16CharacterStream {
// (This call is potentially expensive.)
void SearchPosition(size_t position);
- std::vector<Chunk> chunks_;
+ Chunk& GetChunk(size_t chunk_no) { return (*chunks_)[chunk_no]; }
+
+ std::shared_ptr<std::vector<Chunk>> chunks_;
Position current_;
ScriptCompiler::ExternalSourceStream* source_stream_;
};
@@ -589,14 +597,14 @@ bool Utf8ExternalStreamingStream::SkipToPosition(size_t position) {
// Already there? Then return immediately.
if (current_.pos.chars == position) return true;
- const Chunk& chunk = chunks_[current_.chunk_no];
+ const Chunk& chunk = GetChunk(current_.chunk_no);
DCHECK(current_.pos.bytes >= chunk.start.bytes);
unibrow::Utf8::State state = chunk.start.state;
uint32_t incomplete_char = chunk.start.incomplete_char;
size_t it = current_.pos.bytes - chunk.start.bytes;
- const uint8_t* cursor = &chunk.data[it];
- const uint8_t* end = &chunk.data[chunk.length];
+ const uint8_t* cursor = &chunk.data.get()[it];
+ const uint8_t* end = &chunk.data.get()[chunk.length];
size_t chars = current_.pos.chars;
@@ -622,7 +630,7 @@ bool Utf8ExternalStreamingStream::SkipToPosition(size_t position) {
}
}
- current_.pos.bytes = chunk.start.bytes + (cursor - chunk.data);
+ current_.pos.bytes = chunk.start.bytes + (cursor - chunk.data.get());
current_.pos.chars = chars;
current_.pos.incomplete_char = incomplete_char;
current_.pos.state = state;
@@ -632,11 +640,11 @@ bool Utf8ExternalStreamingStream::SkipToPosition(size_t position) {
}
void Utf8ExternalStreamingStream::FillBufferFromCurrentChunk() {
- DCHECK_LT(current_.chunk_no, chunks_.size());
+ DCHECK_LT(current_.chunk_no, chunks_->size());
DCHECK_EQ(buffer_start_, buffer_cursor_);
DCHECK_LT(buffer_end_ + 1, buffer_start_ + kBufferSize);
- const Chunk& chunk = chunks_[current_.chunk_no];
+ const Chunk& chunk = GetChunk(current_.chunk_no);
// The buffer_ is writable, but buffer_*_ members are const. So we get a
// non-const pointer into buffer that points to the same char as buffer_end_.
@@ -662,8 +670,8 @@ void Utf8ExternalStreamingStream::FillBufferFromCurrentChunk() {
}
size_t it = current_.pos.bytes - chunk.start.bytes;
- const uint8_t* cursor = chunk.data + it;
- const uint8_t* end = chunk.data + chunk.length;
+ const uint8_t* cursor = chunk.data.get() + it;
+ const uint8_t* end = chunk.data.get() + chunk.length;
// Deal with possible BOM.
if (V8_UNLIKELY(current_.pos.bytes < 3 && current_.pos.chars == 0)) {
@@ -711,7 +719,7 @@ void Utf8ExternalStreamingStream::FillBufferFromCurrentChunk() {
output_cursor += ascii_length;
}
- current_.pos.bytes = chunk.start.bytes + (cursor - chunk.data);
+ current_.pos.bytes = chunk.start.bytes + (cursor - chunk.data.get());
current_.pos.chars += (output_cursor - buffer_end_);
current_.pos.incomplete_char = incomplete_char;
current_.pos.state = state;
@@ -722,12 +730,20 @@ void Utf8ExternalStreamingStream::FillBufferFromCurrentChunk() {
bool Utf8ExternalStreamingStream::FetchChunk() {
RCS_SCOPE(runtime_call_stats(), RuntimeCallCounterId::kGetMoreDataCallback);
- DCHECK_EQ(current_.chunk_no, chunks_.size());
- DCHECK(chunks_.empty() || chunks_.back().length != 0);
+ DCHECK_EQ(current_.chunk_no, chunks_->size());
+ DCHECK(chunks_->empty() || chunks_->back().length != 0);
+
+ // Cloned Utf8ExternalStreamingStreams have a null source stream, and

+ // therefore can't fetch any new data.
+ DCHECK_NOT_NULL(source_stream_);
+
+ // Utf8ExternalStreamingStreams that have been cloned are not allowed to fetch
+ // any more.
+ DCHECK_EQ(chunks_.use_count(), 1);
const uint8_t* chunk = nullptr;
size_t length = source_stream_->GetMoreData(&chunk);
- chunks_.push_back({chunk, length, current_.pos});
+ chunks_->emplace_back(chunk, length, current_.pos);
return length > 0;
}
@@ -738,8 +754,8 @@ void Utf8ExternalStreamingStream::SearchPosition(size_t position) {
// FillBuffer right after the current buffer.
if (current_.pos.chars == position) return;
- // No chunks. Fetch at least one, so we can assume !chunks_.empty() below.
- if (chunks_.empty()) {
+ // No chunks. Fetch at least one, so we can assume !chunks_->empty() below.
+ if (chunks_->empty()) {
DCHECK_EQ(current_.chunk_no, 0u);
DCHECK_EQ(current_.pos.bytes, 0u);
DCHECK_EQ(current_.pos.chars, 0u);
@@ -748,38 +764,39 @@ void Utf8ExternalStreamingStream::SearchPosition(size_t position) {
// Search for the last chunk whose start position is less or equal to
// position.
- size_t chunk_no = chunks_.size() - 1;
- while (chunk_no > 0 && chunks_[chunk_no].start.chars > position) {
+ size_t chunk_no = chunks_->size() - 1;
+ while (chunk_no > 0 && GetChunk(chunk_no).start.chars > position) {
chunk_no--;
}
// Did we find the terminating (zero-length) chunk? Then we're seeking
// behind the end of the data, and position does not exist.
// Set current_ to point to the terminating chunk.
- if (chunks_[chunk_no].length == 0) {
- current_ = {chunk_no, chunks_[chunk_no].start};
+ if (GetChunk(chunk_no).length == 0) {
+ current_ = {chunk_no, GetChunk(chunk_no).start};
return;
}
// Did we find the non-last chunk? Then our position must be within chunk_no.
- if (chunk_no + 1 < chunks_.size()) {
+ if (chunk_no + 1 < chunks_->size()) {
// Fancy-pants optimization for ASCII chunks within a utf-8 stream.
// (Many web sites declare utf-8 encoding, but use only (or almost only) the
// ASCII subset for their JavaScript sources. We can exploit this, by
// checking whether the # bytes in a chunk are equal to the # chars, and if
// so avoid the expensive SkipToPosition.)
bool ascii_only_chunk =
- chunks_[chunk_no].start.incomplete_char == 0 &&
- (chunks_[chunk_no + 1].start.bytes - chunks_[chunk_no].start.bytes) ==
- (chunks_[chunk_no + 1].start.chars - chunks_[chunk_no].start.chars);
+ GetChunk(chunk_no).start.incomplete_char == 0 &&
+ (GetChunk(chunk_no + 1).start.bytes - GetChunk(chunk_no).start.bytes) ==
+ (GetChunk(chunk_no + 1).start.chars -
+ GetChunk(chunk_no).start.chars);
if (ascii_only_chunk) {
- size_t skip = position - chunks_[chunk_no].start.chars;
+ size_t skip = position - GetChunk(chunk_no).start.chars;
current_ = {chunk_no,
- {chunks_[chunk_no].start.bytes + skip,
- chunks_[chunk_no].start.chars + skip, 0,
+ {GetChunk(chunk_no).start.bytes + skip,
+ GetChunk(chunk_no).start.chars + skip, 0,
unibrow::Utf8::State::kAccept}};
} else {
- current_ = {chunk_no, chunks_[chunk_no].start};
+ current_ = {chunk_no, GetChunk(chunk_no).start};
SkipToPosition(position);
}
@@ -792,12 +809,12 @@ void Utf8ExternalStreamingStream::SearchPosition(size_t position) {
// What's left: We're in the last, non-terminating chunk. Our position
// may be in the chunk, but it may also be in 'future' chunks, which we'll
// have to obtain.
- DCHECK_EQ(chunk_no, chunks_.size() - 1);
- current_ = {chunk_no, chunks_[chunk_no].start};
+ DCHECK_EQ(chunk_no, chunks_->size() - 1);
+ current_ = {chunk_no, GetChunk(chunk_no).start};
bool have_more_data = true;
bool found = SkipToPosition(position);
while (have_more_data && !found) {
- DCHECK_EQ(current_.chunk_no, chunks_.size());
+ DCHECK_EQ(current_.chunk_no, chunks_->size());
have_more_data = FetchChunk();
found = have_more_data && SkipToPosition(position);
}
@@ -805,9 +822,9 @@ void Utf8ExternalStreamingStream::SearchPosition(size_t position) {
// We'll return with a position != the desired position only if we're out
// of data. In that case, we'll point to the terminating chunk.
DCHECK_EQ(found, current_.pos.chars == position);
- DCHECK_EQ(have_more_data, chunks_.back().length != 0);
+ DCHECK_EQ(have_more_data, chunks_->back().length != 0);
DCHECK_IMPLIES(!found, !have_more_data);
- DCHECK_IMPLIES(!found, current_.chunk_no == chunks_.size() - 1);
+ DCHECK_IMPLIES(!found, current_.chunk_no == chunks_->size() - 1);
}
size_t Utf8ExternalStreamingStream::FillBuffer(size_t position) {
@@ -815,8 +832,8 @@ size_t Utf8ExternalStreamingStream::FillBuffer(size_t position) {
buffer_end_ = buffer_;
SearchPosition(position);
- bool out_of_data = current_.chunk_no != chunks_.size() &&
- chunks_[current_.chunk_no].length == 0 &&
+ bool out_of_data = current_.chunk_no != chunks_->size() &&
+ GetChunk(current_.chunk_no).length == 0 &&
current_.pos.incomplete_char == 0;
if (out_of_data) return 0;
@@ -826,7 +843,7 @@ size_t Utf8ExternalStreamingStream::FillBuffer(size_t position) {
// can't guarantee progress with one chunk. Thus we iterate.)
while (!out_of_data && buffer_cursor_ == buffer_end_) {
// At end of current data, but there might be more? Then fetch it.
- if (current_.chunk_no == chunks_.size()) {
+ if (current_.chunk_no == chunks_->size()) {
out_of_data = !FetchChunk();
}
FillBufferFromCurrentChunk();
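
Both streaming streams in this file become cloneable by moving their chunk vectors behind a std::shared_ptr: clones share the already-fetched chunks but carry a null source, so only the original stream is allowed to fetch more data. A stripped-down sketch of that ownership scheme; std::string stands in for the unique_ptr-owned character buffers the real chunks hold:

    #include <cassert>
    #include <cstddef>
    #include <memory>
    #include <string>
    #include <vector>

    // Toy external source: a cloned stream must never pull from it.
    struct Source {
      std::string next_chunk;
    };

    class CloneableChunkedStream {
     public:
      explicit CloneableChunkedStream(Source* source)
          : source_(source),
            chunks_(std::make_shared<std::vector<std::string>>()) {}

      // Clones share the already-fetched chunks but have a null source, just
      // as the cloned ChunkedStream / Utf8ExternalStreamingStream above do.
      CloneableChunkedStream Clone() const {
        CloneableChunkedStream clone(nullptr);
        clone.chunks_ = chunks_;
        return clone;
      }

      void Fetch() {
        assert(source_ != nullptr);  // mirrors DCHECK_NOT_NULL(source_)
        chunks_->push_back(source_->next_chunk);
      }

      size_t available_chunks() const { return chunks_->size(); }

     private:
      Source* source_;
      std::shared_ptr<std::vector<std::string>> chunks_;
    };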
diff --git a/deps/v8/src/parsing/scanner.cc b/deps/v8/src/parsing/scanner.cc
index cbfd399020..d3a3095c93 100644
--- a/deps/v8/src/parsing/scanner.cc
+++ b/deps/v8/src/parsing/scanner.cc
@@ -211,7 +211,7 @@ Token::Value Scanner::SkipSingleLineComment() {
// separately by the lexical grammar and becomes part of the
// stream of input elements for the syntactic grammar (see
// ECMA-262, section 7.4).
- AdvanceUntil([](base::uc32 c0_) { return unibrow::IsLineTerminator(c0_); });
+ AdvanceUntil([](base::uc32 c0) { return unibrow::IsLineTerminator(c0); });
return Token::WHITESPACE;
}
diff --git a/deps/v8/src/profiler/cpu-profiler.cc b/deps/v8/src/profiler/cpu-profiler.cc
index 829f2ab67f..99dbd9f9c1 100644
--- a/deps/v8/src/profiler/cpu-profiler.cc
+++ b/deps/v8/src/profiler/cpu-profiler.cc
@@ -190,7 +190,7 @@ void ProfilerEventsProcessor::StopSynchronously() {
bool ProfilerEventsProcessor::ProcessCodeEvent() {
CodeEventsContainer record;
if (events_buffer_.Dequeue(&record)) {
- if (record.generic.type == CodeEventRecord::NATIVE_CONTEXT_MOVE) {
+ if (record.generic.type == CodeEventRecord::Type::kNativeContextMove) {
NativeContextMoveEventRecord& nc_record =
record.NativeContextMoveEventRecord_;
profiles_->UpdateNativeContextAddressForCurrentProfiles(
@@ -207,14 +207,14 @@ bool ProfilerEventsProcessor::ProcessCodeEvent() {
void ProfilerEventsProcessor::CodeEventHandler(
const CodeEventsContainer& evt_rec) {
switch (evt_rec.generic.type) {
- case CodeEventRecord::CODE_CREATION:
- case CodeEventRecord::CODE_MOVE:
- case CodeEventRecord::CODE_DISABLE_OPT:
- case CodeEventRecord::CODE_DELETE:
- case CodeEventRecord::NATIVE_CONTEXT_MOVE:
+ case CodeEventRecord::Type::kCodeCreation:
+ case CodeEventRecord::Type::kCodeMove:
+ case CodeEventRecord::Type::kCodeDisableOpt:
+ case CodeEventRecord::Type::kCodeDelete:
+ case CodeEventRecord::Type::kNativeContextMove:
Enqueue(evt_rec);
break;
- case CodeEventRecord::CODE_DEOPT: {
+ case CodeEventRecord::Type::kCodeDeopt: {
const CodeDeoptEventRecord* rec = &evt_rec.CodeDeoptEventRecord_;
Address pc = rec->pc;
int fp_to_sp_delta = rec->fp_to_sp_delta;
@@ -222,20 +222,23 @@ void ProfilerEventsProcessor::CodeEventHandler(
AddDeoptStack(pc, fp_to_sp_delta);
break;
}
- case CodeEventRecord::NONE:
- case CodeEventRecord::REPORT_BUILTIN:
+ case CodeEventRecord::Type::kNoEvent:
+ case CodeEventRecord::Type::kReportBuiltin:
UNREACHABLE();
}
}
void SamplingEventsProcessor::SymbolizeAndAddToProfiles(
const TickSampleEventRecord* record) {
+ const TickSample& tick_sample = record->sample;
Symbolizer::SymbolizedSample symbolized =
- symbolizer_->SymbolizeTickSample(record->sample);
+ symbolizer_->SymbolizeTickSample(tick_sample);
profiles_->AddPathToCurrentProfiles(
- record->sample.timestamp, symbolized.stack_trace, symbolized.src_line,
- record->sample.update_stats, record->sample.sampling_interval,
- reinterpret_cast<Address>(record->sample.context));
+ tick_sample.timestamp, symbolized.stack_trace, symbolized.src_line,
+ tick_sample.update_stats_, tick_sample.sampling_interval_,
+ tick_sample.state, tick_sample.embedder_state,
+ reinterpret_cast<Address>(tick_sample.context),
+ reinterpret_cast<Address>(tick_sample.embedder_context));
}
ProfilerEventsProcessor::SampleProcessingResult
@@ -378,7 +381,7 @@ void ProfilerCodeObserver::CodeEventHandlerInternal(
CodeEventsContainer record = evt_rec;
switch (evt_rec.generic.type) {
#define PROFILER_TYPE_CASE(type, clss) \
- case CodeEventRecord::type: \
+ case CodeEventRecord::Type::type: \
record.clss##_.UpdateCodeMap(&code_map_); \
break;
@@ -408,7 +411,7 @@ void ProfilerCodeObserver::LogBuiltins() {
DCHECK(builtins->is_initialized());
for (Builtin builtin = Builtins::kFirst; builtin <= Builtins::kLast;
++builtin) {
- CodeEventsContainer evt_rec(CodeEventRecord::REPORT_BUILTIN);
+ CodeEventsContainer evt_rec(CodeEventRecord::Type::kReportBuiltin);
ReportBuiltinEventRecord* rec = &evt_rec.ReportBuiltinEventRecord_;
Code code = builtins->code(builtin);
rec->instruction_start = code.InstructionStart();
@@ -471,6 +474,16 @@ class CpuProfilersManager {
}
}
+ size_t GetAllProfilersMemorySize(Isolate* isolate) {
+ base::MutexGuard lock(&mutex_);
+ size_t estimated_memory = 0;
+ auto range = profilers_.equal_range(isolate);
+ for (auto it = range.first; it != range.second; ++it) {
+ estimated_memory += it->second->GetEstimatedMemoryUsage();
+ }
+ return estimated_memory;
+ }
+
private:
std::unordered_multimap<Isolate*, CpuProfiler*> profilers_;
base::Mutex mutex_;
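
GetAllProfilersMemorySize above walks every profiler registered for an isolate in the manager's unordered_multimap and sums their estimates while holding the manager's mutex. The same equal_range aggregation pattern in isolation, using standard-library types in place of base::Mutex:

    #include <cstddef>
    #include <mutex>
    #include <unordered_map>

    struct Isolate;  // opaque key type

    struct Profiler {
      size_t estimated_memory;
      size_t GetEstimatedMemoryUsage() const { return estimated_memory; }
    };

    class ProfilersManager {
     public:
      void Add(Isolate* isolate, Profiler* profiler) {
        std::lock_guard<std::mutex> lock(mutex_);
        profilers_.emplace(isolate, profiler);
      }

      // Sum the usage of every profiler attached to |isolate|.
      size_t GetAllProfilersMemorySize(Isolate* isolate) {
        std::lock_guard<std::mutex> lock(mutex_);
        size_t total = 0;
        auto range = profilers_.equal_range(isolate);
        for (auto it = range.first; it != range.second; ++it) {
          total += it->second->GetEstimatedMemoryUsage();
        }
        return total;
      }

     private:
      std::unordered_multimap<Isolate*, Profiler*> profilers_;
      std::mutex mutex_;
    };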
@@ -578,6 +591,15 @@ void CpuProfiler::CollectSample() {
}
}
+// static
+size_t CpuProfiler::GetAllProfilersMemorySize(Isolate* isolate) {
+ return GetProfilersManager()->GetAllProfilersMemorySize(isolate);
+}
+
+size_t CpuProfiler::GetEstimatedMemoryUsage() const {
+ return code_observer_->GetEstimatedMemoryUsage();
+}
+
CpuProfilingStatus CpuProfiler::StartProfiling(
const char* title, CpuProfilingOptions options,
std::unique_ptr<DiscardedSamplesDelegate> delegate) {
diff --git a/deps/v8/src/profiler/cpu-profiler.h b/deps/v8/src/profiler/cpu-profiler.h
index ea14d6c618..791c8cc1a1 100644
--- a/deps/v8/src/profiler/cpu-profiler.h
+++ b/deps/v8/src/profiler/cpu-profiler.h
@@ -29,22 +29,22 @@ class CpuProfilesCollection;
class Isolate;
class Symbolizer;
-#define CODE_EVENTS_TYPE_LIST(V) \
- V(CODE_CREATION, CodeCreateEventRecord) \
- V(CODE_MOVE, CodeMoveEventRecord) \
- V(CODE_DISABLE_OPT, CodeDisableOptEventRecord) \
- V(CODE_DEOPT, CodeDeoptEventRecord) \
- V(REPORT_BUILTIN, ReportBuiltinEventRecord) \
- V(CODE_DELETE, CodeDeleteEventRecord)
+#define CODE_EVENTS_TYPE_LIST(V) \
+ V(kCodeCreation, CodeCreateEventRecord) \
+ V(kCodeMove, CodeMoveEventRecord) \
+ V(kCodeDisableOpt, CodeDisableOptEventRecord) \
+ V(kCodeDeopt, CodeDeoptEventRecord) \
+ V(kReportBuiltin, ReportBuiltinEventRecord) \
+ V(kCodeDelete, CodeDeleteEventRecord)
#define VM_EVENTS_TYPE_LIST(V) \
CODE_EVENTS_TYPE_LIST(V) \
- V(NATIVE_CONTEXT_MOVE, NativeContextMoveEventRecord)
+ V(kNativeContextMove, NativeContextMoveEventRecord)
class CodeEventRecord {
public:
#define DECLARE_TYPE(type, ignore) type,
- enum Type { NONE = 0, VM_EVENTS_TYPE_LIST(DECLARE_TYPE) };
+ enum class Type { kNoEvent = 0, VM_EVENTS_TYPE_LIST(DECLARE_TYPE) };
#undef DECLARE_TYPE
Type type;
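
The event type list keeps its X-macro form above, but the generated enumerators move into an enum class with kCamelCase names, which is why every use site in cpu-profiler.cc now spells out CodeEventRecord::Type::. A compact sketch of the same X-macro plus scoped-enum combination, with a shortened event list:

    #include <cstdio>

    // X-macro listing event kinds together with their record structs.
    #define EVENTS_TYPE_LIST(V)               \
      V(kCodeCreation, CodeCreateEventRecord) \
      V(kCodeMove, CodeMoveEventRecord)

    class CodeEventRecord {
     public:
    #define DECLARE_TYPE(type, ignore) type,
      // Scoped enum: enumerators no longer leak into the enclosing scope and
      // cannot be compared against plain ints without a cast.
      enum class Type { kNoEvent = 0, EVENTS_TYPE_LIST(DECLARE_TYPE) };
    #undef DECLARE_TYPE

      Type type = Type::kNoEvent;
    };

    int main() {
      CodeEventRecord record;
      record.type = CodeEventRecord::Type::kCodeMove;
      if (record.type == CodeEventRecord::Type::kCodeMove) {
        std::printf("code move event\n");
      }
    }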
@@ -135,7 +135,7 @@ class CodeDeleteEventRecord : public CodeEventRecord {
class CodeEventsContainer {
public:
explicit CodeEventsContainer(
- CodeEventRecord::Type type = CodeEventRecord::NONE) {
+ CodeEventRecord::Type type = CodeEventRecord::Type::kNoEvent) {
generic.type = type;
}
union {
@@ -333,6 +333,7 @@ class V8_EXPORT_PRIVATE CpuProfiler {
CpuProfiler& operator=(const CpuProfiler&) = delete;
static void CollectSample(Isolate* isolate);
+ static size_t GetAllProfilersMemorySize(Isolate* isolate);
using ProfilingMode = v8::CpuProfilingMode;
using NamingMode = v8::CpuProfilingNamingMode;
@@ -343,6 +344,7 @@ class V8_EXPORT_PRIVATE CpuProfiler {
void set_sampling_interval(base::TimeDelta value);
void set_use_precise_sampling(bool);
void CollectSample();
+ size_t GetEstimatedMemoryUsage() const;
StartProfilingStatus StartProfiling(
const char* title, CpuProfilingOptions options = {},
std::unique_ptr<DiscardedSamplesDelegate> delegate = nullptr);
diff --git a/deps/v8/src/profiler/heap-snapshot-generator.cc b/deps/v8/src/profiler/heap-snapshot-generator.cc
index 75f0daa13b..d7e384494d 100644
--- a/deps/v8/src/profiler/heap-snapshot-generator.cc
+++ b/deps/v8/src/profiler/heap-snapshot-generator.cc
@@ -27,8 +27,8 @@
#include "src/objects/js-generator-inl.h"
#include "src/objects/js-promise-inl.h"
#include "src/objects/js-regexp-inl.h"
+#include "src/objects/js-weak-refs-inl.h"
#include "src/objects/literal-objects-inl.h"
-#include "src/objects/objects-body-descriptors.h"
#include "src/objects/objects-inl.h"
#include "src/objects/prototype.h"
#include "src/objects/slots-inl.h"
@@ -127,7 +127,7 @@ void HeapEntry::Print(const char* prefix, const char* edge_name, int max_depth,
HeapGraphEdge& edge = **i;
const char* edge_prefix = "";
base::EmbeddedVector<char, 64> index;
- const char* edge_name = index.begin();
+ edge_name = index.begin();
switch (edge.type()) {
case HeapGraphEdge::kContextVariable:
edge_prefix = "#";
@@ -423,14 +423,16 @@ void HeapObjectsMap::UpdateHeapObjectsMap() {
}
heap_->PreciseCollectAllGarbage(Heap::kNoGCFlags,
GarbageCollectionReason::kHeapProfiler);
+ PtrComprCageBase cage_base(heap_->isolate());
CombinedHeapObjectIterator iterator(heap_);
for (HeapObject obj = iterator.Next(); !obj.is_null();
obj = iterator.Next()) {
- FindOrAddEntry(obj.address(), obj.Size());
+ int object_size = obj.Size(cage_base);
+ FindOrAddEntry(obj.address(), object_size);
if (FLAG_heap_profiler_trace_objects) {
PrintF("Update object : %p %6d. Next address is %p\n",
- reinterpret_cast<void*>(obj.address()), obj.Size(),
- reinterpret_cast<void*>(obj.address() + obj.Size()));
+ reinterpret_cast<void*>(obj.address()), object_size,
+ reinterpret_cast<void*>(obj.address() + object_size));
}
}
RemoveDeadEntries();
@@ -659,8 +661,8 @@ HeapEntry* V8HeapExplorer::AddEntry(HeapObject object, HeapEntry::Type type,
if (FLAG_heap_profiler_show_hidden_objects && type == HeapEntry::kHidden) {
type = HeapEntry::kNative;
}
-
- return AddEntry(object.address(), type, name, object.Size());
+ PtrComprCageBase cage_base(isolate());
+ return AddEntry(object.address(), type, name, object.Size(cage_base));
}
HeapEntry* V8HeapExplorer::AddEntry(Address address,
@@ -731,7 +733,8 @@ class IndexedReferencesExtractor : public ObjectVisitorWithCageBases {
generator_(generator),
parent_obj_(parent_obj),
parent_start_(parent_obj_.RawMaybeWeakField(0)),
- parent_end_(parent_obj_.RawMaybeWeakField(parent_obj_.Size())),
+ parent_end_(
+ parent_obj_.RawMaybeWeakField(parent_obj_.Size(cage_base()))),
parent_(parent),
next_index_(0) {}
void VisitPointers(HeapObject host, ObjectSlot start,
@@ -824,6 +827,8 @@ void V8HeapExplorer::ExtractReferences(HeapEntry* entry, HeapObject obj) {
ExtractJSPromiseReferences(entry, JSPromise::cast(obj));
} else if (obj.IsJSGeneratorObject()) {
ExtractJSGeneratorObjectReferences(entry, JSGeneratorObject::cast(obj));
+ } else if (obj.IsJSWeakRef()) {
+ ExtractJSWeakRefReferences(entry, JSWeakRef::cast(obj));
}
ExtractJSObjectReferences(entry, JSObject::cast(obj));
} else if (obj.IsString()) {
@@ -869,6 +874,8 @@ void V8HeapExplorer::ExtractReferences(HeapEntry* entry, HeapObject obj) {
ExtractEphemeronHashTableReferences(entry, EphemeronHashTable::cast(obj));
} else if (obj.IsFixedArray()) {
ExtractFixedArrayReferences(entry, FixedArray::cast(obj));
+ } else if (obj.IsWeakCell()) {
+ ExtractWeakCellReferences(entry, WeakCell::cast(obj));
} else if (obj.IsHeapNumber()) {
if (snapshot_->capture_numeric_value()) {
ExtractNumberReference(entry, obj);
@@ -1216,7 +1223,22 @@ void V8HeapExplorer::ExtractAccessorPairReferences(HeapEntry* entry,
AccessorPair::kSetterOffset);
}
-void V8HeapExplorer::TagBuiltinCodeObject(Code code, const char* name) {
+void V8HeapExplorer::ExtractJSWeakRefReferences(HeapEntry* entry,
+ JSWeakRef js_weak_ref) {
+ SetWeakReference(entry, "target", js_weak_ref.target(),
+ JSWeakRef::kTargetOffset);
+}
+
+void V8HeapExplorer::ExtractWeakCellReferences(HeapEntry* entry,
+ WeakCell weak_cell) {
+ SetWeakReference(entry, "target", weak_cell.target(),
+ WeakCell::kTargetOffset);
+ SetWeakReference(entry, "unregister_token", weak_cell.unregister_token(),
+ WeakCell::kUnregisterTokenOffset);
+}
+
+void V8HeapExplorer::TagBuiltinCodeObject(Object code, const char* name) {
+ DCHECK(code.IsCode() || (V8_EXTERNAL_CODE_SPACE_BOOL && code.IsCodeT()));
TagObject(code, names_->GetFormatted("(%s builtin)", name));
}
@@ -1577,7 +1599,7 @@ class RootsReferencesExtractor : public RootVisitor {
void VisitRootPointer(Root root, const char* description,
FullObjectSlot object) override {
if (root == Root::kBuiltins) {
- explorer_->TagBuiltinCodeObject(Code::cast(*object), description);
+ explorer_->TagBuiltinCodeObject(*object, description);
}
explorer_->SetGcSubrootReference(root, description, visiting_weak_roots_,
*object);
@@ -1595,13 +1617,39 @@ class RootsReferencesExtractor : public RootVisitor {
OffHeapObjectSlot start,
OffHeapObjectSlot end) override {
DCHECK_EQ(root, Root::kStringTable);
- PtrComprCageBase cage_base = Isolate::FromHeap(explorer_->heap_);
+ PtrComprCageBase cage_base(explorer_->heap_->isolate());
for (OffHeapObjectSlot p = start; p < end; ++p) {
explorer_->SetGcSubrootReference(root, description, visiting_weak_roots_,
p.load(cage_base));
}
}
+ void VisitRunningCode(FullObjectSlot p) override {
+ // Must match behavior in
+ // MarkCompactCollector::RootMarkingVisitor::VisitRunningCode, which treats
+ // deoptimization literals in running code as stack roots.
+ Code code = Code::cast(*p);
+ if (code.kind() != CodeKind::BASELINE) {
+ DeoptimizationData deopt_data =
+ DeoptimizationData::cast(code.deoptimization_data());
+ if (deopt_data.length() > 0) {
+ DeoptimizationLiteralArray literals = deopt_data.LiteralArray();
+ int literals_length = literals.length();
+ for (int i = 0; i < literals_length; ++i) {
+ MaybeObject maybe_literal = literals.Get(i);
+ HeapObject heap_literal;
+ if (maybe_literal.GetHeapObject(&heap_literal)) {
+ VisitRootPointer(Root::kStackRoots, nullptr,
+ FullObjectSlot(&heap_literal));
+ }
+ }
+ }
+ }
+
+ // Finally visit the Code itself.
+ VisitRootPointer(Root::kStackRoots, nullptr, p);
+ }
+
private:
V8HeapExplorer* explorer_;
bool visiting_weak_roots_;
@@ -1634,12 +1682,13 @@ bool V8HeapExplorer::IterateAndExtractReferences(
CombinedHeapObjectIterator iterator(heap_,
HeapObjectIterator::kFilterUnreachable);
+ PtrComprCageBase cage_base(heap_->isolate());
// Heap iteration with filtering must be finished in any case.
for (HeapObject obj = iterator.Next(); !obj.is_null();
obj = iterator.Next(), progress_->ProgressStep()) {
if (interrupted) continue;
- size_t max_pointer = obj.Size() / kTaggedSize;
+ size_t max_pointer = obj.Size(cage_base) / kTaggedSize;
if (max_pointer > visited_fields_.size()) {
// Clear the current bits.
std::vector<bool>().swap(visited_fields_);
@@ -1649,11 +1698,12 @@ bool V8HeapExplorer::IterateAndExtractReferences(
HeapEntry* entry = GetEntry(obj);
ExtractReferences(entry, obj);
- SetInternalReference(entry, "map", obj.map(), HeapObject::kMapOffset);
+ SetInternalReference(entry, "map", obj.map(cage_base),
+ HeapObject::kMapOffset);
// Extract unvisited fields as hidden references and restore tags
// of visited fields.
IndexedReferencesExtractor refs_extractor(this, obj, entry);
- obj.Iterate(&refs_extractor);
+ obj.Iterate(cage_base, &refs_extractor);
// Ensure visited_fields_ doesn't leak to the next object.
for (size_t i = 0; i < max_pointer; ++i) {
@@ -1697,6 +1747,9 @@ bool V8HeapExplorer::IsEssentialHiddenReference(Object parent,
if (parent.IsContext() &&
field_offset == Context::OffsetOfElementAt(Context::NEXT_CONTEXT_LINK))
return false;
+ if (parent.IsJSFinalizationRegistry() &&
+ field_offset == JSFinalizationRegistry::kNextDirtyOffset)
+ return false;
return true;
}
@@ -1806,7 +1859,7 @@ void V8HeapExplorer::SetWeakReference(HeapEntry* parent_entry, int index,
void V8HeapExplorer::SetDataOrAccessorPropertyReference(
PropertyKind kind, HeapEntry* parent_entry, Name reference_name,
Object child_obj, const char* name_format_string, int field_offset) {
- if (kind == kAccessor) {
+ if (kind == PropertyKind::kAccessor) {
ExtractAccessorPairProperty(parent_entry, reference_name, child_obj,
field_offset);
} else {
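The recurring change in heap-snapshot-generator.cc is to construct a PtrComprCageBase from the isolate once and pass it explicitly into the HeapObject accessors (Size, map, Iterate) instead of letting every call re-derive it. A condensed sketch of the pattern, using the V8-internal types from the hunks above (not a standalone program):

    // Condensed from the hunks above; assumes V8-internal headers.
    PtrComprCageBase cage_base(heap_->isolate());   // derived from the isolate once
    for (HeapObject obj = iterator.Next(); !obj.is_null(); obj = iterator.Next()) {
      int object_size = obj.Size(cage_base);        // was obj.Size()
      Map map = obj.map(cage_base);                 // was obj.map()
      obj.Iterate(cage_base, &refs_extractor);      // was obj.Iterate(&refs_extractor)
      // ... object_size and map are then used exactly as before ...
    }
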
diff --git a/deps/v8/src/profiler/heap-snapshot-generator.h b/deps/v8/src/profiler/heap-snapshot-generator.h
index 70882153cf..a4f7f5ac48 100644
--- a/deps/v8/src/profiler/heap-snapshot-generator.h
+++ b/deps/v8/src/profiler/heap-snapshot-generator.h
@@ -358,7 +358,7 @@ class V8_EXPORT_PRIVATE V8HeapExplorer : public HeapEntriesAllocator {
bool IterateAndExtractReferences(HeapSnapshotGenerator* generator);
void CollectGlobalObjectsTags();
void MakeGlobalObjectTagMap(const SafepointScope& safepoint_scope);
- void TagBuiltinCodeObject(Code code, const char* name);
+ void TagBuiltinCodeObject(Object code, const char* name);
HeapEntry* AddEntry(Address address,
HeapEntry::Type type,
const char* name,
@@ -398,6 +398,8 @@ class V8_EXPORT_PRIVATE V8HeapExplorer : public HeapEntriesAllocator {
void ExtractAccessorPairReferences(HeapEntry* entry, AccessorPair accessors);
void ExtractCodeReferences(HeapEntry* entry, Code code);
void ExtractCellReferences(HeapEntry* entry, Cell cell);
+ void ExtractJSWeakRefReferences(HeapEntry* entry, JSWeakRef js_weak_ref);
+ void ExtractWeakCellReferences(HeapEntry* entry, WeakCell weak_cell);
void ExtractFeedbackCellReferences(HeapEntry* entry,
FeedbackCell feedback_cell);
void ExtractPropertyCellReferences(HeapEntry* entry, PropertyCell cell);
diff --git a/deps/v8/src/profiler/profile-generator.cc b/deps/v8/src/profiler/profile-generator.cc
index 34a15159a3..1a4665b874 100644
--- a/deps/v8/src/profiler/profile-generator.cc
+++ b/deps/v8/src/profiler/profile-generator.cc
@@ -195,7 +195,6 @@ const std::vector<CodeEntryAndLineNumber>* CodeEntry::GetInlineStack(
void CodeEntry::set_deopt_info(
const char* deopt_reason, int deopt_id,
std::vector<CpuProfileDeoptFrame> inlined_frames) {
- DCHECK(!has_deopt_info());
RareData* rare_data = EnsureRareData();
rare_data->deopt_reason_ = deopt_reason;
rare_data->deopt_id_ = deopt_id;
@@ -208,7 +207,7 @@ void CodeEntry::FillFunctionInfo(SharedFunctionInfo shared) {
set_script_id(script.id());
set_position(shared.StartPosition());
if (shared.optimization_disabled()) {
- set_bailout_reason(GetBailoutReason(shared.disable_optimization_reason()));
+ set_bailout_reason(GetBailoutReason(shared.disabled_optimization_reason()));
}
}
@@ -621,7 +620,9 @@ bool CpuProfile::CheckSubsample(base::TimeDelta source_sampling_interval) {
void CpuProfile::AddPath(base::TimeTicks timestamp,
const ProfileStackTrace& path, int src_line,
- bool update_stats, base::TimeDelta sampling_interval) {
+ bool update_stats, base::TimeDelta sampling_interval,
+ StateTag state_tag,
+ EmbedderStateTag embedder_state_tag) {
if (!CheckSubsample(sampling_interval)) return;
ProfileNode* top_frame_node =
@@ -633,7 +634,8 @@ void CpuProfile::AddPath(base::TimeTicks timestamp,
samples_.size() < options_.max_samples());
if (should_record_sample) {
- samples_.push_back({top_frame_node, timestamp, src_line});
+ samples_.push_back(
+ {top_frame_node, timestamp, src_line, state_tag, embedder_state_tag});
}
if (!should_record_sample && delegate_ != nullptr) {
@@ -989,19 +991,24 @@ base::TimeDelta CpuProfilesCollection::GetCommonSamplingInterval() const {
void CpuProfilesCollection::AddPathToCurrentProfiles(
base::TimeTicks timestamp, const ProfileStackTrace& path, int src_line,
- bool update_stats, base::TimeDelta sampling_interval,
- Address native_context_address) {
+ bool update_stats, base::TimeDelta sampling_interval, StateTag state,
+ EmbedderStateTag embedder_state_tag, Address native_context_address,
+ Address embedder_native_context_address) {
// As starting / stopping profiles is rare relative to this
// method, we don't bother minimizing the duration of lock holding,
// e.g. copying contents of the list to a local vector.
current_profiles_semaphore_.Wait();
const ProfileStackTrace empty_path;
for (const std::unique_ptr<CpuProfile>& profile : current_profiles_) {
+ ContextFilter& context_filter = profile->context_filter();
// If the context filter check failed, omit the contents of the stack.
- bool accepts_context =
- profile->context_filter().Accept(native_context_address);
+ bool accepts_context = context_filter.Accept(native_context_address);
+ bool accepts_embedder_context =
+ context_filter.Accept(embedder_native_context_address);
profile->AddPath(timestamp, accepts_context ? path : empty_path, src_line,
- update_stats, sampling_interval);
+ update_stats, sampling_interval, state,
+ accepts_embedder_context ? embedder_state_tag
+ : EmbedderStateTag::EMPTY);
}
current_profiles_semaphore_.Signal();
}
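The behavioural core of the AddPathToCurrentProfiles change, pulled out of the surrounding hunk: the same per-profile ContextFilter now gates both the JS stack and the embedder state tag, so a profile that does not accept the embedder's native context records EmbedderStateTag::EMPTY for that sample.

    // Condensed from the hunk above (V8-internal types, not standalone).
    ContextFilter& context_filter = profile->context_filter();
    bool accepts_context = context_filter.Accept(native_context_address);
    bool accepts_embedder_context =
        context_filter.Accept(embedder_native_context_address);
    profile->AddPath(timestamp, accepts_context ? path : empty_path, src_line,
                     update_stats, sampling_interval, state,
                     accepts_embedder_context ? embedder_state_tag
                                              : EmbedderStateTag::EMPTY);
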
diff --git a/deps/v8/src/profiler/profile-generator.h b/deps/v8/src/profiler/profile-generator.h
index bb0adbfe3b..85402564ff 100644
--- a/deps/v8/src/profiler/profile-generator.h
+++ b/deps/v8/src/profiler/profile-generator.h
@@ -17,6 +17,7 @@
#include "include/v8-profiler.h"
#include "src/base/platform/time.h"
#include "src/builtins/builtins.h"
+#include "src/execution/vm-state.h"
#include "src/logging/code-events.h"
#include "src/profiler/strings-storage.h"
#include "src/utils/allocation.h"
@@ -405,6 +406,8 @@ class CpuProfile {
ProfileNode* node;
base::TimeTicks timestamp;
int line;
+ StateTag state_tag;
+ EmbedderStateTag embedder_state_tag;
};
V8_EXPORT_PRIVATE CpuProfile(
@@ -419,7 +422,8 @@ class CpuProfile {
// Add pc -> ... -> main() call path to the profile.
void AddPath(base::TimeTicks timestamp, const ProfileStackTrace& path,
int src_line, bool update_stats,
- base::TimeDelta sampling_interval);
+ base::TimeDelta sampling_interval, StateTag state,
+ EmbedderStateTag embedder_state);
void FinishProfile();
const char* title() const { return title_; }
@@ -554,11 +558,12 @@ class V8_EXPORT_PRIVATE CpuProfilesCollection {
base::TimeDelta GetCommonSamplingInterval() const;
// Called from profile generator thread.
- void AddPathToCurrentProfiles(base::TimeTicks timestamp,
- const ProfileStackTrace& path, int src_line,
- bool update_stats,
- base::TimeDelta sampling_interval,
- Address native_context_address = kNullAddress);
+ void AddPathToCurrentProfiles(
+ base::TimeTicks timestamp, const ProfileStackTrace& path, int src_line,
+ bool update_stats, base::TimeDelta sampling_interval, StateTag state,
+ EmbedderStateTag embedder_state_tag,
+ Address native_context_address = kNullAddress,
+ Address native_embedder_context_address = kNullAddress);
// Called from profile generator thread.
void UpdateNativeContextAddressForCurrentProfiles(Address from, Address to);
diff --git a/deps/v8/src/profiler/profiler-listener.cc b/deps/v8/src/profiler/profiler-listener.cc
index 57e3b3f3b1..af8581a8ac 100644
--- a/deps/v8/src/profiler/profiler-listener.cc
+++ b/deps/v8/src/profiler/profiler-listener.cc
@@ -43,7 +43,7 @@ ProfilerListener::~ProfilerListener() = default;
void ProfilerListener::CodeCreateEvent(LogEventsAndTags tag,
Handle<AbstractCode> code,
const char* name) {
- CodeEventsContainer evt_rec(CodeEventRecord::CODE_CREATION);
+ CodeEventsContainer evt_rec(CodeEventRecord::Type::kCodeCreation);
CodeCreateEventRecord* rec = &evt_rec.CodeCreateEventRecord_;
rec->instruction_start = code->InstructionStart();
rec->entry =
@@ -58,7 +58,7 @@ void ProfilerListener::CodeCreateEvent(LogEventsAndTags tag,
void ProfilerListener::CodeCreateEvent(LogEventsAndTags tag,
Handle<AbstractCode> code,
Handle<Name> name) {
- CodeEventsContainer evt_rec(CodeEventRecord::CODE_CREATION);
+ CodeEventsContainer evt_rec(CodeEventRecord::Type::kCodeCreation);
CodeCreateEventRecord* rec = &evt_rec.CodeCreateEventRecord_;
rec->instruction_start = code->InstructionStart();
rec->entry =
@@ -74,7 +74,7 @@ void ProfilerListener::CodeCreateEvent(LogEventsAndTags tag,
Handle<AbstractCode> code,
Handle<SharedFunctionInfo> shared,
Handle<Name> script_name) {
- CodeEventsContainer evt_rec(CodeEventRecord::CODE_CREATION);
+ CodeEventsContainer evt_rec(CodeEventRecord::Type::kCodeCreation);
CodeCreateEventRecord* rec = &evt_rec.CodeCreateEventRecord_;
rec->instruction_start = code->InstructionStart();
rec->entry =
@@ -111,7 +111,7 @@ void ProfilerListener::CodeCreateEvent(LogEventsAndTags tag,
Handle<SharedFunctionInfo> shared,
Handle<Name> script_name, int line,
int column) {
- CodeEventsContainer evt_rec(CodeEventRecord::CODE_CREATION);
+ CodeEventsContainer evt_rec(CodeEventRecord::Type::kCodeCreation);
CodeCreateEventRecord* rec = &evt_rec.CodeCreateEventRecord_;
rec->instruction_start = abstract_code->InstructionStart();
std::unique_ptr<SourcePositionTable> line_table;
@@ -177,7 +177,7 @@ void ProfilerListener::CodeCreateEvent(LogEventsAndTags tag,
if (pos_info.position.ScriptOffset() == kNoSourcePosition) continue;
if (pos_info.script.is_null()) continue;
- int line_number =
+ line_number =
pos_info.script->GetLineNumber(pos_info.position.ScriptOffset()) +
1;
@@ -235,7 +235,7 @@ void ProfilerListener::CodeCreateEvent(LogEventsAndTags tag,
wasm::WasmName name,
const char* source_url, int code_offset,
int script_id) {
- CodeEventsContainer evt_rec(CodeEventRecord::CODE_CREATION);
+ CodeEventsContainer evt_rec(CodeEventRecord::Type::kCodeCreation);
CodeCreateEventRecord* rec = &evt_rec.CodeCreateEventRecord_;
rec->instruction_start = code->instruction_start();
rec->entry = code_entries_.Create(tag, GetName(name), GetName(source_url), 1,
@@ -249,7 +249,7 @@ void ProfilerListener::CodeCreateEvent(LogEventsAndTags tag,
#endif // V8_ENABLE_WEBASSEMBLY
void ProfilerListener::CallbackEvent(Handle<Name> name, Address entry_point) {
- CodeEventsContainer evt_rec(CodeEventRecord::CODE_CREATION);
+ CodeEventsContainer evt_rec(CodeEventRecord::Type::kCodeCreation);
CodeCreateEventRecord* rec = &evt_rec.CodeCreateEventRecord_;
rec->instruction_start = entry_point;
rec->entry =
@@ -260,7 +260,7 @@ void ProfilerListener::CallbackEvent(Handle<Name> name, Address entry_point) {
void ProfilerListener::GetterCallbackEvent(Handle<Name> name,
Address entry_point) {
- CodeEventsContainer evt_rec(CodeEventRecord::CODE_CREATION);
+ CodeEventsContainer evt_rec(CodeEventRecord::Type::kCodeCreation);
CodeCreateEventRecord* rec = &evt_rec.CodeCreateEventRecord_;
rec->instruction_start = entry_point;
rec->entry = code_entries_.Create(CodeEventListener::CALLBACK_TAG,
@@ -271,7 +271,7 @@ void ProfilerListener::GetterCallbackEvent(Handle<Name> name,
void ProfilerListener::SetterCallbackEvent(Handle<Name> name,
Address entry_point) {
- CodeEventsContainer evt_rec(CodeEventRecord::CODE_CREATION);
+ CodeEventsContainer evt_rec(CodeEventRecord::Type::kCodeCreation);
CodeCreateEventRecord* rec = &evt_rec.CodeCreateEventRecord_;
rec->instruction_start = entry_point;
rec->entry = code_entries_.Create(CodeEventListener::CALLBACK_TAG,
@@ -282,7 +282,7 @@ void ProfilerListener::SetterCallbackEvent(Handle<Name> name,
void ProfilerListener::RegExpCodeCreateEvent(Handle<AbstractCode> code,
Handle<String> source) {
- CodeEventsContainer evt_rec(CodeEventRecord::CODE_CREATION);
+ CodeEventsContainer evt_rec(CodeEventRecord::Type::kCodeCreation);
CodeCreateEventRecord* rec = &evt_rec.CodeCreateEventRecord_;
rec->instruction_start = code->InstructionStart();
rec->entry = code_entries_.Create(
@@ -296,7 +296,7 @@ void ProfilerListener::RegExpCodeCreateEvent(Handle<AbstractCode> code,
void ProfilerListener::CodeMoveEvent(AbstractCode from, AbstractCode to) {
DisallowGarbageCollection no_gc;
- CodeEventsContainer evt_rec(CodeEventRecord::CODE_MOVE);
+ CodeEventsContainer evt_rec(CodeEventRecord::Type::kCodeMove);
CodeMoveEventRecord* rec = &evt_rec.CodeMoveEventRecord_;
rec->from_instruction_start = from.InstructionStart();
rec->to_instruction_start = to.InstructionStart();
@@ -304,7 +304,7 @@ void ProfilerListener::CodeMoveEvent(AbstractCode from, AbstractCode to) {
}
void ProfilerListener::NativeContextMoveEvent(Address from, Address to) {
- CodeEventsContainer evt_rec(CodeEventRecord::NATIVE_CONTEXT_MOVE);
+ CodeEventsContainer evt_rec(CodeEventRecord::Type::kNativeContextMove);
evt_rec.NativeContextMoveEventRecord_.from_address = from;
evt_rec.NativeContextMoveEventRecord_.to_address = to;
DispatchCodeEvent(evt_rec);
@@ -312,10 +312,11 @@ void ProfilerListener::NativeContextMoveEvent(Address from, Address to) {
void ProfilerListener::CodeDisableOptEvent(Handle<AbstractCode> code,
Handle<SharedFunctionInfo> shared) {
- CodeEventsContainer evt_rec(CodeEventRecord::CODE_DISABLE_OPT);
+ CodeEventsContainer evt_rec(CodeEventRecord::Type::kCodeDisableOpt);
CodeDisableOptEventRecord* rec = &evt_rec.CodeDisableOptEventRecord_;
rec->instruction_start = code->InstructionStart();
- rec->bailout_reason = GetBailoutReason(shared->disable_optimization_reason());
+ rec->bailout_reason =
+ GetBailoutReason(shared->disabled_optimization_reason());
DispatchCodeEvent(evt_rec);
}
@@ -324,7 +325,7 @@ void ProfilerListener::CodeDeoptEvent(Handle<Code> code, DeoptimizeKind kind,
bool reuse_code) {
// When reuse_code is true it is just a bailout and not an actual deopt.
if (reuse_code) return;
- CodeEventsContainer evt_rec(CodeEventRecord::CODE_DEOPT);
+ CodeEventsContainer evt_rec(CodeEventRecord::Type::kCodeDeopt);
CodeDeoptEventRecord* rec = &evt_rec.CodeDeoptEventRecord_;
Deoptimizer::DeoptInfo info = Deoptimizer::GetDeoptInfo(*code, pc);
rec->instruction_start = code->InstructionStart();
@@ -342,7 +343,7 @@ void ProfilerListener::CodeDeoptEvent(Handle<Code> code, DeoptimizeKind kind,
void ProfilerListener::WeakCodeClearEvent() { weak_code_registry_.Sweep(this); }
void ProfilerListener::OnHeapObjectDeletion(CodeEntry* entry) {
- CodeEventsContainer evt_rec(CodeEventRecord::CODE_DELETE);
+ CodeEventsContainer evt_rec(CodeEventRecord::Type::kCodeDelete);
evt_rec.CodeDeleteEventRecord_.entry = entry;
DispatchCodeEvent(evt_rec);
}
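Most hunks in profiler-listener.cc are the same mechanical rename: the old unscoped CODE_* constants become members of a nested scoped enum, CodeEventRecord::Type, with kCamelCase names. A self-contained illustration of the pattern, with toy types standing in for the real CodeEventRecord:

    // Toy types only; illustrates the unscoped -> scoped enum migration.
    #include <cassert>

    struct EventRecord {
      // Before: enum { CODE_CREATION, CODE_MOVE };   (unscoped, leaks names)
      enum class Type { kCodeCreation, kCodeMove };   // after: scoped
    };

    int main() {
      EventRecord::Type t = EventRecord::Type::kCodeCreation;  // was CODE_CREATION
      assert(t != EventRecord::Type::kCodeMove);
      return 0;
    }
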
diff --git a/deps/v8/src/profiler/sampling-heap-profiler.cc b/deps/v8/src/profiler/sampling-heap-profiler.cc
index 6a6b2c93ee..45c72ec202 100644
--- a/deps/v8/src/profiler/sampling-heap-profiler.cc
+++ b/deps/v8/src/profiler/sampling-heap-profiler.cc
@@ -75,7 +75,7 @@ void SamplingHeapProfiler::SampleObject(Address soon_object, size_t size) {
DisallowGarbageCollection no_gc;
// Check if the area is iterable by confirming that it starts with a map.
- DCHECK(HeapObject::FromAddress(soon_object).map().IsMap());
+ DCHECK(HeapObject::FromAddress(soon_object).map(isolate_).IsMap(isolate_));
HandleScope scope(isolate_);
HeapObject heap_object = HeapObject::FromAddress(soon_object);
@@ -132,11 +132,11 @@ SamplingHeapProfiler::AllocationNode* SamplingHeapProfiler::AddStack() {
AllocationNode* node = &profile_root_;
std::vector<SharedFunctionInfo> stack;
- JavaScriptFrameIterator it(isolate_);
+ JavaScriptFrameIterator frame_it(isolate_);
int frames_captured = 0;
bool found_arguments_marker_frames = false;
- while (!it.done() && frames_captured < stack_depth_) {
- JavaScriptFrame* frame = it.frame();
+ while (!frame_it.done() && frames_captured < stack_depth_) {
+ JavaScriptFrame* frame = frame_it.frame();
// If we are materializing objects during deoptimization, inlined
// closures may not yet be materialized, and this includes the
// closure on the stack. Skip over any such frames (they'll be
@@ -149,7 +149,7 @@ SamplingHeapProfiler::AllocationNode* SamplingHeapProfiler::AddStack() {
} else {
found_arguments_marker_frames = true;
}
- it.Advance();
+ frame_it.Advance();
}
if (frames_captured == 0) {
diff --git a/deps/v8/src/profiler/symbolizer.cc b/deps/v8/src/profiler/symbolizer.cc
index d3aa629d25..6c9d92b2c1 100644
--- a/deps/v8/src/profiler/symbolizer.cc
+++ b/deps/v8/src/profiler/symbolizer.cc
@@ -125,8 +125,8 @@ Symbolizer::SymbolizedSample Symbolizer::SymbolizeTickSample(
entry->GetInlineStack(pc_offset);
if (inline_stack) {
int most_inlined_frame_line_number = entry->GetSourceLine(pc_offset);
- for (auto entry : *inline_stack) {
- stack_trace.push_back(entry);
+ for (auto inline_stack_entry : *inline_stack) {
+ stack_trace.push_back(inline_stack_entry);
}
// This is a bit of a messy hack. The line number for the most-inlined
diff --git a/deps/v8/src/profiler/tick-sample.cc b/deps/v8/src/profiler/tick-sample.cc
index daef48eb26..20732bfb76 100644
--- a/deps/v8/src/profiler/tick-sample.cc
+++ b/deps/v8/src/profiler/tick-sample.cc
@@ -9,6 +9,7 @@
#include "include/v8-profiler.h"
#include "src/base/sanitizer/asan.h"
#include "src/base/sanitizer/msan.h"
+#include "src/execution/embedder-state.h"
#include "src/execution/frames-inl.h"
#include "src/execution/simulator.h"
#include "src/execution/vm-state-inl.h"
@@ -159,7 +160,7 @@ DISABLE_ASAN void TickSample::Init(Isolate* v8_isolate,
bool update_stats,
bool use_simulator_reg_state,
base::TimeDelta sampling_interval) {
- this->update_stats = update_stats;
+ update_stats_ = update_stats;
SampleInfo info;
RegisterState regs = reg_state;
if (!GetStackSample(v8_isolate, &regs, record_c_entry_frame, stack,
@@ -178,6 +179,8 @@ DISABLE_ASAN void TickSample::Init(Isolate* v8_isolate,
frames_count = static_cast<unsigned>(info.frames_count);
has_external_callback = info.external_callback_entry != nullptr;
context = info.context;
+ embedder_context = info.embedder_context;
+ embedder_state = info.embedder_state;
if (has_external_callback) {
external_callback_entry = info.external_callback_entry;
} else if (frames_count) {
@@ -196,7 +199,7 @@ DISABLE_ASAN void TickSample::Init(Isolate* v8_isolate,
} else {
tos = nullptr;
}
- this->sampling_interval = sampling_interval;
+ sampling_interval_ = sampling_interval;
timestamp = base::TimeTicks::HighResolutionNow();
}
@@ -210,9 +213,19 @@ bool TickSample::GetStackSample(Isolate* v8_isolate, RegisterState* regs,
sample_info->frames_count = 0;
sample_info->vm_state = isolate->current_vm_state();
sample_info->external_callback_entry = nullptr;
+ sample_info->embedder_state = EmbedderStateTag::EMPTY;
+ sample_info->embedder_context = nullptr;
sample_info->context = nullptr;
+
if (sample_info->vm_state == GC) return true;
+ EmbedderState* embedder_state = isolate->current_embedder_state();
+ if (embedder_state != nullptr) {
+ sample_info->embedder_context =
+ reinterpret_cast<void*>(embedder_state->native_context_address());
+ sample_info->embedder_state = embedder_state->GetState();
+ }
+
i::Address js_entry_sp = isolate->js_entry_sp();
if (js_entry_sp == 0) return true; // Not executing JS now.
@@ -352,9 +365,9 @@ void TickSample::print() const {
PrintF(" - has_external_callback: %d\n", has_external_callback);
PrintF(" - %s: %p\n",
has_external_callback ? "external_callback_entry" : "tos", tos);
- PrintF(" - update_stats: %d\n", update_stats);
+ PrintF(" - update_stats: %d\n", update_stats_);
PrintF(" - sampling_interval: %" PRId64 "\n",
- sampling_interval.InMicroseconds());
+ sampling_interval_.InMicroseconds());
PrintF("\n");
}
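tick-sample.{cc,h} now record the embedder native context and embedder state tag on every sample, and rename the update_stats / sampling_interval members with a trailing underscore. The call site that forwards a finished sample into the profiles is outside this diff; a hypothetical sketch of how the new TickSample fields line up with the widened AddPathToCurrentProfiles signature shown in profile-generator.h above:

    // Hypothetical forwarding code (the real call site is not part of this
    // diff); profiles_, stack_trace and line are assumed locals, while the
    // field and parameter names come from the hunks above.
    profiles_->AddPathToCurrentProfiles(
        sample.timestamp, stack_trace, line, sample.update_stats_,
        sample.sampling_interval_, sample.state, sample.embedder_state,
        reinterpret_cast<Address>(sample.context),
        reinterpret_cast<Address>(sample.embedder_context));
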
diff --git a/deps/v8/src/profiler/tick-sample.h b/deps/v8/src/profiler/tick-sample.h
index 4402bdc272..236e4a2f86 100644
--- a/deps/v8/src/profiler/tick-sample.h
+++ b/deps/v8/src/profiler/tick-sample.h
@@ -23,11 +23,12 @@ struct V8_EXPORT TickSample {
TickSample()
: state(OTHER),
+ embedder_state(EmbedderStateTag::EMPTY),
pc(nullptr),
external_callback_entry(nullptr),
frames_count(0),
has_external_callback(false),
- update_stats(true) {}
+ update_stats_(true) {}
/**
* Initialize a tick sample from the isolate.
@@ -82,6 +83,7 @@ struct V8_EXPORT TickSample {
void print() const;
StateTag state; // The state of the VM.
+ EmbedderStateTag embedder_state;
void* pc; // Instruction pointer.
union {
void* tos; // Top stack value (*sp).
@@ -91,12 +93,13 @@ struct V8_EXPORT TickSample {
static const unsigned kMaxFramesCount = (1 << kMaxFramesCountLog2) - 1;
void* stack[kMaxFramesCount]; // Call stack.
void* context = nullptr; // Address of the incumbent native context.
+ void* embedder_context = nullptr; // Address of the embedder native context.
unsigned frames_count : kMaxFramesCountLog2; // Number of captured frames.
bool has_external_callback : 1;
- bool update_stats : 1; // Whether the sample should update aggregated stats.
+ bool update_stats_ : 1; // Whether the sample should update aggregated stats.
base::TimeTicks timestamp;
- base::TimeDelta sampling_interval; // Sampling interval used to capture.
+ base::TimeDelta sampling_interval_; // Sampling interval used to capture.
};
} // namespace internal
diff --git a/deps/v8/src/regexp/experimental/experimental-bytecode.cc b/deps/v8/src/regexp/experimental/experimental-bytecode.cc
index 299a41577a..3fbb6d7dc5 100644
--- a/deps/v8/src/regexp/experimental/experimental-bytecode.cc
+++ b/deps/v8/src/regexp/experimental/experimental-bytecode.cc
@@ -36,22 +36,22 @@ std::ostream& operator<<(std::ostream& os, const RegExpInstruction& inst) {
case RegExpInstruction::ASSERTION:
os << "ASSERTION ";
switch (inst.payload.assertion_type) {
- case RegExpAssertion::START_OF_INPUT:
+ case RegExpAssertion::Type::START_OF_INPUT:
os << "START_OF_INPUT";
break;
- case RegExpAssertion::END_OF_INPUT:
+ case RegExpAssertion::Type::END_OF_INPUT:
os << "END_OF_INPUT";
break;
- case RegExpAssertion::START_OF_LINE:
+ case RegExpAssertion::Type::START_OF_LINE:
os << "START_OF_LINE";
break;
- case RegExpAssertion::END_OF_LINE:
+ case RegExpAssertion::Type::END_OF_LINE:
os << "END_OF_LINE";
break;
- case RegExpAssertion::BOUNDARY:
+ case RegExpAssertion::Type::BOUNDARY:
os << "BOUNDARY";
break;
- case RegExpAssertion::NON_BOUNDARY:
+ case RegExpAssertion::Type::NON_BOUNDARY:
os << "NON_BOUNDARY";
break;
}
diff --git a/deps/v8/src/regexp/experimental/experimental-bytecode.h b/deps/v8/src/regexp/experimental/experimental-bytecode.h
index 7042f95af1..e8b33ad76d 100644
--- a/deps/v8/src/regexp/experimental/experimental-bytecode.h
+++ b/deps/v8/src/regexp/experimental/experimental-bytecode.h
@@ -158,7 +158,7 @@ struct RegExpInstruction {
return result;
}
- static RegExpInstruction Assertion(RegExpAssertion::AssertionType t) {
+ static RegExpInstruction Assertion(RegExpAssertion::Type t) {
RegExpInstruction result;
result.opcode = ASSERTION;
result.payload.assertion_type = t;
@@ -174,7 +174,7 @@ struct RegExpInstruction {
// Payload of SET_REGISTER_TO_CP and CLEAR_REGISTER:
int32_t register_index;
// Payload of ASSERTION:
- RegExpAssertion::AssertionType assertion_type;
+ RegExpAssertion::Type assertion_type;
} payload;
STATIC_ASSERT(sizeof(payload) == 4);
};
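The AssertionType to RegExpAssertion::Type switch also changes the type of the ASSERTION payload member, and the existing STATIC_ASSERT keeps the payload union at 4 bytes. A standalone sketch of that layout constraint, with toy types in place of RegExpInstruction:

    // Toy layout check mirroring the constraint asserted above.
    #include <cstdint>

    struct Assertion {
      enum class Type : int32_t { START_OF_INPUT, END_OF_INPUT, BOUNDARY };
    };

    struct Instruction {
      int32_t opcode;
      union Payload {
        int32_t register_index;
        Assertion::Type assertion_type;  // scoped enum member, still 4 bytes
      } payload;
    };

    static_assert(sizeof(Instruction::Payload) == 4, "payload stays one word");
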
diff --git a/deps/v8/src/regexp/experimental/experimental-compiler.cc b/deps/v8/src/regexp/experimental/experimental-compiler.cc
index ae4abce7b5..ce7862d0fd 100644
--- a/deps/v8/src/regexp/experimental/experimental-compiler.cc
+++ b/deps/v8/src/regexp/experimental/experimental-compiler.cc
@@ -221,7 +221,7 @@ class BytecodeAssembler {
void Accept() { code_.Add(RegExpInstruction::Accept(), zone_); }
- void Assertion(RegExpAssertion::AssertionType t) {
+ void Assertion(RegExpAssertion::Type t) {
code_.Add(RegExpInstruction::Assertion(t), zone_);
}
diff --git a/deps/v8/src/regexp/experimental/experimental-interpreter.cc b/deps/v8/src/regexp/experimental/experimental-interpreter.cc
index 06283db011..078fa25f20 100644
--- a/deps/v8/src/regexp/experimental/experimental-interpreter.cc
+++ b/deps/v8/src/regexp/experimental/experimental-interpreter.cc
@@ -22,23 +22,23 @@ namespace {
constexpr int kUndefinedRegisterValue = -1;
template <class Character>
-bool SatisfiesAssertion(RegExpAssertion::AssertionType type,
+bool SatisfiesAssertion(RegExpAssertion::Type type,
base::Vector<const Character> context, int position) {
DCHECK_LE(position, context.length());
DCHECK_GE(position, 0);
switch (type) {
- case RegExpAssertion::START_OF_INPUT:
+ case RegExpAssertion::Type::START_OF_INPUT:
return position == 0;
- case RegExpAssertion::END_OF_INPUT:
+ case RegExpAssertion::Type::END_OF_INPUT:
return position == context.length();
- case RegExpAssertion::START_OF_LINE:
+ case RegExpAssertion::Type::START_OF_LINE:
if (position == 0) return true;
return unibrow::IsLineTerminator(context[position - 1]);
- case RegExpAssertion::END_OF_LINE:
+ case RegExpAssertion::Type::END_OF_LINE:
if (position == context.length()) return true;
return unibrow::IsLineTerminator(context[position]);
- case RegExpAssertion::BOUNDARY:
+ case RegExpAssertion::Type::BOUNDARY:
if (context.length() == 0) {
return false;
} else if (position == 0) {
@@ -49,8 +49,9 @@ bool SatisfiesAssertion(RegExpAssertion::AssertionType type,
return IsRegExpWord(context[position - 1]) !=
IsRegExpWord(context[position]);
}
- case RegExpAssertion::NON_BOUNDARY:
- return !SatisfiesAssertion(RegExpAssertion::BOUNDARY, context, position);
+ case RegExpAssertion::Type::NON_BOUNDARY:
+ return !SatisfiesAssertion(RegExpAssertion::Type::BOUNDARY, context,
+ position);
}
}
diff --git a/deps/v8/src/regexp/loong64/regexp-macro-assembler-loong64.cc b/deps/v8/src/regexp/loong64/regexp-macro-assembler-loong64.cc
index b2ca8c15c6..3db9a90c29 100644
--- a/deps/v8/src/regexp/loong64/regexp-macro-assembler-loong64.cc
+++ b/deps/v8/src/regexp/loong64/regexp-macro-assembler-loong64.cc
@@ -19,18 +19,16 @@ namespace internal {
/* clang-format off
*
* This assembler uses the following register assignment convention
- * - t3 : Temporarily stores the index of capture start after a matching pass
- * for a global regexp.
- * - a5 : Pointer to current Code object including heap object tag.
- * - a6 : Current position in input, as negative offset from end of string.
+ * - s0 : Unused.
+ * - s1 : Pointer to current Code object including heap object tag.
+ * - s2 : Current position in input, as negative offset from end of string.
* Please notice that this is the byte offset, not the character offset!
- * - a7 : Currently loaded character. Must be loaded using
+ * - s5 : Currently loaded character. Must be loaded using
* LoadCurrentCharacter before using any of the dispatch methods.
- * - t0 : Points to tip of backtrack stack
- * - t1 : Unused.
- * - t2 : End of input (points to byte after last character in input).
+ * - s6 : Points to tip of backtrack stack
+ * - s7 : End of input (points to byte after last character in input).
* - fp : Frame pointer. Used to access arguments, local variables and
- * RegExp registers.
+ * RegExp registers.
* - sp : Points to tip of C stack.
*
* The remaining registers are free for computations.
@@ -216,21 +214,6 @@ void RegExpMacroAssemblerLOONG64::CheckGreedyLoop(Label* on_equal) {
BranchOrBacktrack(on_equal, eq, current_input_offset(), Operand(a0));
}
-// Push (pop) caller-saved registers used by irregexp.
-void RegExpMacroAssemblerLOONG64::PushCallerSavedRegisters() {
- RegList caller_saved_regexp =
- current_input_offset().bit() | current_character().bit() |
- end_of_input_address().bit() | backtrack_stackpointer().bit();
- __ MultiPush(caller_saved_regexp);
-}
-
-void RegExpMacroAssemblerLOONG64::PopCallerSavedRegisters() {
- RegList caller_saved_regexp =
- current_input_offset().bit() | current_character().bit() |
- end_of_input_address().bit() | backtrack_stackpointer().bit();
- __ MultiPop(caller_saved_regexp);
-}
-
void RegExpMacroAssemblerLOONG64::CheckNotBackReferenceIgnoreCase(
int start_reg, bool read_backward, bool unicode, Label* on_no_match) {
Label fallthrough;
@@ -312,7 +295,6 @@ void RegExpMacroAssemblerLOONG64::CheckNotBackReferenceIgnoreCase(
}
} else {
DCHECK(mode_ == UC16);
- PushCallerSavedRegisters();
int argument_count = 4;
__ PrepareCallCFunction(argument_count, a2);
@@ -350,10 +332,6 @@ void RegExpMacroAssemblerLOONG64::CheckNotBackReferenceIgnoreCase(
__ CallCFunction(function, argument_count);
}
- PopCallerSavedRegisters();
- __ li(code_pointer(), Operand(masm_->CodeObject()), CONSTANT_SIZE);
- __ Ld_d(end_of_input_address(), MemOperand(frame_pointer(), kInputEnd));
-
// Check if function returned non-zero for success or zero for failure.
BranchOrBacktrack(on_no_match, eq, a0, Operand(zero_reg));
// On success, increment position by length of capture.
@@ -474,7 +452,6 @@ void RegExpMacroAssemblerLOONG64::CheckCharacterNotInRange(
void RegExpMacroAssemblerLOONG64::CallIsCharacterInRangeArray(
const ZoneList<CharacterRange>* ranges) {
static const int kNumArguments = 3;
- PushCallerSavedRegisters();
__ PrepareCallCFunction(kNumArguments, a0);
__ mov(a0, current_character());
@@ -488,7 +465,6 @@ void RegExpMacroAssemblerLOONG64::CallIsCharacterInRangeArray(
kNumArguments);
}
- PopCallerSavedRegisters();
__ li(code_pointer(), Operand(masm_->CodeObject()));
}
@@ -643,26 +619,26 @@ void RegExpMacroAssemblerLOONG64::StoreRegExpStackPointerToMemory(
__ St_d(src, MemOperand(scratch, 0));
}
-void RegExpMacroAssemblerLOONG64::PushRegExpBasePointer(Register scratch1,
- Register scratch2) {
- LoadRegExpStackPointerFromMemory(scratch1);
+void RegExpMacroAssemblerLOONG64::PushRegExpBasePointer(Register stack_pointer,
+ Register scratch) {
ExternalReference ref =
ExternalReference::address_of_regexp_stack_memory_top_address(isolate());
- __ li(scratch2, ref);
- __ Ld_d(scratch2, MemOperand(scratch2, 0));
- __ Sub_d(scratch2, scratch1, scratch2);
- __ St_d(scratch2, MemOperand(frame_pointer(), kRegExpStackBasePointer));
+ __ li(scratch, ref);
+ __ Ld_d(scratch, MemOperand(scratch, 0));
+ __ Sub_d(scratch, stack_pointer, scratch);
+ __ St_d(scratch, MemOperand(frame_pointer(), kRegExpStackBasePointer));
}
-void RegExpMacroAssemblerLOONG64::PopRegExpBasePointer(Register scratch1,
- Register scratch2) {
+void RegExpMacroAssemblerLOONG64::PopRegExpBasePointer(
+ Register stack_pointer_out, Register scratch) {
ExternalReference ref =
ExternalReference::address_of_regexp_stack_memory_top_address(isolate());
- __ Ld_d(scratch1, MemOperand(frame_pointer(), kRegExpStackBasePointer));
- __ li(scratch2, ref);
- __ Ld_d(scratch2, MemOperand(scratch2, 0));
- __ Add_d(scratch1, scratch1, scratch2);
- StoreRegExpStackPointerToMemory(scratch1, scratch2);
+ __ Ld_d(stack_pointer_out,
+ MemOperand(frame_pointer(), kRegExpStackBasePointer));
+ __ li(scratch, ref);
+ __ Ld_d(scratch, MemOperand(scratch, 0));
+ __ Add_d(stack_pointer_out, stack_pointer_out, scratch);
+ StoreRegExpStackPointerToMemory(stack_pointer_out, scratch);
}
Handle<HeapObject> RegExpMacroAssemblerLOONG64::GetCode(Handle<String> source) {
@@ -714,35 +690,43 @@ Handle<HeapObject> RegExpMacroAssemblerLOONG64::GetCode(Handle<String> source) {
kBacktrackCount - kSystemPointerSize);
__ Push(a0); // The regexp stack base ptr.
+ // Initialize backtrack stack pointer. It must not be clobbered from here
+ // on. Note the backtrack_stackpointer is callee-saved.
+ STATIC_ASSERT(backtrack_stackpointer() == s7);
+ LoadRegExpStackPointerFromMemory(backtrack_stackpointer());
+
// Store the regexp base pointer - we'll later restore it / write it to
// memory when returning from this irregexp code object.
- PushRegExpBasePointer(a0, a1);
-
- // Check if we have space on the stack for registers.
- Label stack_limit_hit;
- Label stack_ok;
-
- ExternalReference stack_limit =
- ExternalReference::address_of_jslimit(masm_->isolate());
- __ li(a0, Operand(stack_limit));
- __ Ld_d(a0, MemOperand(a0, 0));
- __ Sub_d(a0, sp, a0);
- // Handle it if the stack pointer is already below the stack limit.
- __ Branch(&stack_limit_hit, le, a0, Operand(zero_reg));
- // Check if there is room for the variable number of registers above
- // the stack limit.
- __ Branch(&stack_ok, hs, a0, Operand(num_registers_ * kPointerSize));
- // Exit with OutOfMemory exception. There is not enough space on the stack
- // for our working registers.
- __ li(a0, Operand(EXCEPTION));
- __ jmp(&return_v0);
-
- __ bind(&stack_limit_hit);
- CallCheckStackGuardState(a0);
- // If returned value is non-zero, we exit with the returned value as result.
- __ Branch(&return_v0, ne, a0, Operand(zero_reg));
-
- __ bind(&stack_ok);
+ PushRegExpBasePointer(backtrack_stackpointer(), a1);
+
+ {
+ // Check if we have space on the stack for registers.
+ Label stack_limit_hit, stack_ok;
+
+ ExternalReference stack_limit =
+ ExternalReference::address_of_jslimit(masm_->isolate());
+ __ li(a0, Operand(stack_limit));
+ __ Ld_d(a0, MemOperand(a0, 0));
+ __ Sub_d(a0, sp, a0);
+ // Handle it if the stack pointer is already below the stack limit.
+ __ Branch(&stack_limit_hit, le, a0, Operand(zero_reg));
+ // Check if there is room for the variable number of registers above
+ // the stack limit.
+ __ Branch(&stack_ok, hs, a0, Operand(num_registers_ * kPointerSize));
+ // Exit with OutOfMemory exception. There is not enough space on the stack
+ // for our working registers.
+ __ li(a0, Operand(EXCEPTION));
+ __ jmp(&return_v0);
+
+ __ bind(&stack_limit_hit);
+ CallCheckStackGuardState(a0);
+ // If returned value is non-zero, we exit with the returned value as
+ // result.
+ __ Branch(&return_v0, ne, a0, Operand(zero_reg));
+
+ __ bind(&stack_ok);
+ }
+
// Allocate space on stack for registers.
__ Sub_d(sp, sp, Operand(num_registers_ * kPointerSize));
// Load string end.
@@ -764,17 +748,20 @@ Handle<HeapObject> RegExpMacroAssemblerLOONG64::GetCode(Handle<String> source) {
// Initialize code pointer register
__ li(code_pointer(), Operand(masm_->CodeObject()), CONSTANT_SIZE);
- Label load_char_start_regexp, start_regexp;
- // Load newline if index is at start, previous character otherwise.
- __ Branch(&load_char_start_regexp, ne, a1, Operand(zero_reg));
- __ li(current_character(), Operand('\n'));
- __ jmp(&start_regexp);
-
- // Global regexp restarts matching here.
- __ bind(&load_char_start_regexp);
- // Load previous char as initial value of current character register.
- LoadCurrentCharacterUnchecked(-1, 1);
- __ bind(&start_regexp);
+ Label load_char_start_regexp;
+ {
+ Label start_regexp;
+ // Load newline if index is at start, previous character otherwise.
+ __ Branch(&load_char_start_regexp, ne, a1, Operand(zero_reg));
+ __ li(current_character(), Operand('\n'));
+ __ jmp(&start_regexp);
+
+ // Global regexp restarts matching here.
+ __ bind(&load_char_start_regexp);
+ // Load previous char as initial value of current character register.
+ LoadCurrentCharacterUnchecked(-1, 1);
+ __ bind(&start_regexp);
+ }
// Initialize on-stack registers.
if (num_saved_registers_ > 0) { // Always is, if generated from a regexp.
@@ -796,9 +783,6 @@ Handle<HeapObject> RegExpMacroAssemblerLOONG64::GetCode(Handle<String> source) {
}
}
- // Initialize backtrack stack pointer.
- LoadRegExpStackPointerFromMemory(backtrack_stackpointer());
-
__ jmp(&start_label_);
// Exit code:
@@ -870,6 +854,10 @@ Handle<HeapObject> RegExpMacroAssemblerLOONG64::GetCode(Handle<String> source) {
// Prepare a0 to initialize registers with its value in the next run.
__ Ld_d(a0, MemOperand(frame_pointer(), kStringStartMinusOne));
+ // Restore the original regexp stack pointer value (effectively, pop the
+ // stored base pointer).
+ PopRegExpBasePointer(backtrack_stackpointer(), a2);
+
if (global_with_zero_length_check()) {
// Special case for zero-length matches.
// t3: capture start index
@@ -901,7 +889,7 @@ Handle<HeapObject> RegExpMacroAssemblerLOONG64::GetCode(Handle<String> source) {
__ bind(&return_v0);
// Restore the original regexp stack pointer value (effectively, pop the
// stored base pointer).
- PopRegExpBasePointer(a1, a2);
+ PopRegExpBasePointer(backtrack_stackpointer(), a2);
// Skip sp past regexp registers and local variables.
__ mov(sp, frame_pointer());
@@ -923,9 +911,7 @@ Handle<HeapObject> RegExpMacroAssemblerLOONG64::GetCode(Handle<String> source) {
// Put regexp engine registers on stack.
StoreRegExpStackPointerToMemory(backtrack_stackpointer(), a1);
- PushCallerSavedRegisters();
CallCheckStackGuardState(a0);
- PopCallerSavedRegisters();
// If returning non-zero, we should end execution with the given
// result as return value.
__ Branch(&return_v0, ne, a0, Operand(zero_reg));
@@ -934,7 +920,6 @@ Handle<HeapObject> RegExpMacroAssemblerLOONG64::GetCode(Handle<String> source) {
// String might have moved: Reload end of string from frame.
__ Ld_d(end_of_input_address(), MemOperand(frame_pointer(), kInputEnd));
- __ li(code_pointer(), Operand(masm_->CodeObject()), CONSTANT_SIZE);
SafeReturn();
}
@@ -944,10 +929,6 @@ Handle<HeapObject> RegExpMacroAssemblerLOONG64::GetCode(Handle<String> source) {
SafeCallTarget(&stack_overflow_label_);
StoreRegExpStackPointerToMemory(backtrack_stackpointer(), a1);
// Reached if the backtrack-stack limit has been hit.
- // Put regexp engine registers on stack first.
- RegList regexp_registers =
- current_input_offset().bit() | current_character().bit();
- __ MultiPush(regexp_registers);
// Call GrowStack(isolate).
static const int kNumArguments = 1;
@@ -955,16 +936,11 @@ Handle<HeapObject> RegExpMacroAssemblerLOONG64::GetCode(Handle<String> source) {
__ li(a0, Operand(ExternalReference::isolate_address(masm_->isolate())));
ExternalReference grow_stack = ExternalReference::re_grow_stack();
__ CallCFunction(grow_stack, kNumArguments);
- // Restore regexp registers.
- __ MultiPop(regexp_registers);
// If nullptr is returned, we have failed to grow the stack, and must exit
// with a stack-overflow exception.
__ Branch(&exit_with_exception, eq, a0, Operand(zero_reg));
// Otherwise use return value as new stack pointer.
__ mov(backtrack_stackpointer(), a0);
- // Restore saved registers and continue.
- __ li(code_pointer(), Operand(masm_->CodeObject()), CONSTANT_SIZE);
- __ Ld_d(end_of_input_address(), MemOperand(frame_pointer(), kInputEnd));
SafeReturn();
}
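Two things fall out of the loong64 hunks: the regexp working registers move to callee-saved s-registers (note the STATIC_ASSERT(backtrack_stackpointer() == s7)), so the PushCallerSavedRegisters / PopCallerSavedRegisters pairs around C calls can be deleted, and the saved regexp base pointer is now kept as an offset from the regexp stack's memory top rather than as a raw pointer. In C-like form, using names from the code above (the real implementation is the assembly in PushRegExpBasePointer / PopRegExpBasePointer), the offset presumably stays valid even if the regexp stack is grown and reallocated while the generated code runs:

    // C-like restatement; frame[] stands for MemOperand(frame_pointer(), ...).
    intptr_t top = *regexp_stack_memory_top_address;                  // push side
    frame[kRegExpStackBasePointer] = backtrack_stackpointer - top;
    // ... generated code runs; GrowStack() may move the regexp stack ...
    intptr_t new_top = *regexp_stack_memory_top_address;              // pop side
    backtrack_stackpointer = frame[kRegExpStackBasePointer] + new_top;
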
diff --git a/deps/v8/src/regexp/loong64/regexp-macro-assembler-loong64.h b/deps/v8/src/regexp/loong64/regexp-macro-assembler-loong64.h
index b18cb0c871..a141936613 100644
--- a/deps/v8/src/regexp/loong64/regexp-macro-assembler-loong64.h
+++ b/deps/v8/src/regexp/loong64/regexp-macro-assembler-loong64.h
@@ -151,13 +151,13 @@ class V8_EXPORT_PRIVATE RegExpMacroAssemblerLOONG64
// Register holding the current input position as negative offset from
// the end of the string.
- static constexpr Register current_input_offset() { return a6; }
+ static constexpr Register current_input_offset() { return s2; }
// The register containing the current character after LoadCurrentCharacter.
- static constexpr Register current_character() { return a7; }
+ static constexpr Register current_character() { return s5; }
// Register holding address of the end of the input string.
- static constexpr Register end_of_input_address() { return t2; }
+ static constexpr Register end_of_input_address() { return s6; }
// Register holding the frame address. Local variables, parameters and
// regexp registers are addressed relative to this.
@@ -165,10 +165,10 @@ class V8_EXPORT_PRIVATE RegExpMacroAssemblerLOONG64
// The register containing the backtrack stack top. Provides a meaningful
// name to the register.
- static constexpr Register backtrack_stackpointer() { return t0; }
+ static constexpr Register backtrack_stackpointer() { return s7; }
// Register holding pointer to the current code object.
- static constexpr Register code_pointer() { return a5; }
+ static constexpr Register code_pointer() { return s1; }
// Byte size of chars in the string to match (decided by the Mode argument).
inline int char_size() { return static_cast<int>(mode_); }
@@ -195,8 +195,8 @@ class V8_EXPORT_PRIVATE RegExpMacroAssemblerLOONG64
void LoadRegExpStackPointerFromMemory(Register dst);
void StoreRegExpStackPointerToMemory(Register src, Register scratch);
- void PushRegExpBasePointer(Register scratch1, Register scratch2);
- void PopRegExpBasePointer(Register scratch1, Register scratch2);
+ void PushRegExpBasePointer(Register stack_pointer, Register scratch);
+ void PopRegExpBasePointer(Register stack_pointer_out, Register scratch);
Isolate* isolate() const { return masm_->isolate(); }
diff --git a/deps/v8/src/regexp/mips/regexp-macro-assembler-mips.cc b/deps/v8/src/regexp/mips/regexp-macro-assembler-mips.cc
index fb809b1442..74a42ef815 100644
--- a/deps/v8/src/regexp/mips/regexp-macro-assembler-mips.cc
+++ b/deps/v8/src/regexp/mips/regexp-macro-assembler-mips.cc
@@ -18,18 +18,16 @@ namespace internal {
/*
* This assembler uses the following register assignment convention
- * - t7 : Temporarily stores the index of capture start after a matching pass
- * for a global regexp.
- * - t1 : Pointer to current Code object including heap object tag.
- * - t2 : Current position in input, as negative offset from end of string.
+ * - s0 : Unused.
+ * - s1 : Pointer to current Code object including heap object tag.
+ * - s2 : Current position in input, as negative offset from end of string.
* Please notice that this is the byte offset, not the character offset!
- * - t3 : Currently loaded character. Must be loaded using
+ * - s5 : Currently loaded character. Must be loaded using
* LoadCurrentCharacter before using any of the dispatch methods.
- * - t4 : Points to tip of backtrack stack
- * - t5 : Unused.
- * - t6 : End of input (points to byte after last character in input).
+ * - s6 : Points to tip of backtrack stack
+ * - s7 : End of input (points to byte after last character in input).
* - fp : Frame pointer. Used to access arguments, local variables and
- * RegExp registers.
+ * RegExp registers.
* - sp : Points to tip of C stack.
*
* The remaining registers are free for computations.
@@ -223,21 +221,6 @@ void RegExpMacroAssemblerMIPS::CheckGreedyLoop(Label* on_equal) {
BranchOrBacktrack(on_equal, eq, current_input_offset(), Operand(a0));
}
-// Push (pop) caller-saved registers used by irregexp.
-void RegExpMacroAssemblerMIPS::PushCallerSavedRegisters() {
- RegList caller_saved_regexp =
- current_input_offset().bit() | current_character().bit() |
- end_of_input_address().bit() | backtrack_stackpointer().bit();
- __ MultiPush(caller_saved_regexp);
-}
-
-void RegExpMacroAssemblerMIPS::PopCallerSavedRegisters() {
- RegList caller_saved_regexp =
- current_input_offset().bit() | current_character().bit() |
- end_of_input_address().bit() | backtrack_stackpointer().bit();
- __ MultiPop(caller_saved_regexp);
-}
-
void RegExpMacroAssemblerMIPS::CheckNotBackReferenceIgnoreCase(
int start_reg, bool read_backward, bool unicode, Label* on_no_match) {
Label fallthrough;
@@ -318,7 +301,6 @@ void RegExpMacroAssemblerMIPS::CheckNotBackReferenceIgnoreCase(
}
} else {
DCHECK_EQ(UC16, mode_);
- PushCallerSavedRegisters();
int argument_count = 4;
__ PrepareCallCFunction(argument_count, a2);
@@ -356,11 +338,6 @@ void RegExpMacroAssemblerMIPS::CheckNotBackReferenceIgnoreCase(
__ CallCFunction(function, argument_count);
}
- // Restore regexp engine registers.
- PopCallerSavedRegisters();
- __ li(code_pointer(), Operand(masm_->CodeObject()), CONSTANT_SIZE);
- __ lw(end_of_input_address(), MemOperand(frame_pointer(), kInputEnd));
-
// Check if function returned non-zero for success or zero for failure.
BranchOrBacktrack(on_no_match, eq, v0, Operand(zero_reg));
// On success, advance position by length of capture.
@@ -491,7 +468,6 @@ void RegExpMacroAssemblerMIPS::CheckCharacterNotInRange(
void RegExpMacroAssemblerMIPS::CallIsCharacterInRangeArray(
const ZoneList<CharacterRange>* ranges) {
static const int kNumArguments = 3;
- PushCallerSavedRegisters();
__ PrepareCallCFunction(kNumArguments, a0);
__ mov(a0, current_character());
@@ -505,7 +481,6 @@ void RegExpMacroAssemblerMIPS::CallIsCharacterInRangeArray(
kNumArguments);
}
- PopCallerSavedRegisters();
__ li(code_pointer(), Operand(masm_->CodeObject()));
}
@@ -660,26 +635,26 @@ void RegExpMacroAssemblerMIPS::StoreRegExpStackPointerToMemory(
__ Sw(src, MemOperand(scratch));
}
-void RegExpMacroAssemblerMIPS::PushRegExpBasePointer(Register scratch1,
- Register scratch2) {
- LoadRegExpStackPointerFromMemory(scratch1);
+void RegExpMacroAssemblerMIPS::PushRegExpBasePointer(Register stack_pointer,
+ Register scratch) {
ExternalReference ref =
ExternalReference::address_of_regexp_stack_memory_top_address(isolate());
- __ li(scratch2, Operand(ref));
- __ Lw(scratch2, MemOperand(scratch2));
- __ Subu(scratch2, scratch1, scratch2);
- __ Sw(scratch2, MemOperand(frame_pointer(), kRegExpStackBasePointer));
+ __ li(scratch, Operand(ref));
+ __ Lw(scratch, MemOperand(scratch));
+ __ Subu(scratch, stack_pointer, scratch);
+ __ Sw(scratch, MemOperand(frame_pointer(), kRegExpStackBasePointer));
}
-void RegExpMacroAssemblerMIPS::PopRegExpBasePointer(Register scratch1,
- Register scratch2) {
+void RegExpMacroAssemblerMIPS::PopRegExpBasePointer(Register stack_pointer_out,
+ Register scratch) {
ExternalReference ref =
ExternalReference::address_of_regexp_stack_memory_top_address(isolate());
- __ Lw(scratch1, MemOperand(frame_pointer(), kRegExpStackBasePointer));
- __ li(scratch2, Operand(ref));
- __ Lw(scratch2, MemOperand(scratch2));
- __ Addu(scratch1, scratch1, scratch2);
- StoreRegExpStackPointerToMemory(scratch1, scratch2);
+ __ Lw(stack_pointer_out,
+ MemOperand(frame_pointer(), kRegExpStackBasePointer));
+ __ li(scratch, Operand(ref));
+ __ Lw(scratch, MemOperand(scratch));
+ __ Addu(stack_pointer_out, stack_pointer_out, scratch);
+ StoreRegExpStackPointerToMemory(stack_pointer_out, scratch);
}
Handle<HeapObject> RegExpMacroAssemblerMIPS::GetCode(Handle<String> source) {
@@ -726,35 +701,43 @@ Handle<HeapObject> RegExpMacroAssemblerMIPS::GetCode(Handle<String> source) {
kBacktrackCount - kSystemPointerSize);
__ push(a0); // The regexp stack base ptr.
+ // Initialize backtrack stack pointer. It must not be clobbered from here
+ // on. Note the backtrack_stackpointer is callee-saved.
+ STATIC_ASSERT(backtrack_stackpointer() == s7);
+ LoadRegExpStackPointerFromMemory(backtrack_stackpointer());
+
// Store the regexp base pointer - we'll later restore it / write it to
// memory when returning from this irregexp code object.
- PushRegExpBasePointer(a0, a1);
-
- // Check if we have space on the stack for registers.
- Label stack_limit_hit;
- Label stack_ok;
-
- ExternalReference stack_limit =
- ExternalReference::address_of_jslimit(masm_->isolate());
- __ li(a0, Operand(stack_limit));
- __ lw(a0, MemOperand(a0));
- __ Subu(a0, sp, a0);
- // Handle it if the stack pointer is already below the stack limit.
- __ Branch(&stack_limit_hit, le, a0, Operand(zero_reg));
- // Check if there is room for the variable number of registers above
- // the stack limit.
- __ Branch(&stack_ok, hs, a0, Operand(num_registers_ * kPointerSize));
- // Exit with OutOfMemory exception. There is not enough space on the stack
- // for our working registers.
- __ li(v0, Operand(EXCEPTION));
- __ jmp(&return_v0);
-
- __ bind(&stack_limit_hit);
- CallCheckStackGuardState(a0);
- // If returned value is non-zero, we exit with the returned value as result.
- __ Branch(&return_v0, ne, v0, Operand(zero_reg));
-
- __ bind(&stack_ok);
+ PushRegExpBasePointer(backtrack_stackpointer(), a1);
+
+ {
+ // Check if we have space on the stack for registers.
+ Label stack_limit_hit, stack_ok;
+
+ ExternalReference stack_limit =
+ ExternalReference::address_of_jslimit(masm_->isolate());
+ __ li(a0, Operand(stack_limit));
+ __ lw(a0, MemOperand(a0));
+ __ Subu(a0, sp, a0);
+ // Handle it if the stack pointer is already below the stack limit.
+ __ Branch(&stack_limit_hit, le, a0, Operand(zero_reg));
+ // Check if there is room for the variable number of registers above
+ // the stack limit.
+ __ Branch(&stack_ok, hs, a0, Operand(num_registers_ * kPointerSize));
+ // Exit with OutOfMemory exception. There is not enough space on the stack
+ // for our working registers.
+ __ li(v0, Operand(EXCEPTION));
+ __ jmp(&return_v0);
+
+ __ bind(&stack_limit_hit);
+ CallCheckStackGuardState(a0);
+ // If returned value is non-zero, we exit with the returned value as
+ // result.
+ __ Branch(&return_v0, ne, v0, Operand(zero_reg));
+
+ __ bind(&stack_ok);
+ }
+
// Allocate space on stack for registers.
__ Subu(sp, sp, Operand(num_registers_ * kPointerSize));
// Load string end.
@@ -776,17 +759,20 @@ Handle<HeapObject> RegExpMacroAssemblerMIPS::GetCode(Handle<String> source) {
// Initialize code pointer register
__ li(code_pointer(), Operand(masm_->CodeObject()), CONSTANT_SIZE);
- Label load_char_start_regexp, start_regexp;
- // Load newline if index is at start, previous character otherwise.
- __ Branch(&load_char_start_regexp, ne, a1, Operand(zero_reg));
- __ li(current_character(), Operand('\n'));
- __ jmp(&start_regexp);
-
- // Global regexp restarts matching here.
- __ bind(&load_char_start_regexp);
- // Load previous char as initial value of current character register.
- LoadCurrentCharacterUnchecked(-1, 1);
- __ bind(&start_regexp);
+ Label load_char_start_regexp;
+ {
+ Label start_regexp;
+ // Load newline if index is at start, previous character otherwise.
+ __ Branch(&load_char_start_regexp, ne, a1, Operand(zero_reg));
+ __ li(current_character(), Operand('\n'));
+ __ jmp(&start_regexp);
+
+ // Global regexp restarts matching here.
+ __ bind(&load_char_start_regexp);
+ // Load previous char as initial value of current character register.
+ LoadCurrentCharacterUnchecked(-1, 1);
+ __ bind(&start_regexp);
+ }
// Initialize on-stack registers.
if (num_saved_registers_ > 0) { // Always is, if generated from a regexp.
@@ -808,9 +794,6 @@ Handle<HeapObject> RegExpMacroAssemblerMIPS::GetCode(Handle<String> source) {
}
}
- // Initialize backtrack stack pointer.
- LoadRegExpStackPointerFromMemory(backtrack_stackpointer());
-
__ jmp(&start_label_);
@@ -882,6 +865,10 @@ Handle<HeapObject> RegExpMacroAssemblerMIPS::GetCode(Handle<String> source) {
// Prepare a0 to initialize registers with its value in the next run.
__ lw(a0, MemOperand(frame_pointer(), kStringStartMinusOne));
+ // Restore the original regexp stack pointer value (effectively, pop the
+ // stored base pointer).
+ PopRegExpBasePointer(backtrack_stackpointer(), a2);
+
if (global_with_zero_length_check()) {
// Special case for zero-length matches.
// t7: capture start index
@@ -913,7 +900,7 @@ Handle<HeapObject> RegExpMacroAssemblerMIPS::GetCode(Handle<String> source) {
__ bind(&return_v0);
// Restore the original regexp stack pointer value (effectively, pop the
// stored base pointer).
- PopRegExpBasePointer(a0, a1);
+ PopRegExpBasePointer(backtrack_stackpointer(), a1);
// Skip sp past regexp registers and local variables.
__ mov(sp, frame_pointer());
@@ -933,9 +920,7 @@ Handle<HeapObject> RegExpMacroAssemblerMIPS::GetCode(Handle<String> source) {
if (check_preempt_label_.is_linked()) {
SafeCallTarget(&check_preempt_label_);
StoreRegExpStackPointerToMemory(backtrack_stackpointer(), a0);
- PushCallerSavedRegisters();
CallCheckStackGuardState(a0);
- PopCallerSavedRegisters();
// If returning non-zero, we should end execution with the given
// result as return value.
__ Branch(&return_v0, ne, v0, Operand(zero_reg));
@@ -944,7 +929,6 @@ Handle<HeapObject> RegExpMacroAssemblerMIPS::GetCode(Handle<String> source) {
// String might have moved: Reload end of string from frame.
__ lw(end_of_input_address(), MemOperand(frame_pointer(), kInputEnd));
- __ li(code_pointer(), Operand(masm_->CodeObject()), CONSTANT_SIZE);
SafeReturn();
}
@@ -953,10 +937,6 @@ Handle<HeapObject> RegExpMacroAssemblerMIPS::GetCode(Handle<String> source) {
SafeCallTarget(&stack_overflow_label_);
StoreRegExpStackPointerToMemory(backtrack_stackpointer(), a0);
// Reached if the backtrack-stack limit has been hit.
- // Put regexp engine registers on stack first.
- RegList regexp_registers = current_input_offset().bit() |
- current_character().bit();
- __ MultiPush(regexp_registers);
// Call GrowStack(isolate).
static constexpr int kNumArguments = 1;
@@ -964,16 +944,11 @@ Handle<HeapObject> RegExpMacroAssemblerMIPS::GetCode(Handle<String> source) {
__ li(a0, Operand(ExternalReference::isolate_address(masm_->isolate())));
ExternalReference grow_stack = ExternalReference::re_grow_stack();
__ CallCFunction(grow_stack, kNumArguments);
- // Restore regexp registers.
- __ MultiPop(regexp_registers);
// If nullptr is returned, we have failed to grow the stack, and must exit
// with a stack-overflow exception.
__ Branch(&exit_with_exception, eq, v0, Operand(zero_reg));
// Otherwise use return value as new stack pointer.
__ mov(backtrack_stackpointer(), v0);
- // Restore saved registers and continue.
- __ li(code_pointer(), Operand(masm_->CodeObject()), CONSTANT_SIZE);
- __ lw(end_of_input_address(), MemOperand(frame_pointer(), kInputEnd));
SafeReturn();
}
diff --git a/deps/v8/src/regexp/mips/regexp-macro-assembler-mips.h b/deps/v8/src/regexp/mips/regexp-macro-assembler-mips.h
index c6f787fb64..79bbe4bc4d 100644
--- a/deps/v8/src/regexp/mips/regexp-macro-assembler-mips.h
+++ b/deps/v8/src/regexp/mips/regexp-macro-assembler-mips.h
@@ -147,13 +147,13 @@ class V8_EXPORT_PRIVATE RegExpMacroAssemblerMIPS
// Register holding the current input position as negative offset from
// the end of the string.
- static constexpr Register current_input_offset() { return t2; }
+ static constexpr Register current_input_offset() { return s2; }
// The register containing the current character after LoadCurrentCharacter.
- static constexpr Register current_character() { return t3; }
+ static constexpr Register current_character() { return s5; }
// Register holding address of the end of the input string.
- static constexpr Register end_of_input_address() { return t6; }
+ static constexpr Register end_of_input_address() { return s6; }
// Register holding the frame address. Local variables, parameters and
// regexp registers are addressed relative to this.
@@ -161,10 +161,10 @@ class V8_EXPORT_PRIVATE RegExpMacroAssemblerMIPS
// The register containing the backtrack stack top. Provides a meaningful
// name to the register.
- static constexpr Register backtrack_stackpointer() { return t4; }
+ static constexpr Register backtrack_stackpointer() { return s7; }
// Register holding pointer to the current code object.
- static constexpr Register code_pointer() { return t1; }
+ static constexpr Register code_pointer() { return s1; }
// Byte size of chars in the string to match (decided by the Mode argument).
inline int char_size() const { return static_cast<int>(mode_); }
@@ -195,8 +195,8 @@ class V8_EXPORT_PRIVATE RegExpMacroAssemblerMIPS
void LoadRegExpStackPointerFromMemory(Register dst);
void StoreRegExpStackPointerToMemory(Register src, Register scratch);
- void PushRegExpBasePointer(Register scratch1, Register scratch2);
- void PopRegExpBasePointer(Register scratch1, Register scratch2);
+ void PushRegExpBasePointer(Register stack_pointer, Register scratch);
+ void PopRegExpBasePointer(Register stack_pointer_out, Register scratch);
Isolate* isolate() const { return masm_->isolate(); }
diff --git a/deps/v8/src/regexp/mips64/regexp-macro-assembler-mips64.cc b/deps/v8/src/regexp/mips64/regexp-macro-assembler-mips64.cc
index bc6fed487c..bee0e57501 100644
--- a/deps/v8/src/regexp/mips64/regexp-macro-assembler-mips64.cc
+++ b/deps/v8/src/regexp/mips64/regexp-macro-assembler-mips64.cc
@@ -20,18 +20,16 @@ namespace internal {
/* clang-format off
*
* This assembler uses the following register assignment convention
- * - t3 : Temporarily stores the index of capture start after a matching pass
- * for a global regexp.
- * - a5 : Pointer to current Code object including heap object tag.
- * - a6 : Current position in input, as negative offset from end of string.
+ * - s0 : Unused.
+ * - s1 : Pointer to current Code object including heap object tag.
+ * - s2 : Current position in input, as negative offset from end of string.
* Please notice that this is the byte offset, not the character offset!
- * - a7 : Currently loaded character. Must be loaded using
+ * - s5 : Currently loaded character. Must be loaded using
* LoadCurrentCharacter before using any of the dispatch methods.
- * - t0 : Points to tip of backtrack stack
- * - t1 : Unused.
- * - t2 : End of input (points to byte after last character in input).
+ * - s6 : Points to tip of backtrack stack
+ * - s7 : End of input (points to byte after last character in input).
* - fp : Frame pointer. Used to access arguments, local variables and
- * RegExp registers.
+ * RegExp registers.
* - sp : Points to tip of C stack.
*
* The remaining registers are free for computations.
@@ -261,21 +259,6 @@ void RegExpMacroAssemblerMIPS::CheckGreedyLoop(Label* on_equal) {
BranchOrBacktrack(on_equal, eq, current_input_offset(), Operand(a0));
}
-// Push (pop) caller-saved registers used by irregexp.
-void RegExpMacroAssemblerMIPS::PushCallerSavedRegisters() {
- RegList caller_saved_regexp =
- current_input_offset().bit() | current_character().bit() |
- end_of_input_address().bit() | backtrack_stackpointer().bit();
- __ MultiPush(caller_saved_regexp);
-}
-
-void RegExpMacroAssemblerMIPS::PopCallerSavedRegisters() {
- RegList caller_saved_regexp =
- current_input_offset().bit() | current_character().bit() |
- end_of_input_address().bit() | backtrack_stackpointer().bit();
- __ MultiPop(caller_saved_regexp);
-}
-
void RegExpMacroAssemblerMIPS::CheckNotBackReferenceIgnoreCase(
int start_reg, bool read_backward, bool unicode, Label* on_no_match) {
Label fallthrough;
@@ -356,7 +339,6 @@ void RegExpMacroAssemblerMIPS::CheckNotBackReferenceIgnoreCase(
}
} else {
DCHECK(mode_ == UC16);
- PushCallerSavedRegisters();
int argument_count = 4;
__ PrepareCallCFunction(argument_count, a2);
@@ -394,10 +376,6 @@ void RegExpMacroAssemblerMIPS::CheckNotBackReferenceIgnoreCase(
__ CallCFunction(function, argument_count);
}
- PopCallerSavedRegisters();
- __ li(code_pointer(), Operand(masm_->CodeObject()), CONSTANT_SIZE);
- __ Ld(end_of_input_address(), MemOperand(frame_pointer(), kInputEnd));
-
// Check if function returned non-zero for success or zero for failure.
BranchOrBacktrack(on_no_match, eq, v0, Operand(zero_reg));
// On success, increment position by length of capture.
@@ -522,7 +500,6 @@ void RegExpMacroAssemblerMIPS::CheckCharacterNotInRange(
void RegExpMacroAssemblerMIPS::CallIsCharacterInRangeArray(
const ZoneList<CharacterRange>* ranges) {
static const int kNumArguments = 3;
- PushCallerSavedRegisters();
__ PrepareCallCFunction(kNumArguments, a0);
__ mov(a0, current_character());
@@ -536,7 +513,6 @@ void RegExpMacroAssemblerMIPS::CallIsCharacterInRangeArray(
kNumArguments);
}
- PopCallerSavedRegisters();
__ li(code_pointer(), Operand(masm_->CodeObject()));
}
@@ -691,26 +667,26 @@ void RegExpMacroAssemblerMIPS::StoreRegExpStackPointerToMemory(
__ Sd(src, MemOperand(scratch));
}
-void RegExpMacroAssemblerMIPS::PushRegExpBasePointer(Register scratch1,
- Register scratch2) {
- LoadRegExpStackPointerFromMemory(scratch1);
+void RegExpMacroAssemblerMIPS::PushRegExpBasePointer(Register stack_pointer,
+ Register scratch) {
ExternalReference ref =
ExternalReference::address_of_regexp_stack_memory_top_address(isolate());
- __ li(scratch2, Operand(ref));
- __ Ld(scratch2, MemOperand(scratch2));
- __ Dsubu(scratch2, scratch1, scratch2);
- __ Sd(scratch2, MemOperand(frame_pointer(), kRegExpStackBasePointer));
+ __ li(scratch, Operand(ref));
+ __ Ld(scratch, MemOperand(scratch));
+ __ Dsubu(scratch, stack_pointer, scratch);
+ __ Sd(scratch, MemOperand(frame_pointer(), kRegExpStackBasePointer));
}
-void RegExpMacroAssemblerMIPS::PopRegExpBasePointer(Register scratch1,
- Register scratch2) {
+void RegExpMacroAssemblerMIPS::PopRegExpBasePointer(Register stack_pointer_out,
+ Register scratch) {
ExternalReference ref =
ExternalReference::address_of_regexp_stack_memory_top_address(isolate());
- __ Ld(scratch1, MemOperand(frame_pointer(), kRegExpStackBasePointer));
- __ li(scratch2, Operand(ref));
- __ Ld(scratch2, MemOperand(scratch2));
- __ Daddu(scratch1, scratch1, scratch2);
- StoreRegExpStackPointerToMemory(scratch1, scratch2);
+ __ Ld(stack_pointer_out,
+ MemOperand(frame_pointer(), kRegExpStackBasePointer));
+ __ li(scratch, Operand(ref));
+ __ Ld(scratch, MemOperand(scratch));
+ __ Daddu(stack_pointer_out, stack_pointer_out, scratch);
+ StoreRegExpStackPointerToMemory(stack_pointer_out, scratch);
}
Handle<HeapObject> RegExpMacroAssemblerMIPS::GetCode(Handle<String> source) {
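The reworked PushRegExpBasePointer/PopRegExpBasePointer pair above saves the backtrack stack pointer as an offset from the regexp-stack memory top rather than as an absolute address, so the saved value stays meaningful even if the backing store is reallocated in the meantime. A minimal stand-alone sketch of that idea (plain C++ with hypothetical names, not the V8 API):

    #include <cassert>
    #include <cstddef>
    #include <cstdint>
    #include <vector>

    // Sketch: save a stack pointer as an offset from the segment top so the
    // backing store may move or grow while the pointer is "pushed".
    struct RegexpStack {
      std::vector<uint8_t> storage;
      uint8_t* top() { return storage.data() + storage.size(); }
    };

    // Push: convert the absolute pointer into a (negative) offset from top.
    ptrdiff_t PushBasePointer(RegexpStack& stack, uint8_t* stack_pointer) {
      return stack_pointer - stack.top();
    }

    // Pop: rebase the saved offset against the (possibly relocated) top.
    uint8_t* PopBasePointer(RegexpStack& stack, ptrdiff_t saved_offset) {
      return stack.top() + saved_offset;
    }

    int main() {
      RegexpStack stack;
      stack.storage.resize(64);
      uint8_t* sp = stack.top() - 16;         // some backtrack stack pointer
      ptrdiff_t saved = PushBasePointer(stack, sp);
      stack.storage.resize(256);              // backing store grows/moves
      uint8_t* restored = PopBasePointer(stack, saved);
      assert(restored == stack.top() - 16);   // same logical position
      return 0;
    }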
@@ -762,35 +738,43 @@ Handle<HeapObject> RegExpMacroAssemblerMIPS::GetCode(Handle<String> source) {
kBacktrackCount - kSystemPointerSize);
__ push(a0); // The regexp stack base ptr.
+ // Initialize backtrack stack pointer. It must not be clobbered from here
+ // on. Note the backtrack_stackpointer is callee-saved.
+ STATIC_ASSERT(backtrack_stackpointer() == s7);
+ LoadRegExpStackPointerFromMemory(backtrack_stackpointer());
+
// Store the regexp base pointer - we'll later restore it / write it to
// memory when returning from this irregexp code object.
- PushRegExpBasePointer(a0, a1);
-
- // Check if we have space on the stack for registers.
- Label stack_limit_hit;
- Label stack_ok;
-
- ExternalReference stack_limit =
- ExternalReference::address_of_jslimit(masm_->isolate());
- __ li(a0, Operand(stack_limit));
- __ Ld(a0, MemOperand(a0));
- __ Dsubu(a0, sp, a0);
- // Handle it if the stack pointer is already below the stack limit.
- __ Branch(&stack_limit_hit, le, a0, Operand(zero_reg));
- // Check if there is room for the variable number of registers above
- // the stack limit.
- __ Branch(&stack_ok, hs, a0, Operand(num_registers_ * kPointerSize));
- // Exit with OutOfMemory exception. There is not enough space on the stack
- // for our working registers.
- __ li(v0, Operand(EXCEPTION));
- __ jmp(&return_v0);
-
- __ bind(&stack_limit_hit);
- CallCheckStackGuardState(a0);
- // If returned value is non-zero, we exit with the returned value as result.
- __ Branch(&return_v0, ne, v0, Operand(zero_reg));
-
- __ bind(&stack_ok);
+ PushRegExpBasePointer(backtrack_stackpointer(), a1);
+
+ {
+ // Check if we have space on the stack for registers.
+ Label stack_limit_hit, stack_ok;
+
+ ExternalReference stack_limit =
+ ExternalReference::address_of_jslimit(masm_->isolate());
+ __ li(a0, Operand(stack_limit));
+ __ Ld(a0, MemOperand(a0));
+ __ Dsubu(a0, sp, a0);
+ // Handle it if the stack pointer is already below the stack limit.
+ __ Branch(&stack_limit_hit, le, a0, Operand(zero_reg));
+ // Check if there is room for the variable number of registers above
+ // the stack limit.
+ __ Branch(&stack_ok, hs, a0, Operand(num_registers_ * kPointerSize));
+ // Exit with OutOfMemory exception. There is not enough space on the stack
+ // for our working registers.
+ __ li(v0, Operand(EXCEPTION));
+ __ jmp(&return_v0);
+
+ __ bind(&stack_limit_hit);
+ CallCheckStackGuardState(a0);
+ // If returned value is non-zero, we exit with the returned value as
+ // result.
+ __ Branch(&return_v0, ne, v0, Operand(zero_reg));
+
+ __ bind(&stack_ok);
+ }
+
// Allocate space on stack for registers.
__ Dsubu(sp, sp, Operand(num_registers_ * kPointerSize));
// Load string end.
@@ -812,17 +796,20 @@ Handle<HeapObject> RegExpMacroAssemblerMIPS::GetCode(Handle<String> source) {
// Initialize code pointer register
__ li(code_pointer(), Operand(masm_->CodeObject()), CONSTANT_SIZE);
- Label load_char_start_regexp, start_regexp;
- // Load newline if index is at start, previous character otherwise.
- __ Branch(&load_char_start_regexp, ne, a1, Operand(zero_reg));
- __ li(current_character(), Operand('\n'));
- __ jmp(&start_regexp);
-
- // Global regexp restarts matching here.
- __ bind(&load_char_start_regexp);
- // Load previous char as initial value of current character register.
- LoadCurrentCharacterUnchecked(-1, 1);
- __ bind(&start_regexp);
+ Label load_char_start_regexp;
+ {
+ Label start_regexp;
+ // Load newline if index is at start, previous character otherwise.
+ __ Branch(&load_char_start_regexp, ne, a1, Operand(zero_reg));
+ __ li(current_character(), Operand('\n'));
+ __ jmp(&start_regexp);
+
+ // Global regexp restarts matching here.
+ __ bind(&load_char_start_regexp);
+ // Load previous char as initial value of current character register.
+ LoadCurrentCharacterUnchecked(-1, 1);
+ __ bind(&start_regexp);
+ }
// Initialize on-stack registers.
if (num_saved_registers_ > 0) { // Always is, if generated from a regexp.
@@ -844,12 +831,8 @@ Handle<HeapObject> RegExpMacroAssemblerMIPS::GetCode(Handle<String> source) {
}
}
- // Initialize backtrack stack pointer.
- LoadRegExpStackPointerFromMemory(backtrack_stackpointer());
-
__ jmp(&start_label_);
-
// Exit code:
if (success_label_.is_linked()) {
// Save captures when successful.
@@ -919,6 +902,10 @@ Handle<HeapObject> RegExpMacroAssemblerMIPS::GetCode(Handle<String> source) {
// Prepare a0 to initialize registers with its value in the next run.
__ Ld(a0, MemOperand(frame_pointer(), kStringStartMinusOne));
+ // Restore the original regexp stack pointer value (effectively, pop the
+ // stored base pointer).
+ PopRegExpBasePointer(backtrack_stackpointer(), a2);
+
if (global_with_zero_length_check()) {
// Special case for zero-length matches.
// t3: capture start index
@@ -950,7 +937,7 @@ Handle<HeapObject> RegExpMacroAssemblerMIPS::GetCode(Handle<String> source) {
__ bind(&return_v0);
// Restore the original regexp stack pointer value (effectively, pop the
// stored base pointer).
- PopRegExpBasePointer(a0, a1);
+ PopRegExpBasePointer(backtrack_stackpointer(), a1);
// Skip sp past regexp registers and local variables.
__ mov(sp, frame_pointer());
@@ -971,9 +958,7 @@ Handle<HeapObject> RegExpMacroAssemblerMIPS::GetCode(Handle<String> source) {
SafeCallTarget(&check_preempt_label_);
StoreRegExpStackPointerToMemory(backtrack_stackpointer(), a0);
- PushCallerSavedRegisters();
CallCheckStackGuardState(a0);
- PopCallerSavedRegisters();
// If returning non-zero, we should end execution with the given
// result as return value.
__ Branch(&return_v0, ne, v0, Operand(zero_reg));
@@ -982,7 +967,6 @@ Handle<HeapObject> RegExpMacroAssemblerMIPS::GetCode(Handle<String> source) {
// String might have moved: Reload end of string from frame.
__ Ld(end_of_input_address(), MemOperand(frame_pointer(), kInputEnd));
- __ li(code_pointer(), Operand(masm_->CodeObject()), CONSTANT_SIZE);
SafeReturn();
}
@@ -991,10 +975,6 @@ Handle<HeapObject> RegExpMacroAssemblerMIPS::GetCode(Handle<String> source) {
SafeCallTarget(&stack_overflow_label_);
StoreRegExpStackPointerToMemory(backtrack_stackpointer(), a0);
// Reached if the backtrack-stack limit has been hit.
- // Put regexp engine registers on stack first.
- RegList regexp_registers = current_input_offset().bit() |
- current_character().bit();
- __ MultiPush(regexp_registers);
// Call GrowStack(isolate)
static constexpr int kNumArguments = 1;
@@ -1002,16 +982,11 @@ Handle<HeapObject> RegExpMacroAssemblerMIPS::GetCode(Handle<String> source) {
__ li(a0, Operand(ExternalReference::isolate_address(masm_->isolate())));
ExternalReference grow_stack = ExternalReference::re_grow_stack();
__ CallCFunction(grow_stack, kNumArguments);
- // Restore regexp registers.
- __ MultiPop(regexp_registers);
// If nullptr is returned, we have failed to grow the stack, and must exit
// with a stack-overflow exception.
__ Branch(&exit_with_exception, eq, v0, Operand(zero_reg));
// Otherwise use return value as new stack pointer.
__ mov(backtrack_stackpointer(), v0);
- // Restore saved registers and continue.
- __ li(code_pointer(), Operand(masm_->CodeObject()), CONSTANT_SIZE);
- __ Ld(end_of_input_address(), MemOperand(frame_pointer(), kInputEnd));
SafeReturn();
}
diff --git a/deps/v8/src/regexp/mips64/regexp-macro-assembler-mips64.h b/deps/v8/src/regexp/mips64/regexp-macro-assembler-mips64.h
index da6f3f70c5..9b8c7c26d8 100644
--- a/deps/v8/src/regexp/mips64/regexp-macro-assembler-mips64.h
+++ b/deps/v8/src/regexp/mips64/regexp-macro-assembler-mips64.h
@@ -153,13 +153,13 @@ class V8_EXPORT_PRIVATE RegExpMacroAssemblerMIPS
// Register holding the current input position as negative offset from
// the end of the string.
- static constexpr Register current_input_offset() { return a6; }
+ static constexpr Register current_input_offset() { return s2; }
// The register containing the current character after LoadCurrentCharacter.
- static constexpr Register current_character() { return a7; }
+ static constexpr Register current_character() { return s5; }
// Register holding address of the end of the input string.
- static constexpr Register end_of_input_address() { return t2; }
+ static constexpr Register end_of_input_address() { return s6; }
// Register holding the frame address. Local variables, parameters and
// regexp registers are addressed relative to this.
@@ -167,10 +167,10 @@ class V8_EXPORT_PRIVATE RegExpMacroAssemblerMIPS
// The register containing the backtrack stack top. Provides a meaningful
// name to the register.
- static constexpr Register backtrack_stackpointer() { return t0; }
+ static constexpr Register backtrack_stackpointer() { return s7; }
// Register holding pointer to the current code object.
- static constexpr Register code_pointer() { return a5; }
+ static constexpr Register code_pointer() { return s1; }
// Byte size of chars in the string to match (decided by the Mode argument).
inline int char_size() const { return static_cast<int>(mode_); }
@@ -201,8 +201,8 @@ class V8_EXPORT_PRIVATE RegExpMacroAssemblerMIPS
void LoadRegExpStackPointerFromMemory(Register dst);
void StoreRegExpStackPointerToMemory(Register src, Register scratch);
- void PushRegExpBasePointer(Register scratch1, Register scratch2);
- void PopRegExpBasePointer(Register scratch1, Register scratch2);
+ void PushRegExpBasePointer(Register stack_pointer, Register scratch);
+ void PopRegExpBasePointer(Register stack_pointer_out, Register scratch);
Isolate* isolate() const { return masm_->isolate(); }
diff --git a/deps/v8/src/regexp/regexp-ast.cc b/deps/v8/src/regexp/regexp-ast.cc
index 0b22c06c97..6315057f02 100644
--- a/deps/v8/src/regexp/regexp-ast.cc
+++ b/deps/v8/src/regexp/regexp-ast.cc
@@ -65,12 +65,12 @@ Interval RegExpQuantifier::CaptureRegisters() {
bool RegExpAssertion::IsAnchoredAtStart() {
- return assertion_type() == RegExpAssertion::START_OF_INPUT;
+ return assertion_type() == RegExpAssertion::Type::START_OF_INPUT;
}
bool RegExpAssertion::IsAnchoredAtEnd() {
- return assertion_type() == RegExpAssertion::END_OF_INPUT;
+ return assertion_type() == RegExpAssertion::Type::END_OF_INPUT;
}
@@ -198,22 +198,22 @@ void* RegExpUnparser::VisitCharacterClass(RegExpCharacterClass* that,
void* RegExpUnparser::VisitAssertion(RegExpAssertion* that, void* data) {
switch (that->assertion_type()) {
- case RegExpAssertion::START_OF_INPUT:
+ case RegExpAssertion::Type::START_OF_INPUT:
os_ << "@^i";
break;
- case RegExpAssertion::END_OF_INPUT:
+ case RegExpAssertion::Type::END_OF_INPUT:
os_ << "@$i";
break;
- case RegExpAssertion::START_OF_LINE:
+ case RegExpAssertion::Type::START_OF_LINE:
os_ << "@^l";
break;
- case RegExpAssertion::END_OF_LINE:
+ case RegExpAssertion::Type::END_OF_LINE:
os_ << "@$l";
break;
- case RegExpAssertion::BOUNDARY:
+ case RegExpAssertion::Type::BOUNDARY:
os_ << "@b";
break;
- case RegExpAssertion::NON_BOUNDARY:
+ case RegExpAssertion::Type::NON_BOUNDARY:
os_ << "@B";
break;
}
diff --git a/deps/v8/src/regexp/regexp-ast.h b/deps/v8/src/regexp/regexp-ast.h
index 5b40badf9f..9716920d72 100644
--- a/deps/v8/src/regexp/regexp-ast.h
+++ b/deps/v8/src/regexp/regexp-ast.h
@@ -227,16 +227,16 @@ class RegExpAlternative final : public RegExpTree {
class RegExpAssertion final : public RegExpTree {
public:
- enum AssertionType {
+ enum class Type {
START_OF_LINE = 0,
START_OF_INPUT = 1,
END_OF_LINE = 2,
END_OF_INPUT = 3,
BOUNDARY = 4,
NON_BOUNDARY = 5,
- LAST_TYPE = NON_BOUNDARY,
+ LAST_ASSERTION_TYPE = NON_BOUNDARY,
};
- explicit RegExpAssertion(AssertionType type) : assertion_type_(type) {}
+ explicit RegExpAssertion(Type type) : assertion_type_(type) {}
DECL_BOILERPLATE(Assertion);
@@ -244,10 +244,10 @@ class RegExpAssertion final : public RegExpTree {
bool IsAnchoredAtEnd() override;
int min_match() override { return 0; }
int max_match() override { return 0; }
- AssertionType assertion_type() const { return assertion_type_; }
+ Type assertion_type() const { return assertion_type_; }
private:
- const AssertionType assertion_type_;
+ const Type assertion_type_;
};
class CharacterSet final {
diff --git a/deps/v8/src/regexp/regexp-compiler-tonode.cc b/deps/v8/src/regexp/regexp-compiler-tonode.cc
index 9abb25f44d..d8c0d24732 100644
--- a/deps/v8/src/regexp/regexp-compiler-tonode.cc
+++ b/deps/v8/src/regexp/regexp-compiler-tonode.cc
@@ -637,16 +637,16 @@ void RegExpDisjunction::RationalizeConsecutiveAtoms(RegExpCompiler* compiler) {
while (i < length) {
alternative = alternatives->at(i);
if (!alternative->IsAtom()) break;
- RegExpAtom* const atom = alternative->AsAtom();
+ RegExpAtom* const alt_atom = alternative->AsAtom();
#ifdef V8_INTL_SUPPORT
- icu::UnicodeString new_prefix(atom->data().at(0));
+ icu::UnicodeString new_prefix(alt_atom->data().at(0));
if (new_prefix != common_prefix) {
if (!IsIgnoreCase(compiler->flags())) break;
if (common_prefix.caseCompare(new_prefix, U_FOLD_CASE_DEFAULT) != 0)
break;
}
#else
- unibrow::uchar new_prefix = atom->data().at(0);
+ unibrow::uchar new_prefix = alt_atom->data().at(0);
if (new_prefix != common_prefix) {
if (!IsIgnoreCase(compiler->flags())) break;
unibrow::Mapping<unibrow::Ecma262Canonicalize>* canonicalize =
@@ -656,7 +656,7 @@ void RegExpDisjunction::RationalizeConsecutiveAtoms(RegExpCompiler* compiler) {
if (new_prefix != common_prefix) break;
}
#endif // V8_INTL_SUPPORT
- prefix_length = std::min(prefix_length, atom->length());
+ prefix_length = std::min(prefix_length, alt_atom->length());
i++;
}
if (i > first_with_prefix + 2) {
@@ -666,19 +666,20 @@ void RegExpDisjunction::RationalizeConsecutiveAtoms(RegExpCompiler* compiler) {
// common prefix if the terms were similar or presorted in the input.
// Find out how long the common prefix is.
int run_length = i - first_with_prefix;
- RegExpAtom* const atom = alternatives->at(first_with_prefix)->AsAtom();
+ RegExpAtom* const alt_atom =
+ alternatives->at(first_with_prefix)->AsAtom();
for (int j = 1; j < run_length && prefix_length > 1; j++) {
RegExpAtom* old_atom =
alternatives->at(j + first_with_prefix)->AsAtom();
for (int k = 1; k < prefix_length; k++) {
- if (atom->data().at(k) != old_atom->data().at(k)) {
+ if (alt_atom->data().at(k) != old_atom->data().at(k)) {
prefix_length = k;
break;
}
}
}
RegExpAtom* prefix =
- zone->New<RegExpAtom>(atom->data().SubVector(0, prefix_length));
+ zone->New<RegExpAtom>(alt_atom->data().SubVector(0, prefix_length));
ZoneList<RegExpTree*>* pair = zone->New<ZoneList<RegExpTree*>>(2, zone);
pair->Add(prefix, zone);
ZoneList<RegExpTree*>* suffixes =
@@ -741,12 +742,12 @@ void RegExpDisjunction::FixSingleCharacterDisjunctions(
while (i < length) {
alternative = alternatives->at(i);
if (!alternative->IsAtom()) break;
- RegExpAtom* const atom = alternative->AsAtom();
- if (atom->length() != 1) break;
+ RegExpAtom* const alt_atom = alternative->AsAtom();
+ if (alt_atom->length() != 1) break;
DCHECK_IMPLIES(IsUnicode(flags),
- !unibrow::Utf16::IsLeadSurrogate(atom->data().at(0)));
+ !unibrow::Utf16::IsLeadSurrogate(alt_atom->data().at(0)));
contains_trail_surrogate |=
- unibrow::Utf16::IsTrailSurrogate(atom->data().at(0));
+ unibrow::Utf16::IsTrailSurrogate(alt_atom->data().at(0));
i++;
}
if (i > first_in_run + 1) {
@@ -810,7 +811,7 @@ namespace {
// \B to (?<=\w)(?=\w)|(?<=\W)(?=\W)
RegExpNode* BoundaryAssertionAsLookaround(RegExpCompiler* compiler,
RegExpNode* on_success,
- RegExpAssertion::AssertionType type,
+ RegExpAssertion::Type type,
RegExpFlags flags) {
CHECK(NeedsUnicodeCaseEquivalents(flags));
Zone* zone = compiler->zone();
@@ -826,7 +827,7 @@ RegExpNode* BoundaryAssertionAsLookaround(RegExpCompiler* compiler,
for (int i = 0; i < 2; i++) {
bool lookbehind_for_word = i == 0;
bool lookahead_for_word =
- (type == RegExpAssertion::BOUNDARY) ^ lookbehind_for_word;
+ (type == RegExpAssertion::Type::BOUNDARY) ^ lookbehind_for_word;
// Look to the left.
RegExpLookaround::Builder lookbehind(lookbehind_for_word, on_success,
stack_register, position_register);
@@ -850,23 +851,24 @@ RegExpNode* RegExpAssertion::ToNode(RegExpCompiler* compiler,
Zone* zone = compiler->zone();
switch (assertion_type()) {
- case START_OF_LINE:
+ case Type::START_OF_LINE:
return AssertionNode::AfterNewline(on_success);
- case START_OF_INPUT:
+ case Type::START_OF_INPUT:
return AssertionNode::AtStart(on_success);
- case BOUNDARY:
+ case Type::BOUNDARY:
return NeedsUnicodeCaseEquivalents(compiler->flags())
- ? BoundaryAssertionAsLookaround(compiler, on_success, BOUNDARY,
- compiler->flags())
+ ? BoundaryAssertionAsLookaround(
+ compiler, on_success, Type::BOUNDARY, compiler->flags())
: AssertionNode::AtBoundary(on_success);
- case NON_BOUNDARY:
+ case Type::NON_BOUNDARY:
return NeedsUnicodeCaseEquivalents(compiler->flags())
- ? BoundaryAssertionAsLookaround(
- compiler, on_success, NON_BOUNDARY, compiler->flags())
+ ? BoundaryAssertionAsLookaround(compiler, on_success,
+ Type::NON_BOUNDARY,
+ compiler->flags())
: AssertionNode::AtNonBoundary(on_success);
- case END_OF_INPUT:
+ case Type::END_OF_INPUT:
return AssertionNode::AtEnd(on_success);
- case END_OF_LINE: {
+ case Type::END_OF_LINE: {
// Compile $ in multiline regexps as an alternation with a positive
// lookahead in one side and an end-of-input on the other side.
// We need two registers for the lookahead.
@@ -1037,11 +1039,12 @@ class AssertionSequenceRewriter final {
// Bitfield of all seen assertions.
uint32_t seen_assertions = 0;
- STATIC_ASSERT(RegExpAssertion::LAST_TYPE < kUInt32Size * kBitsPerByte);
+ STATIC_ASSERT(static_cast<int>(RegExpAssertion::Type::LAST_ASSERTION_TYPE) <
+ kUInt32Size * kBitsPerByte);
for (int i = from; i < to; i++) {
RegExpAssertion* t = terms_->at(i)->AsAssertion();
- const uint32_t bit = 1 << t->assertion_type();
+ const uint32_t bit = 1 << static_cast<int>(t->assertion_type());
if (seen_assertions & bit) {
// Fold duplicates.
@@ -1053,7 +1056,8 @@ class AssertionSequenceRewriter final {
// Collapse failures.
const uint32_t always_fails_mask =
- 1 << RegExpAssertion::BOUNDARY | 1 << RegExpAssertion::NON_BOUNDARY;
+ 1 << static_cast<int>(RegExpAssertion::Type::BOUNDARY) |
+ 1 << static_cast<int>(RegExpAssertion::Type::NON_BOUNDARY);
if ((seen_assertions & always_fails_mask) == always_fails_mask) {
ReplaceSequenceWithFailure(from, to);
}
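Since RegExpAssertion's assertion type is now a scoped enum (enum class Type), it no longer converts implicitly to int, which is why the bitfield code above gains explicit static_casts. A small self-contained illustration of that pattern, using a hypothetical enum rather than the V8 type:

    #include <cstdint>
    #include <iostream>

    // Sketch: scoped enums do not convert to int implicitly, so using them as
    // bit positions requires an explicit cast, as in the rewriter above.
    enum class Assertion : int {
      kStartOfLine,
      kStartOfInput,
      kBoundary,
      kNonBoundary,
      kLast = kNonBoundary,
    };

    constexpr uint32_t Bit(Assertion a) { return 1u << static_cast<int>(a); }

    int main() {
      static_assert(static_cast<int>(Assertion::kLast) < 32, "fits in uint32_t");
      uint32_t seen = Bit(Assertion::kBoundary) | Bit(Assertion::kNonBoundary);
      // \b and \B in sequence can never both hold; detect that combination.
      constexpr uint32_t kAlwaysFails =
          Bit(Assertion::kBoundary) | Bit(Assertion::kNonBoundary);
      std::cout << ((seen & kAlwaysFails) == kAlwaysFails) << "\n";  // prints 1
      return 0;
    }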
diff --git a/deps/v8/src/regexp/regexp-compiler.cc b/deps/v8/src/regexp/regexp-compiler.cc
index 54e7813c48..c3ecff9d43 100644
--- a/deps/v8/src/regexp/regexp-compiler.cc
+++ b/deps/v8/src/regexp/regexp-compiler.cc
@@ -1695,7 +1695,7 @@ void TextNode::GetQuickCheckDetails(QuickCheckDetails* details,
uint32_t common_bits = ~SmearBitsRight(differing_bits);
uint32_t bits = (first_from & common_bits);
for (int i = first_range + 1; i < ranges->length(); i++) {
- CharacterRange range = ranges->at(i);
+ range = ranges->at(i);
const base::uc32 from = range.from();
if (from > char_mask) continue;
const base::uc32 to =
@@ -1710,8 +1710,8 @@ void TextNode::GetQuickCheckDetails(QuickCheckDetails* details,
new_common_bits = ~SmearBitsRight(new_common_bits);
common_bits &= new_common_bits;
bits &= new_common_bits;
- uint32_t differing_bits = (from & common_bits) ^ bits;
- common_bits ^= differing_bits;
+ uint32_t new_differing_bits = (from & common_bits) ^ bits;
+ common_bits ^= new_differing_bits;
bits &= common_bits;
}
pos->mask = common_bits;
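The renamed new_differing_bits keeps the loop above from shadowing the outer differing_bits while it folds each range's first character into a single (mask, value) pair: bit positions on which the candidates disagree are dropped from the mask, so one AND-and-compare can cheaply reject most non-matching characters. A simplified sketch of that folding idea (not the exact V8 computation):

    #include <cstdint>
    #include <cstdio>
    #include <vector>

    // Sketch of the quick-check folding: keep only the bit positions on which
    // all candidate characters agree, so `(c & mask) == value` is a cheap
    // pre-filter; characters that fail it definitely cannot match.
    static uint32_t SmearBitsRight(uint32_t v) {
      v |= v >> 1; v |= v >> 2; v |= v >> 4; v |= v >> 8; v |= v >> 16;
      return v;
    }

    int main() {
      std::vector<uint32_t> chars = {'a', 'b', 'c', 'd'};  // 0x61..0x64
      uint32_t mask = ~0u;
      uint32_t value = chars[0];
      for (uint32_t c : chars) {
        uint32_t differing = (c & mask) ^ value;
        mask &= ~SmearBitsRight(differing);
        value &= mask;
      }
      std::printf("mask=%#x value=%#x\n", mask, value);
      for (uint32_t c : {'a', 'z', '0'}) {
        std::printf("%#x passes: %d\n", c, (c & mask) == value);
      }
      return 0;
    }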
@@ -3848,8 +3848,8 @@ void TextNode::FillInBMInfo(Isolate* isolate, int initial_offset, int budget,
int length = GetCaseIndependentLetters(
isolate, character, bm->max_char() == String::kMaxOneByteCharCode,
chars, 4);
- for (int j = 0; j < length; j++) {
- bm->Set(offset, chars[j]);
+ for (int k = 0; k < length; k++) {
+ bm->Set(offset, chars[k]);
}
} else {
if (character <= max_char) bm->Set(offset, character);
diff --git a/deps/v8/src/regexp/regexp-dotprinter.cc b/deps/v8/src/regexp/regexp-dotprinter.cc
index b4e92aebbf..bf651963af 100644
--- a/deps/v8/src/regexp/regexp-dotprinter.cc
+++ b/deps/v8/src/regexp/regexp-dotprinter.cc
@@ -130,8 +130,8 @@ void DotPrinterImpl::VisitText(TextNode* that) {
switch (elm.text_type()) {
case TextElement::ATOM: {
base::Vector<const base::uc16> data = elm.atom()->data();
- for (int i = 0; i < data.length(); i++) {
- os_ << static_cast<char>(data[i]);
+ for (int j = 0; j < data.length(); j++) {
+ os_ << static_cast<char>(data[j]);
}
break;
}
diff --git a/deps/v8/src/regexp/regexp-interpreter.cc b/deps/v8/src/regexp/regexp-interpreter.cc
index be3bb45a5f..e1549f95be 100644
--- a/deps/v8/src/regexp/regexp-interpreter.cc
+++ b/deps/v8/src/regexp/regexp-interpreter.cc
@@ -1075,6 +1075,9 @@ IrregexpInterpreter::Result IrregexpInterpreter::MatchInternal(
uint32_t backtrack_limit) {
DCHECK(subject_string.IsFlat());
+ // TODO(chromium:1262676): Remove this CHECK once fixed.
+ CHECK(code_array.IsByteArray());
+
// Note: Heap allocation *is* allowed in two situations if calling from
// Runtime:
// 1. When creating & throwing a stack overflow exception. The interpreter
diff --git a/deps/v8/src/regexp/regexp-parser.cc b/deps/v8/src/regexp/regexp-parser.cc
index fa7fd127c4..675df8de58 100644
--- a/deps/v8/src/regexp/regexp-parser.cc
+++ b/deps/v8/src/regexp/regexp-parser.cc
@@ -4,7 +4,9 @@
#include "src/regexp/regexp-parser.h"
+#include "src/base/small-vector.h"
#include "src/execution/isolate.h"
+#include "src/objects/string-inl.h"
#include "src/regexp/property-sequences.h"
#include "src/regexp/regexp-ast.h"
#include "src/regexp/regexp-macro-assembler.h"
@@ -12,6 +14,7 @@
#include "src/strings/char-predicates-inl.h"
#include "src/utils/ostreams.h"
#include "src/utils/utils.h"
+#include "src/zone/zone-allocator.h"
#include "src/zone/zone-list-inl.h"
#ifdef V8_INTL_SUPPORT
@@ -36,9 +39,9 @@ class RegExpBuilder {
RegExpBuilder(Zone* zone, RegExpFlags flags)
: zone_(zone),
flags_(flags),
- terms_(2, zone),
- text_(2, zone),
- alternatives_(2, zone) {}
+ terms_(ZoneAllocator<RegExpTree*>{zone}),
+ text_(ZoneAllocator<RegExpTree*>{zone}),
+ alternatives_(ZoneAllocator<RegExpTree*>{zone}) {}
void AddCharacter(base::uc16 character);
void AddUnicodeCharacter(base::uc32 character);
void AddEscapedUnicodeCharacter(base::uc32 character);
@@ -78,9 +81,12 @@ class RegExpBuilder {
const RegExpFlags flags_;
ZoneList<base::uc16>* characters_ = nullptr;
base::uc16 pending_surrogate_ = kNoPendingSurrogate;
- ZoneList<RegExpTree*> terms_;
- ZoneList<RegExpTree*> text_;
- ZoneList<RegExpTree*> alternatives_;
+
+ using SmallRegExpTreeVector =
+ base::SmallVector<RegExpTree*, 8, ZoneAllocator<RegExpTree*>>;
+ SmallRegExpTreeVector terms_;
+ SmallRegExpTreeVector text_;
+ SmallRegExpTreeVector alternatives_;
#ifdef DEBUG
enum {
ADD_NONE,
@@ -233,17 +239,18 @@ class RegExpParserImpl final {
RegExpTree* ReportError(RegExpError error);
void Advance();
void Advance(int dist);
+ void RewindByOneCodepoint(); // Rewinds to before the previous Advance().
void Reset(int pos);
// Reports whether the pattern might be used as a literal search string.
// Only use if the result of the parse is a single atom node.
- bool simple();
- bool contains_anchor() { return contains_anchor_; }
+ bool simple() const { return simple_; }
+ bool contains_anchor() const { return contains_anchor_; }
void set_contains_anchor() { contains_anchor_ = true; }
- int captures_started() { return captures_started_; }
- int position() { return next_pos_ - 1; }
- bool failed() { return failed_; }
- bool unicode() const { return IsUnicode(top_level_flags_); }
+ int captures_started() const { return captures_started_; }
+ int position() const { return next_pos_ - 1; }
+ bool failed() const { return failed_; }
+ bool unicode() const { return IsUnicode(top_level_flags_) || force_unicode_; }
static bool IsSyntaxCharacterOrSlash(base::uc32 c);
@@ -279,9 +286,9 @@ class RegExpParserImpl final {
Zone* zone() const { return zone_; }
- base::uc32 current() { return current_; }
- bool has_more() { return has_more_; }
- bool has_next() { return next_pos_ < input_length(); }
+ base::uc32 current() const { return current_; }
+ bool has_more() const { return has_more_; }
+ bool has_next() const { return next_pos_ < input_length(); }
base::uc32 Next();
template <bool update_position>
base::uc32 ReadNext();
@@ -300,6 +307,22 @@ class RegExpParserImpl final {
}
};
+ class ForceUnicodeScope final {
+ public:
+ explicit ForceUnicodeScope(RegExpParserImpl<CharT>* parser)
+ : parser_(parser) {
+ DCHECK(!parser_->force_unicode_);
+ parser_->force_unicode_ = true;
+ }
+ ~ForceUnicodeScope() {
+ DCHECK(parser_->force_unicode_);
+ parser_->force_unicode_ = false;
+ }
+
+ private:
+ RegExpParserImpl<CharT>* const parser_;
+ };
+
const DisallowGarbageCollection no_gc_;
Zone* const zone_;
RegExpError error_ = RegExpError::kNone;
@@ -311,6 +334,7 @@ class RegExpParserImpl final {
const int input_length_;
base::uc32 current_;
const RegExpFlags top_level_flags_;
+ bool force_unicode_ = false; // Force parser to act as if unicode were set.
int next_pos_;
int captures_started_;
int capture_count_; // Only valid after we have scanned for captures.
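ForceUnicodeScope is a small RAII guard: it sets force_unicode_ on construction and clears it on destruction, so unicode() reports true only while the scope is alive (used when parsing capture group names, which are parsed as if the /u flag were set). A generic sketch of the same guard pattern, with hypothetical names:

    #include <cassert>

    // Sketch: an RAII guard that force-enables a parser flag for one scope and
    // restores it on every exit path (return, exception, end of block).
    class Parser {
     public:
      bool unicode() const { return unicode_flag_ || force_unicode_; }

      class ForceUnicodeScope {
       public:
        explicit ForceUnicodeScope(Parser* parser) : parser_(parser) {
          assert(!parser_->force_unicode_);
          parser_->force_unicode_ = true;
        }
        ~ForceUnicodeScope() {
          assert(parser_->force_unicode_);
          parser_->force_unicode_ = false;
        }

       private:
        Parser* const parser_;
      };

     private:
      bool unicode_flag_ = false;   // the pattern's /u flag
      bool force_unicode_ = false;  // temporarily forced, e.g. in group names
    };

    int main() {
      Parser p;
      assert(!p.unicode());
      {
        Parser::ForceUnicodeScope force_unicode(&p);
        assert(p.unicode());  // surrogate pairs / \u{...} accepted here
      }
      assert(!p.unicode());
      return 0;
    }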
@@ -422,6 +446,17 @@ void RegExpParserImpl<CharT>::Advance() {
}
template <class CharT>
+void RegExpParserImpl<CharT>::RewindByOneCodepoint() {
+ if (current() == kEndMarker) return;
+ // Rewinds by one code point, i.e.: two code units if `current` is outside
+ // the basic multilingual plane (= composed of a lead and trail surrogate),
+ // or one code unit otherwise.
+ const int rewind_by =
+ current() > unibrow::Utf16::kMaxNonSurrogateCharCode ? -2 : -1;
+ Advance(rewind_by); // Undo the last Advance.
+}
+
+template <class CharT>
void RegExpParserImpl<CharT>::Reset(int pos) {
next_pos_ = pos;
has_more_ = (pos < input_length());
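RewindByOneCodepoint relies on the UTF-16 encoding rule that code points above U+FFFF occupy two code units (a lead/trail surrogate pair) while everything in the BMP takes one, hence the rewind by -2 or -1. A tiny stand-alone check of that rule:

    #include <cstdint>
    #include <cstdio>

    // Sketch: number of UTF-16 code units needed for a code point. Code points
    // beyond the BMP (> 0xFFFF) are encoded as a surrogate pair, so rewinding
    // past them means stepping back two units instead of one.
    constexpr uint32_t kMaxNonSurrogateCharCode = 0xFFFF;

    int Utf16Width(uint32_t code_point) {
      return code_point > kMaxNonSurrogateCharCode ? 2 : 1;
    }

    int main() {
      std::printf("U+0061  -> %d unit(s)\n", Utf16Width(0x0061));   // 1
      std::printf("U+00E9  -> %d unit(s)\n", Utf16Width(0x00E9));   // 1
      std::printf("U+1F600 -> %d unit(s)\n", Utf16Width(0x1F600));  // 2
      return 0;
    }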
@@ -435,11 +470,6 @@ void RegExpParserImpl<CharT>::Advance(int dist) {
}
template <class CharT>
-bool RegExpParserImpl<CharT>::simple() {
- return simple_;
-}
-
-template <class CharT>
bool RegExpParserImpl<CharT>::IsSyntaxCharacterOrSlash(base::uc32 c) {
switch (c) {
case '^':
@@ -581,16 +611,16 @@ RegExpTree* RegExpParserImpl<CharT>::ParseDisjunction() {
case '^': {
Advance();
builder->AddAssertion(zone()->template New<RegExpAssertion>(
- builder->multiline() ? RegExpAssertion::START_OF_LINE
- : RegExpAssertion::START_OF_INPUT));
+ builder->multiline() ? RegExpAssertion::Type::START_OF_LINE
+ : RegExpAssertion::Type::START_OF_INPUT));
set_contains_anchor();
continue;
}
case '$': {
Advance();
- RegExpAssertion::AssertionType assertion_type =
- builder->multiline() ? RegExpAssertion::END_OF_LINE
- : RegExpAssertion::END_OF_INPUT;
+ RegExpAssertion::Type assertion_type =
+ builder->multiline() ? RegExpAssertion::Type::END_OF_LINE
+ : RegExpAssertion::Type::END_OF_INPUT;
builder->AddAssertion(
zone()->template New<RegExpAssertion>(assertion_type));
continue;
@@ -698,12 +728,12 @@ RegExpTree* RegExpParserImpl<CharT>::ParseDisjunction() {
case 'b':
Advance(2);
builder->AddAssertion(zone()->template New<RegExpAssertion>(
- RegExpAssertion::BOUNDARY));
+ RegExpAssertion::Type::BOUNDARY));
continue;
case 'B':
Advance(2);
builder->AddAssertion(zone()->template New<RegExpAssertion>(
- RegExpAssertion::NON_BOUNDARY));
+ RegExpAssertion::Type::NON_BOUNDARY));
continue;
// AtomEscape ::
// CharacterClassEscape
@@ -1047,48 +1077,73 @@ void push_code_unit(ZoneVector<base::uc16>* v, uint32_t code_unit) {
template <class CharT>
const ZoneVector<base::uc16>* RegExpParserImpl<CharT>::ParseCaptureGroupName() {
+ // Due to special Advance requirements (see the next comment), rewind by one
+ // such that names starting with a surrogate pair are parsed correctly for
+ // patterns where the unicode flag is unset.
+ //
+ // Note that we use this odd pattern of rewinding the last advance in order
+ // to adhere to the common parser behavior of expecting `current` to point at
+ // the first candidate character for a function (e.g. when entering ParseFoo,
+ // `current` should point at the first character of Foo).
+ RewindByOneCodepoint();
+
ZoneVector<base::uc16>* name =
zone()->template New<ZoneVector<base::uc16>>(zone());
- bool at_start = true;
- while (true) {
- base::uc32 c = current();
- Advance();
-
- // Convert unicode escapes.
- if (c == '\\' && current() == 'u') {
+ {
+ // Advance behavior inside this function is tricky since
+ // RegExpIdentifierName explicitly enables unicode (in spec terms, sets +U)
+ // and thus allows surrogate pairs and \u{}-style escapes even in
+ // non-unicode patterns. Therefore Advance within the capture group name
+ // has to force-enable unicode, and outside the name revert to default
+ // behavior.
+ ForceUnicodeScope force_unicode(this);
+
+ bool at_start = true;
+ while (true) {
Advance();
- if (!ParseUnicodeEscape(&c)) {
- ReportError(RegExpError::kInvalidUnicodeEscape);
- return nullptr;
- }
- }
+ base::uc32 c = current();
- // The backslash char is misclassified as both ID_Start and ID_Continue.
- if (c == '\\') {
- ReportError(RegExpError::kInvalidCaptureGroupName);
- return nullptr;
- }
+ // Convert unicode escapes.
+ if (c == '\\' && Next() == 'u') {
+ Advance(2);
+ if (!ParseUnicodeEscape(&c)) {
+ ReportError(RegExpError::kInvalidUnicodeEscape);
+ return nullptr;
+ }
+ RewindByOneCodepoint();
+ }
- if (at_start) {
- if (!IsIdentifierStart(c)) {
+ // The backslash char is misclassified as both ID_Start and ID_Continue.
+ if (c == '\\') {
ReportError(RegExpError::kInvalidCaptureGroupName);
return nullptr;
}
- push_code_unit(name, c);
- at_start = false;
- } else {
- if (c == '>') {
- break;
- } else if (IsIdentifierPart(c)) {
+
+ if (at_start) {
+ if (!IsIdentifierStart(c)) {
+ ReportError(RegExpError::kInvalidCaptureGroupName);
+ return nullptr;
+ }
push_code_unit(name, c);
+ at_start = false;
} else {
- ReportError(RegExpError::kInvalidCaptureGroupName);
- return nullptr;
+ if (c == '>') {
+ break;
+ } else if (IsIdentifierPart(c)) {
+ push_code_unit(name, c);
+ } else {
+ ReportError(RegExpError::kInvalidCaptureGroupName);
+ return nullptr;
+ }
}
}
}
+ // This final advance goes back into the state of pointing at the next
+ // relevant char, which the rest of the parser expects. See also the previous
+ // comments in this function.
+ Advance();
return name;
}
@@ -2044,34 +2099,32 @@ void RegExpBuilder::FlushPendingSurrogate() {
}
}
-
void RegExpBuilder::FlushCharacters() {
FlushPendingSurrogate();
pending_empty_ = false;
if (characters_ != nullptr) {
RegExpTree* atom = zone()->New<RegExpAtom>(characters_->ToConstVector());
characters_ = nullptr;
- text_.Add(atom, zone());
+ text_.emplace_back(atom);
LAST(ADD_ATOM);
}
}
-
void RegExpBuilder::FlushText() {
FlushCharacters();
- int num_text = text_.length();
+ size_t num_text = text_.size();
if (num_text == 0) {
return;
} else if (num_text == 1) {
- terms_.Add(text_.last(), zone());
+ terms_.emplace_back(text_.back());
} else {
RegExpText* text = zone()->New<RegExpText>(zone());
- for (int i = 0; i < num_text; i++) {
+ for (size_t i = 0; i < num_text; i++) {
text_[i]->AppendToText(text, zone());
}
- terms_.Add(text, zone());
+ terms_.emplace_back(text);
}
- text_.Rewind(0);
+ text_.clear();
}
void RegExpBuilder::AddCharacter(base::uc16 c) {
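With the switch from ZoneList to base::SmallVector (backed by a ZoneAllocator), the builder's call sites move from the Add/last/RemoveLast/Rewind(0) vocabulary to the standard emplace_back/back/pop_back/clear one, and small term lists presumably stay in inline storage. A rough behavioral sketch with std::vector standing in for the small vector (the inline-storage and zone-allocation details are omitted):

    #include <iostream>
    #include <string>
    #include <vector>

    // Sketch: the ZoneList-style API mapped onto standard vector operations,
    // mirroring the call-site changes in the reworked RegExpBuilder.
    int main() {
      std::vector<std::string> terms;

      terms.emplace_back("atom1");      // was: terms_.Add(atom, zone())
      terms.emplace_back("atom2");

      std::string last = terms.back();  // was: terms_.last() / RemoveLast()
      terms.pop_back();

      std::cout << "kept: " << terms.size() << ", popped: " << last << "\n";

      terms.clear();                    // was: terms_.Rewind(0)
      std::cout << "after clear: " << terms.size() << "\n";
      return 0;
    }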
@@ -2112,7 +2165,6 @@ void RegExpBuilder::AddEscapedUnicodeCharacter(base::uc32 character) {
void RegExpBuilder::AddEmpty() { pending_empty_ = true; }
-
void RegExpBuilder::AddCharacterClass(RegExpCharacterClass* cc) {
if (NeedsDesugaringForUnicode(cc)) {
// With /u, character class needs to be desugared, so it
@@ -2135,50 +2187,46 @@ void RegExpBuilder::AddAtom(RegExpTree* term) {
}
if (term->IsTextElement()) {
FlushCharacters();
- text_.Add(term, zone());
+ text_.emplace_back(term);
} else {
FlushText();
- terms_.Add(term, zone());
+ terms_.emplace_back(term);
}
LAST(ADD_ATOM);
}
-
void RegExpBuilder::AddTerm(RegExpTree* term) {
FlushText();
- terms_.Add(term, zone());
+ terms_.emplace_back(term);
LAST(ADD_ATOM);
}
-
void RegExpBuilder::AddAssertion(RegExpTree* assert) {
FlushText();
- terms_.Add(assert, zone());
+ terms_.emplace_back(assert);
LAST(ADD_ASSERT);
}
-
void RegExpBuilder::NewAlternative() { FlushTerms(); }
-
void RegExpBuilder::FlushTerms() {
FlushText();
- int num_terms = terms_.length();
+ size_t num_terms = terms_.size();
RegExpTree* alternative;
if (num_terms == 0) {
alternative = zone()->New<RegExpEmpty>();
} else if (num_terms == 1) {
- alternative = terms_.last();
+ alternative = terms_.back();
} else {
- alternative = zone()->New<RegExpAlternative>(
- zone()->New<ZoneList<RegExpTree*>>(terms_, zone()));
+ alternative =
+ zone()->New<RegExpAlternative>(zone()->New<ZoneList<RegExpTree*>>(
+ base::VectorOf(terms_.begin(), terms_.size()), zone()));
}
- alternatives_.Add(alternative, zone());
- terms_.Rewind(0);
+ alternatives_.emplace_back(alternative);
+ terms_.clear();
LAST(ADD_NONE);
}
-
bool RegExpBuilder::NeedsDesugaringForUnicode(RegExpCharacterClass* cc) {
if (!unicode()) return false;
// TODO(yangguo): we could be smarter than this. Case-insensitivity does not
@@ -2214,11 +2262,11 @@ bool RegExpBuilder::NeedsDesugaringForIgnoreCase(base::uc32 c) {
RegExpTree* RegExpBuilder::ToRegExp() {
FlushTerms();
- int num_alternatives = alternatives_.length();
+ size_t num_alternatives = alternatives_.size();
if (num_alternatives == 0) return zone()->New<RegExpEmpty>();
- if (num_alternatives == 1) return alternatives_.last();
- return zone()->New<RegExpDisjunction>(
- zone()->New<ZoneList<RegExpTree*>>(alternatives_, zone()));
+ if (num_alternatives == 1) return alternatives_.back();
+ return zone()->New<RegExpDisjunction>(zone()->New<ZoneList<RegExpTree*>>(
+ base::VectorOf(alternatives_.begin(), alternatives_.size()), zone()));
}
bool RegExpBuilder::AddQuantifierToAtom(
@@ -2237,19 +2285,21 @@ bool RegExpBuilder::AddQuantifierToAtom(
if (num_chars > 1) {
base::Vector<const base::uc16> prefix =
char_vector.SubVector(0, num_chars - 1);
- text_.Add(zone()->New<RegExpAtom>(prefix), zone());
+ text_.emplace_back(zone()->New<RegExpAtom>(prefix));
char_vector = char_vector.SubVector(num_chars - 1, num_chars);
}
characters_ = nullptr;
atom = zone()->New<RegExpAtom>(char_vector);
FlushText();
- } else if (text_.length() > 0) {
+ } else if (text_.size() > 0) {
DCHECK(last_added_ == ADD_ATOM);
- atom = text_.RemoveLast();
+ atom = text_.back();
+ text_.pop_back();
FlushText();
- } else if (terms_.length() > 0) {
+ } else if (terms_.size() > 0) {
DCHECK(last_added_ == ADD_ATOM);
- atom = terms_.RemoveLast();
+ atom = terms_.back();
+ terms_.pop_back();
if (atom->IsLookaround()) {
// With /u, lookarounds are not quantifiable.
if (unicode()) return false;
@@ -2264,15 +2314,15 @@ bool RegExpBuilder::AddQuantifierToAtom(
if (min == 0) {
return true;
}
- terms_.Add(atom, zone());
+ terms_.emplace_back(atom);
return true;
}
} else {
// Only call immediately after adding an atom or character!
UNREACHABLE();
}
- terms_.Add(zone()->New<RegExpQuantifier>(min, max, quantifier_type, atom),
- zone());
+ terms_.emplace_back(
+ zone()->New<RegExpQuantifier>(min, max, quantifier_type, atom));
LAST(ADD_TERM);
return true;
}
diff --git a/deps/v8/src/roots/roots.h b/deps/v8/src/roots/roots.h
index 1a6c531a7f..623ee23298 100644
--- a/deps/v8/src/roots/roots.h
+++ b/deps/v8/src/roots/roots.h
@@ -111,11 +111,12 @@ class Symbol;
V(Map, source_text_module_map, SourceTextModuleMap) \
V(Map, swiss_name_dictionary_map, SwissNameDictionaryMap) \
V(Map, synthetic_module_map, SyntheticModuleMap) \
+ IF_WASM(V, Map, wasm_api_function_ref_map, WasmApiFunctionRefMap) \
IF_WASM(V, Map, wasm_capi_function_data_map, WasmCapiFunctionDataMap) \
IF_WASM(V, Map, wasm_exported_function_data_map, \
WasmExportedFunctionDataMap) \
+ IF_WASM(V, Map, wasm_internal_function_map, WasmInternalFunctionMap) \
IF_WASM(V, Map, wasm_js_function_data_map, WasmJSFunctionDataMap) \
- IF_WASM(V, Map, wasm_api_function_ref_map, WasmApiFunctionRefMap) \
IF_WASM(V, Map, wasm_type_info_map, WasmTypeInfoMap) \
V(Map, weak_fixed_array_map, WeakFixedArrayMap) \
V(Map, weak_array_list_map, WeakArrayListMap) \
@@ -142,6 +143,14 @@ class Symbol;
UncachedExternalOneByteInternalizedStringMap) \
V(Map, uncached_external_one_byte_string_map, \
UncachedExternalOneByteStringMap) \
+ V(Map, shared_one_byte_string_map, SharedOneByteStringMap) \
+ V(Map, shared_string_map, SharedStringMap) \
+ V(Map, shared_thin_one_byte_string_map, SharedThinOneByteStringMap) \
+ V(Map, shared_thin_string_map, SharedThinStringMap) \
+ V(Map, seq_string_migration_sentinel_map, \
+ TwoByteSeqStringMigrationSentinelMap) \
+ V(Map, one_byte_seq_string_migration_sentinel_map, \
+ OneByteSeqStringMigrationSentinelMap) \
/* Oddball maps */ \
V(Map, undefined_map, UndefinedMap) \
V(Map, the_hole_map, TheHoleMap) \
@@ -308,7 +317,8 @@ class Symbol;
V(Object, pending_optimize_for_test_bytecode, \
PendingOptimizeForTestBytecode) \
V(ArrayList, basic_block_profiling_data, BasicBlockProfilingData) \
- V(WeakArrayList, shared_wasm_memories, SharedWasmMemories)
+ V(WeakArrayList, shared_wasm_memories, SharedWasmMemories) \
+ IF_WASM(V, HeapObject, active_continuation, ActiveContinuation)
// Entries in this list are limited to Smis and are not visited during GC.
#define SMI_ROOT_LIST(V) \
diff --git a/deps/v8/src/runtime/runtime-classes.cc b/deps/v8/src/runtime/runtime-classes.cc
index b584a7de99..11992b5a96 100644
--- a/deps/v8/src/runtime/runtime-classes.cc
+++ b/deps/v8/src/runtime/runtime-classes.cc
@@ -299,7 +299,7 @@ bool AddDescriptorsByTemplate(
for (InternalIndex i : InternalIndex::Range(nof_descriptors)) {
PropertyDetails details = descriptors_template->GetDetails(i);
if (details.location() == PropertyLocation::kDescriptor &&
- details.kind() == kData) {
+ details.kind() == PropertyKind::kData) {
count++;
}
}
@@ -321,14 +321,14 @@ bool AddDescriptorsByTemplate(
DCHECK(name.IsUniqueName());
PropertyDetails details = descriptors_template->GetDetails(i);
if (details.location() == PropertyLocation::kDescriptor) {
- if (details.kind() == kData) {
+ if (details.kind() == PropertyKind::kData) {
if (value.IsSmi()) {
value = GetMethodWithSharedName(isolate, args, value);
}
details = details.CopyWithRepresentation(
value.OptimalRepresentation(isolate));
} else {
- DCHECK_EQ(kAccessor, details.kind());
+ DCHECK_EQ(PropertyKind::kAccessor, details.kind());
if (value.IsAccessorPair()) {
AccessorPair pair = AccessorPair::cast(value);
Object tmp = pair.getter();
@@ -346,7 +346,7 @@ bool AddDescriptorsByTemplate(
}
DCHECK(value.FitsRepresentation(details.representation()));
if (details.location() == PropertyLocation::kDescriptor &&
- details.kind() == kData) {
+ details.kind() == PropertyKind::kData) {
details =
PropertyDetails(details.kind(), details.attributes(),
PropertyLocation::kField, PropertyConstness::kConst,
diff --git a/deps/v8/src/runtime/runtime-compiler.cc b/deps/v8/src/runtime/runtime-compiler.cc
index 54924e0f7b..a3f7872bca 100644
--- a/deps/v8/src/runtime/runtime-compiler.cc
+++ b/deps/v8/src/runtime/runtime-compiler.cc
@@ -45,7 +45,8 @@ Object CompileOptimized(Isolate* isolate, Handle<JSFunction> function,
// As a post-condition of CompileOptimized, the function *must* be compiled,
// i.e. the installed Code object must not be the CompileLazy builtin.
DCHECK(function->is_compiled());
- return function->code();
+ // TODO(v8:11880): avoid roundtrips between cdc and code.
+ return ToCodeT(function->code());
}
} // namespace
@@ -75,7 +76,8 @@ RUNTIME_FUNCTION(Runtime_CompileLazy) {
return ReadOnlyRoots(isolate).exception();
}
DCHECK(function->is_compiled());
- return function->code();
+ // TODO(v8:11880): avoid roundtrips between cdc and code.
+ return ToCodeT(function->code());
}
RUNTIME_FUNCTION(Runtime_InstallBaselineCode) {
@@ -89,7 +91,7 @@ RUNTIME_FUNCTION(Runtime_InstallBaselineCode) {
DCHECK(!function->HasOptimizationMarker());
DCHECK(!function->has_feedback_vector());
JSFunction::EnsureFeedbackVector(function, &is_compiled_scope);
- Code baseline_code = sfi->baseline_code(kAcquireLoad);
+ CodeT baseline_code = sfi->baseline_code(kAcquireLoad);
function->set_code(baseline_code);
return baseline_code;
}
@@ -125,7 +127,8 @@ RUNTIME_FUNCTION(Runtime_FunctionFirstExecution) {
function->feedback_vector().ClearOptimizationMarker();
// Return the code to continue execution, we don't care at this point whether
// this is for lazy compilation or has been eagerly compiled.
- return function->code();
+ // TODO(v8:11880): avoid roundtrips between cdc and code.
+ return ToCodeT(function->code());
}
RUNTIME_FUNCTION(Runtime_HealOptimizedCodeSlot) {
@@ -138,7 +141,8 @@ RUNTIME_FUNCTION(Runtime_HealOptimizedCodeSlot) {
function->feedback_vector().EvictOptimizedCodeMarkedForDeoptimization(
function->raw_feedback_cell(), function->shared(),
"Runtime_HealOptimizedCodeSlot");
- return function->code();
+ // TODO(v8:11880): avoid roundtrips between cdc and code.
+ return ToCodeT(function->code());
}
RUNTIME_FUNCTION(Runtime_InstantiateAsmJs) {
@@ -294,7 +298,7 @@ BytecodeOffset DetermineEntryAndDisarmOSRForUnoptimized(
} // namespace
RUNTIME_FUNCTION(Runtime_CompileForOnStackReplacement) {
- HandleScope scope(isolate);
+ HandleScope handle_scope(isolate);
DCHECK_EQ(0, args.length());
// Only reachable when OSR is enabled.
diff --git a/deps/v8/src/runtime/runtime-debug.cc b/deps/v8/src/runtime/runtime-debug.cc
index 588dce9222..38f3ef7d90 100644
--- a/deps/v8/src/runtime/runtime-debug.cc
+++ b/deps/v8/src/runtime/runtime-debug.cc
@@ -137,7 +137,9 @@ RUNTIME_FUNCTION(Runtime_HandleDebuggerStatement) {
SealHandleScope shs(isolate);
DCHECK_EQ(0, args.length());
if (isolate->debug()->break_points_active()) {
- isolate->debug()->HandleDebugBreak(kIgnoreIfTopFrameBlackboxed);
+ isolate->debug()->HandleDebugBreak(
+ kIgnoreIfTopFrameBlackboxed,
+ v8::debug::BreakReasons({v8::debug::BreakReason::kDebuggerStatement}));
}
return isolate->stack_guard()->HandleInterrupts();
}
@@ -146,7 +148,11 @@ RUNTIME_FUNCTION(Runtime_ScheduleBreak) {
SealHandleScope shs(isolate);
DCHECK_EQ(0, args.length());
isolate->RequestInterrupt(
- [](v8::Isolate* isolate, void*) { v8::debug::BreakRightNow(isolate); },
+ [](v8::Isolate* isolate, void*) {
+ v8::debug::BreakRightNow(
+ isolate,
+ v8::debug::BreakReasons({v8::debug::BreakReason::kScheduled}));
+ },
nullptr);
return ReadOnlyRoots(isolate).undefined_value();
}
diff --git a/deps/v8/src/runtime/runtime-internal.cc b/deps/v8/src/runtime/runtime-internal.cc
index da08f4e95a..dce6cc4086 100644
--- a/deps/v8/src/runtime/runtime-internal.cc
+++ b/deps/v8/src/runtime/runtime-internal.cc
@@ -78,6 +78,12 @@ RUNTIME_FUNCTION(Runtime_ReThrow) {
return isolate->ReThrow(args[0]);
}
+RUNTIME_FUNCTION(Runtime_ReThrowWithMessage) {
+ HandleScope scope(isolate);
+ DCHECK_EQ(2, args.length());
+ return isolate->ReThrow(args[0], args[1]);
+}
+
RUNTIME_FUNCTION(Runtime_ThrowStackOverflow) {
SealHandleScope shs(isolate);
DCHECK_LE(0, args.length());
@@ -458,7 +464,8 @@ RUNTIME_FUNCTION(Runtime_AllocateInYoungGeneration) {
DCHECK_EQ(2, args.length());
CONVERT_SMI_ARG_CHECKED(size, 0);
CONVERT_SMI_ARG_CHECKED(flags, 1);
- bool double_align = AllocateDoubleAlignFlag::decode(flags);
+ AllocationAlignment alignment =
+ AllocateDoubleAlignFlag::decode(flags) ? kDoubleAligned : kTaggedAligned;
bool allow_large_object_allocation =
AllowLargeObjectAllocationFlag::decode(flags);
CHECK(IsAligned(size, kTaggedSize));
@@ -479,9 +486,9 @@ RUNTIME_FUNCTION(Runtime_AllocateInYoungGeneration) {
// TODO(v8:9472): Until double-aligned allocation is fixed for new-space
// allocations, don't request it.
- double_align = false;
+ alignment = kTaggedAligned;
- return *isolate->factory()->NewFillerObject(size, double_align,
+ return *isolate->factory()->NewFillerObject(size, alignment,
AllocationType::kYoung,
AllocationOrigin::kGeneratedCode);
}
@@ -491,7 +498,8 @@ RUNTIME_FUNCTION(Runtime_AllocateInOldGeneration) {
DCHECK_EQ(2, args.length());
CONVERT_SMI_ARG_CHECKED(size, 0);
CONVERT_SMI_ARG_CHECKED(flags, 1);
- bool double_align = AllocateDoubleAlignFlag::decode(flags);
+ AllocationAlignment alignment =
+ AllocateDoubleAlignFlag::decode(flags) ? kDoubleAligned : kTaggedAligned;
bool allow_large_object_allocation =
AllowLargeObjectAllocationFlag::decode(flags);
CHECK(IsAligned(size, kTaggedSize));
@@ -499,9 +507,8 @@ RUNTIME_FUNCTION(Runtime_AllocateInOldGeneration) {
if (!allow_large_object_allocation) {
CHECK(size <= kMaxRegularHeapObjectSize);
}
- return *isolate->factory()->NewFillerObject(size, double_align,
- AllocationType::kOld,
- AllocationOrigin::kGeneratedCode);
+ return *isolate->factory()->NewFillerObject(
+ size, alignment, AllocationType::kOld, AllocationOrigin::kGeneratedCode);
}
RUNTIME_FUNCTION(Runtime_AllocateByteArray) {
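Both allocation runtime functions now decode the packed flags word directly into an AllocationAlignment value instead of carrying a double_align bool. A minimal sketch of that flag-to-enum decode, with illustrative bit positions rather than V8's actual BitField layout:

    #include <cassert>
    #include <cstdint>

    // Sketch: decode a packed flags word into an alignment enum plus a bool,
    // mirroring the AllocateDoubleAlignFlag / AllowLargeObjectAllocationFlag
    // decode in the runtime functions above (names are illustrative only).
    enum class AllocationAlignment { kTaggedAligned, kDoubleAligned };

    constexpr uint32_t kDoubleAlignBit = 1u << 0;
    constexpr uint32_t kAllowLargeObjectBit = 1u << 1;

    struct DecodedFlags {
      AllocationAlignment alignment;
      bool allow_large_object_allocation;
    };

    DecodedFlags DecodeAllocationFlags(uint32_t flags) {
      return {
          (flags & kDoubleAlignBit) ? AllocationAlignment::kDoubleAligned
                                    : AllocationAlignment::kTaggedAligned,
          (flags & kAllowLargeObjectBit) != 0,
      };
    }

    int main() {
      DecodedFlags f = DecodeAllocationFlags(kDoubleAlignBit);
      assert(f.alignment == AllocationAlignment::kDoubleAligned);
      assert(!f.allow_large_object_allocation);

      f = DecodeAllocationFlags(kAllowLargeObjectBit);
      assert(f.alignment == AllocationAlignment::kTaggedAligned);
      assert(f.allow_large_object_allocation);
      return 0;
    }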
diff --git a/deps/v8/src/runtime/runtime-literals.cc b/deps/v8/src/runtime/runtime-literals.cc
index 958bc2277f..7d4e0e0924 100644
--- a/deps/v8/src/runtime/runtime-literals.cc
+++ b/deps/v8/src/runtime/runtime-literals.cc
@@ -111,7 +111,7 @@ MaybeHandle<JSObject> JSObjectWalkVisitor<ContextObject>::StructureWalk(
for (InternalIndex i : copy->map(isolate).IterateOwnDescriptors()) {
PropertyDetails details = descriptors->GetDetails(i);
DCHECK_EQ(PropertyLocation::kField, details.location());
- DCHECK_EQ(kData, details.kind());
+ DCHECK_EQ(PropertyKind::kData, details.kind());
FieldIndex index = FieldIndex::ForPropertyIndex(
copy->map(isolate), details.field_index(),
details.representation());
@@ -410,16 +410,16 @@ Handle<JSObject> CreateObjectLiteral(
if (value->IsHeapObject()) {
if (HeapObject::cast(*value).IsArrayBoilerplateDescription(isolate)) {
- Handle<ArrayBoilerplateDescription> boilerplate =
+ Handle<ArrayBoilerplateDescription> array_boilerplate =
Handle<ArrayBoilerplateDescription>::cast(value);
- value = CreateArrayLiteral(isolate, boilerplate, allocation);
+ value = CreateArrayLiteral(isolate, array_boilerplate, allocation);
} else if (HeapObject::cast(*value).IsObjectBoilerplateDescription(
isolate)) {
- Handle<ObjectBoilerplateDescription> boilerplate =
+ Handle<ObjectBoilerplateDescription> object_boilerplate =
Handle<ObjectBoilerplateDescription>::cast(value);
- value = CreateObjectLiteral(isolate, boilerplate, boilerplate->flags(),
- allocation);
+ value = CreateObjectLiteral(isolate, object_boilerplate,
+ object_boilerplate->flags(), allocation);
}
}
diff --git a/deps/v8/src/runtime/runtime-object.cc b/deps/v8/src/runtime/runtime-object.cc
index a6587da1a3..47bb8f0f56 100644
--- a/deps/v8/src/runtime/runtime-object.cc
+++ b/deps/v8/src/runtime/runtime-object.cc
@@ -49,9 +49,10 @@ MaybeHandle<Object> Runtime::GetObjectProperty(
if (!it.IsFound() && key->IsSymbol() &&
Symbol::cast(*key).is_private_name()) {
- MessageTemplate message = Symbol::cast(*key).IsPrivateBrand()
- ? MessageTemplate::kInvalidPrivateBrand
- : MessageTemplate::kInvalidPrivateMemberRead;
+ MessageTemplate message =
+ Symbol::cast(*key).IsPrivateBrand()
+ ? MessageTemplate::kInvalidPrivateBrandInstance
+ : MessageTemplate::kInvalidPrivateMemberRead;
THROW_NEW_ERROR(isolate, NewTypeError(message, key, lookup_start_object),
Object);
}
@@ -124,11 +125,11 @@ void GeneralizeAllTransitionsToFieldAsMutable(Isolate* isolate, Handle<Map> map,
DCHECK_EQ(*name, target.GetLastDescriptorName(isolate));
PropertyDetails details = target.GetLastDescriptorDetails(isolate);
// Currently, we track constness only for fields.
- if (details.kind() == kData &&
+ if (details.kind() == PropertyKind::kData &&
details.constness() == PropertyConstness::kConst) {
target_maps[target_maps_count++] = handle(target, isolate);
}
- DCHECK_IMPLIES(details.kind() == kAccessor,
+ DCHECK_IMPLIES(details.kind() == PropertyKind::kAccessor,
details.constness() == PropertyConstness::kConst);
},
&no_gc);
@@ -459,7 +460,8 @@ RUNTIME_FUNCTION(Runtime_AddDictionaryProperty) {
DCHECK(name->IsUniqueName());
PropertyDetails property_details(
- kData, NONE, PropertyDetails::kConstIfDictConstnessTracking);
+ PropertyKind::kData, NONE,
+ PropertyDetails::kConstIfDictConstnessTracking);
if (V8_ENABLE_SWISS_NAME_DICTIONARY_BOOL) {
Handle<SwissNameDictionary> dictionary(
receiver->property_dictionary_swiss(), isolate);
@@ -777,7 +779,7 @@ RUNTIME_FUNCTION(Runtime_GetProperty) {
InternalIndex entry = dictionary.FindEntry(isolate, key);
if (entry.is_found()) {
PropertyCell cell = dictionary.CellAt(entry);
- if (cell.property_details().kind() == kData) {
+ if (cell.property_details().kind() == PropertyKind::kData) {
Object value = cell.value();
if (!value.IsTheHole(isolate)) return value;
// If value is the hole (meaning, absent) do the general lookup.
@@ -790,7 +792,7 @@ RUNTIME_FUNCTION(Runtime_GetProperty) {
lookup_start_object->property_dictionary_swiss();
InternalIndex entry = dictionary.FindEntry(isolate, *key);
if (entry.is_found() &&
- (dictionary.DetailsAt(entry).kind() == kData)) {
+ (dictionary.DetailsAt(entry).kind() == PropertyKind::kData)) {
return dictionary.ValueAt(entry);
}
} else {
@@ -798,7 +800,7 @@ RUNTIME_FUNCTION(Runtime_GetProperty) {
lookup_start_object->property_dictionary();
InternalIndex entry = dictionary.FindEntry(isolate, key);
if ((entry.is_found()) &&
- (dictionary.DetailsAt(entry).kind() == kData)) {
+ (dictionary.DetailsAt(entry).kind() == PropertyKind::kData)) {
return dictionary.ValueAt(entry);
}
}
@@ -825,11 +827,11 @@ RUNTIME_FUNCTION(Runtime_GetProperty) {
} else if (lookup_start_obj->IsString() && key_obj->IsSmi()) {
// Fast case for string indexing using [] with a smi index.
Handle<String> str = Handle<String>::cast(lookup_start_obj);
- int index = Handle<Smi>::cast(key_obj)->value();
- if (index >= 0 && index < str->length()) {
+ int smi_index = Handle<Smi>::cast(key_obj)->value();
+ if (smi_index >= 0 && smi_index < str->length()) {
Factory* factory = isolate->factory();
return *factory->LookupSingleCharacterStringFromCode(
- String::Flatten(isolate, str)->Get(index));
+ String::Flatten(isolate, str)->Get(smi_index));
}
}
@@ -1033,7 +1035,7 @@ RUNTIME_FUNCTION(Runtime_CompleteInobjectSlackTrackingForMap) {
DCHECK_EQ(1, args.length());
CONVERT_ARG_HANDLE_CHECKED(Map, initial_map, 0);
- initial_map->CompleteInobjectSlackTracking(isolate);
+ MapUpdater::CompleteInobjectSlackTracking(isolate, *initial_map);
return ReadOnlyRoots(isolate).undefined_value();
}
@@ -1093,16 +1095,16 @@ RUNTIME_FUNCTION(Runtime_DefineDataPropertyInLiteral) {
DCHECK(maybe_vector->IsFeedbackVector());
Handle<FeedbackVector> vector = Handle<FeedbackVector>::cast(maybe_vector);
FeedbackNexus nexus(vector, FeedbackVector::ToSlot(index));
- if (nexus.ic_state() == UNINITIALIZED) {
+ if (nexus.ic_state() == InlineCacheState::UNINITIALIZED) {
if (name->IsUniqueName()) {
nexus.ConfigureMonomorphic(name, handle(object->map(), isolate),
MaybeObjectHandle());
} else {
- nexus.ConfigureMegamorphic(PROPERTY);
+ nexus.ConfigureMegamorphic(IcCheckType::kProperty);
}
- } else if (nexus.ic_state() == MONOMORPHIC) {
+ } else if (nexus.ic_state() == InlineCacheState::MONOMORPHIC) {
if (nexus.GetFirstMap() != object->map() || nexus.GetName() != *name) {
- nexus.ConfigureMegamorphic(PROPERTY);
+ nexus.ConfigureMegamorphic(IcCheckType::kProperty);
}
}
}
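The hunk above only swaps the plain enum values for the scoped InlineCacheState and IcCheckType enums; the underlying feedback transition (uninitialized on first sight, monomorphic on one (map, name) pair, megamorphic once anything else is seen) is unchanged. A minimal standalone sketch of that transition, with invented stand-ins for FeedbackNexus and its state:

    #include <cstdint>
    #include <iostream>
    #include <string>

    // Hypothetical stand-ins for V8's InlineCacheState / FeedbackNexus.
    enum class InlineCacheState { UNINITIALIZED, MONOMORPHIC, MEGAMORPHIC };

    struct FakeNexus {
      InlineCacheState state = InlineCacheState::UNINITIALIZED;
      uintptr_t map = 0;   // the single map seen so far
      std::string name;    // the single property name seen so far

      void Record(uintptr_t seen_map, const std::string& seen_name) {
        if (state == InlineCacheState::UNINITIALIZED) {
          // First observation: go monomorphic on this (map, name) pair.
          state = InlineCacheState::MONOMORPHIC;
          map = seen_map;
          name = seen_name;
        } else if (state == InlineCacheState::MONOMORPHIC &&
                   (map != seen_map || name != seen_name)) {
          // A different map or property: give up and go megamorphic.
          state = InlineCacheState::MEGAMORPHIC;
        }
      }
    };

    int main() {
      FakeNexus nexus;
      nexus.Record(0x1000, "x");  // -> MONOMORPHIC
      nexus.Record(0x1000, "x");  // stays MONOMORPHIC
      nexus.Record(0x2000, "x");  // -> MEGAMORPHIC
      std::cout << static_cast<int>(nexus.state) << "\n";  // prints 2
    }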
diff --git a/deps/v8/src/runtime/runtime-scopes.cc b/deps/v8/src/runtime/runtime-scopes.cc
index 8b65ffb7cc..cb88bec373 100644
--- a/deps/v8/src/runtime/runtime-scopes.cc
+++ b/deps/v8/src/runtime/runtime-scopes.cc
@@ -11,6 +11,7 @@
#include "src/execution/arguments-inl.h"
#include "src/execution/frames-inl.h"
#include "src/execution/isolate-inl.h"
+#include "src/execution/isolate.h"
#include "src/heap/heap-inl.h" // For ToBoolean. TODO(jkummerow): Drop.
#include "src/init/bootstrapper.h"
#include "src/logging/counters.h"
@@ -295,7 +296,7 @@ Object DeclareEvalHelper(Isolate* isolate, Handle<String> name,
} else if (context->has_extension()) {
object = handle(context->extension_object(), isolate);
DCHECK(object->IsJSContextExtensionObject());
- } else {
+ } else if (context->scope_info().HasContextExtensionSlot()) {
// Sloppy varblock and function contexts might not have an extension object
// yet. Sloppy eval will never have an extension object, as vars are hoisted
// out, and lets are known statically.
@@ -306,6 +307,10 @@ Object DeclareEvalHelper(Isolate* isolate, Handle<String> name,
isolate->factory()->NewJSObject(isolate->context_extension_function());
context->set_extension(*object);
+ } else {
+ THROW_NEW_ERROR_RETURN_FAILURE(
+ isolate,
+ NewEvalError(MessageTemplate::kVarNotAllowedInEvalScope, name));
}
RETURN_FAILURE_ON_EXCEPTION(isolate, JSObject::SetOwnPropertyIgnoreAttributes(
diff --git a/deps/v8/src/runtime/runtime-strings.cc b/deps/v8/src/runtime/runtime-strings.cc
index 78759e8a59..5e5aae89fc 100644
--- a/deps/v8/src/runtime/runtime-strings.cc
+++ b/deps/v8/src/runtime/runtime-strings.cc
@@ -423,17 +423,17 @@ RUNTIME_FUNCTION(Runtime_StringEscapeQuotes) {
Handle<String> quotes =
isolate->factory()->LookupSingleCharacterStringFromCode('"');
- int index = String::IndexOf(isolate, string, quotes, 0);
+ int quote_index = String::IndexOf(isolate, string, quotes, 0);
// No quotes, nothing to do.
- if (index == -1) return *string;
+ if (quote_index == -1) return *string;
// Find all quotes.
- std::vector<int> indices = {index};
- while (index + 1 < string_length) {
- index = String::IndexOf(isolate, string, quotes, index + 1);
- if (index == -1) break;
- indices.emplace_back(index);
+ std::vector<int> indices = {quote_index};
+ while (quote_index + 1 < string_length) {
+ quote_index = String::IndexOf(isolate, string, quotes, quote_index + 1);
+ if (quote_index == -1) break;
+ indices.emplace_back(quote_index);
}
// Build the replacement string.
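The rename above (index to quote_index) does not change behaviour: the loop still records the position of every double quote before the escaped copy is built. The same scan written against std::string, purely for illustration (the helper name is mine, not V8's):

    #include <iostream>
    #include <string>
    #include <vector>

    // Collect the index of every '"' in the input, mirroring the loop in
    // Runtime_StringEscapeQuotes (illustrative only).
    std::vector<size_t> FindQuoteIndices(const std::string& s) {
      std::vector<size_t> indices;
      size_t pos = s.find('"');
      while (pos != std::string::npos) {
        indices.push_back(pos);
        if (pos + 1 >= s.size()) break;
        pos = s.find('"', pos + 1);
      }
      return indices;
    }

    int main() {
      for (size_t i : FindQuoteIndices("a\"b\"c")) std::cout << i << " ";  // 1 3
      std::cout << "\n";
    }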
diff --git a/deps/v8/src/runtime/runtime-symbol.cc b/deps/v8/src/runtime/runtime-symbol.cc
index fff5a4f400..c5f9218911 100644
--- a/deps/v8/src/runtime/runtime-symbol.cc
+++ b/deps/v8/src/runtime/runtime-symbol.cc
@@ -48,7 +48,7 @@ RUNTIME_FUNCTION(Runtime_SymbolDescriptiveString) {
DCHECK_EQ(1, args.length());
CONVERT_ARG_HANDLE_CHECKED(Symbol, symbol, 0);
IncrementalStringBuilder builder(isolate);
- builder.AppendCString("Symbol(");
+ builder.AppendCStringLiteral("Symbol(");
if (symbol->description().IsString()) {
builder.AppendString(handle(String::cast(symbol->description()), isolate));
}
diff --git a/deps/v8/src/runtime/runtime-test.cc b/deps/v8/src/runtime/runtime-test.cc
index 7dd66ab4e8..54b53b719e 100644
--- a/deps/v8/src/runtime/runtime-test.cc
+++ b/deps/v8/src/runtime/runtime-test.cc
@@ -2,6 +2,9 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
+#include <fstream>
+#include <memory>
+
#include "include/v8-function.h"
#include "src/api/api-inl.h"
#include "src/base/numbers/double.h"
@@ -9,6 +12,7 @@
#include "src/codegen/assembler-inl.h"
#include "src/codegen/compiler.h"
#include "src/codegen/pending-optimization-table.h"
+#include "src/compiler-dispatcher/lazy-compile-dispatcher.h"
#include "src/compiler-dispatcher/optimizing-compile-dispatcher.h"
#include "src/debug/debug-evaluate.h"
#include "src/deoptimizer/deoptimizer.h"
@@ -26,6 +30,7 @@
#include "src/objects/js-function-inl.h"
#include "src/objects/js-regexp-inl.h"
#include "src/objects/smi.h"
+#include "src/profiler/heap-snapshot-generator.h"
#include "src/regexp/regexp.h"
#include "src/runtime/runtime-utils.h"
#include "src/snapshot/snapshot.h"
@@ -250,7 +255,7 @@ bool CanOptimizeFunction(Handle<JSFunction> function, Isolate* isolate,
if (!FLAG_opt) return false;
if (function->shared().optimization_disabled() &&
- function->shared().disable_optimization_reason() ==
+ function->shared().disabled_optimization_reason() ==
BailoutReason::kNeverOptimize) {
return CrashUnlessFuzzingReturnFalse(isolate);
}
@@ -430,7 +435,7 @@ RUNTIME_FUNCTION(Runtime_PrepareFunctionForOptimization) {
// If optimization is disabled for the function, return without making it
// pending optimize for test.
if (function->shared().optimization_disabled() &&
- function->shared().disable_optimization_reason() ==
+ function->shared().disabled_optimization_reason() ==
BailoutReason::kNeverOptimize) {
return CrashUnlessFuzzing(isolate);
}
@@ -474,7 +479,7 @@ RUNTIME_FUNCTION(Runtime_OptimizeFunctionForTopTier) {
}
RUNTIME_FUNCTION(Runtime_OptimizeOsr) {
- HandleScope scope(isolate);
+ HandleScope handle_scope(isolate);
DCHECK(args.length() == 0 || args.length() == 1);
Handle<JSFunction> function;
@@ -499,7 +504,7 @@ RUNTIME_FUNCTION(Runtime_OptimizeOsr) {
}
if (function->shared().optimization_disabled() &&
- function->shared().disable_optimization_reason() ==
+ function->shared().disabled_optimization_reason() ==
BailoutReason::kNeverOptimize) {
return CrashUnlessFuzzing(isolate);
}
@@ -571,12 +576,20 @@ RUNTIME_FUNCTION(Runtime_NeverOptimizeFunction) {
CONVERT_ARG_HANDLE_CHECKED(Object, function_object, 0);
if (!function_object->IsJSFunction()) return CrashUnlessFuzzing(isolate);
Handle<JSFunction> function = Handle<JSFunction>::cast(function_object);
- SharedFunctionInfo sfi = function->shared();
- if (sfi.abstract_code(isolate).kind() != CodeKind::INTERPRETED_FUNCTION &&
- sfi.abstract_code(isolate).kind() != CodeKind::BUILTIN) {
+ Handle<SharedFunctionInfo> sfi(function->shared(), isolate);
+ if (sfi->abstract_code(isolate).kind() != CodeKind::INTERPRETED_FUNCTION &&
+ sfi->abstract_code(isolate).kind() != CodeKind::BUILTIN) {
return CrashUnlessFuzzing(isolate);
}
- sfi.DisableOptimization(BailoutReason::kNeverOptimize);
+ // If a parallel lazy compilation is in progress, finish it first so that
+ // its finalization doesn't clobber the SharedFunctionInfo's
+ // disable_optimization field.
+ if (isolate->lazy_compile_dispatcher() &&
+ isolate->lazy_compile_dispatcher()->IsEnqueued(sfi)) {
+ isolate->lazy_compile_dispatcher()->FinishNow(sfi);
+ }
+
+ sfi->DisableOptimization(BailoutReason::kNeverOptimize);
return ReadOnlyRoots(isolate).undefined_value();
}
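The new IsEnqueued/FinishNow check is an instance of a general pattern: drain a pending background job before mutating state that its finalization would also write. A self-contained sketch of that pattern using std::async, with no V8 types involved (the SharedInfo struct and job are invented placeholders):

    #include <future>
    #include <iostream>
    #include <optional>

    struct SharedInfo {
      bool optimization_disabled = false;
    };

    int main() {
      SharedInfo info;

      // Placeholder for a background "lazy compile" job whose finalization
      // would also write to `info`.
      std::optional<std::future<void>> pending_job =
          std::async(std::launch::deferred, [] { /* stand-in for compile work */ });

      // Equivalent of LazyCompileDispatcher::FinishNow: if a job is enqueued,
      // finish it before mutating the shared state it could otherwise clobber.
      if (pending_job) {
        pending_job->get();
        pending_job.reset();
      }

      info.optimization_disabled = true;  // now safe to set
      std::cout << info.optimization_disabled << "\n";  // prints 1
    }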
@@ -828,6 +841,50 @@ RUNTIME_FUNCTION(Runtime_ScheduleGCInStackCheck) {
return ReadOnlyRoots(isolate).undefined_value();
}
+class FileOutputStream : public v8::OutputStream {
+ public:
+ explicit FileOutputStream(const char* filename) : os_(filename) {}
+ ~FileOutputStream() override { os_.close(); }
+
+ WriteResult WriteAsciiChunk(char* data, int size) override {
+ os_.write(data, size);
+ return kContinue;
+ }
+
+ void EndOfStream() override { os_.close(); }
+
+ private:
+ std::ofstream os_;
+};
+
+RUNTIME_FUNCTION(Runtime_TakeHeapSnapshot) {
+ if (FLAG_fuzzing) {
+ // We don't want to create snapshots in fuzzers.
+ return ReadOnlyRoots(isolate).undefined_value();
+ }
+
+ std::string filename = "heap.heapsnapshot";
+
+ if (args.length() >= 1) {
+ HandleScope hs(isolate);
+ CONVERT_ARG_HANDLE_CHECKED(String, filename_as_js_string, 0);
+ std::unique_ptr<char[]> buffer = filename_as_js_string->ToCString();
+ filename = std::string(buffer.get());
+ }
+
+ HeapProfiler* heap_profiler = isolate->heap_profiler();
+ // Since this API is intended for V8 devs, we do not treat globals as roots
+ // here on purpose.
+ HeapSnapshot* snapshot = heap_profiler->TakeSnapshot(
+ /* control = */ nullptr, /* resolver = */ nullptr,
+ /* treat_global_objects_as_roots = */ false,
+ /* capture_numeric_value = */ true);
+ FileOutputStream stream(filename.c_str());
+ HeapSnapshotJSONSerializer serializer(snapshot);
+ serializer.Serialize(&stream);
+ return ReadOnlyRoots(isolate).undefined_value();
+}
+
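%TakeHeapSnapshot() above is a test-only native. Embedders can produce the same JSON output through the public heap profiler API; the sketch below assumes the public v8-profiler.h header and an already-initialized v8::Isolate, and uses an invented WriteToFile stream class analogous to the FileOutputStream added above:

    #include <fstream>
    #include "v8-profiler.h"

    // Minimal OutputStream that writes snapshot chunks to a file.
    class WriteToFile final : public v8::OutputStream {
     public:
      explicit WriteToFile(const char* path) : os_(path) {}
      WriteResult WriteAsciiChunk(char* data, int size) override {
        os_.write(data, size);
        return kContinue;
      }
      void EndOfStream() override { os_.close(); }

     private:
      std::ofstream os_;
    };

    // Take a snapshot of the isolate's heap and serialize it as JSON.
    void DumpSnapshot(v8::Isolate* isolate) {
      v8::HeapProfiler* profiler = isolate->GetHeapProfiler();
      const v8::HeapSnapshot* snapshot = profiler->TakeHeapSnapshot();
      WriteToFile stream("heap.heapsnapshot");
      snapshot->Serialize(&stream, v8::HeapSnapshot::kJSON);
    }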
static void DebugPrintImpl(MaybeObject maybe_object) {
StdoutStream os;
if (maybe_object->IsCleared()) {
@@ -1333,7 +1390,7 @@ RUNTIME_FUNCTION(Runtime_CompleteInobjectSlackTracking) {
DCHECK_EQ(1, args.length());
CONVERT_ARG_HANDLE_CHECKED(JSObject, object, 0);
- object->map().CompleteInobjectSlackTracking(isolate);
+ MapUpdater::CompleteInobjectSlackTracking(isolate, object->map());
return ReadOnlyRoots(isolate).undefined_value();
}
@@ -1422,29 +1479,11 @@ RUNTIME_FUNCTION(Runtime_Is64Bit) {
return isolate->heap()->ToBoolean(kSystemPointerSize == 8);
}
-#if V8_ENABLE_WEBASSEMBLY
-// TODO(thibaudm): Handle this in Suspender.returnPromiseOnSuspend() when
-// the Suspender object is added.
-RUNTIME_FUNCTION(Runtime_WasmReturnPromiseOnSuspend) {
- CHECK(FLAG_experimental_wasm_stack_switching);
- DCHECK_EQ(1, args.length());
+RUNTIME_FUNCTION(Runtime_BigIntMaxLengthBits) {
HandleScope scope(isolate);
- CONVERT_ARG_HANDLE_CHECKED(JSFunction, function, 0);
- SharedFunctionInfo sfi = function->shared();
- // TODO(thibaudm): Throw an error if this is not a wasm function.
- CHECK(sfi.HasWasmExportedFunctionData());
- WasmExportedFunctionData data = sfi.wasm_exported_function_data();
- int index = data.function_index();
- Handle<WasmInstanceObject> instance(WasmInstanceObject::cast(data.ref()),
- isolate);
- auto wrapper =
- isolate->builtins()->code_handle(Builtin::kWasmReturnPromiseOnSuspend);
- auto result = Handle<WasmExternalFunction>::cast(WasmExportedFunction::New(
- isolate, instance, index, static_cast<int>(data.sig()->parameter_count()),
- wrapper));
- return *result;
+ DCHECK_EQ(0, args.length());
+ return *isolate->factory()->NewNumber(BigInt::kMaxLengthBits);
}
-#endif
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/runtime/runtime-trace.cc b/deps/v8/src/runtime/runtime-trace.cc
index 87249607f3..06cfd73548 100644
--- a/deps/v8/src/runtime/runtime-trace.cc
+++ b/deps/v8/src/runtime/runtime-trace.cc
@@ -38,6 +38,22 @@ void AdvanceToOffsetForTracing(
interpreter::OperandScale::kSingle));
}
+void PrintRegisterRange(UnoptimizedFrame* frame, std::ostream& os,
+ interpreter::BytecodeArrayIterator& bytecode_iterator,
+ const int& reg_field_width, const char* arrow_direction,
+ interpreter::Register first_reg, int range) {
+ for (int reg_index = first_reg.index(); reg_index < first_reg.index() + range;
+ reg_index++) {
+ Object reg_object = frame->ReadInterpreterRegister(reg_index);
+ os << " [ " << std::setw(reg_field_width)
+ << interpreter::Register(reg_index).ToString(
+ bytecode_iterator.bytecode_array()->parameter_count())
+ << arrow_direction;
+ reg_object.ShortPrint(os);
+ os << " ]" << std::endl;
+ }
+}
+
void PrintRegisters(UnoptimizedFrame* frame, std::ostream& os, bool is_input,
interpreter::BytecodeArrayIterator& bytecode_iterator,
Handle<Object> accumulator) {
@@ -74,18 +90,15 @@ void PrintRegisters(UnoptimizedFrame* frame, std::ostream& os, bool is_input,
interpreter::Register first_reg =
bytecode_iterator.GetRegisterOperand(operand_index);
int range = bytecode_iterator.GetRegisterOperandRange(operand_index);
- for (int reg_index = first_reg.index();
- reg_index < first_reg.index() + range; reg_index++) {
- Object reg_object = frame->ReadInterpreterRegister(reg_index);
- os << " [ " << std::setw(kRegFieldWidth)
- << interpreter::Register(reg_index).ToString(
- bytecode_iterator.bytecode_array()->parameter_count())
- << kArrowDirection;
- reg_object.ShortPrint(os);
- os << " ]" << std::endl;
- }
+ PrintRegisterRange(frame, os, bytecode_iterator, kRegFieldWidth,
+ kArrowDirection, first_reg, range);
}
}
+ if (!is_input && interpreter::Bytecodes::IsShortStar(bytecode)) {
+ PrintRegisterRange(frame, os, bytecode_iterator, kRegFieldWidth,
+ kArrowDirection,
+ interpreter::Register::FromShortStar(bytecode), 1);
+ }
if (FLAG_log_colour) {
os << kNormalColourCode;
}
diff --git a/deps/v8/src/runtime/runtime-wasm.cc b/deps/v8/src/runtime/runtime-wasm.cc
index 0843076ab4..c6e39c4c01 100644
--- a/deps/v8/src/runtime/runtime-wasm.cc
+++ b/deps/v8/src/runtime/runtime-wasm.cc
@@ -3,6 +3,7 @@
// found in the LICENSE file.
#include "src/base/memory.h"
+#include "src/common/assert-scope.h"
#include "src/common/message-template.h"
#include "src/compiler/wasm-compiler.h"
#include "src/debug/debug.h"
@@ -100,17 +101,23 @@ RUNTIME_FUNCTION(Runtime_WasmIsValidRefValue) {
!trap_handler::IsThreadInWasm());
HandleScope scope(isolate);
DCHECK_EQ(3, args.length());
- CONVERT_ARG_HANDLE_CHECKED(WasmInstanceObject, instance, 0)
+ // 'raw_instance' can be either a WasmInstanceObject or undefined.
+ CONVERT_ARG_HANDLE_CHECKED(Object, raw_instance, 0)
CONVERT_ARG_HANDLE_CHECKED(Object, value, 1);
// Make sure ValueType fits properly in a Smi.
STATIC_ASSERT(wasm::ValueType::kLastUsedBit + 1 <= kSmiValueSize);
CONVERT_SMI_ARG_CHECKED(raw_type, 2);
+ const wasm::WasmModule* module =
+ raw_instance->IsWasmInstanceObject()
+ ? Handle<WasmInstanceObject>::cast(raw_instance)->module()
+ : nullptr;
+
wasm::ValueType type = wasm::ValueType::FromRawBitField(raw_type);
const char* error_message;
- bool result = internal::wasm::TypecheckJSObject(isolate, instance->module(),
- value, type, &error_message);
+ bool result = internal::wasm::TypecheckJSObject(isolate, module, value, type,
+ &error_message);
return Smi::FromInt(result);
}
@@ -225,10 +232,12 @@ RUNTIME_FUNCTION(Runtime_WasmCompileLazy) {
namespace {
void ReplaceWrapper(Isolate* isolate, Handle<WasmInstanceObject> instance,
int function_index, Handle<Code> wrapper_code) {
- Handle<WasmExternalFunction> exported_function =
- WasmInstanceObject::GetWasmExternalFunction(isolate, instance,
+ Handle<WasmInternalFunction> internal =
+ WasmInstanceObject::GetWasmInternalFunction(isolate, instance,
function_index)
.ToHandleChecked();
+ Handle<WasmExternalFunction> exported_function =
+ handle(WasmExternalFunction::cast(internal->external()), isolate);
exported_function->set_code(*wrapper_code, kReleaseStore);
WasmExportedFunctionData function_data =
exported_function->shared().wasm_exported_function_data();
@@ -253,11 +262,9 @@ RUNTIME_FUNCTION(Runtime_WasmCompileWrapper) {
// an exported function (although it is called as one).
// If there is no entry for the start function,
// the tier-up is abandoned.
- MaybeHandle<WasmExternalFunction> maybe_exported_function =
- WasmInstanceObject::GetWasmExternalFunction(isolate, instance,
- function_index);
- Handle<WasmExternalFunction> exported_function;
- if (!maybe_exported_function.ToHandle(&exported_function)) {
+ if (WasmInstanceObject::GetWasmInternalFunction(isolate, instance,
+ function_index)
+ .is_null()) {
DCHECK_EQ(function_index, module->start_function_index);
return ReadOnlyRoots(isolate).undefined_value();
}
@@ -288,18 +295,17 @@ RUNTIME_FUNCTION(Runtime_WasmCompileWrapper) {
}
RUNTIME_FUNCTION(Runtime_WasmTriggerTierUp) {
+ ClearThreadInWasmScope clear_wasm_flag(isolate);
HandleScope scope(isolate);
DCHECK_EQ(1, args.length());
CONVERT_ARG_HANDLE_CHECKED(WasmInstanceObject, instance, 0);
- if (FLAG_new_wasm_dynamic_tiering) {
- // We're reusing this interrupt mechanism to interrupt long-running loops.
- StackLimitCheck check(isolate);
- DCHECK(!check.JsHasOverflowed());
- if (check.InterruptRequested()) {
- Object result = isolate->stack_guard()->HandleInterrupts();
- if (result.IsException()) return result;
- }
+ // We're reusing this interrupt mechanism to interrupt long-running loops.
+ StackLimitCheck check(isolate);
+ DCHECK(!check.JsHasOverflowed());
+ if (check.InterruptRequested()) {
+ Object result = isolate->stack_guard()->HandleInterrupts();
+ if (result.IsException()) return result;
}
FrameFinder<WasmFrame> frame_finder(isolate);
@@ -393,11 +399,8 @@ RUNTIME_FUNCTION(Runtime_WasmRefFunc) {
CONVERT_ARG_HANDLE_CHECKED(WasmInstanceObject, instance, 0);
CONVERT_UINT32_ARG_CHECKED(function_index, 1);
- Handle<WasmExternalFunction> function =
- WasmInstanceObject::GetOrCreateWasmExternalFunction(isolate, instance,
- function_index);
-
- return *function;
+ return *WasmInstanceObject::GetOrCreateWasmInternalFunction(isolate, instance,
+ function_index);
}
RUNTIME_FUNCTION(Runtime_WasmFunctionTableGet) {
@@ -699,11 +702,12 @@ namespace {
// contains a sentinel value, and it is also thread-safe. So if an interrupt is
// requested before, during or after this call, it will be preserved and handled
// at the next stack check.
-void SyncStackLimit(Isolate* isolate, Handle<WasmInstanceObject> instance) {
- auto jmpbuf = Managed<wasm::JumpBuffer>::cast(
- instance->active_continuation().managed_jmpbuf())
- .get();
- uintptr_t limit = reinterpret_cast<uintptr_t>(jmpbuf->stack_limit);
+void SyncStackLimit(Isolate* isolate) {
+ DisallowGarbageCollection no_gc;
+ auto continuation = WasmContinuationObject::cast(
+ *isolate->roots_table().slot(RootIndex::kActiveContinuation));
+ auto stack = Managed<wasm::StackMemory>::cast(continuation.stack()).get();
+ uintptr_t limit = reinterpret_cast<uintptr_t>(stack->jmpbuf()->stack_limit);
isolate->stack_guard()->SetStackLimit(limit);
}
} // namespace
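As the comment above notes, SyncStackLimit simply installs the stack limit recorded in the active continuation's jump buffer onto the isolate's stack guard. A toy version of that bookkeeping, with invented stand-ins for JumpBuffer, StackMemory, and StackGuard:

    #include <cstdint>
    #include <iostream>

    // Invented stand-ins for wasm::JumpBuffer / wasm::StackMemory / StackGuard.
    struct JumpBuffer { uintptr_t stack_limit; };
    struct StackMemory { JumpBuffer jmpbuf; };
    struct StackGuard {
      uintptr_t limit = 0;
      void SetStackLimit(uintptr_t l) { limit = l; }
    };

    // Mirror of SyncStackLimit: take the limit of the newly active stack and
    // install it as the current stack limit.
    void SyncStackLimit(StackGuard& guard, const StackMemory& active_stack) {
      guard.SetStackLimit(active_stack.jmpbuf.stack_limit);
    }

    int main() {
      StackGuard guard;
      StackMemory secondary{{0x7fff0000}};
      SyncStackLimit(guard, secondary);  // after switching to `secondary`
      std::cout << std::hex << guard.limit << "\n";
    }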
@@ -712,22 +716,24 @@ void SyncStackLimit(Isolate* isolate, Handle<WasmInstanceObject> instance) {
// active continuation and setting the stack limit.
RUNTIME_FUNCTION(Runtime_WasmAllocateContinuation) {
CHECK(FLAG_experimental_wasm_stack_switching);
- DCHECK_EQ(1, args.length());
HandleScope scope(isolate);
- CONVERT_ARG_HANDLE_CHECKED(WasmInstanceObject, instance, 0);
- auto parent = instance->active_continuation();
- auto target = WasmContinuationObject::New(isolate, parent);
- instance->set_active_continuation(*target);
- SyncStackLimit(isolate, instance);
+ auto parent =
+ handle(WasmContinuationObject::cast(
+ *isolate->roots_table().slot(RootIndex::kActiveContinuation)),
+ isolate);
+ auto target = WasmContinuationObject::New(isolate, *parent);
+ auto target_stack =
+ Managed<wasm::StackMemory>::cast(target->stack()).get().get();
+ isolate->wasm_stacks()->Add(target_stack);
+ isolate->roots_table().slot(RootIndex::kActiveContinuation).store(*target);
+ SyncStackLimit(isolate);
return *target;
}
// Update the stack limit after a stack switch, and preserve pending interrupts.
RUNTIME_FUNCTION(Runtime_WasmSyncStackLimit) {
CHECK(FLAG_experimental_wasm_stack_switching);
- HandleScope scope(isolate);
- CONVERT_ARG_HANDLE_CHECKED(WasmInstanceObject, instance, 0);
- SyncStackLimit(isolate, instance);
+ SyncStackLimit(isolate);
return ReadOnlyRoots(isolate).undefined_value();
}
diff --git a/deps/v8/src/runtime/runtime.cc b/deps/v8/src/runtime/runtime.cc
index e4ceef48f1..cce8dd2739 100644
--- a/deps/v8/src/runtime/runtime.cc
+++ b/deps/v8/src/runtime/runtime.cc
@@ -112,6 +112,7 @@ bool Runtime::NeedsExactContext(FunctionId id) {
case Runtime::kLoadPrivateGetter:
case Runtime::kLoadPrivateSetter:
case Runtime::kReThrow:
+ case Runtime::kReThrowWithMessage:
case Runtime::kThrow:
case Runtime::kThrowApplyNonFunction:
case Runtime::kThrowCalledNonCallable:
@@ -154,6 +155,7 @@ bool Runtime::IsNonReturning(FunctionId id) {
case Runtime::kThrowSuperAlreadyCalledError:
case Runtime::kThrowSuperNotCalled:
case Runtime::kReThrow:
+ case Runtime::kReThrowWithMessage:
case Runtime::kThrow:
case Runtime::kThrowApplyNonFunction:
case Runtime::kThrowCalledNonCallable:
diff --git a/deps/v8/src/runtime/runtime.h b/deps/v8/src/runtime/runtime.h
index 96b404f95d..41bc3256a7 100644
--- a/deps/v8/src/runtime/runtime.h
+++ b/deps/v8/src/runtime/runtime.h
@@ -75,6 +75,7 @@ namespace internal {
F(BigIntEqualToBigInt, 2, 1) \
F(BigIntEqualToNumber, 2, 1) \
F(BigIntEqualToString, 2, 1) \
+ F(BigIntMaxLengthBits, 0, 1) \
F(BigIntToBoolean, 1, 1) \
F(BigIntToNumber, 1, 1) \
F(BigIntUnaryOp, 2, 1) \
@@ -233,6 +234,7 @@ namespace internal {
F(PromoteScheduledException, 0, 1) \
F(ReportMessageFromMicrotask, 1, 1) \
F(ReThrow, 1, 1) \
+ F(ReThrowWithMessage, 2, 1) \
F(RunMicrotaskCallback, 2, 1) \
F(PerformMicrotaskCheckpoint, 0, 1) \
F(StackGuard, 0, 1) \
@@ -552,6 +554,7 @@ namespace internal {
F(SimulateNewspaceFull, 0, 1) \
F(StringIteratorProtector, 0, 1) \
F(SystemBreak, 0, 1) \
+ F(TakeHeapSnapshot, -1, 1) \
F(TierupFunctionOnNextCall, -1, 1) \
F(TraceEnter, 0, 1) \
F(TraceExit, 1, 1) \
@@ -593,9 +596,8 @@ namespace internal {
F(WasmDebugBreak, 0, 1) \
F(WasmAllocateRtt, 3, 1) \
F(WasmArrayCopy, 5, 1) \
- F(WasmAllocateContinuation, 1, 1) \
- F(WasmReturnPromiseOnSuspend, 1, 1) \
- F(WasmSyncStackLimit, 1, 1)
+ F(WasmAllocateContinuation, 0, 1) \
+ F(WasmSyncStackLimit, 0, 1)
#define FOR_EACH_INTRINSIC_WASM_TEST(F, I) \
F(DeserializeWasmModule, 2, 1) \
diff --git a/deps/v8/src/security/caged-pointer-inl.h b/deps/v8/src/security/caged-pointer-inl.h
index 5c0959db25..93cd95a6bf 100644
--- a/deps/v8/src/security/caged-pointer-inl.h
+++ b/deps/v8/src/security/caged-pointer-inl.h
@@ -12,23 +12,27 @@
namespace v8 {
namespace internal {
+V8_INLINE Address ReadCagedPointerField(Address field_address,
+ PtrComprCageBase cage_base) {
#ifdef V8_CAGED_POINTERS
-
-V8_INLINE CagedPointer_t ReadCagedPointerField(Address field_address,
- PtrComprCageBase cage_base) {
// Caged pointers are currently only used if the sandbox is enabled.
DCHECK(V8_HEAP_SANDBOX_BOOL);
- Address caged_pointer = base::ReadUnalignedValue<Address>(field_address);
+ CagedPointer_t caged_pointer =
+ base::ReadUnalignedValue<CagedPointer_t>(field_address);
Address offset = caged_pointer >> kCagedPointerShift;
Address pointer = cage_base.address() + offset;
return pointer;
+#else
+ return base::ReadUnalignedValue<Address>(field_address);
+#endif
}
V8_INLINE void WriteCagedPointerField(Address field_address,
PtrComprCageBase cage_base,
- CagedPointer_t pointer) {
+ Address pointer) {
+#ifdef V8_CAGED_POINTERS
// Caged pointers are currently only used if the sandbox is enabled.
DCHECK(V8_HEAP_SANDBOX_BOOL);
@@ -36,12 +40,13 @@ V8_INLINE void WriteCagedPointerField(Address field_address,
DCHECK(GetProcessWideVirtualMemoryCage()->Contains(pointer));
Address offset = pointer - cage_base.address();
- Address caged_pointer = offset << kCagedPointerShift;
- base::WriteUnalignedValue<Address>(field_address, caged_pointer);
+ CagedPointer_t caged_pointer = offset << kCagedPointerShift;
+ base::WriteUnalignedValue<CagedPointer_t>(field_address, caged_pointer);
+#else
+ base::WriteUnalignedValue<Address>(field_address, pointer);
+#endif
}
-#endif // V8_CAGED_POINTERS
-
} // namespace internal
} // namespace v8
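The caged-pointer accessors above store an offset from the cage base shifted left by kCagedPointerShift rather than a raw address, and reverse the transformation on read. A self-contained round-trip of that encoding; the shift value and cage base below are made up for illustration and do not match V8's real constants:

    #include <cstdint>
    #include <iostream>

    // Illustrative constant only; V8's real kCagedPointerShift differs.
    constexpr uint64_t kCagedPointerShift = 16;

    uint64_t Encode(uint64_t pointer, uint64_t cage_base) {
      uint64_t offset = pointer - cage_base;
      return offset << kCagedPointerShift;  // what WriteCagedPointerField stores
    }

    uint64_t Decode(uint64_t caged, uint64_t cage_base) {
      uint64_t offset = caged >> kCagedPointerShift;
      return cage_base + offset;            // what ReadCagedPointerField returns
    }

    int main() {
      const uint64_t cage_base = 0x0000200000000000ull;
      const uint64_t ptr = cage_base + 0x1234000;
      const uint64_t caged = Encode(ptr, cage_base);
      std::cout << std::hex << Decode(caged, cage_base) << "\n";  // == ptr
    }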
diff --git a/deps/v8/src/security/caged-pointer.h b/deps/v8/src/security/caged-pointer.h
index 5b15f63844..30c3b40db8 100644
--- a/deps/v8/src/security/caged-pointer.h
+++ b/deps/v8/src/security/caged-pointer.h
@@ -10,16 +10,12 @@
namespace v8 {
namespace internal {
-#ifdef V8_CAGED_POINTERS
-
-V8_INLINE CagedPointer_t ReadCagedPointerField(Address field_address,
- PtrComprCageBase cage_base);
+V8_INLINE Address ReadCagedPointerField(Address field_address,
+ PtrComprCageBase cage_base);
V8_INLINE void WriteCagedPointerField(Address field_address,
PtrComprCageBase cage_base,
- CagedPointer_t value);
-
-#endif // V8_CAGED_POINTERS
+ Address value);
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/security/vm-cage.cc b/deps/v8/src/security/vm-cage.cc
index 38067bf86c..acd2d7c625 100644
--- a/deps/v8/src/security/vm-cage.cc
+++ b/deps/v8/src/security/vm-cage.cc
@@ -8,9 +8,13 @@
#include "src/base/bits.h"
#include "src/base/bounded-page-allocator.h"
#include "src/base/cpu.h"
+#include "src/base/emulated-virtual-address-subspace.h"
#include "src/base/lazy-instance.h"
#include "src/base/utils/random-number-generator.h"
+#include "src/base/virtual-address-space-page-allocator.h"
+#include "src/base/virtual-address-space.h"
#include "src/flags/flags.h"
+#include "src/security/caged-pointer.h"
#include "src/utils/allocation.h"
#if defined(V8_OS_WIN)
@@ -24,159 +28,6 @@ namespace internal {
#ifdef V8_VIRTUAL_MEMORY_CAGE_IS_AVAILABLE
-// A PageAllocator that allocates pages inside a given virtual address range
-// like the BoundedPageAllocator, except that only a (small) part of the range
-// has actually been reserved. As such, this allocator relies on page
-// allocation hints for the OS to obtain pages inside the non-reserved part.
-// This allocator is used on OSes where reserving virtual address space (and
-// thus a virtual memory cage) is too expensive, notabley Windows pre 8.1.
-class FakeBoundedPageAllocator : public v8::PageAllocator {
- public:
- FakeBoundedPageAllocator(v8::PageAllocator* page_allocator, Address start,
- size_t size, size_t reserved_size)
- : page_allocator_(page_allocator),
- start_(start),
- size_(size),
- reserved_size_(reserved_size),
- end_of_reserved_region_(start + reserved_size) {
- // The size is required to be a power of two so that obtaining a random
- // address inside the managed region simply requires a fixed number of
- // random bits as offset.
- DCHECK(base::bits::IsPowerOfTwo(size));
- DCHECK_LT(reserved_size, size);
-
- if (FLAG_random_seed != 0) {
- rng_.SetSeed(FLAG_random_seed);
- }
-
- reserved_region_page_allocator_ =
- std::make_unique<base::BoundedPageAllocator>(
- page_allocator_, start_, reserved_size_,
- page_allocator_->AllocatePageSize(),
- base::PageInitializationMode::kAllocatedPagesMustBeZeroInitialized);
- }
-
- ~FakeBoundedPageAllocator() override = default;
-
- size_t AllocatePageSize() override {
- return page_allocator_->AllocatePageSize();
- }
-
- size_t CommitPageSize() override { return page_allocator_->CommitPageSize(); }
-
- void SetRandomMmapSeed(int64_t seed) override { rng_.SetSeed(seed); }
-
- void* GetRandomMmapAddr() override {
- // Generate a random number between 0 and size_, then add that to the start
- // address to obtain a random mmap address. We deliberately don't use our
- // provided page allocator's GetRandomMmapAddr here since that could be
- // biased, while we want uniformly distributed random numbers here.
- Address addr = rng_.NextInt64() % size_ + start_;
- addr = RoundDown(addr, AllocatePageSize());
- void* ptr = reinterpret_cast<void*>(addr);
- DCHECK(Contains(ptr, 1));
- return ptr;
- }
-
- void* AllocatePages(void* hint, size_t size, size_t alignment,
- Permission access) override {
- DCHECK(IsAligned(size, AllocatePageSize()));
- DCHECK(IsAligned(alignment, AllocatePageSize()));
-
- // First, try allocating the memory inside the reserved region.
- void* ptr = reserved_region_page_allocator_->AllocatePages(
- hint, size, alignment, access);
- if (ptr) return ptr;
-
- // Then, fall back to allocating memory outside of the reserved region
- // through page allocator hints.
-
- // Somewhat arbitrary size limitation to ensure that the loop below for
- // finding a fitting base address hint terminates quickly.
- if (size >= size_ / 2) return nullptr;
-
- if (!hint || !Contains(hint, size)) hint = GetRandomMmapAddr();
-
- static constexpr int kMaxAttempts = 10;
- for (int i = 0; i < kMaxAttempts; i++) {
- // If the hint wouldn't result in the entire allocation being inside the
- // managed region, simply retry. There is at least a 50% chance of
- // getting a usable address due to the size restriction above.
- while (!Contains(hint, size)) {
- hint = GetRandomMmapAddr();
- }
-
- ptr = page_allocator_->AllocatePages(hint, size, alignment, access);
- if (ptr && Contains(ptr, size)) {
- return ptr;
- } else if (ptr) {
- page_allocator_->FreePages(ptr, size);
- }
-
- // Retry at a different address.
- hint = GetRandomMmapAddr();
- }
-
- return nullptr;
- }
-
- bool FreePages(void* address, size_t size) override {
- return AllocatorFor(address)->FreePages(address, size);
- }
-
- bool ReleasePages(void* address, size_t size, size_t new_length) override {
- return AllocatorFor(address)->ReleasePages(address, size, new_length);
- }
-
- bool SetPermissions(void* address, size_t size,
- Permission permissions) override {
- return AllocatorFor(address)->SetPermissions(address, size, permissions);
- }
-
- bool DiscardSystemPages(void* address, size_t size) override {
- return AllocatorFor(address)->DiscardSystemPages(address, size);
- }
-
- bool DecommitPages(void* address, size_t size) override {
- return AllocatorFor(address)->DecommitPages(address, size);
- }
-
- private:
- bool Contains(void* ptr, size_t length) {
- Address addr = reinterpret_cast<Address>(ptr);
- return (addr >= start_) && ((addr + length) < (start_ + size_));
- }
-
- v8::PageAllocator* AllocatorFor(void* ptr) {
- Address addr = reinterpret_cast<Address>(ptr);
- if (addr < end_of_reserved_region_) {
- DCHECK_GE(addr, start_);
- return reserved_region_page_allocator_.get();
- } else {
- return page_allocator_;
- }
- }
-
- // The page allocator through which pages inside the region are allocated.
- v8::PageAllocator* const page_allocator_;
- // The bounded page allocator managing the sub-region that was actually
- // reserved.
- std::unique_ptr<base::BoundedPageAllocator> reserved_region_page_allocator_;
-
- // Random number generator for generating random addresses.
- base::RandomNumberGenerator rng_;
-
- // The start of the virtual memory region in which to allocate pages. This is
- // also the start of the sub-region that was reserved.
- const Address start_;
- // The total size of the address space in which to allocate pages.
- const size_t size_;
- // The size of the sub-region that has actually been reserved.
- const size_t reserved_size_;
- // The end of the sub-region that has actually been reserved.
- const Address end_of_reserved_region_;
-};
-
// Best-effort helper function to determine the size of the userspace virtual
// address space. Used to determine appropriate cage size and placement.
static Address DetermineAddressSpaceLimit() {
@@ -230,7 +81,7 @@ static Address DetermineAddressSpaceLimit() {
return address_space_limit;
}
-bool V8VirtualMemoryCage::Initialize(PageAllocator* page_allocator) {
+bool V8VirtualMemoryCage::Initialize(v8::VirtualAddressSpace* vas) {
// Take the number of virtual address bits into account when determining the
// size of the cage. For example, if there are only 39 bits available, split
// evenly between userspace and kernel, then userspace can only address 256GB
@@ -267,25 +118,39 @@ bool V8VirtualMemoryCage::Initialize(PageAllocator* page_allocator) {
}
#endif // V8_OS_WIN
+ if (!vas->CanAllocateSubspaces()) {
+ // If we cannot create virtual memory subspaces, we also need to fall back
+ // to creating a fake cage. In practice, this should only happen on Windows
+ // version before Windows 10, maybe including early Windows 10 releases,
+ // where the necessary memory management APIs, in particular, VirtualAlloc2,
+ // are not available. This check should also in practice subsume the
+ // preceeding one for Windows 8 and earlier, but we'll keep both just to be
+ // sure since there the fake cage is technically required for a different
+ // reason (large virtual memory reservations being too expensive).
+ size_to_reserve = kFakeVirtualMemoryCageMinReservationSize;
+ create_fake_cage = true;
+ }
+
// In any case, the (fake) cage must be at most as large as our address space.
DCHECK_LE(cage_size, address_space_limit);
if (create_fake_cage) {
- return InitializeAsFakeCage(page_allocator, cage_size, size_to_reserve);
+ return InitializeAsFakeCage(vas, cage_size, size_to_reserve);
} else {
// TODO(saelo) if this fails, we could still fall back to creating a fake
// cage. Decide if that would make sense.
const bool use_guard_regions = true;
- return Initialize(page_allocator, cage_size, use_guard_regions);
+ return Initialize(vas, cage_size, use_guard_regions);
}
}
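Initialize() now falls back to a fake cage in two independent cases: the existing OS/version check and the new CanAllocateSubspaces() probe. A condensed sketch of that selection logic with the predicates and sizes stubbed out (none of these names are V8's):

    #include <cstdint>
    #include <iostream>

    struct CageConfig {
      bool create_fake_cage = false;
      uint64_t size_to_reserve = 0;
    };

    // Condensed version of the decision in V8VirtualMemoryCage::Initialize.
    CageConfig ChooseCageKind(bool os_needs_fake_cage, bool can_allocate_subspaces,
                              uint64_t cage_size, uint64_t fake_cage_reservation) {
      CageConfig config{false, cage_size};
      if (os_needs_fake_cage || !can_allocate_subspaces) {
        // Either large reservations are too expensive (old Windows) or subspace
        // allocation is unavailable: reserve only a small region and emulate
        // the rest of the cage.
        config.create_fake_cage = true;
        config.size_to_reserve = fake_cage_reservation;
      }
      return config;
    }

    int main() {
      auto c = ChooseCageKind(/*os_needs_fake_cage=*/false,
                              /*can_allocate_subspaces=*/false,
                              /*cage_size=*/uint64_t{1} << 40,
                              /*fake_cage_reservation=*/uint64_t{1} << 32);
      std::cout << c.create_fake_cage << " " << c.size_to_reserve << "\n";
    }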
-bool V8VirtualMemoryCage::Initialize(v8::PageAllocator* page_allocator,
- size_t size, bool use_guard_regions) {
+bool V8VirtualMemoryCage::Initialize(v8::VirtualAddressSpace* vas, size_t size,
+ bool use_guard_regions) {
CHECK(!initialized_);
CHECK(!disabled_);
CHECK(base::bits::IsPowerOfTwo(size));
CHECK_GE(size, kVirtualMemoryCageMinimumSize);
+ CHECK(vas->CanAllocateSubspaces());
// Currently, we allow the cage to be smaller than the requested size. This
// way, we can gracefully handle cage reservation failures during the initial
@@ -297,52 +162,64 @@ bool V8VirtualMemoryCage::Initialize(v8::PageAllocator* page_allocator,
// Which of these options is ultimately taken likely depends on how frequently
// cage reservation failures occur in practice.
size_t reservation_size;
- while (!reservation_base_ && size >= kVirtualMemoryCageMinimumSize) {
+ while (!virtual_address_space_ && size >= kVirtualMemoryCageMinimumSize) {
reservation_size = size;
if (use_guard_regions) {
reservation_size += 2 * kVirtualMemoryCageGuardRegionSize;
}
- // Technically, we should use kNoAccessWillJitLater here instead since the
- // cage will contain JIT pages. However, currently this is not required as
- // PA anyway uses MAP_JIT for V8 mappings. Further, we want to eventually
- // move JIT pages out of the cage, at which point we'd like to forbid
- // making pages inside the cage executable, and so don't want MAP_JIT.
- Address hint = RoundDown(
- reinterpret_cast<Address>(page_allocator->GetRandomMmapAddr()),
- kVirtualMemoryCageAlignment);
- reservation_base_ = reinterpret_cast<Address>(page_allocator->AllocatePages(
- reinterpret_cast<void*>(hint), reservation_size,
- kVirtualMemoryCageAlignment, PageAllocator::kNoAccess));
- if (!reservation_base_) {
+ Address hint =
+ RoundDown(vas->RandomPageAddress(), kVirtualMemoryCageAlignment);
+
+ // Currently, executable memory is still allocated inside the cage. In the
+ // future, we should drop that and use kReadWrite as max_permissions.
+ virtual_address_space_ = vas->AllocateSubspace(
+ hint, reservation_size, kVirtualMemoryCageAlignment,
+ PagePermissions::kReadWriteExecute);
+ if (!virtual_address_space_) {
size /= 2;
}
}
- if (!reservation_base_) return false;
+ if (!virtual_address_space_) return false;
+ reservation_base_ = virtual_address_space_->base();
base_ = reservation_base_;
if (use_guard_regions) {
base_ += kVirtualMemoryCageGuardRegionSize;
}
- page_allocator_ = page_allocator;
size_ = size;
end_ = base_ + size_;
reservation_size_ = reservation_size;
- cage_page_allocator_ = std::make_unique<base::BoundedPageAllocator>(
- page_allocator_, base_, size_, page_allocator_->AllocatePageSize(),
- base::PageInitializationMode::kAllocatedPagesMustBeZeroInitialized);
+ if (use_guard_regions) {
+ // These must succeed since nothing was allocated in the subspace yet.
+ CHECK_EQ(reservation_base_,
+ virtual_address_space_->AllocatePages(
+ reservation_base_, kVirtualMemoryCageGuardRegionSize,
+ vas->allocation_granularity(), PagePermissions::kNoAccess));
+ CHECK_EQ(end_,
+ virtual_address_space_->AllocatePages(
+ end_, kVirtualMemoryCageGuardRegionSize,
+ vas->allocation_granularity(), PagePermissions::kNoAccess));
+ }
+
+ cage_page_allocator_ =
+ std::make_unique<base::VirtualAddressSpacePageAllocator>(
+ virtual_address_space_.get());
initialized_ = true;
is_fake_cage_ = false;
+ InitializeConstants();
+
return true;
}
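The reservation loop above keeps halving the requested cage size until AllocateSubspace succeeds or the size drops below kVirtualMemoryCageMinimumSize. The retry shape in isolation, with a generic reserve callback standing in for the subspace allocation:

    #include <cstdint>
    #include <functional>
    #include <iostream>

    // Mirrors the shrinking retry loop in V8VirtualMemoryCage::Initialize:
    // keep halving `size` until `reserve` succeeds or we fall below `minimum`.
    // Returns the size that was actually reserved, or 0 on failure.
    uint64_t ReserveWithFallback(uint64_t size, uint64_t minimum,
                                 const std::function<bool(uint64_t)>& reserve) {
      while (size >= minimum) {
        if (reserve(size)) return size;
        size /= 2;
      }
      return 0;
    }

    int main() {
      // Pretend the OS can only satisfy reservations of 256 units or less.
      auto reserve = [](uint64_t s) { return s <= 256; };
      std::cout << ReserveWithFallback(1024, 64, reserve) << "\n";  // prints 256
    }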
-bool V8VirtualMemoryCage::InitializeAsFakeCage(
- v8::PageAllocator* page_allocator, size_t size, size_t size_to_reserve) {
+bool V8VirtualMemoryCage::InitializeAsFakeCage(v8::VirtualAddressSpace* vas,
+ size_t size,
+ size_t size_to_reserve) {
CHECK(!initialized_);
CHECK(!disabled_);
CHECK(base::bits::IsPowerOfTwo(size));
@@ -353,7 +230,7 @@ bool V8VirtualMemoryCage::InitializeAsFakeCage(
// Use a custom random number generator here to ensure that we get uniformly
// distributed random numbers. We figure out the available address space
// ourselves, and so are potentially better positioned to determine a good
- // base address for the cage than the embedder-provided GetRandomMmapAddr().
+ // base address for the cage than the embedder.
base::RandomNumberGenerator rng;
if (FLAG_random_seed != 0) {
rng.SetSeed(FLAG_random_seed);
@@ -372,9 +249,9 @@ bool V8VirtualMemoryCage::InitializeAsFakeCage(
Address hint = rng.NextInt64() % highest_allowed_address;
hint = RoundDown(hint, kVirtualMemoryCageAlignment);
- reservation_base_ = reinterpret_cast<Address>(page_allocator->AllocatePages(
- reinterpret_cast<void*>(hint), size_to_reserve,
- kVirtualMemoryCageAlignment, PageAllocator::kNoAccess));
+ reservation_base_ =
+ vas->AllocatePages(hint, size_to_reserve, kVirtualMemoryCageAlignment,
+ PagePermissions::kNoAccess);
if (!reservation_base_) return false;
@@ -384,8 +261,7 @@ bool V8VirtualMemoryCage::InitializeAsFakeCage(
break;
// Can't use this base, so free the reservation and try again
- page_allocator->FreePages(reinterpret_cast<void*>(reservation_base_),
- size_to_reserve);
+ CHECK(vas->FreePages(reservation_base_, size_to_reserve));
reservation_base_ = kNullAddress;
}
DCHECK(reservation_base_);
@@ -396,18 +272,31 @@ bool V8VirtualMemoryCage::InitializeAsFakeCage(
reservation_size_ = size_to_reserve;
initialized_ = true;
is_fake_cage_ = true;
- page_allocator_ = page_allocator;
- cage_page_allocator_ = std::make_unique<FakeBoundedPageAllocator>(
- page_allocator_, base_, size_, reservation_size_);
+ virtual_address_space_ =
+ std::make_unique<base::EmulatedVirtualAddressSubspace>(
+ vas, reservation_base_, reservation_size_, size_);
+ cage_page_allocator_ =
+ std::make_unique<base::VirtualAddressSpacePageAllocator>(
+ virtual_address_space_.get());
+
+ InitializeConstants();
return true;
}
+void V8VirtualMemoryCage::InitializeConstants() {
+#ifdef V8_CAGED_POINTERS
+ // Place the empty backing store buffer at the end of the cage, so that any
+ // accidental access to it will most likely hit a guard page.
+ constants_.set_empty_backing_store_buffer(base_ + size_ - 1);
+#endif
+}
+
void V8VirtualMemoryCage::TearDown() {
if (initialized_) {
+ // This destroys the subspace and frees the underlying reservation.
+ virtual_address_space_.reset();
cage_page_allocator_.reset();
- CHECK(page_allocator_->FreePages(reinterpret_cast<void*>(reservation_base_),
- reservation_size_));
base_ = kNullAddress;
end_ = kNullAddress;
size_ = 0;
@@ -415,7 +304,9 @@ void V8VirtualMemoryCage::TearDown() {
reservation_size_ = 0;
initialized_ = false;
is_fake_cage_ = false;
- page_allocator_ = nullptr;
+#ifdef V8_CAGED_POINTERS
+ constants_.Reset();
+#endif
}
disabled_ = false;
}
diff --git a/deps/v8/src/security/vm-cage.h b/deps/v8/src/security/vm-cage.h
index 26aa2c8f37..b3f54d9bd1 100644
--- a/deps/v8/src/security/vm-cage.h
+++ b/deps/v8/src/security/vm-cage.h
@@ -11,8 +11,6 @@
namespace v8 {
-class PageAllocator;
-
namespace internal {
#ifdef V8_VIRTUAL_MEMORY_CAGE_IS_AVAILABLE
@@ -59,7 +57,7 @@ class V8_EXPORT_PRIVATE V8VirtualMemoryCage {
V8VirtualMemoryCage(const V8VirtualMemoryCage&) = delete;
V8VirtualMemoryCage& operator=(V8VirtualMemoryCage&) = delete;
- bool Initialize(v8::PageAllocator* page_allocator);
+ bool Initialize(v8::VirtualAddressSpace* vas);
void Disable() {
CHECK(!initialized_);
disabled_ = true;
@@ -84,6 +82,10 @@ class V8_EXPORT_PRIVATE V8VirtualMemoryCage {
return cage_page_allocator_.get();
}
+ v8::VirtualAddressSpace* virtual_address_space() const {
+ return virtual_address_space_.get();
+ }
+
bool Contains(Address addr) const {
return addr >= base_ && addr < base_ + size_;
}
@@ -92,6 +94,27 @@ class V8_EXPORT_PRIVATE V8VirtualMemoryCage {
return Contains(reinterpret_cast<Address>(ptr));
}
+#ifdef V8_CAGED_POINTERS
+ class CagedPointerConstants final {
+ public:
+ Address empty_backing_store_buffer() const {
+ return empty_backing_store_buffer_;
+ }
+ Address empty_backing_store_buffer_address() const {
+ return reinterpret_cast<Address>(&empty_backing_store_buffer_);
+ }
+ void set_empty_backing_store_buffer(Address value) {
+ empty_backing_store_buffer_ = value;
+ }
+
+ void Reset() { empty_backing_store_buffer_ = 0; }
+
+ private:
+ Address empty_backing_store_buffer_ = 0;
+ };
+ const CagedPointerConstants& constants() const { return constants_; }
+#endif
+
private:
// The SequentialUnmapperTest calls the private Initialize method to create a
// cage without guard regions, which would otherwise consume too much memory.
@@ -105,15 +128,24 @@ class V8_EXPORT_PRIVATE V8VirtualMemoryCage {
// We allow tests to disable the guard regions around the cage. This is useful
// for example for tests like the SequentialUnmapperTest which track page
// allocations and so would incur a large overhead from the guard regions.
- bool Initialize(v8::PageAllocator* page_allocator, size_t size,
+ // The provided virtual address space must be able to allocate subspaces.
+ // The size must be a multiple of the allocation granularity of the virtual
+ // address space.
+ bool Initialize(v8::VirtualAddressSpace* vas, size_t size,
bool use_guard_regions);
// Used on OSes where reserving virtual memory is too expensive. A fake cage
// does not reserve all of the virtual memory and so doesn't have the desired
// security properties.
- bool InitializeAsFakeCage(v8::PageAllocator* page_allocator, size_t size,
+ // The size and size_to_reserve parameters must be multiples of the
+ // allocation granularity of the virtual address space.
+ bool InitializeAsFakeCage(v8::VirtualAddressSpace* vas, size_t size,
size_t size_to_reserve);
+ // Initialize the caged pointer constants for this cage. Called by the
+ // Initialize methods above.
+ void InitializeConstants();
+
Address base_ = kNullAddress;
Address end_ = kNullAddress;
size_t size_ = 0;
@@ -128,10 +160,16 @@ class V8_EXPORT_PRIVATE V8VirtualMemoryCage {
bool disabled_ = false;
bool is_fake_cage_ = false;
- // The allocator through which the virtual memory of the cage was allocated.
- v8::PageAllocator* page_allocator_ = nullptr;
- // The allocator to allocate pages inside the cage.
+ // The virtual address subspace backing the cage.
+ std::unique_ptr<v8::VirtualAddressSpace> virtual_address_space_;
+
+ // The page allocator instance for this cage.
std::unique_ptr<v8::PageAllocator> cage_page_allocator_;
+
+#ifdef V8_CAGED_POINTERS
+ // CagedPointer constants inside this cage.
+ CagedPointerConstants constants_;
+#endif
};
#endif // V8_VIRTUAL_MEMORY_CAGE_IS_AVAILABLE
@@ -151,6 +189,16 @@ V8_INLINE bool IsValidBackingStorePointer(void* ptr) {
#endif
}
+V8_INLINE void* EmptyBackingStoreBuffer() {
+#ifdef V8_CAGED_POINTERS
+ return reinterpret_cast<void*>(GetProcessWideVirtualMemoryCage()
+ ->constants()
+ .empty_backing_store_buffer());
+#else
+ return nullptr;
+#endif
+}
+
} // namespace internal
} // namespace v8
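With caged pointers, "no backing store" is represented by a sentinel placed at the last byte of the cage rather than by nullptr, so a stray access lands next to the trailing guard region. A sketch of that placement and of the comparison an accessor could make; the cage base and size below are invented:

    #include <cstdint>
    #include <iostream>

    // Invented cage layout for illustration; V8 computes base_ and size_ at
    // cage initialization time.
    constexpr uint64_t kCageBase = 0x0000200000000000ull;
    constexpr uint64_t kCageSize = uint64_t{1} << 40;

    // Mirrors InitializeConstants(): the empty-backing-store sentinel sits at
    // the very end of the cage, adjacent to the trailing guard region.
    constexpr uint64_t kEmptyBackingStoreBuffer = kCageBase + kCageSize - 1;

    bool IsEmptyBackingStore(uint64_t backing_store) {
      return backing_store == kEmptyBackingStoreBuffer;
    }

    int main() {
      std::cout << std::hex << kEmptyBackingStoreBuffer << " "
                << IsEmptyBackingStore(kEmptyBackingStoreBuffer) << "\n";
    }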
diff --git a/deps/v8/src/snapshot/code-serializer.cc b/deps/v8/src/snapshot/code-serializer.cc
index 8b519b6921..0a05c74d67 100644
--- a/deps/v8/src/snapshot/code-serializer.cc
+++ b/deps/v8/src/snapshot/code-serializer.cc
@@ -139,7 +139,7 @@ void CodeSerializer::SerializeObjectImpl(Handle<HeapObject> obj) {
if (SerializeReadOnlyObject(obj)) return;
- CHECK(!obj->IsCode());
+ CHECK(!obj->IsCode(cage_base()));
ReadOnlyRoots roots(isolate());
if (ElideObject(*obj)) {
@@ -201,6 +201,25 @@ void CodeSerializer::SerializeObjectImpl(Handle<HeapObject> obj) {
return;
}
+ if (obj->IsUncompiledDataWithoutPreparseDataWithJob()) {
+ Handle<UncompiledDataWithoutPreparseDataWithJob> data =
+ Handle<UncompiledDataWithoutPreparseDataWithJob>::cast(obj);
+ Address job = data->job();
+ data->set_job(kNullAddress);
+ SerializeGeneric(data);
+ data->set_job(job);
+ return;
+ }
+ if (obj->IsUncompiledDataWithPreparseDataAndJob()) {
+ Handle<UncompiledDataWithPreparseDataAndJob> data =
+ Handle<UncompiledDataWithPreparseDataAndJob>::cast(obj);
+ Address job = data->job();
+ data->set_job(kNullAddress);
+ SerializeGeneric(data);
+ data->set_job(job);
+ return;
+ }
+
// NOTE(mmarchini): If we try to serialize an InterpreterData our process
// will crash since it stores a code object. Instead, we serialize the
// bytecode array stored within the InterpreterData, which is the important
@@ -218,7 +237,8 @@ void CodeSerializer::SerializeObjectImpl(Handle<HeapObject> obj) {
// There should be no references to the global object embedded.
CHECK(!obj->IsJSGlobalProxy() && !obj->IsJSGlobalObject());
// Embedded FixedArrays that need rehashing must support rehashing.
- CHECK_IMPLIES(obj->NeedsRehashing(), obj->CanBeRehashed());
+ CHECK_IMPLIES(obj->NeedsRehashing(cage_base()),
+ obj->CanBeRehashed(cage_base()));
// We expect no instantiated function objects or contexts.
CHECK(!obj->IsJSFunction() && !obj->IsContext());
@@ -259,7 +279,7 @@ void CreateInterpreterDataForDeserializedCode(Isolate* isolate,
INTERPRETER_DATA_TYPE, AllocationType::kOld));
interpreter_data->set_bytecode_array(info->GetBytecodeArray(isolate));
- interpreter_data->set_interpreter_trampoline(*code);
+ interpreter_data->set_interpreter_trampoline(ToCodeT(*code));
info->set_interpreter_data(*interpreter_data);
@@ -557,8 +577,9 @@ SerializedCodeData::SerializedCodeData(const std::vector<byte>* payload,
// Copy serialized data.
CopyBytes(data_ + kHeaderSize, payload->data(),
static_cast<size_t>(payload->size()));
-
- SetHeaderValue(kChecksumOffset, Checksum(ChecksummedContent()));
+ uint32_t checksum =
+ FLAG_verify_snapshot_checksum ? Checksum(ChecksummedContent()) : 0;
+ SetHeaderValue(kChecksumOffset, checksum);
}
SerializedCodeSanityCheckResult SerializedCodeData::SanityCheck(
@@ -587,21 +608,23 @@ SerializedCodeSanityCheckResult SerializedCodeData::SanityCheckWithoutSource()
return SerializedCodeSanityCheckResult::kMagicNumberMismatch;
}
uint32_t version_hash = GetHeaderValue(kVersionHashOffset);
- uint32_t flags_hash = GetHeaderValue(kFlagHashOffset);
- uint32_t payload_length = GetHeaderValue(kPayloadLengthOffset);
- uint32_t c = GetHeaderValue(kChecksumOffset);
if (version_hash != Version::Hash()) {
return SerializedCodeSanityCheckResult::kVersionMismatch;
}
+ uint32_t flags_hash = GetHeaderValue(kFlagHashOffset);
if (flags_hash != FlagList::Hash()) {
return SerializedCodeSanityCheckResult::kFlagsMismatch;
}
+ uint32_t payload_length = GetHeaderValue(kPayloadLengthOffset);
uint32_t max_payload_length = this->size_ - kHeaderSize;
if (payload_length > max_payload_length) {
return SerializedCodeSanityCheckResult::kLengthMismatch;
}
- if (Checksum(ChecksummedContent()) != c) {
- return SerializedCodeSanityCheckResult::kChecksumMismatch;
+ if (FLAG_verify_snapshot_checksum) {
+ uint32_t checksum = GetHeaderValue(kChecksumOffset);
+ if (Checksum(ChecksummedContent()) != checksum) {
+ return SerializedCodeSanityCheckResult::kChecksumMismatch;
+ }
}
return SerializedCodeSanityCheckResult::kSuccess;
}
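The serializer now writes a checksum only when FLAG_verify_snapshot_checksum is set (storing 0 otherwise), and the sanity check skips verification under the same flag. The write/verify pairing in isolation, using a toy checksum rather than V8's real hash:

    #include <cstdint>
    #include <iostream>
    #include <numeric>
    #include <vector>

    namespace {
    bool verify_snapshot_checksum = true;  // stand-in for the V8 flag

    uint32_t Checksum(const std::vector<uint8_t>& payload) {
      // Toy checksum; V8 hashes the checksummed content.
      return std::accumulate(payload.begin(), payload.end(), 0u);
    }

    uint32_t HeaderChecksumForWrite(const std::vector<uint8_t>& payload) {
      return verify_snapshot_checksum ? Checksum(payload) : 0;
    }

    bool SanityCheckChecksum(const std::vector<uint8_t>& payload,
                             uint32_t stored_checksum) {
      if (!verify_snapshot_checksum) return true;  // verification skipped
      return Checksum(payload) == stored_checksum;
    }
    }  // namespace

    int main() {
      std::vector<uint8_t> payload = {1, 2, 3};
      uint32_t stored = HeaderChecksumForWrite(payload);
      std::cout << SanityCheckChecksum(payload, stored) << "\n";  // prints 1
    }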
diff --git a/deps/v8/src/snapshot/context-serializer.cc b/deps/v8/src/snapshot/context-serializer.cc
index 780446e1d8..4aba58f660 100644
--- a/deps/v8/src/snapshot/context-serializer.cc
+++ b/deps/v8/src/snapshot/context-serializer.cc
@@ -225,7 +225,7 @@ bool ContextSerializer::SerializeJSObjectWithEmbedderFields(
int embedder_fields_count = js_obj->GetEmbedderFieldCount();
if (embedder_fields_count == 0) return false;
CHECK_GT(embedder_fields_count, 0);
- DCHECK(!js_obj->NeedsRehashing());
+ DCHECK(!js_obj->NeedsRehashing(cage_base()));
DisallowGarbageCollection no_gc;
DisallowJavascriptExecution no_js(isolate());
@@ -310,8 +310,8 @@ bool ContextSerializer::SerializeJSObjectWithEmbedderFields(
void ContextSerializer::CheckRehashability(HeapObject obj) {
if (!can_be_rehashed_) return;
- if (!obj.NeedsRehashing()) return;
- if (obj.CanBeRehashed()) return;
+ if (!obj.NeedsRehashing(cage_base())) return;
+ if (obj.CanBeRehashed(cage_base())) return;
can_be_rehashed_ = false;
}
diff --git a/deps/v8/src/snapshot/deserializer.cc b/deps/v8/src/snapshot/deserializer.cc
index 8d75b3696b..fb3c41888e 100644
--- a/deps/v8/src/snapshot/deserializer.cc
+++ b/deps/v8/src/snapshot/deserializer.cc
@@ -249,9 +249,9 @@ Deserializer<IsolateT>::Deserializer(IsolateT* isolate,
isolate->RegisterDeserializerStarted();
// We start the indices here at 1, so that we can distinguish between an
- // actual index and a nullptr (serialized as kNullRefSentinel) in a
- // deserialized object requiring fix-up.
- STATIC_ASSERT(kNullRefSentinel == 0);
+ // actual index and an empty backing store (serialized as
+ // kEmptyBackingStoreRefSentinel) in a deserialized object requiring fix-up.
+ STATIC_ASSERT(kEmptyBackingStoreRefSentinel == 0);
backing_stores_.push_back({});
#ifdef DEBUG
@@ -395,7 +395,7 @@ template <typename IsolateT>
void Deserializer<IsolateT>::PostProcessNewObject(Handle<Map> map,
Handle<HeapObject> obj,
SnapshotSpace space) {
- DCHECK_EQ(*map, obj->map());
+ DCHECK_EQ(*map, obj->map(isolate_));
DisallowGarbageCollection no_gc;
InstanceType instance_type = map->instance_type();
@@ -431,6 +431,7 @@ void Deserializer<IsolateT>::PostProcessNewObject(Handle<Map> map,
isolate()->string_table()->LookupKey(isolate(), &key);
if (*result != *string) {
+ DCHECK(!string->IsShared());
string->MakeThin(isolate(), *result);
// Mutate the given object handle so that the backreference entry is
// also updated.
@@ -486,9 +487,9 @@ void Deserializer<IsolateT>::PostProcessNewObject(Handle<Map> map,
} else if (InstanceTypeChecker::IsJSDataView(instance_type)) {
Handle<JSDataView> data_view = Handle<JSDataView>::cast(obj);
JSArrayBuffer buffer = JSArrayBuffer::cast(data_view->buffer());
- void* backing_store = nullptr;
+ void* backing_store = EmptyBackingStoreBuffer();
uint32_t store_index = buffer.GetBackingStoreRefForDeserialization();
- if (store_index != kNullRefSentinel) {
+ if (store_index != kEmptyBackingStoreRefSentinel) {
// The backing store of the JSArrayBuffer has not been correctly restored
// yet, as that may trigger GC. The backing_store field currently contains
// a numbered reference to an already deserialized backing store.
@@ -501,28 +502,27 @@ void Deserializer<IsolateT>::PostProcessNewObject(Handle<Map> map,
Handle<JSTypedArray> typed_array = Handle<JSTypedArray>::cast(obj);
// Fixup typed array pointers.
if (typed_array->is_on_heap()) {
- Address raw_external_pointer = typed_array->external_pointer_raw();
- typed_array->SetOnHeapDataPtr(
- main_thread_isolate(), HeapObject::cast(typed_array->base_pointer()),
- raw_external_pointer);
+ typed_array->AddExternalPointerCompensationForDeserialization(
+ main_thread_isolate());
} else {
// Serializer writes backing store ref as a DataPtr() value.
uint32_t store_index =
typed_array->GetExternalBackingStoreRefForDeserialization();
auto backing_store = backing_stores_[store_index];
- auto start = backing_store
- ? reinterpret_cast<byte*>(backing_store->buffer_start())
- : nullptr;
+ void* start = backing_store ? backing_store->buffer_start()
+ : EmptyBackingStoreBuffer();
typed_array->SetOffHeapDataPtr(main_thread_isolate(), start,
typed_array->byte_offset());
}
} else if (InstanceTypeChecker::IsJSArrayBuffer(instance_type)) {
Handle<JSArrayBuffer> buffer = Handle<JSArrayBuffer>::cast(obj);
// Postpone allocation of backing store to avoid triggering the GC.
- if (buffer->GetBackingStoreRefForDeserialization() != kNullRefSentinel) {
+ if (buffer->GetBackingStoreRefForDeserialization() !=
+ kEmptyBackingStoreRefSentinel) {
new_off_heap_array_buffers_.push_back(buffer);
} else {
- buffer->set_backing_store(nullptr);
+ buffer->set_backing_store(main_thread_isolate(),
+ EmptyBackingStoreBuffer());
}
} else if (InstanceTypeChecker::IsBytecodeArray(instance_type)) {
// TODO(mythria): Remove these once we store the default values for these
@@ -697,7 +697,7 @@ Handle<HeapObject> Deserializer<IsolateT>::ReadMetaMap() {
const int size_in_tagged = size_in_bytes / kTaggedSize;
HeapObject raw_obj =
- Allocate(SpaceToAllocation(space), size_in_bytes, kWordAligned);
+ Allocate(SpaceToAllocation(space), size_in_bytes, kTaggedAligned);
raw_obj.set_map_after_allocation(Map::unchecked_cast(raw_obj));
MemsetTagged(raw_obj.RawField(kTaggedSize),
Smi::uninitialized_deserialization_value(), size_in_tagged - 1);
@@ -1260,7 +1260,7 @@ HeapObject Deserializer<IsolateT>::Allocate(AllocationType allocation, int size,
if (!previous_allocation_obj_.is_null()) {
// Make sure that the previous object is initialized sufficiently to
// be iterated over by the GC.
- int object_size = previous_allocation_obj_->Size();
+ int object_size = previous_allocation_obj_->Size(isolate_);
DCHECK_LE(object_size, previous_allocation_size_);
}
#endif
diff --git a/deps/v8/src/snapshot/deserializer.h b/deps/v8/src/snapshot/deserializer.h
index 53036951e0..9498925f17 100644
--- a/deps/v8/src/snapshot/deserializer.h
+++ b/deps/v8/src/snapshot/deserializer.h
@@ -49,8 +49,6 @@ class Deserializer : public SerializerDeserializer {
Deserializer(const Deserializer&) = delete;
Deserializer& operator=(const Deserializer&) = delete;
- uint32_t GetChecksum() const { return source_.GetChecksum(); }
-
protected:
// Create a deserializer from a snapshot byte source.
Deserializer(IsolateT* isolate, base::Vector<const byte> payload,
diff --git a/deps/v8/src/snapshot/embedded/embedded-data.cc b/deps/v8/src/snapshot/embedded/embedded-data.cc
index 188ed6e879..3b3653676a 100644
--- a/deps/v8/src/snapshot/embedded/embedded-data.cc
+++ b/deps/v8/src/snapshot/embedded/embedded-data.cc
@@ -49,7 +49,7 @@ Builtin TryLookupCode(const EmbeddedData& d, Address address) {
} // namespace
// static
-bool InstructionStream::PcIsOffHeap(Isolate* isolate, Address pc) {
+bool OffHeapInstructionStream::PcIsOffHeap(Isolate* isolate, Address pc) {
// Mksnapshot calls this while the embedded blob is not available yet.
if (isolate->embedded_blob_code() == nullptr) return false;
DCHECK_NOT_NULL(Isolate::CurrentEmbeddedBlobCode());
@@ -60,9 +60,8 @@ bool InstructionStream::PcIsOffHeap(Isolate* isolate, Address pc) {
}
// static
-bool InstructionStream::TryGetAddressForHashing(Isolate* isolate,
- Address address,
- uint32_t* hashable_address) {
+bool OffHeapInstructionStream::TryGetAddressForHashing(
+ Isolate* isolate, Address address, uint32_t* hashable_address) {
// Mksnapshot calls this while the embedded blob is not available yet.
if (isolate->embedded_blob_code() == nullptr) return false;
DCHECK_NOT_NULL(Isolate::CurrentEmbeddedBlobCode());
@@ -84,7 +83,8 @@ bool InstructionStream::TryGetAddressForHashing(Isolate* isolate,
}
// static
-Builtin InstructionStream::TryLookupCode(Isolate* isolate, Address address) {
+Builtin OffHeapInstructionStream::TryLookupCode(Isolate* isolate,
+ Address address) {
// Mksnapshot calls this while the embedded blob is not available yet.
if (isolate->embedded_blob_code() == nullptr) return Builtin::kNoBuiltinId;
DCHECK_NOT_NULL(Isolate::CurrentEmbeddedBlobCode());
@@ -95,15 +95,27 @@ Builtin InstructionStream::TryLookupCode(Isolate* isolate, Address address) {
!Builtins::IsBuiltinId(builtin)) {
builtin = i::TryLookupCode(EmbeddedData::FromBlob(), address);
}
+
+#ifdef V8_COMPRESS_POINTERS_IN_SHARED_CAGE
+ if (V8_SHORT_BUILTIN_CALLS_BOOL && !Builtins::IsBuiltinId(builtin)) {
+ // When the shared pointer compression cage is enabled and holds a copy of
+ // the embedded code blob, that copy may have been used regardless of
+ // whether this isolate knows about it (see
+ // Code::OffHeapInstructionStart()). So this blob has to be checked too.
+ CodeRange* code_range = CodeRange::GetProcessWideCodeRange().get();
+ if (code_range && code_range->embedded_blob_code_copy() != nullptr) {
+ builtin = i::TryLookupCode(EmbeddedData::FromBlob(code_range), address);
+ }
+ }
+#endif
return builtin;
}
// static
-void InstructionStream::CreateOffHeapInstructionStream(Isolate* isolate,
- uint8_t** code,
- uint32_t* code_size,
- uint8_t** data,
- uint32_t* data_size) {
+void OffHeapInstructionStream::CreateOffHeapOffHeapInstructionStream(
+ Isolate* isolate, uint8_t** code, uint32_t* code_size, uint8_t** data,
+ uint32_t* data_size) {
// Create the embedded blob from scratch using the current Isolate's heap.
EmbeddedData d = EmbeddedData::FromIsolate(isolate);
@@ -158,10 +170,8 @@ void InstructionStream::CreateOffHeapInstructionStream(Isolate* isolate,
}
// static
-void InstructionStream::FreeOffHeapInstructionStream(uint8_t* code,
- uint32_t code_size,
- uint8_t* data,
- uint32_t data_size) {
+void OffHeapInstructionStream::FreeOffHeapOffHeapInstructionStream(
+ uint8_t* code, uint32_t code_size, uint8_t* data, uint32_t data_size) {
v8::PageAllocator* page_allocator = v8::internal::GetPlatformPageAllocator();
const uint32_t page_size =
static_cast<uint32_t>(page_allocator->AllocatePageSize());
diff --git a/deps/v8/src/snapshot/embedded/embedded-data.h b/deps/v8/src/snapshot/embedded/embedded-data.h
index 9ccacea24c..ba090062b3 100644
--- a/deps/v8/src/snapshot/embedded/embedded-data.h
+++ b/deps/v8/src/snapshot/embedded/embedded-data.h
@@ -19,7 +19,7 @@ class Isolate;
// Wraps an off-heap instruction stream.
// TODO(jgruber,v8:6666): Remove this class.
-class InstructionStream final : public AllStatic {
+class OffHeapInstructionStream final : public AllStatic {
public:
// Returns true, iff the given pc points into an off-heap instruction stream.
static bool PcIsOffHeap(Isolate* isolate, Address pc);
@@ -38,12 +38,15 @@ class InstructionStream final : public AllStatic {
// containing all off-heap code. The area is guaranteed to be contiguous.
// Note that this only applies when building the snapshot, e.g. for
// mksnapshot. Otherwise, off-heap code is embedded directly into the binary.
- static void CreateOffHeapInstructionStream(Isolate* isolate, uint8_t** code,
- uint32_t* code_size,
- uint8_t** data,
- uint32_t* data_size);
- static void FreeOffHeapInstructionStream(uint8_t* code, uint32_t code_size,
- uint8_t* data, uint32_t data_size);
+ static void CreateOffHeapOffHeapInstructionStream(Isolate* isolate,
+ uint8_t** code,
+ uint32_t* code_size,
+ uint8_t** data,
+ uint32_t* data_size);
+ static void FreeOffHeapOffHeapInstructionStream(uint8_t* code,
+ uint32_t code_size,
+ uint8_t* data,
+ uint32_t data_size);
};
class EmbeddedData final {
@@ -98,6 +101,22 @@ class EmbeddedData final {
// the un-embedded one.
if (global_d.IsInCodeRange(maybe_builtin_pc)) return global_d;
}
+#ifdef V8_COMPRESS_POINTERS_IN_SHARED_CAGE
+ if (V8_SHORT_BUILTIN_CALLS_BOOL && !d.IsInCodeRange(maybe_builtin_pc)) {
+    // When the shared pointer compression cage is enabled and holds a copy of
+    // the embedded code blob, that copy could have been used regardless of
+    // whether this isolate uses it or even knows about it (see
+    // Code::OffHeapInstructionStart()), so it has to be checked too.
+ CodeRange* code_range = CodeRange::GetProcessWideCodeRange().get();
+ if (code_range && code_range->embedded_blob_code_copy() != nullptr) {
+ EmbeddedData remapped_d = EmbeddedData::FromBlob(code_range);
+ // If the pc does not belong to the embedded code blob we should be
+ // using the un-embedded one.
+ if (remapped_d.IsInCodeRange(maybe_builtin_pc)) return remapped_d;
+ }
+ }
+#endif
return d;
}
diff --git a/deps/v8/src/snapshot/mksnapshot.cc b/deps/v8/src/snapshot/mksnapshot.cc
index 86b0304fb0..5687172e60 100644
--- a/deps/v8/src/snapshot/mksnapshot.cc
+++ b/deps/v8/src/snapshot/mksnapshot.cc
@@ -308,6 +308,6 @@ int main(int argc, char** argv) {
i::FreeCurrentEmbeddedBlob();
v8::V8::Dispose();
- v8::V8::ShutdownPlatform();
+ v8::V8::DisposePlatform();
return 0;
}
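The mksnapshot change tracks the embedder-facing rename of v8::V8::ShutdownPlatform to v8::V8::DisposePlatform. A rough teardown sketch under the new name, assuming the platform was installed with InitializePlatform during the usual startup sequence:

// Sketch of embedder teardown order with the renamed API.
// `isolate` and `platform` (a std::unique_ptr<v8::Platform>) are assumed to
// exist from initialization.
isolate->Dispose();
v8::V8::Dispose();
v8::V8::DisposePlatform();  // previously v8::V8::ShutdownPlatform()
platform.reset();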
diff --git a/deps/v8/src/snapshot/read-only-serializer.cc b/deps/v8/src/snapshot/read-only-serializer.cc
index 7376050dd0..029f24c300 100644
--- a/deps/v8/src/snapshot/read-only-serializer.cc
+++ b/deps/v8/src/snapshot/read-only-serializer.cc
@@ -114,7 +114,7 @@ bool ReadOnlySerializer::MustBeDeferred(HeapObject object) {
}
// Defer objects with special alignment requirements until the filler roots
// are serialized.
- return HeapObject::RequiredAlignment(object.map()) != kWordAligned;
+ return HeapObject::RequiredAlignment(object.map()) != kTaggedAligned;
}
bool ReadOnlySerializer::SerializeUsingReadOnlyObjectCache(
diff --git a/deps/v8/src/snapshot/roots-serializer.cc b/deps/v8/src/snapshot/roots-serializer.cc
index 7e459ee811..3b9a7b84ba 100644
--- a/deps/v8/src/snapshot/roots-serializer.cc
+++ b/deps/v8/src/snapshot/roots-serializer.cc
@@ -60,8 +60,8 @@ void RootsSerializer::VisitRootPointers(Root root, const char* description,
void RootsSerializer::CheckRehashability(HeapObject obj) {
if (!can_be_rehashed_) return;
- if (!obj.NeedsRehashing()) return;
- if (obj.CanBeRehashed()) return;
+ if (!obj.NeedsRehashing(cage_base())) return;
+ if (obj.CanBeRehashed(cage_base())) return;
can_be_rehashed_ = false;
}
diff --git a/deps/v8/src/snapshot/serializer-deserializer.h b/deps/v8/src/snapshot/serializer-deserializer.h
index 601edd8981..5bc23cc7bf 100644
--- a/deps/v8/src/snapshot/serializer-deserializer.h
+++ b/deps/v8/src/snapshot/serializer-deserializer.h
@@ -259,9 +259,9 @@ class SerializerDeserializer : public RootVisitor {
RootIndex>;
using HotObject = BytecodeValueEncoder<kHotObject, 0, kHotObjectCount - 1>;
- // This backing store reference value represents nullptr values during
+ // This backing store reference value represents empty backing stores during
// serialization/deserialization.
- static const uint32_t kNullRefSentinel = 0;
+ static const uint32_t kEmptyBackingStoreRefSentinel = 0;
};
} // namespace internal
diff --git a/deps/v8/src/snapshot/serializer.cc b/deps/v8/src/snapshot/serializer.cc
index 460e088036..2ae6fc17b1 100644
--- a/deps/v8/src/snapshot/serializer.cc
+++ b/deps/v8/src/snapshot/serializer.cc
@@ -26,6 +26,9 @@ namespace internal {
Serializer::Serializer(Isolate* isolate, Snapshot::SerializerFlags flags)
: isolate_(isolate),
+#if V8_COMPRESS_POINTERS
+ cage_base_(isolate),
+#endif // V8_COMPRESS_POINTERS
hot_objects_(isolate->heap()),
reference_map_(isolate),
external_reference_encoder_(isolate),
@@ -528,21 +531,21 @@ void Serializer::ObjectSerializer::SerializeJSArrayBuffer() {
int32_t byte_length = static_cast<int32_t>(buffer->byte_length());
ArrayBufferExtension* extension = buffer->extension();
- // The embedder-allocated backing store only exists for the off-heap case.
- if (backing_store != nullptr) {
+ // Only serialize non-empty backing stores.
+ if (buffer->IsEmpty()) {
+ buffer->SetBackingStoreRefForSerialization(kEmptyBackingStoreRefSentinel);
+ } else {
uint32_t ref = SerializeBackingStore(backing_store, byte_length);
buffer->SetBackingStoreRefForSerialization(ref);
// Ensure deterministic output by setting extension to null during
// serialization.
buffer->set_extension(nullptr);
- } else {
- buffer->SetBackingStoreRefForSerialization(kNullRefSentinel);
}
SerializeObject();
- buffer->set_backing_store(backing_store);
+ buffer->set_backing_store(isolate(), backing_store);
buffer->set_extension(extension);
}
@@ -764,8 +767,8 @@ SnapshotSpace GetSnapshotSpace(Handle<HeapObject> object) {
} // namespace
void Serializer::ObjectSerializer::SerializeObject() {
- int size = object_->Size();
- Map map = object_->map();
+ Map map = object_->map(serializer_->cage_base());
+ int size = object_->SizeFromMap(map);
// Descriptor arrays have complex element weakness, that is dependent on the
// maps pointing to them. During deserialization, this can cause them to get
@@ -900,7 +903,7 @@ void Serializer::ObjectSerializer::VisitCodePointer(HeapObject host,
HandleScope scope(isolate());
DisallowGarbageCollection no_gc;
-#if V8_EXTERNAL_CODE_SPACE
+#ifdef V8_EXTERNAL_CODE_SPACE
PtrComprCageBase code_cage_base(isolate()->code_cage_base());
#else
PtrComprCageBase code_cage_base(isolate());
@@ -1046,7 +1049,7 @@ void Serializer::ObjectSerializer::VisitOffHeapTarget(Code host,
Address addr = rinfo->target_off_heap_target();
CHECK_NE(kNullAddress, addr);
- Builtin builtin = InstructionStream::TryLookupCode(isolate(), addr);
+ Builtin builtin = OffHeapInstructionStream::TryLookupCode(isolate(), addr);
CHECK(Builtins::IsBuiltinId(builtin));
CHECK(Builtins::IsIsolateIndependent(builtin));
diff --git a/deps/v8/src/snapshot/serializer.h b/deps/v8/src/snapshot/serializer.h
index de1ed7bfd4..b049af5776 100644
--- a/deps/v8/src/snapshot/serializer.h
+++ b/deps/v8/src/snapshot/serializer.h
@@ -152,7 +152,7 @@ class ObjectCacheIndexMap {
return find_result.already_exists;
}
- bool Lookup(Handle<HeapObject> obj, int* index_out) const {
+ bool Lookup(HeapObject obj, int* index_out) const {
int* index = map_.Find(obj);
if (index == nullptr) {
return false;
@@ -183,6 +183,16 @@ class Serializer : public SerializerDeserializer {
Isolate* isolate() const { return isolate_; }
+ // The pointer compression cage base value used for decompression of all
+ // tagged values except references to Code objects.
+ PtrComprCageBase cage_base() const {
+#if V8_COMPRESS_POINTERS
+ return cage_base_;
+#else
+ return PtrComprCageBase{};
+#endif // V8_COMPRESS_POINTERS
+ }
+
int TotalAllocationSize() const;
protected:
@@ -350,9 +360,12 @@ class Serializer : public SerializerDeserializer {
// Disallow GC during serialization.
// TODO(leszeks, v8:10815): Remove this constraint.
- DISALLOW_GARBAGE_COLLECTION(no_gc)
+ DISALLOW_GARBAGE_COLLECTION(no_gc_)
Isolate* isolate_;
+#if V8_COMPRESS_POINTERS
+ const PtrComprCageBase cage_base_;
+#endif // V8_COMPRESS_POINTERS
HotObjectsList hot_objects_;
SerializerReferenceMap reference_map_;
ExternalReferenceEncoder external_reference_encoder_;
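The cached cage_base_ exists so hot paths such as SerializeObject() can decompress tagged fields without re-deriving the base from the isolate on every access. A small sketch of the intended pattern, assuming a Serializer* and a HeapObject are in scope:

// Sketch: reading the map and size through the serializer's cached cage base,
// mirroring the SerializeObject() change above.
PtrComprCageBase cage_base = serializer->cage_base();
Map map = object.map(cage_base);
int size = object.SizeFromMap(map);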
diff --git a/deps/v8/src/snapshot/snapshot-source-sink.h b/deps/v8/src/snapshot/snapshot-source-sink.h
index e14e76cb04..5a88fb7eb2 100644
--- a/deps/v8/src/snapshot/snapshot-source-sink.h
+++ b/deps/v8/src/snapshot/snapshot-source-sink.h
@@ -103,10 +103,6 @@ class SnapshotByteSource final {
int position() { return position_; }
void set_position(int position) { position_ = position; }
- uint32_t GetChecksum() const {
- return Checksum(base::Vector<const byte>(data_, length_));
- }
-
private:
const byte* data_;
int length_;
diff --git a/deps/v8/src/snapshot/snapshot.cc b/deps/v8/src/snapshot/snapshot.cc
index 717b9a51cb..db1cb36087 100644
--- a/deps/v8/src/snapshot/snapshot.cc
+++ b/deps/v8/src/snapshot/snapshot.cc
@@ -120,8 +120,11 @@ class SnapshotImpl : public AllStatic {
} // namespace
-SnapshotData MaybeDecompress(const base::Vector<const byte>& snapshot_data) {
+SnapshotData MaybeDecompress(Isolate* isolate,
+ const base::Vector<const byte>& snapshot_data) {
#ifdef V8_SNAPSHOT_COMPRESSION
+ TRACE_EVENT0("v8", "V8.SnapshotDecompress");
+ RCS_SCOPE(isolate, RuntimeCallCounterId::kSnapshotDecompress);
return SnapshotCompression::Decompress(snapshot_data);
#else
return SnapshotData(snapshot_data);
@@ -158,15 +161,14 @@ bool Snapshot::VersionIsValid(const v8::StartupData* data) {
bool Snapshot::Initialize(Isolate* isolate) {
if (!isolate->snapshot_available()) return false;
+ TRACE_EVENT0("v8", "V8.DeserializeIsolate");
RCS_SCOPE(isolate, RuntimeCallCounterId::kDeserializeIsolate);
base::ElapsedTimer timer;
if (FLAG_profile_deserialization) timer.Start();
const v8::StartupData* blob = isolate->snapshot_blob();
SnapshotImpl::CheckVersion(blob);
- if (!FLAG_skip_snapshot_checksum) {
- CHECK(VerifyChecksum(blob));
- }
+ if (FLAG_verify_snapshot_checksum) CHECK(VerifyChecksum(blob));
base::Vector<const byte> startup_data =
SnapshotImpl::ExtractStartupData(blob);
base::Vector<const byte> read_only_data =
@@ -180,9 +182,11 @@ bool Snapshot::Initialize(Isolate* isolate) {
decompress_histogram.emplace(isolate->counters()->snapshot_decompress());
}
#endif
- SnapshotData startup_snapshot_data(MaybeDecompress(startup_data));
- SnapshotData read_only_snapshot_data(MaybeDecompress(read_only_data));
- SnapshotData shared_heap_snapshot_data(MaybeDecompress(shared_heap_data));
+ SnapshotData startup_snapshot_data(MaybeDecompress(isolate, startup_data));
+ SnapshotData read_only_snapshot_data(
+ MaybeDecompress(isolate, read_only_data));
+ SnapshotData shared_heap_snapshot_data(
+ MaybeDecompress(isolate, shared_heap_data));
#ifdef V8_SNAPSHOT_COMPRESSION
decompress_histogram.reset();
#endif
@@ -202,6 +206,7 @@ MaybeHandle<Context> Snapshot::NewContextFromSnapshot(
Isolate* isolate, Handle<JSGlobalProxy> global_proxy, size_t context_index,
v8::DeserializeEmbedderFieldsCallback embedder_fields_deserializer) {
if (!isolate->snapshot_available()) return Handle<Context>();
+ TRACE_EVENT0("v8", "V8.DeserializeContext");
RCS_SCOPE(isolate, RuntimeCallCounterId::kDeserializeContext);
base::ElapsedTimer timer;
if (FLAG_profile_deserialization) timer.Start();
@@ -219,7 +224,7 @@ MaybeHandle<Context> Snapshot::NewContextFromSnapshot(
isolate->counters()->context_snapshot_decompress());
}
#endif
- snapshot_data.emplace(MaybeDecompress(context_data));
+ snapshot_data.emplace(MaybeDecompress(isolate, context_data));
}
MaybeHandle<Context> maybe_result = ContextDeserializer::DeserializeContext(
@@ -248,8 +253,6 @@ void Snapshot::ClearReconstructableDataForSerialization(
HandleScope scope(isolate);
std::vector<i::Handle<i::SharedFunctionInfo>> sfis_to_clear;
{
- // Heap allocation is disallowed within this scope.
- DisallowGarbageCollection disallow_gc;
i::HeapObjectIterator it(isolate->heap());
for (i::HeapObject o = it.Next(); !o.is_null(); o = it.Next()) {
if (o.IsSharedFunctionInfo(cage_base)) {
@@ -329,6 +332,7 @@ void Snapshot::SerializeDeserializeAndVerifyForTesting(
// Test serialization.
{
+ GlobalSafepointScope global_safepoint(isolate);
DisallowGarbageCollection no_gc;
Snapshot::SerializerFlags flags(
@@ -337,7 +341,8 @@ void Snapshot::SerializeDeserializeAndVerifyForTesting(
(ReadOnlyHeap::IsReadOnlySpaceShared()
? Snapshot::kReconstructReadOnlyObjectCacheForTesting
: 0));
- serialized_data = Snapshot::Create(isolate, *default_context, no_gc, flags);
+ serialized_data = Snapshot::Create(isolate, *default_context,
+ global_safepoint, no_gc, flags);
auto_delete_serialized_data.reset(serialized_data.data);
}
@@ -375,17 +380,16 @@ v8::StartupData Snapshot::Create(
Isolate* isolate, std::vector<Context>* contexts,
const std::vector<SerializeInternalFieldsCallback>&
embedder_fields_serializers,
+ const GlobalSafepointScope& global_safepoint,
const DisallowGarbageCollection& no_gc, SerializerFlags flags) {
+ TRACE_EVENT0("v8", "V8.SnapshotCreate");
DCHECK_EQ(contexts->size(), embedder_fields_serializers.size());
DCHECK_GT(contexts->size(), 0);
HandleScope scope(isolate);
- // Enter a safepoint so that the heap is safe to iterate.
- // TODO(leszeks): This safepoint's scope could be tightened to just string
- // table iteration, as that iteration relies on there not being any concurrent
- // threads mutating the string table. But, there's currently no harm in
- // holding it for the entire snapshot serialization.
- SafepointScope safepoint(isolate->heap());
+ // The GlobalSafepointScope ensures we are in a safepoint scope so that the
+ // string table is safe to iterate. Unlike mksnapshot, embedders may have
+ // background threads running.
ReadOnlySerializer read_only_serializer(isolate, flags);
read_only_serializer.SerializeReadOnlyRoots();
@@ -460,11 +464,13 @@ v8::StartupData Snapshot::Create(
// static
v8::StartupData Snapshot::Create(Isolate* isolate, Context default_context,
+ const GlobalSafepointScope& global_safepoint,
const DisallowGarbageCollection& no_gc,
SerializerFlags flags) {
std::vector<Context> contexts{default_context};
std::vector<SerializeInternalFieldsCallback> callbacks{{}};
- return Snapshot::Create(isolate, &contexts, callbacks, no_gc, flags);
+ return Snapshot::Create(isolate, &contexts, callbacks, global_safepoint,
+ no_gc, flags);
}
v8::StartupData SnapshotImpl::CreateSnapshotBlob(
@@ -473,6 +479,7 @@ v8::StartupData SnapshotImpl::CreateSnapshotBlob(
const SnapshotData* shared_heap_snapshot_in,
const std::vector<SnapshotData*>& context_snapshots_in,
bool can_be_rehashed) {
+ TRACE_EVENT0("v8", "V8.SnapshotCompress");
// Have these separate from snapshot_in for compression, since we need to
// access the compressed data as well as the uncompressed reservations.
const SnapshotData* startup_snapshot;
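Every Snapshot::Create caller now has to supply a GlobalSafepointScope alongside the DisallowGarbageCollection scope, as the test helper above already does. A condensed sketch of the new calling convention, assuming an initialized isolate and a default_context handle:

// Sketch mirroring SerializeDeserializeAndVerifyForTesting above.
GlobalSafepointScope global_safepoint(isolate);
DisallowGarbageCollection no_gc;
v8::StartupData blob =
    Snapshot::Create(isolate, *default_context, global_safepoint, no_gc);
// The caller owns blob.data (the test wraps it in a deleter above).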
diff --git a/deps/v8/src/snapshot/snapshot.h b/deps/v8/src/snapshot/snapshot.h
index f176faa607..bfa03fd478 100644
--- a/deps/v8/src/snapshot/snapshot.h
+++ b/deps/v8/src/snapshot/snapshot.h
@@ -13,6 +13,7 @@ namespace v8 {
namespace internal {
class Context;
+class GlobalSafepointScope;
class Isolate;
class SnapshotData;
class JSGlobalProxy;
@@ -64,12 +65,14 @@ class Snapshot : public AllStatic {
Isolate* isolate, std::vector<Context>* contexts,
const std::vector<SerializeInternalFieldsCallback>&
embedder_fields_serializers,
+ const GlobalSafepointScope& global_safepoint,
const DisallowGarbageCollection& no_gc,
SerializerFlags flags = kDefaultSerializerFlags);
// Convenience helper for the above when only serializing a single context.
static v8::StartupData Create(
Isolate* isolate, Context default_context,
+ const GlobalSafepointScope& global_safepoint,
const DisallowGarbageCollection& no_gc,
SerializerFlags flags = kDefaultSerializerFlags);
diff --git a/deps/v8/src/strings/string-builder-inl.h b/deps/v8/src/strings/string-builder-inl.h
index e2b8b0b441..5194b4513c 100644
--- a/deps/v8/src/strings/string-builder-inl.h
+++ b/deps/v8/src/strings/string-builder-inl.h
@@ -130,6 +130,24 @@ class IncrementalStringBuilder {
}
}
+ template <int N>
+ V8_INLINE void AppendCStringLiteral(const char (&literal)[N]) {
+ // Note that the literal contains the zero char.
+ const int length = N - 1;
+ STATIC_ASSERT(length > 0);
+ if (length == 1) return AppendCharacter(literal[0]);
+ if (encoding_ == String::ONE_BYTE_ENCODING && CurrentPartCanFit(N)) {
+ const uint8_t* chars = reinterpret_cast<const uint8_t*>(literal);
+ SeqOneByteString::cast(*current_part_)
+ .SeqOneByteStringSetChars(current_index_, chars, length);
+ current_index_ += length;
+ if (current_index_ == part_length_) Extend();
+ DCHECK(HasValidCurrentIndex());
+ return;
+ }
+ return AppendCString(literal);
+ }
+
V8_INLINE void AppendCString(const char* s) {
const uint8_t* u = reinterpret_cast<const uint8_t*>(s);
if (encoding_ == String::ONE_BYTE_ENCODING) {
@@ -190,19 +208,38 @@ class IncrementalStringBuilder {
template <typename DestChar>
class NoExtend {
public:
- NoExtend(Handle<String> string, int offset,
+ NoExtend(String string, int offset,
const DisallowGarbageCollection& no_gc) {
- DCHECK(string->IsSeqOneByteString() || string->IsSeqTwoByteString());
+ DCHECK(string.IsSeqOneByteString() || string.IsSeqTwoByteString());
if (sizeof(DestChar) == 1) {
start_ = reinterpret_cast<DestChar*>(
- Handle<SeqOneByteString>::cast(string)->GetChars(no_gc) + offset);
+ SeqOneByteString::cast(string).GetChars(no_gc) + offset);
} else {
start_ = reinterpret_cast<DestChar*>(
- Handle<SeqTwoByteString>::cast(string)->GetChars(no_gc) + offset);
+ SeqTwoByteString::cast(string).GetChars(no_gc) + offset);
}
cursor_ = start_;
+#ifdef DEBUG
+ string_ = string;
+#endif
}
+#ifdef DEBUG
+ ~NoExtend() {
+ DestChar* end;
+ if (sizeof(DestChar) == 1) {
+ auto one_byte_string = SeqOneByteString::cast(string_);
+ end = reinterpret_cast<DestChar*>(one_byte_string.GetChars(no_gc_) +
+ one_byte_string.length());
+ } else {
+ auto two_byte_string = SeqTwoByteString::cast(string_);
+ end = reinterpret_cast<DestChar*>(two_byte_string.GetChars(no_gc_) +
+ two_byte_string.length());
+ }
+ DCHECK_LE(cursor_, end + 1);
+ }
+#endif
+
V8_INLINE void Append(DestChar c) { *(cursor_++) = c; }
V8_INLINE void AppendCString(const char* s) {
const uint8_t* u = reinterpret_cast<const uint8_t*>(s);
@@ -214,6 +251,9 @@ class IncrementalStringBuilder {
private:
DestChar* start_;
DestChar* cursor_;
+#ifdef DEBUG
+ String string_;
+#endif
DISALLOW_GARBAGE_COLLECTION(no_gc_)
};
@@ -242,14 +282,15 @@ class IncrementalStringBuilder {
public:
NoExtendBuilder(IncrementalStringBuilder* builder, int required_length,
const DisallowGarbageCollection& no_gc)
- : NoExtend<DestChar>(builder->current_part(), builder->current_index_,
- no_gc),
+ : NoExtend<DestChar>(*(builder->current_part()),
+ builder->current_index_, no_gc),
builder_(builder) {
DCHECK(builder->CurrentPartCanFit(required_length));
}
~NoExtendBuilder() {
builder_->current_index_ += NoExtend<DestChar>::written();
+ DCHECK(builder_->HasValidCurrentIndex());
}
private:
@@ -277,6 +318,8 @@ class IncrementalStringBuilder {
// Finish the current part and allocate a new part.
void Extend();
+ bool HasValidCurrentIndex() const;
+
// Shrink current part to the right size.
void ShrinkCurrentPart() {
DCHECK(current_index_ < part_length_);
@@ -314,6 +357,7 @@ void IncrementalStringBuilder::Append(SrcChar c) {
.SeqTwoByteStringSet(current_index_++, c);
}
if (current_index_ == part_length_) Extend();
+ DCHECK(HasValidCurrentIndex());
}
} // namespace internal
} // namespace v8
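The new AppendCStringLiteral fast path copies a compile-time literal into the current one-byte part without a strlen() call when it fits. A usage sketch, assuming an IncrementalStringBuilder is already set up (its constructor is part of the existing class, not this diff):

// Usage sketch for the new fast path.
IncrementalStringBuilder builder(isolate);
builder.AppendCStringLiteral("null");  // copied without strlen() when the current part fits
builder.AppendCStringLiteral(",");     // length-1 literals route through AppendCharacter()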
diff --git a/deps/v8/src/strings/string-builder.cc b/deps/v8/src/strings/string-builder.cc
index 7135d556bc..9d1e3a9574 100644
--- a/deps/v8/src/strings/string-builder.cc
+++ b/deps/v8/src/strings/string-builder.cc
@@ -246,6 +246,10 @@ int IncrementalStringBuilder::Length() const {
return accumulator_->length() + current_index_;
}
+bool IncrementalStringBuilder::HasValidCurrentIndex() const {
+ return current_index_ < part_length_;
+}
+
void IncrementalStringBuilder::Accumulate(Handle<String> new_part) {
Handle<String> new_accumulator;
if (accumulator()->length() + new_part->length() > String::kMaxLength) {
diff --git a/deps/v8/src/strings/string-stream.cc b/deps/v8/src/strings/string-stream.cc
index 39494a7827..b918e3c36d 100644
--- a/deps/v8/src/strings/string-stream.cc
+++ b/deps/v8/src/strings/string-stream.cc
@@ -303,7 +303,7 @@ void StringStream::PrintUsingMap(JSObject js_object) {
for (InternalIndex i : map.IterateOwnDescriptors()) {
PropertyDetails details = descs.GetDetails(i);
if (details.location() == PropertyLocation::kField) {
- DCHECK_EQ(kData, details.kind());
+ DCHECK_EQ(PropertyKind::kData, details.kind());
Object key = descs.GetKey(i);
if (key.IsString() || key.IsNumber()) {
int len = 3;
diff --git a/deps/v8/src/temporal/OWNERS b/deps/v8/src/temporal/OWNERS
new file mode 100644
index 0000000000..0026177c67
--- /dev/null
+++ b/deps/v8/src/temporal/OWNERS
@@ -0,0 +1,2 @@
+ftang@chromium.org
+syg@chromium.org
diff --git a/deps/v8/src/temporal/temporal-parser.cc b/deps/v8/src/temporal/temporal-parser.cc
new file mode 100644
index 0000000000..a4468b05d4
--- /dev/null
+++ b/deps/v8/src/temporal/temporal-parser.cc
@@ -0,0 +1,1220 @@
+// Copyright 2021 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/temporal/temporal-parser.h"
+
+#include "src/base/bounds.h"
+#include "src/objects/string-inl.h"
+#include "src/strings/char-predicates-inl.h"
+
+namespace v8 {
+namespace internal {
+
+namespace {
+
+// Temporal #prod-NonzeroDigit
+inline constexpr bool IsNonZeroDecimalDigit(base::uc32 c) {
+ return base::IsInRange(c, '1', '9');
+}
+
+// Temporal #prod-TZLeadingChar
+inline constexpr bool IsTZLeadingChar(base::uc32 c) {
+ return base::IsInRange(AsciiAlphaToLower(c), 'a', 'z') || c == '.' ||
+ c == '_';
+}
+
+// Temporal #prod-TZChar
+inline constexpr bool IsTZChar(base::uc32 c) {
+ return IsTZLeadingChar(c) || c == '-';
+}
+
+// Temporal #prod-DecimalSeparator
+inline constexpr bool IsDecimalSeparator(base::uc32 c) {
+ return c == '.' || c == ',';
+}
+
+// Temporal #prod-DateTimeSeparator
+inline constexpr bool IsDateTimeSeparator(base::uc32 c) {
+ return c == ' ' || AsciiAlphaToLower(c) == 't';
+}
+
+// Temporal #prod-ASCIISign
+inline constexpr bool IsAsciiSign(base::uc32 c) { return c == '-' || c == '+'; }
+
+// Temporal #prod-Sign
+inline constexpr bool IsSign(base::uc32 c) {
+ return c == 0x2212 || IsAsciiSign(c);
+}
+
+inline constexpr base::uc32 CanonicalSign(base::uc32 c) {
+ return c == 0x2212 ? '-' : c;
+}
+
+inline constexpr int32_t ToInt(base::uc32 c) { return c - '0'; }
+
+/**
+ * The TemporalParser uses two types of internal routines:
+ * - Scan routines, which follow the function signature below:
+ *   template <typename Char> int32_t Scan$ProductionName(
+ *       base::Vector<Char> str, int32_t s, R* out)
+ *
+ *   These routines scan the next item from position s in str and store the
+ *   parsed result into out if the expected string is successfully scanned.
+ *   They return the length of the matched text from s, or 0 to indicate that
+ *   no expected item matched.
+ *
+ * - Satisfy routines, which follow the function signature below:
+ *   template <typename Char>
+ *   bool Satisfy$ProductionName(base::Vector<Char> str, R* r);
+ *   These scan from the beginning of str by calling Scan routines, store the
+ *   parsed result into r, and return true if the entire str satisfies the
+ *   production.
+ *
+ * TODO(ftang) investigate refactoring into a class before shipping.
+ * Reference RegExpParserImpl by encapsulating the cursor position and
+ * only manipulating the current character and position with Next(),
+ * Advance(), current(), etc.
+ */
+
+// For Hour Production
+// Hour:
+// [0 1] Digit
+// 2 [0 1 2 3]
+template <typename Char>
+bool IsHour(base::Vector<Char> str, int32_t s) {
+ return (str.length() >= (s + 2)) &&
+ ((base::IsInRange(str[s], '0', '1') && IsDecimalDigit(str[s + 1])) ||
+ ((str[s] == '2') && base::IsInRange(str[s + 1], '0', '3')));
+}
+
+template <typename Char>
+int32_t ScanHour(base::Vector<Char> str, int32_t s, int32_t* out) {
+ if (!IsHour(str, s)) return 0;
+ *out = ToInt(str[s]) * 10 + ToInt(str[s + 1]);
+ return 2;
+}
+
+// MinuteSecond:
+// [0 1 2 3 4 5] Digit
+template <typename Char>
+bool IsMinuteSecond(base::Vector<Char> str, int32_t s) {
+ return (str.length() >= (s + 2)) &&
+ (base::IsInRange(str[s], '0', '5') && IsDecimalDigit(str[s + 1]));
+}
+
+template <typename Char>
+int32_t ScanMinuteSecond(base::Vector<Char> str, int32_t s, int32_t* out) {
+ if (!IsMinuteSecond(str, s)) return 0;
+ *out = ToInt(str[s]) * 10 + ToInt(str[s + 1]);
+ return 2;
+}
+
+// For the forward production in the grammar such as
+// ProductionB:
+// ProductionT
+#define SCAN_FORWARD(B, T, R) \
+ template <typename Char> \
+ int32_t Scan##B(base::Vector<Char> str, int32_t s, R* r) { \
+ return Scan##T(str, s, r); \
+ }
+
+// Same as above, but stores the result into a particular field in R
+
+// For the forward production in the grammar such as
+// ProductionB:
+// ProductionT1
+// ProductionT2
+#define SCAN_EITHER_FORWARD(B, T1, T2, R) \
+ template <typename Char> \
+ int32_t Scan##B(base::Vector<Char> str, int32_t s, R* r) { \
+ int32_t len; \
+ if ((len = Scan##T1(str, s, r)) > 0) return len; \
+ return Scan##T2(str, s, r); \
+ }
+
+// TimeHour: Hour
+SCAN_FORWARD(TimeHour, Hour, int32_t)
+
+// TimeMinute: MinuteSecond
+SCAN_FORWARD(TimeMinute, MinuteSecond, int32_t)
+
+// TimeSecond:
+// MinuteSecond
+// 60
+template <typename Char>
+int32_t ScanTimeSecond(base::Vector<Char> str, int32_t s, int32_t* out) {
+ int32_t len = ScanMinuteSecond(str, s, out);
+ // MinuteSecond
+ if (len > 0) return len;
+ if ((str.length() < (s + 2)) || (str[s] != '6') || (str[s + 1] != '0')) {
+ return 0;
+ }
+ // 60
+ *out = 60;
+ return 2;
+}
+
+constexpr int kPowerOfTen[] = {1, 10, 100, 1000, 10000,
+ 100000, 1000000, 10000000, 100000000};
+
+// FractionalPart : Digit{1,9}
+template <typename Char>
+int32_t ScanFractionalPart(base::Vector<Char> str, int32_t s, int32_t* out) {
+ int32_t cur = s;
+ if ((str.length() < (cur + 1)) || !IsDecimalDigit(str[cur])) return 0;
+ *out = ToInt(str[cur++]);
+ while ((cur < str.length()) && ((cur - s) < 9) && IsDecimalDigit(str[cur])) {
+ *out = 10 * (*out) + ToInt(str[cur++]);
+ }
+ *out *= kPowerOfTen[9 - (cur - s)];
+ return cur - s;
+}
+
+template <typename Char>
+int32_t ScanFractionalPart(base::Vector<Char> str, int32_t s, int64_t* out) {
+ int32_t out32;
+ int32_t len = ScanFractionalPart(str, s, &out32);
+ *out = out32;
+ return len;
+}
+
+// TimeFraction: FractionalPart
+SCAN_FORWARD(TimeFractionalPart, FractionalPart, int32_t)
+
+// Fraction: DecimalSeparator FractionalPart
+// DecimalSeparator: one of , .
+template <typename Char>
+int32_t ScanFraction(base::Vector<Char> str, int32_t s, int32_t* out) {
+ if ((str.length() < (s + 2)) || (!IsDecimalSeparator(str[s]))) return 0;
+ int32_t len;
+ if ((len = ScanFractionalPart(str, s + 1, out)) == 0) return 0;
+ return len + 1;
+}
+
+// TimeFraction: DecimalSeparator TimeFractionalPart
+// DecimalSeparator: one of , .
+template <typename Char>
+int32_t ScanTimeFraction(base::Vector<Char> str, int32_t s, int32_t* out) {
+ if ((str.length() < (s + 2)) || (!IsDecimalSeparator(str[s]))) return 0;
+ int32_t len;
+ if ((len = ScanTimeFractionalPart(str, s + 1, out)) == 0) return 0;
+ return len + 1;
+}
+
+template <typename Char>
+int32_t ScanTimeFraction(base::Vector<Char> str, int32_t s,
+ ParsedISO8601Result* r) {
+ return ScanTimeFraction(str, s, &(r->time_nanosecond));
+}
+
+// TimeSpec:
+// TimeHour
+// TimeHour : TimeMinute
+// TimeHour : TimeMinute : TimeSecond [TimeFraction]
+// TimeHour TimeMinute
+// TimeHour TimeMinute TimeSecond [TimeFraction]
+template <typename Char>
+int32_t ScanTimeSpec(base::Vector<Char> str, int32_t s,
+ ParsedISO8601Result* r) {
+ int32_t time_hour, time_minute, time_second;
+ int32_t len;
+ int32_t cur = s;
+ if ((len = ScanTimeHour(str, cur, &time_hour)) == 0) return 0;
+ cur += len;
+ if ((cur + 1) > str.length()) {
+ // TimeHour
+ r->time_hour = time_hour;
+ return cur - s;
+ }
+ if (str[cur] == ':') {
+ cur++;
+ if ((len = ScanTimeMinute(str, cur, &time_minute)) == 0) return 0;
+ cur += len;
+ if ((cur + 1) > str.length() || (str[cur] != ':')) {
+ // TimeHour : TimeMinute
+ r->time_hour = time_hour;
+ r->time_minute = time_minute;
+ return cur - s;
+ }
+ cur++;
+ if ((len = ScanTimeSecond(str, cur, &time_second)) == 0) return 0;
+ } else {
+ if ((len = ScanTimeMinute(str, cur, &time_minute)) == 0) {
+ // TimeHour
+ r->time_hour = time_hour;
+ return cur - s;
+ }
+ cur += len;
+ if ((len = ScanTimeSecond(str, cur, &time_second)) == 0) {
+ // TimeHour TimeMinute
+ r->time_hour = time_hour;
+ r->time_minute = time_minute;
+ return cur - s;
+ }
+ }
+ cur += len;
+ len = ScanTimeFraction(str, cur, r);
+ r->time_hour = time_hour;
+ r->time_minute = time_minute;
+ r->time_second = time_second;
+ return cur + len - s;
+}
+
+// TimeSpecSeparator: DateTimeSeparator TimeSpec
+// DateTimeSeparator: SPACE, 't', or 'T'
+template <typename Char>
+int32_t ScanTimeSpecSeparator(base::Vector<Char> str, int32_t s,
+ ParsedISO8601Result* r) {
+ if (!(((s + 1) < str.length()) && IsDateTimeSeparator(str[s]))) return 0;
+ int32_t len = ScanTimeSpec(str, s + 1, r);
+ return (len == 0) ? 0 : len + 1;
+}
+
+// DateExtendedYear: Sign Digit Digit Digit Digit Digit Digit
+template <typename Char>
+int32_t ScanDateExtendedYear(base::Vector<Char> str, int32_t s, int32_t* out) {
+ if (str.length() < (s + 7)) return 0;
+ if (IsSign(str[s]) && IsDecimalDigit(str[s + 1]) &&
+ IsDecimalDigit(str[s + 2]) && IsDecimalDigit(str[s + 3]) &&
+ IsDecimalDigit(str[s + 4]) && IsDecimalDigit(str[s + 5]) &&
+ IsDecimalDigit(str[s + 6])) {
+ int32_t sign = (CanonicalSign(str[s]) == '-') ? -1 : 1;
+ *out = sign * (ToInt(str[s + 1]) * 100000 + ToInt(str[s + 2]) * 10000 +
+ ToInt(str[s + 3]) * 1000 + ToInt(str[s + 4]) * 100 +
+ ToInt(str[s + 5]) * 10 + ToInt(str[s + 6]));
+ return 7;
+ }
+ return 0;
+}
+
+// DateFourDigitYear: Digit Digit Digit Digit
+template <typename Char>
+int32_t ScanDateFourDigitYear(base::Vector<Char> str, int32_t s, int32_t* out) {
+ if (str.length() < (s + 4)) return 0;
+ if (IsDecimalDigit(str[s]) && IsDecimalDigit(str[s + 1]) &&
+ IsDecimalDigit(str[s + 2]) && IsDecimalDigit(str[s + 3])) {
+ *out = ToInt(str[s]) * 1000 + ToInt(str[s + 1]) * 100 +
+ ToInt(str[s + 2]) * 10 + ToInt(str[s + 3]);
+ return 4;
+ }
+ return 0;
+}
+
+// DateYear:
+// DateFourDigitYear
+// DateExtendedYear
+// The lookahead is at most 1 char.
+SCAN_EITHER_FORWARD(DateYear, DateFourDigitYear, DateExtendedYear, int32_t)
+
+// DateMonth:
+// 0 NonzeroDigit
+// 10
+// 11
+// 12
+template <typename Char>
+int32_t ScanDateMonth(base::Vector<Char> str, int32_t s, int32_t* out) {
+ if (str.length() < (s + 2)) return 0;
+ if (((str[s] == '0') && IsNonZeroDecimalDigit(str[s + 1])) ||
+ ((str[s] == '1') && base::IsInRange(str[s + 1], '0', '2'))) {
+ *out = ToInt(str[s]) * 10 + ToInt(str[s + 1]);
+ return 2;
+ }
+ return 0;
+}
+
+// DateDay:
+// 0 NonzeroDigit
+// 1 Digit
+// 2 Digit
+// 30
+// 31
+template <typename Char>
+int32_t ScanDateDay(base::Vector<Char> str, int32_t s, int32_t* out) {
+ if (str.length() < (s + 2)) return 0;
+ if (((str[s] == '0') && IsNonZeroDecimalDigit(str[s + 1])) ||
+ (base::IsInRange(str[s], '1', '2') && IsDecimalDigit(str[s + 1])) ||
+ ((str[s] == '3') && base::IsInRange(str[s + 1], '0', '1'))) {
+ *out = ToInt(str[s]) * 10 + ToInt(str[s + 1]);
+ return 2;
+ }
+ return 0;
+}
+
+// Date:
+// DateYear - DateMonth - DateDay
+// DateYear DateMonth DateDay
+template <typename Char>
+int32_t ScanDate(base::Vector<Char> str, int32_t s, ParsedISO8601Result* r) {
+ int32_t date_year, date_month, date_day;
+ int32_t cur = s;
+ int32_t len;
+ if ((len = ScanDateYear(str, cur, &date_year)) == 0) return 0;
+ if (((cur += len) + 1) > str.length()) return 0;
+ if (str[cur] == '-') {
+ cur++;
+ if ((len = ScanDateMonth(str, cur, &date_month)) == 0) return 0;
+ cur += len;
+ if (((cur + 1) > str.length()) || (str[cur++] != '-')) return 0;
+ } else {
+ if ((len = ScanDateMonth(str, cur, &date_month)) == 0) return 0;
+ cur += len;
+ }
+ if ((len = ScanDateDay(str, cur, &date_day)) == 0) return 0;
+ r->date_year = date_year;
+ r->date_month = date_month;
+ r->date_day = date_day;
+ return cur + len - s;
+}
+
+// TimeZoneUTCOffsetHour: Hour
+SCAN_FORWARD(TimeZoneUTCOffsetHour, Hour, int32_t)
+
+// TimeZoneUTCOffsetMinute
+SCAN_FORWARD(TimeZoneUTCOffsetMinute, MinuteSecond, int32_t)
+
+// TimeZoneUTCOffsetSecond
+SCAN_FORWARD(TimeZoneUTCOffsetSecond, MinuteSecond, int32_t)
+
+// TimeZoneUTCOffsetFractionalPart: FractionalPart
+// See PR1796
+SCAN_FORWARD(TimeZoneUTCOffsetFractionalPart, FractionalPart, int32_t)
+
+// TimeZoneUTCOffsetFraction: DecimalSeparator TimeZoneUTCOffsetFractionalPart
+// See PR1796
+template <typename Char>
+int32_t ScanTimeZoneUTCOffsetFraction(base::Vector<Char> str, int32_t s,
+ int32_t* out) {
+ if ((str.length() < (s + 2)) || (!IsDecimalSeparator(str[s]))) return 0;
+ int32_t len;
+ if ((len = ScanTimeZoneUTCOffsetFractionalPart(str, s + 1, out)) > 0) {
+ return len + 1;
+ }
+ return 0;
+}
+
+// Note: "TimeZoneUTCOffset" is abbreviated as "TZUO" below
+// TimeZoneNumericUTCOffset:
+// TZUOSign TZUOHour
+// TZUOSign TZUOHour : TZUOMinute
+// TZUOSign TZUOHour : TZUOMinute : TZUOSecond [TZUOFraction]
+// TZUOSign TZUOHour TZUOMinute
+// TZUOSign TZUOHour TZUOMinute TZUOSecond [TZUOFraction]
+template <typename Char>
+int32_t ScanTimeZoneNumericUTCOffset(base::Vector<Char> str, int32_t s,
+ ParsedISO8601Result* r) {
+ int32_t len, hour, minute, second, nanosecond;
+ int32_t cur = s;
+ if ((str.length() < (cur + 1)) || (!IsSign(str[cur]))) return 0;
+ int32_t sign = (CanonicalSign(str[cur++]) == '-') ? -1 : 1;
+ if ((len = ScanTimeZoneUTCOffsetHour(str, cur, &hour)) == 0) return 0;
+ cur += len;
+ if ((cur + 1) > str.length()) {
+ // TZUOSign TZUOHour
+ r->tzuo_sign = sign;
+ r->tzuo_hour = hour;
+ return cur - s;
+ }
+ if (str[cur] == ':') {
+ cur++;
+ if ((len = ScanTimeZoneUTCOffsetMinute(str, cur, &minute)) == 0) return 0;
+ cur += len;
+ if ((cur + 1) > str.length() || str[cur] != ':') {
+ // TZUOSign TZUOHour : TZUOMinute
+ r->tzuo_sign = sign;
+ r->tzuo_hour = hour;
+ r->tzuo_minute = minute;
+ return cur - s;
+ }
+ cur++;
+ if ((len = ScanTimeZoneUTCOffsetSecond(str, cur, &second)) == 0) return 0;
+ } else {
+ if ((len = ScanTimeZoneUTCOffsetMinute(str, cur, &minute)) == 0) {
+ // TZUOSign TZUOHour
+ r->tzuo_sign = sign;
+ r->tzuo_hour = hour;
+ return cur - s;
+ }
+ cur += len;
+ if ((len = ScanTimeZoneUTCOffsetSecond(str, cur, &second)) == 0) {
+ // TZUOSign TZUOHour TZUOMinute
+ r->tzuo_sign = sign;
+ r->tzuo_hour = hour;
+ r->tzuo_minute = minute;
+ return cur - s;
+ }
+ }
+ cur += len;
+ len = ScanTimeZoneUTCOffsetFraction(str, cur, &nanosecond);
+ r->tzuo_sign = sign;
+ r->tzuo_hour = hour;
+ r->tzuo_minute = minute;
+ r->tzuo_second = second;
+ if (len > 0) r->tzuo_nanosecond = nanosecond;
+ return cur + len - s;
+}
+
+// TimeZoneUTCOffset:
+// TimeZoneNumericUTCOffset
+// UTCDesignator
+template <typename Char>
+int32_t ScanTimeZoneUTCOffset(base::Vector<Char> str, int32_t s,
+ ParsedISO8601Result* r) {
+ if (str.length() < (s + 1)) return 0;
+ if (AsciiAlphaToLower(str[s]) == 'z') {
+ // UTCDesignator
+ r->utc_designator = true;
+ return 1;
+ }
+ // TimeZoneNumericUTCOffset
+ return ScanTimeZoneNumericUTCOffset(str, s, r);
+}
+
+// TimeZoneIANANameComponent :
+// TZLeadingChar TZChar{0,13} but not one of . or ..
+template <typename Char>
+int32_t ScanTimeZoneIANANameComponent(base::Vector<Char> str, int32_t s) {
+ int32_t cur = s;
+ if (str.length() < (cur + 1) || !IsTZLeadingChar(str[cur++])) return 0;
+ while (((cur) < str.length()) && ((cur - s) < 14) && IsTZChar(str[cur])) {
+ cur++;
+ }
+ if ((cur - s) == 1 && str[s] == '.') return 0;
+ if ((cur - s) == 2 && str[s] == '.' && str[s + 1] == '.') return 0;
+ return cur - s;
+}
+
+// TimeZoneIANANameTail :
+// TimeZoneIANANameComponent
+// TimeZoneIANANameComponent / TimeZoneIANANameTail
+// TimeZoneIANAName :
+// TimeZoneIANANameTail
+// The spec text uses tail recursion with TimeZoneIANANameComponent and
+// TimeZoneIANANameTail. In our implementation, we use an iterative loop
+// instead.
+template <typename Char>
+int32_t ScanTimeZoneIANAName(base::Vector<Char> str, int32_t s) {
+ int32_t cur = s;
+ int32_t len;
+ if ((len = ScanTimeZoneIANANameComponent(str, cur)) == 0) return 0;
+ cur += len;
+ while ((str.length() > (cur + 1)) && (str[cur] == '/')) {
+ cur++;
+ if ((len = ScanTimeZoneIANANameComponent(str, cur)) == 0) {
+ return 0;
+ }
+ // TimeZoneIANANameComponent / TimeZoneIANAName
+ cur += len;
+ }
+ return cur - s;
+}
+
+template <typename Char>
+int32_t ScanTimeZoneIANAName(base::Vector<Char> str, int32_t s,
+ ParsedISO8601Result* r) {
+ int32_t len;
+ if ((len = ScanTimeZoneIANAName(str, s)) == 0) return 0;
+ r->tzi_name_start = s;
+ r->tzi_name_length = len;
+ return len;
+}
+
+// TimeZoneUTCOffsetName
+// Sign Hour
+// Sign Hour : MinuteSecond
+// Sign Hour MinuteSecond
+// Sign Hour : MinuteSecond : MinuteSecond [Fraction]
+// Sign Hour MinuteSecond MinuteSecond [Fraction]
+//
+template <typename Char>
+int32_t ScanTimeZoneUTCOffsetName(base::Vector<Char> str, int32_t s) {
+ int32_t cur = s;
+ int32_t len;
+ if ((str.length() < (s + 3)) || !IsSign(str[cur++])) return 0;
+ int32_t hour, minute, second, fraction;
+ if ((len = ScanHour(str, cur, &hour)) == 0) return 0;
+ cur += len;
+ if ((cur + 1) > str.length()) {
+ // Sign Hour
+ return cur - s;
+ }
+ if (str[cur] == ':') {
+ // Sign Hour :
+ cur++;
+ if ((len = ScanMinuteSecond(str, cur, &minute)) == 0) return 0;
+ cur += len;
+ if ((cur + 1) > str.length() || (str[cur] != ':')) {
+ // Sign Hour : MinuteSecond
+ return cur - s;
+ }
+ cur++;
+ // Sign Hour : MinuteSecond :
+ if ((len = ScanMinuteSecond(str, cur, &second)) == 0) return 0;
+ cur += len;
+ len = ScanFraction(str, cur, &fraction);
+ return cur + len - s;
+ } else {
+ if ((len = ScanMinuteSecond(str, cur, &minute)) == 0) {
+ // Sign Hour
+ return cur - s;
+ }
+ cur += len;
+ if ((len = ScanMinuteSecond(str, cur, &second)) == 0) {
+ // Sign Hour MinuteSecond
+ return cur - s;
+ }
+ cur += len;
+ len = ScanFraction(str, cur, &fraction);
+ // Sign Hour MinuteSecond MinuteSecond [Fraction]
+ return cur + len - s;
+ }
+}
+
+// TimeZoneBracketedName
+// TimeZoneIANAName
+// "Etc/GMT" ASCIISign Hour
+// TimeZoneUTCOffsetName
+// Since "Etc/GMT" also fit TimeZoneIANAName so we need to try
+// "Etc/GMT" ASCIISign Hour first.
+template <typename Char>
+int32_t ScanEtcGMTAsciiSignHour(base::Vector<Char> str, int32_t s) {
+ if ((s + 10) > str.length()) return 0;
+ int32_t cur = s;
+ if ((str[cur++] != 'E') || (str[cur++] != 't') || (str[cur++] != 'c') ||
+ (str[cur++] != '/') || (str[cur++] != 'G') || (str[cur++] != 'M') ||
+ (str[cur++] != 'T')) {
+ return 0;
+ }
+ Char sign = str[cur++];
+ if (!IsAsciiSign(sign)) return 0;
+ int32_t hour;
+ int32_t len = ScanHour(str, cur, &hour);
+ if (len == 0) return 0;
+ // "Etc/GMT" ASCIISign Hour
+ return 10;
+}
+
+template <typename Char>
+int32_t ScanTimeZoneBracketedName(base::Vector<Char> str, int32_t s,
+ ParsedISO8601Result* r) {
+ int32_t len;
+ if ((len = ScanEtcGMTAsciiSignHour(str, s)) > 0) return len;
+ if ((len = ScanTimeZoneIANAName(str, s)) > 0) {
+ r->tzi_name_start = s;
+ r->tzi_name_length = len;
+ return len;
+ }
+ return ScanTimeZoneUTCOffsetName(str, s);
+}
+
+// TimeZoneBracketedAnnotation: '[' TimeZoneBracketedName ']'
+template <typename Char>
+int32_t ScanTimeZoneBracketedAnnotation(base::Vector<Char> str, int32_t s,
+ ParsedISO8601Result* r) {
+ if ((str.length() < (s + 3)) || (str[s] != '[')) return 0;
+ int32_t cur = s + 1;
+ cur += ScanTimeZoneBracketedName(str, cur, r);
+ if ((cur - s == 1) || str.length() < (cur + 1) || (str[cur++] != ']')) {
+ return 0;
+ }
+ return cur - s;
+}
+
+// TimeZoneOffsetRequired:
+// TimeZoneUTCOffset [TimeZoneBracketedAnnotation]
+template <typename Char>
+int32_t ScanTimeZoneOffsetRequired(base::Vector<Char> str, int32_t s,
+ ParsedISO8601Result* r) {
+ int32_t cur = s;
+ cur += ScanTimeZoneUTCOffset(str, cur, r);
+ if (cur == s) return 0;
+ return cur + ScanTimeZoneBracketedAnnotation(str, cur, r) - s;
+}
+
+// TimeZoneNameRequired:
+// [TimeZoneUTCOffset] TimeZoneBracketedAnnotation
+template <typename Char>
+int32_t ScanTimeZoneNameRequired(base::Vector<Char> str, int32_t s,
+ ParsedISO8601Result* r) {
+ int32_t cur = s;
+ cur += ScanTimeZoneUTCOffset(str, cur, r);
+ int32_t len = ScanTimeZoneBracketedAnnotation(str, cur, r);
+ if (len == 0) return 0;
+ return cur + len - s;
+}
+
+// TimeZone:
+// TimeZoneOffsetRequired
+// TimeZoneNameRequired
+// The lookahead is at most 1 char.
+SCAN_EITHER_FORWARD(TimeZone, TimeZoneOffsetRequired, TimeZoneNameRequired,
+ ParsedISO8601Result)
+
+// CalendarNameComponent:
+// CalChar {3,8}
+template <typename Char>
+int32_t ScanCalendarNameComponent(base::Vector<Char> str, int32_t s) {
+ int32_t cur = s;
+ while ((cur < str.length()) && IsAlphaNumeric(str[cur])) cur++;
+ if ((cur - s) < 3 || (cur - s) > 8) return 0;
+ return (cur - s);
+}
+
+// CalendarNameTail :
+// CalendarNameComponent
+// CalendarNameComponent - CalendarNameTail
+// CalendarName :
+// CalendarNameTail
+// The spec text uses tail recursion with CalendarNameComponent and
+// CalendarNameTail. In our implementation, we use an iterative loop instead.
+template <typename Char>
+int32_t ScanCalendarName(base::Vector<Char> str, int32_t s,
+ ParsedISO8601Result* r) {
+ int32_t cur = s;
+ int32_t len;
+ if ((len = ScanCalendarNameComponent(str, cur)) == 0) return 0;
+ cur += len;
+ while ((str.length() > (cur + 1)) && (str[cur++] == '-')) {
+ if ((len = ScanCalendarNameComponent(str, cur)) == 0) return 0;
+ // CalendarNameComponent - CalendarName
+ cur += len;
+ }
+ r->calendar_name_start = s;
+ r->calendar_name_length = cur - s;
+ return cur - s;
+}
+
+// Calendar: '[u-ca=' CalendarName ']'
+template <typename Char>
+int32_t ScanCalendar(base::Vector<Char> str, int32_t s,
+ ParsedISO8601Result* r) {
+ if (str.length() < (s + 7)) return 0;
+ int32_t cur = s;
+ // "[u-ca="
+ if ((str[cur++] != '[') || (str[cur++] != 'u') || (str[cur++] != '-') ||
+ (str[cur++] != 'c') || (str[cur++] != 'a') || (str[cur++] != '=')) {
+ return 0;
+ }
+ int32_t len = ScanCalendarName(str, cur, r);
+ if (len == 0) return 0;
+ if ((str.length() < (cur + len + 1)) || (str[cur + len] != ']')) {
+ return 0;
+ }
+ return 6 + len + 1;
+}
+
+// CalendarTime: TimeSpec [TimeZone] [Calendar]
+template <typename Char>
+int32_t ScanCalendarTime(base::Vector<Char> str, int32_t s,
+ ParsedISO8601Result* r) {
+ int32_t cur = s;
+ cur += ScanTimeSpec(str, cur, r);
+ if (cur - s == 0) return 0;
+ cur += ScanTimeZone(str, cur, r);
+ cur += ScanCalendar(str, cur, r);
+ return cur - s;
+}
+
+// DateTime: Date [TimeSpecSeparator][TimeZone]
+template <typename Char>
+int32_t ScanDateTime(base::Vector<Char> str, int32_t s,
+ ParsedISO8601Result* r) {
+ int32_t cur = s;
+ cur += ScanDate(str, cur, r);
+ if (cur == s) return 0;
+ cur += ScanTimeSpecSeparator(str, cur, r);
+ return cur + ScanTimeZone(str, cur, r) - s;
+}
+
+// DateSpecYearMonth: DateYear ['-'] DateMonth
+template <typename Char>
+int32_t ScanDateSpecYearMonth(base::Vector<Char> str, int32_t s,
+ ParsedISO8601Result* r) {
+ int32_t date_year, date_month;
+ int32_t cur = s;
+ cur += ScanDateYear(str, cur, &date_year);
+ if (cur == s) return 0;
+ if (str.length() < (cur + 1)) return 0;
+ if (str[cur] == '-') cur++;
+ int32_t len = ScanDateMonth(str, cur, &date_month);
+ if (len == 0) return 0;
+ r->date_year = date_year;
+ r->date_month = date_month;
+ return cur + len - s;
+}
+
+// DateSpecMonthDay:
+// TwoDashopt DateMonth -opt DateDay
+template <typename Char>
+int32_t ScanDateSpecMonthDay(base::Vector<Char> str, int32_t s,
+ ParsedISO8601Result* r) {
+ if (str.length() < (s + 4)) return 0;
+ int32_t cur = s;
+ if (str[cur] == '-') {
+    // The first two dashes are optional, but must appear together
+ if (str[++cur] != '-') return 0;
+ // TwoDash
+ cur++;
+ }
+ int32_t date_month, date_day;
+ int32_t len = ScanDateMonth(str, cur, &date_month);
+ if (len == 0) return 0;
+ cur += len;
+ if (str.length() < (cur + 1)) return 0;
+ // '-'
+ if (str[cur] == '-') cur++;
+ len = ScanDateDay(str, cur, &date_day);
+ if (len == 0) return 0;
+ r->date_month = date_month;
+ r->date_day = date_day;
+ return cur + len - s;
+}
+
+// TemporalTimeZoneIdentifier:
+// TimeZoneNumericUTCOffset
+// TimeZoneIANAName
+template <typename Char>
+int32_t ScanTemporalTimeZoneIdentifier(base::Vector<Char> str, int32_t s,
+ ParsedISO8601Result* r) {
+ int32_t len;
+ if ((len = ScanTimeZoneNumericUTCOffset(str, s, r)) > 0) return len;
+ if ((len = ScanTimeZoneIANAName(str, s)) == 0) return 0;
+ r->tzi_name_start = s;
+ r->tzi_name_length = len;
+ return len;
+}
+
+// CalendarDateTime: DateTime [Calendar]
+template <typename Char>
+int32_t ScanCalendarDateTime(base::Vector<Char> str, int32_t s,
+ ParsedISO8601Result* r) {
+ int32_t len = ScanDateTime(str, 0, r);
+ if (len == 0) return 0;
+ return len + ScanCalendar(str, len, r);
+}
+
+// TemporalZonedDateTimeString:
+// Date [TimeSpecSeparator] TimeZoneNameRequired [Calendar]
+template <typename Char>
+int32_t ScanTemporalZonedDateTimeString(base::Vector<Char> str, int32_t s,
+ ParsedISO8601Result* r) {
+ // Date
+ int32_t cur = s;
+ cur += ScanDate(str, cur, r);
+ if (cur == s) return 0;
+
+ // TimeSpecSeparator
+ cur += ScanTimeSpecSeparator(str, cur, r);
+
+ // TimeZoneNameRequired
+ int32_t len = ScanTimeZoneNameRequired(str, cur, r);
+ if (len == 0) return 0;
+ cur += len;
+
+ // Calendar
+ return cur + ScanCalendar(str, cur, r) - s;
+}
+
+SCAN_FORWARD(TemporalDateString, CalendarDateTime, ParsedISO8601Result)
+SCAN_FORWARD(TemporalDateTimeString, CalendarDateTime, ParsedISO8601Result)
+
+// TemporalTimeZoneString:
+// TemporalTimeZoneIdentifier
+// Date [TimeSpecSeparator] TimeZone [Calendar]
+template <typename Char>
+int32_t ScanDate_TimeSpecSeparator_TimeZone_Calendar(base::Vector<Char> str,
+ int32_t s,
+ ParsedISO8601Result* r) {
+ int32_t cur = s;
+ cur += ScanDate(str, cur, r);
+ if (cur == s) return 0;
+ cur += ScanTimeSpecSeparator(str, cur, r);
+ int32_t len = ScanTimeZone(str, cur, r);
+ if (len == 0) return 0;
+ cur += len;
+ return cur + ScanCalendar(str, cur, r) - s;
+}
+
+// The lookahead is at most 8 chars.
+SCAN_EITHER_FORWARD(TemporalTimeZoneString, TemporalTimeZoneIdentifier,
+ Date_TimeSpecSeparator_TimeZone_Calendar,
+ ParsedISO8601Result)
+
+// TemporalTimeString
+// CalendarTime
+// CalendarDateTime
+// The lookahead is at most 7 chars.
+SCAN_EITHER_FORWARD(TemporalTimeString, CalendarTime, CalendarDateTime,
+ ParsedISO8601Result)
+
+// TemporalYearMonthString:
+// DateSpecYearMonth
+// CalendarDateTime
+// The lookahead is at most 11 chars.
+SCAN_EITHER_FORWARD(TemporalYearMonthString, DateSpecYearMonth,
+ CalendarDateTime, ParsedISO8601Result)
+
+// TemporalMonthDayString
+// DateSpecMonthDay
+// CalendarDateTime
+// The lookahead is at most 5 chars.
+SCAN_EITHER_FORWARD(TemporalMonthDayString, DateSpecMonthDay, CalendarDateTime,
+ ParsedISO8601Result)
+
+// TemporalRelativeToString:
+// TemporalDateTimeString
+// TemporalZonedDateTimeString
+// TemporalZonedDateTimeString is subset of TemporalDateTimeString
+// See https://github.com/tc39/proposal-temporal/issues/1939
+SCAN_FORWARD(TemporalRelativeToString, TemporalDateTimeString,
+ ParsedISO8601Result)
+
+// TemporalInstantString
+// Date TimeZoneOffsetRequired
+// Date DateTimeSeparator TimeSpec TimeZoneOffsetRequired
+template <typename Char>
+int32_t ScanTemporalInstantString(base::Vector<Char> str, int32_t s,
+ ParsedISO8601Result* r) {
+ // Date
+ int32_t cur = s;
+ cur += ScanDate(str, cur, r);
+ if (cur == s) return 0;
+
+ // TimeZoneOffsetRequired
+ int32_t len = ScanTimeZoneOffsetRequired(str, cur, r);
+ if (len > 0) return cur + len - s;
+
+ // DateTimeSeparator
+ if (!(((cur + 1) < str.length()) && IsDateTimeSeparator(str[cur++]))) {
+ return 0;
+ }
+ // TimeSpec
+ len = ScanTimeSpec(str, cur, r);
+ if (len == 0) return 0;
+ cur += len;
+
+ // TimeZoneOffsetRequired
+ len = ScanTimeZoneOffsetRequired(str, cur, r);
+ if (len == 0) return 0;
+ return cur + len - s;
+}
+
+// ==============================================================================
+#define SATISIFY(T, R) \
+ template <typename Char> \
+ bool Satisfy##T(base::Vector<Char> str, R* r) { \
+ R ret; \
+ int32_t len = Scan##T(str, 0, &ret); \
+ if ((len > 0) && (len == str.length())) { \
+ *r = ret; \
+ return true; \
+ } \
+ return false; \
+ }
+
+#define IF_SATISFY_RETURN(T) \
+ { \
+ if (Satisfy##T(str, r)) return true; \
+ }
+
+#define SATISIFY_EITHER(T1, T2, T3, R) \
+ template <typename Char> \
+ bool Satisfy##T1(base::Vector<Char> str, R* r) { \
+ IF_SATISFY_RETURN(T2) \
+ IF_SATISFY_RETURN(T3) \
+ return false; \
+ }
+
+SATISIFY(TemporalDateTimeString, ParsedISO8601Result)
+SATISIFY(TemporalDateString, ParsedISO8601Result)
+SATISIFY(CalendarTime, ParsedISO8601Result)
+SATISIFY(DateTime, ParsedISO8601Result)
+SATISIFY(DateSpecYearMonth, ParsedISO8601Result)
+SATISIFY(DateSpecMonthDay, ParsedISO8601Result)
+SATISIFY(Date_TimeSpecSeparator_TimeZone_Calendar, ParsedISO8601Result)
+SATISIFY(CalendarDateTime, ParsedISO8601Result)
+SATISIFY_EITHER(TemporalTimeString, CalendarTime, CalendarDateTime,
+ ParsedISO8601Result)
+SATISIFY_EITHER(TemporalYearMonthString, DateSpecYearMonth, CalendarDateTime,
+ ParsedISO8601Result)
+SATISIFY_EITHER(TemporalMonthDayString, DateSpecMonthDay, CalendarDateTime,
+ ParsedISO8601Result)
+SATISIFY(TimeZoneNumericUTCOffset, ParsedISO8601Result)
+SATISIFY(TimeZoneIANAName, ParsedISO8601Result)
+SATISIFY_EITHER(TemporalTimeZoneIdentifier, TimeZoneNumericUTCOffset,
+ TimeZoneIANAName, ParsedISO8601Result)
+SATISIFY_EITHER(TemporalTimeZoneString, TemporalTimeZoneIdentifier,
+ Date_TimeSpecSeparator_TimeZone_Calendar, ParsedISO8601Result)
+SATISIFY(TemporalInstantString, ParsedISO8601Result)
+SATISIFY(TemporalZonedDateTimeString, ParsedISO8601Result)
+
+SATISIFY_EITHER(TemporalRelativeToString, TemporalDateTimeString,
+ TemporalZonedDateTimeString, ParsedISO8601Result)
+
+SATISIFY(CalendarName, ParsedISO8601Result)
+
+template <typename Char>
+bool SatisfyTemporalCalendarString(base::Vector<Char> str,
+ ParsedISO8601Result* r) {
+ IF_SATISFY_RETURN(CalendarName)
+ IF_SATISFY_RETURN(TemporalInstantString)
+ IF_SATISFY_RETURN(CalendarDateTime)
+ IF_SATISFY_RETURN(CalendarTime)
+ IF_SATISFY_RETURN(DateSpecYearMonth)
+ IF_SATISFY_RETURN(DateSpecMonthDay)
+ return false;
+}
+
+// Duration
+
+SCAN_FORWARD(TimeFractionalPart, FractionalPart, int64_t)
+
+template <typename Char>
+int32_t ScanFraction(base::Vector<Char> str, int32_t s, int64_t* out) {
+ if (str.length() < (s + 2) || !IsDecimalSeparator(str[s])) return 0;
+ int32_t len = ScanTimeFractionalPart(str, s + 1, out);
+ return (len == 0) ? 0 : len + 1;
+}
+
+SCAN_FORWARD(TimeFraction, Fraction, int64_t)
+
+// Digits : Digit [Digits]
+
+template <typename Char>
+int32_t ScanDigits(base::Vector<Char> str, int32_t s, int64_t* out) {
+ if (str.length() < (s + 1) || !IsDecimalDigit(str[s])) return 0;
+ *out = ToInt(str[s]);
+ int32_t len = 1;
+ while (s + len + 1 <= str.length() && IsDecimalDigit(str[s + len])) {
+ *out = 10 * (*out) + ToInt(str[s + len]);
+ len++;
+ }
+ return len;
+}
+
+SCAN_FORWARD(DurationYears, Digits, int64_t)
+SCAN_FORWARD(DurationMonths, Digits, int64_t)
+SCAN_FORWARD(DurationWeeks, Digits, int64_t)
+SCAN_FORWARD(DurationDays, Digits, int64_t)
+
+// DurationWholeHours : Digits
+SCAN_FORWARD(DurationWholeHours, Digits, int64_t)
+
+// DurationWholeMinutes : Digits
+SCAN_FORWARD(DurationWholeMinutes, Digits, int64_t)
+
+// DurationWholeSeconds : Digits
+SCAN_FORWARD(DurationWholeSeconds, Digits, int64_t)
+
+// DurationHoursFraction : TimeFraction
+SCAN_FORWARD(DurationHoursFraction, TimeFraction, int64_t)
+
+// DurationMinutesFraction : TimeFraction
+SCAN_FORWARD(DurationMinutesFraction, TimeFraction, int64_t)
+
+// DurationSecondsFraction : TimeFraction
+SCAN_FORWARD(DurationSecondsFraction, TimeFraction, int64_t)
+
+#define DURATION_WHOLE_FRACTION_DESIGNATOR(Name, name, d) \
+ template <typename Char> \
+ int32_t ScanDurationWhole##Name##FractionDesignator( \
+ base::Vector<Char> str, int32_t s, ParsedISO8601Duration* r) { \
+ int32_t cur = s; \
+ int64_t whole = 0; \
+ cur += ScanDurationWhole##Name(str, cur, &whole); \
+ if (cur == s) return 0; \
+ int64_t fraction = 0; \
+ int32_t len = ScanDuration##Name##Fraction(str, cur, &fraction); \
+ cur += len; \
+ if (str.length() < (cur + 1) || AsciiAlphaToLower(str[cur++]) != (d)) \
+ return 0; \
+ r->whole_##name = whole; \
+ r->name##_fraction = fraction; \
+ return cur - s; \
+ }
+
+DURATION_WHOLE_FRACTION_DESIGNATOR(Seconds, seconds, 's')
+DURATION_WHOLE_FRACTION_DESIGNATOR(Minutes, minutes, 'm')
+DURATION_WHOLE_FRACTION_DESIGNATOR(Hours, hours, 'h')
+
+// DurationSecondsPart :
+// DurationWholeSeconds DurationSecondsFractionopt SecondsDesignator
+SCAN_FORWARD(DurationSecondsPart, DurationWholeSecondsFractionDesignator,
+ ParsedISO8601Duration)
+
+// DurationMinutesPart :
+// DurationWholeMinutes DurationMinutesFractionopt MinutesDesignator
+// [DurationSecondsPart]
+template <typename Char>
+int32_t ScanDurationMinutesPart(base::Vector<Char> str, int32_t s,
+ ParsedISO8601Duration* r) {
+ int32_t cur = s + ScanDurationWholeMinutesFractionDesignator(str, s, r);
+ if (cur == s) return 0;
+ return cur + ScanDurationSecondsPart(str, cur, r) - s;
+}
+
+// DurationHoursPart :
+// DurationWholeHours DurationHoursFractionopt HoursDesignator
+// DurationMinutesPart
+//
+// DurationWholeHours DurationHoursFractionopt HoursDesignator
+// [DurationSecondsPart]
+template <typename Char>
+int32_t ScanDurationHoursPart(base::Vector<Char> str, int32_t s,
+ ParsedISO8601Duration* r) {
+ int32_t cur = s + ScanDurationWholeHoursFractionDesignator(str, s, r);
+ if (cur == s) return 0;
+ int32_t len = ScanDurationMinutesPart(str, cur, r);
+ if (len > 0) return cur + len - s;
+ return cur + ScanDurationSecondsPart(str, cur, r) - s;
+}
+
+// DurationTime :
+// DurationTimeDesignator DurationHoursPart
+// DurationTimeDesignator DurationMinutesPart
+// DurationTimeDesignator DurationSecondsPart
+template <typename Char>
+int32_t ScanDurationTime(base::Vector<Char> str, int32_t s,
+ ParsedISO8601Duration* r) {
+ int32_t cur = s;
+ if (str.length() < (s + 1)) return 0;
+ if (AsciiAlphaToLower(str[cur++]) != 't') return 0;
+ if ((cur += ScanDurationHoursPart(str, cur, r)) - s > 1) return cur - s;
+ if ((cur += ScanDurationMinutesPart(str, cur, r)) - s > 1) return cur - s;
+ if ((cur += ScanDurationSecondsPart(str, cur, r)) - s > 1) return cur - s;
+ return 0;
+}
+
+#define DURATION_AND_DESIGNATOR(Name, name, d) \
+ template <typename Char> \
+ int32_t ScanDuration##Name##Designator(base::Vector<Char> str, int32_t s, \
+ ParsedISO8601Duration* r) { \
+ int32_t cur = s; \
+ int64_t name; \
+ if ((cur += ScanDuration##Name(str, cur, &name)) == s) return 0; \
+ if (str.length() < (cur + 1) || AsciiAlphaToLower(str[cur++]) != (d)) { \
+ return 0; \
+ } \
+ r->name = name; \
+ return cur - s; \
+ }
+
+DURATION_AND_DESIGNATOR(Days, days, 'd')
+DURATION_AND_DESIGNATOR(Weeks, weeks, 'w')
+DURATION_AND_DESIGNATOR(Months, months, 'm')
+DURATION_AND_DESIGNATOR(Years, years, 'y')
+
+// DurationDaysPart : DurationDays DaysDesignator
+SCAN_FORWARD(DurationDaysPart, DurationDaysDesignator, ParsedISO8601Duration)
+
+// DurationWeeksPart : DurationWeeks WeeksDesignator [DurationDaysPart]
+template <typename Char>
+int32_t ScanDurationWeeksPart(base::Vector<Char> str, int32_t s,
+ ParsedISO8601Duration* r) {
+ int32_t cur = s;
+ if ((cur += ScanDurationWeeksDesignator(str, cur, r)) == s) return 0;
+ return cur + ScanDurationDaysPart(str, cur, r) - s;
+}
+
+// DurationMonthsPart :
+// DurationMonths MonthsDesignator DurationWeeksPart
+// DurationMonths MonthsDesignator [DurationDaysPart]
+template <typename Char>
+int32_t ScanDurationMonthsPart(base::Vector<Char> str, int32_t s,
+ ParsedISO8601Duration* r) {
+ int32_t cur = s;
+ int32_t len;
+ if ((cur += ScanDurationMonthsDesignator(str, cur, r)) == s) return 0;
+ if ((len = ScanDurationWeeksPart(str, cur, r)) > 0) return cur + len - s;
+ return cur + ScanDurationDaysPart(str, cur, r) - s;
+}
+
+// DurationYearsPart :
+// DurationYears YearsDesignator DurationMonthsPart
+// DurationYears YearsDesignator DurationWeeksPart
+// DurationYears YearsDesignator [DurationDaysPart]
+template <typename Char>
+int32_t ScanDurationYearsPart(base::Vector<Char> str, int32_t s,
+ ParsedISO8601Duration* r) {
+ int32_t cur = s;
+ int32_t len;
+ if ((cur += ScanDurationYearsDesignator(str, cur, r)) == s) return 0;
+ if ((len = ScanDurationMonthsPart(str, cur, r)) > 0) return cur + len - s;
+ if ((len = ScanDurationWeeksPart(str, cur, r)) > 0) return cur + len - s;
+ return cur + ScanDurationDaysPart(str, cur, r) - s;
+}
+
+// DurationDate :
+// DurationYearsPart [DurationTime]
+// DurationMonthsPart [DurationTime]
+// DurationWeeksPart [DurationTime]
+// DurationDaysPart [DurationTime]
+template <typename Char>
+int32_t ScanDurationDate(base::Vector<Char> str, int32_t s,
+ ParsedISO8601Duration* r) {
+ int32_t cur = s;
+ do {
+ if ((cur += ScanDurationYearsPart(str, cur, r)) > s) break;
+ if ((cur += ScanDurationMonthsPart(str, cur, r)) > s) break;
+ if ((cur += ScanDurationWeeksPart(str, cur, r)) > s) break;
+ if ((cur += ScanDurationDaysPart(str, cur, r)) > s) break;
+ return 0;
+ } while (false);
+ return cur + ScanDurationTime(str, cur, r) - s;
+}
+
+// Duration :
+// Signopt DurationDesignator DurationDate
+// Signopt DurationDesignator DurationTime
+template <typename Char>
+int32_t ScanDuration(base::Vector<Char> str, int32_t s,
+ ParsedISO8601Duration* r) {
+ if (str.length() < (s + 2)) return 0;
+ int32_t cur = s;
+ int32_t sign =
+ (IsSign(str[cur]) && CanonicalSign(str[cur++]) == '-') ? -1 : 1;
+ if (AsciiAlphaToLower(str[cur++]) != 'p') return 0;
+ int32_t len = ScanDurationDate(str, cur, r);
+ if (len == 0) len = ScanDurationTime(str, cur, r);
+ if (len == 0) return 0;
+ r->sign = sign;
+ return cur + len - s;
+}
+SCAN_FORWARD(TemporalDurationString, Duration, ParsedISO8601Duration)
+
+SATISIFY(TemporalDurationString, ParsedISO8601Duration)
+
+} // namespace
+
+#define IMPL_PARSE_METHOD(R, NAME) \
+ Maybe<R> TemporalParser::Parse##NAME( \
+ Isolate* isolate, Handle<String> iso_string, bool* valid) { \
+ R parsed; \
+ iso_string = String::Flatten(isolate, iso_string); \
+ { \
+ DisallowGarbageCollection no_gc; \
+ String::FlatContent str_content = iso_string->GetFlatContent(no_gc); \
+ if (str_content.IsOneByte()) { \
+ *valid = Satisfy##NAME(str_content.ToOneByteVector(), &parsed); \
+ } else { \
+ *valid = Satisfy##NAME(str_content.ToUC16Vector(), &parsed); \
+ } \
+ } \
+ return Just(parsed); \
+ }
+
+IMPL_PARSE_METHOD(ParsedISO8601Result, TemporalDateTimeString)
+IMPL_PARSE_METHOD(ParsedISO8601Result, TemporalDateString)
+IMPL_PARSE_METHOD(ParsedISO8601Result, TemporalYearMonthString)
+IMPL_PARSE_METHOD(ParsedISO8601Result, TemporalMonthDayString)
+IMPL_PARSE_METHOD(ParsedISO8601Result, TemporalTimeString)
+IMPL_PARSE_METHOD(ParsedISO8601Result, TemporalInstantString)
+IMPL_PARSE_METHOD(ParsedISO8601Result, TemporalZonedDateTimeString)
+IMPL_PARSE_METHOD(ParsedISO8601Result, TemporalTimeZoneString)
+IMPL_PARSE_METHOD(ParsedISO8601Result, TemporalRelativeToString)
+IMPL_PARSE_METHOD(ParsedISO8601Result, TemporalCalendarString)
+IMPL_PARSE_METHOD(ParsedISO8601Result, TimeZoneNumericUTCOffset)
+IMPL_PARSE_METHOD(ParsedISO8601Duration, TemporalDurationString)
+
+} // namespace internal
+} // namespace v8
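
All of the Scan* helpers above follow one convention: a scanner returns the number of characters it consumed starting at position s, or 0 when its production does not match, so callers detect failure simply by checking whether the cursor advanced. The following is a minimal standalone sketch of that convention; the names and types are illustrative and not part of this patch.

#include <cctype>
#include <cstdint>
#include <string>

// Consume a run of ASCII digits starting at s; return the length consumed
// (0 means "no match") and store the parsed value on success.
int32_t ScanDigits(const std::string& str, int32_t s, int64_t* value) {
  int32_t cur = s;
  int64_t v = 0;
  while (cur < static_cast<int32_t>(str.size()) &&
         std::isdigit(static_cast<unsigned char>(str[cur]))) {
    v = v * 10 + (str[cur] - '0');
    ++cur;
  }
  if (cur == s) return 0;
  *value = v;
  return cur - s;
}

// "Digits followed by a designator letter": the shape generated by the
// DURATION_AND_DESIGNATOR macro above.
int32_t ScanValueAndDesignator(const std::string& str, int32_t s, char d,
                               int64_t* out) {
  int64_t value;
  int32_t cur = s + ScanDigits(str, s, &value);
  if (cur == s) return 0;  // the number part did not match
  if (cur >= static_cast<int32_t>(str.size()) ||
      std::tolower(static_cast<unsigned char>(str[cur])) != d) {
    return 0;  // missing designator: reject the whole production
  }
  *out = value;
  return cur + 1 - s;  // consumed length includes the designator
}

Composite scanners such as ScanDurationMinutesPart add the child's return value to the cursor and propagate cur - s, which is why a 0 return cleanly backs out of an optional production.
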
diff --git a/deps/v8/src/temporal/temporal-parser.h b/deps/v8/src/temporal/temporal-parser.h
new file mode 100644
index 0000000000..d3bc43a5a7
--- /dev/null
+++ b/deps/v8/src/temporal/temporal-parser.h
@@ -0,0 +1,147 @@
+// Copyright 2021 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_TEMPORAL_TEMPORAL_PARSER_H_
+#define V8_TEMPORAL_TEMPORAL_PARSER_H_
+
+#include "src/execution/isolate.h"
+
+namespace v8 {
+namespace internal {
+
+/**
+ * ParsedISO8601Result contains the parsed result of ISO 8601 grammar
+ * documented in #sec-temporal-iso8601grammar
+ * for TemporalInstantString, TemporalZonedDateTimeString,
+ * TemporalCalendarString, TemporalDateString, TemporalDateTimeString,
+ * TemporalMonthDayString, TemporalRelativeToString, TemporalTimeString,
+ * TemporalTimeZoneString, and TemporalYearMonthString. For all the fields
+ * represented by int32_t, the special value kMinInt31 is used to indicate
+ * that the field is "undefined" after parsing.
+ */
+struct ParsedISO8601Result {
+ int32_t date_year; // DateYear production
+ int32_t date_month; // DateMonth production
+ int32_t date_day; // DateDay production
+ int32_t time_hour; // TimeHour production
+ int32_t time_minute; // TimeMinute production
+ int32_t time_second; // TimeSecond production
+ int32_t
+ time_nanosecond; // TimeFractionalPart production stored in nanosecond
+ int32_t tzuo_sign; // TimeZoneUTCOffsetSign production
+ int32_t tzuo_hour; // TimeZoneUTCOffsetHour production
+ int32_t tzuo_minute; // TimeZoneUTCOffsetMinute production
+ int32_t tzuo_second; // TimeZoneUTCOffsetSecond production
+ int32_t
+ tzuo_nanosecond; // TimeZoneUTCOffsetFractionalPart stored in nanosecond
+ bool utc_designator; // UTCDesignator is present
+ int32_t tzi_name_start; // Starting offset of TimeZoneIANAName in the input
+ // string.
+ int32_t tzi_name_length; // Length of TimeZoneIANAName production
+ int32_t calendar_name_start; // Starting offset of CalendarName production in
+ // the input string.
+ int32_t calendar_name_length; // Length of CalendarName production.
+
+ ParsedISO8601Result()
+ : date_year(kMinInt31),
+ date_month(kMinInt31),
+ date_day(kMinInt31),
+ time_hour(kMinInt31),
+ time_minute(kMinInt31),
+ time_second(kMinInt31),
+ time_nanosecond(kMinInt31),
+ tzuo_sign(kMinInt31),
+ tzuo_hour(kMinInt31),
+ tzuo_minute(kMinInt31),
+ tzuo_second(kMinInt31),
+ tzuo_nanosecond(kMinInt31),
+ utc_designator(false),
+ tzi_name_start(0),
+ tzi_name_length(0),
+ calendar_name_start(0),
+ calendar_name_length(0) {}
+
+ bool date_year_is_undefined() const { return date_year == kMinInt31; }
+ bool date_month_is_undefined() const { return date_month == kMinInt31; }
+ bool date_day_is_undefined() const { return date_day == kMinInt31; }
+ bool time_hour_is_undefined() const { return time_hour == kMinInt31; }
+ bool time_minute_is_undefined() const { return time_minute == kMinInt31; }
+ bool time_second_is_undefined() const { return time_second == kMinInt31; }
+ bool time_nanosecond_is_undefined() const {
+ return time_nanosecond == kMinInt31;
+ }
+ bool tzuo_hour_is_undefined() const { return tzuo_hour == kMinInt31; }
+ bool tzuo_minute_is_undefined() const { return tzuo_minute == kMinInt31; }
+ bool tzuo_second_is_undefined() const { return tzuo_second == kMinInt31; }
+ bool tzuo_sign_is_undefined() const { return tzuo_sign == kMinInt31; }
+ bool tzuo_nanosecond_is_undefined() const {
+ return tzuo_nanosecond == kMinInt31;
+ }
+};
+
+/**
+ * ParsedISO8601Duration contains the parsed result of ISO 8601 grammar
+ * documented in #prod-TemporalDurationString
+ * for TemporalDurationString.
+ */
+struct ParsedISO8601Duration {
+ int64_t sign; // Sign production
+ int64_t years; // DurationYears production
+ int64_t months; // DurationMonths production
+ int64_t weeks; // DurationWeeks production
+ int64_t days; // DurationDays production
+ int64_t whole_hours; // DurationWholeHours production
+ int64_t hours_fraction; // DurationHoursFraction, in unit of 1e-9 hours
+ int64_t whole_minutes; // DurationWholeMinutes production
+ int64_t minutes_fraction; // DurationMinutesFraction, in unit of 1e-9 minutes
+ int64_t whole_seconds; // DurationWholeSeconds production
+ int64_t seconds_fraction; // DurationSecondsFraction, in unit of nanosecond
+ // (1e-9 seconds).
+
+ ParsedISO8601Duration()
+ : sign(1),
+ years(0),
+ months(0),
+ weeks(0),
+ days(0),
+ whole_hours(0),
+ hours_fraction(0),
+ whole_minutes(0),
+ minutes_fraction(0),
+ whole_seconds(0),
+ seconds_fraction(0) {}
+};
+
+/**
+ * TemporalParser provides low-level parsing functions that support the
+ * implementation of the various ParseTemporal*String Abstract Operations
+ * listed after #sec-temporal-parsetemporalinstantstring.
+ * Each method takes an Isolate and a Handle<String> as input, plus a pointer
+ * to a bool that reports whether the string satisfies the syntax of the
+ * corresponding Temporal*String production, and returns the parsed result.
+ */
+class V8_EXPORT_PRIVATE TemporalParser {
+ public:
+#define DEFINE_PARSE_METHOD(R, NAME) \
+ V8_WARN_UNUSED_RESULT static Maybe<R> Parse##NAME( \
+ Isolate* isolate, Handle<String> iso_string, bool* satisfy)
+ DEFINE_PARSE_METHOD(ParsedISO8601Result, TemporalDateString);
+ DEFINE_PARSE_METHOD(ParsedISO8601Result, TemporalDateTimeString);
+ DEFINE_PARSE_METHOD(ParsedISO8601Result, TemporalTimeString);
+ DEFINE_PARSE_METHOD(ParsedISO8601Result, TemporalYearMonthString);
+ DEFINE_PARSE_METHOD(ParsedISO8601Result, TemporalMonthDayString);
+ DEFINE_PARSE_METHOD(ParsedISO8601Result, TemporalInstantString);
+ DEFINE_PARSE_METHOD(ParsedISO8601Result, TemporalZonedDateTimeString);
+ DEFINE_PARSE_METHOD(ParsedISO8601Result, TemporalTimeZoneString);
+ DEFINE_PARSE_METHOD(ParsedISO8601Result, TemporalRelativeToString);
+ DEFINE_PARSE_METHOD(ParsedISO8601Result, TemporalCalendarString);
+ DEFINE_PARSE_METHOD(ParsedISO8601Duration, TemporalDurationString);
+ DEFINE_PARSE_METHOD(ParsedISO8601Result, TimeZoneNumericUTCOffset);
+};
+#undef DEFINE_PARSE_METHOD
+
+} // namespace internal
+} // namespace v8
+
+#endif // V8_TEMPORAL_TEMPORAL_PARSER_H_
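
DEFINE_PARSE_METHOD and IMPL_PARSE_METHOD together give every production a static entry point of the same shape. A hypothetical caller (not part of this patch) would check the satisfy flag before trusting any field and treat kMinInt31-valued fields as absent through the *_is_undefined() accessors; the sketch below is written under those assumptions and relies only on the interface declared above.

#include "src/temporal/temporal-parser.h"

namespace v8 {
namespace internal {

// Hypothetical helper: extract year/month from an ISO 8601 year-month string.
// Assumes a live Isolate; not part of the patch.
bool GetParsedYearMonth(Isolate* isolate, Handle<String> input, int32_t* year,
                        int32_t* month) {
  bool satisfies = false;
  Maybe<ParsedISO8601Result> maybe =
      TemporalParser::ParseTemporalYearMonthString(isolate, input, &satisfies);
  if (maybe.IsNothing() || !satisfies) return false;
  ParsedISO8601Result parsed = maybe.FromJust();
  // Fields the grammar did not populate stay at kMinInt31 ("undefined").
  if (parsed.date_year_is_undefined() || parsed.date_month_is_undefined()) {
    return false;
  }
  *year = parsed.date_year;
  *month = parsed.date_month;
  return true;
}

}  // namespace internal
}  // namespace v8
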
diff --git a/deps/v8/src/torque/ast.h b/deps/v8/src/torque/ast.h
index d5c99d4890..a4ccefb304 100644
--- a/deps/v8/src/torque/ast.h
+++ b/deps/v8/src/torque/ast.h
@@ -937,7 +937,7 @@ struct ClassFieldExpression {
NameAndTypeExpression name_and_type;
base::Optional<ClassFieldIndexInfo> index;
std::vector<ConditionalAnnotation> conditions;
- bool weak;
+ bool custom_weak_marking;
bool const_qualified;
FieldSynchronization read_synchronization;
FieldSynchronization write_synchronization;
diff --git a/deps/v8/src/torque/constants.h b/deps/v8/src/torque/constants.h
index 6490a30d38..63cddf6e0a 100644
--- a/deps/v8/src/torque/constants.h
+++ b/deps/v8/src/torque/constants.h
@@ -102,6 +102,9 @@ static const char* const ANNOTATION_IF = "@if";
static const char* const ANNOTATION_IFNOT = "@ifnot";
static const char* const ANNOTATION_GENERATE_BODY_DESCRIPTOR =
"@generateBodyDescriptor";
+static const char* const ANNOTATION_GENERATE_UNIQUE_MAP = "@generateUniqueMap";
+static const char* const ANNOTATION_GENERATE_FACTORY_FUNCTION =
+ "@generateFactoryFunction";
static const char* const ANNOTATION_EXPORT = "@export";
static const char* const ANNOTATION_DO_NOT_GENERATE_CAST = "@doNotGenerateCast";
static const char* const ANNOTATION_USE_PARENT_TYPE_CHECKER =
@@ -115,6 +118,8 @@ static const char* const ANNOTATION_CPP_RELAXED_LOAD = "@cppRelaxedLoad";
static const char* const ANNOTATION_CPP_RELEASE_STORE = "@cppReleaseStore";
// Generate C++ accessors with acquire load semantics.
static const char* const ANNOTATION_CPP_ACQUIRE_LOAD = "@cppAcquireLoad";
+// Generate BodyDescriptor using IterateCustomWeakPointers.
+static const char* const ANNOTATION_CUSTOM_WEAK_MARKING = "@customWeakMarking";
inline bool IsConstexprName(const std::string& name) {
return name.substr(0, std::strlen(CONSTEXPR_TYPE_PREFIX)) ==
@@ -147,14 +152,14 @@ enum class ClassFlag {
kIsShape = 1 << 3,
kHasSameInstanceTypeAsParent = 1 << 4,
kGenerateCppClassDefinitions = 1 << 5,
- kCustomCppClass = 1 << 6,
- kHighestInstanceTypeWithinParent = 1 << 7,
- kLowestInstanceTypeWithinParent = 1 << 8,
- kUndefinedLayout = 1 << 9,
- kGenerateBodyDescriptor = 1 << 10,
- kExport = 1 << 11,
- kDoNotGenerateCast = 1 << 12,
- kCustomMap = 1 << 13,
+ kHighestInstanceTypeWithinParent = 1 << 6,
+ kLowestInstanceTypeWithinParent = 1 << 7,
+ kUndefinedLayout = 1 << 8,
+ kGenerateBodyDescriptor = 1 << 9,
+ kExport = 1 << 10,
+ kDoNotGenerateCast = 1 << 11,
+ kGenerateUniqueMap = 1 << 12,
+ kGenerateFactoryFunction = 1 << 13,
};
using ClassFlags = base::Flags<ClassFlag>;
diff --git a/deps/v8/src/torque/earley-parser.cc b/deps/v8/src/torque/earley-parser.cc
index 7326996c70..f99424b1a6 100644
--- a/deps/v8/src/torque/earley-parser.cc
+++ b/deps/v8/src/torque/earley-parser.cc
@@ -54,7 +54,10 @@ base::Optional<ParseResult> Rule::RunAction(const Item* completed_item,
MatchedInput matched_input = completed_item->GetMatchedInput(tokens);
CurrentSourcePosition::Scope pos_scope(matched_input.pos);
ParseResultIterator iterator(std::move(results), matched_input);
- return action_(&iterator);
+ auto result = action_(&iterator);
+ // Make sure the parse action consumed all the child results.
+ CHECK(!iterator.HasNext());
+ return result;
}
Symbol& Symbol::operator=(std::initializer_list<Rule> rules) {
@@ -279,6 +282,7 @@ const Item* RunEarleyAlgorithm(
}
// static
+DISABLE_CFI_ICALL
bool Grammar::MatchChar(int (*char_class)(int), InputPosition* pos) {
if (**pos && char_class(static_cast<unsigned char>(**pos))) {
++*pos;
diff --git a/deps/v8/src/torque/earley-parser.h b/deps/v8/src/torque/earley-parser.h
index 8a94ff861b..bca3cf5fb1 100644
--- a/deps/v8/src/torque/earley-parser.h
+++ b/deps/v8/src/torque/earley-parser.h
@@ -163,10 +163,7 @@ class ParseResultIterator {
explicit ParseResultIterator(std::vector<ParseResult> results,
MatchedInput matched_input)
: results_(std::move(results)), matched_input_(matched_input) {}
- ~ParseResultIterator() {
- // Check that all parse results have been used.
- CHECK_EQ(results_.size(), i_);
- }
+
ParseResultIterator(const ParseResultIterator&) = delete;
ParseResultIterator& operator=(const ParseResultIterator&) = delete;
@@ -248,7 +245,7 @@ class Rule final {
// used in the parser.
class Symbol {
public:
- Symbol() : Symbol({}) {}
+ Symbol() = default;
Symbol(std::initializer_list<Rule> rules) { *this = rules; }
// Disallow copying and moving to ensure Symbol has a stable address.
diff --git a/deps/v8/src/torque/implementation-visitor.cc b/deps/v8/src/torque/implementation-visitor.cc
index ccd8274677..b8ffb2905e 100644
--- a/deps/v8/src/torque/implementation-visitor.cc
+++ b/deps/v8/src/torque/implementation-visitor.cc
@@ -3421,12 +3421,21 @@ void ImplementationVisitor::GenerateCatchBlock(
if (catch_block) {
base::Optional<Binding<LocalLabel>*> catch_handler =
TryLookupLabel(kCatchLabelName);
+ // Reset the local scopes to prevent the macro calls below from using the
+ // current catch handler.
+ BindingsManagersScope bindings_managers_scope;
if (assembler().CurrentBlockIsComplete()) {
assembler().Bind(*catch_block);
- assembler().Goto((*catch_handler)->block, 1);
+ GenerateCall(QualifiedName({TORQUE_INTERNAL_NAMESPACE_STRING},
+ "GetAndResetPendingMessage"),
+ Arguments{{}, {}}, {}, false);
+ assembler().Goto((*catch_handler)->block, 2);
} else {
CfgAssemblerScopedTemporaryBlock temp(&assembler(), *catch_block);
- assembler().Goto((*catch_handler)->block, 1);
+ GenerateCall(QualifiedName({TORQUE_INTERNAL_NAMESPACE_STRING},
+ "GetAndResetPendingMessage"),
+ Arguments{{}, {}}, {}, false);
+ assembler().Goto((*catch_handler)->block, 2);
}
}
}
@@ -3728,7 +3737,18 @@ class FieldOffsetsGenerator {
if (auto field_as_struct = field_type->StructSupertype()) {
struct_contents = (*field_as_struct)->ClassifyContents();
}
- if (struct_contents == StructType::ClassificationFlag::kMixed) {
+ if ((struct_contents & StructType::ClassificationFlag::kStrongTagged) &&
+ (struct_contents & StructType::ClassificationFlag::kWeakTagged)) {
+ // It's okay for a struct to contain both strong and weak data. We'll just
+ // treat the whole thing as weak. This is required for DescriptorEntry.
+ struct_contents &= ~StructType::Classification(
+ StructType::ClassificationFlag::kStrongTagged);
+ }
+ bool struct_contains_tagged_fields =
+ (struct_contents & StructType::ClassificationFlag::kStrongTagged) ||
+ (struct_contents & StructType::ClassificationFlag::kWeakTagged);
+ if (struct_contains_tagged_fields &&
+ (struct_contents & StructType::ClassificationFlag::kUntagged)) {
// We can't declare what section a struct goes in if it has multiple
// categories of data within.
Error(
@@ -3736,15 +3756,13 @@ class FieldOffsetsGenerator {
"tagged and untagged data.")
.Position(f.pos);
}
- // Currently struct-valued fields are only allowed to have tagged data; see
- // TypeVisitor::VisitClassFieldsAndMethods.
- if (field_type->IsSubtypeOf(TypeOracle::GetTaggedType()) ||
- struct_contents == StructType::ClassificationFlag::kTagged) {
- if (f.is_weak) {
- return FieldSectionType::kWeakSection;
- } else {
- return FieldSectionType::kStrongSection;
- }
+ if ((field_type->IsSubtypeOf(TypeOracle::GetStrongTaggedType()) ||
+ struct_contents == StructType::ClassificationFlag::kStrongTagged) &&
+ !f.custom_weak_marking) {
+ return FieldSectionType::kStrongSection;
+ } else if (field_type->IsSubtypeOf(TypeOracle::GetTaggedType()) ||
+ struct_contains_tagged_fields) {
+ return FieldSectionType::kWeakSection;
} else {
return FieldSectionType::kScalarSection;
}
@@ -4688,8 +4706,7 @@ void ImplementationVisitor::GenerateClassDefinitions(
structs_used_in_classes.insert(*field_as_struct);
}
}
- if (type->ShouldExport() && !type->IsAbstract() &&
- !type->HasCustomMap()) {
+ if (type->ShouldGenerateFactoryFunction()) {
std::string return_type = type->HandlifiedCppTypeName();
std::string function_name = "New" + type->name();
std::stringstream parameters;
diff --git a/deps/v8/src/torque/instance-type-generator.cc b/deps/v8/src/torque/instance-type-generator.cc
index 922192f15b..b4ae333495 100644
--- a/deps/v8/src/torque/instance-type-generator.cc
+++ b/deps/v8/src/torque/instance-type-generator.cc
@@ -460,20 +460,23 @@ void ImplementationVisitor::GenerateInstanceTypes(
std::string instance_type_name =
CapifyStringWithUnderscores(type->name()) + "_TYPE";
- if (type->IsExtern()) continue;
- torque_defined_class_list << " V(" << upper_case_name << ") \\\n";
-
- if (type->IsAbstract() || type->HasCustomMap()) continue;
- torque_defined_map_csa_list << " V(_, " << upper_case_name << "Map, "
- << lower_case_name << "_map, "
- << upper_case_name << ") \\\n";
- torque_defined_map_root_list << " V(Map, " << lower_case_name << "_map, "
- << upper_case_name << "Map) \\\n";
- std::stringstream& list = type->HasStaticSize()
- ? torque_defined_fixed_instance_type_list
- : torque_defined_varsize_instance_type_list;
- list << " V(" << instance_type_name << ", " << upper_case_name << ", "
- << lower_case_name << ") \\\n";
+ if (!type->IsExtern()) {
+ torque_defined_class_list << " V(" << upper_case_name << ") \\\n";
+ }
+
+ if (type->ShouldGenerateUniqueMap()) {
+ torque_defined_map_csa_list << " V(_, " << upper_case_name << "Map, "
+ << lower_case_name << "_map, "
+ << upper_case_name << ") \\\n";
+ torque_defined_map_root_list << " V(Map, " << lower_case_name
+ << "_map, " << upper_case_name
+ << "Map) \\\n";
+ std::stringstream& list =
+ type->HasStaticSize() ? torque_defined_fixed_instance_type_list
+ : torque_defined_varsize_instance_type_list;
+ list << " V(" << instance_type_name << ", " << upper_case_name << ", "
+ << lower_case_name << ") \\\n";
+ }
}
header << "// Fully Torque-defined classes (both internal and exported).\n";
diff --git a/deps/v8/src/torque/source-positions.h b/deps/v8/src/torque/source-positions.h
index 2468d963a6..32f60e06e0 100644
--- a/deps/v8/src/torque/source-positions.h
+++ b/deps/v8/src/torque/source-positions.h
@@ -109,7 +109,8 @@ inline std::string PositionAsString(SourcePosition pos) {
}
inline std::ostream& operator<<(std::ostream& out, SourcePosition pos) {
- return out << SourceFileMap::PathFromV8Root(pos.source)
+ return out << "https://source.chromium.org/chromium/chromium/src/+/main:v8/"
+ << SourceFileMap::PathFromV8Root(pos.source)
<< "?l=" << (pos.start.line + 1)
<< "&c=" << (pos.start.column + 1);
}
diff --git a/deps/v8/src/torque/torque-parser.cc b/deps/v8/src/torque/torque-parser.cc
index 5f83a62abc..7578fe7fff 100644
--- a/deps/v8/src/torque/torque-parser.cc
+++ b/deps/v8/src/torque/torque-parser.cc
@@ -55,6 +55,11 @@ class BuildFlags : public ContextualClass<BuildFlags> {
#else
build_flags_["V8_SCRIPTORMODULE_LEGACY_LIFETIME"] = false;
#endif
+#ifdef V8_ENABLE_WEBASSEMBLY
+ build_flags_["V8_ENABLE_WEBASSEMBLY"] = true;
+#else
+ build_flags_["V8_ENABLE_WEBASSEMBLY"] = false;
+#endif
}
static bool GetFlag(const std::string& name, const char* production) {
auto it = Get().build_flags_.find(name);
@@ -899,6 +904,7 @@ base::Optional<ParseResult> MakeClassDeclaration(
ANNOTATION_DO_NOT_GENERATE_CPP_CLASS, ANNOTATION_CUSTOM_CPP_CLASS,
ANNOTATION_CUSTOM_MAP, ANNOTATION_GENERATE_BODY_DESCRIPTOR,
ANNOTATION_EXPORT, ANNOTATION_DO_NOT_GENERATE_CAST,
+ ANNOTATION_GENERATE_UNIQUE_MAP, ANNOTATION_GENERATE_FACTORY_FUNCTION,
ANNOTATION_HIGHEST_INSTANCE_TYPE_WITHIN_PARENT,
ANNOTATION_LOWEST_INSTANCE_TYPE_WITHIN_PARENT},
{ANNOTATION_RESERVE_BITS_IN_INSTANCE_TYPE,
@@ -913,10 +919,16 @@ base::Optional<ParseResult> MakeClassDeclaration(
bool do_not_generate_cpp_class =
annotations.Contains(ANNOTATION_DO_NOT_GENERATE_CPP_CLASS);
if (annotations.Contains(ANNOTATION_CUSTOM_CPP_CLASS)) {
- flags |= ClassFlag::kCustomCppClass;
+ Error(
+ "@customCppClass is deprecated. Use 'extern' instead. "
+ "@generateBodyDescriptor, @generateUniqueMap, and "
+ "@generateFactoryFunction accomplish most of what '@export "
+ "@customCppClass' used to.");
}
if (annotations.Contains(ANNOTATION_CUSTOM_MAP)) {
- flags |= ClassFlag::kCustomMap;
+ Error(
+ "@customMap is deprecated. Generating a unique map is opt-in now using "
+ "@generateUniqueMap.");
}
if (annotations.Contains(ANNOTATION_DO_NOT_GENERATE_CAST)) {
flags |= ClassFlag::kDoNotGenerateCast;
@@ -924,6 +936,12 @@ base::Optional<ParseResult> MakeClassDeclaration(
if (annotations.Contains(ANNOTATION_GENERATE_BODY_DESCRIPTOR)) {
flags |= ClassFlag::kGenerateBodyDescriptor;
}
+ if (annotations.Contains(ANNOTATION_GENERATE_UNIQUE_MAP)) {
+ flags |= ClassFlag::kGenerateUniqueMap;
+ }
+ if (annotations.Contains(ANNOTATION_GENERATE_FACTORY_FUNCTION)) {
+ flags |= ClassFlag::kGenerateFactoryFunction;
+ }
if (annotations.Contains(ANNOTATION_EXPORT)) {
flags |= ClassFlag::kExport;
}
@@ -1722,16 +1740,32 @@ base::Optional<ParseResult> MakeLabelBlock(ParseResultIterator* child_results) {
}
base::Optional<ParseResult> MakeCatchBlock(ParseResultIterator* child_results) {
- auto variable = child_results->NextAs<std::string>();
+ auto parameter_names = child_results->NextAs<std::vector<std::string>>();
auto body = child_results->NextAs<Statement*>();
- if (!IsLowerCamelCase(variable)) {
- NamingConventionError("Exception", variable, "lowerCamelCase");
+ for (const std::string& variable : parameter_names) {
+ if (!IsLowerCamelCase(variable)) {
+ NamingConventionError("Exception", variable, "lowerCamelCase");
+ }
+ }
+ if (parameter_names.size() != 2) {
+ ReportError(
+ "A catch clause needs to have exactly two parameters: The exception "
+ "and the message. How about: \"catch (exception, message) { ...\".");
}
ParameterList parameters;
- parameters.names.push_back(MakeNode<Identifier>(variable));
+
+ parameters.names.push_back(MakeNode<Identifier>(parameter_names[0]));
parameters.types.push_back(MakeNode<BasicTypeExpression>(
std::vector<std::string>{}, MakeNode<Identifier>("JSAny"),
std::vector<TypeExpression*>{}));
+ parameters.names.push_back(MakeNode<Identifier>(parameter_names[1]));
+ parameters.types.push_back(MakeNode<UnionTypeExpression>(
+ MakeNode<BasicTypeExpression>(std::vector<std::string>{},
+ MakeNode<Identifier>("JSMessageObject"),
+ std::vector<TypeExpression*>{}),
+ MakeNode<BasicTypeExpression>(std::vector<std::string>{},
+ MakeNode<Identifier>("TheHole"),
+ std::vector<TypeExpression*>{})));
parameters.has_varargs = false;
TryHandler* result = MakeNode<TryHandler>(
TryHandler::HandlerKind::kCatch, MakeNode<Identifier>(kCatchLabelName),
@@ -1974,7 +2008,8 @@ base::Optional<ParseResult> MakeClassField(ParseResultIterator* child_results) {
AnnotationSet annotations(
child_results,
{ANNOTATION_CPP_RELAXED_STORE, ANNOTATION_CPP_RELAXED_LOAD,
- ANNOTATION_CPP_RELEASE_STORE, ANNOTATION_CPP_ACQUIRE_LOAD},
+ ANNOTATION_CPP_RELEASE_STORE, ANNOTATION_CPP_ACQUIRE_LOAD,
+ ANNOTATION_CUSTOM_WEAK_MARKING},
{ANNOTATION_IF, ANNOTATION_IFNOT});
FieldSynchronization write_synchronization = FieldSynchronization::kNone;
if (annotations.Contains(ANNOTATION_CPP_RELEASE_STORE)) {
@@ -2000,7 +2035,16 @@ base::Optional<ParseResult> MakeClassField(ParseResultIterator* child_results) {
conditions.push_back(
{*ifnot_condition, ConditionalAnnotationType::kNegative});
}
- auto weak = child_results->NextAs<bool>();
+ bool custom_weak_marking =
+ annotations.Contains(ANNOTATION_CUSTOM_WEAK_MARKING);
+ auto deprecated_weak = child_results->NextAs<bool>();
+ if (deprecated_weak) {
+ Error(
+ "The keyword 'weak' is deprecated. For a field that can contain a "
+ "normal weak pointer, use type Weak<T>. For a field that should be "
+ "marked in some custom way, use @customWeakMarking.");
+ custom_weak_marking = true;
+ }
auto const_qualified = child_results->NextAs<bool>();
auto name = child_results->NextAs<Identifier*>();
auto optional = child_results->NextAs<bool>();
@@ -2025,7 +2069,7 @@ base::Optional<ParseResult> MakeClassField(ParseResultIterator* child_results) {
return ParseResult{ClassFieldExpression{{name, type},
index_info,
std::move(conditions),
- weak,
+ custom_weak_marking,
const_qualified,
read_synchronization,
write_synchronization}};
@@ -2156,6 +2200,49 @@ struct TorqueGrammar : Grammar {
return true;
}
+ template <class T, bool first>
+ static base::Optional<ParseResult> MakeExtendedVectorIfAnnotation(
+ ParseResultIterator* child_results) {
+ std::vector<T> l = {};
+ if (!first) l = child_results->NextAs<std::vector<T>>();
+ AnnotationSet annotations(child_results, {},
+ {ANNOTATION_IF, ANNOTATION_IFNOT});
+ bool skipped = false;
+ if (base::Optional<std::string> condition =
+ annotations.GetStringParam(ANNOTATION_IF)) {
+ if (!BuildFlags::GetFlag(*condition, ANNOTATION_IF)) skipped = true;
+ }
+ if (base::Optional<std::string> condition =
+ annotations.GetStringParam(ANNOTATION_IFNOT)) {
+ if (BuildFlags::GetFlag(*condition, ANNOTATION_IFNOT)) skipped = true;
+ }
+ T x = child_results->NextAs<T>();
+
+ if (skipped) return ParseResult{std::move(l)};
+ l.push_back(std::move(x));
+ return ParseResult{std::move(l)};
+ }
+
+ template <class T>
+ Symbol* NonemptyListAllowIfAnnotation(
+ Symbol* element, base::Optional<Symbol*> separator = {}) {
+ Symbol* list = NewSymbol();
+ *list = {
+ Rule({annotations, element}, MakeExtendedVectorIfAnnotation<T, true>),
+ separator ? Rule({list, annotations, *separator, element},
+ MakeExtendedVectorIfAnnotation<T, false>)
+ : Rule({list, annotations, element},
+ MakeExtendedVectorIfAnnotation<T, false>)};
+ return list;
+ }
+
+ template <class T>
+ Symbol* ListAllowIfAnnotation(Symbol* element,
+ base::Optional<Symbol*> separator = {}) {
+ return TryOrDefault<std::vector<T>>(
+ NonemptyListAllowIfAnnotation<T>(element, separator));
+ }
+
TorqueGrammar() : Grammar(&file) { SetWhitespace(MatchWhitespace); }
// Result: Expression*
@@ -2492,16 +2579,18 @@ struct TorqueGrammar : Grammar {
MakeAssignmentExpression)};
// Result: Statement*
- Symbol block = {Rule({CheckIf(Token("deferred")), Token("{"),
- List<Statement*>(&statement), Token("}")},
- MakeBlockStatement)};
+ Symbol block = {
+ Rule({CheckIf(Token("deferred")), Token("{"),
+ ListAllowIfAnnotation<Statement*>(&statement), Token("}")},
+ MakeBlockStatement)};
// Result: TryHandler*
Symbol tryHandler = {
Rule({Token("label"), &name,
TryOrDefault<ParameterList>(&parameterListNoVararg), &block},
MakeLabelBlock),
- Rule({Token("catch"), Token("("), &identifier, Token(")"), &block},
+ Rule({Token("catch"), Token("("),
+ List<std::string>(&identifier, Token(",")), Token(")"), &block},
MakeCatchBlock)};
// Result: ExpressionWithSource
@@ -2554,7 +2643,7 @@ struct TorqueGrammar : Grammar {
expression,
Token(")"),
Token("{"),
- NonemptyList<TypeswitchCase>(&typeswitchCase),
+ NonemptyListAllowIfAnnotation<TypeswitchCase>(&typeswitchCase),
Token("}"),
},
MakeTypeswitchStatement),
@@ -2614,11 +2703,13 @@ struct TorqueGrammar : Grammar {
MakeClassDeclaration),
Rule({annotations, Token("struct"), &name,
TryOrDefault<GenericParameters>(&genericParameters), Token("{"),
- List<Declaration*>(&method),
- List<StructFieldExpression>(&structField), Token("}")},
+ ListAllowIfAnnotation<Declaration*>(&method),
+ ListAllowIfAnnotation<StructFieldExpression>(&structField),
+ Token("}")},
AsSingletonVector<Declaration*, MakeStructDeclaration>()),
Rule({Token("bitfield"), Token("struct"), &name, Token("extends"), &type,
- Token("{"), List<BitFieldDeclaration>(&bitFieldDeclaration),
+ Token("{"),
+ ListAllowIfAnnotation<BitFieldDeclaration>(&bitFieldDeclaration),
Token("}")},
AsSingletonVector<Declaration*, MakeBitFieldStructDeclaration>()),
Rule({annotations, CheckIf(Token("transient")), Token("type"), &name,
@@ -2675,7 +2766,8 @@ struct TorqueGrammar : Grammar {
Optional<TypeExpression*>(Sequence({Token("extends"), &type})),
Optional<std::string>(
Sequence({Token("constexpr"), &externalString})),
- Token("{"), NonemptyList<EnumEntry>(&enumEntry, Token(",")),
+ Token("{"),
+ NonemptyListAllowIfAnnotation<EnumEntry>(&enumEntry, Token(",")),
CheckIf(Sequence({Token(","), Token("...")})), Token("}")},
MakeEnumDeclaration),
Rule({Token("namespace"), &identifier, Token("{"), &declarationList,
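
MakeExtendedVectorIfAnnotation is what lets @if/@ifnot gate individual list elements: the annotation is evaluated against BuildFlags, the element is still consumed from the parse results, and a skipped element is simply not appended to the vector. The standalone sketch below restates just that filtering step; the flag table and element type are illustrative, not Torque's actual types.

#include <map>
#include <optional>
#include <string>
#include <vector>

// Illustrative stand-ins for Torque's BuildFlags and annotated list elements.
static const std::map<std::string, bool> kBuildFlags = {
    {"V8_ENABLE_WEBASSEMBLY", true},
    {"V8_SCRIPTORMODULE_LEGACY_LIFETIME", false},
};

struct AnnotatedElement {
  std::string value;
  std::optional<std::string> if_flag;     // keep only when the flag is set
  std::optional<std::string> ifnot_flag;  // keep only when the flag is unset
};

std::vector<std::string> FilterByBuildFlags(
    const std::vector<AnnotatedElement>& elements) {
  std::vector<std::string> kept;
  for (const AnnotatedElement& element : elements) {
    bool skipped = false;
    if (element.if_flag && !kBuildFlags.at(*element.if_flag)) skipped = true;
    if (element.ifnot_flag && kBuildFlags.at(*element.ifnot_flag)) {
      skipped = true;
    }
    // As in MakeExtendedVectorIfAnnotation, the element has already been
    // consumed; skipping only means it is not appended to the result.
    if (!skipped) kept.push_back(element.value);
  }
  return kept;
}
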
diff --git a/deps/v8/src/torque/type-visitor.cc b/deps/v8/src/torque/type-visitor.cc
index d7b107dbe3..aaae9e559c 100644
--- a/deps/v8/src/torque/type-visitor.cc
+++ b/deps/v8/src/torque/type-visitor.cc
@@ -287,15 +287,23 @@ const ClassType* TypeVisitor::ComputeType(
Error("Class \"", decl->name->value,
"\" requires a layout but doesn't have one");
}
- if (flags & ClassFlag::kCustomCppClass) {
- if (!(flags & ClassFlag::kExport)) {
- Error("Only exported classes can have a custom C++ class.");
+ if (flags & ClassFlag::kGenerateUniqueMap) {
+ if (!(flags & ClassFlag::kExtern)) {
+ Error("No need to specify ", ANNOTATION_GENERATE_UNIQUE_MAP,
+ ", non-extern classes always have a unique map.");
}
- if (flags & ClassFlag::kExtern) {
- Error("No need to specify ", ANNOTATION_CUSTOM_CPP_CLASS,
- ", extern classes always have a custom C++ class.");
+ if (flags & ClassFlag::kAbstract) {
+ Error(ANNOTATION_ABSTRACT, " and ", ANNOTATION_GENERATE_UNIQUE_MAP,
+ " shouldn't be used together, because abstract classes are never "
+ "instantiated.");
}
}
+ if ((flags & ClassFlag::kGenerateFactoryFunction) &&
+ (flags & ClassFlag::kAbstract)) {
+ Error(ANNOTATION_ABSTRACT, " and ", ANNOTATION_GENERATE_FACTORY_FUNCTION,
+ " shouldn't be used together, because abstract classes are never "
+ "instantiated.");
+ }
if (flags & ClassFlag::kExtern) {
if (decl->generates) {
bool enforce_tnode_type = true;
@@ -427,8 +435,8 @@ void TypeVisitor::VisitClassFieldsAndMethods(
"found type ",
*field_type);
}
- if (field_expression.weak) {
- ReportError("in-object properties cannot be weak");
+ if (field_expression.custom_weak_marking) {
+ ReportError("in-object properties cannot use @customWeakMarking");
}
}
base::Optional<ClassFieldIndexInfo> array_length = field_expression.index;
@@ -438,7 +446,7 @@ void TypeVisitor::VisitClassFieldsAndMethods(
array_length,
{field_expression.name_and_type.name->value, field_type},
class_offset.SingleValue(),
- field_expression.weak,
+ field_expression.custom_weak_marking,
field_expression.const_qualified,
field_expression.read_synchronization,
field_expression.write_synchronization});
diff --git a/deps/v8/src/torque/types.cc b/deps/v8/src/torque/types.cc
index 43bdef1203..c69986e407 100644
--- a/deps/v8/src/torque/types.cc
+++ b/deps/v8/src/torque/types.cc
@@ -432,8 +432,10 @@ StructType::Classification StructType::ClassifyContents() const {
Classification result = ClassificationFlag::kEmpty;
for (const Field& struct_field : fields()) {
const Type* field_type = struct_field.name_and_type.type;
- if (field_type->IsSubtypeOf(TypeOracle::GetTaggedType())) {
- result |= ClassificationFlag::kTagged;
+ if (field_type->IsSubtypeOf(TypeOracle::GetStrongTaggedType())) {
+ result |= ClassificationFlag::kStrongTagged;
+ } else if (field_type->IsSubtypeOf(TypeOracle::GetTaggedType())) {
+ result |= ClassificationFlag::kWeakTagged;
} else if (auto field_as_struct = field_type->StructSupertype()) {
result |= (*field_as_struct)->ClassifyContents();
} else {
@@ -618,13 +620,13 @@ void ComputeSlotKindsHelper(std::vector<ObjectSlotKind>* slots,
} else {
ObjectSlotKind kind;
if (type->IsSubtypeOf(TypeOracle::GetObjectType())) {
- if (field.is_weak) {
+ if (field.custom_weak_marking) {
kind = ObjectSlotKind::kCustomWeakPointer;
} else {
kind = ObjectSlotKind::kStrongPointer;
}
} else if (type->IsSubtypeOf(TypeOracle::GetTaggedType())) {
- DCHECK(!field.is_weak);
+ DCHECK(!field.custom_weak_marking);
kind = ObjectSlotKind::kMaybeObjectPointer;
} else {
kind = ObjectSlotKind::kNoPointer;
@@ -985,8 +987,8 @@ std::ostream& operator<<(std::ostream& os, const NameAndType& name_and_type) {
std::ostream& operator<<(std::ostream& os, const Field& field) {
os << field.name_and_type;
- if (field.is_weak) {
- os << " (weak)";
+ if (field.custom_weak_marking) {
+ os << " (custom weak)";
}
return os;
}
diff --git a/deps/v8/src/torque/types.h b/deps/v8/src/torque/types.h
index d14dfaf7b2..45b7390f2b 100644
--- a/deps/v8/src/torque/types.h
+++ b/deps/v8/src/torque/types.h
@@ -226,7 +226,7 @@ struct Field {
// because we don't support the struct field for on-heap layouts.
base::Optional<size_t> offset;
- bool is_weak;
+ bool custom_weak_marking;
bool const_qualified;
FieldSynchronization read_synchronization;
FieldSynchronization write_synchronization;
@@ -618,9 +618,9 @@ class StructType final : public AggregateType {
enum class ClassificationFlag {
kEmpty = 0,
- kTagged = 1 << 0,
- kUntagged = 1 << 1,
- kMixed = kTagged | kUntagged,
+ kStrongTagged = 1 << 0,
+ kWeakTagged = 1 << 1,
+ kUntagged = 1 << 2,
};
using Classification = base::Flags<ClassificationFlag>;
@@ -691,11 +691,15 @@ class ClassType final : public AggregateType {
bool ShouldGenerateCppClassDefinitions() const {
return (flags_ & ClassFlag::kGenerateCppClassDefinitions) || !IsExtern();
}
- bool ShouldGenerateFullClassDefinition() const {
- return !IsExtern() && !(flags_ & ClassFlag::kCustomCppClass);
+ bool ShouldGenerateFullClassDefinition() const { return !IsExtern(); }
+ bool ShouldGenerateUniqueMap() const {
+ return (flags_ & ClassFlag::kGenerateUniqueMap) ||
+ (!IsExtern() && !IsAbstract());
+ }
+ bool ShouldGenerateFactoryFunction() const {
+ return (flags_ & ClassFlag::kGenerateFactoryFunction) ||
+ (ShouldExport() && !IsAbstract());
}
- // Class with multiple or non-standard maps, do not auto-generate map.
- bool HasCustomMap() const { return flags_ & ClassFlag::kCustomMap; }
bool ShouldExport() const { return flags_ & ClassFlag::kExport; }
bool IsShape() const { return flags_ & ClassFlag::kIsShape; }
bool HasStaticSize() const;
diff --git a/deps/v8/src/trap-handler/trap-handler.h b/deps/v8/src/trap-handler/trap-handler.h
index d83f88ebb9..79ddf56653 100644
--- a/deps/v8/src/trap-handler/trap-handler.h
+++ b/deps/v8/src/trap-handler/trap-handler.h
@@ -27,7 +27,7 @@ namespace trap_handler {
#define V8_TRAP_HANDLER_SUPPORTED true
// Arm64 simulator on x64 on Linux, Mac, or Windows.
#elif V8_TARGET_ARCH_ARM64 && V8_HOST_ARCH_X64 && \
- (V8_OS_LINUX || V8_OS_MACOSX)
+ (V8_OS_LINUX || V8_OS_MACOSX || V8_OS_WIN)
#define V8_TRAP_HANDLER_VIA_SIMULATOR
#define V8_TRAP_HANDLER_SUPPORTED true
// Everything else is unsupported.
diff --git a/deps/v8/src/utils/allocation.cc b/deps/v8/src/utils/allocation.cc
index f7bf9af2fb..569c67fd25 100644
--- a/deps/v8/src/utils/allocation.cc
+++ b/deps/v8/src/utils/allocation.cc
@@ -14,7 +14,9 @@
#include "src/base/platform/platform.h"
#include "src/base/platform/wrappers.h"
#include "src/base/sanitizer/lsan-page-allocator.h"
+#include "src/base/sanitizer/lsan-virtual-address-space.h"
#include "src/base/vector.h"
+#include "src/base/virtual-address-space.h"
#include "src/flags/flags.h"
#include "src/init/v8.h"
#include "src/security/vm-cage.h"
@@ -84,6 +86,16 @@ v8::PageAllocator* GetPlatformPageAllocator() {
return GetPageAllocatorInitializer()->page_allocator();
}
+v8::VirtualAddressSpace* GetPlatformVirtualAddressSpace() {
+#if defined(LEAK_SANITIZER)
+ static base::LeakyObject<base::LsanVirtualAddressSpace> vas(
+ std::make_unique<base::VirtualAddressSpace>());
+#else
+ static base::LeakyObject<base::VirtualAddressSpace> vas;
+#endif
+ return vas.get();
+}
+
#ifdef V8_VIRTUAL_MEMORY_CAGE
v8::PageAllocator* GetVirtualMemoryCagePageAllocator() {
// TODO(chromium:1218005) remove this code once the cage is no longer
@@ -189,7 +201,7 @@ void* AllocatePages(v8::PageAllocator* page_allocator, void* hint, size_t size,
DCHECK_EQ(hint, AlignedAddress(hint, alignment));
DCHECK(IsAligned(size, page_allocator->AllocatePageSize()));
if (FLAG_randomize_all_allocations) {
- hint = page_allocator->GetRandomMmapAddr();
+ hint = AlignedAddress(page_allocator->GetRandomMmapAddr(), alignment);
}
void* result = nullptr;
for (int i = 0; i < kAllocationTries; ++i) {
@@ -424,10 +436,11 @@ bool VirtualMemoryCage::InitReservation(
// The reservation could still be somewhere else but we can accept it
// if it has the required alignment.
- Address address = VirtualMemoryCageStart(reservation.address(), params);
- if (reservation.address() == address) {
+ Address start_address =
+ VirtualMemoryCageStart(reservation.address(), params);
+ if (reservation.address() == start_address) {
reservation_ = std::move(reservation);
- base_ = address + params.base_bias_size;
+ base_ = start_address + params.base_bias_size;
CHECK_EQ(reservation_.size(), params.reservation_size);
break;
}
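
The one-line change to AllocatePages keeps the randomized hint consistent with the DCHECK above it, which requires the hint to already satisfy the requested alignment; GetRandomMmapAddr alone only guarantees page alignment. AlignedAddress itself is defined elsewhere in V8; the sketch below shows the round-down form it is assumed to take, for a power-of-two alignment.

#include <cstddef>
#include <cstdint>

// Round an address down to a power-of-two alignment, the behavior the
// realigned random hint above relies on (assumption, not V8's definition).
inline void* AlignDownAddress(void* address, size_t alignment) {
  uintptr_t value = reinterpret_cast<uintptr_t>(address);
  return reinterpret_cast<void*>(value &
                                 ~(static_cast<uintptr_t>(alignment) - 1));
}
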
diff --git a/deps/v8/src/utils/allocation.h b/deps/v8/src/utils/allocation.h
index 7127b8efe8..623214db7b 100644
--- a/deps/v8/src/utils/allocation.h
+++ b/deps/v8/src/utils/allocation.h
@@ -102,6 +102,10 @@ V8_EXPORT_PRIVATE void AlignedFree(void* ptr);
// Returns platform page allocator instance. Guaranteed to be a valid pointer.
V8_EXPORT_PRIVATE v8::PageAllocator* GetPlatformPageAllocator();
+// Returns the platform virtual address space instance. Guaranteed to be a
+// valid pointer.
+V8_EXPORT_PRIVATE v8::VirtualAddressSpace* GetPlatformVirtualAddressSpace();
+
#ifdef V8_VIRTUAL_MEMORY_CAGE
// Returns the virtual memory cage page allocator instance for allocating pages
// inside the virtual memory cage. Guaranteed to be a valid pointer.
diff --git a/deps/v8/src/utils/identity-map.h b/deps/v8/src/utils/identity-map.h
index b5db881f3f..cb002acbb9 100644
--- a/deps/v8/src/utils/identity-map.h
+++ b/deps/v8/src/utils/identity-map.h
@@ -174,6 +174,7 @@ class IdentityMap : public IdentityMapBase {
V* operator*() { return entry(); }
V* operator->() { return entry(); }
bool operator!=(const Iterator& other) { return index_ != other.index_; }
+ bool operator==(const Iterator& other) { return index_ == other.index_; }
private:
Iterator(IdentityMap* map, int index) : map_(map), index_(index) {}
diff --git a/deps/v8/src/wasm/baseline/arm/liftoff-assembler-arm.h b/deps/v8/src/wasm/baseline/arm/liftoff-assembler-arm.h
index 9b113ff886..19e4833ebc 100644
--- a/deps/v8/src/wasm/baseline/arm/liftoff-assembler-arm.h
+++ b/deps/v8/src/wasm/baseline/arm/liftoff-assembler-arm.h
@@ -442,7 +442,7 @@ inline void EmitAnyTrue(LiftoffAssembler* assm, LiftoffRegister dst,
int LiftoffAssembler::PrepareStackFrame() {
if (!CpuFeatures::IsSupported(ARMv7)) {
- bailout(kUnsupportedArchitecture, "Liftoff needs ARMv7");
+ bailout(kUnsupportedArchitecture, "Armv6 not supported");
return 0;
}
uint32_t offset = static_cast<uint32_t>(pc_offset());
@@ -584,7 +584,7 @@ void LiftoffAssembler::LoadConstant(LiftoffRegister reg, WasmValue value,
TurboAssembler::Move(reg.gp(), Operand(value.to_i32(), rmode));
break;
case kI64: {
- DCHECK(RelocInfo::IsNone(rmode));
+ DCHECK(RelocInfo::IsNoInfo(rmode));
int32_t low_word = value.to_i64();
int32_t high_word = value.to_i64() >> 32;
TurboAssembler::Move(reg.low_gp(), Operand(low_word));
diff --git a/deps/v8/src/wasm/baseline/ia32/liftoff-assembler-ia32.h b/deps/v8/src/wasm/baseline/ia32/liftoff-assembler-ia32.h
index 87592d6564..f976e76c6d 100644
--- a/deps/v8/src/wasm/baseline/ia32/liftoff-assembler-ia32.h
+++ b/deps/v8/src/wasm/baseline/ia32/liftoff-assembler-ia32.h
@@ -327,7 +327,7 @@ void LiftoffAssembler::LoadConstant(LiftoffRegister reg, WasmValue value,
TurboAssembler::Move(reg.gp(), Immediate(value.to_i32(), rmode));
break;
case kI64: {
- DCHECK(RelocInfo::IsNone(rmode));
+ DCHECK(RelocInfo::IsNoInfo(rmode));
int32_t low_word = value.to_i64();
int32_t high_word = value.to_i64() >> 32;
TurboAssembler::Move(reg.low_gp(), Immediate(low_word));
diff --git a/deps/v8/src/wasm/baseline/liftoff-assembler.h b/deps/v8/src/wasm/baseline/liftoff-assembler.h
index f834f62e69..a1a08d9a29 100644
--- a/deps/v8/src/wasm/baseline/liftoff-assembler.h
+++ b/deps/v8/src/wasm/baseline/liftoff-assembler.h
@@ -694,7 +694,7 @@ class LiftoffAssembler : public TurboAssembler {
inline static bool NeedsAlignment(ValueKind kind);
inline void LoadConstant(LiftoffRegister, WasmValue,
- RelocInfo::Mode rmode = RelocInfo::NONE);
+ RelocInfo::Mode rmode = RelocInfo::NO_INFO);
inline void LoadInstanceFromFrame(Register dst);
inline void LoadFromInstance(Register dst, Register instance, int offset,
int size);
diff --git a/deps/v8/src/wasm/baseline/liftoff-compiler.cc b/deps/v8/src/wasm/baseline/liftoff-compiler.cc
index 5df90b7ffc..089996884d 100644
--- a/deps/v8/src/wasm/baseline/liftoff-compiler.cc
+++ b/deps/v8/src/wasm/baseline/liftoff-compiler.cc
@@ -316,13 +316,6 @@ void CheckBailoutAllowed(LiftoffBailoutReason reason, const char* detail,
return;
#endif
-#if V8_TARGET_ARCH_ARM
- // Allow bailout for missing ARMv7 support.
- if (!CpuFeatures::IsSupported(ARMv7) && reason == kUnsupportedArchitecture) {
- return;
- }
-#endif
-
#define LIST_FEATURE(name, ...) kFeature_##name,
constexpr WasmFeatures kExperimentalFeatures{
FOREACH_WASM_EXPERIMENTAL_FEATURE_FLAG(LIST_FEATURE)};
@@ -728,14 +721,16 @@ class LiftoffCompiler {
}
void TierupCheck(FullDecoder* decoder, WasmCodePosition position,
- int budget_used) {
+ int budget_used, Register scratch_reg) {
if (for_debugging_ != kNoDebugging) return;
CODE_COMMENT("tierup check");
// We never want to blow the entire budget at once.
const int kMax = FLAG_wasm_tiering_budget / 4;
if (budget_used > kMax) budget_used = kMax;
- LiftoffRegister budget_reg = __ GetUnusedRegister(kGpReg, {});
+ LiftoffRegister budget_reg = scratch_reg == no_reg
+ ? __ GetUnusedRegister(kGpReg, {})
+ : LiftoffRegister(scratch_reg);
__ Fill(budget_reg, liftoff::kTierupBudgetOffset, ValueKind::kI32);
LiftoffRegList regs_to_save = __ cache_state()->used_registers;
// The cached instance will be reloaded separately.
@@ -778,11 +773,6 @@ class LiftoffCompiler {
return false;
}
- void TierUpFunction(FullDecoder* decoder) {
- __ CallRuntimeStub(WasmCode::kWasmTriggerTierUp);
- DefineSafepoint();
- }
-
void TraceFunctionEntry(FullDecoder* decoder) {
CODE_COMMENT("trace function entry");
__ SpillAllRegisters();
@@ -792,6 +782,13 @@ class LiftoffCompiler {
DefineSafepoint();
}
+ bool dynamic_tiering() {
+ return env_->dynamic_tiering == DynamicTiering::kEnabled &&
+ for_debugging_ == kNoDebugging &&
+ (FLAG_wasm_tier_up_filter == -1 ||
+ FLAG_wasm_tier_up_filter == func_index_);
+ }
+
void StartFunctionBody(FullDecoder* decoder, Control* block) {
for (uint32_t i = 0; i < __ num_locals(); ++i) {
if (!CheckSupportedType(decoder, __ local_kind(i), "param")) return;
@@ -841,11 +838,11 @@ class LiftoffCompiler {
} else {
__ Spill(liftoff::kFeedbackVectorOffset, WasmValue::ForUintPtr(0));
}
- if (FLAG_new_wasm_dynamic_tiering) {
+ if (dynamic_tiering()) {
LiftoffRegList pinned = parameter_registers;
LiftoffRegister tmp = pinned.set(__ GetUnusedRegister(kGpReg, pinned));
- LOAD_INSTANCE_FIELD(tmp.gp(), NumLiftoffFunctionCallsArray,
- kSystemPointerSize, pinned);
+ LOAD_INSTANCE_FIELD(tmp.gp(), TieringBudgetArray, kSystemPointerSize,
+ pinned);
uint32_t offset =
kInt32Size * declared_function_index(env_->module, func_index_);
__ Load(tmp, tmp.gp(), no_reg, offset, LoadType::kI32Load, pinned);
@@ -911,49 +908,6 @@ class LiftoffCompiler {
// is never a position of any instruction in the function.
StackCheck(decoder, 0);
- if (env_->dynamic_tiering == DynamicTiering::kEnabled &&
- for_debugging_ == kNoDebugging) {
- // TODO(arobin): Avoid spilling registers unconditionally.
- __ SpillAllRegisters();
- CODE_COMMENT("dynamic tiering");
- LiftoffRegList pinned;
-
- // Get the number of calls array address.
- LiftoffRegister array_address =
- pinned.set(__ GetUnusedRegister(kGpReg, pinned));
- LOAD_INSTANCE_FIELD(array_address.gp(), NumLiftoffFunctionCallsArray,
- kSystemPointerSize, pinned);
-
- // Compute the correct offset in the array.
- uint32_t offset =
- kInt32Size * declared_function_index(env_->module, func_index_);
-
- // Get the number of calls and update it.
- LiftoffRegister old_number_of_calls =
- pinned.set(__ GetUnusedRegister(kGpReg, pinned));
- LiftoffRegister new_number_of_calls =
- pinned.set(__ GetUnusedRegister(kGpReg, pinned));
- __ Load(old_number_of_calls, array_address.gp(), no_reg, offset,
- LoadType::kI32Load, pinned);
- __ emit_i32_addi(new_number_of_calls.gp(), old_number_of_calls.gp(), 1);
- __ Store(array_address.gp(), no_reg, offset, new_number_of_calls,
- StoreType::kI32Store, pinned);
-
- // Emit the runtime call if necessary.
- Label no_tierup;
- // Check if the number of calls is a power of 2.
- __ emit_i32_and(old_number_of_calls.gp(), old_number_of_calls.gp(),
- new_number_of_calls.gp());
- __ emit_cond_jump(kNotEqualZero, &no_tierup, kI32,
- old_number_of_calls.gp());
- TierUpFunction(decoder);
- // After the runtime call, the instance cache register is clobbered (we
- // reset it already in {SpillAllRegisters} above, but then we still access
- // the instance afterwards).
- __ cache_state()->ClearCachedInstanceRegister();
- __ bind(&no_tierup);
- }
-
if (FLAG_trace_wasm) TraceFunctionEntry(decoder);
}
@@ -1222,7 +1176,7 @@ class LiftoffCompiler {
PushControl(loop);
- if (!FLAG_new_wasm_dynamic_tiering) {
+ if (!dynamic_tiering()) {
// When the budget-based tiering mechanism is enabled, use that to
// check for interrupt requests; otherwise execute a stack check in the
// loop header.
@@ -1784,7 +1738,10 @@ class LiftoffCompiler {
__ emit_type_conversion(kExprI64UConvertI32, dst, c_call_dst,
nullptr);
});
- case kExprRefIsNull: {
+ case kExprRefIsNull:
+ // We abuse ref.as_non_null, which isn't otherwise used in this switch, as
+ // a sentinel for the negation of ref.is_null.
+ case kExprRefAsNonNull: {
LiftoffRegList pinned;
LiftoffRegister ref = pinned.set(__ PopToRegister());
LiftoffRegister null = __ GetUnusedRegister(kGpReg, pinned);
@@ -1792,7 +1749,8 @@ class LiftoffCompiler {
// Prefer to overwrite one of the input registers with the result
// of the comparison.
LiftoffRegister dst = __ GetUnusedRegister(kGpReg, {ref, null}, {});
- __ emit_ptrsize_set_cond(kEqual, dst.gp(), ref, null);
+ __ emit_ptrsize_set_cond(opcode == kExprRefIsNull ? kEqual : kUnequal,
+ dst.gp(), ref, null);
__ PushRegister(kI32, dst);
return;
}
@@ -2253,8 +2211,12 @@ class LiftoffCompiler {
ValueKind return_kind = decoder->sig_->GetReturn(0).kind();
LiftoffRegister return_reg =
__ LoadToRegister(__ cache_state()->stack_state.back(), pinned);
- __ Store(info.gp(), no_reg, 0, return_reg,
- StoreType::ForValueKind(return_kind), pinned);
+ if (is_reference(return_kind)) {
+ __ StoreTaggedPointer(info.gp(), no_reg, 0, return_reg, pinned);
+ } else {
+ __ Store(info.gp(), no_reg, 0, return_reg,
+ StoreType::ForValueKind(return_kind), pinned);
+ }
}
// Put the parameter in its place.
WasmTraceExitDescriptor descriptor;
@@ -2274,13 +2236,13 @@ class LiftoffCompiler {
}
void TierupCheckOnExit(FullDecoder* decoder) {
- if (!FLAG_new_wasm_dynamic_tiering) return;
- TierupCheck(decoder, decoder->position(), __ pc_offset());
+ if (!dynamic_tiering()) return;
+ TierupCheck(decoder, decoder->position(), __ pc_offset(), no_reg);
LiftoffRegList pinned;
LiftoffRegister budget = pinned.set(__ GetUnusedRegister(kGpReg, pinned));
LiftoffRegister array = pinned.set(__ GetUnusedRegister(kGpReg, pinned));
- LOAD_INSTANCE_FIELD(array.gp(), NumLiftoffFunctionCallsArray,
- kSystemPointerSize, pinned);
+ LOAD_INSTANCE_FIELD(array.gp(), TieringBudgetArray, kSystemPointerSize,
+ pinned);
uint32_t offset =
kInt32Size * declared_function_index(env_->module, func_index_);
__ Fill(budget, liftoff::kTierupBudgetOffset, ValueKind::kI32);
@@ -2620,17 +2582,12 @@ class LiftoffCompiler {
__ PushRegister(kind, dst);
}
- void BrImpl(FullDecoder* decoder, Control* target) {
- if (!target->br_merge()->reached) {
- target->label_state.InitMerge(
- *__ cache_state(), __ num_locals(), target->br_merge()->arity,
- target->stack_depth + target->num_exceptions);
- }
- if (FLAG_new_wasm_dynamic_tiering) {
+ void BrImpl(FullDecoder* decoder, Control* target, Register scratch_reg) {
+ if (dynamic_tiering()) {
if (target->is_loop()) {
DCHECK(target->label.get()->is_bound());
int jump_distance = __ pc_offset() - target->label.get()->pos();
- TierupCheck(decoder, decoder->position(), jump_distance);
+ TierupCheck(decoder, decoder->position(), jump_distance, scratch_reg);
} else {
// To estimate time spent in this function more accurately, we could
// increment the tiering budget on forward jumps. However, we don't
@@ -2638,6 +2595,11 @@ class LiftoffCompiler {
// and found to not make a difference.
}
}
+ if (!target->br_merge()->reached) {
+ target->label_state.InitMerge(
+ *__ cache_state(), __ num_locals(), target->br_merge()->arity,
+ target->stack_depth + target->num_exceptions);
+ }
__ MergeStackWith(target->label_state, target->br_merge()->arity,
target->is_loop() ? LiftoffAssembler::kBackwardJump
: LiftoffAssembler::kForwardJump);
@@ -2646,10 +2608,14 @@ class LiftoffCompiler {
void BrOrRet(FullDecoder* decoder, uint32_t depth,
uint32_t /* drop_values */) {
+ BrOrRetImpl(decoder, depth, no_reg);
+ }
+
+ void BrOrRetImpl(FullDecoder* decoder, uint32_t depth, Register scratch_reg) {
if (depth == decoder->control_depth() - 1) {
DoReturn(decoder, 0);
} else {
- BrImpl(decoder, decoder->control_at(depth));
+ BrImpl(decoder, decoder->control_at(depth), scratch_reg);
}
}
@@ -2662,12 +2628,16 @@ class LiftoffCompiler {
decoder->control_at(depth)->br_merge()->arity);
}
+ Register scratch_reg = no_reg;
+ if (dynamic_tiering()) {
+ scratch_reg = __ GetUnusedRegister(kGpReg, {}).gp();
+ }
Label cont_false;
// Test the condition on the value stack, jump to {cont_false} if zero.
JumpIfFalse(decoder, &cont_false);
- BrOrRet(decoder, depth, 0);
+ BrOrRetImpl(decoder, depth, scratch_reg);
__ bind(&cont_false);
}
@@ -3391,7 +3361,9 @@ class LiftoffCompiler {
CallRef(decoder, func_ref.type, sig, kTailCall);
}
- void BrOnNull(FullDecoder* decoder, const Value& ref_object, uint32_t depth) {
+ void BrOnNull(FullDecoder* decoder, const Value& ref_object, uint32_t depth,
+ bool pass_null_along_branch,
+ Value* /* result_on_fallthrough */) {
// Before branching, materialize all constants. This avoids repeatedly
// materializing them for each conditional branch.
if (depth != decoder->control_depth() - 1) {
@@ -3406,7 +3378,7 @@ class LiftoffCompiler {
LoadNullValue(null, pinned);
__ emit_cond_jump(kUnequal, &cont_false, ref_object.type.kind(), ref.gp(),
null);
-
+ if (pass_null_along_branch) LoadNullValue(null, pinned);
BrOrRet(decoder, depth, 0);
__ bind(&cont_false);
__ PushRegister(kRef, ref);
@@ -5474,13 +5446,12 @@ class LiftoffCompiler {
obj.type.kind(), obj_reg.gp(), tmp1.gp());
}
- // Perform a regular type check. Check for exact match first.
__ LoadMap(tmp1.gp(), obj_reg.gp());
// {tmp1} now holds the object's map.
if (decoder->module_->has_signature(rtt.type.ref_index())) {
- // Function case: currently, the only way for a function to match an rtt
- // is if its map is equal to that rtt.
+ // Function case: currently, the only way for the type check to succeed is
+ // that the function's map equals the rtt.
__ emit_cond_jump(kUnequal, no_match, rtt.type.kind(), tmp1.gp(),
rtt_reg.gp());
__ bind(&match);
@@ -5499,17 +5470,19 @@ class LiftoffCompiler {
constexpr int kTypeInfoOffset = wasm::ObjectAccess::ToTagged(
Map::kConstructorOrBackPointerOrNativeContextOffset);
__ LoadTaggedPointer(tmp1.gp(), tmp1.gp(), no_reg, kTypeInfoOffset, pinned);
- // Step 2: load the super types list into {tmp1}.
+ // Step 2: load the supertypes list into {tmp1}.
constexpr int kSuperTypesOffset =
wasm::ObjectAccess::ToTagged(WasmTypeInfo::kSupertypesOffset);
__ LoadTaggedPointer(tmp1.gp(), tmp1.gp(), no_reg, kSuperTypesOffset,
pinned);
- // Step 3: check the list's length.
- LiftoffRegister list_length = tmp2;
- __ LoadFixedArrayLengthAsInt32(list_length, tmp1.gp(), pinned);
if (rtt.type.has_depth()) {
- __ emit_i32_cond_jumpi(kUnsignedLessEqual, no_match, list_length.gp(),
- rtt.type.depth());
+ // Step 3: check the list's length if needed.
+ if (rtt.type.depth() >= kMinimumSupertypeArraySize) {
+ LiftoffRegister list_length = tmp2;
+ __ LoadFixedArrayLengthAsInt32(list_length, tmp1.gp(), pinned);
+ __ emit_i32_cond_jumpi(kUnsignedLessEqual, no_match, list_length.gp(),
+ rtt.type.depth());
+ }
// Step 4: load the candidate list slot into {tmp1}, and compare it.
__ LoadTaggedPointer(
tmp1.gp(), tmp1.gp(), no_reg,
@@ -5518,6 +5491,9 @@ class LiftoffCompiler {
__ emit_cond_jump(kUnequal, no_match, rtt.type.kind(), tmp1.gp(),
rtt_reg.gp());
} else {
+ // Step 3: if rtt's depth is unknown, we invoke a builtin to compute the
+ // result, as we might not have enough available registers.
+
// Preserve {obj_reg} across the call.
LiftoffRegList saved_regs = LiftoffRegList::ForRegs(obj_reg);
__ PushRegisters(saved_regs);
@@ -5664,7 +5640,8 @@ class LiftoffCompiler {
__ Load(tmp1, tmp1.gp(), no_reg,
wasm::ObjectAccess::ToTagged(Map::kInstanceTypeOffset),
LoadType::kI32Load16U, pinned);
- __ emit_i32_cond_jumpi(kUnequal, no_match, tmp1.gp(), JS_FUNCTION_TYPE);
+ __ emit_i32_cond_jumpi(kUnequal, no_match, tmp1.gp(),
+ WASM_INTERNAL_FUNCTION_TYPE);
return obj_reg;
}
@@ -6072,7 +6049,8 @@ class LiftoffCompiler {
__ LoadConstant(index, WasmValue::ForUintPtr(vector_slot));
LiftoffAssembler::VarState index_var(kIntPtrKind, index, 0);
- // CallRefIC(vector: FixedArray, index: intptr, funcref: JSFunction)
+ // CallRefIC(vector: FixedArray, index: intptr,
+ // funcref: WasmInternalFunction)
CallRuntimeStub(WasmCode::kCallRefIC,
MakeSig::Returns(kPointerKind, kPointerKind)
.Params(kPointerKind, kIntPtrKind, kPointerKind),
@@ -6102,32 +6080,22 @@ class LiftoffCompiler {
LiftoffRegister target = pinned.set(__ GetUnusedRegister(kGpReg, pinned));
LiftoffRegister temp = pinned.set(__ GetUnusedRegister(kGpReg, pinned));
- // Load the WasmFunctionData.
- LiftoffRegister func_data = func_ref;
- __ LoadTaggedPointer(
- func_data.gp(), func_ref.gp(), no_reg,
- wasm::ObjectAccess::ToTagged(JSFunction::kSharedFunctionInfoOffset),
- pinned);
- __ LoadTaggedPointer(
- func_data.gp(), func_data.gp(), no_reg,
- wasm::ObjectAccess::ToTagged(SharedFunctionInfo::kFunctionDataOffset),
- pinned);
-
// Load "ref" (instance or WasmApiFunctionRef) and target.
__ LoadTaggedPointer(
- instance.gp(), func_data.gp(), no_reg,
- wasm::ObjectAccess::ToTagged(WasmFunctionData::kRefOffset), pinned);
+ instance.gp(), func_ref.gp(), no_reg,
+ wasm::ObjectAccess::ToTagged(WasmInternalFunction::kRefOffset),
+ pinned);
#ifdef V8_HEAP_SANDBOX
LOAD_INSTANCE_FIELD(temp.gp(), IsolateRoot, kSystemPointerSize, pinned);
- __ LoadExternalPointer(target.gp(), func_data.gp(),
- WasmFunctionData::kForeignAddressOffset,
+ __ LoadExternalPointer(target.gp(), func_ref.gp(),
+ WasmInternalFunction::kForeignAddressOffset,
kForeignForeignAddressTag, temp.gp());
#else
- __ Load(
- target, func_data.gp(), no_reg,
- wasm::ObjectAccess::ToTagged(WasmFunctionData::kForeignAddressOffset),
- kPointerLoadType, pinned);
+ __ Load(target, func_ref.gp(), no_reg,
+ wasm::ObjectAccess::ToTagged(
+ WasmInternalFunction::kForeignAddressOffset),
+ kPointerLoadType, pinned);
#endif
Label perform_call;
@@ -6137,10 +6105,10 @@ class LiftoffCompiler {
__ emit_cond_jump(kUnequal, &perform_call, kRef, target.gp(),
null_address.gp());
// The cached target can only be null for WasmJSFunctions.
- __ LoadTaggedPointer(target.gp(), func_data.gp(), no_reg,
- wasm::ObjectAccess::ToTagged(
- WasmJSFunctionData::kWasmToJsWrapperCodeOffset),
- pinned);
+ __ LoadTaggedPointer(
+ target.gp(), func_ref.gp(), no_reg,
+ wasm::ObjectAccess::ToTagged(WasmInternalFunction::kCodeOffset),
+ pinned);
#ifdef V8_EXTERNAL_CODE_SPACE
__ LoadCodeDataContainerEntry(target.gp(), target.gp());
#else
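
The Liftoff changes above replace the old call-counting tier-up trigger with a per-function budget read from the instance's TieringBudgetArray; each TierupCheck charges an estimate of the work done since the last check, capped at a quarter of the configured budget so no single check drains it. The hunk does not show the subtraction and trigger themselves, so the following is only a conceptual sketch of that bookkeeping under those assumptions, not V8's code.

#include <cstdint>

// Conceptual per-function tier-up budget (names are illustrative).
struct TieringBudget {
  int32_t remaining;   // starts at the value of --wasm-tiering-budget
  int32_t max_charge;  // FLAG_wasm_tiering_budget / 4, as in TierupCheck
};

// Charge the budget for an estimate of work done; returns true when the
// caller should trigger tier-up of this function.
bool ChargeTieringBudget(TieringBudget* budget, int32_t budget_used) {
  if (budget_used > budget->max_charge) budget_used = budget->max_charge;
  budget->remaining -= budget_used;
  return budget->remaining < 0;
}
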
diff --git a/deps/v8/src/wasm/baseline/mips/liftoff-assembler-mips.h b/deps/v8/src/wasm/baseline/mips/liftoff-assembler-mips.h
index f2f81464cf..7dec2ea677 100644
--- a/deps/v8/src/wasm/baseline/mips/liftoff-assembler-mips.h
+++ b/deps/v8/src/wasm/baseline/mips/liftoff-assembler-mips.h
@@ -447,7 +447,7 @@ void LiftoffAssembler::LoadConstant(LiftoffRegister reg, WasmValue value,
TurboAssembler::li(reg.gp(), Operand(value.to_i32(), rmode));
break;
case kI64: {
- DCHECK(RelocInfo::IsNone(rmode));
+ DCHECK(RelocInfo::IsNoInfo(rmode));
int32_t low_word = value.to_i64();
int32_t high_word = value.to_i64() >> 32;
TurboAssembler::li(reg.low_gp(), Operand(low_word));
diff --git a/deps/v8/src/wasm/baseline/ppc/liftoff-assembler-ppc.h b/deps/v8/src/wasm/baseline/ppc/liftoff-assembler-ppc.h
index dba186c66c..1de2817563 100644
--- a/deps/v8/src/wasm/baseline/ppc/liftoff-assembler-ppc.h
+++ b/deps/v8/src/wasm/baseline/ppc/liftoff-assembler-ppc.h
@@ -410,14 +410,32 @@ void LiftoffAssembler::Load(LiftoffRegister dst, Register src_addr,
break;
case LoadType::kF32Load:
if (is_load_mem) {
- LoadF32LE(dst.fp(), src_op, r0, ip);
+        // `ip` may already be in use as {offset_reg}; if so, spill another scratch.
+ Register scratch = ip;
+ if (offset_reg == ip) {
+ scratch = GetRegisterThatIsNotOneOf(src_addr);
+ push(scratch);
+ }
+ LoadF32LE(dst.fp(), src_op, r0, scratch);
+ if (offset_reg == ip) {
+ pop(scratch);
+ }
} else {
LoadF32(dst.fp(), src_op, r0);
}
break;
case LoadType::kF64Load:
if (is_load_mem) {
- LoadF64LE(dst.fp(), src_op, r0, ip);
+        // `ip` may already be in use as {offset_reg}; if so, spill another scratch.
+ Register scratch = ip;
+ if (offset_reg == ip) {
+ scratch = GetRegisterThatIsNotOneOf(src_addr);
+ push(scratch);
+ }
+ LoadF64LE(dst.fp(), src_op, r0, scratch);
+ if (offset_reg == ip) {
+ pop(scratch);
+ }
} else {
LoadF64(dst.fp(), src_op, r0);
}
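Both LE-load fixes guard against `ip` doubling as the offset register: if it does, a different register is chosen, spilled, used as the scratch, and restored. A minimal stand-alone model of that "pick a register not in this set" idea (the real GetRegisterThatIsNotOneOf takes Register operands; this is only an illustration):

#include <cassert>
#include <initializer_list>

// Returns the first candidate register id that is not in `in_use`.
int PickScratch(std::initializer_list<int> in_use) {
  for (int candidate = 3; candidate < 12; ++candidate) {  // arbitrary pool for the sketch
    bool clashes = false;
    for (int r : in_use) clashes |= (r == candidate);
    if (!clashes) return candidate;
  }
  assert(false && "no free scratch register");
  return -1;
}

// Usage pattern matching the diff: the chosen register may hold a live value,
// so the caller spills it around the use:
//   int scratch = PickScratch({src_addr, offset_reg});
//   push(scratch); ... use scratch ...; pop(scratch);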
@@ -511,76 +529,91 @@ constexpr bool is_be = true;
constexpr bool is_be = false;
#endif
-#define ATOMIC_OP(instr) \
- { \
- Register offset = r0; \
- if (offset_imm != 0) { \
- mov(ip, Operand(offset_imm)); \
- if (offset_reg != no_reg) { \
- add(ip, ip, offset_reg); \
- } \
- offset = ip; \
- } else { \
- if (offset_reg != no_reg) { \
- offset = offset_reg; \
- } \
- } \
- \
- MemOperand dst = MemOperand(offset, dst_addr); \
- \
- switch (type.value()) { \
- case StoreType::kI32Store8: \
- case StoreType::kI64Store8: { \
- auto op_func = [&](Register dst, Register lhs, Register rhs) { \
- instr(dst, lhs, rhs); \
- }; \
- AtomicOps<uint8_t>(dst, value.gp(), result.gp(), r0, op_func); \
- break; \
- } \
- case StoreType::kI32Store16: \
- case StoreType::kI64Store16: { \
- auto op_func = [&](Register dst, Register lhs, Register rhs) { \
- if (is_be) { \
- ByteReverseU16(dst, lhs); \
- instr(dst, dst, rhs); \
- ByteReverseU16(dst, dst); \
- } else { \
- instr(dst, lhs, rhs); \
- } \
- }; \
- AtomicOps<uint16_t>(dst, value.gp(), result.gp(), r0, op_func); \
- break; \
- } \
- case StoreType::kI32Store: \
- case StoreType::kI64Store32: { \
- auto op_func = [&](Register dst, Register lhs, Register rhs) { \
- if (is_be) { \
- ByteReverseU32(dst, lhs); \
- instr(dst, dst, rhs); \
- ByteReverseU32(dst, dst); \
- } else { \
- instr(dst, lhs, rhs); \
- } \
- }; \
- AtomicOps<uint32_t>(dst, value.gp(), result.gp(), r0, op_func); \
- break; \
- } \
- case StoreType::kI64Store: { \
- auto op_func = [&](Register dst, Register lhs, Register rhs) { \
- if (is_be) { \
- ByteReverseU64(dst, lhs); \
- instr(dst, dst, rhs); \
- ByteReverseU64(dst, dst); \
- } else { \
- instr(dst, lhs, rhs); \
- } \
- }; \
- AtomicOps<uint64_t>(dst, value.gp(), result.gp(), r0, op_func); \
- break; \
- } \
- default: \
- UNREACHABLE(); \
- } \
+#define ATOMIC_OP(instr) \
+ { \
+ Register offset = r0; \
+ if (offset_imm != 0) { \
+ mov(ip, Operand(offset_imm)); \
+ if (offset_reg != no_reg) { \
+ add(ip, ip, offset_reg); \
+ } \
+ offset = ip; \
+ } else { \
+ if (offset_reg != no_reg) { \
+ offset = offset_reg; \
+ } \
+ } \
+ \
+ MemOperand dst = MemOperand(offset, dst_addr); \
+ \
+ switch (type.value()) { \
+ case StoreType::kI32Store8: \
+ case StoreType::kI64Store8: { \
+ auto op_func = [&](Register dst, Register lhs, Register rhs) { \
+ instr(dst, lhs, rhs); \
+ }; \
+ AtomicOps<uint8_t>(dst, value.gp(), result.gp(), r0, op_func); \
+ break; \
+ } \
+ case StoreType::kI32Store16: \
+ case StoreType::kI64Store16: { \
+ auto op_func = [&](Register dst, Register lhs, Register rhs) { \
+ if (is_be) { \
+ Register scratch = GetRegisterThatIsNotOneOf(lhs, rhs, dst); \
+ push(scratch); \
+ ByteReverseU16(dst, lhs, scratch); \
+ instr(dst, dst, rhs); \
+ ByteReverseU16(dst, dst, scratch); \
+ pop(scratch); \
+ } else { \
+ instr(dst, lhs, rhs); \
+ } \
+ }; \
+ AtomicOps<uint16_t>(dst, value.gp(), result.gp(), r0, op_func); \
+ if (is_be) { \
+ ByteReverseU16(result.gp(), result.gp(), ip); \
+ } \
+ break; \
+ } \
+ case StoreType::kI32Store: \
+ case StoreType::kI64Store32: { \
+ auto op_func = [&](Register dst, Register lhs, Register rhs) { \
+ if (is_be) { \
+ Register scratch = GetRegisterThatIsNotOneOf(lhs, rhs, dst); \
+ push(scratch); \
+ ByteReverseU32(dst, lhs, scratch); \
+ instr(dst, dst, rhs); \
+ ByteReverseU32(dst, dst, scratch); \
+ pop(scratch); \
+ } else { \
+ instr(dst, lhs, rhs); \
+ } \
+ }; \
+ AtomicOps<uint32_t>(dst, value.gp(), result.gp(), r0, op_func); \
+ if (is_be) { \
+ ByteReverseU32(result.gp(), result.gp(), ip); \
+ } \
+ break; \
+ } \
+ case StoreType::kI64Store: { \
+ auto op_func = [&](Register dst, Register lhs, Register rhs) { \
+ if (is_be) { \
+ ByteReverseU64(dst, lhs); \
+ instr(dst, dst, rhs); \
+ ByteReverseU64(dst, dst); \
+ } else { \
+ instr(dst, lhs, rhs); \
+ } \
+ }; \
+ AtomicOps<uint64_t>(dst, value.gp(), result.gp(), r0, op_func); \
+ if (is_be) { \
+ ByteReverseU64(result.gp(), result.gp()); \
+ } \
+ break; \
+ } \
+ default: \
+ UNREACHABLE(); \
+ } \
}
void LiftoffAssembler::AtomicAdd(Register dst_addr, Register offset_reg,
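The rewritten ATOMIC_OP byte-reverses through a freshly picked (and spilled) scratch register and, in addition, reverses the returned old value after the atomic loop. A scalar model of the big-endian path, with a plain load/store standing in for the lwarx/stwcx. loop:

#include <cstdint>

static uint32_t ByteSwap32(uint32_t v) {
  return (v >> 24) | ((v >> 8) & 0xff00u) | ((v << 8) & 0xff0000u) | (v << 24);
}

// Memory holds the wasm value in little-endian order; a native load on a
// big-endian host therefore sees it byte-swapped ("raw").
uint32_t AtomicAddBE(uint32_t* cell, uint32_t operand) {
  uint32_t raw_old = *cell;                // what the atomic loop loads
  uint32_t logical = ByteSwap32(raw_old);  // ByteReverseU32(dst, lhs, scratch)
  logical += operand;                      // instr(dst, dst, rhs)
  *cell = ByteSwap32(logical);             // reverse back before the store
  return ByteSwap32(raw_old);              // ByteReverseU32(result, result, ip)
}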
@@ -617,9 +650,6 @@ void LiftoffAssembler::AtomicExchange(Register dst_addr, Register offset_reg,
uintptr_t offset_imm,
LiftoffRegister value,
LiftoffRegister result, StoreType type) {
-#if defined(V8_OS_AIX)
- bailout(kUnsupportedArchitecture, "atomic");
-#else
Register offset = r0;
if (offset_imm != 0) {
mov(ip, Operand(offset_imm));
@@ -642,9 +672,12 @@ void LiftoffAssembler::AtomicExchange(Register dst_addr, Register offset_reg,
case StoreType::kI32Store16:
case StoreType::kI64Store16: {
if (is_be) {
- ByteReverseU16(r0, value.gp());
+ Register scratch = GetRegisterThatIsNotOneOf(value.gp(), result.gp());
+ push(scratch);
+ ByteReverseU16(r0, value.gp(), scratch);
+ pop(scratch);
TurboAssembler::AtomicExchange<uint16_t>(dst, r0, result.gp());
- ByteReverseU16(result.gp(), result.gp());
+ ByteReverseU16(result.gp(), result.gp(), ip);
} else {
TurboAssembler::AtomicExchange<uint16_t>(dst, value.gp(), result.gp());
}
@@ -653,9 +686,12 @@ void LiftoffAssembler::AtomicExchange(Register dst_addr, Register offset_reg,
case StoreType::kI32Store:
case StoreType::kI64Store32: {
if (is_be) {
- ByteReverseU32(r0, value.gp());
+ Register scratch = GetRegisterThatIsNotOneOf(value.gp(), result.gp());
+ push(scratch);
+ ByteReverseU32(r0, value.gp(), scratch);
+ pop(scratch);
TurboAssembler::AtomicExchange<uint32_t>(dst, r0, result.gp());
- ByteReverseU32(result.gp(), result.gp());
+ ByteReverseU32(result.gp(), result.gp(), ip);
} else {
TurboAssembler::AtomicExchange<uint32_t>(dst, value.gp(), result.gp());
}
@@ -674,16 +710,12 @@ void LiftoffAssembler::AtomicExchange(Register dst_addr, Register offset_reg,
default:
UNREACHABLE();
}
-#endif
}
void LiftoffAssembler::AtomicCompareExchange(
Register dst_addr, Register offset_reg, uintptr_t offset_imm,
LiftoffRegister expected, LiftoffRegister new_value, LiftoffRegister result,
StoreType type) {
-#if defined(V8_OS_AIX)
- bailout(kUnsupportedArchitecture, "atomic");
-#else
Register offset = r0;
if (offset_imm != 0) {
mov(ip, Operand(offset_imm));
@@ -707,13 +739,17 @@ void LiftoffAssembler::AtomicCompareExchange(
case StoreType::kI32Store16:
case StoreType::kI64Store16: {
if (is_be) {
- Push(r3, r4);
- ByteReverseU16(r3, new_value.gp());
- ByteReverseU16(r4, expected.gp());
- TurboAssembler::AtomicCompareExchange<uint16_t>(dst, r4, r3,
- result.gp(), r0);
- ByteReverseU16(result.gp(), result.gp());
- Pop(r3, r4);
+ Push(new_value.gp(), expected.gp());
+ Register scratch = GetRegisterThatIsNotOneOf(
+ new_value.gp(), expected.gp(), result.gp());
+ push(scratch);
+ ByteReverseU16(new_value.gp(), new_value.gp(), scratch);
+ ByteReverseU16(expected.gp(), expected.gp(), scratch);
+ pop(scratch);
+ TurboAssembler::AtomicCompareExchange<uint16_t>(
+ dst, expected.gp(), new_value.gp(), result.gp(), r0);
+ ByteReverseU16(result.gp(), result.gp(), r0);
+ Pop(new_value.gp(), expected.gp());
} else {
TurboAssembler::AtomicCompareExchange<uint16_t>(
dst, expected.gp(), new_value.gp(), result.gp(), r0);
@@ -723,13 +759,17 @@ void LiftoffAssembler::AtomicCompareExchange(
case StoreType::kI32Store:
case StoreType::kI64Store32: {
if (is_be) {
- Push(r3, r4);
- ByteReverseU32(r3, new_value.gp());
- ByteReverseU32(r4, expected.gp());
- TurboAssembler::AtomicCompareExchange<uint32_t>(dst, r4, r3,
- result.gp(), r0);
- ByteReverseU32(result.gp(), result.gp());
- Pop(r3, r4);
+ Push(new_value.gp(), expected.gp());
+ Register scratch = GetRegisterThatIsNotOneOf(
+ new_value.gp(), expected.gp(), result.gp());
+ push(scratch);
+ ByteReverseU32(new_value.gp(), new_value.gp(), scratch);
+ ByteReverseU32(expected.gp(), expected.gp(), scratch);
+ pop(scratch);
+ TurboAssembler::AtomicCompareExchange<uint32_t>(
+ dst, expected.gp(), new_value.gp(), result.gp(), r0);
+ ByteReverseU32(result.gp(), result.gp(), r0);
+ Pop(new_value.gp(), expected.gp());
} else {
TurboAssembler::AtomicCompareExchange<uint32_t>(
dst, expected.gp(), new_value.gp(), result.gp(), r0);
@@ -738,13 +778,13 @@ void LiftoffAssembler::AtomicCompareExchange(
}
case StoreType::kI64Store: {
if (is_be) {
- Push(r3, r4);
- ByteReverseU64(r3, new_value.gp());
- ByteReverseU64(r4, expected.gp());
- TurboAssembler::AtomicCompareExchange<uint64_t>(dst, r4, r3,
- result.gp(), r0);
+ Push(new_value.gp(), expected.gp());
+ ByteReverseU64(new_value.gp(), new_value.gp());
+ ByteReverseU64(expected.gp(), expected.gp());
+ TurboAssembler::AtomicCompareExchange<uint64_t>(
+ dst, expected.gp(), new_value.gp(), result.gp(), r0);
ByteReverseU64(result.gp(), result.gp());
- Pop(r3, r4);
+ Pop(new_value.gp(), expected.gp());
} else {
TurboAssembler::AtomicCompareExchange<uint64_t>(
dst, expected.gp(), new_value.gp(), result.gp(), r0);
@@ -754,7 +794,6 @@ void LiftoffAssembler::AtomicCompareExchange(
default:
UNREACHABLE();
}
-#endif
}
void LiftoffAssembler::AtomicFence() { sync(); }
diff --git a/deps/v8/src/wasm/baseline/riscv64/liftoff-assembler-riscv64.h b/deps/v8/src/wasm/baseline/riscv64/liftoff-assembler-riscv64.h
index 0b47e8535f..e53797ff74 100644
--- a/deps/v8/src/wasm/baseline/riscv64/liftoff-assembler-riscv64.h
+++ b/deps/v8/src/wasm/baseline/riscv64/liftoff-assembler-riscv64.h
@@ -1691,21 +1691,150 @@ void LiftoffAssembler::LoadTransform(LiftoffRegister dst, Register src_addr,
LoadType type,
LoadTransformationKind transform,
uint32_t* protected_load_pc) {
- bailout(kSimd, "load extend and load splat unimplemented");
+ UseScratchRegisterScope temps(this);
+ Register scratch = temps.Acquire();
+ MemOperand src_op = liftoff::GetMemOp(this, src_addr, offset_reg, offset_imm);
+ VRegister dst_v = dst.fp().toV();
+ *protected_load_pc = pc_offset();
+
+ MachineType memtype = type.mem_type();
+ if (transform == LoadTransformationKind::kExtend) {
+ Ld(scratch, src_op);
+ if (memtype == MachineType::Int8()) {
+ VU.set(kScratchReg, E64, m1);
+ vmv_vx(kSimd128ScratchReg, scratch);
+ VU.set(kScratchReg, E16, m1);
+ vsext_vf2(dst_v, kSimd128ScratchReg);
+ } else if (memtype == MachineType::Uint8()) {
+ VU.set(kScratchReg, E64, m1);
+ vmv_vx(kSimd128ScratchReg, scratch);
+ VU.set(kScratchReg, E16, m1);
+ vzext_vf2(dst_v, kSimd128ScratchReg);
+ } else if (memtype == MachineType::Int16()) {
+ VU.set(kScratchReg, E64, m1);
+ vmv_vx(kSimd128ScratchReg, scratch);
+ VU.set(kScratchReg, E32, m1);
+ vsext_vf2(dst_v, kSimd128ScratchReg);
+ } else if (memtype == MachineType::Uint16()) {
+ VU.set(kScratchReg, E64, m1);
+ vmv_vx(kSimd128ScratchReg, scratch);
+ VU.set(kScratchReg, E32, m1);
+ vzext_vf2(dst_v, kSimd128ScratchReg);
+ } else if (memtype == MachineType::Int32()) {
+ VU.set(kScratchReg, E64, m1);
+ vmv_vx(kSimd128ScratchReg, scratch);
+ vsext_vf2(dst_v, kSimd128ScratchReg);
+ } else if (memtype == MachineType::Uint32()) {
+ VU.set(kScratchReg, E64, m1);
+ vmv_vx(kSimd128ScratchReg, scratch);
+ vzext_vf2(dst_v, kSimd128ScratchReg);
+ }
+ } else if (transform == LoadTransformationKind::kZeroExtend) {
+ vxor_vv(dst_v, dst_v, dst_v);
+ if (memtype == MachineType::Int32()) {
+ VU.set(kScratchReg, E32, m1);
+ Lwu(scratch, src_op);
+ li(kScratchReg, 0x1 << 0);
+ vmv_sx(v0, kScratchReg);
+ vmerge_vx(dst_v, scratch, dst_v);
+ } else {
+ DCHECK_EQ(MachineType::Int64(), memtype);
+ VU.set(kScratchReg, E64, m1);
+ Ld(scratch, src_op);
+ li(kScratchReg, 0x1 << 0);
+ vmv_sx(v0, kScratchReg);
+ vmerge_vx(dst_v, scratch, dst_v);
+ }
+ } else {
+ DCHECK_EQ(LoadTransformationKind::kSplat, transform);
+ if (memtype == MachineType::Int8()) {
+ VU.set(kScratchReg, E8, m1);
+ Lb(scratch, src_op);
+ vmv_vx(dst_v, scratch);
+ } else if (memtype == MachineType::Int16()) {
+ VU.set(kScratchReg, E16, m1);
+ Lh(scratch, src_op);
+ vmv_vx(dst_v, scratch);
+ } else if (memtype == MachineType::Int32()) {
+ VU.set(kScratchReg, E32, m1);
+ Lw(scratch, src_op);
+ vmv_vx(dst_v, scratch);
+ } else if (memtype == MachineType::Int64()) {
+ VU.set(kScratchReg, E64, m1);
+ Ld(scratch, src_op);
+ vmv_vx(dst_v, scratch);
+ }
+ }
}
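The new LoadTransform path loads a scalar, moves it into a vector register, and then widens, splats, or zero-extends it depending on the transform kind. A scalar reference for the load-extend case, v128.load32x2_s, on a little-endian host:

#include <cstdint>
#include <cstring>

// v128.load32x2_s: read 8 bytes, sign-extend each 32-bit half into a 64-bit
// lane. (Splat copies one scalar into every lane; zero-extend places the
// scalar into lane 0 and clears the rest.)
void Load32x2S(const uint8_t* mem, int64_t out_lanes[2]) {
  uint64_t bits;
  std::memcpy(&bits, mem, sizeof(bits));                      // Ld(scratch, src_op)
  out_lanes[0] = static_cast<int32_t>(bits & 0xffffffffu);    // vsext_vf2, lane 0
  out_lanes[1] = static_cast<int32_t>(bits >> 32);            // vsext_vf2, lane 1
}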
void LiftoffAssembler::LoadLane(LiftoffRegister dst, LiftoffRegister src,
Register addr, Register offset_reg,
uintptr_t offset_imm, LoadType type,
uint8_t laneidx, uint32_t* protected_load_pc) {
- bailout(kSimd, "loadlane");
+ MemOperand src_op = liftoff::GetMemOp(this, addr, offset_reg, offset_imm);
+ MachineType mem_type = type.mem_type();
+ UseScratchRegisterScope temps(this);
+ Register scratch = temps.Acquire();
+ *protected_load_pc = pc_offset();
+ if (mem_type == MachineType::Int8()) {
+ Lbu(scratch, src_op);
+ VU.set(kScratchReg, E64, m1);
+ li(kScratchReg, 0x1 << laneidx);
+ vmv_sx(v0, kScratchReg);
+ VU.set(kScratchReg, E8, m1);
+ vmerge_vx(dst.fp().toV(), scratch, dst.fp().toV());
+ } else if (mem_type == MachineType::Int16()) {
+ Lhu(scratch, src_op);
+ VU.set(kScratchReg, E16, m1);
+ li(kScratchReg, 0x1 << laneidx);
+ vmv_sx(v0, kScratchReg);
+ vmerge_vx(dst.fp().toV(), scratch, dst.fp().toV());
+ } else if (mem_type == MachineType::Int32()) {
+ Lwu(scratch, src_op);
+ VU.set(kScratchReg, E32, m1);
+ li(kScratchReg, 0x1 << laneidx);
+ vmv_sx(v0, kScratchReg);
+ vmerge_vx(dst.fp().toV(), scratch, dst.fp().toV());
+ } else if (mem_type == MachineType::Int64()) {
+ Ld(scratch, src_op);
+ VU.set(kScratchReg, E64, m1);
+ li(kScratchReg, 0x1 << laneidx);
+ vmv_sx(v0, kScratchReg);
+ vmerge_vx(dst.fp().toV(), scratch, dst.fp().toV());
+ } else {
+ UNREACHABLE();
+ }
}
void LiftoffAssembler::StoreLane(Register dst, Register offset,
uintptr_t offset_imm, LiftoffRegister src,
StoreType type, uint8_t lane,
uint32_t* protected_store_pc) {
- bailout(kSimd, "StoreLane");
+ MemOperand dst_op = liftoff::GetMemOp(this, dst, offset, offset_imm);
+ if (protected_store_pc) *protected_store_pc = pc_offset();
+ MachineRepresentation rep = type.mem_rep();
+ if (rep == MachineRepresentation::kWord8) {
+ VU.set(kScratchReg, E8, m1);
+ vslidedown_vi(kSimd128ScratchReg, src.fp().toV(), lane);
+ vmv_xs(kScratchReg, kSimd128ScratchReg);
+ Sb(kScratchReg, dst_op);
+ } else if (rep == MachineRepresentation::kWord16) {
+ VU.set(kScratchReg, E16, m1);
+ vslidedown_vi(kSimd128ScratchReg, src.fp().toV(), lane);
+ vmv_xs(kScratchReg, kSimd128ScratchReg);
+ Sh(kScratchReg, dst_op);
+ } else if (rep == MachineRepresentation::kWord32) {
+ VU.set(kScratchReg, E32, m1);
+ vslidedown_vi(kSimd128ScratchReg, src.fp().toV(), lane);
+ vmv_xs(kScratchReg, kSimd128ScratchReg);
+ Sw(kScratchReg, dst_op);
+ } else {
+ DCHECK_EQ(MachineRepresentation::kWord64, rep);
+ VU.set(kScratchReg, E64, m1);
+ vslidedown_vi(kSimd128ScratchReg, src.fp().toV(), lane);
+ vmv_xs(kScratchReg, kSimd128ScratchReg);
+ Sd(kScratchReg, dst_op);
+ }
}
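LoadLane/StoreLane touch a single lane only: a scalar is loaded and merged into the destination under a one-bit mask, or slid down out of the source vector and stored. A scalar sketch of the 32-bit case:

#include <cstdint>
#include <cstring>

// load32_lane: replace lane `idx` of a 4-lane vector with the value at `mem`.
void Load32Lane(uint32_t lanes[4], const uint8_t* mem, int idx) {
  uint32_t v;
  std::memcpy(&v, mem, sizeof(v));   // Lwu(scratch, src_op)
  lanes[idx] = v;                    // vmerge_vx under the 1 << laneidx mask
}

// store32_lane: write lane `idx` of the vector to `mem`.
void Store32Lane(uint8_t* mem, const uint32_t lanes[4], int idx) {
  uint32_t v = lanes[idx];           // vslidedown_vi + vmv_xs
  std::memcpy(mem, &v, sizeof(v));   // Sw(kScratchReg, dst_op)
}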
void LiftoffAssembler::emit_i8x16_shuffle(LiftoffRegister dst,
@@ -1754,7 +1883,13 @@ void LiftoffAssembler::emit_i8x16_popcnt(LiftoffRegister dst,
void LiftoffAssembler::emit_i8x16_swizzle(LiftoffRegister dst,
LiftoffRegister lhs,
LiftoffRegister rhs) {
- bailout(kSimd, "emit_i8x16_swizzle");
+ VU.set(kScratchReg, E8, m1);
+ if (dst == lhs) {
+ vrgather_vv(kSimd128ScratchReg, lhs.fp().toV(), rhs.fp().toV());
+ vmv_vv(dst.fp().toV(), kSimd128ScratchReg);
+ } else {
+ vrgather_vv(dst.fp().toV(), lhs.fp().toV(), rhs.fp().toV());
+ }
}
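i8x16.swizzle gathers bytes of `lhs` at the indices held in `rhs`, with indices >= 16 producing 0, which matches what vrgather_vv yields for out-of-range indices; the detour through kSimd128ScratchReg is needed because vrgather's destination must not alias its sources. Scalar model:

#include <cstdint>

// i8x16.swizzle: dst[i] = idx[i] < 16 ? src[idx[i]] : 0. A temporary keeps the
// result correct even when dst aliases src (cf. the extra vmv_vv copy above).
void Swizzle(uint8_t dst[16], const uint8_t src[16], const uint8_t idx[16]) {
  uint8_t tmp[16];
  for (int i = 0; i < 16; ++i) tmp[i] = idx[i] < 16 ? src[idx[i]] : 0;
  for (int i = 0; i < 16; ++i) dst[i] = tmp[i];
}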
void LiftoffAssembler::emit_i8x16_splat(LiftoffRegister dst,
@@ -1815,31 +1950,163 @@ void LiftoffAssembler::emit_f64x2_splat(LiftoffRegister dst,
vmv_vx(dst.fp().toV(), kScratchReg);
}
-#define SIMD_BINOP(name1, name2) \
- void LiftoffAssembler::emit_##name1##_extmul_low_##name2( \
- LiftoffRegister dst, LiftoffRegister src1, LiftoffRegister src2) { \
- bailout(kSimd, "emit_" #name1 "_extmul_low_" #name2); \
- } \
- void LiftoffAssembler::emit_##name1##_extmul_high_##name2( \
- LiftoffRegister dst, LiftoffRegister src1, LiftoffRegister src2) { \
- bailout(kSimd, "emit_" #name1 "_extmul_high_" #name2); \
+void LiftoffAssembler::emit_i64x2_extmul_low_i32x4_s(LiftoffRegister dst,
+ LiftoffRegister src1,
+ LiftoffRegister src2) {
+ VU.set(kScratchReg, E32, mf2);
+ VRegister dst_v = dst.fp().toV();
+ if (dst == src1 || dst == src2) {
+ dst_v = kSimd128ScratchReg3;
+ }
+ vwmul_vv(dst_v, src2.fp().toV(), src1.fp().toV());
+ if (dst == src1 || dst == src2) {
+ VU.set(kScratchReg, E64, m1);
+ vmv_vv(dst.fp().toV(), dst_v);
+ }
+}
+
+void LiftoffAssembler::emit_i64x2_extmul_low_i32x4_u(LiftoffRegister dst,
+ LiftoffRegister src1,
+ LiftoffRegister src2) {
+ VU.set(kScratchReg, E32, mf2);
+ VRegister dst_v = dst.fp().toV();
+ if (dst == src1 || dst == src2) {
+ dst_v = kSimd128ScratchReg3;
+ }
+ vwmulu_vv(dst_v, src2.fp().toV(), src1.fp().toV());
+ if (dst == src1 || dst == src2) {
+ VU.set(kScratchReg, E64, m1);
+ vmv_vv(dst.fp().toV(), dst_v);
+ }
+}
+
+void LiftoffAssembler::emit_i64x2_extmul_high_i32x4_s(LiftoffRegister dst,
+ LiftoffRegister src1,
+ LiftoffRegister src2) {
+ VU.set(kScratchReg, E32, m1);
+ vslidedown_vi(kSimd128ScratchReg, src1.fp().toV(), 2);
+ vslidedown_vi(kSimd128ScratchReg2, src2.fp().toV(), 2);
+ VU.set(kScratchReg, E32, mf2);
+ vwmul_vv(dst.fp().toV(), kSimd128ScratchReg, kSimd128ScratchReg2);
+}
+
+void LiftoffAssembler::emit_i64x2_extmul_high_i32x4_u(LiftoffRegister dst,
+ LiftoffRegister src1,
+ LiftoffRegister src2) {
+ VU.set(kScratchReg, E32, m1);
+ vslidedown_vi(kSimd128ScratchReg, src1.fp().toV(), 2);
+ vslidedown_vi(kSimd128ScratchReg2, src2.fp().toV(), 2);
+ VU.set(kScratchReg, E32, mf2);
+ vwmulu_vv(dst.fp().toV(), kSimd128ScratchReg, kSimd128ScratchReg2);
+}
+
+void LiftoffAssembler::emit_i32x4_extmul_low_i16x8_s(LiftoffRegister dst,
+ LiftoffRegister src1,
+ LiftoffRegister src2) {
+ VU.set(kScratchReg, E16, mf2);
+ VRegister dst_v = dst.fp().toV();
+ if (dst == src1 || dst == src2) {
+ dst_v = kSimd128ScratchReg3;
+ }
+ vwmul_vv(dst_v, src2.fp().toV(), src1.fp().toV());
+ if (dst == src1 || dst == src2) {
+ VU.set(kScratchReg, E16, m1);
+ vmv_vv(dst.fp().toV(), dst_v);
+ }
+}
+
+void LiftoffAssembler::emit_i32x4_extmul_low_i16x8_u(LiftoffRegister dst,
+ LiftoffRegister src1,
+ LiftoffRegister src2) {
+ VU.set(kScratchReg, E16, mf2);
+ VRegister dst_v = dst.fp().toV();
+ if (dst == src1 || dst == src2) {
+ dst_v = kSimd128ScratchReg3;
+ }
+ vwmulu_vv(dst_v, src2.fp().toV(), src1.fp().toV());
+ if (dst == src1 || dst == src2) {
+ VU.set(kScratchReg, E16, m1);
+ vmv_vv(dst.fp().toV(), dst_v);
+ }
+}
+
+void LiftoffAssembler::emit_i32x4_extmul_high_i16x8_s(LiftoffRegister dst,
+ LiftoffRegister src1,
+ LiftoffRegister src2) {
+ VU.set(kScratchReg, E16, m1);
+ vslidedown_vi(kSimd128ScratchReg, src1.fp().toV(), 4);
+ vslidedown_vi(kSimd128ScratchReg2, src2.fp().toV(), 4);
+ VU.set(kScratchReg, E16, mf2);
+ vwmul_vv(dst.fp().toV(), kSimd128ScratchReg, kSimd128ScratchReg2);
+}
+
+void LiftoffAssembler::emit_i32x4_extmul_high_i16x8_u(LiftoffRegister dst,
+ LiftoffRegister src1,
+ LiftoffRegister src2) {
+ VU.set(kScratchReg, E16, m1);
+ vslidedown_vi(kSimd128ScratchReg, src1.fp().toV(), 4);
+ vslidedown_vi(kSimd128ScratchReg2, src2.fp().toV(), 4);
+ VU.set(kScratchReg, E16, mf2);
+ vwmulu_vv(dst.fp().toV(), kSimd128ScratchReg, kSimd128ScratchReg2);
+}
+
+void LiftoffAssembler::emit_i16x8_extmul_low_i8x16_s(LiftoffRegister dst,
+ LiftoffRegister src1,
+ LiftoffRegister src2) {
+ VU.set(kScratchReg, E8, mf2);
+ VRegister dst_v = dst.fp().toV();
+ if (dst == src1 || dst == src2) {
+ dst_v = kSimd128ScratchReg3;
+ }
+ vwmul_vv(dst_v, src2.fp().toV(), src1.fp().toV());
+ if (dst == src1 || dst == src2) {
+ VU.set(kScratchReg, E8, m1);
+ vmv_vv(dst.fp().toV(), dst_v);
}
+}
-SIMD_BINOP(i16x8, i8x16_s)
-SIMD_BINOP(i16x8, i8x16_u)
+void LiftoffAssembler::emit_i16x8_extmul_low_i8x16_u(LiftoffRegister dst,
+ LiftoffRegister src1,
+ LiftoffRegister src2) {
+ VU.set(kScratchReg, E8, mf2);
+ VRegister dst_v = dst.fp().toV();
+ if (dst == src1 || dst == src2) {
+ dst_v = kSimd128ScratchReg3;
+ }
+ vwmulu_vv(dst_v, src2.fp().toV(), src1.fp().toV());
+ if (dst == src1 || dst == src2) {
+ VU.set(kScratchReg, E8, m1);
+ vmv_vv(dst.fp().toV(), dst_v);
+ }
+}
-SIMD_BINOP(i32x4, i16x8_s)
-SIMD_BINOP(i32x4, i16x8_u)
+void LiftoffAssembler::emit_i16x8_extmul_high_i8x16_s(LiftoffRegister dst,
+ LiftoffRegister src1,
+ LiftoffRegister src2) {
+ VU.set(kScratchReg, E8, m1);
+ vslidedown_vi(kSimd128ScratchReg, src1.fp().toV(), 8);
+ vslidedown_vi(kSimd128ScratchReg2, src2.fp().toV(), 8);
+ VU.set(kScratchReg, E8, mf2);
+ vwmul_vv(dst.fp().toV(), kSimd128ScratchReg, kSimd128ScratchReg2);
+}
-SIMD_BINOP(i64x2, i32x4_s)
-SIMD_BINOP(i64x2, i32x4_u)
+void LiftoffAssembler::emit_i16x8_extmul_high_i8x16_u(LiftoffRegister dst,
+ LiftoffRegister src1,
+ LiftoffRegister src2) {
+ VU.set(kScratchReg, E8, m1);
+ vslidedown_vi(kSimd128ScratchReg, src1.fp().toV(), 8);
+ vslidedown_vi(kSimd128ScratchReg2, src2.fp().toV(), 8);
+ VU.set(kScratchReg, E8, mf2);
+ vwmulu_vv(dst.fp().toV(), kSimd128ScratchReg, kSimd128ScratchReg2);
+}
#undef SIMD_BINOP
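The SIMD_BINOP bailout macro is replaced by real widening multiplies: the "low" variants widen-multiply the low halves directly (fractional LMUL mf2, so only half the lanes are read, with a temporary when dst aliases a source), while the "high" variants first slide the upper half down. Scalar reference for the i32x4-from-i16x8 signed pair:

#include <cstdint>

// i32x4.extmul_low_i16x8_s: multiply the low 4 i16 lanes into 4 i32 lanes.
void ExtMulLowI16x8S(int32_t dst[4], const int16_t a[8], const int16_t b[8]) {
  for (int i = 0; i < 4; ++i)
    dst[i] = int32_t{a[i]} * int32_t{b[i]};      // vwmul_vv at SEW=16, LMUL=mf2
}

// The "_high_" variant works on lanes 4..7 (hence the vslidedown_vi by 4).
void ExtMulHighI16x8S(int32_t dst[4], const int16_t a[8], const int16_t b[8]) {
  for (int i = 0; i < 4; ++i)
    dst[i] = int32_t{a[i + 4]} * int32_t{b[i + 4]};
}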
void LiftoffAssembler::emit_i16x8_q15mulr_sat_s(LiftoffRegister dst,
LiftoffRegister src1,
LiftoffRegister src2) {
- bailout(kSimd, "i16x8_q15mulr_sat_s");
+ VU.set(kScratchReg, E16, m1);
+ vsmul_vv(dst.fp().toV(), src1.fp().toV(), src2.fp().toV());
}
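vsmul_vv at SEW=16 is a fixed-point rounding, saturating multiply, which is the shape i16x8.q15mulr_sat_s needs (the exact rounding depends on the vxrm setting). A scalar reference for one lane, written with the explicit round-add of 0x4000:

#include <algorithm>
#include <cstdint>

// One lane of i16x8.q15mulr_sat_s: sat16((a * b + 0x4000) >> 15). Only the
// product 0x8000 * 0x8000 overflows int16 and actually needs the clamp.
// Assumes arithmetic right shift of negative values (true on mainstream compilers).
int16_t Q15MulRSatS(int16_t a, int16_t b) {
  int32_t product = int32_t{a} * int32_t{b};
  int32_t rounded = (product + 0x4000) >> 15;
  rounded = std::min(rounded, 0x7fff);
  rounded = std::max(rounded, -0x8000);
  return static_cast<int16_t>(rounded);
}
// Example: a = b = -32768 (-1.0 in Q15) -> product 0x40000000,
// rounded = 0x8000, clamped to 0x7fff.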
void LiftoffAssembler::emit_i64x2_bitmask(LiftoffRegister dst,
@@ -1853,22 +2120,32 @@ void LiftoffAssembler::emit_i64x2_bitmask(LiftoffRegister dst,
void LiftoffAssembler::emit_i64x2_sconvert_i32x4_low(LiftoffRegister dst,
LiftoffRegister src) {
- bailout(kSimd, "i64x2_sconvert_i32x4_low");
+ VU.set(kScratchReg, E64, m1);
+ vmv_vv(kSimd128ScratchReg, src.fp().toV());
+ vsext_vf2(dst.fp().toV(), kSimd128ScratchReg);
}
void LiftoffAssembler::emit_i64x2_sconvert_i32x4_high(LiftoffRegister dst,
LiftoffRegister src) {
- bailout(kSimd, "i64x2_sconvert_i32x4_high");
+ VU.set(kScratchReg, E32, m1);
+ vslidedown_vi(kSimd128ScratchReg, src.fp().toV(), 2);
+ VU.set(kScratchReg, E64, m1);
+ vsext_vf2(dst.fp().toV(), kSimd128ScratchReg);
}
void LiftoffAssembler::emit_i64x2_uconvert_i32x4_low(LiftoffRegister dst,
LiftoffRegister src) {
- bailout(kSimd, "i64x2_uconvert_i32x4_low");
+ VU.set(kScratchReg, E64, m1);
+ vmv_vv(kSimd128ScratchReg, src.fp().toV());
+ vzext_vf2(dst.fp().toV(), kSimd128ScratchReg);
}
void LiftoffAssembler::emit_i64x2_uconvert_i32x4_high(LiftoffRegister dst,
LiftoffRegister src) {
- bailout(kSimd, "i64x2_uconvert_i32x4_high");
+ VU.set(kScratchReg, E32, m1);
+ vslidedown_vi(kSimd128ScratchReg, src.fp().toV(), 2);
+ VU.set(kScratchReg, E64, m1);
+ vzext_vf2(dst.fp().toV(), kSimd128ScratchReg);
}
void LiftoffAssembler::emit_i8x16_eq(LiftoffRegister dst, LiftoffRegister lhs,
@@ -1888,7 +2165,7 @@ void LiftoffAssembler::emit_i8x16_gt_s(LiftoffRegister dst, LiftoffRegister lhs,
void LiftoffAssembler::emit_i8x16_gt_u(LiftoffRegister dst, LiftoffRegister lhs,
LiftoffRegister rhs) {
- WasmRvvGtU(dst.fp().toV(), lhs.fp().toV(), rhs.fp().toV(), E16, m1);
+ WasmRvvGtU(dst.fp().toV(), lhs.fp().toV(), rhs.fp().toV(), E8, m1);
}
void LiftoffAssembler::emit_i8x16_ge_s(LiftoffRegister dst, LiftoffRegister lhs,
@@ -1980,7 +2257,7 @@ void LiftoffAssembler::emit_f32x4_ne(LiftoffRegister dst, LiftoffRegister lhs,
void LiftoffAssembler::emit_f32x4_lt(LiftoffRegister dst, LiftoffRegister lhs,
LiftoffRegister rhs) {
VU.set(kScratchReg, E32, m1);
- vmflt_vv(v0, rhs.fp().toV(), lhs.fp().toV());
+ vmflt_vv(v0, lhs.fp().toV(), rhs.fp().toV());
vmv_vx(dst.fp().toV(), zero_reg);
vmerge_vi(dst.fp().toV(), -1, dst.fp().toV());
}
@@ -1988,62 +2265,110 @@ void LiftoffAssembler::emit_f32x4_lt(LiftoffRegister dst, LiftoffRegister lhs,
void LiftoffAssembler::emit_f32x4_le(LiftoffRegister dst, LiftoffRegister lhs,
LiftoffRegister rhs) {
VU.set(kScratchReg, E32, m1);
- vmfle_vv(v0, rhs.fp().toV(), lhs.fp().toV());
+ vmfle_vv(v0, lhs.fp().toV(), rhs.fp().toV());
vmv_vx(dst.fp().toV(), zero_reg);
vmerge_vi(dst.fp().toV(), -1, dst.fp().toV());
}
void LiftoffAssembler::emit_f64x2_convert_low_i32x4_s(LiftoffRegister dst,
LiftoffRegister src) {
- bailout(kSimd, "f64x2.convert_low_i32x4_s");
+ VU.set(kScratchReg, E32, mf2);
+ if (dst.fp().toV() != src.fp().toV()) {
+ vfwcvt_f_x_v(dst.fp().toV(), src.fp().toV());
+ } else {
+ vfwcvt_f_x_v(kSimd128ScratchReg3, src.fp().toV());
+ VU.set(kScratchReg, E64, m1);
+ vmv_vv(dst.fp().toV(), kSimd128ScratchReg3);
+ }
}
void LiftoffAssembler::emit_f64x2_convert_low_i32x4_u(LiftoffRegister dst,
LiftoffRegister src) {
- bailout(kSimd, "f64x2.convert_low_i32x4_u");
+ VU.set(kScratchReg, E32, mf2);
+ if (dst.fp().toV() != src.fp().toV()) {
+ vfwcvt_f_xu_v(dst.fp().toV(), src.fp().toV());
+ } else {
+ vfwcvt_f_xu_v(kSimd128ScratchReg3, src.fp().toV());
+ VU.set(kScratchReg, E64, m1);
+ vmv_vv(dst.fp().toV(), kSimd128ScratchReg3);
+ }
}
void LiftoffAssembler::emit_f64x2_promote_low_f32x4(LiftoffRegister dst,
LiftoffRegister src) {
- bailout(kSimd, "f64x2.promote_low_f32x4");
+ VU.set(kScratchReg, E32, mf2);
+ if (dst.fp().toV() != src.fp().toV()) {
+ vfwcvt_f_f_v(dst.fp().toV(), src.fp().toV());
+ } else {
+ vfwcvt_f_f_v(kSimd128ScratchReg3, src.fp().toV());
+ VU.set(kScratchReg, E64, m1);
+ vmv_vv(dst.fp().toV(), kSimd128ScratchReg3);
+ }
}
void LiftoffAssembler::emit_f32x4_demote_f64x2_zero(LiftoffRegister dst,
LiftoffRegister src) {
- VU.set(kScratchReg, E32, m1);
+ VU.set(kScratchReg, E32, mf2);
vfncvt_f_f_w(dst.fp().toV(), src.fp().toV());
+ VU.set(kScratchReg, E32, m1);
vmv_vi(v0, 12);
vmerge_vx(dst.fp().toV(), zero_reg, dst.fp().toV());
}
void LiftoffAssembler::emit_i32x4_trunc_sat_f64x2_s_zero(LiftoffRegister dst,
LiftoffRegister src) {
- bailout(kSimd, "i32x4.trunc_sat_f64x2_s_zero");
+ VU.set(kScratchReg, E64, m1);
+ vmv_vx(kSimd128ScratchReg, zero_reg);
+ vmfeq_vv(v0, src.fp().toV(), src.fp().toV());
+ vmv_vv(kSimd128ScratchReg3, src.fp().toV());
+ VU.set(kScratchReg, E32, m1);
+ VU.set(RoundingMode::RTZ);
+ vfncvt_x_f_w(kSimd128ScratchReg, kSimd128ScratchReg3, MaskType::Mask);
+ vmv_vv(dst.fp().toV(), kSimd128ScratchReg);
}
void LiftoffAssembler::emit_i32x4_trunc_sat_f64x2_u_zero(LiftoffRegister dst,
LiftoffRegister src) {
- bailout(kSimd, "i32x4.trunc_sat_f64x2_u_zero");
+ VU.set(kScratchReg, E64, m1);
+ vmv_vx(kSimd128ScratchReg, zero_reg);
+ vmfeq_vv(v0, src.fp().toV(), src.fp().toV());
+ vmv_vv(kSimd128ScratchReg3, src.fp().toV());
+ VU.set(kScratchReg, E32, m1);
+ VU.set(RoundingMode::RTZ);
+ vfncvt_xu_f_w(kSimd128ScratchReg, kSimd128ScratchReg3, MaskType::Mask);
+ vmv_vv(dst.fp().toV(), kSimd128ScratchReg);
}
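i32x4.trunc_sat_f64x2_s/u_zero converts the two f64 lanes to integers with saturation, maps NaN to 0, and zeroes the upper two lanes; the masked vfncvt above only converts lanes whose input compares equal to itself (i.e. non-NaN) into a destination pre-filled with zeros, under RTZ rounding. Scalar reference for the signed form:

#include <cmath>
#include <cstdint>

// i32x4.trunc_sat_f64x2_s_zero, producing all four output lanes.
void TruncSatF64x2SZero(int32_t dst[4], const double src[2]) {
  for (int i = 0; i < 2; ++i) {
    double v = src[i];
    if (std::isnan(v))            { dst[i] = 0; continue; }  // masked-off lane stays 0
    if (v <= -2147483648.0)       { dst[i] = INT32_MIN; continue; }
    if (v >= 2147483648.0)        { dst[i] = INT32_MAX; continue; }
    dst[i] = static_cast<int32_t>(std::trunc(v));            // RTZ conversion
  }
  dst[2] = dst[3] = 0;                                       // "_zero": upper lanes cleared
}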
void LiftoffAssembler::emit_f64x2_eq(LiftoffRegister dst, LiftoffRegister lhs,
LiftoffRegister rhs) {
- bailout(kSimd, "emit_f64x2_eq");
+ VU.set(kScratchReg, E64, m1);
+ vmfeq_vv(v0, rhs.fp().toV(), lhs.fp().toV());
+ vmv_vx(dst.fp().toV(), zero_reg);
+ vmerge_vi(dst.fp().toV(), -1, dst.fp().toV());
}
void LiftoffAssembler::emit_f64x2_ne(LiftoffRegister dst, LiftoffRegister lhs,
LiftoffRegister rhs) {
- bailout(kSimd, "emit_f64x2_ne");
+ VU.set(kScratchReg, E64, m1);
+ vmfne_vv(v0, rhs.fp().toV(), lhs.fp().toV());
+ vmv_vx(dst.fp().toV(), zero_reg);
+ vmerge_vi(dst.fp().toV(), -1, dst.fp().toV());
}
void LiftoffAssembler::emit_f64x2_lt(LiftoffRegister dst, LiftoffRegister lhs,
LiftoffRegister rhs) {
- bailout(kSimd, "emit_f64x2_lt");
+ VU.set(kScratchReg, E64, m1);
+ vmflt_vv(v0, lhs.fp().toV(), rhs.fp().toV());
+ vmv_vx(dst.fp().toV(), zero_reg);
+ vmerge_vi(dst.fp().toV(), -1, dst.fp().toV());
}
void LiftoffAssembler::emit_f64x2_le(LiftoffRegister dst, LiftoffRegister lhs,
LiftoffRegister rhs) {
- bailout(kSimd, "emit_f64x2_le");
+ VU.set(kScratchReg, E64, m1);
+ vmfle_vv(v0, lhs.fp().toV(), rhs.fp().toV());
+ vmv_vx(dst.fp().toV(), zero_reg);
+ vmerge_vi(dst.fp().toV(), -1, dst.fp().toV());
}
void LiftoffAssembler::emit_s128_const(LiftoffRegister dst,
@@ -2136,6 +2461,7 @@ void LiftoffAssembler::emit_i8x16_bitmask(LiftoffRegister dst,
void LiftoffAssembler::emit_i8x16_shl(LiftoffRegister dst, LiftoffRegister lhs,
LiftoffRegister rhs) {
VU.set(kScratchReg, E8, m1);
+ andi(rhs.gp(), rhs.gp(), 8 - 1);
vsll_vx(dst.fp().toV(), lhs.fp().toV(), rhs.gp());
}
@@ -2143,29 +2469,35 @@ void LiftoffAssembler::emit_i8x16_shli(LiftoffRegister dst, LiftoffRegister lhs,
int32_t rhs) {
DCHECK(is_uint5(rhs));
VU.set(kScratchReg, E8, m1);
- vsll_vi(dst.fp().toV(), lhs.fp().toV(), rhs);
+ vsll_vi(dst.fp().toV(), lhs.fp().toV(), rhs % 8);
}
void LiftoffAssembler::emit_i8x16_shr_s(LiftoffRegister dst,
LiftoffRegister lhs,
LiftoffRegister rhs) {
- bailout(kSimd, "emit_i8x16_shr_s");
+ VU.set(kScratchReg, E8, m1);
+ andi(rhs.gp(), rhs.gp(), 8 - 1);
+ vsra_vx(dst.fp().toV(), lhs.fp().toV(), rhs.gp());
}
void LiftoffAssembler::emit_i8x16_shri_s(LiftoffRegister dst,
LiftoffRegister lhs, int32_t rhs) {
- bailout(kSimd, "emit_i8x16_shri_s");
+ VU.set(kScratchReg, E8, m1);
+ vsra_vi(dst.fp().toV(), lhs.fp().toV(), rhs % 8);
}
void LiftoffAssembler::emit_i8x16_shr_u(LiftoffRegister dst,
LiftoffRegister lhs,
LiftoffRegister rhs) {
- bailout(kSimd, "emit_i8x16_shr_u");
+ VU.set(kScratchReg, E8, m1);
+ andi(rhs.gp(), rhs.gp(), 8 - 1);
+ vsrl_vx(dst.fp().toV(), lhs.fp().toV(), rhs.gp());
}
void LiftoffAssembler::emit_i8x16_shri_u(LiftoffRegister dst,
LiftoffRegister lhs, int32_t rhs) {
- bailout(kSimd, "emit_i8x16_shri_u");
+ VU.set(kScratchReg, E8, m1);
+ vsrl_vi(dst.fp().toV(), lhs.fp().toV(), rhs % 8);
}
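Wasm SIMD shifts take the shift count modulo the lane width, which is why the register forms now mask with andi(rhs, rhs, lane_bits - 1) and the immediate forms shift by rhs % lane_bits. Scalar example for i8x16.shl:

#include <cstdint>

// i8x16.shl: the effective shift is `count mod 8`, so a count of 9 behaves
// like 1 rather than shifting everything out.
void I8x16Shl(uint8_t lanes[16], int count) {
  int shift = count & 7;                        // andi(rhs, rhs, 8 - 1)
  for (int i = 0; i < 16; ++i)
    lanes[i] = static_cast<uint8_t>(lanes[i] << shift);
}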
void LiftoffAssembler::emit_i8x16_add(LiftoffRegister dst, LiftoffRegister lhs,
@@ -2211,30 +2543,35 @@ void LiftoffAssembler::emit_i8x16_sub_sat_u(LiftoffRegister dst,
void LiftoffAssembler::emit_i8x16_min_s(LiftoffRegister dst,
LiftoffRegister lhs,
LiftoffRegister rhs) {
- bailout(kSimd, "emit_i8x16_min_s");
+ VU.set(kScratchReg, E8, m1);
+ vmin_vv(dst.fp().toV(), lhs.fp().toV(), rhs.fp().toV());
}
void LiftoffAssembler::emit_i8x16_min_u(LiftoffRegister dst,
LiftoffRegister lhs,
LiftoffRegister rhs) {
- bailout(kSimd, "emit_i8x16_min_u");
+ VU.set(kScratchReg, E8, m1);
+ vminu_vv(dst.fp().toV(), lhs.fp().toV(), rhs.fp().toV());
}
void LiftoffAssembler::emit_i8x16_max_s(LiftoffRegister dst,
LiftoffRegister lhs,
LiftoffRegister rhs) {
- bailout(kSimd, "emit_i8x16_max_s");
+ VU.set(kScratchReg, E8, m1);
+ vmax_vv(dst.fp().toV(), lhs.fp().toV(), rhs.fp().toV());
}
void LiftoffAssembler::emit_i8x16_max_u(LiftoffRegister dst,
LiftoffRegister lhs,
LiftoffRegister rhs) {
- bailout(kSimd, "emit_i8x16_max_u");
+ VU.set(kScratchReg, E8, m1);
+ vmaxu_vv(dst.fp().toV(), lhs.fp().toV(), rhs.fp().toV());
}
void LiftoffAssembler::emit_i16x8_neg(LiftoffRegister dst,
LiftoffRegister src) {
- bailout(kSimd, "emit_i16x8_neg");
+ VU.set(kScratchReg, E16, m1);
+ vneg_vv(dst.fp().toV(), src.fp().toV());
}
void LiftoffAssembler::emit_i16x8_alltrue(LiftoffRegister dst,
@@ -2262,36 +2599,43 @@ void LiftoffAssembler::emit_i16x8_bitmask(LiftoffRegister dst,
void LiftoffAssembler::emit_i16x8_shl(LiftoffRegister dst, LiftoffRegister lhs,
LiftoffRegister rhs) {
VU.set(kScratchReg, E16, m1);
+ andi(rhs.gp(), rhs.gp(), 16 - 1);
vsll_vx(dst.fp().toV(), lhs.fp().toV(), rhs.gp());
}
void LiftoffAssembler::emit_i16x8_shli(LiftoffRegister dst, LiftoffRegister lhs,
int32_t rhs) {
- DCHECK(is_uint5(rhs));
VU.set(kScratchReg, E16, m1);
- vsll_vi(dst.fp().toV(), lhs.fp().toV(), rhs);
+ vsll_vi(dst.fp().toV(), lhs.fp().toV(), rhs % 16);
}
void LiftoffAssembler::emit_i16x8_shr_s(LiftoffRegister dst,
LiftoffRegister lhs,
LiftoffRegister rhs) {
- bailout(kSimd, "emit_i16x8_shr_s");
+ VU.set(kScratchReg, E16, m1);
+ andi(rhs.gp(), rhs.gp(), 16 - 1);
+ vsra_vx(dst.fp().toV(), lhs.fp().toV(), rhs.gp());
}
void LiftoffAssembler::emit_i16x8_shri_s(LiftoffRegister dst,
LiftoffRegister lhs, int32_t rhs) {
- bailout(kSimd, "emit_i16x8_shri_s");
+ VU.set(kScratchReg, E16, m1);
+ vsra_vi(dst.fp().toV(), lhs.fp().toV(), rhs % 16);
}
void LiftoffAssembler::emit_i16x8_shr_u(LiftoffRegister dst,
LiftoffRegister lhs,
LiftoffRegister rhs) {
- bailout(kSimd, "emit_i16x8_shr_u");
+ VU.set(kScratchReg, E16, m1);
+ andi(rhs.gp(), rhs.gp(), 16 - 1);
+ vsrl_vx(dst.fp().toV(), lhs.fp().toV(), rhs.gp());
}
void LiftoffAssembler::emit_i16x8_shri_u(LiftoffRegister dst,
LiftoffRegister lhs, int32_t rhs) {
- bailout(kSimd, "emit_i16x8_shri_u");
+ DCHECK(is_uint5(rhs));
+ VU.set(kScratchReg, E16, m1);
+ vsrl_vi(dst.fp().toV(), lhs.fp().toV(), rhs % 16);
}
void LiftoffAssembler::emit_i16x8_add(LiftoffRegister dst, LiftoffRegister lhs,
@@ -2303,13 +2647,15 @@ void LiftoffAssembler::emit_i16x8_add(LiftoffRegister dst, LiftoffRegister lhs,
void LiftoffAssembler::emit_i16x8_add_sat_s(LiftoffRegister dst,
LiftoffRegister lhs,
LiftoffRegister rhs) {
- bailout(kSimd, "emit_i16x8_add_sat_s");
+ VU.set(kScratchReg, E16, m1);
+ vsadd_vv(dst.fp().toV(), lhs.fp().toV(), rhs.fp().toV());
}
void LiftoffAssembler::emit_i16x8_add_sat_u(LiftoffRegister dst,
LiftoffRegister lhs,
LiftoffRegister rhs) {
- bailout(kSimd, "emit_i16x8_add_sat_u");
+ VU.set(kScratchReg, E16, m1);
+ vsaddu_vv(dst.fp().toV(), lhs.fp().toV(), rhs.fp().toV());
}
void LiftoffAssembler::emit_i16x8_sub(LiftoffRegister dst, LiftoffRegister lhs,
@@ -2321,47 +2667,55 @@ void LiftoffAssembler::emit_i16x8_sub(LiftoffRegister dst, LiftoffRegister lhs,
void LiftoffAssembler::emit_i16x8_sub_sat_s(LiftoffRegister dst,
LiftoffRegister lhs,
LiftoffRegister rhs) {
- bailout(kSimd, "emit_i16x8_sub_sat_s");
+ VU.set(kScratchReg, E16, m1);
+ vssub_vv(dst.fp().toV(), lhs.fp().toV(), rhs.fp().toV());
}
void LiftoffAssembler::emit_i16x8_sub_sat_u(LiftoffRegister dst,
LiftoffRegister lhs,
LiftoffRegister rhs) {
- bailout(kSimd, "emit_i16x8_sub_sat_u");
+ VU.set(kScratchReg, E16, m1);
+ vssubu_vv(dst.fp().toV(), lhs.fp().toV(), rhs.fp().toV());
}
void LiftoffAssembler::emit_i16x8_mul(LiftoffRegister dst, LiftoffRegister lhs,
LiftoffRegister rhs) {
- bailout(kSimd, "emit_i16x8_mul");
+ VU.set(kScratchReg, E16, m1);
+ vmul_vv(dst.fp().toV(), lhs.fp().toV(), rhs.fp().toV());
}
void LiftoffAssembler::emit_i16x8_min_s(LiftoffRegister dst,
LiftoffRegister lhs,
LiftoffRegister rhs) {
- bailout(kSimd, "emit_i16x8_min_s");
+ VU.set(kScratchReg, E16, m1);
+ vmin_vv(dst.fp().toV(), lhs.fp().toV(), rhs.fp().toV());
}
void LiftoffAssembler::emit_i16x8_min_u(LiftoffRegister dst,
LiftoffRegister lhs,
LiftoffRegister rhs) {
- bailout(kSimd, "emit_i16x8_min_u");
+ VU.set(kScratchReg, E16, m1);
+ vminu_vv(dst.fp().toV(), lhs.fp().toV(), rhs.fp().toV());
}
void LiftoffAssembler::emit_i16x8_max_s(LiftoffRegister dst,
LiftoffRegister lhs,
LiftoffRegister rhs) {
- bailout(kSimd, "emit_i16x8_max_s");
+ VU.set(kScratchReg, E16, m1);
+ vmax_vv(dst.fp().toV(), lhs.fp().toV(), rhs.fp().toV());
}
void LiftoffAssembler::emit_i16x8_max_u(LiftoffRegister dst,
LiftoffRegister lhs,
LiftoffRegister rhs) {
- bailout(kSimd, "emit_i16x8_max_u");
+ VU.set(kScratchReg, E16, m1);
+ vmaxu_vv(dst.fp().toV(), lhs.fp().toV(), rhs.fp().toV());
}
void LiftoffAssembler::emit_i32x4_neg(LiftoffRegister dst,
LiftoffRegister src) {
- bailout(kSimd, "emit_i32x4_neg");
+ VU.set(kScratchReg, E32, m1);
+ vneg_vv(dst.fp().toV(), src.fp().toV());
}
void LiftoffAssembler::emit_i32x4_alltrue(LiftoffRegister dst,
@@ -2388,15 +2742,16 @@ void LiftoffAssembler::emit_i32x4_bitmask(LiftoffRegister dst,
void LiftoffAssembler::emit_i32x4_shl(LiftoffRegister dst, LiftoffRegister lhs,
LiftoffRegister rhs) {
VU.set(kScratchReg, E32, m1);
+ andi(rhs.gp(), rhs.gp(), 32 - 1);
vsll_vx(dst.fp().toV(), lhs.fp().toV(), rhs.gp());
}
void LiftoffAssembler::emit_i32x4_shli(LiftoffRegister dst, LiftoffRegister lhs,
int32_t rhs) {
- if (is_uint5(rhs)) {
- vsll_vi(dst.fp().toV(), lhs.fp().toV(), rhs);
+ if (is_uint5(rhs % 32)) {
+ vsll_vi(dst.fp().toV(), lhs.fp().toV(), rhs % 32);
} else {
- li(kScratchReg, rhs);
+ li(kScratchReg, rhs % 32);
vsll_vx(dst.fp().toV(), lhs.fp().toV(), kScratchReg);
}
}
@@ -2404,23 +2759,39 @@ void LiftoffAssembler::emit_i32x4_shli(LiftoffRegister dst, LiftoffRegister lhs,
void LiftoffAssembler::emit_i32x4_shr_s(LiftoffRegister dst,
LiftoffRegister lhs,
LiftoffRegister rhs) {
- bailout(kSimd, "emit_i32x4_shr_s");
+ VU.set(kScratchReg, E32, m1);
+ andi(rhs.gp(), rhs.gp(), 32 - 1);
+ vsra_vx(dst.fp().toV(), lhs.fp().toV(), rhs.gp());
}
void LiftoffAssembler::emit_i32x4_shri_s(LiftoffRegister dst,
LiftoffRegister lhs, int32_t rhs) {
- bailout(kSimd, "emit_i32x4_shri_s");
+ VU.set(kScratchReg, E32, m1);
+ if (is_uint5(rhs % 32)) {
+ vsra_vi(dst.fp().toV(), lhs.fp().toV(), rhs % 32);
+ } else {
+ li(kScratchReg, rhs % 32);
+ vsra_vx(dst.fp().toV(), lhs.fp().toV(), kScratchReg);
+ }
}
void LiftoffAssembler::emit_i32x4_shr_u(LiftoffRegister dst,
LiftoffRegister lhs,
LiftoffRegister rhs) {
- bailout(kSimd, "emit_i32x4_shr_u");
+ VU.set(kScratchReg, E32, m1);
+ andi(rhs.gp(), rhs.gp(), 32 - 1);
+ vsrl_vx(dst.fp().toV(), lhs.fp().toV(), rhs.gp());
}
void LiftoffAssembler::emit_i32x4_shri_u(LiftoffRegister dst,
LiftoffRegister lhs, int32_t rhs) {
- bailout(kSimd, "emit_i32x4_shri_u");
+ VU.set(kScratchReg, E32, m1);
+ if (is_uint5(rhs % 32)) {
+ vsrl_vi(dst.fp().toV(), lhs.fp().toV(), rhs % 32);
+ } else {
+ li(kScratchReg, rhs % 32);
+ vsrl_vx(dst.fp().toV(), lhs.fp().toV(), kScratchReg);
+ }
}
void LiftoffAssembler::emit_i32x4_add(LiftoffRegister dst, LiftoffRegister lhs,
@@ -2437,42 +2808,59 @@ void LiftoffAssembler::emit_i32x4_sub(LiftoffRegister dst, LiftoffRegister lhs,
void LiftoffAssembler::emit_i32x4_mul(LiftoffRegister dst, LiftoffRegister lhs,
LiftoffRegister rhs) {
- bailout(kSimd, "emit_i32x4_mul");
+ VU.set(kScratchReg, E32, m1);
+ vmul_vv(dst.fp().toV(), lhs.fp().toV(), rhs.fp().toV());
}
void LiftoffAssembler::emit_i32x4_min_s(LiftoffRegister dst,
LiftoffRegister lhs,
LiftoffRegister rhs) {
- bailout(kSimd, "emit_i32x4_min_s");
+ VU.set(kScratchReg, E32, m1);
+ vmin_vv(dst.fp().toV(), lhs.fp().toV(), rhs.fp().toV());
}
void LiftoffAssembler::emit_i32x4_min_u(LiftoffRegister dst,
LiftoffRegister lhs,
LiftoffRegister rhs) {
- bailout(kSimd, "emit_i32x4_min_u");
+ VU.set(kScratchReg, E32, m1);
+ vminu_vv(dst.fp().toV(), lhs.fp().toV(), rhs.fp().toV());
}
void LiftoffAssembler::emit_i32x4_max_s(LiftoffRegister dst,
LiftoffRegister lhs,
LiftoffRegister rhs) {
- bailout(kSimd, "emit_i32x4_max_s");
+ VU.set(kScratchReg, E32, m1);
+ vmax_vv(dst.fp().toV(), lhs.fp().toV(), rhs.fp().toV());
}
void LiftoffAssembler::emit_i32x4_max_u(LiftoffRegister dst,
LiftoffRegister lhs,
LiftoffRegister rhs) {
- bailout(kSimd, "emit_i32x4_max_u");
+ VU.set(kScratchReg, E32, m1);
+ vmaxu_vv(dst.fp().toV(), lhs.fp().toV(), rhs.fp().toV());
}
void LiftoffAssembler::emit_i32x4_dot_i16x8_s(LiftoffRegister dst,
LiftoffRegister lhs,
LiftoffRegister rhs) {
- bailout(kSimd, "emit_i32x4_dot_i16x8_s");
+ VU.set(kScratchReg, E16, m1);
+ vwmul_vv(kSimd128ScratchReg3, lhs.fp().toV(), rhs.fp().toV());
+ VU.set(kScratchReg, E32, m2);
+ li(kScratchReg, 0b01010101);
+ vmv_sx(v0, kScratchReg);
+ vcompress_vv(kSimd128ScratchReg, kSimd128ScratchReg3, v0);
+
+ li(kScratchReg, 0b10101010);
+ vmv_sx(kSimd128ScratchReg2, kScratchReg);
+ vcompress_vv(v0, kSimd128ScratchReg3, kSimd128ScratchReg2);
+ VU.set(kScratchReg, E32, m1);
+ vadd_vv(dst.fp().toV(), kSimd128ScratchReg, v0);
}
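The dot product widens the i16 lanes, multiplies, and then adds adjacent product pairs; the RVV sequence above does the pair-add by compressing the even-positioned and odd-positioned 32-bit products into two vectors and adding them. Scalar reference:

#include <cstdint>

// i32x4.dot_i16x8_s: dst[i] = a[2i]*b[2i] + a[2i+1]*b[2i+1], widened to i32.
void DotI16x8S(int32_t dst[4], const int16_t a[8], const int16_t b[8]) {
  for (int i = 0; i < 4; ++i)
    dst[i] = int32_t{a[2 * i]} * b[2 * i] + int32_t{a[2 * i + 1]} * b[2 * i + 1];
}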
void LiftoffAssembler::emit_i64x2_neg(LiftoffRegister dst,
LiftoffRegister src) {
- bailout(kSimd, "emit_i64x2_neg");
+ VU.set(kScratchReg, E64, m1);
+ vneg_vv(dst.fp().toV(), src.fp().toV());
}
void LiftoffAssembler::emit_i64x2_alltrue(LiftoffRegister dst,
@@ -2491,16 +2879,17 @@ void LiftoffAssembler::emit_i64x2_alltrue(LiftoffRegister dst,
void LiftoffAssembler::emit_i64x2_shl(LiftoffRegister dst, LiftoffRegister lhs,
LiftoffRegister rhs) {
VU.set(kScratchReg, E64, m1);
+ andi(rhs.gp(), rhs.gp(), 64 - 1);
vsll_vx(dst.fp().toV(), lhs.fp().toV(), rhs.gp());
}
void LiftoffAssembler::emit_i64x2_shli(LiftoffRegister dst, LiftoffRegister lhs,
int32_t rhs) {
VU.set(kScratchReg, E64, m1);
- if (is_uint5(rhs)) {
- vsll_vi(dst.fp().toV(), lhs.fp().toV(), rhs);
+ if (is_uint5(rhs % 64)) {
+ vsll_vi(dst.fp().toV(), lhs.fp().toV(), rhs % 64);
} else {
- li(kScratchReg, rhs);
+ li(kScratchReg, rhs % 64);
vsll_vx(dst.fp().toV(), lhs.fp().toV(), kScratchReg);
}
}
@@ -2508,23 +2897,39 @@ void LiftoffAssembler::emit_i64x2_shli(LiftoffRegister dst, LiftoffRegister lhs,
void LiftoffAssembler::emit_i64x2_shr_s(LiftoffRegister dst,
LiftoffRegister lhs,
LiftoffRegister rhs) {
- bailout(kSimd, "emit_i64x2_shr_s");
+ VU.set(kScratchReg, E64, m1);
+ andi(rhs.gp(), rhs.gp(), 64 - 1);
+ vsra_vx(dst.fp().toV(), lhs.fp().toV(), rhs.gp());
}
void LiftoffAssembler::emit_i64x2_shri_s(LiftoffRegister dst,
LiftoffRegister lhs, int32_t rhs) {
- bailout(kSimd, "emit_i64x2_shri_s");
+ VU.set(kScratchReg, E64, m1);
+ if (is_uint5(rhs % 64)) {
+ vsra_vi(dst.fp().toV(), lhs.fp().toV(), rhs % 64);
+ } else {
+ li(kScratchReg, rhs % 64);
+ vsra_vx(dst.fp().toV(), lhs.fp().toV(), kScratchReg);
+ }
}
void LiftoffAssembler::emit_i64x2_shr_u(LiftoffRegister dst,
LiftoffRegister lhs,
LiftoffRegister rhs) {
- bailout(kSimd, "emit_i64x2_shr_u");
+ VU.set(kScratchReg, E64, m1);
+ andi(rhs.gp(), rhs.gp(), 64 - 1);
+ vsrl_vx(dst.fp().toV(), lhs.fp().toV(), rhs.gp());
}
void LiftoffAssembler::emit_i64x2_shri_u(LiftoffRegister dst,
LiftoffRegister lhs, int32_t rhs) {
- bailout(kSimd, "emit_i64x2_shri_u");
+ VU.set(kScratchReg, E64, m1);
+ if (is_uint5(rhs % 64)) {
+ vsrl_vi(dst.fp().toV(), lhs.fp().toV(), rhs % 64);
+ } else {
+ li(kScratchReg, rhs % 64);
+ vsrl_vx(dst.fp().toV(), lhs.fp().toV(), kScratchReg);
+ }
}
void LiftoffAssembler::emit_i64x2_add(LiftoffRegister dst, LiftoffRegister lhs,
@@ -2541,7 +2946,8 @@ void LiftoffAssembler::emit_i64x2_sub(LiftoffRegister dst, LiftoffRegister lhs,
void LiftoffAssembler::emit_i64x2_mul(LiftoffRegister dst, LiftoffRegister lhs,
LiftoffRegister rhs) {
- bailout(kSimd, "emit_i64x2_mul");
+ VU.set(kScratchReg, E64, m1);
+ vmul_vv(dst.fp().toV(), lhs.fp().toV(), rhs.fp().toV());
}
void LiftoffAssembler::emit_f32x4_abs(LiftoffRegister dst,
@@ -2558,7 +2964,8 @@ void LiftoffAssembler::emit_f32x4_neg(LiftoffRegister dst,
void LiftoffAssembler::emit_f32x4_sqrt(LiftoffRegister dst,
LiftoffRegister src) {
- bailout(kSimd, "emit_f32x4_sqrt");
+ VU.set(kScratchReg, E32, m1);
+ vfsqrt_v(dst.fp().toV(), src.fp().toV());
}
bool LiftoffAssembler::emit_f32x4_ceil(LiftoffRegister dst,
@@ -2575,13 +2982,13 @@ bool LiftoffAssembler::emit_f32x4_floor(LiftoffRegister dst,
bool LiftoffAssembler::emit_f32x4_trunc(LiftoffRegister dst,
LiftoffRegister src) {
- bailout(kSimd, "emit_f32x4_trunc");
+ Trunc_f(dst.fp().toV(), src.fp().toV(), kScratchReg, kSimd128ScratchReg);
return true;
}
bool LiftoffAssembler::emit_f32x4_nearest_int(LiftoffRegister dst,
LiftoffRegister src) {
- bailout(kSimd, "emit_f32x4_nearest_int");
+ Round_f(dst.fp().toV(), src.fp().toV(), kScratchReg, kSimd128ScratchReg);
return true;
}
@@ -2601,13 +3008,13 @@ void LiftoffAssembler::emit_f32x4_mul(LiftoffRegister dst, LiftoffRegister lhs,
LiftoffRegister rhs) {
VU.set(kScratchReg, E32, m1);
VU.set(RoundingMode::RTZ);
- vfmul_vv(dst.fp().toV(), rhs.fp().toV(), lhs.fp().toV());
+ vfmul_vv(dst.fp().toV(), lhs.fp().toV(), rhs.fp().toV());
}
void LiftoffAssembler::emit_f32x4_div(LiftoffRegister dst, LiftoffRegister lhs,
LiftoffRegister rhs) {
VU.set(kScratchReg, E32, m1);
- vfdiv_vv(dst.fp().toV(), rhs.fp().toV(), lhs.fp().toV());
+ vfdiv_vv(dst.fp().toV(), lhs.fp().toV(), rhs.fp().toV());
}
void LiftoffAssembler::emit_f32x4_min(LiftoffRegister dst, LiftoffRegister lhs,
@@ -2638,12 +3045,18 @@ void LiftoffAssembler::emit_f32x4_max(LiftoffRegister dst, LiftoffRegister lhs,
void LiftoffAssembler::emit_f32x4_pmin(LiftoffRegister dst, LiftoffRegister lhs,
LiftoffRegister rhs) {
- bailout(kSimd, "emit_f32x4_pmin");
+ VU.set(kScratchReg, E32, m1);
+ // b < a ? b : a
+ vmflt_vv(v0, rhs.fp().toV(), lhs.fp().toV());
+ vmerge_vv(dst.fp().toV(), rhs.fp().toV(), lhs.fp().toV());
}
void LiftoffAssembler::emit_f32x4_pmax(LiftoffRegister dst, LiftoffRegister lhs,
LiftoffRegister rhs) {
- bailout(kSimd, "emit_f32x4_pmax");
+ VU.set(kScratchReg, E32, m1);
+ // a < b ? b : a
+ vmflt_vv(v0, lhs.fp().toV(), rhs.fp().toV());
+ vmerge_vv(dst.fp().toV(), rhs.fp().toV(), lhs.fp().toV());
}
void LiftoffAssembler::emit_f64x2_abs(LiftoffRegister dst,
@@ -2660,7 +3073,8 @@ void LiftoffAssembler::emit_f64x2_neg(LiftoffRegister dst,
void LiftoffAssembler::emit_f64x2_sqrt(LiftoffRegister dst,
LiftoffRegister src) {
- bailout(kSimd, "emit_f64x2_sqrt");
+ VU.set(kScratchReg, E64, m1);
+ vfsqrt_v(dst.fp().toV(), src.fp().toV());
}
bool LiftoffAssembler::emit_f64x2_ceil(LiftoffRegister dst,
@@ -2677,13 +3091,13 @@ bool LiftoffAssembler::emit_f64x2_floor(LiftoffRegister dst,
bool LiftoffAssembler::emit_f64x2_trunc(LiftoffRegister dst,
LiftoffRegister src) {
- bailout(kSimd, "emit_f64x2_trunc");
+ Trunc_d(dst.fp().toV(), src.fp().toV(), kScratchReg, kSimd128ScratchReg);
return true;
}
bool LiftoffAssembler::emit_f64x2_nearest_int(LiftoffRegister dst,
LiftoffRegister src) {
- bailout(kSimd, "emit_f64x2_nearest_int");
+ Round_d(dst.fp().toV(), src.fp().toV(), kScratchReg, kSimd128ScratchReg);
return true;
}
@@ -2701,32 +3115,56 @@ void LiftoffAssembler::emit_f64x2_sub(LiftoffRegister dst, LiftoffRegister lhs,
void LiftoffAssembler::emit_f64x2_mul(LiftoffRegister dst, LiftoffRegister lhs,
LiftoffRegister rhs) {
- bailout(kSimd, "emit_f64x2_mul");
+ VU.set(kScratchReg, E64, m1);
+ vfmul_vv(dst.fp().toV(), lhs.fp().toV(), rhs.fp().toV());
}
void LiftoffAssembler::emit_f64x2_div(LiftoffRegister dst, LiftoffRegister lhs,
LiftoffRegister rhs) {
- bailout(kSimd, "emit_f64x2_div");
+ VU.set(kScratchReg, E64, m1);
+ vfdiv_vv(dst.fp().toV(), lhs.fp().toV(), rhs.fp().toV());
}
void LiftoffAssembler::emit_f64x2_min(LiftoffRegister dst, LiftoffRegister lhs,
LiftoffRegister rhs) {
- bailout(kSimd, "emit_f64x2_min");
+ VU.set(kScratchReg, E64, m1);
+ const int64_t kNaN = 0x7ff8000000000000L;
+ vmfeq_vv(v0, lhs.fp().toV(), lhs.fp().toV());
+ vmfeq_vv(kSimd128ScratchReg, rhs.fp().toV(), rhs.fp().toV());
+ vand_vv(v0, v0, kSimd128ScratchReg);
+ li(kScratchReg, kNaN);
+ vmv_vx(kSimd128ScratchReg, kScratchReg);
+ vfmin_vv(kSimd128ScratchReg, rhs.fp().toV(), lhs.fp().toV(), Mask);
+ vmv_vv(dst.fp().toV(), kSimd128ScratchReg);
}
void LiftoffAssembler::emit_f64x2_max(LiftoffRegister dst, LiftoffRegister lhs,
LiftoffRegister rhs) {
- bailout(kSimd, "emit_f64x2_max");
+ VU.set(kScratchReg, E64, m1);
+ const int64_t kNaN = 0x7ff8000000000000L;
+ vmfeq_vv(v0, lhs.fp().toV(), lhs.fp().toV());
+ vmfeq_vv(kSimd128ScratchReg, rhs.fp().toV(), rhs.fp().toV());
+ vand_vv(v0, v0, kSimd128ScratchReg);
+ li(kScratchReg, kNaN);
+ vmv_vx(kSimd128ScratchReg, kScratchReg);
+ vfmax_vv(kSimd128ScratchReg, rhs.fp().toV(), lhs.fp().toV(), Mask);
+ vmv_vv(dst.fp().toV(), kSimd128ScratchReg);
}
void LiftoffAssembler::emit_f64x2_pmin(LiftoffRegister dst, LiftoffRegister lhs,
LiftoffRegister rhs) {
- bailout(kSimd, "emit_f64x2_pmin");
+ VU.set(kScratchReg, E64, m1);
+ // b < a ? b : a
+ vmflt_vv(v0, rhs.fp().toV(), lhs.fp().toV());
+ vmerge_vv(dst.fp().toV(), rhs.fp().toV(), lhs.fp().toV());
}
void LiftoffAssembler::emit_f64x2_pmax(LiftoffRegister dst, LiftoffRegister lhs,
LiftoffRegister rhs) {
- bailout(kSimd, "emit_f64x2_pmax");
+ VU.set(kScratchReg, E64, m1);
+ // a < b ? b : a
+ vmflt_vv(v0, lhs.fp().toV(), rhs.fp().toV());
+ vmerge_vv(dst.fp().toV(), rhs.fp().toV(), lhs.fp().toV());
}
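Two different float min/max flavors are implemented here: f64x2.min/max propagate NaN (any NaN operand yields a canonical NaN, hence the mask of "both operands ordered" lanes over a NaN-prefilled vector), while pmin/pmax are bare selects (b < a ? b : a and a < b ? b : a) that simply return the first operand whenever the comparison is false, NaN included. Scalar contrast for one lane:

#include <cmath>
#include <limits>

// NaN-propagating wasm f64x2.min.
double WasmMin(double a, double b) {
  if (std::isnan(a) || std::isnan(b))
    return std::numeric_limits<double>::quiet_NaN();           // canonical NaN
  if (a == 0.0 && b == 0.0) return std::signbit(a) ? a : b;    // -0 is smaller than +0
  return a < b ? a : b;
}

// Pseudo-min (f64x2.pmin): a bare select; with a NaN in b it returns a.
double WasmPMin(double a, double b) { return b < a ? b : a; }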
void LiftoffAssembler::emit_i32x4_sconvert_f32x4(LiftoffRegister dst,
@@ -2764,138 +3202,219 @@ void LiftoffAssembler::emit_f32x4_uconvert_i32x4(LiftoffRegister dst,
void LiftoffAssembler::emit_i8x16_sconvert_i16x8(LiftoffRegister dst,
LiftoffRegister lhs,
LiftoffRegister rhs) {
- bailout(kSimd, "emit_i8x16_sconvert_i16x8");
+ VU.set(kScratchReg, E16, m1);
+ vmv_vv(v26, lhs.fp().toV());
+  vmv_vv(v27, rhs.fp().toV());
+ VU.set(kScratchReg, E8, m1);
+ VU.set(RoundingMode::RNE);
+ vnclip_vi(dst.fp().toV(), v26, 0);
}
void LiftoffAssembler::emit_i8x16_uconvert_i16x8(LiftoffRegister dst,
LiftoffRegister lhs,
LiftoffRegister rhs) {
- bailout(kSimd, "emit_i8x16_uconvert_i16x8");
+ VU.set(kScratchReg, E16, m1);
+ vmv_vv(v26, lhs.fp().toV());
+  vmv_vv(v27, rhs.fp().toV());
+ VU.set(kScratchReg, E16, m2);
+ vmax_vx(v26, v26, zero_reg);
+ VU.set(kScratchReg, E8, m1);
+ VU.set(RoundingMode::RNE);
+ vnclipu_vi(dst.fp().toV(), v26, 0);
}
void LiftoffAssembler::emit_i16x8_sconvert_i32x4(LiftoffRegister dst,
LiftoffRegister lhs,
LiftoffRegister rhs) {
- VRegister dst_v = dst.fp().toV();
- VRegister lhs_v = lhs.fp().toV();
- VRegister rhs_v = rhs.fp().toV();
- VU.set(kScratchReg, E32, m2);
- VRegister tmp_lo =
- GetUnusedRegister(kFpReg, LiftoffRegList::ForRegs(lhs, rhs)).fp().toV();
- VRegister tmp_hi = VRegister::from_code(tmp_lo.code() + 1);
VU.set(kScratchReg, E32, m1);
- vmv_vv(tmp_lo, rhs_v);
- vmv_vv(tmp_hi, lhs_v);
+ vmv_vv(v26, lhs.fp().toV());
+  vmv_vv(v27, rhs.fp().toV());
VU.set(kScratchReg, E16, m1);
VU.set(RoundingMode::RNE);
- vnclip_vi(dst_v, tmp_lo, 0);
+ vnclip_vi(dst.fp().toV(), v26, 0);
}
void LiftoffAssembler::emit_i16x8_uconvert_i32x4(LiftoffRegister dst,
LiftoffRegister lhs,
LiftoffRegister rhs) {
- VRegister dst_v = dst.fp().toV();
- VRegister lhs_v = lhs.fp().toV();
- VRegister rhs_v = rhs.fp().toV();
- VU.set(kScratchReg, E32, m2);
- VRegister tmp_lo =
- GetUnusedRegister(kFpReg, LiftoffRegList::ForRegs(lhs, rhs)).fp().toV();
- VRegister tmp_hi = VRegister::from_code(tmp_lo.code() + 1);
VU.set(kScratchReg, E32, m1);
- vmv_vv(tmp_lo, rhs_v);
- vmv_vv(tmp_hi, lhs_v);
+ vmv_vv(v26, lhs.fp().toV());
+  vmv_vv(v27, rhs.fp().toV());
VU.set(kScratchReg, E32, m2);
- vmax_vx(tmp_lo, tmp_lo, zero_reg);
+ vmax_vx(v26, v26, zero_reg);
VU.set(kScratchReg, E16, m1);
VU.set(RoundingMode::RNE);
- vnclipu_vi(dst_v, tmp_lo, 0);
+ vnclipu_vi(dst.fp().toV(), v26, 0);
}
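The narrow (sconvert/uconvert) ops clamp each wider lane into the narrower signed or unsigned range before packing, which is what the vnclip/vnclipu on the v26:v27 LMUL=2 group does; the unsigned form additionally floors negatives at zero via vmax_vx. Scalar model of i16x8.narrow_i32x4_s:

#include <algorithm>
#include <cstdint>

// i16x8.narrow_i32x4_s: lanes 0..3 from a, 4..7 from b, each clamped to int16.
void NarrowI32x4S(int16_t dst[8], const int32_t a[4], const int32_t b[4]) {
  auto clamp = [](int32_t v) {
    return static_cast<int16_t>(std::min(32767, std::max(-32768, v)));
  };
  for (int i = 0; i < 4; ++i) dst[i] = clamp(a[i]);
  for (int i = 0; i < 4; ++i) dst[i + 4] = clamp(b[i]);
}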
void LiftoffAssembler::emit_i16x8_sconvert_i8x16_low(LiftoffRegister dst,
LiftoffRegister src) {
- bailout(kSimd, "emit_i16x8_sconvert_i8x16_low");
+ VU.set(kScratchReg, E16, m1);
+ vmv_vv(kSimd128ScratchReg, src.fp().toV());
+ vsext_vf2(dst.fp().toV(), kSimd128ScratchReg);
}
void LiftoffAssembler::emit_i16x8_sconvert_i8x16_high(LiftoffRegister dst,
LiftoffRegister src) {
- bailout(kSimd, "emit_i16x8_sconvert_i8x16_high");
+ VU.set(kScratchReg, E8, m1);
+ vslidedown_vi(kSimd128ScratchReg, src.fp().toV(), 8);
+ VU.set(kScratchReg, E16, m1);
+ vsext_vf2(dst.fp().toV(), kSimd128ScratchReg);
}
void LiftoffAssembler::emit_i16x8_uconvert_i8x16_low(LiftoffRegister dst,
LiftoffRegister src) {
- bailout(kSimd, "emit_i16x8_uconvert_i8x16_low");
+ VU.set(kScratchReg, E16, m1);
+ vmv_vv(kSimd128ScratchReg, src.fp().toV());
+ vzext_vf2(dst.fp().toV(), kSimd128ScratchReg);
}
void LiftoffAssembler::emit_i16x8_uconvert_i8x16_high(LiftoffRegister dst,
LiftoffRegister src) {
- bailout(kSimd, "emit_i16x8_uconvert_i8x16_high");
+ VU.set(kScratchReg, E8, m1);
+ vslidedown_vi(kSimd128ScratchReg, src.fp().toV(), 8);
+ VU.set(kScratchReg, E16, m1);
+ vzext_vf2(dst.fp().toV(), kSimd128ScratchReg);
}
void LiftoffAssembler::emit_i32x4_sconvert_i16x8_low(LiftoffRegister dst,
LiftoffRegister src) {
- bailout(kSimd, "emit_i32x4_sconvert_i16x8_low");
+ VU.set(kScratchReg, E32, m1);
+ vmv_vv(kSimd128ScratchReg, src.fp().toV());
+ vsext_vf2(dst.fp().toV(), kSimd128ScratchReg);
}
void LiftoffAssembler::emit_i32x4_sconvert_i16x8_high(LiftoffRegister dst,
LiftoffRegister src) {
- bailout(kSimd, "emit_i32x4_sconvert_i16x8_high");
+ VU.set(kScratchReg, E16, m1);
+ vslidedown_vi(kSimd128ScratchReg, src.fp().toV(), 4);
+ VU.set(kScratchReg, E32, m1);
+ vsext_vf2(dst.fp().toV(), kSimd128ScratchReg);
}
void LiftoffAssembler::emit_i32x4_uconvert_i16x8_low(LiftoffRegister dst,
LiftoffRegister src) {
- bailout(kSimd, "emit_i32x4_uconvert_i16x8_low");
+ VU.set(kScratchReg, E32, m1);
+ vmv_vv(kSimd128ScratchReg, src.fp().toV());
+ vzext_vf2(dst.fp().toV(), kSimd128ScratchReg);
}
void LiftoffAssembler::emit_i32x4_uconvert_i16x8_high(LiftoffRegister dst,
LiftoffRegister src) {
- bailout(kSimd, "emit_i32x4_uconvert_i16x8_high");
+ VU.set(kScratchReg, E16, m1);
+ vslidedown_vi(kSimd128ScratchReg, src.fp().toV(), 4);
+ VU.set(kScratchReg, E32, m1);
+ vzext_vf2(dst.fp().toV(), kSimd128ScratchReg);
}
void LiftoffAssembler::emit_i8x16_rounding_average_u(LiftoffRegister dst,
LiftoffRegister lhs,
LiftoffRegister rhs) {
- bailout(kSimd, "emit_i8x16_rounding_average_u");
+ VU.set(kScratchReg, E8, m1);
+ vwaddu_vv(kSimd128ScratchReg, lhs.fp().toV(), rhs.fp().toV());
+ li(kScratchReg, 1);
+ vwaddu_wx(kSimd128ScratchReg3, kSimd128ScratchReg, kScratchReg);
+ li(kScratchReg, 2);
+ VU.set(kScratchReg2, E16, m2);
+ vdivu_vx(kSimd128ScratchReg3, kSimd128ScratchReg3, kScratchReg);
+ VU.set(kScratchReg2, E8, m1);
+ vnclipu_vi(dst.fp().toV(), kSimd128ScratchReg3, 0);
}
-
void LiftoffAssembler::emit_i16x8_rounding_average_u(LiftoffRegister dst,
LiftoffRegister lhs,
LiftoffRegister rhs) {
- bailout(kSimd, "emit_i16x8_rounding_average_u");
+ VU.set(kScratchReg2, E16, m1);
+ vwaddu_vv(kSimd128ScratchReg, lhs.fp().toV(), rhs.fp().toV());
+ li(kScratchReg, 1);
+ vwaddu_wx(kSimd128ScratchReg3, kSimd128ScratchReg, kScratchReg);
+ li(kScratchReg, 2);
+ VU.set(kScratchReg2, E32, m2);
+ vdivu_vx(kSimd128ScratchReg3, kSimd128ScratchReg3, kScratchReg);
+ VU.set(kScratchReg2, E16, m1);
+ vnclipu_vi(dst.fp().toV(), kSimd128ScratchReg3, 0);
}
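i8x16/i16x8.avgr_u is the rounding average (a + b + 1) >> 1; the code widens the operands, adds 1, divides by 2 in the wider element type so the intermediate sum cannot overflow, then narrows back. Scalar reference:

#include <cstdint>

// i8x16.avgr_u for one lane, computed in a wider type to avoid overflow.
uint8_t AvgRoundU8(uint8_t a, uint8_t b) {
  return static_cast<uint8_t>((uint16_t{a} + uint16_t{b} + 1) >> 1);
}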
void LiftoffAssembler::emit_i8x16_abs(LiftoffRegister dst,
LiftoffRegister src) {
- bailout(kSimd, "emit_i8x16_abs");
+ VU.set(kScratchReg, E8, m1);
+ vmv_vx(kSimd128RegZero, zero_reg);
+ vmv_vv(dst.fp().toV(), src.fp().toV());
+ vmslt_vv(v0, src.fp().toV(), kSimd128RegZero);
+ vneg_vv(dst.fp().toV(), src.fp().toV(), MaskType::Mask);
}
void LiftoffAssembler::emit_i16x8_abs(LiftoffRegister dst,
LiftoffRegister src) {
- bailout(kSimd, "emit_i16x8_abs");
+ VU.set(kScratchReg, E16, m1);
+ vmv_vx(kSimd128RegZero, zero_reg);
+ vmv_vv(dst.fp().toV(), src.fp().toV());
+ vmslt_vv(v0, src.fp().toV(), kSimd128RegZero);
+ vneg_vv(dst.fp().toV(), src.fp().toV(), MaskType::Mask);
}
void LiftoffAssembler::emit_i64x2_abs(LiftoffRegister dst,
LiftoffRegister src) {
- bailout(kSimd, "emit_i64x2_abs");
+ VU.set(kScratchReg, E64, m1);
+ vmv_vx(kSimd128RegZero, zero_reg);
+ vmv_vv(dst.fp().toV(), src.fp().toV());
+ vmslt_vv(v0, src.fp().toV(), kSimd128RegZero);
+ vneg_vv(dst.fp().toV(), src.fp().toV(), MaskType::Mask);
}
void LiftoffAssembler::emit_i32x4_extadd_pairwise_i16x8_s(LiftoffRegister dst,
LiftoffRegister src) {
- bailout(kSimd, "i32x4.extadd_pairwise_i16x8_s");
+ VU.set(kScratchReg, E64, m1);
+ li(kScratchReg, 0x0006000400020000);
+ vmv_sx(kSimd128ScratchReg, kScratchReg);
+ li(kScratchReg, 0x0007000500030001);
+ vmv_sx(kSimd128ScratchReg3, kScratchReg);
+ VU.set(kScratchReg, E16, m1);
+ vrgather_vv(kSimd128ScratchReg2, src.fp().toV(), kSimd128ScratchReg);
+ vrgather_vv(kSimd128ScratchReg, src.fp().toV(), kSimd128ScratchReg3);
+ VU.set(kScratchReg, E16, mf2);
+ vwadd_vv(dst.fp().toV(), kSimd128ScratchReg, kSimd128ScratchReg2);
}
void LiftoffAssembler::emit_i32x4_extadd_pairwise_i16x8_u(LiftoffRegister dst,
LiftoffRegister src) {
- bailout(kSimd, "i32x4.extadd_pairwise_i16x8_u");
+ VU.set(kScratchReg, E64, m1);
+ li(kScratchReg, 0x0006000400020000);
+ vmv_sx(kSimd128ScratchReg, kScratchReg);
+ li(kScratchReg, 0x0007000500030001);
+ vmv_sx(kSimd128ScratchReg3, kScratchReg);
+ VU.set(kScratchReg, E16, m1);
+ vrgather_vv(kSimd128ScratchReg2, src.fp().toV(), kSimd128ScratchReg);
+ vrgather_vv(kSimd128ScratchReg, src.fp().toV(), kSimd128ScratchReg3);
+ VU.set(kScratchReg, E16, mf2);
+ vwaddu_vv(dst.fp().toV(), kSimd128ScratchReg, kSimd128ScratchReg2);
}
void LiftoffAssembler::emit_i16x8_extadd_pairwise_i8x16_s(LiftoffRegister dst,
LiftoffRegister src) {
- bailout(kSimd, "i16x8.extadd_pairwise_i8x16_s");
+ VU.set(kScratchReg, E64, m1);
+ li(kScratchReg, 0x0E0C0A0806040200);
+ vmv_sx(kSimd128ScratchReg, kScratchReg);
+ li(kScratchReg, 0x0F0D0B0907050301);
+ vmv_sx(kSimd128ScratchReg3, kScratchReg);
+ VU.set(kScratchReg, E8, m1);
+ vrgather_vv(kSimd128ScratchReg2, src.fp().toV(), kSimd128ScratchReg);
+ vrgather_vv(kSimd128ScratchReg, src.fp().toV(), kSimd128ScratchReg3);
+ VU.set(kScratchReg, E8, mf2);
+ vwadd_vv(dst.fp().toV(), kSimd128ScratchReg, kSimd128ScratchReg2);
}
void LiftoffAssembler::emit_i16x8_extadd_pairwise_i8x16_u(LiftoffRegister dst,
LiftoffRegister src) {
- bailout(kSimd, "i16x8.extadd_pairwise_i8x16_u");
+ VU.set(kScratchReg, E64, m1);
+ li(kScratchReg, 0x0E0C0A0806040200);
+ vmv_sx(kSimd128ScratchReg, kScratchReg);
+ li(kScratchReg, 0x0F0D0B0907050301);
+ vmv_sx(kSimd128ScratchReg3, kScratchReg);
+ VU.set(kScratchReg, E8, m1);
+ vrgather_vv(kSimd128ScratchReg2, src.fp().toV(), kSimd128ScratchReg);
+ vrgather_vv(kSimd128ScratchReg, src.fp().toV(), kSimd128ScratchReg3);
+ VU.set(kScratchReg, E8, mf2);
+ vwaddu_vv(dst.fp().toV(), kSimd128ScratchReg, kSimd128ScratchReg2);
}
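
Note: all four extadd_pairwise lowerings use the same trick: two index vectors gather the even-indexed and odd-indexed lanes of the source (the 0x0E0C... and 0x0F0D... constants are byte indices 0,2,4,... and 1,3,5,...), and a widening add then produces each pair's sum at double the lane width. A scalar sketch of one output lane of i16x8.extadd_pairwise_i8x16_u, illustrative only:

    #include <cassert>
    #include <cstdint>

    // One output lane: the widened sum of an adjacent (even, odd) pair of
    // unsigned 8-bit input lanes.
    static uint16_t ExtAddPairwiseU8(const uint8_t src[16], int out_lane) {
      uint8_t even = src[2 * out_lane];     // gathered via 0x0E0C0A0806040200
      uint8_t odd = src[2 * out_lane + 1];  // gathered via 0x0F0D0B0907050301
      return static_cast<uint16_t>(even) + static_cast<uint16_t>(odd);
    }

    int main() {
      uint8_t src[16] = {200, 100, 1, 2};  // remaining lanes default to zero
      assert(ExtAddPairwiseU8(src, 0) == 300);
      assert(ExtAddPairwiseU8(src, 1) == 3);
      return 0;
    }
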
void LiftoffAssembler::emit_i32x4_abs(LiftoffRegister dst,
@@ -2904,66 +3423,85 @@ void LiftoffAssembler::emit_i32x4_abs(LiftoffRegister dst,
vmv_vx(kSimd128RegZero, zero_reg);
vmv_vv(dst.fp().toV(), src.fp().toV());
vmslt_vv(v0, src.fp().toV(), kSimd128RegZero);
- vsub_vv(dst.fp().toV(), kSimd128RegZero, src.fp().toV(), Mask);
+ vneg_vv(dst.fp().toV(), src.fp().toV(), MaskType::Mask);
}
void LiftoffAssembler::emit_i8x16_extract_lane_s(LiftoffRegister dst,
LiftoffRegister lhs,
uint8_t imm_lane_idx) {
- bailout(kSimd, "emit_i8x16_extract_lane_s");
+ VU.set(kScratchReg, E8, m1);
+ vslidedown_vi(kSimd128ScratchReg, lhs.fp().toV(), imm_lane_idx);
+ vmv_xs(dst.gp(), kSimd128ScratchReg);
}
void LiftoffAssembler::emit_i8x16_extract_lane_u(LiftoffRegister dst,
LiftoffRegister lhs,
uint8_t imm_lane_idx) {
- bailout(kSimd, "emit_i8x16_extract_lane_u");
+ VU.set(kScratchReg, E8, m1);
+ vslidedown_vi(kSimd128ScratchReg, lhs.fp().toV(), imm_lane_idx);
+ vmv_xs(dst.gp(), kSimd128ScratchReg);
+ slli(dst.gp(), dst.gp(), 64 - 8);
+ srli(dst.gp(), dst.gp(), 64 - 8);
}
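
Note: the unsigned extract differs from the signed one only in the trailing shift pair. vmv_xs sign-extends element 0 into the 64-bit GPR, so the unsigned form clears the upper bits with slli/srli by 64 - 8 (and by 64 - 16 for i16x8 below). A scalar sketch of that zero-extension, illustrative only:

    #include <cassert>
    #include <cstdint>

    // Zero-extend the low 8 bits of a possibly sign-extended 64-bit value,
    // mirroring the slli/srli pair by 56.
    static uint64_t ZeroExtendLow8(int64_t sign_extended) {
      uint64_t x = static_cast<uint64_t>(sign_extended);
      x <<= 64 - 8;  // slli dst, dst, 56
      x >>= 64 - 8;  // srli dst, dst, 56 (logical shift clears the upper bits)
      return x;
    }

    int main() {
      assert(ZeroExtendLow8(-1) == 0xFF);    // lane value 0xFF arrives sign-extended
      assert(ZeroExtendLow8(0x7F) == 0x7F);  // non-negative lanes are unchanged
      return 0;
    }
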
void LiftoffAssembler::emit_i16x8_extract_lane_s(LiftoffRegister dst,
LiftoffRegister lhs,
uint8_t imm_lane_idx) {
- bailout(kSimd, "emit_i16x8_extract_lane_s");
+ VU.set(kScratchReg, E16, m1);
+ vslidedown_vi(kSimd128ScratchReg, lhs.fp().toV(), imm_lane_idx);
+ vmv_xs(dst.gp(), kSimd128ScratchReg);
}
void LiftoffAssembler::emit_i16x8_extract_lane_u(LiftoffRegister dst,
LiftoffRegister lhs,
uint8_t imm_lane_idx) {
- bailout(kSimd, "emit_i16x8_extract_lane_u");
+ VU.set(kScratchReg, E16, m1);
+ vslidedown_vi(kSimd128ScratchReg, lhs.fp().toV(), imm_lane_idx);
+ vmv_xs(dst.gp(), kSimd128ScratchReg);
+ slli(dst.gp(), dst.gp(), 64 - 16);
+ srli(dst.gp(), dst.gp(), 64 - 16);
}
void LiftoffAssembler::emit_i32x4_extract_lane(LiftoffRegister dst,
LiftoffRegister lhs,
uint8_t imm_lane_idx) {
VU.set(kScratchReg, E32, m1);
- vslidedown_vi(v31, lhs.fp().toV(), imm_lane_idx);
- vmv_xs(dst.gp(), v31);
+ vslidedown_vi(kSimd128ScratchReg, lhs.fp().toV(), imm_lane_idx);
+ vmv_xs(dst.gp(), kSimd128ScratchReg);
}
void LiftoffAssembler::emit_i64x2_extract_lane(LiftoffRegister dst,
LiftoffRegister lhs,
uint8_t imm_lane_idx) {
- bailout(kSimd, "emit_i64x2_extract_lane");
+ VU.set(kScratchReg, E64, m1);
+ vslidedown_vi(kSimd128ScratchReg, lhs.fp().toV(), imm_lane_idx);
+ vmv_xs(dst.gp(), kSimd128ScratchReg);
}
void LiftoffAssembler::emit_f32x4_extract_lane(LiftoffRegister dst,
LiftoffRegister lhs,
uint8_t imm_lane_idx) {
- bailout(kSimd, "emit_f32x4_extract_lane");
+ VU.set(kScratchReg, E32, m1);
+ vslidedown_vi(kSimd128ScratchReg, lhs.fp().toV(), imm_lane_idx);
+ vfmv_fs(dst.fp(), kSimd128ScratchReg);
}
void LiftoffAssembler::emit_f64x2_extract_lane(LiftoffRegister dst,
LiftoffRegister lhs,
uint8_t imm_lane_idx) {
- bailout(kSimd, "emit_f64x2_extract_lane");
+ VU.set(kScratchReg, E64, m1);
+ vslidedown_vi(kSimd128ScratchReg, lhs.fp().toV(), imm_lane_idx);
+ vfmv_fs(dst.fp(), kSimd128ScratchReg);
}
void LiftoffAssembler::emit_i8x16_replace_lane(LiftoffRegister dst,
LiftoffRegister src1,
LiftoffRegister src2,
uint8_t imm_lane_idx) {
- VU.set(kScratchReg, E8, m1);
+ VU.set(kScratchReg, E64, m1);
li(kScratchReg, 0x1 << imm_lane_idx);
vmv_sx(v0, kScratchReg);
+ VU.set(kScratchReg, E8, m1);
vmerge_vx(dst.fp().toV(), src2.gp(), src1.fp().toV());
}
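
Note: the replace_lane lowerings build a one-hot mask (1 << imm_lane_idx) in v0 and then vmerge the scalar into the selected lane; the change above sets E64 first so the mask constant is moved in one piece before switching back to the lane width. A scalar sketch of the merge-by-mask idea, illustrative only:

    #include <cassert>
    #include <cstdint>

    // Replace one 8-bit lane by merging: the one-hot mask selects which lane
    // takes the scalar replacement, every other lane keeps src1.
    static void ReplaceLaneI8(uint8_t dst[16], const uint8_t src1[16],
                              uint8_t src2, int imm_lane_idx) {
      uint16_t mask = static_cast<uint16_t>(1) << imm_lane_idx;  // vmv_sx(v0, ...)
      for (int lane = 0; lane < 16; ++lane) {
        bool selected = (mask >> lane) & 1;
        dst[lane] = selected ? src2 : src1[lane];  // vmerge_vx
      }
    }

    int main() {
      uint8_t src1[16] = {};
      uint8_t dst[16];
      ReplaceLaneI8(dst, src1, 42, 3);
      assert(dst[3] == 42 && dst[0] == 0 && dst[15] == 0);
      return 0;
    }
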
@@ -3001,21 +3539,39 @@ void LiftoffAssembler::emit_f32x4_replace_lane(LiftoffRegister dst,
LiftoffRegister src1,
LiftoffRegister src2,
uint8_t imm_lane_idx) {
- bailout(kSimd, "emit_f32x4_replace_lane");
+ VU.set(kScratchReg, E32, m1);
+ li(kScratchReg, 0x1 << imm_lane_idx);
+ vmv_sx(v0, kScratchReg);
+ fmv_x_w(kScratchReg, src2.fp());
+ vmerge_vx(dst.fp().toV(), kScratchReg, src1.fp().toV());
}
void LiftoffAssembler::emit_f64x2_replace_lane(LiftoffRegister dst,
LiftoffRegister src1,
LiftoffRegister src2,
uint8_t imm_lane_idx) {
- bailout(kSimd, "emit_f64x2_replace_lane");
+ VU.set(kScratchReg, E64, m1);
+ li(kScratchReg, 0x1 << imm_lane_idx);
+ vmv_sx(v0, kScratchReg);
+ fmv_x_d(kScratchReg, src2.fp());
+ vmerge_vx(dst.fp().toV(), kScratchReg, src1.fp().toV());
}
void LiftoffAssembler::emit_s128_set_if_nan(Register dst, LiftoffRegister src,
Register tmp_gp,
LiftoffRegister tmp_s128,
ValueKind lane_kind) {
- bailout(kSimd, "emit_s128_set_if_nan");
+ DoubleRegister tmp_fp = tmp_s128.fp();
+ vfredmax_vs(kSimd128ScratchReg, src.fp().toV(), src.fp().toV());
+ vfmv_fs(tmp_fp, kSimd128ScratchReg);
+ if (lane_kind == kF32) {
+ feq_s(kScratchReg, tmp_fp, tmp_fp); // scratch <- !IsNan(tmp_fp)
+ } else {
+ DCHECK_EQ(lane_kind, kF64);
+ feq_d(kScratchReg, tmp_fp, tmp_fp); // scratch <- !IsNan(tmp_fp)
+ }
+ not_(kScratchReg, kScratchReg);
+ Sw(kScratchReg, MemOperand(dst));
}
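
Note: emit_s128_set_if_nan reduces the vector with vfredmax and then relies on self-comparison: feq_s/feq_d of a value with itself returns 0 exactly when that value is NaN, and the inverted result is stored through dst. A minimal scalar illustration of the self-comparison test, not V8 code:

    #include <cassert>
    #include <cmath>

    // Self-comparison is the classic NaN test: an ordinary value compares
    // equal to itself, NaN never does.
    static bool IsNaNBySelfCompare(double x) { return !(x == x); }

    int main() {
      assert(!IsNaNBySelfCompare(1.5));
      assert(IsNaNBySelfCompare(std::nan("")));
      return 0;
    }
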
void LiftoffAssembler::StackCheck(Label* ool_code, Register limit_address) {
diff --git a/deps/v8/src/wasm/baseline/s390/liftoff-assembler-s390.h b/deps/v8/src/wasm/baseline/s390/liftoff-assembler-s390.h
index 4282812df9..abd3462050 100644
--- a/deps/v8/src/wasm/baseline/s390/liftoff-assembler-s390.h
+++ b/deps/v8/src/wasm/baseline/s390/liftoff-assembler-s390.h
@@ -614,6 +614,10 @@ void LiftoffAssembler::AtomicAdd(Register dst_addr, Register offset_reg,
AtomicCmpExchangeU16(ip, result.gp(), tmp1, tmp2, r0, r1);
b(Condition(4), &doadd);
LoadU16(result.gp(), result.gp());
+#ifdef V8_TARGET_BIG_ENDIAN
+ lrvr(result.gp(), result.gp());
+ ShiftRightU32(result.gp(), result.gp(), Operand(16));
+#endif
break;
}
case StoreType::kI32Store:
@@ -631,6 +635,9 @@ void LiftoffAssembler::AtomicAdd(Register dst_addr, Register offset_reg,
CmpAndSwap(tmp1, tmp2, MemOperand(ip));
b(Condition(4), &doadd);
LoadU32(result.gp(), tmp1);
+#ifdef V8_TARGET_BIG_ENDIAN
+ lrvr(result.gp(), result.gp());
+#endif
break;
}
case StoreType::kI64Store: {
@@ -647,6 +654,9 @@ void LiftoffAssembler::AtomicAdd(Register dst_addr, Register offset_reg,
CmpAndSwap64(tmp1, tmp2, MemOperand(ip));
b(Condition(4), &doadd);
mov(result.gp(), tmp1);
+#ifdef V8_TARGET_BIG_ENDIAN
+ lrvgr(result.gp(), result.gp());
+#endif
break;
}
default:
@@ -706,6 +716,10 @@ void LiftoffAssembler::AtomicSub(Register dst_addr, Register offset_reg,
AtomicCmpExchangeU16(ip, result.gp(), tmp1, tmp2, r0, r1);
b(Condition(4), &do_again);
LoadU16(result.gp(), result.gp());
+#ifdef V8_TARGET_BIG_ENDIAN
+ lrvr(result.gp(), result.gp());
+ ShiftRightU32(result.gp(), result.gp(), Operand(16));
+#endif
break;
}
case StoreType::kI32Store:
@@ -723,6 +737,9 @@ void LiftoffAssembler::AtomicSub(Register dst_addr, Register offset_reg,
CmpAndSwap(tmp1, tmp2, MemOperand(ip));
b(Condition(4), &do_again);
LoadU32(result.gp(), tmp1);
+#ifdef V8_TARGET_BIG_ENDIAN
+ lrvr(result.gp(), result.gp());
+#endif
break;
}
case StoreType::kI64Store: {
@@ -739,6 +756,9 @@ void LiftoffAssembler::AtomicSub(Register dst_addr, Register offset_reg,
CmpAndSwap64(tmp1, tmp2, MemOperand(ip));
b(Condition(4), &do_again);
mov(result.gp(), tmp1);
+#ifdef V8_TARGET_BIG_ENDIAN
+ lrvgr(result.gp(), result.gp());
+#endif
break;
}
default:
@@ -798,6 +818,10 @@ void LiftoffAssembler::AtomicAnd(Register dst_addr, Register offset_reg,
AtomicCmpExchangeU16(ip, result.gp(), tmp1, tmp2, r0, r1);
b(Condition(4), &do_again);
LoadU16(result.gp(), result.gp());
+#ifdef V8_TARGET_BIG_ENDIAN
+ lrvr(result.gp(), result.gp());
+ ShiftRightU32(result.gp(), result.gp(), Operand(16));
+#endif
break;
}
case StoreType::kI32Store:
@@ -815,6 +839,9 @@ void LiftoffAssembler::AtomicAnd(Register dst_addr, Register offset_reg,
CmpAndSwap(tmp1, tmp2, MemOperand(ip));
b(Condition(4), &do_again);
LoadU32(result.gp(), tmp1);
+#ifdef V8_TARGET_BIG_ENDIAN
+ lrvr(result.gp(), result.gp());
+#endif
break;
}
case StoreType::kI64Store: {
@@ -831,6 +858,9 @@ void LiftoffAssembler::AtomicAnd(Register dst_addr, Register offset_reg,
CmpAndSwap64(tmp1, tmp2, MemOperand(ip));
b(Condition(4), &do_again);
mov(result.gp(), tmp1);
+#ifdef V8_TARGET_BIG_ENDIAN
+ lrvgr(result.gp(), result.gp());
+#endif
break;
}
default:
@@ -890,6 +920,10 @@ void LiftoffAssembler::AtomicOr(Register dst_addr, Register offset_reg,
AtomicCmpExchangeU16(ip, result.gp(), tmp1, tmp2, r0, r1);
b(Condition(4), &do_again);
LoadU16(result.gp(), result.gp());
+#ifdef V8_TARGET_BIG_ENDIAN
+ lrvr(result.gp(), result.gp());
+ ShiftRightU32(result.gp(), result.gp(), Operand(16));
+#endif
break;
}
case StoreType::kI32Store:
@@ -907,6 +941,9 @@ void LiftoffAssembler::AtomicOr(Register dst_addr, Register offset_reg,
CmpAndSwap(tmp1, tmp2, MemOperand(ip));
b(Condition(4), &do_again);
LoadU32(result.gp(), tmp1);
+#ifdef V8_TARGET_BIG_ENDIAN
+ lrvr(result.gp(), result.gp());
+#endif
break;
}
case StoreType::kI64Store: {
@@ -923,6 +960,9 @@ void LiftoffAssembler::AtomicOr(Register dst_addr, Register offset_reg,
CmpAndSwap64(tmp1, tmp2, MemOperand(ip));
b(Condition(4), &do_again);
mov(result.gp(), tmp1);
+#ifdef V8_TARGET_BIG_ENDIAN
+ lrvgr(result.gp(), result.gp());
+#endif
break;
}
default:
@@ -982,6 +1022,10 @@ void LiftoffAssembler::AtomicXor(Register dst_addr, Register offset_reg,
AtomicCmpExchangeU16(ip, result.gp(), tmp1, tmp2, r0, r1);
b(Condition(4), &do_again);
LoadU16(result.gp(), result.gp());
+#ifdef V8_TARGET_BIG_ENDIAN
+ lrvr(result.gp(), result.gp());
+ ShiftRightU32(result.gp(), result.gp(), Operand(16));
+#endif
break;
}
case StoreType::kI32Store:
@@ -999,6 +1043,9 @@ void LiftoffAssembler::AtomicXor(Register dst_addr, Register offset_reg,
CmpAndSwap(tmp1, tmp2, MemOperand(ip));
b(Condition(4), &do_again);
LoadU32(result.gp(), tmp1);
+#ifdef V8_TARGET_BIG_ENDIAN
+ lrvr(result.gp(), result.gp());
+#endif
break;
}
case StoreType::kI64Store: {
@@ -1015,6 +1062,9 @@ void LiftoffAssembler::AtomicXor(Register dst_addr, Register offset_reg,
CmpAndSwap64(tmp1, tmp2, MemOperand(ip));
b(Condition(4), &do_again);
mov(result.gp(), tmp1);
+#ifdef V8_TARGET_BIG_ENDIAN
+ lrvgr(result.gp(), result.gp());
+#endif
break;
}
default:
@@ -1135,6 +1185,10 @@ void LiftoffAssembler::AtomicCompareExchange(
#endif
AtomicCmpExchangeU16(ip, result.gp(), r2, r3, r0, r1);
LoadU16(result.gp(), result.gp());
+#ifdef V8_TARGET_BIG_ENDIAN
+ lrvr(result.gp(), result.gp());
+ ShiftRightU32(result.gp(), result.gp(), Operand(16));
+#endif
Pop(r2, r3);
break;
}
@@ -1150,6 +1204,9 @@ void LiftoffAssembler::AtomicCompareExchange(
#endif
CmpAndSwap(r2, r3, MemOperand(ip));
LoadU32(result.gp(), r2);
+#ifdef V8_TARGET_BIG_ENDIAN
+ lrvr(result.gp(), result.gp());
+#endif
Pop(r2, r3);
break;
}
@@ -1164,6 +1221,9 @@ void LiftoffAssembler::AtomicCompareExchange(
#endif
CmpAndSwap64(r2, r3, MemOperand(ip));
mov(result.gp(), r2);
+#ifdef V8_TARGET_BIG_ENDIAN
+ lrvgr(result.gp(), result.gp());
+#endif
Pop(r2, r3);
break;
}
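
Note: on big-endian s390 the value read back by the compare-and-swap helpers has its bytes in reversed order, so every added #ifdef V8_TARGET_BIG_ENDIAN block swaps it back: lrvr/lrvgr reverse the bytes of a 32-/64-bit register, and the extra ShiftRightU32 by 16 re-aligns a 16-bit result that the 32-bit reversal left in the upper halfword. A host-side sketch of that fix-up (the arithmetic only, not the s390 instructions):

    #include <cassert>
    #include <cstdint>

    // Byte-reverse a 32-bit value (what lrvr does), then shift right by 16 so
    // a 16-bit payload that ended up in the upper halfword lands back in the
    // low bits.
    static uint32_t FixupU16Result(uint32_t raw) {
      uint32_t reversed = ((raw & 0x000000FFu) << 24) | ((raw & 0x0000FF00u) << 8) |
                          ((raw & 0x00FF0000u) >> 8) | ((raw & 0xFF000000u) >> 24);
      return reversed >> 16;  // ShiftRightU32(result, result, Operand(16))
    }

    int main() {
      // 0x1234 stored byte-reversed reads back as 0x3412; the fix-up restores it.
      assert(FixupU16Result(0x00003412u) == 0x1234u);
      return 0;
    }
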
diff --git a/deps/v8/src/wasm/baseline/x64/liftoff-assembler-x64.h b/deps/v8/src/wasm/baseline/x64/liftoff-assembler-x64.h
index 1f7ff53d80..895ba42c86 100644
--- a/deps/v8/src/wasm/baseline/x64/liftoff-assembler-x64.h
+++ b/deps/v8/src/wasm/baseline/x64/liftoff-assembler-x64.h
@@ -305,14 +305,14 @@ void LiftoffAssembler::LoadConstant(LiftoffRegister reg, WasmValue value,
RelocInfo::Mode rmode) {
switch (value.type().kind()) {
case kI32:
- if (value.to_i32() == 0 && RelocInfo::IsNone(rmode)) {
+ if (value.to_i32() == 0 && RelocInfo::IsNoInfo(rmode)) {
xorl(reg.gp(), reg.gp());
} else {
movl(reg.gp(), Immediate(value.to_i32(), rmode));
}
break;
case kI64:
- if (RelocInfo::IsNone(rmode)) {
+ if (RelocInfo::IsNoInfo(rmode)) {
TurboAssembler::Move(reg.gp(), value.to_i64());
} else {
movq(reg.gp(), Immediate64(value.to_i64(), rmode));
diff --git a/deps/v8/src/wasm/c-api.cc b/deps/v8/src/wasm/c-api.cc
index a082ec76d2..f501d7e4a9 100644
--- a/deps/v8/src/wasm/c-api.cc
+++ b/deps/v8/src/wasm/c-api.cc
@@ -370,7 +370,7 @@ struct EngineImpl {
delete counter_map_;
#endif
v8::V8::Dispose();
- v8::V8::ShutdownPlatform();
+ v8::V8::DisposePlatform();
}
};
@@ -1443,7 +1443,7 @@ auto make_func(Store* store_abs, FuncData* data) -> own<Func> {
isolate, reinterpret_cast<i::Address>(&FuncData::v8_callback),
embedder_data, SignatureHelper::Serialize(isolate, data->type.get()));
i::WasmApiFunctionRef::cast(
- function->shared().wasm_capi_function_data().ref())
+ function->shared().wasm_capi_function_data().internal().ref())
.set_callable(*function);
auto func = implement<Func>::type::make(store, function);
return func;
@@ -1670,7 +1670,7 @@ auto Func::call(const Val args[], Val results[]) const -> own<Trap> {
// TODO(v8:11880): avoid roundtrips between cdc and code.
i::Handle<i::CodeT> wrapper_code = i::Handle<i::CodeT>(
i::CodeT::cast(function_data->c_wrapper_code()), isolate);
- i::Address call_target = function_data->foreign_address();
+ i::Address call_target = function_data->internal().foreign_address();
i::wasm::CWasmArgumentsPacker packer(function_data->packed_args_size());
PushArgs(sig, args, &packer, store);
@@ -1996,6 +1996,10 @@ auto Table::get(size_t index) const -> own<Ref> {
i::WasmTableObject::Get(isolate, table, static_cast<uint32_t>(index));
// TODO(jkummerow): If we support both JavaScript and the C-API at the same
// time, we need to handle Smis and other JS primitives here.
+ if (result->IsWasmInternalFunction()) {
+ result = handle(
+ i::Handle<i::WasmInternalFunction>::cast(result)->external(), isolate);
+ }
DCHECK(result->IsNull(isolate) || result->IsJSReceiver());
return V8RefValueToWasm(impl(this)->store(), result);
}
@@ -2006,6 +2010,11 @@ auto Table::set(size_t index, const Ref* ref) -> bool {
i::Isolate* isolate = table->GetIsolate();
i::HandleScope handle_scope(isolate);
i::Handle<i::Object> obj = WasmRefToV8(isolate, ref);
+ // TODO(7748): Generalize the condition if other table types are allowed.
+ if ((table->type() == i::wasm::kWasmFuncRef || table->type().has_index()) &&
+ !obj->IsNull()) {
+ obj = i::WasmInternalFunction::FromExternal(obj, isolate).ToHandleChecked();
+ }
i::WasmTableObject::Set(isolate, table, static_cast<uint32_t>(index), obj);
return true;
}
@@ -2019,9 +2028,14 @@ auto Table::grow(size_t delta, const Ref* ref) -> bool {
i::Handle<i::WasmTableObject> table = impl(this)->v8_object();
i::Isolate* isolate = table->GetIsolate();
i::HandleScope scope(isolate);
- i::Handle<i::Object> init_value = WasmRefToV8(isolate, ref);
- int result = i::WasmTableObject::Grow(
- isolate, table, static_cast<uint32_t>(delta), init_value);
+ i::Handle<i::Object> obj = WasmRefToV8(isolate, ref);
+ // TODO(7748): Generalize the condition if other table types are allowed.
+ if ((table->type() == i::wasm::kWasmFuncRef || table->type().has_index()) &&
+ !obj->IsNull()) {
+ obj = i::WasmInternalFunction::FromExternal(obj, isolate).ToHandleChecked();
+ }
+ int result = i::WasmTableObject::Grow(isolate, table,
+ static_cast<uint32_t>(delta), obj);
return result >= 0;
}
diff --git a/deps/v8/src/wasm/code-space-access.cc b/deps/v8/src/wasm/code-space-access.cc
index 83cb5ddea1..b27a7dbce6 100644
--- a/deps/v8/src/wasm/code-space-access.cc
+++ b/deps/v8/src/wasm/code-space-access.cc
@@ -62,8 +62,7 @@ void CodeSpaceWriteScope::SetExecutable() const {
void CodeSpaceWriteScope::SetWritable() const {
DCHECK_NOT_NULL(native_module_);
auto* code_manager = GetWasmCodeManager();
- if (code_manager->HasMemoryProtectionKeySupport()) {
- DCHECK(FLAG_wasm_memory_protection_keys);
+ if (code_manager->MemoryProtectionKeysEnabled()) {
code_manager->SetThreadWritable(true);
} else if (FLAG_wasm_write_protect_code_memory) {
native_module_->AddWriter();
@@ -72,7 +71,7 @@ void CodeSpaceWriteScope::SetWritable() const {
void CodeSpaceWriteScope::SetExecutable() const {
auto* code_manager = GetWasmCodeManager();
- if (code_manager->HasMemoryProtectionKeySupport()) {
+ if (code_manager->MemoryProtectionKeysEnabled()) {
DCHECK(FLAG_wasm_memory_protection_keys);
code_manager->SetThreadWritable(false);
} else if (FLAG_wasm_write_protect_code_memory) {
diff --git a/deps/v8/src/wasm/compilation-environment.h b/deps/v8/src/wasm/compilation-environment.h
index 574fe25cca..f64ab1cdf7 100644
--- a/deps/v8/src/wasm/compilation-environment.h
+++ b/deps/v8/src/wasm/compilation-environment.h
@@ -117,12 +117,27 @@ enum class CompilationEvent : uint8_t {
kFinishedRecompilation
};
+class V8_EXPORT_PRIVATE CompilationEventCallback {
+ public:
+ virtual ~CompilationEventCallback() = default;
+
+ virtual void call(CompilationEvent event) = 0;
+
+ enum class ReleaseAfterFinalEvent { kRelease, kKeep };
+
+ // Tells the module compiler whether to keep or to release a callback when the
+ // compilation state finishes all compilation units. Most callbacks should be
+  // released; that's why there is a default implementation, but the callback
+ // for code caching with dynamic tiering has to stay alive.
+ virtual ReleaseAfterFinalEvent release_after_final_event() {
+ return ReleaseAfterFinalEvent::kRelease;
+ }
+};
+
// The implementation of {CompilationState} lives in module-compiler.cc.
// This is the PIMPL interface to that private class.
class V8_EXPORT_PRIVATE CompilationState {
public:
- using callback_t = std::function<void(CompilationEvent)>;
-
~CompilationState();
void InitCompileJob();
@@ -137,7 +152,7 @@ class V8_EXPORT_PRIVATE CompilationState {
std::shared_ptr<WireBytesStorage> GetWireBytesStorage() const;
- void AddCallback(callback_t);
+ void AddCallback(std::unique_ptr<CompilationEventCallback> callback);
void InitializeAfterDeserialization(
base::Vector<const int> missing_functions);
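
Note: AddCallback now takes ownership of a CompilationEventCallback object instead of a std::function, so a callback can also decide via release_after_final_event() whether it outlives the final compilation event. A stripped-down sketch of the pattern with simplified stand-in types (the real interface is the one added above in v8::internal::wasm):

    #include <cstdio>
    #include <memory>
    #include <vector>

    enum class CompilationEvent { kFinishedBaselineCompilation, kFailedCompilation };

    class CompilationEventCallback {
     public:
      virtual ~CompilationEventCallback() = default;
      virtual void call(CompilationEvent event) = 0;
      enum class ReleaseAfterFinalEvent { kRelease, kKeep };
      virtual ReleaseAfterFinalEvent release_after_final_event() {
        return ReleaseAfterFinalEvent::kRelease;  // most callbacks are released
      }
    };

    // A callback that asks to stay registered after the final event, the way
    // the code-caching callback does under dynamic tiering.
    class LoggingCallback : public CompilationEventCallback {
     public:
      void call(CompilationEvent event) override {
        std::printf("compilation event: %d\n", static_cast<int>(event));
      }
      ReleaseAfterFinalEvent release_after_final_event() override {
        return ReleaseAfterFinalEvent::kKeep;
      }
    };

    int main() {
      std::vector<std::unique_ptr<CompilationEventCallback>> callbacks;
      callbacks.push_back(std::make_unique<LoggingCallback>());
      for (auto& cb : callbacks) cb->call(CompilationEvent::kFinishedBaselineCompilation);
      return 0;
    }
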
diff --git a/deps/v8/src/wasm/function-body-decoder-impl.h b/deps/v8/src/wasm/function-body-decoder-impl.h
index 3d5ec7f933..0d3517c554 100644
--- a/deps/v8/src/wasm/function-body-decoder-impl.h
+++ b/deps/v8/src/wasm/function-body-decoder-impl.h
@@ -1018,7 +1018,8 @@ struct ControlBase : public PcForErrors<validate> {
const Value args[]) \
F(ReturnCallIndirect, const Value& index, \
const CallIndirectImmediate<validate>& imm, const Value args[]) \
- F(BrOnNull, const Value& ref_object, uint32_t depth) \
+ F(BrOnNull, const Value& ref_object, uint32_t depth, \
+ bool pass_null_along_branch, Value* result_on_fallthrough) \
F(BrOnNonNull, const Value& ref_object, uint32_t depth) \
F(SimdOp, WasmOpcode opcode, base::Vector<Value> args, Value* result) \
F(SimdLaneOp, WasmOpcode opcode, const SimdLaneImmediate<validate>& imm, \
@@ -2729,8 +2730,7 @@ class WasmFullDecoder : public WasmDecoder<validate, decoding_mode> {
// The result of br_on_null has the same value as the argument (but a
// non-nullable type).
if (V8_LIKELY(current_code_reachable_and_ok_)) {
- CALL_INTERFACE(BrOnNull, ref_object, imm.depth);
- CALL_INTERFACE(Forward, ref_object, &result);
+ CALL_INTERFACE(BrOnNull, ref_object, imm.depth, false, &result);
c->br_merge()->reached = true;
}
// In unreachable code, we still have to push a value of the correct
@@ -4017,9 +4017,21 @@ class WasmFullDecoder : public WasmDecoder<validate, decoding_mode> {
}
}
- bool ObjectRelatedWithRtt(Value obj, Value rtt) {
- return IsSubtypeOf(ValueType::Ref(rtt.type.ref_index(), kNonNullable),
- obj.type, this->module_) ||
+ // Checks if types are unrelated, thus type checking will always fail. Does
+ // not account for nullability.
+ bool TypeCheckAlwaysFails(Value obj, Value rtt) {
+ return !IsSubtypeOf(ValueType::Ref(rtt.type.ref_index(), kNonNullable),
+ obj.type, this->module_) &&
+ !IsSubtypeOf(obj.type,
+ ValueType::Ref(rtt.type.ref_index(), kNullable),
+ this->module_);
+ }
+
+  // Checks if {obj} is a nominal type which is a subtype of {rtt}'s index, thus
+ // checking will always succeed. Does not account for nullability.
+ bool TypeCheckAlwaysSucceeds(Value obj, Value rtt) {
+ return obj.type.has_index() &&
+ this->module_->has_supertype(obj.type.ref_index()) &&
IsSubtypeOf(obj.type,
ValueType::Ref(rtt.type.ref_index(), kNullable),
this->module_);
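
Note: the decoder uses these two predicates to fold ref.test/ref.cast/br_on_cast* when the static types already decide the outcome: if neither type is a subtype of the other the check can only fail, and if the object's indexed nominal type is already a subtype of the rtt's type it can only succeed, with null handled separately. A self-contained sketch of the three-way decision over a toy subtype relation (the relation and names are illustrative, not V8's):

    #include <cassert>

    // Toy nominal types: a chain id plus a depth; T <: U iff both sit on the
    // same chain and T is at least as deep. Only the shape of the decision is
    // meant to match the decoder logic.
    struct ToyType { int chain; int depth; };

    static bool IsSubtype(ToyType sub, ToyType super) {
      return sub.chain == super.chain && sub.depth >= super.depth;
    }

    enum class StaticCheckResult { kAlwaysFails, kAlwaysSucceeds, kRuntimeCheck };

    static StaticCheckResult StaticRefTest(ToyType obj, ToyType rtt) {
      if (IsSubtype(obj, rtt)) return StaticCheckResult::kAlwaysSucceeds;
      if (!IsSubtype(rtt, obj)) return StaticCheckResult::kAlwaysFails;  // unrelated
      return StaticCheckResult::kRuntimeCheck;  // rtt is a strict subtype of obj
    }

    int main() {
      ToyType top{0, 0}, leaf{0, 2}, other{1, 0};
      assert(StaticRefTest(leaf, top) == StaticCheckResult::kAlwaysSucceeds);
      assert(StaticRefTest(top, leaf) == StaticCheckResult::kRuntimeCheck);
      assert(StaticRefTest(other, leaf) == StaticCheckResult::kAlwaysFails);
      return 0;
    }
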
@@ -4503,13 +4515,24 @@ class WasmFullDecoder : public WasmDecoder<validate, decoding_mode> {
if (current_code_reachable_and_ok_) {
// This logic ensures that code generation can assume that functions
// can only be cast to function types, and data objects to data types.
- if (V8_LIKELY(ObjectRelatedWithRtt(obj, rtt))) {
- CALL_INTERFACE(RefTest, obj, rtt, &value);
- } else {
+ if (V8_UNLIKELY(TypeCheckAlwaysSucceeds(obj, rtt))) {
+ // Drop rtt.
+ CALL_INTERFACE(Drop);
+ // Type checking can still fail for null.
+ if (obj.type.is_nullable()) {
+ // We abuse ref.as_non_null, which isn't otherwise used as a unary
+ // operator, as a sentinel for the negation of ref.is_null.
+ CALL_INTERFACE(UnOp, kExprRefAsNonNull, obj, &value);
+ } else {
+ CALL_INTERFACE(Drop);
+ CALL_INTERFACE(I32Const, &value, 1);
+ }
+ } else if (V8_UNLIKELY(TypeCheckAlwaysFails(obj, rtt))) {
CALL_INTERFACE(Drop);
CALL_INTERFACE(Drop);
- // Unrelated types. Will always fail.
CALL_INTERFACE(I32Const, &value, 0);
+ } else {
+ CALL_INTERFACE(RefTest, obj, rtt, &value);
}
}
Drop(2);
@@ -4556,9 +4579,12 @@ class WasmFullDecoder : public WasmDecoder<validate, decoding_mode> {
if (current_code_reachable_and_ok_) {
// This logic ensures that code generation can assume that functions
// can only be cast to function types, and data objects to data types.
- if (V8_LIKELY(ObjectRelatedWithRtt(obj, rtt))) {
- CALL_INTERFACE(RefCast, obj, rtt, &value);
- } else {
+ if (V8_UNLIKELY(TypeCheckAlwaysSucceeds(obj, rtt))) {
+ // Drop the rtt from the stack, then forward the object value to the
+ // result.
+ CALL_INTERFACE(Drop);
+ CALL_INTERFACE(Forward, obj, &value);
+ } else if (V8_UNLIKELY(TypeCheckAlwaysFails(obj, rtt))) {
// Unrelated types. The only way this will not trap is if the object
// is null.
if (obj.type.is_nullable()) {
@@ -4569,6 +4595,8 @@ class WasmFullDecoder : public WasmDecoder<validate, decoding_mode> {
CALL_INTERFACE(Trap, TrapReason::kTrapIllegalCast);
EndControl();
}
+ } else {
+ CALL_INTERFACE(RefCast, obj, rtt, &value);
}
}
Drop(2);
@@ -4628,20 +4656,30 @@ class WasmFullDecoder : public WasmDecoder<validate, decoding_mode> {
: ValueType::Ref(rtt.type.ref_index(), kNonNullable));
Push(result_on_branch);
if (!VALIDATE(TypeCheckBranch<true>(c, 0))) return 0;
- // This logic ensures that code generation can assume that functions
- // can only be cast to function types, and data objects to data types.
- if (V8_LIKELY(ObjectRelatedWithRtt(obj, rtt))) {
- // The {value_on_branch} parameter we pass to the interface must
- // be pointer-identical to the object on the stack, so we can't
- // reuse {result_on_branch} which was passed-by-value to {Push}.
- Value* value_on_branch = stack_value(1);
- if (V8_LIKELY(current_code_reachable_and_ok_)) {
+ if (V8_LIKELY(current_code_reachable_and_ok_)) {
+ // This logic ensures that code generation can assume that functions
+ // can only be cast to function types, and data objects to data types.
+ if (V8_UNLIKELY(TypeCheckAlwaysSucceeds(obj, rtt))) {
+ CALL_INTERFACE(Drop); // rtt
+ // The branch will still not be taken on null.
+ if (obj.type.is_nullable()) {
+ CALL_INTERFACE(BrOnNonNull, obj, branch_depth.depth);
+ } else {
+ CALL_INTERFACE(BrOrRet, branch_depth.depth, 0);
+ }
+ c->br_merge()->reached = true;
+ } else if (V8_LIKELY(!TypeCheckAlwaysFails(obj, rtt))) {
+ // The {value_on_branch} parameter we pass to the interface must
+ // be pointer-identical to the object on the stack, so we can't
+ // reuse {result_on_branch} which was passed-by-value to {Push}.
+ Value* value_on_branch = stack_value(1);
CALL_INTERFACE(BrOnCast, obj, rtt, value_on_branch,
branch_depth.depth);
c->br_merge()->reached = true;
}
+ // Otherwise the types are unrelated. Do not branch.
}
- // Otherwise the types are unrelated. Do not branch.
+
Drop(result_on_branch);
Push(obj); // Restore stack state on fallthrough.
return opcode_length + branch_depth.length;
@@ -4699,13 +4737,10 @@ class WasmFullDecoder : public WasmDecoder<validate, decoding_mode> {
rtt.type.is_bottom()
? kWasmBottom
: ValueType::Ref(rtt.type.ref_index(), kNonNullable));
- // This logic ensures that code generation can assume that functions
- // can only be cast to function types, and data objects to data types.
if (V8_LIKELY(current_code_reachable_and_ok_)) {
- if (V8_LIKELY(ObjectRelatedWithRtt(obj, rtt))) {
- CALL_INTERFACE(BrOnCastFail, obj, rtt, &result_on_fallthrough,
- branch_depth.depth);
- } else {
+ // This logic ensures that code generation can assume that functions
+ // can only be cast to function types, and data objects to data types.
+ if (V8_UNLIKELY(TypeCheckAlwaysFails(obj, rtt))) {
// Drop {rtt} in the interface.
CALL_INTERFACE(Drop);
// Otherwise the types are unrelated. Always branch.
@@ -4713,8 +4748,25 @@ class WasmFullDecoder : public WasmDecoder<validate, decoding_mode> {
// We know that the following code is not reachable, but according
// to the spec it technically is. Set it to spec-only reachable.
SetSucceedingCodeDynamicallyUnreachable();
+ c->br_merge()->reached = true;
+ } else if (V8_UNLIKELY(TypeCheckAlwaysSucceeds(obj, rtt))) {
+ // Drop {rtt} in the interface.
+ CALL_INTERFACE(Drop);
+ // The branch can still be taken on null.
+ if (obj.type.is_nullable()) {
+ CALL_INTERFACE(BrOnNull, obj, branch_depth.depth, true,
+ &result_on_fallthrough);
+ c->br_merge()->reached = true;
+ } else {
+ // Drop {obj} in the interface.
+ CALL_INTERFACE(Drop);
+ }
+ } else {
+ CALL_INTERFACE(BrOnCastFail, obj, rtt, &result_on_fallthrough,
+ branch_depth.depth);
+ c->br_merge()->reached = true;
}
- c->br_merge()->reached = true;
+ // Otherwise, the type check always succeeds. Do not branch.
}
// Make sure the correct value is on the stack state on fallthrough.
Drop(obj);
diff --git a/deps/v8/src/wasm/graph-builder-interface.cc b/deps/v8/src/wasm/graph-builder-interface.cc
index e1b55535cd..53bfaccf74 100644
--- a/deps/v8/src/wasm/graph-builder-interface.cc
+++ b/deps/v8/src/wasm/graph-builder-interface.cc
@@ -695,7 +695,7 @@ class WasmGraphBuildingInterface {
TFNode* success_control;
TFNode* failure_control;
- builder_->CompareToExternalFunctionAtIndex(
+ builder_->CompareToInternalFunctionAtIndex(
func_ref.node, expected_function_index, &success_control,
&failure_control);
TFNode* initial_effect = effect();
@@ -766,7 +766,7 @@ class WasmGraphBuildingInterface {
TFNode* success_control;
TFNode* failure_control;
- builder_->CompareToExternalFunctionAtIndex(
+ builder_->CompareToInternalFunctionAtIndex(
func_ref.node, expected_function_index, &success_control,
&failure_control);
TFNode* initial_effect = effect();
@@ -784,7 +784,8 @@ class WasmGraphBuildingInterface {
args);
}
- void BrOnNull(FullDecoder* decoder, const Value& ref_object, uint32_t depth) {
+ void BrOnNull(FullDecoder* decoder, const Value& ref_object, uint32_t depth,
+ bool pass_null_along_branch, Value* result_on_fallthrough) {
SsaEnv* false_env = ssa_env_;
SsaEnv* true_env = Split(decoder->zone(), false_env);
false_env->SetNotMerged();
@@ -792,8 +793,9 @@ class WasmGraphBuildingInterface {
&false_env->control);
builder_->SetControl(false_env->control);
SetEnv(true_env);
- BrOrRet(decoder, depth, 1);
+ BrOrRet(decoder, depth, pass_null_along_branch ? 0 : 1);
SetEnv(false_env);
+ result_on_fallthrough->node = ref_object.node;
}
void BrOnNonNull(FullDecoder* decoder, const Value& ref_object,
@@ -1634,6 +1636,18 @@ class WasmGraphBuildingInterface {
const Value args[], Value returns[]) {
size_t param_count = sig->parameter_count();
size_t return_count = sig->return_count();
+
+ // Construct a function signature based on the real function parameters.
+ FunctionSig::Builder real_sig_builder(builder_->graph_zone(), return_count,
+ param_count);
+ for (size_t i = 0; i < param_count; i++) {
+ real_sig_builder.AddParam(args[i].type);
+ }
+ for (size_t i = 0; i < return_count; i++) {
+ real_sig_builder.AddReturn(sig->GetReturn(i));
+ }
+ FunctionSig* real_sig = real_sig_builder.Build();
+
NodeVector arg_nodes(param_count + 1);
base::SmallVector<TFNode*, 1> return_nodes(return_count);
arg_nodes[0] = (call_info.call_mode() == CallInfo::kCallDirect)
@@ -1648,19 +1662,20 @@ class WasmGraphBuildingInterface {
CheckForException(
decoder, builder_->CallIndirect(
call_info.table_index(), call_info.sig_index(),
- base::VectorOf(arg_nodes),
+ real_sig, base::VectorOf(arg_nodes),
base::VectorOf(return_nodes), decoder->position()));
break;
case CallInfo::kCallDirect:
CheckForException(
- decoder, builder_->CallDirect(
- call_info.callee_index(), base::VectorOf(arg_nodes),
- base::VectorOf(return_nodes), decoder->position()));
+ decoder, builder_->CallDirect(call_info.callee_index(), real_sig,
+ base::VectorOf(arg_nodes),
+ base::VectorOf(return_nodes),
+ decoder->position()));
break;
case CallInfo::kCallRef:
CheckForException(
decoder,
- builder_->CallRef(sig, base::VectorOf(arg_nodes),
+ builder_->CallRef(real_sig, base::VectorOf(arg_nodes),
base::VectorOf(return_nodes),
call_info.null_check(), decoder->position()));
break;
@@ -1677,6 +1692,17 @@ class WasmGraphBuildingInterface {
const FunctionSig* sig, const Value args[]) {
size_t arg_count = sig->parameter_count();
+ // Construct a function signature based on the real function parameters.
+ FunctionSig::Builder real_sig_builder(builder_->graph_zone(),
+ sig->return_count(), arg_count);
+ for (size_t i = 0; i < arg_count; i++) {
+ real_sig_builder.AddParam(args[i].type);
+ }
+ for (size_t i = 0; i < sig->return_count(); i++) {
+ real_sig_builder.AddReturn(sig->GetReturn(i));
+ }
+ FunctionSig* real_sig = real_sig_builder.Build();
+
ValueVector arg_values(arg_count + 1);
if (call_info.call_mode() == CallInfo::kCallDirect) {
arg_values[0].node = nullptr;
@@ -1699,22 +1725,23 @@ class WasmGraphBuildingInterface {
switch (call_info.call_mode()) {
case CallInfo::kCallIndirect:
- CheckForException(decoder,
- builder_->ReturnCallIndirect(
- call_info.table_index(), call_info.sig_index(),
- base::VectorOf(arg_nodes), decoder->position()));
+ CheckForException(
+ decoder,
+ builder_->ReturnCallIndirect(
+ call_info.table_index(), call_info.sig_index(), real_sig,
+ base::VectorOf(arg_nodes), decoder->position()));
break;
case CallInfo::kCallDirect:
- CheckForException(decoder,
- builder_->ReturnCall(call_info.callee_index(),
- base::VectorOf(arg_nodes),
- decoder->position()));
+ CheckForException(
+ decoder, builder_->ReturnCall(call_info.callee_index(), real_sig,
+ base::VectorOf(arg_nodes),
+ decoder->position()));
break;
case CallInfo::kCallRef:
- CheckForException(
- decoder, builder_->ReturnCallRef(sig, base::VectorOf(arg_nodes),
- call_info.null_check(),
- decoder->position()));
+ CheckForException(decoder,
+ builder_->ReturnCallRef(
+ real_sig, base::VectorOf(arg_nodes),
+ call_info.null_check(), decoder->position()));
break;
}
}
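
Note: both call paths now rebuild the signature from the argument values actually on the stack (whose ref types may be more precise than the declared parameter types) while keeping the declared returns. A minimal sketch of that rebuild step with placeholder types; the real code uses FunctionSig::Builder in the graph zone:

    #include <cassert>
    #include <string>
    #include <vector>

    // Simplified stand-in: a "signature" is just lists of type names.
    struct ToySig {
      std::vector<std::string> params;
      std::vector<std::string> returns;
    };

    static ToySig BuildRealSig(const std::vector<std::string>& arg_types,
                               const std::vector<std::string>& declared_returns) {
      ToySig real_sig;
      for (const auto& t : arg_types) real_sig.params.push_back(t);           // AddParam(args[i].type)
      for (const auto& r : declared_returns) real_sig.returns.push_back(r);   // AddReturn(sig->GetReturn(i))
      return real_sig;
    }

    int main() {
      // A callee declared as (funcref) -> i32 called with a more precise typed
      // function reference on the stack.
      ToySig real = BuildRealSig({"(ref $f)"}, {"i32"});
      assert(real.params[0] == "(ref $f)" && real.returns[0] == "i32");
      return 0;
    }
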
diff --git a/deps/v8/src/wasm/init-expr-interface.cc b/deps/v8/src/wasm/init-expr-interface.cc
index 818145d095..48cea65260 100644
--- a/deps/v8/src/wasm/init-expr-interface.cc
+++ b/deps/v8/src/wasm/init-expr-interface.cc
@@ -53,10 +53,10 @@ void InitExprInterface::RefNull(FullDecoder* decoder, ValueType type,
void InitExprInterface::RefFunc(FullDecoder* decoder, uint32_t function_index,
Value* result) {
if (isolate_ != nullptr) {
- auto function = WasmInstanceObject::GetOrCreateWasmExternalFunction(
+ auto internal = WasmInstanceObject::GetOrCreateWasmInternalFunction(
isolate_, instance_, function_index);
result->runtime_value = WasmValue(
- function, ValueType::Ref(module_->functions[function_index].sig_index,
+ internal, ValueType::Ref(module_->functions[function_index].sig_index,
kNonNullable));
} else {
outer_module_->functions[function_index].declared = true;
diff --git a/deps/v8/src/wasm/init-expr-interface.h b/deps/v8/src/wasm/init-expr-interface.h
index 535d2286c6..bf08fbf51a 100644
--- a/deps/v8/src/wasm/init-expr-interface.h
+++ b/deps/v8/src/wasm/init-expr-interface.h
@@ -63,7 +63,10 @@ class InitExprInterface {
#define EMPTY_INTERFACE_FUNCTION(name, ...) \
V8_INLINE void name(FullDecoder* decoder, ##__VA_ARGS__) {}
INTERFACE_META_FUNCTIONS(EMPTY_INTERFACE_FUNCTION)
- INTERFACE_NON_CONSTANT_FUNCTIONS(EMPTY_INTERFACE_FUNCTION)
+#undef EMPTY_INTERFACE_FUNCTION
+#define UNREACHABLE_INTERFACE_FUNCTION(name, ...) \
+ V8_INLINE void name(FullDecoder* decoder, ##__VA_ARGS__) { UNREACHABLE(); }
+ INTERFACE_NON_CONSTANT_FUNCTIONS(UNREACHABLE_INTERFACE_FUNCTION)
#undef EMPTY_INTERFACE_FUNCTION
#define DECLARE_INTERFACE_FUNCTION(name, ...) \
diff --git a/deps/v8/src/wasm/jump-table-assembler.cc b/deps/v8/src/wasm/jump-table-assembler.cc
index a04422b888..ef79c55f1c 100644
--- a/deps/v8/src/wasm/jump-table-assembler.cc
+++ b/deps/v8/src/wasm/jump-table-assembler.cc
@@ -25,7 +25,7 @@ bool JumpTableAssembler::EmitJumpSlot(Address target) {
intptr_t displacement = static_cast<intptr_t>(
reinterpret_cast<byte*>(target) - pc_ - kNearJmpInstrSize);
if (!is_int32(displacement)) return false;
- near_jmp(displacement, RelocInfo::NONE); // 5 bytes
+ near_jmp(displacement, RelocInfo::NO_INFO); // 5 bytes
return true;
}
@@ -63,16 +63,16 @@ void JumpTableAssembler::NopBytes(int bytes) {
void JumpTableAssembler::EmitLazyCompileJumpSlot(uint32_t func_index,
Address lazy_compile_target) {
mov(kWasmCompileLazyFuncIndexRegister, func_index); // 5 bytes
- jmp(lazy_compile_target, RelocInfo::NONE); // 5 bytes
+ jmp(lazy_compile_target, RelocInfo::NO_INFO); // 5 bytes
}
bool JumpTableAssembler::EmitJumpSlot(Address target) {
- jmp(target, RelocInfo::NONE);
+ jmp(target, RelocInfo::NO_INFO);
return true;
}
void JumpTableAssembler::EmitFarJumpSlot(Address target) {
- jmp(target, RelocInfo::NONE);
+ jmp(target, RelocInfo::NO_INFO);
}
// static
@@ -136,7 +136,7 @@ void JumpTableAssembler::EmitLazyCompileJumpSlot(uint32_t func_index,
int start = pc_offset();
CodeEntry(); // 0-1 instr
Mov(kWasmCompileLazyFuncIndexRegister.W(), func_index); // 1-2 instr
- Jump(lazy_compile_target, RelocInfo::NONE); // 1 instr
+ Jump(lazy_compile_target, RelocInfo::NO_INFO); // 1 instr
int nop_bytes = start + kLazyCompileTableSlotSize - pc_offset();
DCHECK(nop_bytes == 0 || nop_bytes == kInstrSize);
if (nop_bytes) nop();
@@ -150,7 +150,7 @@ bool JumpTableAssembler::EmitJumpSlot(Address target) {
CodeEntry();
- Jump(target, RelocInfo::NONE);
+ Jump(target, RelocInfo::NO_INFO);
return true;
}
@@ -254,7 +254,7 @@ void JumpTableAssembler::EmitLazyCompileJumpSlot(uint32_t func_index,
li(kWasmCompileLazyFuncIndexRegister, func_index); // max. 2 instr
// Jump produces max. 4 instructions for 32-bit platform
// and max. 6 instructions for 64-bit platform.
- Jump(lazy_compile_target, RelocInfo::NONE);
+ Jump(lazy_compile_target, RelocInfo::NO_INFO);
int nop_bytes = start + kLazyCompileTableSlotSize - pc_offset();
DCHECK_EQ(nop_bytes % kInstrSize, 0);
for (int i = 0; i < nop_bytes; i += kInstrSize) nop();
@@ -266,7 +266,7 @@ bool JumpTableAssembler::EmitJumpSlot(Address target) {
}
void JumpTableAssembler::EmitFarJumpSlot(Address target) {
- JumpToInstructionStream(target);
+ JumpToOffHeapInstructionStream(target);
}
// static
@@ -289,7 +289,7 @@ void JumpTableAssembler::EmitLazyCompileJumpSlot(uint32_t func_index,
int start = pc_offset();
li(kWasmCompileLazyFuncIndexRegister, (int32_t)func_index); // max. 2 instr
// Jump produces max 4 instructions.
- Jump(lazy_compile_target, RelocInfo::NONE);
+ Jump(lazy_compile_target, RelocInfo::NO_INFO);
int nop_bytes = start + kLazyCompileTableSlotSize - pc_offset();
DCHECK_EQ(nop_bytes % kInstrSize, 0);
for (int i = 0; i < nop_bytes; i += kInstrSize) nop();
@@ -299,7 +299,7 @@ bool JumpTableAssembler::EmitJumpSlot(Address target) {
return true;
}
void JumpTableAssembler::EmitFarJumpSlot(Address target) {
- JumpToInstructionStream(target);
+ JumpToOffHeapInstructionStream(target);
}
void JumpTableAssembler::PatchFarJumpSlot(Address slot, Address target) {
UNREACHABLE();
@@ -374,7 +374,7 @@ void JumpTableAssembler::EmitLazyCompileJumpSlot(uint32_t func_index,
int start = pc_offset();
li(kWasmCompileLazyFuncIndexRegister, func_index); // max. 2 instr
// Jump produces max. 8 instructions (include constant pool and j)
- Jump(lazy_compile_target, RelocInfo::NONE);
+ Jump(lazy_compile_target, RelocInfo::NO_INFO);
int nop_bytes = start + kLazyCompileTableSlotSize - pc_offset();
DCHECK_EQ(nop_bytes % kInstrSize, 0);
for (int i = 0; i < nop_bytes; i += kInstrSize) nop();
diff --git a/deps/v8/src/wasm/memory-protection-key.cc b/deps/v8/src/wasm/memory-protection-key.cc
index c3e844ff1c..5bf89edf89 100644
--- a/deps/v8/src/wasm/memory-protection-key.cc
+++ b/deps/v8/src/wasm/memory-protection-key.cc
@@ -184,7 +184,7 @@ void SetPermissionsForMemoryProtectionKey(
}
DISABLE_CFI_ICALL
-bool MemoryProtectionKeyWritable(int key) {
+MemoryProtectionKeyPermission GetMemoryProtectionKeyPermission(int key) {
DCHECK_NE(kNoMemoryProtectionKey, key);
#if defined(V8_OS_LINUX) && defined(V8_HOST_ARCH_X64)
@@ -193,8 +193,10 @@ bool MemoryProtectionKeyWritable(int key) {
// If a valid key was allocated, {pkey_get()} must also be available.
DCHECK_NOT_NULL(pkey_get);
- int permissions = pkey_get(key);
- return permissions == kNoRestrictions;
+ int permission = pkey_get(key);
+ CHECK(permission == kNoRestrictions || permission == kDisableAccess ||
+ permission == kDisableWrite);
+ return static_cast<MemoryProtectionKeyPermission>(permission);
#else
// On platforms without PKU support, this method cannot be called because
// no protection key can have been allocated.
diff --git a/deps/v8/src/wasm/memory-protection-key.h b/deps/v8/src/wasm/memory-protection-key.h
index 7a9ba72194..dd11b419ac 100644
--- a/deps/v8/src/wasm/memory-protection-key.h
+++ b/deps/v8/src/wasm/memory-protection-key.h
@@ -82,9 +82,8 @@ bool SetPermissionsAndMemoryProtectionKey(
void SetPermissionsForMemoryProtectionKey(
int key, MemoryProtectionKeyPermission permissions);
-// Returns {true} if the protection key {key} is write-enabled for the current
-// thread.
-bool MemoryProtectionKeyWritable(int key);
+// Get the permissions of the protection key {key} for the current thread.
+MemoryProtectionKeyPermission GetMemoryProtectionKeyPermission(int key);
} // namespace wasm
} // namespace internal
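
Note: the new GetMemoryProtectionKeyPermission returns the raw permission so callers can distinguish no-restrictions, write-disabled, and access-disabled states. On Linux/x64 this is backed by glibc's pkey_get(); a small standalone sketch of querying a key's permission, assuming a glibc with PKU support (>= 2.27), and not V8 code:

    // Build with: g++ -D_GNU_SOURCE pkey_query.cc
    #include <sys/mman.h>
    #include <cstdio>

    int main() {
      int key = pkey_alloc(0, 0);  // allocate a key with no restrictions
      if (key < 0) {
        std::perror("pkey_alloc");  // kernel or CPU without PKU support
        return 1;
      }
      int permission = pkey_get(key);  // 0, PKEY_DISABLE_WRITE, or PKEY_DISABLE_ACCESS
      std::printf("pkey %d permission bits: %d\n", key, permission);
      pkey_free(key);
      return 0;
    }
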
diff --git a/deps/v8/src/wasm/module-compiler.cc b/deps/v8/src/wasm/module-compiler.cc
index 0a692e7bcd..af7551e535 100644
--- a/deps/v8/src/wasm/module-compiler.cc
+++ b/deps/v8/src/wasm/module-compiler.cc
@@ -17,7 +17,7 @@
#include "src/base/utils/random-number-generator.h"
#include "src/compiler/wasm-compiler.h"
#include "src/handles/global-handles-inl.h"
-#include "src/heap/heap-inl.h" // For CodeSpaceMemoryModificationScope.
+#include "src/heap/heap-inl.h" // For CodePageCollectionMemoryModificationScope.
#include "src/logging/counters-scopes.h"
#include "src/logging/metrics.h"
#include "src/objects/property-descriptor.h"
@@ -572,14 +572,14 @@ class CompilationStateImpl {
// for recompilation and add the respective compilation units. The callback is
// called immediately if no recompilation is needed, or called later
// otherwise.
- void InitializeRecompilation(
- TieringState new_tiering_state,
- CompilationState::callback_t recompilation_finished_callback);
+ void InitializeRecompilation(TieringState new_tiering_state,
+ std::unique_ptr<CompilationEventCallback>
+ recompilation_finished_callback);
- // Add the callback function to be called on compilation events. Needs to be
+ // Add the callback to be called on compilation events. Needs to be
// set before {CommitCompilationUnits} is run to ensure that it receives all
// events. The callback object must support being deleted from any thread.
- void AddCallback(CompilationState::callback_t);
+ void AddCallback(std::unique_ptr<CompilationEventCallback> callback);
// Inserts new functions to compile and kicks off compilation.
void CommitCompilationUnits(
@@ -744,8 +744,8 @@ class CompilationStateImpl {
//////////////////////////////////////////////////////////////////////////////
// Protected by {callbacks_mutex_}:
- // Callback functions to be called on compilation events.
- std::vector<CompilationState::callback_t> callbacks_;
+ // Callbacks to be called on compilation events.
+ std::vector<std::unique_ptr<CompilationEventCallback>> callbacks_;
// Events that already happened.
base::EnumSet<CompilationEvent> finished_events_;
@@ -836,7 +836,8 @@ std::shared_ptr<WireBytesStorage> CompilationState::GetWireBytesStorage()
return Impl(this)->GetWireBytesStorage();
}
-void CompilationState::AddCallback(CompilationState::callback_t callback) {
+void CompilationState::AddCallback(
+ std::unique_ptr<CompilationEventCallback> callback) {
return Impl(this)->AddCallback(std::move(callback));
}
@@ -1252,19 +1253,22 @@ std::vector<CallSiteFeedback> ProcessTypeFeedback(
static_cast<int>(instance->module()->num_imported_functions);
for (int i = 0; i < feedback.length(); i += 2) {
Object value = feedback.get(i);
- if (WasmExportedFunction::IsWasmExportedFunction(value)) {
- // Monomorphic. Mark the target for inlining if it's defined in the
- // same module.
- WasmExportedFunction target = WasmExportedFunction::cast(value);
+ if (value.IsWasmInternalFunction() &&
+ WasmExportedFunction::IsWasmExportedFunction(
+ WasmInternalFunction::cast(value).external())) {
+ // Monomorphic, and the internal function points to a wasm-generated
+ // external function (WasmExportedFunction). Mark the target for inlining
+ // if it's defined in the same module.
+ WasmExportedFunction target = WasmExportedFunction::cast(
+ WasmInternalFunction::cast(value).external());
if (target.instance() == *instance &&
target.function_index() >= imported_functions) {
if (FLAG_trace_wasm_speculative_inlining) {
PrintF("[Function #%d call_ref #%d inlineable (monomorphic)]\n",
func_index, i / 2);
}
- CallRefData data = CallRefData::cast(feedback.get(i + 1));
- result[i / 2] = {target.function_index(),
- static_cast<int>(data.count())};
+ int32_t count = Smi::cast(feedback.get(i + 1)).value();
+ result[i / 2] = {target.function_index(), count};
continue;
}
} else if (value.IsFixedArray()) {
@@ -1274,26 +1278,35 @@ std::vector<CallSiteFeedback> ProcessTypeFeedback(
FixedArray polymorphic = FixedArray::cast(value);
size_t total_count = 0;
for (int j = 0; j < polymorphic.length(); j += 2) {
- total_count += CallRefData::cast(polymorphic.get(j + 1)).count();
+ total_count += Smi::cast(polymorphic.get(j + 1)).value();
}
int found_target = -1;
int found_count = -1;
double best_frequency = 0;
for (int j = 0; j < polymorphic.length(); j += 2) {
- uint32_t this_count = CallRefData::cast(polymorphic.get(j + 1)).count();
+ int32_t this_count = Smi::cast(polymorphic.get(j + 1)).value();
double frequency = static_cast<double>(this_count) / total_count;
if (frequency > best_frequency) best_frequency = frequency;
if (frequency < 0.8) continue;
- Object maybe_target = polymorphic.get(j);
- if (!WasmExportedFunction::IsWasmExportedFunction(maybe_target)) {
+
+ // We reject this polymorphic entry if:
+ // - it is not defined,
+      // - it is not a wasm-defined function (WasmExportedFunction), or
+ // - it was not defined in this module.
+ if (!polymorphic.get(j).IsWasmInternalFunction()) continue;
+ WasmInternalFunction internal =
+ WasmInternalFunction::cast(polymorphic.get(j));
+ if (!WasmExportedFunction::IsWasmExportedFunction(
+ internal.external())) {
continue;
}
WasmExportedFunction target =
- WasmExportedFunction::cast(polymorphic.get(j));
+ WasmExportedFunction::cast(internal.external());
if (target.instance() != *instance ||
target.function_index() < imported_functions) {
continue;
}
+
found_target = target.function_index();
found_count = static_cast<int>(this_count);
if (FLAG_trace_wasm_speculative_inlining) {
@@ -1313,6 +1326,10 @@ std::vector<CallSiteFeedback> ProcessTypeFeedback(
// If we fall through to here, then this call isn't eligible for inlining.
// Possible reasons: uninitialized or megamorphic feedback; or monomorphic
// or polymorphic that didn't meet our requirements.
+ if (FLAG_trace_wasm_speculative_inlining) {
+ PrintF("[Function #%d call_ref #%d *not* inlineable]\n", func_index,
+ i / 2);
+ }
result[i / 2] = {-1, -1};
}
return result;
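
Note: the polymorphic path keeps the 80% rule: it sums all recorded call counts, and a call_ref site is only marked for inlining when a single target accounts for at least 80% of the calls and (per the checks above) is a WasmExportedFunction defined in the same module. A self-contained sketch of the frequency selection alone, with the function-kind and module filtering elided:

    #include <cassert>
    #include <vector>

    struct FeedbackEntry {
      int target_function_index;  // -1 when the slot recorded no usable target
      int count;                  // call count (stored as a Smi in the diff)
    };

    // Returns the function index to inline, or -1 if no target reaches the
    // 80% frequency threshold.
    static int PickInlineTarget(const std::vector<FeedbackEntry>& polymorphic) {
      int total = 0;
      for (const FeedbackEntry& e : polymorphic) total += e.count;
      if (total == 0) return -1;
      for (const FeedbackEntry& e : polymorphic) {
        double frequency = static_cast<double>(e.count) / total;
        if (frequency >= 0.8 && e.target_function_index >= 0) {
          return e.target_function_index;
        }
      }
      return -1;
    }

    int main() {
      assert(PickInlineTarget({{7, 90}, {8, 10}}) == 7);   // 90% of calls hit #7
      assert(PickInlineTarget({{7, 50}, {8, 50}}) == -1);  // no dominant target
      return 0;
    }
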
@@ -1327,7 +1344,7 @@ void TriggerTierUp(Isolate* isolate, NativeModule* native_module,
const WasmModule* module = native_module->module();
size_t priority;
- if (FLAG_new_wasm_dynamic_tiering) {
+ {
base::MutexGuard mutex_guard(&module->type_feedback.mutex);
int saved_priority =
module->type_feedback.feedback_for_function[func_index].tierup_priority;
@@ -1353,11 +1370,6 @@ void TriggerTierUp(Isolate* isolate, NativeModule* native_module,
std::move(feedback);
}
- if (!FLAG_new_wasm_dynamic_tiering) {
- uint32_t* call_array = native_module->num_liftoff_function_calls_array();
- int offset = wasm::declared_function_index(module, func_index);
- priority = base::Relaxed_Load(reinterpret_cast<int*>(&call_array[offset]));
- }
compilation_state->AddTopTierPriorityCompilationUnit(tiering_unit, priority);
}
@@ -1651,7 +1663,7 @@ bool MayCompriseLazyFunctions(const WasmModule* module,
return false;
}
-class CompilationTimeCallback {
+class CompilationTimeCallback : public CompilationEventCallback {
public:
enum CompileMode { kSynchronous, kAsync, kStreaming };
explicit CompilationTimeCallback(
@@ -1666,7 +1678,12 @@ class CompilationTimeCallback {
native_module_(std::move(native_module)),
compile_mode_(compile_mode) {}
- void operator()(CompilationEvent compilation_event) {
+ // Keep this callback alive to be able to record caching metrics.
+ ReleaseAfterFinalEvent release_after_final_event() override {
+ return CompilationEventCallback::ReleaseAfterFinalEvent::kKeep;
+ }
+
+ void call(CompilationEvent compilation_event) override {
DCHECK(base::TimeTicks::IsHighResolution());
std::shared_ptr<NativeModule> native_module = native_module_.lock();
if (!native_module) return;
@@ -1761,9 +1778,9 @@ void CompileNativeModule(Isolate* isolate,
// The callback captures a shared ptr to the semaphore.
auto* compilation_state = Impl(native_module->compilation_state());
if (base::TimeTicks::IsHighResolution()) {
- compilation_state->AddCallback(CompilationTimeCallback{
+ compilation_state->AddCallback(std::make_unique<CompilationTimeCallback>(
isolate->async_counters(), isolate->metrics_recorder(), context_id,
- native_module, CompilationTimeCallback::kSynchronous});
+ native_module, CompilationTimeCallback::kSynchronous));
}
// Initialize the compilation units and kick off background compile tasks.
@@ -1835,7 +1852,8 @@ class BackgroundCompileJob final : public JobTask {
std::shared_ptr<NativeModule> CompileToNativeModule(
Isolate* isolate, const WasmFeatures& enabled, ErrorThrower* thrower,
std::shared_ptr<const WasmModule> module, const ModuleWireBytes& wire_bytes,
- Handle<FixedArray>* export_wrappers_out, int compilation_id) {
+ Handle<FixedArray>* export_wrappers_out, int compilation_id,
+ v8::metrics::Recorder::ContextId context_id) {
const WasmModule* wasm_module = module.get();
WasmEngine* engine = GetWasmEngine();
base::OwnedVector<uint8_t> wire_bytes_copy =
@@ -1872,8 +1890,6 @@ std::shared_ptr<NativeModule> CompileToNativeModule(
// Sync compilation is user blocking, so we increase the priority.
native_module->compilation_state()->SetHighPriority();
- v8::metrics::Recorder::ContextId context_id =
- isolate->GetOrRegisterRecorderContextId(isolate->native_context());
CompileNativeModule(isolate, context_id, thrower, wasm_module, native_module,
export_wrappers_out);
bool cache_hit = !engine->UpdateNativeModuleCache(thrower->error(),
@@ -1897,16 +1913,29 @@ void RecompileNativeModule(NativeModule* native_module,
auto recompilation_finished_semaphore = std::make_shared<base::Semaphore>(0);
auto* compilation_state = Impl(native_module->compilation_state());
+ class RecompilationFinishedCallback : public CompilationEventCallback {
+ public:
+ explicit RecompilationFinishedCallback(
+ std::shared_ptr<base::Semaphore> recompilation_finished_semaphore)
+ : recompilation_finished_semaphore_(
+ std::move(recompilation_finished_semaphore)) {}
+
+ void call(CompilationEvent event) override {
+ DCHECK_NE(CompilationEvent::kFailedCompilation, event);
+ if (event == CompilationEvent::kFinishedRecompilation) {
+ recompilation_finished_semaphore_->Signal();
+ }
+ }
+
+ private:
+ std::shared_ptr<base::Semaphore> recompilation_finished_semaphore_;
+ };
+
// The callback captures a shared ptr to the semaphore.
// Initialize the compilation units and kick off background compile tasks.
compilation_state->InitializeRecompilation(
- tiering_state,
- [recompilation_finished_semaphore](CompilationEvent event) {
- DCHECK_NE(CompilationEvent::kFailedCompilation, event);
- if (event == CompilationEvent::kFinishedRecompilation) {
- recompilation_finished_semaphore->Signal();
- }
- });
+ tiering_state, std::make_unique<RecompilationFinishedCallback>(
+ recompilation_finished_semaphore));
constexpr JobDelegate* kNoDelegate = nullptr;
ExecuteCompilationUnits(compilation_state->native_module_weak(),
@@ -2204,11 +2233,12 @@ void AsyncCompileJob::AsyncCompileSucceeded(Handle<WasmModuleObject> result) {
resolver_->OnCompilationSucceeded(result);
}
-class AsyncCompileJob::CompilationStateCallback {
+class AsyncCompileJob::CompilationStateCallback
+ : public CompilationEventCallback {
public:
explicit CompilationStateCallback(AsyncCompileJob* job) : job_(job) {}
- void operator()(CompilationEvent event) {
+ void call(CompilationEvent event) override {
// This callback is only being called from a foreground task.
switch (event) {
case CompilationEvent::kFinishedExportWrappers:
@@ -2521,14 +2551,15 @@ class AsyncCompileJob::PrepareAndStartCompile : public CompileStep {
CompilationStateImpl* compilation_state =
Impl(job->native_module_->compilation_state());
- compilation_state->AddCallback(CompilationStateCallback{job});
+ compilation_state->AddCallback(
+ std::make_unique<CompilationStateCallback>(job));
if (base::TimeTicks::IsHighResolution()) {
auto compile_mode = job->stream_ == nullptr
? CompilationTimeCallback::kAsync
: CompilationTimeCallback::kStreaming;
- compilation_state->AddCallback(CompilationTimeCallback{
+ compilation_state->AddCallback(std::make_unique<CompilationTimeCallback>(
job->isolate_->async_counters(), job->isolate_->metrics_recorder(),
- job->context_id_, job->native_module_, compile_mode});
+ job->context_id_, job->native_module_, compile_mode));
}
if (start_compilation_) {
@@ -2561,13 +2592,13 @@ class AsyncCompileJob::CompileFailed : public CompileStep {
};
namespace {
-class SampleTopTierCodeSizeCallback {
+class SampleTopTierCodeSizeCallback : public CompilationEventCallback {
public:
explicit SampleTopTierCodeSizeCallback(
std::weak_ptr<NativeModule> native_module)
: native_module_(std::move(native_module)) {}
- void operator()(CompilationEvent event) {
+ void call(CompilationEvent event) override {
if (event != CompilationEvent::kFinishedTopTierCompilation) return;
if (std::shared_ptr<NativeModule> native_module = native_module_.lock()) {
GetWasmEngine()->SampleTopTierCodeSizeInAllIsolates(native_module);
@@ -2600,7 +2631,7 @@ class AsyncCompileJob::CompileFinished : public CompileStep {
// Also, set a callback to sample the code size after top-tier compilation
// finished. This callback will *not* keep the NativeModule alive.
job->native_module_->compilation_state()->AddCallback(
- SampleTopTierCodeSizeCallback{job->native_module_});
+ std::make_unique<SampleTopTierCodeSizeCallback>(job->native_module_));
}
// Then finalize and publish the generated module.
job->FinishCompile(cached_native_module_ != nullptr);
@@ -3169,6 +3200,11 @@ void CompilationStateImpl::AddCompilationUnit(CompilationUnitBuilder* builder,
void CompilationStateImpl::InitializeCompilationProgressAfterDeserialization(
base::Vector<const int> missing_functions) {
+ TRACE_EVENT1("v8.wasm", "wasm.CompilationAfterDeserialization",
+ "num_missing_functions", missing_functions.size());
+ TimedHistogramScope lazy_compile_time_scope(
+ counters()->wasm_compile_after_deserialize());
+
auto* module = native_module_->module();
auto enabled_features = native_module_->enabled_features();
const bool lazy_module = IsLazyModule(module);
@@ -3202,7 +3238,7 @@ void CompilationStateImpl::InitializeCompilationProgressAfterDeserialization(
void CompilationStateImpl::InitializeRecompilation(
TieringState new_tiering_state,
- CompilationState::callback_t recompilation_finished_callback) {
+ std::unique_ptr<CompilationEventCallback> recompilation_finished_callback) {
DCHECK(!failed());
// Hold the mutex as long as possible, to synchronize between multiple
@@ -3281,7 +3317,8 @@ void CompilationStateImpl::InitializeRecompilation(
}
}
-void CompilationStateImpl::AddCallback(CompilationState::callback_t callback) {
+void CompilationStateImpl::AddCallback(
+ std::unique_ptr<CompilationEventCallback> callback) {
base::MutexGuard callbacks_guard(&callbacks_mutex_);
// Immediately trigger events that already happened.
for (auto event : {CompilationEvent::kFinishedExportWrappers,
@@ -3289,7 +3326,7 @@ void CompilationStateImpl::AddCallback(CompilationState::callback_t callback) {
CompilationEvent::kFinishedTopTierCompilation,
CompilationEvent::kFailedCompilation}) {
if (finished_events_.contains(event)) {
- callback(event);
+ callback->call(event);
}
}
constexpr base::EnumSet<CompilationEvent> kFinalEvents{
@@ -3360,12 +3397,13 @@ void CompilationStateImpl::FinalizeJSToWasmWrappers(
*export_wrappers_out = isolate->factory()->NewFixedArray(
MaxNumExportWrappers(module), AllocationType::kOld);
// TODO(6792): Wrappers below are allocated with {Factory::NewCode}. As an
- // optimization we keep the code space unlocked to avoid repeated unlocking
- // because many such wrapper are allocated in sequence below.
+ // optimization we create a code memory modification scope that avoids
+ // changing the page permissions back-and-forth between RWX and RX, because
+  // many such wrappers are allocated in sequence below.
TRACE_EVENT1(TRACE_DISABLED_BY_DEFAULT("v8.wasm.detailed"),
"wasm.FinalizeJSToWasmWrappers", "wrappers",
js_to_wasm_wrapper_units_.size());
- CodeSpaceMemoryModificationScope modification_scope(isolate->heap());
+ CodePageCollectionMemoryModificationScope modification_scope(isolate->heap());
for (auto& unit : js_to_wasm_wrapper_units_) {
DCHECK_EQ(isolate, unit->isolate());
Handle<Code> code = unit->Finalize();
@@ -3549,19 +3587,21 @@ void CompilationStateImpl::TriggerCallbacks(
DCHECK_NE(compilation_id_, kInvalidCompilationID);
TRACE_EVENT1("v8.wasm", event.second, "id", compilation_id_);
for (auto& callback : callbacks_) {
- callback(event.first);
+ callback->call(event.first);
}
}
- // With dynamic tiering, we don't know if we can ever delete the callback.
- // TODO(https://crbug.com/v8/12289): Release some callbacks also when dynamic
- // tiering is enabled.
- if (dynamic_tiering_ == DynamicTiering::kDisabled &&
- outstanding_baseline_units_ == 0 && outstanding_export_wrappers_ == 0 &&
+ if (outstanding_baseline_units_ == 0 && outstanding_export_wrappers_ == 0 &&
outstanding_top_tier_functions_ == 0 &&
outstanding_recompilation_functions_ == 0) {
- // Clear the callbacks because no more events will be delivered.
- callbacks_.clear();
+ callbacks_.erase(
+ std::remove_if(
+ callbacks_.begin(), callbacks_.end(),
+ [](std::unique_ptr<CompilationEventCallback>& event) {
+ return event->release_after_final_event() ==
+ CompilationEventCallback::ReleaseAfterFinalEvent::kRelease;
+ }),
+ callbacks_.end());
}
}
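
Note: instead of clearing every callback after the final event, the list is now filtered with the erase-remove idiom so callbacks that answer kKeep (such as the code-caching callback used with dynamic tiering) stay registered. A generic sketch of that filtering step with a simplified callback type:

    #include <algorithm>
    #include <cassert>
    #include <memory>
    #include <vector>

    struct Callback {
      bool keep_after_final_event = false;
    };

    // Drop every callback that does not ask to be kept, in place.
    static void ReleaseFinishedCallbacks(
        std::vector<std::unique_ptr<Callback>>* callbacks) {
      callbacks->erase(std::remove_if(callbacks->begin(), callbacks->end(),
                                      [](const std::unique_ptr<Callback>& cb) {
                                        return !cb->keep_after_final_event;
                                      }),
                       callbacks->end());
    }

    int main() {
      std::vector<std::unique_ptr<Callback>> callbacks;
      callbacks.push_back(std::make_unique<Callback>());                // released
      callbacks.push_back(std::make_unique<Callback>(Callback{true}));  // kept
      ReleaseFinishedCallbacks(&callbacks);
      assert(callbacks.size() == 1 && callbacks[0]->keep_after_final_event);
      return 0;
    }
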
@@ -3668,6 +3708,27 @@ void CompilationStateImpl::SetError() {
void CompilationStateImpl::WaitForCompilationEvent(
CompilationEvent expect_event) {
+ class WaitForCompilationEventCallback : public CompilationEventCallback {
+ public:
+ WaitForCompilationEventCallback(std::shared_ptr<base::Semaphore> semaphore,
+ std::shared_ptr<std::atomic<bool>> done,
+ base::EnumSet<CompilationEvent> events)
+ : semaphore_(std::move(semaphore)),
+ done_(std::move(done)),
+ events_(events) {}
+
+ void call(CompilationEvent event) override {
+ if (!events_.contains(event)) return;
+ done_->store(true, std::memory_order_relaxed);
+ semaphore_->Signal();
+ }
+
+ private:
+ std::shared_ptr<base::Semaphore> semaphore_;
+ std::shared_ptr<std::atomic<bool>> done_;
+ base::EnumSet<CompilationEvent> events_;
+ };
+
auto semaphore = std::make_shared<base::Semaphore>(0);
auto done = std::make_shared<std::atomic<bool>>(false);
base::EnumSet<CompilationEvent> events{expect_event,
@@ -3675,11 +3736,8 @@ void CompilationStateImpl::WaitForCompilationEvent(
{
base::MutexGuard callbacks_guard(&callbacks_mutex_);
if (finished_events_.contains_any(events)) return;
- callbacks_.emplace_back([semaphore, events, done](CompilationEvent event) {
- if (!events.contains(event)) return;
- done->store(true, std::memory_order_relaxed);
- semaphore->Signal();
- });
+ callbacks_.emplace_back(std::make_unique<WaitForCompilationEventCallback>(
+ semaphore, done, events));
}
class WaitForEventDelegate final : public JobDelegate {
@@ -3798,9 +3856,10 @@ void CompileJsToWasmWrappers(Isolate* isolate, const WasmModule* module,
// Finalize compilation jobs in the main thread.
// TODO(6792): Wrappers below are allocated with {Factory::NewCode}. As an
- // optimization we keep the code space unlocked to avoid repeated unlocking
- // because many such wrapper are allocated in sequence below.
- CodeSpaceMemoryModificationScope modification_scope(isolate->heap());
+ // optimization we create a code memory modification scope that avoids
+ // changing the page permissions back-and-forth between RWX and RX, because
+ // many such wrappers are allocated in sequence below.
+ CodePageCollectionMemoryModificationScope modification_scope(isolate->heap());
for (auto& pair : compilation_units) {
JSToWasmWrapperKey key = pair.first;
JSToWasmWrapperCompilationUnit* unit = pair.second.get();
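The hunks above and below replace the plain callback_t function objects with a virtual CompilationEventCallback interface: AddCallback now takes ownership of a std::unique_ptr<CompilationEventCallback>, events are delivered through call(), and TriggerCallbacks erases only the callbacks whose release_after_final_event() answers kRelease instead of clearing the whole vector. A minimal sketch of the interface these hunks imply; the names are taken from the diff, while the default shown in the base class and the surrounding scaffolding are assumptions:

class CompilationEventCallback {
 public:
  virtual ~CompilationEventCallback() = default;

  // Invoked for every compilation event, instead of invoking the old
  // function object directly.
  virtual void call(CompilationEvent event) = 0;

  enum class ReleaseAfterFinalEvent { kRelease, kKeep };

  // Tells CompilationStateImpl whether the callback may be dropped once all
  // outstanding units have finished (kRelease), or must stay registered like
  // the streaming cache callback further below, which returns kKeep so it
  // keeps observing kFinishedCompilationChunk events.
  virtual ReleaseAfterFinalEvent release_after_final_event() {
    return ReleaseAfterFinalEvent::kRelease;
  }
};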
diff --git a/deps/v8/src/wasm/module-compiler.h b/deps/v8/src/wasm/module-compiler.h
index 6470dd4a65..1aab188d29 100644
--- a/deps/v8/src/wasm/module-compiler.h
+++ b/deps/v8/src/wasm/module-compiler.h
@@ -13,6 +13,7 @@
#include <functional>
#include <memory>
+#include "include/v8-metrics.h"
#include "src/base/optional.h"
#include "src/common/globals.h"
#include "src/logging/metrics.h"
@@ -52,7 +53,8 @@ V8_EXPORT_PRIVATE
std::shared_ptr<NativeModule> CompileToNativeModule(
Isolate* isolate, const WasmFeatures& enabled, ErrorThrower* thrower,
std::shared_ptr<const WasmModule> module, const ModuleWireBytes& wire_bytes,
- Handle<FixedArray>* export_wrappers_out, int compilation_id);
+ Handle<FixedArray>* export_wrappers_out, int compilation_id,
+ v8::metrics::Recorder::ContextId context_id);
void RecompileNativeModule(NativeModule* native_module,
TieringState new_tiering_state);
@@ -75,8 +77,8 @@ WasmCode* CompileImportWrapper(
// also lazy.
bool CompileLazy(Isolate*, Handle<WasmInstanceObject>, int func_index);
-void TriggerTierUp(Isolate*, NativeModule*, int func_index,
- Handle<WasmInstanceObject> instance);
+V8_EXPORT_PRIVATE void TriggerTierUp(Isolate*, NativeModule*, int func_index,
+ Handle<WasmInstanceObject> instance);
template <typename Key, typename Hash>
class WrapperQueue {
diff --git a/deps/v8/src/wasm/module-instantiate.cc b/deps/v8/src/wasm/module-instantiate.cc
index 974cdc06ab..f95d378f96 100644
--- a/deps/v8/src/wasm/module-instantiate.cc
+++ b/deps/v8/src/wasm/module-instantiate.cc
@@ -196,6 +196,23 @@ Handle<Map> CreateArrayMap(Isolate* isolate, const WasmModule* module,
return map;
}
+Handle<Map> CreateFuncRefMap(Isolate* isolate, const WasmModule* module,
+ Handle<Map> opt_rtt_parent,
+ Handle<WasmInstanceObject> instance) {
+ const int inobject_properties = 0;
+ const int instance_size =
+ Map::cast(isolate->root(RootIndex::kWasmInternalFunctionMap))
+ .instance_size();
+ const InstanceType instance_type = WASM_INTERNAL_FUNCTION_TYPE;
+ const ElementsKind elements_kind = TERMINAL_FAST_ELEMENTS_KIND;
+ Handle<WasmTypeInfo> type_info = isolate->factory()->NewWasmTypeInfo(
+ kNullAddress, opt_rtt_parent, instance_size, instance);
+ Handle<Map> map = isolate->factory()->NewMap(
+ instance_type, instance_size, elements_kind, inobject_properties);
+ map->set_wasm_type_info(*type_info);
+ return map;
+}
+
void CreateMapForType(Isolate* isolate, const WasmModule* module,
int type_index, Handle<WasmInstanceObject> instance,
Handle<FixedArray> maps) {
@@ -221,11 +238,9 @@ void CreateMapForType(Isolate* isolate, const WasmModule* module,
map = CreateArrayMap(isolate, module, type_index, rtt_parent, instance);
break;
case kWasmFunctionTypeCode:
- // TODO(7748): Think about canonicalizing rtts to make them work for
- // identical function types.
- map = Map::Copy(isolate, isolate->wasm_exported_function_map(),
- "fresh function map for function type canonical rtt "
- "initialization");
+ // TODO(7748): Create funcref RTTs lazily?
+ // TODO(7748): Canonicalize function maps (cross-module)?
+ map = CreateFuncRefMap(isolate, module, rtt_parent, instance);
break;
}
maps->set(type_index, *map);
@@ -265,17 +280,14 @@ Handle<Map> AllocateSubRtt(Isolate* isolate,
Handle<WasmInstanceObject> instance, uint32_t type,
Handle<Map> parent, WasmRttSubMode mode) {
DCHECK(parent->IsWasmStructMap() || parent->IsWasmArrayMap() ||
- parent->IsJSFunctionMap());
+ parent->IsWasmInternalFunctionMap());
const wasm::WasmModule* module = instance->module();
if (module->has_signature(type)) {
- // Currently, parent rtts for functions are meaningless,
- // since (rtt.test func rtt) iff (func.map == rtt).
- // Therefore, we simply create a fresh function map here.
- // TODO(7748): Canonicalize rtts to make them work for identical function
- // types.
- return Map::Copy(isolate, isolate->wasm_exported_function_map(),
- "fresh function map for AllocateSubRtt");
+ // Function references are implicitly allocated with their canonical rtt,
+ // and type checks against sub-rtts will always fail. Therefore, we simply
+ // create a fresh function map here.
+ return CreateFuncRefMap(isolate, module, Handle<Map>(), instance);
}
// If canonicalization is requested, check for an existing RTT first.
Handle<ArrayList> cache;
@@ -668,11 +680,7 @@ MaybeHandle<WasmInstanceObject> InstanceBuilder::Build() {
{
Handle<FixedArray> tables = isolate_->factory()->NewFixedArray(table_count);
- // Table 0 is handled specially. See {InitializeIndirectFunctionTable} for
- // the initilization. All generated and runtime code will use this optimized
- // shortcut in the instance. Hence it is safe to start with table 1 in the
- // iteration below.
- for (int i = 1; i < table_count; ++i) {
+ for (int i = 0; i < table_count; ++i) {
const WasmTable& table = module_->tables[i];
if (IsSubtypeOf(table.type, kWasmFuncRef, module_)) {
Handle<WasmIndirectFunctionTable> table_obj =
@@ -683,6 +691,8 @@ MaybeHandle<WasmInstanceObject> InstanceBuilder::Build() {
instance->set_indirect_function_tables(*tables);
}
+ instance->SetIndirectFunctionTableShortcuts(isolate_);
+
//--------------------------------------------------------------------------
// Process the imports for the module.
//--------------------------------------------------------------------------
@@ -1061,9 +1071,11 @@ bool InstanceBuilder::ProcessImportedFunction(
// is resolved to preserve its identity. This handles exported functions as
// well as functions constructed via other means (e.g. WebAssembly.Function).
if (WasmExternalFunction::IsWasmExternalFunction(*value)) {
- WasmInstanceObject::SetWasmExternalFunction(
+ WasmInstanceObject::SetWasmInternalFunction(
isolate_, instance, func_index,
- Handle<WasmExternalFunction>::cast(value));
+ WasmInternalFunction::FromExternal(
+ Handle<WasmExternalFunction>::cast(value), isolate_)
+ .ToHandleChecked());
}
auto js_receiver = Handle<JSReceiver>::cast(value);
const FunctionSig* expected_sig = module_->functions[func_index].sig;
@@ -1187,9 +1199,10 @@ bool InstanceBuilder::InitializeImportedIndirectFunctionTable(
// Look up the signature's canonical id. If there is no canonical
// id, then the signature does not appear at all in this module,
// so putting {-1} in the table will cause checks to always fail.
- IndirectFunctionTableEntry(instance, table_index, i)
- .Set(module_->signature_map.Find(*sig), target_instance,
- function_index);
+ FunctionTargetAndRef entry(target_instance, function_index);
+ instance->GetIndirectFunctionTable(isolate_, table_index)
+ ->Set(i, module_->signature_map.Find(*sig), entry.call_target(),
+ *entry.ref());
}
return true;
}
@@ -1461,6 +1474,10 @@ bool InstanceBuilder::ProcessImportedGlobal(Handle<WasmInstanceObject> instance,
ReportLinkError(error_message, global_index, module_name, import_name);
return false;
}
+ if (IsSubtypeOf(global.type, kWasmFuncRef, module_) && !value->IsNull()) {
+ value =
+ WasmInternalFunction::FromExternal(value, isolate_).ToHandleChecked();
+ }
WriteGlobalValue(global, WasmValue(value, global.type));
return true;
}
@@ -1706,9 +1723,11 @@ void InstanceBuilder::ProcessExports(Handle<WasmInstanceObject> instance) {
if (import.kind == kExternalFunction) {
Handle<Object> value = sanitized_imports_[index].value;
if (WasmExternalFunction::IsWasmExternalFunction(*value)) {
- WasmInstanceObject::SetWasmExternalFunction(
+ WasmInstanceObject::SetWasmInternalFunction(
isolate_, instance, import.index,
- Handle<WasmExternalFunction>::cast(value));
+ WasmInternalFunction::FromExternal(
+ Handle<WasmExternalFunction>::cast(value), isolate_)
+ .ToHandleChecked());
}
} else if (import.kind == kExternalGlobal) {
Handle<Object> value = sanitized_imports_[index].value;
@@ -1746,9 +1765,11 @@ void InstanceBuilder::ProcessExports(Handle<WasmInstanceObject> instance) {
case kExternalFunction: {
// Wrap and export the code as a JSFunction.
// TODO(wasm): reduce duplication with LoadElemSegment() further below
- Handle<WasmExternalFunction> wasm_external_function =
- WasmInstanceObject::GetOrCreateWasmExternalFunction(
+ Handle<WasmInternalFunction> internal =
+ WasmInstanceObject::GetOrCreateWasmInternalFunction(
isolate_, instance, exp.index);
+ Handle<WasmExternalFunction> wasm_external_function =
+ handle(WasmExternalFunction::cast(internal->external()), isolate_);
desc.set_value(wasm_external_function);
if (is_asm_js &&
@@ -1871,7 +1892,8 @@ void SetNullTableEntry(Isolate* isolate, Handle<WasmInstanceObject> instance,
uint32_t table_index, uint32_t entry_index) {
const WasmModule* module = instance->module();
if (IsSubtypeOf(table_object->type(), kWasmFuncRef, module)) {
- IndirectFunctionTableEntry(instance, table_index, entry_index).clear();
+ instance->GetIndirectFunctionTable(isolate, table_index)
+ ->Clear(entry_index);
}
WasmTableObject::Set(isolate, table_object, entry_index,
isolate->factory()->null_value());
@@ -1888,31 +1910,32 @@ void SetFunctionTableEntry(Isolate* isolate,
// For externref tables, we have to generate the WasmExternalFunction eagerly.
// Later we cannot know if an entry is a placeholder or not.
if (table_object->type().is_reference_to(HeapType::kExtern)) {
- Handle<WasmExternalFunction> wasm_external_function =
- WasmInstanceObject::GetOrCreateWasmExternalFunction(isolate, instance,
+ Handle<WasmInternalFunction> wasm_internal_function =
+ WasmInstanceObject::GetOrCreateWasmInternalFunction(isolate, instance,
func_index);
WasmTableObject::Set(isolate, table_object, entry_index,
- wasm_external_function);
+ wasm_internal_function);
} else {
DCHECK(IsSubtypeOf(table_object->type(), kWasmFuncRef, module));
// Update the local dispatch table first if necessary.
uint32_t sig_id = module->canonicalized_type_ids[function->sig_index];
- IndirectFunctionTableEntry(instance, table_index, entry_index)
- .Set(sig_id, instance, func_index);
+ FunctionTargetAndRef entry(instance, func_index);
+ instance->GetIndirectFunctionTable(isolate, table_index)
+ ->Set(entry_index, sig_id, entry.call_target(), *entry.ref());
// Update the table object's other dispatch tables.
- MaybeHandle<WasmExternalFunction> wasm_external_function =
- WasmInstanceObject::GetWasmExternalFunction(isolate, instance,
+ MaybeHandle<WasmInternalFunction> wasm_internal_function =
+ WasmInstanceObject::GetWasmInternalFunction(isolate, instance,
func_index);
- if (wasm_external_function.is_null()) {
+ if (wasm_internal_function.is_null()) {
// No JSFunction entry yet exists for this function. Create a
// {Tuple2} holding the information to lazily allocate one.
WasmTableObject::SetFunctionTablePlaceholder(
isolate, table_object, entry_index, instance, func_index);
} else {
table_object->entries().set(entry_index,
- *wasm_external_function.ToHandleChecked());
+ *wasm_internal_function.ToHandleChecked());
}
// UpdateDispatchTables() updates all other dispatch tables, since
// we have not yet added the dispatch table we are currently building.
@@ -1944,19 +1967,22 @@ void InstanceBuilder::InitializeIndirectFunctionTables(
SetNullTableEntry(isolate_, instance, table_object, table_index,
entry_index);
}
- } else if (WasmExportedFunction::IsWasmExportedFunction(*value)) {
+ } else if (value->IsWasmInternalFunction()) {
+ Handle<Object> external = handle(
+ Handle<WasmInternalFunction>::cast(value)->external(), isolate_);
+ // TODO(manoskouk): Support WasmJSFunction/WasmCapiFunction.
+ if (!WasmExportedFunction::IsWasmExportedFunction(*external)) {
+ thrower_->TypeError(
+ "Initializing a table with a Webassembly.Function object is not "
+ "supported yet");
+ }
uint32_t function_index =
- Handle<WasmExportedFunction>::cast(value)->function_index();
+ Handle<WasmExportedFunction>::cast(external)->function_index();
for (uint32_t entry_index = 0; entry_index < table.initial_size;
entry_index++) {
SetFunctionTableEntry(isolate_, instance, table_object, table_index,
entry_index, function_index);
}
- } else if (WasmJSFunction::IsWasmJSFunction(*value)) {
- // TODO(manoskouk): Support WasmJSFunction.
- thrower_->TypeError(
- "Initializing a table with a Webassembly.Function object is not "
- "supported yet");
} else {
for (uint32_t entry_index = 0; entry_index < table.initial_size;
entry_index++) {
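Several hunks in this file (and later in wasm-js.cc) repeat one wrap/unwrap pattern: JavaScript continues to see WasmExternalFunction objects, while tables, globals, and dispatch tables now store WasmInternalFunction objects. A condensed sketch of the two directions, built only from the conversions visible in the diff; the helper names ToInternal/ToExternal are illustrative and not V8 API:

// Storing a JS-provided function reference (imports, table elements, funcref
// globals): switch to the internal representation first.
Handle<Object> ToInternal(Isolate* isolate, Handle<Object> value) {
  if (value->IsNull()) return value;
  return WasmInternalFunction::FromExternal(value, isolate).ToHandleChecked();
}

// Handing a stored funcref back to JavaScript (exports, Table.get,
// Global.value): unwrap it to the external JSFunction again.
Handle<Object> ToExternal(Isolate* isolate, Handle<Object> value) {
  if (!value->IsWasmInternalFunction()) return value;
  return handle(Handle<WasmInternalFunction>::cast(value)->external(), isolate);
}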
diff --git a/deps/v8/src/wasm/stacks.h b/deps/v8/src/wasm/stacks.h
index df574d0aec..6e7c7f49d7 100644
--- a/deps/v8/src/wasm/stacks.h
+++ b/deps/v8/src/wasm/stacks.h
@@ -19,52 +19,93 @@ namespace internal {
namespace wasm {
struct JumpBuffer {
- void* sp;
+ Address sp;
+ Address fp;
void* stack_limit;
// TODO(thibaudm/fgm): Add general-purpose registers.
};
constexpr int kJmpBufSpOffset = offsetof(JumpBuffer, sp);
+constexpr int kJmpBufFpOffset = offsetof(JumpBuffer, fp);
constexpr int kJmpBufStackLimitOffset = offsetof(JumpBuffer, stack_limit);
class StackMemory {
public:
- static StackMemory* New() { return new StackMemory(); }
+ static StackMemory* New(Isolate* isolate) { return new StackMemory(isolate); }
// Returns a non-owning view of the current stack.
static StackMemory* GetCurrentStackView(Isolate* isolate) {
byte* limit =
reinterpret_cast<byte*>(isolate->stack_guard()->real_jslimit());
- return new StackMemory(limit);
+ return new StackMemory(isolate, limit);
}
~StackMemory() {
+ if (FLAG_trace_wasm_stack_switching) {
+ PrintF("Delete stack (sp: %p)\n", reinterpret_cast<void*>(jmpbuf_.sp));
+ }
PageAllocator* allocator = GetPlatformPageAllocator();
if (owned_) allocator->DecommitPages(limit_, size_);
+ // We don't need to handle removing the last stack from the list (next_ ==
+ // this). This only happens on isolate tear-down; otherwise there is always
+ // at least one reachable stack (the active stack).
+ isolate_->wasm_stacks() = next_;
+ prev_->next_ = next_;
+ next_->prev_ = prev_;
}
- void* limit() { return limit_; }
- void* base() { return limit_ + size_; }
+ void* jslimit() const { return limit_ + kJSLimitOffsetKB; }
+ Address base() const { return reinterpret_cast<Address>(limit_ + size_); }
+ JumpBuffer* jmpbuf() { return &jmpbuf_; }
+
+ // Insert a stack in the linked list after this stack.
+ void Add(StackMemory* stack) {
+ stack->next_ = this->next_;
+ stack->prev_ = this;
+ this->next_->prev_ = stack;
+ this->next_ = stack;
+ }
+
+ StackMemory* next() { return next_; }
// Track external memory usage for Managed<StackMemory> objects.
size_t owned_size() { return sizeof(StackMemory) + (owned_ ? size_ : 0); }
+ bool IsActive() {
+ byte* sp = reinterpret_cast<byte*>(GetCurrentStackPosition());
+ return limit_ < sp && sp <= limit_ + size_;
+ }
private:
+ static constexpr int kJSLimitOffsetKB = 40;
+
// This constructor allocates a new stack segment.
- StackMemory() : owned_(true) {
+ explicit StackMemory(Isolate* isolate) : isolate_(isolate), owned_(true) {
PageAllocator* allocator = GetPlatformPageAllocator();
- size_ = allocator->AllocatePageSize();
- // TODO(thibaudm): Leave space for runtime functions.
- limit_ = static_cast<byte*>(allocator->AllocatePages(
- nullptr, size_, size_, PageAllocator::kReadWrite));
+ int kJsStackSizeKB = 4;
+ size_ = (kJsStackSizeKB + kJSLimitOffsetKB) * KB;
+ size_ = RoundUp(size_, allocator->AllocatePageSize());
+ limit_ = static_cast<byte*>(
+ allocator->AllocatePages(nullptr, size_, allocator->AllocatePageSize(),
+ PageAllocator::kReadWrite));
+ if (FLAG_trace_wasm_stack_switching)
+ PrintF("Allocate stack (sp: %p, limit: %p)\n", limit_ + size_, limit_);
}
// Overload to represent a view of the libc stack.
- explicit StackMemory(byte* limit) : limit_(limit), size_(0), owned_(false) {}
+ StackMemory(Isolate* isolate, byte* limit)
+ : isolate_(isolate),
+ limit_(limit),
+ size_(reinterpret_cast<size_t>(limit)),
+ owned_(false) {}
+ Isolate* isolate_;
byte* limit_;
size_t size_;
bool owned_;
+ JumpBuffer jmpbuf_;
+ // Stacks form a circular doubly linked list per isolate.
+ StackMemory* next_ = this;
+ StackMemory* prev_ = this;
};
} // namespace wasm
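The new next_/prev_ members turn the per-isolate stacks into a circular doubly linked list: a freshly constructed StackMemory is a one-element ring (both pointers initialized to this), Add() splices a stack in right after the current one, and the destructor unlinks it again. A standalone sketch of that invariant; the Node type is illustrative only:

struct Node {
  Node* next_ = this;  // a single node forms a ring of size one
  Node* prev_ = this;

  // Splice {n} in right after {this}, mirroring StackMemory::Add().
  void Add(Node* n) {
    n->next_ = next_;
    n->prev_ = this;
    next_->prev_ = n;
    next_ = n;
  }

  // Unlink {this}, mirroring the bookkeeping in ~StackMemory(). As the comment
  // in the destructor notes, the last node (next_ == this) is only removed at
  // isolate tear-down, so the empty-ring case needs no special handling.
  void Unlink() {
    prev_->next_ = next_;
    next_->prev_ = prev_;
  }
};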
diff --git a/deps/v8/src/wasm/streaming-decoder.cc b/deps/v8/src/wasm/streaming-decoder.cc
index c332f3f94a..d182c87dbb 100644
--- a/deps/v8/src/wasm/streaming-decoder.cc
+++ b/deps/v8/src/wasm/streaming-decoder.cc
@@ -315,15 +315,21 @@ void AsyncStreamingDecoder::Abort() {
namespace {
-class CompilationChunkFinishedCallback {
+class CompilationChunkFinishedCallback : public CompilationEventCallback {
public:
CompilationChunkFinishedCallback(
std::weak_ptr<NativeModule> native_module,
AsyncStreamingDecoder::ModuleCompiledCallback callback)
: native_module_(std::move(native_module)),
- callback_(std::move(callback)) {}
+ callback_(std::move(callback)) {
+ // As a baseline we also count the modules that could be cached but
+ // never reach the threshold.
+ if (std::shared_ptr<NativeModule> module = native_module_.lock()) {
+ module->counters()->wasm_cache_count()->AddSample(0);
+ }
+ }
- void operator()(CompilationEvent event) const {
+ void call(CompilationEvent event) override {
if (event != CompilationEvent::kFinishedCompilationChunk &&
event != CompilationEvent::kFinishedTopTierCompilation) {
return;
@@ -331,13 +337,19 @@ class CompilationChunkFinishedCallback {
// If the native module is still alive, get back a shared ptr and call the
// callback.
if (std::shared_ptr<NativeModule> native_module = native_module_.lock()) {
+ native_module->counters()->wasm_cache_count()->AddSample(++cache_count_);
callback_(native_module);
}
}
+ ReleaseAfterFinalEvent release_after_final_event() override {
+ return CompilationEventCallback::ReleaseAfterFinalEvent::kKeep;
+ }
+
private:
const std::weak_ptr<NativeModule> native_module_;
const AsyncStreamingDecoder::ModuleCompiledCallback callback_;
+ int cache_count_ = 0;
};
} // namespace
@@ -346,8 +358,9 @@ void AsyncStreamingDecoder::NotifyNativeModuleCreated(
const std::shared_ptr<NativeModule>& native_module) {
if (!module_compiled_callback_) return;
auto* comp_state = native_module->compilation_state();
- comp_state->AddCallback(CompilationChunkFinishedCallback{
- std::move(native_module), std::move(module_compiled_callback_)});
+
+ comp_state->AddCallback(std::make_unique<CompilationChunkFinishedCallback>(
+ std::move(native_module), std::move(module_compiled_callback_)));
module_compiled_callback_ = {};
}
diff --git a/deps/v8/src/wasm/wasm-code-manager.cc b/deps/v8/src/wasm/wasm-code-manager.cc
index 8cd5bbd187..0ad2c15df5 100644
--- a/deps/v8/src/wasm/wasm-code-manager.cc
+++ b/deps/v8/src/wasm/wasm-code-manager.cc
@@ -436,27 +436,7 @@ void WasmCode::Disassemble(const char* name, std::ostream& os,
if (safepoint_table_offset_ > 0) {
SafepointTable table(this);
- os << "Safepoints (size = " << table.size() << ")\n";
- for (uint32_t i = 0; i < table.length(); i++) {
- uintptr_t pc_offset = table.GetPcOffset(i);
- os << reinterpret_cast<const void*>(instruction_start() + pc_offset);
- os << std::setw(6) << std::hex << pc_offset << " " << std::dec;
- table.PrintEntry(i, os);
- os << " (sp -> fp)";
- SafepointEntry entry = table.GetEntry(i);
- if (entry.trampoline_pc() != SafepointEntry::kNoTrampolinePC) {
- os << " trampoline: " << std::hex << entry.trampoline_pc() << std::dec;
- }
- if (entry.has_register_bits()) {
- os << " registers: ";
- uint32_t register_bits = entry.register_bits();
- int bits = 32 - base::bits::CountLeadingZeros32(register_bits);
- for (int j = bits - 1; j >= 0; --j) {
- os << ((register_bits >> j) & 1);
- }
- }
- os << "\n";
- }
+ table.Print(os);
os << "\n";
}
@@ -533,7 +513,7 @@ WasmCodeAllocator::WasmCodeAllocator(std::shared_ptr<Counters> async_counters)
: protect_code_memory_(
!V8_HAS_PTHREAD_JIT_WRITE_PROTECT &&
FLAG_wasm_write_protect_code_memory &&
- !GetWasmCodeManager()->HasMemoryProtectionKeySupport()),
+ !GetWasmCodeManager()->MemoryProtectionKeysEnabled()),
async_counters_(std::move(async_counters)) {
owned_code_space_.reserve(4);
}
@@ -997,18 +977,11 @@ NativeModule::NativeModule(const WasmFeatures& enabled,
if (module_->num_declared_functions > 0) {
code_table_ =
std::make_unique<WasmCode*[]>(module_->num_declared_functions);
- num_liftoff_function_calls_ =
+ tiering_budgets_ =
std::make_unique<uint32_t[]>(module_->num_declared_functions);
- if (FLAG_new_wasm_dynamic_tiering) {
- std::fill_n(num_liftoff_function_calls_.get(),
- module_->num_declared_functions, FLAG_wasm_tiering_budget);
- } else {
- // Start counter at 4 to avoid runtime calls for smaller numbers.
- constexpr int kCounterStart = 4;
- std::fill_n(num_liftoff_function_calls_.get(),
- module_->num_declared_functions, kCounterStart);
- }
+ std::fill_n(tiering_budgets_.get(), module_->num_declared_functions,
+ FLAG_wasm_tiering_budget);
}
// Even though there cannot be another thread using this object (since we are
// just constructing it), we need to hold the mutex to fulfill the
@@ -1879,17 +1852,13 @@ NativeModule::~NativeModule() {
WasmCodeManager::WasmCodeManager()
: max_committed_code_space_(FLAG_wasm_max_code_space * MB),
critical_committed_code_space_(max_committed_code_space_ / 2),
- memory_protection_key_(FLAG_wasm_memory_protection_keys
- ? AllocateMemoryProtectionKey()
- : kNoMemoryProtectionKey) {}
+ memory_protection_key_(AllocateMemoryProtectionKey()) {}
WasmCodeManager::~WasmCodeManager() {
// No more committed code space.
DCHECK_EQ(0, total_committed_code_space_.load());
- if (FLAG_wasm_memory_protection_keys) {
- FreeMemoryProtectionKey(memory_protection_key_);
- }
+ FreeMemoryProtectionKey(memory_protection_key_);
}
#if defined(V8_OS_WIN64)
@@ -1937,7 +1906,7 @@ void WasmCodeManager::Commit(base::AddressRegion region) {
PageAllocator::Permission permission = PageAllocator::kReadWriteExecute;
bool success;
- if (FLAG_wasm_memory_protection_keys) {
+ if (MemoryProtectionKeysEnabled()) {
TRACE_HEAP(
"Setting rwx permissions and memory protection key %d for 0x%" PRIxPTR
":0x%" PRIxPTR "\n",
@@ -1970,7 +1939,7 @@ void WasmCodeManager::Decommit(base::AddressRegion region) {
USE(old_committed);
TRACE_HEAP("Discarding system pages 0x%" PRIxPTR ":0x%" PRIxPTR "\n",
region.begin(), region.end());
- if (FLAG_wasm_memory_protection_keys) {
+ if (MemoryProtectionKeysEnabled()) {
CHECK(SetPermissionsAndMemoryProtectionKey(
allocator, region, PageAllocator::kNoAccess, kNoMemoryProtectionKey));
} else {
@@ -2126,7 +2095,7 @@ size_t WasmCodeManager::EstimateNativeModuleMetaDataSize(
}
void WasmCodeManager::SetThreadWritable(bool writable) {
- DCHECK(HasMemoryProtectionKeySupport());
+ DCHECK(MemoryProtectionKeysEnabled());
MemoryProtectionKeyPermission permissions =
writable ? kNoRestrictions : kDisableWrite;
@@ -2145,8 +2114,13 @@ bool WasmCodeManager::HasMemoryProtectionKeySupport() const {
return memory_protection_key_ != kNoMemoryProtectionKey;
}
+bool WasmCodeManager::MemoryProtectionKeysEnabled() const {
+ return HasMemoryProtectionKeySupport() && FLAG_wasm_memory_protection_keys;
+}
+
bool WasmCodeManager::MemoryProtectionKeyWritable() const {
- return wasm::MemoryProtectionKeyWritable(memory_protection_key_);
+ return GetMemoryProtectionKeyPermission(memory_protection_key_) ==
+ MemoryProtectionKeyPermission::kNoRestrictions;
}
void WasmCodeManager::InitializeMemoryProtectionKeyForTesting() {
@@ -2155,6 +2129,18 @@ void WasmCodeManager::InitializeMemoryProtectionKeyForTesting() {
}
}
+void WasmCodeManager::InitializeMemoryProtectionKeyPermissionsIfSupported()
+ const {
+ if (!HasMemoryProtectionKeySupport()) return;
+ // The default permission is {kDisableAccess}. Switch from that to
+ // {kDisableWrite}. Leave other permissions untouched, as the thread did
+ // already use the memory protection key in that case.
+ if (GetMemoryProtectionKeyPermission(memory_protection_key_) ==
+ kDisableAccess) {
+ SetPermissionsForMemoryProtectionKey(memory_protection_key_, kDisableWrite);
+ }
+}
+
std::shared_ptr<NativeModule> WasmCodeManager::NewNativeModule(
Isolate* isolate, const WasmFeatures& enabled, size_t code_size_estimate,
std::shared_ptr<const WasmModule> module) {
diff --git a/deps/v8/src/wasm/wasm-code-manager.h b/deps/v8/src/wasm/wasm-code-manager.h
index 950c732545..f8c3db2cf4 100644
--- a/deps/v8/src/wasm/wasm-code-manager.h
+++ b/deps/v8/src/wasm/wasm-code-manager.h
@@ -563,6 +563,8 @@ class WasmCodeAllocator {
// Hold the {NativeModule}'s {allocation_mutex_} when calling this method.
size_t GetNumCodeSpaces() const;
+ Counters* counters() const { return async_counters_.get(); }
+
private:
// Sentinel value to be used for {AllocateForCodeInRegion} for specifying no
// restriction on the region to allocate in.
@@ -839,9 +841,9 @@ class V8_EXPORT_PRIVATE NativeModule final {
// Get or create the debug info for this NativeModule.
DebugInfo* GetDebugInfo();
- uint32_t* num_liftoff_function_calls_array() {
- return num_liftoff_function_calls_.get();
- }
+ uint32_t* tiering_budget_array() { return tiering_budgets_.get(); }
+
+ Counters* counters() const { return code_allocator_.counters(); }
private:
friend class WasmCode;
@@ -944,7 +946,7 @@ class V8_EXPORT_PRIVATE NativeModule final {
std::unique_ptr<WasmImportWrapperCache> import_wrapper_cache_;
// Array to handle number of function calls.
- std::unique_ptr<uint32_t[]> num_liftoff_function_calls_;
+ std::unique_ptr<uint32_t[]> tiering_budgets_;
// This mutex protects concurrent calls to {AddCode} and friends.
// TODO(dlehmann): Revert this to a regular {Mutex} again.
@@ -1043,9 +1045,14 @@ class V8_EXPORT_PRIVATE WasmCodeManager final {
// lock when calling this method.
void SetThreadWritable(bool writable);
- // Returns true if there is PKU support, false otherwise.
+ // Returns true if there is hardware support for PKU. Use
+ // {MemoryProtectionKeysEnabled} to also check if PKU usage is enabled via
+ // flags.
bool HasMemoryProtectionKeySupport() const;
+ // Returns true if PKU should be used.
+ bool MemoryProtectionKeysEnabled() const;
+
// Returns {true} if the memory protection key is write-enabled for the
// current thread.
// Can only be called if {HasMemoryProtectionKeySupport()} is {true}.
@@ -1055,6 +1062,10 @@ class V8_EXPORT_PRIVATE WasmCodeManager final {
// independent of the --wasm-memory-protection-keys flag.
void InitializeMemoryProtectionKeyForTesting();
+ // Initialize the current thread's permissions for the memory protection key,
+ // if we have support.
+ void InitializeMemoryProtectionKeyPermissionsIfSupported() const;
+
private:
friend class WasmCodeAllocator;
friend class WasmEngine;
diff --git a/deps/v8/src/wasm/wasm-constants.h b/deps/v8/src/wasm/wasm-constants.h
index a9b66876d2..f5bce0a5b4 100644
--- a/deps/v8/src/wasm/wasm-constants.h
+++ b/deps/v8/src/wasm/wasm-constants.h
@@ -160,6 +160,10 @@ constexpr int kAnonymousFuncIndex = -1;
// often enough.
constexpr uint32_t kGenericWrapperBudget = 1000;
+// The minimum length of supertype arrays for wasm-gc types. Having a size > 0
+// gives up some module size for faster access to the supertypes.
+constexpr uint32_t kMinimumSupertypeArraySize = 3;
+
#if V8_TARGET_ARCH_X64
constexpr int32_t kOSRTargetOffset = 5 * kSystemPointerSize;
#endif
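kMinimumSupertypeArraySize pads every wasm-gc supertype list to at least three entries. The payoff is that a subtype check at a statically known depth below that minimum can load supertypes[depth] without a preceding length check. An illustrative sketch of the trade-off, not V8's actual type-check code, and it assumes padding slots hold a sentinel value that never matches a real type:

constexpr uint32_t kMinimumSupertypeArraySize = 3;  // from this diff

bool IsSubtypeAtDepth(const uint32_t* supertypes, uint32_t length,
                      uint32_t depth, uint32_t expected) {
  // Only depths at or beyond the guaranteed minimum need the bounds check;
  // smaller depths are always in range, and a padding sentinel simply fails
  // the comparison.
  if (depth >= kMinimumSupertypeArraySize && depth >= length) return false;
  return supertypes[depth] == expected;
}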
diff --git a/deps/v8/src/wasm/wasm-engine.cc b/deps/v8/src/wasm/wasm-engine.cc
index 769bb9c781..eb39d7910a 100644
--- a/deps/v8/src/wasm/wasm-engine.cc
+++ b/deps/v8/src/wasm/wasm-engine.cc
@@ -13,6 +13,7 @@
#include "src/execution/v8threads.h"
#include "src/handles/global-handles-inl.h"
#include "src/logging/counters.h"
+#include "src/logging/metrics.h"
#include "src/objects/heap-number.h"
#include "src/objects/js-promise.h"
#include "src/objects/managed-inl.h"
@@ -490,10 +491,13 @@ MaybeHandle<AsmWasmData> WasmEngine::SyncCompileTranslatedAsmJs(
ModuleOrigin origin = language_mode == LanguageMode::kSloppy
? kAsmJsSloppyOrigin
: kAsmJsStrictOrigin;
+ // TODO(leszeks): If we want asm.js in UKM, we should figure out a way to pass
+ // the context id in here.
+ v8::metrics::Recorder::ContextId context_id =
+ v8::metrics::Recorder::ContextId::Empty();
ModuleResult result = DecodeWasmModule(
WasmFeatures::ForAsmjs(), bytes.start(), bytes.end(), false, origin,
- isolate->counters(), isolate->metrics_recorder(),
- isolate->GetOrRegisterRecorderContextId(isolate->native_context()),
+ isolate->counters(), isolate->metrics_recorder(), context_id,
DecodingMethod::kSync, allocator());
if (result.failed()) {
// This happens once in a while when we have missed some limit check
@@ -510,7 +514,7 @@ MaybeHandle<AsmWasmData> WasmEngine::SyncCompileTranslatedAsmJs(
Handle<FixedArray> export_wrappers;
std::shared_ptr<NativeModule> native_module = CompileToNativeModule(
isolate, WasmFeatures::ForAsmjs(), thrower, std::move(result).value(),
- bytes, &export_wrappers, compilation_id);
+ bytes, &export_wrappers, compilation_id, context_id);
if (!native_module) return {};
return AsmWasmData::New(isolate, std::move(native_module), export_wrappers,
@@ -534,11 +538,12 @@ MaybeHandle<WasmModuleObject> WasmEngine::SyncCompile(
const ModuleWireBytes& bytes) {
int compilation_id = next_compilation_id_.fetch_add(1);
TRACE_EVENT1("v8.wasm", "wasm.SyncCompile", "id", compilation_id);
- ModuleResult result = DecodeWasmModule(
- enabled, bytes.start(), bytes.end(), false, kWasmOrigin,
- isolate->counters(), isolate->metrics_recorder(),
- isolate->GetOrRegisterRecorderContextId(isolate->native_context()),
- DecodingMethod::kSync, allocator());
+ v8::metrics::Recorder::ContextId context_id =
+ isolate->GetOrRegisterRecorderContextId(isolate->native_context());
+ ModuleResult result =
+ DecodeWasmModule(enabled, bytes.start(), bytes.end(), false, kWasmOrigin,
+ isolate->counters(), isolate->metrics_recorder(),
+ context_id, DecodingMethod::kSync, allocator());
if (result.failed()) {
thrower->CompileFailed(result.error());
return {};
@@ -549,7 +554,7 @@ MaybeHandle<WasmModuleObject> WasmEngine::SyncCompile(
Handle<FixedArray> export_wrappers;
std::shared_ptr<NativeModule> native_module = CompileToNativeModule(
isolate, enabled, thrower, std::move(result).value(), bytes,
- &export_wrappers, compilation_id);
+ &export_wrappers, compilation_id, context_id);
if (!native_module) return {};
#ifdef DEBUG
@@ -1012,6 +1017,12 @@ void WasmEngine::AddIsolate(Isolate* isolate) {
DCHECK_EQ(0, isolates_.count(isolate));
isolates_.emplace(isolate, std::make_unique<IsolateInfo>(isolate));
+ // The isolate might access existing (cached) code without ever compiling any.
+ // In that case, the current thread might still have the default permissions
+ // for the memory protection key (== no access). Thus initialize the
+ // permissions now.
+ GetWasmCodeManager()->InitializeMemoryProtectionKeyPermissionsIfSupported();
+
// Install sampling GC callback.
// TODO(v8:7424): For now we sample module sizes in a GC callback. This will
// bias samples towards apps with high memory pressure. We should switch to
@@ -1167,12 +1178,11 @@ std::shared_ptr<NativeModule> WasmEngine::NewNativeModule(
}
// Record memory protection key support.
- if (FLAG_wasm_memory_protection_keys && !isolate_info->pku_support_sampled) {
+ if (!isolate_info->pku_support_sampled) {
isolate_info->pku_support_sampled = true;
auto* histogram =
isolate->counters()->wasm_memory_protection_keys_support();
- bool has_mpk =
- GetWasmCodeManager()->memory_protection_key_ != kNoMemoryProtectionKey;
+ bool has_mpk = GetWasmCodeManager()->HasMemoryProtectionKeySupport();
histogram->AddSample(has_mpk ? 1 : 0);
}
@@ -1645,9 +1655,6 @@ WasmCodeManager* GetWasmCodeManager() {
// {max_mem_pages} is declared in wasm-limits.h.
uint32_t max_mem_pages() {
- static_assert(
- kV8MaxWasmMemoryPages * kWasmPageSize <= JSArrayBuffer::kMaxByteLength,
- "Wasm memories must not be bigger than JSArrayBuffers");
STATIC_ASSERT(kV8MaxWasmMemoryPages <= kMaxUInt32);
return std::min(uint32_t{kV8MaxWasmMemoryPages}, FLAG_wasm_max_mem_pages);
}
diff --git a/deps/v8/src/wasm/wasm-external-refs.cc b/deps/v8/src/wasm/wasm-external-refs.cc
index 0d8c14a641..d1ae05c570 100644
--- a/deps/v8/src/wasm/wasm-external-refs.cc
+++ b/deps/v8/src/wasm/wasm-external-refs.cc
@@ -56,7 +56,12 @@ void f32_ceil_wrapper(Address data) {
}
void f32_nearest_int_wrapper(Address data) {
- WriteUnalignedValue<float>(data, nearbyintf(ReadUnalignedValue<float>(data)));
+ float input = ReadUnalignedValue<float>(data);
+ float value = nearbyintf(input);
+#if V8_OS_AIX
+ value = FpOpWorkaround<float>(input, value);
+#endif
+ WriteUnalignedValue<float>(data, value);
}
void f64_trunc_wrapper(Address data) {
@@ -72,8 +77,12 @@ void f64_ceil_wrapper(Address data) {
}
void f64_nearest_int_wrapper(Address data) {
- WriteUnalignedValue<double>(data,
- nearbyint(ReadUnalignedValue<double>(data)));
+ double input = ReadUnalignedValue<double>(data);
+ double value = nearbyint(input);
+#if V8_OS_AIX
+ value = FpOpWorkaround<double>(input, value);
+#endif
+ WriteUnalignedValue<double>(data, value);
}
void int64_to_float32_wrapper(Address data) {
diff --git a/deps/v8/src/wasm/wasm-init-expr.cc b/deps/v8/src/wasm/wasm-init-expr.cc
index c6641034ba..db7e003e95 100644
--- a/deps/v8/src/wasm/wasm-init-expr.cc
+++ b/deps/v8/src/wasm/wasm-init-expr.cc
@@ -49,7 +49,7 @@ ValueType WasmInitExpr::type(const WasmModule* module,
return ValueType::Rtt(immediate().heap_type, 0);
case kRttSub:
case kRttFreshSub: {
- ValueType operand_type = operands()[0].type(module, enabled_features);
+ ValueType operand_type = (*operands())[0].type(module, enabled_features);
if (!operand_type.is_rtt()) return kWasmBottom;
if (operand_type.has_depth()) {
return ValueType::Rtt(immediate().heap_type, operand_type.depth() + 1);
diff --git a/deps/v8/src/wasm/wasm-init-expr.h b/deps/v8/src/wasm/wasm-init-expr.h
index 551fce2991..1673062f0f 100644
--- a/deps/v8/src/wasm/wasm-init-expr.h
+++ b/deps/v8/src/wasm/wasm-init-expr.h
@@ -12,6 +12,7 @@
#include <memory>
#include "src/wasm/value-type.h"
+#include "src/zone/zone-containers.h"
namespace v8 {
namespace internal {
@@ -21,7 +22,7 @@ struct WasmModule;
class WasmFeatures;
// Representation of an initializer expression.
-class WasmInitExpr {
+class WasmInitExpr : public ZoneObject {
public:
enum Operator {
kNone,
@@ -54,25 +55,26 @@ class WasmInitExpr {
HeapType::Representation heap_type;
};
- WasmInitExpr() : kind_(kNone) { immediate_.i32_const = 0; }
- explicit WasmInitExpr(int32_t v) : kind_(kI32Const) {
+ WasmInitExpr() : kind_(kNone), operands_(nullptr) {
+ immediate_.i32_const = 0;
+ }
+ explicit WasmInitExpr(int32_t v) : kind_(kI32Const), operands_(nullptr) {
immediate_.i32_const = v;
}
- explicit WasmInitExpr(int64_t v) : kind_(kI64Const) {
+ explicit WasmInitExpr(int64_t v) : kind_(kI64Const), operands_(nullptr) {
immediate_.i64_const = v;
}
- explicit WasmInitExpr(float v) : kind_(kF32Const) {
+ explicit WasmInitExpr(float v) : kind_(kF32Const), operands_(nullptr) {
immediate_.f32_const = v;
}
- explicit WasmInitExpr(double v) : kind_(kF64Const) {
+ explicit WasmInitExpr(double v) : kind_(kF64Const), operands_(nullptr) {
immediate_.f64_const = v;
}
- explicit WasmInitExpr(uint8_t v[kSimd128Size]) : kind_(kS128Const) {
+ explicit WasmInitExpr(uint8_t v[kSimd128Size])
+ : kind_(kS128Const), operands_(nullptr) {
memcpy(immediate_.s128_const.data(), v, kSimd128Size);
}
- MOVE_ONLY_NO_DEFAULT_CONSTRUCTOR(WasmInitExpr);
-
static WasmInitExpr GlobalGet(uint32_t index) {
WasmInitExpr expr;
expr.kind_ = kGlobalGet;
@@ -95,29 +97,25 @@ class WasmInitExpr {
}
static WasmInitExpr StructNewWithRtt(uint32_t index,
- std::vector<WasmInitExpr> elements) {
- WasmInitExpr expr;
- expr.kind_ = kStructNewWithRtt;
+ ZoneVector<WasmInitExpr>* elements) {
+ WasmInitExpr expr(kStructNewWithRtt, elements);
expr.immediate_.index = index;
- expr.operands_ = std::move(elements);
return expr;
}
static WasmInitExpr StructNew(uint32_t index,
- std::vector<WasmInitExpr> elements) {
- WasmInitExpr expr;
- expr.kind_ = kStructNew;
+ ZoneVector<WasmInitExpr>* elements) {
+ WasmInitExpr expr(kStructNew, elements);
expr.immediate_.index = index;
- expr.operands_ = std::move(elements);
return expr;
}
- static WasmInitExpr StructNewDefaultWithRtt(uint32_t index,
+ static WasmInitExpr StructNewDefaultWithRtt(Zone* zone, uint32_t index,
WasmInitExpr rtt) {
- WasmInitExpr expr;
- expr.kind_ = kStructNewDefaultWithRtt;
+ WasmInitExpr expr(kStructNewDefaultWithRtt,
+ zone->New<ZoneVector<WasmInitExpr>>(
+ std::initializer_list<WasmInitExpr>{rtt}, zone));
expr.immediate_.index = index;
- expr.operands_.push_back(std::move(rtt));
return expr;
}
@@ -129,20 +127,16 @@ class WasmInitExpr {
}
static WasmInitExpr ArrayInit(uint32_t index,
- std::vector<WasmInitExpr> elements) {
- WasmInitExpr expr;
- expr.kind_ = kArrayInit;
+ ZoneVector<WasmInitExpr>* elements) {
+ WasmInitExpr expr(kArrayInit, elements);
expr.immediate_.index = index;
- expr.operands_ = std::move(elements);
return expr;
}
static WasmInitExpr ArrayInitStatic(uint32_t index,
- std::vector<WasmInitExpr> elements) {
- WasmInitExpr expr;
- expr.kind_ = kArrayInitStatic;
+ ZoneVector<WasmInitExpr>* elements) {
+ WasmInitExpr expr(kArrayInitStatic, elements);
expr.immediate_.index = index;
- expr.operands_ = std::move(elements);
return expr;
}
@@ -153,25 +147,28 @@ class WasmInitExpr {
return expr;
}
- static WasmInitExpr RttSub(uint32_t index, WasmInitExpr supertype) {
- WasmInitExpr expr;
- expr.kind_ = kRttSub;
+ static WasmInitExpr RttSub(Zone* zone, uint32_t index,
+ WasmInitExpr supertype) {
+ WasmInitExpr expr(
+ kRttSub, zone->New<ZoneVector<WasmInitExpr>>(
+ std::initializer_list<WasmInitExpr>{supertype}, zone));
expr.immediate_.index = index;
- expr.operands_.push_back(std::move(supertype));
return expr;
}
- static WasmInitExpr RttFreshSub(uint32_t index, WasmInitExpr supertype) {
- WasmInitExpr expr;
- expr.kind_ = kRttFreshSub;
+ static WasmInitExpr RttFreshSub(Zone* zone, uint32_t index,
+ WasmInitExpr supertype) {
+ WasmInitExpr expr(
+ kRttFreshSub,
+ zone->New<ZoneVector<WasmInitExpr>>(
+ std::initializer_list<WasmInitExpr>{supertype}, zone));
expr.immediate_.index = index;
- expr.operands_.push_back(std::move(supertype));
return expr;
}
Immediate immediate() const { return immediate_; }
Operator kind() const { return kind_; }
- const std::vector<WasmInitExpr>& operands() const { return operands_; }
+ const ZoneVector<WasmInitExpr>* operands() const { return operands_; }
bool operator==(const WasmInitExpr& other) const {
if (kind() != other.kind()) return false;
@@ -199,16 +196,16 @@ class WasmInitExpr {
case kStructNewDefaultWithRtt:
case kStructNewDefault:
if (immediate().index != other.immediate().index) return false;
- DCHECK_EQ(operands().size(), other.operands().size());
- for (uint32_t i = 0; i < operands().size(); i++) {
+ DCHECK_EQ(operands()->size(), other.operands()->size());
+ for (uint32_t i = 0; i < operands()->size(); i++) {
if (operands()[i] != other.operands()[i]) return false;
}
return true;
case kArrayInit:
case kArrayInitStatic:
if (immediate().index != other.immediate().index) return false;
- if (operands().size() != other.operands().size()) return false;
- for (uint32_t i = 0; i < operands().size(); i++) {
+ if (operands()->size() != other.operands()->size()) return false;
+ for (uint32_t i = 0; i < operands()->size(); i++) {
if (operands()[i] != other.operands()[i]) return false;
}
return true;
@@ -227,11 +224,15 @@ class WasmInitExpr {
const WasmFeatures& enabled_features) const;
private:
+ WasmInitExpr(Operator kind, const ZoneVector<WasmInitExpr>* operands)
+ : kind_(kind), operands_(operands) {}
Immediate immediate_;
Operator kind_;
- std::vector<WasmInitExpr> operands_;
+ const ZoneVector<WasmInitExpr>* operands_;
};
+ASSERT_TRIVIALLY_COPYABLE(WasmInitExpr);
+
} // namespace wasm
} // namespace internal
} // namespace v8
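The WasmInitExpr rework swaps the owning std::vector of operands for a pointer to a zone-allocated ZoneVector. That is what makes the closing ASSERT_TRIVIALLY_COPYABLE hold, why call sites now dereference (init.operands()->size(), (*operands())[0]), and why wasm-module-builder.cc below drops its std::move calls. A reduced sketch of the ownership idea using standard-library types; Expr stands in for WasmInitExpr and is not the real class:

#include <type_traits>
#include <vector>

struct Expr {
  int kind = 0;
  // Operands live in an arena (the Zone in V8); the expression stores only a
  // non-owning pointer, so copying an Expr copies two words, not a vector.
  const std::vector<Expr>* operands = nullptr;
};

static_assert(std::is_trivially_copyable<Expr>::value,
              "Expr can be passed by value freely; moving gains nothing");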
diff --git a/deps/v8/src/wasm/wasm-js.cc b/deps/v8/src/wasm/wasm-js.cc
index 346d6d90f8..936bf14301 100644
--- a/deps/v8/src/wasm/wasm-js.cc
+++ b/deps/v8/src/wasm/wasm-js.cc
@@ -139,7 +139,8 @@ void WasmStreaming::SetClient(std::shared_ptr<Client> client) {
}
void WasmStreaming::SetUrl(const char* url, size_t length) {
- TRACE_EVENT0("v8.wasm", "wasm.SetUrl");
+ DCHECK_EQ('\0', url[length]); // {url} is null-terminated.
+ TRACE_EVENT1("v8.wasm", "wasm.SetUrl", "url", url);
impl_->SetUrl(base::VectorOf(url, length));
}
@@ -182,9 +183,7 @@ Local<String> v8_str(Isolate* isolate, const char* str) {
thrower->TypeError("Argument 0 must be a WebAssembly." #Type); \
return {}; \
} \
- Local<Object> obj = Local<Object>::Cast(args[0]); \
- return i::Handle<i::Wasm##Type##Object>::cast( \
- v8::Utils::OpenHandle(*obj)); \
+ return i::Handle<i::Wasm##Type##Object>::cast(arg0); \
}
GET_FIRST_ARGUMENT_AS(Module)
@@ -233,6 +232,16 @@ i::wasm::ModuleWireBytes GetFirstArgumentAsBytes(
return i::wasm::ModuleWireBytes(start, start + length);
}
+i::MaybeHandle<i::JSFunction> GetFirstArgumentAsJSFunction(
+ const v8::FunctionCallbackInfo<v8::Value>& args, ErrorThrower* thrower) {
+ i::Handle<i::Object> arg0 = Utils::OpenHandle(*args[0]);
+ if (!arg0->IsJSFunction()) {
+ thrower->TypeError("Argument 0 must be a function");
+ return {};
+ }
+ return i::Handle<i::JSFunction>::cast(arg0);
+}
+
i::MaybeHandle<i::JSReceiver> GetValueAsImports(Local<Value> arg,
ErrorThrower* thrower) {
if (arg->IsUndefined()) return {};
@@ -1197,6 +1206,11 @@ void WebAssemblyTable(const v8::FunctionCallbackInfo<v8::Value>& args) {
"with the type of the new table.");
return;
}
+ // TODO(7748): Generalize this if other table types are allowed.
+ if (type == i::wasm::kWasmFuncRef && !element->IsNull()) {
+ element = i::WasmInternalFunction::FromExternal(element, i_isolate)
+ .ToHandleChecked();
+ }
for (uint32_t index = 0; index < static_cast<uint32_t>(initial); ++index) {
i::WasmTableObject::Set(i_isolate, table_obj, index, element);
}
@@ -1605,6 +1619,34 @@ void WebAssemblyTag(const v8::FunctionCallbackInfo<v8::Value>& args) {
args.GetReturnValue().Set(Utils::ToLocal(tag_object));
}
+// WebAssembly.Suspender
+void WebAssemblySuspender(const v8::FunctionCallbackInfo<v8::Value>& args) {
+ v8::Isolate* isolate = args.GetIsolate();
+ i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(isolate);
+ HandleScope scope(isolate);
+
+ ScheduledErrorThrower thrower(i_isolate, "WebAssembly.Suspender()");
+ if (!args.IsConstructCall()) {
+ thrower.TypeError("WebAssembly.Suspender must be invoked with 'new'");
+ return;
+ }
+
+ i::Handle<i::JSObject> suspender = i::WasmSuspenderObject::New(i_isolate);
+
+ // The infrastructure for `new Foo` calls allocates an object, which is
+ // available here as {args.This()}. We're going to discard this object
+ // and use {suspender} instead, but it does have the correct prototype,
+ // which we must harvest from it. This makes a difference when the JS
+ // constructor function wasn't {WebAssembly.Suspender} directly, but some
+ // subclass: {suspender} has {WebAssembly.Suspender}'s prototype at this
+ // point, so we must overwrite that with the correct prototype for {Foo}.
+ if (!TransferPrototype(i_isolate, suspender,
+ Utils::OpenHandle(*args.This()))) {
+ return;
+ }
+ args.GetReturnValue().Set(Utils::ToLocal(suspender));
+}
+
namespace {
uint32_t GetEncodedSize(i::Handle<i::WasmTagObject> tag_object) {
@@ -1939,6 +1981,14 @@ void WebAssemblyTableGrow(const v8::FunctionCallbackInfo<v8::Value>& args) {
init_value = DefaultReferenceValue(i_isolate, receiver->type());
}
+ // TODO(7748): Generalize this if other table types are allowed.
+ bool has_function_type =
+ receiver->type() == i::wasm::kWasmFuncRef || receiver->type().has_index();
+ if (has_function_type && !init_value->IsNull()) {
+ init_value = i::WasmInternalFunction::FromExternal(init_value, i_isolate)
+ .ToHandleChecked();
+ }
+
int old_size =
i::WasmTableObject::Grow(i_isolate, receiver, grow_by, init_value);
@@ -1970,6 +2020,11 @@ void WebAssemblyTableGet(const v8::FunctionCallbackInfo<v8::Value>& args) {
i::Handle<i::Object> result =
i::WasmTableObject::Get(i_isolate, receiver, index);
+ if (result->IsWasmInternalFunction()) {
+ result =
+ handle(i::Handle<i::WasmInternalFunction>::cast(result)->external(),
+ i_isolate);
+ }
v8::ReturnValue<v8::Value> return_value = args.GetReturnValue();
return_value.Set(Utils::ToLocal(result));
@@ -2006,6 +2061,15 @@ void WebAssemblyTableSet(const v8::FunctionCallbackInfo<v8::Value>& args) {
"to 'this'");
return;
}
+
+ // TODO(7748): Generalize this if other table types are allowed.
+ bool has_function_type = table_object->type() == i::wasm::kWasmFuncRef ||
+ table_object->type().has_index();
+ if (has_function_type && !element->IsNull()) {
+ element = i::WasmInternalFunction::FromExternal(element, i_isolate)
+ .ToHandleChecked();
+ }
+
i::WasmTableObject::Set(i_isolate, table_object, index, element);
}
@@ -2334,10 +2398,19 @@ void WebAssemblyGlobalGetValueCommon(
case i::wasm::kOptRef:
switch (receiver->type().heap_representation()) {
case i::wasm::HeapType::kExtern:
- case i::wasm::HeapType::kFunc:
- case i::wasm::HeapType::kAny:
return_value.Set(Utils::ToLocal(receiver->GetRef()));
break;
+ case i::wasm::HeapType::kFunc:
+ case i::wasm::HeapType::kAny: {
+ i::Handle<i::Object> result = receiver->GetRef();
+ if (result->IsWasmInternalFunction()) {
+ result = handle(
+ i::Handle<i::WasmInternalFunction>::cast(result)->external(),
+ i_isolate);
+ }
+ return_value.Set(Utils::ToLocal(result));
+ break;
+ }
case internal::wasm::HeapType::kBottom:
UNREACHABLE();
case internal::wasm::HeapType::kI31:
@@ -2467,6 +2540,39 @@ void WebAssemblyGlobalType(const v8::FunctionCallbackInfo<v8::Value>& args) {
args.GetReturnValue().Set(Utils::ToLocal(type));
}
+// WebAssembly.Suspender.returnPromiseOnSuspend(WebAssembly.Function) ->
+// WebAssembly.Function
+void WebAssemblySuspenderReturnPromiseOnSuspend(
+ const v8::FunctionCallbackInfo<v8::Value>& args) {
+ Isolate* isolate = args.GetIsolate();
+ HandleScope scope(isolate);
+ i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(isolate);
+ ScheduledErrorThrower thrower(
+ i_isolate, "WebAssembly.Suspender.returnPromiseOnSuspend()");
+ if (args.Length() == 0) {
+ thrower.TypeError("Argument 0 is required");
+ return;
+ }
+ auto maybe_function = GetFirstArgumentAsJSFunction(args, &thrower);
+ if (thrower.error()) return;
+ i::Handle<i::JSFunction> function = maybe_function.ToHandleChecked();
+ i::SharedFunctionInfo sfi = function->shared();
+ if (!sfi.HasWasmExportedFunctionData()) {
+ thrower.TypeError("Argument 0 must be a wasm function");
+ }
+ i::WasmExportedFunctionData data = sfi.wasm_exported_function_data();
+ int index = data.function_index();
+ i::Handle<i::WasmInstanceObject> instance(
+ i::WasmInstanceObject::cast(data.internal().ref()), i_isolate);
+ i::Handle<i::Code> wrapper = i_isolate->builtins()->code_handle(
+ i::Builtin::kWasmReturnPromiseOnSuspend);
+ i::Handle<i::JSObject> result =
+ i::Handle<i::WasmExternalFunction>::cast(i::WasmExportedFunction::New(
+ i_isolate, instance, index,
+ static_cast<int>(data.sig()->parameter_count()), wrapper));
+ args.GetReturnValue().Set(Utils::ToLocal(result));
+}
+
} // namespace
// TODO(titzer): we use the API to create the function template because the
@@ -2753,6 +2859,18 @@ void WasmJs::Install(Isolate* isolate, bool exposed_on_global_object) {
exception_proto);
}
+ // Setup Suspender.
+ if (enabled_features.has_stack_switching()) {
+ Handle<JSFunction> suspender_constructor = InstallConstructorFunc(
+ isolate, webassembly, "Suspender", WebAssemblySuspender);
+ context->set_wasm_suspender_constructor(*suspender_constructor);
+ Handle<JSObject> suspender_proto = SetupConstructor(
+ isolate, suspender_constructor, i::WASM_SUSPENDER_OBJECT_TYPE,
+ WasmSuspenderObject::kHeaderSize, "WebAssembly.Suspender");
+ InstallFunc(isolate, suspender_proto, "returnPromiseOnSuspend",
+ WebAssemblySuspenderReturnPromiseOnSuspend, 1);
+ }
+
// Setup Function
if (enabled_features.has_type_reflection()) {
Handle<JSFunction> function_constructor = InstallConstructorFunc(
diff --git a/deps/v8/src/wasm/wasm-limits.h b/deps/v8/src/wasm/wasm-limits.h
index fa7784e724..fcafb69395 100644
--- a/deps/v8/src/wasm/wasm-limits.h
+++ b/deps/v8/src/wasm/wasm-limits.h
@@ -40,7 +40,7 @@ constexpr size_t kV8MaxWasmDataSegments = 100000;
// Also, do not use this limit to validate declared memory, use
// kSpecMaxMemoryPages for that.
constexpr size_t kV8MaxWasmMemoryPages = kSystemPointerSize == 4
- ? 32767 // = 2 GiB
+ ? 32768 // = 2 GiB
: 65536; // = 4 GiB
constexpr size_t kV8MaxWasmStringSize = 100000;
constexpr size_t kV8MaxWasmModuleSize = 1024 * 1024 * 1024; // = 1 GiB
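A quick arithmetic check of the bumped 32-bit limit; the static_assert is a worked example only, not part of the header:

// 32768 pages * 64 KiB per wasm page = 2^15 * 2^16 bytes = 2^31 bytes = 2 GiB.
// The previous value, 32767, fell one page short of the "= 2 GiB" comment.
static_assert(32768ull * 64 * 1024 == 2ull * 1024 * 1024 * 1024,
              "32-bit kV8MaxWasmMemoryPages corresponds to exactly 2 GiB");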
diff --git a/deps/v8/src/wasm/wasm-module-builder.cc b/deps/v8/src/wasm/wasm-module-builder.cc
index 9bb3472138..ab7262ed74 100644
--- a/deps/v8/src/wasm/wasm-module-builder.cc
+++ b/deps/v8/src/wasm/wasm-module-builder.cc
@@ -355,7 +355,7 @@ uint32_t WasmModuleBuilder::AddTable(ValueType type, uint32_t min_size,
uint32_t WasmModuleBuilder::AddTable(ValueType type, uint32_t min_size,
uint32_t max_size, WasmInitExpr init) {
- tables_.push_back({type, min_size, max_size, true, std::move(init)});
+ tables_.push_back({type, min_size, max_size, true, init});
return static_cast<uint32_t>(tables_.size() - 1);
}
@@ -403,7 +403,7 @@ void WasmModuleBuilder::AddExport(base::Vector<const char> name,
uint32_t WasmModuleBuilder::AddExportedGlobal(ValueType type, bool mutability,
WasmInitExpr init,
base::Vector<const char> name) {
- uint32_t index = AddGlobal(type, mutability, std::move(init));
+ uint32_t index = AddGlobal(type, mutability, init);
AddExport(name, kExternalGlobal, index);
return index;
}
@@ -421,7 +421,7 @@ void WasmModuleBuilder::ExportImportedFunction(base::Vector<const char> name,
uint32_t WasmModuleBuilder::AddGlobal(ValueType type, bool mutability,
WasmInitExpr init) {
- globals_.push_back({type, mutability, std::move(init)});
+ globals_.push_back({type, mutability, init});
return static_cast<uint32_t>(globals_.size() - 1);
}
@@ -523,7 +523,7 @@ void WriteInitializerExpressionWithEnd(ZoneBuffer* buffer,
STATIC_ASSERT((kExprStructNewWithRtt >> 8) == kGCPrefix);
STATIC_ASSERT((kExprStructNewDefault >> 8) == kGCPrefix);
STATIC_ASSERT((kExprStructNewDefaultWithRtt >> 8) == kGCPrefix);
- for (const WasmInitExpr& operand : init.operands()) {
+ for (const WasmInitExpr& operand : *init.operands()) {
WriteInitializerExpressionWithEnd(buffer, operand, kWasmBottom);
}
buffer->write_u8(kGCPrefix);
@@ -551,7 +551,7 @@ void WriteInitializerExpressionWithEnd(ZoneBuffer* buffer,
case WasmInitExpr::kArrayInitStatic:
STATIC_ASSERT((kExprArrayInit >> 8) == kGCPrefix);
STATIC_ASSERT((kExprArrayInitStatic >> 8) == kGCPrefix);
- for (const WasmInitExpr& operand : init.operands()) {
+ for (const WasmInitExpr& operand : *init.operands()) {
WriteInitializerExpressionWithEnd(buffer, operand, kWasmBottom);
}
buffer->write_u8(kGCPrefix);
@@ -559,7 +559,7 @@ void WriteInitializerExpressionWithEnd(ZoneBuffer* buffer,
init.kind() == WasmInitExpr::kArrayInit ? kExprArrayInit
: kExprArrayInitStatic));
buffer->write_u32v(init.immediate().index);
- buffer->write_u32v(static_cast<uint32_t>(init.operands().size() - 1));
+ buffer->write_u32v(static_cast<uint32_t>(init.operands()->size() - 1));
break;
case WasmInitExpr::kRttCanon:
STATIC_ASSERT((kExprRttCanon >> 8) == kGCPrefix);
@@ -570,7 +570,7 @@ void WriteInitializerExpressionWithEnd(ZoneBuffer* buffer,
case WasmInitExpr::kRttSub:
case WasmInitExpr::kRttFreshSub:
// The operand to rtt.sub must be emitted first.
- WriteInitializerExpressionWithEnd(buffer, init.operands()[0],
+ WriteInitializerExpressionWithEnd(buffer, (*init.operands())[0],
kWasmBottom);
STATIC_ASSERT((kExprRttSub >> 8) == kGCPrefix);
STATIC_ASSERT((kExprRttFreshSub >> 8) == kGCPrefix);
diff --git a/deps/v8/src/wasm/wasm-module-builder.h b/deps/v8/src/wasm/wasm-module-builder.h
index 7ba140775d..ca4ed582df 100644
--- a/deps/v8/src/wasm/wasm-module-builder.h
+++ b/deps/v8/src/wasm/wasm-module-builder.h
@@ -273,7 +273,7 @@ class V8_EXPORT_PRIVATE WasmModuleBuilder : public ZoneObject {
WasmInitExpr offset)
: type(type),
table_index(table_index),
- offset(std::move(offset)),
+ offset(offset),
entries(zone),
status(kStatusActive) {
DCHECK(IsValidOffsetKind(offset.kind()));
diff --git a/deps/v8/src/wasm/wasm-module.h b/deps/v8/src/wasm/wasm-module.h
index 407ca18dab..e67940a2b5 100644
--- a/deps/v8/src/wasm/wasm-module.h
+++ b/deps/v8/src/wasm/wasm-module.h
@@ -314,9 +314,10 @@ struct V8_EXPORT_PRIVATE WasmModule {
std::vector<TypeDefinition> types; // by type index
std::vector<uint8_t> type_kinds; // by type index
std::vector<uint32_t> supertypes; // by type index
- // Map from each type index to the index of its corresponding canonical type.
+ // Map from each type index to its corresponding canonical index.
+ // Canonical indices do not correspond to types.
// Note: right now, only functions are canonicalized, and arrays and structs
- // map to themselves.
+ // map to 0.
std::vector<uint32_t> canonicalized_type_ids;
bool has_type(uint32_t index) const { return index < types.size(); }
@@ -462,7 +463,8 @@ int GetNearestWasmFunction(const WasmModule* module, uint32_t byte_offset);
// Returns 0 if the type has no explicit supertype.
// The result is capped to {kV8MaxRttSubtypingDepth + 1}.
// Invalid cyclic hierarchies will return -1.
-int GetSubtypingDepth(const WasmModule* module, uint32_t type_index);
+V8_EXPORT_PRIVATE int GetSubtypingDepth(const WasmModule* module,
+ uint32_t type_index);
// Interface to the storage (wire bytes) of a wasm module.
// It is illegal for anyone receiving a ModuleWireBytes to store pointers based
diff --git a/deps/v8/src/wasm/wasm-objects-inl.h b/deps/v8/src/wasm/wasm-objects-inl.h
index e18d6c23d4..6f33696e7d 100644
--- a/deps/v8/src/wasm/wasm-objects-inl.h
+++ b/deps/v8/src/wasm/wasm-objects-inl.h
@@ -48,10 +48,12 @@ TQ_OBJECT_CONSTRUCTORS_IMPL(WasmTableObject)
TQ_OBJECT_CONSTRUCTORS_IMPL(AsmWasmData)
TQ_OBJECT_CONSTRUCTORS_IMPL(WasmFunctionData)
TQ_OBJECT_CONSTRUCTORS_IMPL(WasmApiFunctionRef)
+TQ_OBJECT_CONSTRUCTORS_IMPL(WasmInternalFunction)
TQ_OBJECT_CONSTRUCTORS_IMPL(WasmTypeInfo)
TQ_OBJECT_CONSTRUCTORS_IMPL(WasmStruct)
TQ_OBJECT_CONSTRUCTORS_IMPL(WasmArray)
TQ_OBJECT_CONSTRUCTORS_IMPL(WasmContinuationObject)
+TQ_OBJECT_CONSTRUCTORS_IMPL(WasmSuspenderObject)
CAST_ACCESSOR(WasmInstanceObject)
@@ -176,13 +178,12 @@ void WasmGlobalObject::SetExternRef(Handle<Object> value) {
bool WasmGlobalObject::SetFuncRef(Isolate* isolate, Handle<Object> value) {
DCHECK_EQ(type(), wasm::kWasmFuncRef);
- if (!value->IsNull(isolate) &&
- !WasmExternalFunction::IsWasmExternalFunction(*value) &&
- !WasmCapiFunction::IsWasmCapiFunction(*value)) {
- return false;
+ if (value->IsNull() ||
+ WasmInternalFunction::FromExternal(value, isolate).ToHandle(&value)) {
+ tagged_buffer().set(offset(), *value);
+ return true;
}
- tagged_buffer().set(offset(), *value);
- return true;
+ return false;
}
// WasmInstanceObject
@@ -224,10 +225,8 @@ PRIMITIVE_ACCESSORS(WasmInstanceObject, dropped_elem_segments, byte*,
kDroppedElemSegmentsOffset)
PRIMITIVE_ACCESSORS(WasmInstanceObject, hook_on_function_call_address, Address,
kHookOnFunctionCallAddressOffset)
-PRIMITIVE_ACCESSORS(WasmInstanceObject, num_liftoff_function_calls_array,
- uint32_t*, kNumLiftoffFunctionCallsArrayOffset)
-ACCESSORS(WasmInstanceObject, active_continuation, WasmContinuationObject,
- kActiveContinuationOffset)
+PRIMITIVE_ACCESSORS(WasmInstanceObject, tiering_budget_array, uint32_t*,
+ kTieringBudgetArrayOffset)
PRIMITIVE_ACCESSORS(WasmInstanceObject, break_on_entry, uint8_t,
kBreakOnEntryOffset)
@@ -253,8 +252,8 @@ OPTIONAL_ACCESSORS(WasmInstanceObject, indirect_function_table_refs, FixedArray,
OPTIONAL_ACCESSORS(WasmInstanceObject, managed_native_allocations, Foreign,
kManagedNativeAllocationsOffset)
OPTIONAL_ACCESSORS(WasmInstanceObject, tags_table, FixedArray, kTagsTableOffset)
-OPTIONAL_ACCESSORS(WasmInstanceObject, wasm_external_functions, FixedArray,
- kWasmExternalFunctionsOffset)
+OPTIONAL_ACCESSORS(WasmInstanceObject, wasm_internal_functions, FixedArray,
+ kWasmInternalFunctionsOffset)
ACCESSORS(WasmInstanceObject, managed_object_maps, FixedArray,
kManagedObjectMapsOffset)
ACCESSORS(WasmInstanceObject, feedback_vectors, FixedArray,
@@ -268,32 +267,6 @@ void WasmInstanceObject::clear_padding() {
}
}
-IndirectFunctionTableEntry::IndirectFunctionTableEntry(
- Handle<WasmInstanceObject> instance, int table_index, int entry_index)
- : instance_(table_index == 0 ? instance
- : Handle<WasmInstanceObject>::null()),
- table_(table_index != 0
- ? handle(WasmIndirectFunctionTable::cast(
- instance->indirect_function_tables().get(
- table_index)),
- instance->GetIsolate())
- : Handle<WasmIndirectFunctionTable>::null()),
- index_(entry_index) {
- DCHECK_GE(entry_index, 0);
- DCHECK_LT(entry_index, table_index == 0
- ? instance->indirect_function_table_size()
- : table_->size());
-}
-
-IndirectFunctionTableEntry::IndirectFunctionTableEntry(
- Handle<WasmIndirectFunctionTable> table, int entry_index)
- : instance_(Handle<WasmInstanceObject>::null()),
- table_(table),
- index_(entry_index) {
- DCHECK_GE(entry_index, 0);
- DCHECK_LT(entry_index, table_->size());
-}
-
ImportedFunctionEntry::ImportedFunctionEntry(
Handle<WasmInstanceObject> instance, int index)
: instance_(instance), index_(index) {
@@ -312,7 +285,7 @@ WasmExportedFunction::WasmExportedFunction(Address ptr) : JSFunction(ptr) {
CAST_ACCESSOR(WasmExportedFunction)
// WasmFunctionData
-ACCESSORS(WasmFunctionData, ref, Object, kRefOffset)
+ACCESSORS(WasmFunctionData, internal, WasmInternalFunction, kInternalOffset)
DEF_GETTER(WasmFunctionData, wrapper_code, Code) {
return FromCodeT(TorqueGeneratedClass::wrapper_code(cage_base));
@@ -333,15 +306,15 @@ CAST_ACCESSOR(WasmJSFunction)
// WasmJSFunctionData
TQ_OBJECT_CONSTRUCTORS_IMPL(WasmJSFunctionData)
-ACCESSORS(WasmJSFunctionData, raw_wasm_to_js_wrapper_code, CodeT,
- kWasmToJsWrapperCodeOffset)
-DEF_GETTER(WasmJSFunctionData, wasm_to_js_wrapper_code, Code) {
- return FromCodeT(raw_wasm_to_js_wrapper_code(cage_base));
+// WasmInternalFunction
+ACCESSORS(WasmInternalFunction, raw_code, CodeT, kCodeOffset)
+
+DEF_GETTER(WasmInternalFunction, code, Code) {
+ return FromCodeT(raw_code(cage_base));
}
-void WasmJSFunctionData::set_wasm_to_js_wrapper_code(Code code,
- WriteBarrierMode mode) {
- set_raw_wasm_to_js_wrapper_code(ToCodeT(code), mode);
+void WasmInternalFunction::set_code(Code code, WriteBarrierMode mode) {
+ set_raw_code(ToCodeT(code), mode);
}
// WasmCapiFunction
@@ -548,10 +521,10 @@ wasm::StructType* WasmStruct::type(Map map) {
wasm::StructType* WasmStruct::GcSafeType(Map map) {
DCHECK_EQ(WASM_STRUCT_TYPE, map.instance_type());
HeapObject raw = HeapObject::cast(map.constructor_or_back_pointer());
- MapWord map_word = raw.map_word(kRelaxedLoad);
- HeapObject forwarded =
- map_word.IsForwardingAddress() ? map_word.ToForwardingAddress() : raw;
- Foreign foreign = Foreign::cast(forwarded);
+ // The {Foreign} might be in the middle of being moved, which is why we
+ // can't read its map for a checked cast. But we can rely on its payload
+ // being intact in the old location.
+ Foreign foreign = Foreign::unchecked_cast(raw);
return reinterpret_cast<wasm::StructType*>(foreign.foreign_address());
}
@@ -624,10 +597,10 @@ wasm::ArrayType* WasmArray::type(Map map) {
wasm::ArrayType* WasmArray::GcSafeType(Map map) {
DCHECK_EQ(WASM_ARRAY_TYPE, map.instance_type());
HeapObject raw = HeapObject::cast(map.constructor_or_back_pointer());
- MapWord map_word = raw.map_word(kRelaxedLoad);
- HeapObject forwarded =
- map_word.IsForwardingAddress() ? map_word.ToForwardingAddress() : raw;
- Foreign foreign = Foreign::cast(forwarded);
+ // The {Foreign} might be in the middle of being moved, which is why we
+ // can't read its map for a checked cast. But we can rely on its payload
+ // being intact in the old location.
+ Foreign foreign = Foreign::unchecked_cast(raw);
return reinterpret_cast<wasm::ArrayType*>(foreign.foreign_address());
}
diff --git a/deps/v8/src/wasm/wasm-objects.cc b/deps/v8/src/wasm/wasm-objects.cc
index 43792a7532..a69dc4f173 100644
--- a/deps/v8/src/wasm/wasm-objects.cc
+++ b/deps/v8/src/wasm/wasm-objects.cc
@@ -77,36 +77,6 @@ class WasmInstanceNativeAllocations {
std::make_unique<uint8_t[]>(num_elem_segments));
}
- uint32_t indirect_function_table_capacity() const {
- return indirect_function_table_capacity_;
- }
-
- // Resizes the indirect function table.
- void resize_indirect_function_table(Isolate* isolate,
- Handle<WasmInstanceObject> instance,
- uint32_t new_capacity) {
- uint32_t old_capacity = indirect_function_table_capacity_;
- DCHECK_LT(old_capacity, new_capacity);
- // Grow exponentially to support repeated re-allocation.
- new_capacity = std::max(new_capacity, 2 * old_capacity);
- CHECK_GE(kMaxInt, old_capacity);
- CHECK_GE(kMaxInt, new_capacity);
-
- SET(instance, indirect_function_table_sig_ids,
- grow(indirect_function_table_sig_ids_.get(), old_capacity,
- new_capacity));
- SET(instance, indirect_function_table_targets,
- grow(indirect_function_table_targets_.get(), old_capacity,
- new_capacity));
-
- Handle<FixedArray> old_refs(instance->indirect_function_table_refs(),
- isolate);
- Handle<FixedArray> new_refs = isolate->factory()->CopyFixedArrayAndGrow(
- old_refs, static_cast<int>(new_capacity - old_capacity));
- instance->set_indirect_function_table_refs(*new_refs);
- indirect_function_table_capacity_ = new_capacity;
- }
-
private:
template <typename T>
std::unique_ptr<T[]> grow(T* old_arr, size_t old_size, size_t new_size) {
@@ -115,9 +85,6 @@ class WasmInstanceNativeAllocations {
return new_arr;
}
- uint32_t indirect_function_table_capacity_ = 0;
- std::unique_ptr<uint32_t[]> indirect_function_table_sig_ids_;
- std::unique_ptr<Address[]> indirect_function_table_targets_;
std::unique_ptr<Address[]> imported_function_targets_;
std::unique_ptr<Address[]> imported_mutable_globals_;
std::unique_ptr<Address[]> data_segment_starts_;
@@ -133,19 +100,9 @@ size_t EstimateNativeAllocationsSize(const WasmModule* module) {
(2 * kSystemPointerSize * module->num_imported_functions) +
((kSystemPointerSize + sizeof(uint32_t) + sizeof(uint8_t)) *
module->num_declared_data_segments);
- for (auto& table : module->tables) {
- estimate += 3 * kSystemPointerSize * table.initial_size;
- }
return estimate;
}
-WasmInstanceNativeAllocations* GetNativeAllocations(
- WasmInstanceObject instance) {
- return Managed<WasmInstanceNativeAllocations>::cast(
- instance.managed_native_allocations())
- .raw();
-}
-
enum DispatchTableElements : int {
kDispatchTableInstanceOffset,
kDispatchTableIndexOffset,
@@ -362,8 +319,8 @@ int WasmTableObject::Grow(Isolate* isolate, Handle<WasmTableObject> table,
Handle<WasmInstanceObject> instance(
WasmInstanceObject::cast(dispatch_tables->get(i)), isolate);
- DCHECK_EQ(old_size, WasmInstanceObject::IndirectFunctionTableSize(
- isolate, instance, table_index));
+ DCHECK_EQ(old_size,
+ instance->GetIndirectFunctionTable(isolate, table_index)->size());
WasmInstanceObject::EnsureIndirectFunctionTableWithMinimumSize(
instance, table_index, new_size);
}
@@ -388,6 +345,10 @@ bool WasmTableObject::IsValidElement(Isolate* isolate,
!table->instance().IsUndefined()
? WasmInstanceObject::cast(table->instance()).module()
: nullptr;
+ if (entry->IsWasmInternalFunction()) {
+ entry =
+ handle(Handle<WasmInternalFunction>::cast(entry)->external(), isolate);
+ }
return wasm::TypecheckJSObject(isolate, module, entry, table->type(),
&error_message);
}
@@ -403,8 +364,11 @@ void WasmTableObject::SetFunctionTableEntry(Isolate* isolate,
return;
}
- if (WasmExportedFunction::IsWasmExportedFunction(*entry)) {
- auto exported_function = Handle<WasmExportedFunction>::cast(entry);
+ Handle<Object> external =
+ handle(Handle<WasmInternalFunction>::cast(entry)->external(), isolate);
+
+ if (WasmExportedFunction::IsWasmExportedFunction(*external)) {
+ auto exported_function = Handle<WasmExportedFunction>::cast(external);
Handle<WasmInstanceObject> target_instance(exported_function->instance(),
isolate);
int func_index = exported_function->function_index();
@@ -413,13 +377,13 @@ void WasmTableObject::SetFunctionTableEntry(Isolate* isolate,
DCHECK_NOT_NULL(wasm_function->sig);
UpdateDispatchTables(isolate, table, entry_index, wasm_function->sig,
target_instance, func_index);
- } else if (WasmJSFunction::IsWasmJSFunction(*entry)) {
+ } else if (WasmJSFunction::IsWasmJSFunction(*external)) {
UpdateDispatchTables(isolate, table, entry_index,
- Handle<WasmJSFunction>::cast(entry));
+ Handle<WasmJSFunction>::cast(external));
} else {
- DCHECK(WasmCapiFunction::IsWasmCapiFunction(*entry));
+ DCHECK(WasmCapiFunction::IsWasmCapiFunction(*external));
UpdateDispatchTables(isolate, table, entry_index,
- Handle<WasmCapiFunction>::cast(entry));
+ Handle<WasmCapiFunction>::cast(external));
}
entries->set(entry_index, *entry);
}
@@ -480,11 +444,7 @@ Handle<Object> WasmTableObject::Get(Isolate* isolate,
case wasm::HeapType::kExtern:
return entry;
case wasm::HeapType::kFunc:
- if (WasmExportedFunction::IsWasmExportedFunction(*entry) ||
- WasmJSFunction::IsWasmJSFunction(*entry) ||
- WasmCapiFunction::IsWasmCapiFunction(*entry)) {
- return entry;
- }
+ if (entry->IsWasmInternalFunction()) return entry;
break;
case wasm::HeapType::kEq:
case wasm::HeapType::kI31:
@@ -501,11 +461,7 @@ Handle<Object> WasmTableObject::Get(Isolate* isolate,
DCHECK(WasmInstanceObject::cast(table->instance())
.module()
->has_signature(table->type().ref_index()));
- if (WasmExportedFunction::IsWasmExportedFunction(*entry) ||
- WasmJSFunction::IsWasmJSFunction(*entry) ||
- WasmCapiFunction::IsWasmCapiFunction(*entry)) {
- return entry;
- }
+ if (entry->IsWasmInternalFunction()) return entry;
break;
}
@@ -517,10 +473,11 @@ Handle<Object> WasmTableObject::Get(Isolate* isolate,
// Check if we already compiled a wrapper for the function but did not store
// it in the table slot yet.
- entry = WasmInstanceObject::GetOrCreateWasmExternalFunction(isolate, instance,
- function_index);
- entries->set(entry_index, *entry);
- return entry;
+ Handle<WasmInternalFunction> internal =
+ WasmInstanceObject::GetOrCreateWasmInternalFunction(isolate, instance,
+ function_index);
+ entries->set(entry_index, *internal);
+ return internal;
}
void WasmTableObject::Fill(Isolate* isolate, Handle<WasmTableObject> table,
@@ -556,8 +513,9 @@ void WasmTableObject::UpdateDispatchTables(
// Note that {SignatureMap::Find} may return {-1} if the signature is
// not found; it will simply never match any check.
auto sig_id = instance->module()->signature_map.Find(*sig);
- IndirectFunctionTableEntry(instance, table_index, entry_index)
- .Set(sig_id, target_instance, target_func_index);
+ FunctionTargetAndRef entry(target_instance, target_func_index);
+ instance->GetIndirectFunctionTable(isolate, table_index)
+ ->Set(entry_index, sig_id, entry.call_target(), *entry.ref());
}
}
@@ -637,11 +595,12 @@ void WasmTableObject::UpdateDispatchTables(
// Note that {SignatureMap::Find} may return {-1} if the signature is
// not found; it will simply never match any check.
auto sig_id = instance->module()->signature_map.Find(sig);
- IndirectFunctionTableEntry(instance, table_index, entry_index)
- .Set(sig_id, wasm_code->instruction_start(),
- WasmCapiFunctionData::cast(
- capi_function->shared().function_data(kAcquireLoad))
- .ref());
+ instance->GetIndirectFunctionTable(isolate, table_index)
+ ->Set(entry_index, sig_id, wasm_code->instruction_start(),
+ WasmCapiFunctionData::cast(
+ capi_function->shared().function_data(kAcquireLoad))
+ .internal()
+ .ref());
}
}
@@ -658,16 +617,17 @@ void WasmTableObject::ClearDispatchTables(Isolate* isolate,
WasmInstanceObject::cast(
dispatch_tables->get(i + kDispatchTableInstanceOffset)),
isolate);
- DCHECK_LT(index, WasmInstanceObject::IndirectFunctionTableSize(
- isolate, target_instance, table_index));
- IndirectFunctionTableEntry(target_instance, table_index, index).clear();
+ Handle<WasmIndirectFunctionTable> function_table =
+ target_instance->GetIndirectFunctionTable(isolate, table_index);
+ DCHECK_LT(index, function_table->size());
+ function_table->Clear(index);
}
}
void WasmTableObject::SetFunctionTablePlaceholder(
Isolate* isolate, Handle<WasmTableObject> table, int entry_index,
Handle<WasmInstanceObject> instance, int func_index) {
- // Put (instance, func_index) as a Tuple2 into the table_index.
+ // Put (instance, func_index) as a Tuple2 at {entry_index}.
// The {WasmExportedFunction} will be created lazily.
Handle<Tuple2> tuple = isolate->factory()->NewTuple2(
instance, Handle<Smi>(Smi::FromInt(func_index), isolate),
@@ -689,6 +649,10 @@ void WasmTableObject::GetFunctionTableEntry(
*is_null = element->IsNull(isolate);
if (*is_null) return;
+ if (element->IsWasmInternalFunction()) {
+ element = handle(Handle<WasmInternalFunction>::cast(element)->external(),
+ isolate);
+ }
if (WasmExportedFunction::IsWasmExportedFunction(*element)) {
auto target_func = Handle<WasmExportedFunction>::cast(element);
*instance = handle(target_func->instance(), isolate);
@@ -752,10 +716,24 @@ Handle<WasmIndirectFunctionTable> WasmIndirectFunctionTable::New(
isolate, IftNativeAllocations::SizeInMemory(size), table, size);
table->set_managed_native_allocations(*native_allocations);
for (uint32_t i = 0; i < size; ++i) {
- IndirectFunctionTableEntry(table, static_cast<int>(i)).clear();
+ table->Clear(i);
}
return table;
}
+void WasmIndirectFunctionTable::Set(uint32_t index, int sig_id,
+ Address call_target, Object ref) {
+ sig_ids()[index] = sig_id;
+ targets()[index] = call_target;
+ refs().set(index, ref);
+}
+
+void WasmIndirectFunctionTable::Clear(uint32_t index) {
+ sig_ids()[index] = -1;
+ targets()[index] = 0;
+ refs().set(
+ index,
+ ReadOnlyRoots(GetIsolateFromWritableObject(*this)).undefined_value());
+}
void WasmIndirectFunctionTable::Resize(Isolate* isolate,
Handle<WasmIndirectFunctionTable> table,
@@ -763,17 +741,27 @@ void WasmIndirectFunctionTable::Resize(Isolate* isolate,
uint32_t old_size = table->size();
if (old_size >= new_size) return; // Nothing to do.
+ table->set_size(new_size);
+
+ // Grow table exponentially to guarantee amortized constant allocation and gc
+ // time.
+ Handle<FixedArray> old_refs(table->refs(), isolate);
+ // Since we might have overallocated, {old_capacity} might be different than
+ // {old_size}.
+ uint32_t old_capacity = old_refs->length();
+ // If we have enough capacity, there is no need to reallocate.
+ if (new_size <= old_capacity) return;
+ uint32_t new_capacity = std::max(2 * old_capacity, new_size);
+
Managed<IftNativeAllocations>::cast(table->managed_native_allocations())
.raw()
- ->resize(table, new_size);
+ ->resize(table, new_capacity);
- Handle<FixedArray> old_refs(table->refs(), isolate);
Handle<FixedArray> new_refs = isolate->factory()->CopyFixedArrayAndGrow(
- old_refs, static_cast<int>(new_size - old_size));
+ old_refs, static_cast<int>(new_capacity - old_capacity));
table->set_refs(*new_refs);
- table->set_size(new_size);
- for (uint32_t i = old_size; i < new_size; ++i) {
- IndirectFunctionTableEntry(table, static_cast<int>(i)).clear();
+ for (uint32_t i = old_capacity; i < new_capacity; ++i) {
+ table->Clear(i);
}
}
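The rewritten {Resize} separates the table's logical size from the capacity of its backing storage and reallocates only when the capacity is exceeded, doubling it so repeated growth stays amortized constant. A stripped-down sketch of that policy, using std containers instead of the managed native allocations (hypothetical names, not V8 code):

    #include <algorithm>
    #include <cstdint>
    #include <vector>

    class IndirectTableSketch {
     public:
      uint32_t size() const { return size_; }

      void Resize(uint32_t new_size) {
        if (new_size <= size_) return;  // Nothing to do.
        size_ = new_size;
        uint32_t old_capacity = static_cast<uint32_t>(sig_ids_.size());
        if (new_size <= old_capacity) return;  // Enough slack from a prior grow.
        // Grow exponentially so a sequence of small resizes costs amortized O(1).
        uint32_t new_capacity = std::max(2 * old_capacity, new_size);
        sig_ids_.resize(new_capacity, -1);  // -1: cleared signature slot
        targets_.resize(new_capacity, 0);   // 0: cleared call target
      }

     private:
      uint32_t size_ = 0;
      std::vector<int32_t> sig_ids_;    // parallel arrays, one slot per entry
      std::vector<uintptr_t> targets_;
      // The tagged {refs} array is omitted here; it grows the same way.
    };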
@@ -1069,80 +1057,23 @@ MaybeHandle<WasmGlobalObject> WasmGlobalObject::New(
return global_obj;
}
-void IndirectFunctionTableEntry::clear() {
- if (!instance_.is_null()) {
- instance_->indirect_function_table_sig_ids()[index_] = -1;
- instance_->indirect_function_table_targets()[index_] = 0;
- instance_->indirect_function_table_refs().set(
- index_, ReadOnlyRoots(instance_->GetIsolate()).undefined_value());
- } else {
- DCHECK(!table_.is_null());
- table_->sig_ids()[index_] = -1;
- table_->targets()[index_] = 0;
- table_->refs().set(
- index_,
- ReadOnlyRoots(GetIsolateFromWritableObject(*table_)).undefined_value());
- }
-}
-
-void IndirectFunctionTableEntry::Set(int sig_id,
- Handle<WasmInstanceObject> target_instance,
- int target_func_index) {
- TRACE_IFT("IFT entry 0x%" PRIxPTR
- "[%d] = {sig_id=%d, target_instance=0x%" PRIxPTR
- ", target_func_index=%d}\n",
- instance_->ptr(), index_, sig_id, target_instance->ptr(),
- target_func_index);
-
- Object ref;
- Address call_target = 0;
+FunctionTargetAndRef::FunctionTargetAndRef(
+ Handle<WasmInstanceObject> target_instance, int target_func_index) {
+ Isolate* isolate = target_instance->native_context().GetIsolate();
if (target_func_index <
static_cast<int>(target_instance->module()->num_imported_functions)) {
// The function in the target instance was imported. Use its imports table,
// which contains a tuple needed by the import wrapper.
ImportedFunctionEntry entry(target_instance, target_func_index);
- ref = entry.object_ref();
- call_target = entry.target();
+ ref_ = handle(entry.object_ref(), isolate);
+ call_target_ = entry.target();
} else {
// The function in the target instance was not imported.
- ref = *target_instance;
- call_target = target_instance->GetCallTarget(target_func_index);
- }
- Set(sig_id, call_target, ref);
-}
-
-void IndirectFunctionTableEntry::Set(int sig_id, Address call_target,
- Object ref) {
- if (!instance_.is_null()) {
- instance_->indirect_function_table_sig_ids()[index_] = sig_id;
- instance_->indirect_function_table_targets()[index_] = call_target;
- instance_->indirect_function_table_refs().set(index_, ref);
- } else {
- DCHECK(!table_.is_null());
- table_->sig_ids()[index_] = sig_id;
- table_->targets()[index_] = call_target;
- table_->refs().set(index_, ref);
+ ref_ = target_instance;
+ call_target_ = target_instance->GetCallTarget(target_func_index);
}
}
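{FunctionTargetAndRef} folds the old entry-setting logic into a small value type: for an imported function it borrows the ref and target from the import entry, while for a module-local function the instance itself becomes the ref. A hypothetical, non-V8 sketch of that resolution step:

    #include <cstddef>
    #include <cstdint>
    #include <vector>

    struct ImportEntrySketch {
      void* object_ref;  // tuple needed by the import wrapper
      uintptr_t target;  // wrapper entrypoint
    };

    struct InstanceSketch {
      std::vector<ImportEntrySketch> imports;  // imported functions come first
      std::vector<uintptr_t> call_targets;     // by function index, all functions
    };

    struct TargetAndRefSketch {
      void* ref;
      uintptr_t call_target;
    };

    TargetAndRefSketch Resolve(InstanceSketch& instance, size_t func_index) {
      if (func_index < instance.imports.size()) {
        // Imported: reuse the import entry's ref and wrapper target.
        return {instance.imports[func_index].object_ref,
                instance.imports[func_index].target};
      }
      // Module-local: the instance itself is passed as the ref.
      return {&instance, instance.call_targets[func_index]};
    }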
-Object IndirectFunctionTableEntry::object_ref() const {
- return !instance_.is_null()
- ? instance_->indirect_function_table_refs().get(index_)
- : table_->refs().get(index_);
-}
-
-int IndirectFunctionTableEntry::sig_id() const {
- return !instance_.is_null()
- ? instance_->indirect_function_table_sig_ids()[index_]
- : table_->sig_ids()[index_];
-}
-
-Address IndirectFunctionTableEntry::target() const {
- return !instance_.is_null()
- ? instance_->indirect_function_table_targets()[index_]
- : table_->targets()[index_];
-}
-
void ImportedFunctionEntry::SetWasmToJs(
Isolate* isolate, Handle<JSReceiver> callable,
const wasm::WasmCode* wasm_to_js_wrapper) {
@@ -1196,34 +1127,13 @@ bool WasmInstanceObject::EnsureIndirectFunctionTableWithMinimumSize(
Handle<WasmInstanceObject> instance, int table_index,
uint32_t minimum_size) {
Isolate* isolate = instance->GetIsolate();
- if (table_index > 0) {
- DCHECK_LT(table_index, instance->indirect_function_tables().length());
- auto table =
- handle(WasmIndirectFunctionTable::cast(
- instance->indirect_function_tables().get(table_index)),
- isolate);
- WasmIndirectFunctionTable::Resize(isolate, table, minimum_size);
- return true;
- }
-
- uint32_t old_size = instance->indirect_function_table_size();
- if (old_size >= minimum_size) return false; // Nothing to do.
-
- auto native_allocations = GetNativeAllocations(*instance);
- if (native_allocations->indirect_function_table_capacity() < minimum_size) {
- HandleScope scope(isolate);
- native_allocations->resize_indirect_function_table(isolate, instance,
- minimum_size);
- DCHECK_GE(native_allocations->indirect_function_table_capacity(),
- minimum_size);
- }
- instance->set_indirect_function_table_size(minimum_size);
- for (uint32_t j = old_size; j < minimum_size; j++) {
- // {WasmInstanceNativeAllocations} only manages the memory of table 0.
- // Therefore we pass the {table_index} as a constant here.
- IndirectFunctionTableEntry(instance, 0, static_cast<int>(j)).clear();
+ DCHECK_LT(table_index, instance->indirect_function_tables().length());
+ Handle<WasmIndirectFunctionTable> table =
+ instance->GetIndirectFunctionTable(isolate, table_index);
+ WasmIndirectFunctionTable::Resize(isolate, table, minimum_size);
+ if (table_index == 0) {
+ instance->SetIndirectFunctionTableShortcuts(isolate);
}
-
return true;
}
@@ -1299,19 +1209,10 @@ Handle<WasmInstanceObject> WasmInstanceObject::New(
isolate->debug()->hook_on_function_call_address());
instance->set_managed_object_maps(*isolate->factory()->empty_fixed_array());
instance->set_feedback_vectors(*isolate->factory()->empty_fixed_array());
- instance->set_num_liftoff_function_calls_array(
- module_object->native_module()->num_liftoff_function_calls_array());
+ instance->set_tiering_budget_array(
+ module_object->native_module()->tiering_budget_array());
instance->set_break_on_entry(module_object->script().break_on_entry());
- if (FLAG_experimental_wasm_stack_switching) {
- // TODO(thibaudm): If there is already a continuation object for the current
- // execution context, re-use that instead of creating a new one.
- std::unique_ptr<wasm::StackMemory> stack(
- wasm::StackMemory::GetCurrentStackView(isolate));
- auto continuation = WasmContinuationObject::New(isolate, std::move(stack));
- instance->set_active_continuation(*continuation);
- }
-
// Insert the new instance into the scripts weak list of instances. This list
// is used for breakpoints affecting all instances belonging to the script.
if (module_object->script().type() == Script::TYPE_WASM) {
@@ -1379,17 +1280,25 @@ Address WasmInstanceObject::GetCallTarget(uint32_t func_index) {
return native_module->GetCallTargetForFunction(func_index);
}
-int WasmInstanceObject::IndirectFunctionTableSize(
- Isolate* isolate, Handle<WasmInstanceObject> instance,
- uint32_t table_index) {
- if (table_index == 0) {
- return instance->indirect_function_table_size();
+Handle<WasmIndirectFunctionTable> WasmInstanceObject::GetIndirectFunctionTable(
+ Isolate* isolate, uint32_t table_index) {
+ DCHECK_LT(table_index, indirect_function_tables().length());
+ return handle(WasmIndirectFunctionTable::cast(
+ indirect_function_tables().get(table_index)),
+ isolate);
+}
+
+void WasmInstanceObject::SetIndirectFunctionTableShortcuts(Isolate* isolate) {
+ if (indirect_function_tables().length() > 0 &&
+ indirect_function_tables().get(0).IsWasmIndirectFunctionTable()) {
+ HandleScope scope(isolate);
+ Handle<WasmIndirectFunctionTable> table0 =
+ GetIndirectFunctionTable(isolate, 0);
+ set_indirect_function_table_size(table0->size());
+ set_indirect_function_table_refs(table0->refs());
+ set_indirect_function_table_sig_ids(table0->sig_ids());
+ set_indirect_function_table_targets(table0->targets());
}
- auto table =
- handle(WasmIndirectFunctionTable::cast(
- instance->indirect_function_tables().get(table_index)),
- isolate);
- return table->size();
}
// static
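{SetIndirectFunctionTableShortcuts} copies table 0's size and backing arrays onto the instance so that generated code keeps its fast, one-indirection access path even though every table is now a plain {WasmIndirectFunctionTable} object. A hypothetical sketch of that cache-refresh pattern (not the real V8 types):

    #include <cstdint>

    struct TableSketch {
      int32_t* sig_ids;
      uintptr_t* targets;
      uint32_t size;
    };

    struct InstanceShortcutsSketch {
      // Shortcut copies of table 0's fields; they must be refreshed after any
      // resize of table 0, since a resize may reallocate the backing arrays.
      int32_t* table0_sig_ids = nullptr;
      uintptr_t* table0_targets = nullptr;
      uint32_t table0_size = 0;

      void Refresh(const TableSketch& table0) {
        table0_sig_ids = table0.sig_ids;
        table0_targets = table0.targets;
        table0_size = table0.size;
      }
    };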
@@ -1440,27 +1349,27 @@ bool WasmInstanceObject::InitTableEntries(Isolate* isolate,
dst, src, count);
}
-MaybeHandle<WasmExternalFunction> WasmInstanceObject::GetWasmExternalFunction(
+MaybeHandle<WasmInternalFunction> WasmInstanceObject::GetWasmInternalFunction(
Isolate* isolate, Handle<WasmInstanceObject> instance, int index) {
- MaybeHandle<WasmExternalFunction> result;
- if (instance->has_wasm_external_functions()) {
- Object val = instance->wasm_external_functions().get(index);
+ MaybeHandle<WasmInternalFunction> result;
+ if (instance->has_wasm_internal_functions()) {
+ Object val = instance->wasm_internal_functions().get(index);
if (!val.IsUndefined(isolate)) {
- result = Handle<WasmExternalFunction>(WasmExternalFunction::cast(val),
+ result = Handle<WasmInternalFunction>(WasmInternalFunction::cast(val),
isolate);
}
}
return result;
}
-Handle<WasmExternalFunction>
-WasmInstanceObject::GetOrCreateWasmExternalFunction(
+Handle<WasmInternalFunction>
+WasmInstanceObject::GetOrCreateWasmInternalFunction(
Isolate* isolate, Handle<WasmInstanceObject> instance, int function_index) {
- MaybeHandle<WasmExternalFunction> maybe_result =
- WasmInstanceObject::GetWasmExternalFunction(isolate, instance,
+ MaybeHandle<WasmInternalFunction> maybe_result =
+ WasmInstanceObject::GetWasmInternalFunction(isolate, instance,
function_index);
- Handle<WasmExternalFunction> result;
+ Handle<WasmInternalFunction> result;
if (maybe_result.ToHandle(&result)) {
return result;
}
@@ -1487,27 +1396,29 @@ WasmInstanceObject::GetOrCreateWasmExternalFunction(
isolate, function.sig, instance->module(), function.imported);
module_object->export_wrappers().set(wrapper_index, ToCodeT(*wrapper));
}
- result = Handle<WasmExternalFunction>::cast(WasmExportedFunction::New(
+ auto external = Handle<WasmExternalFunction>::cast(WasmExportedFunction::New(
isolate, instance, function_index,
static_cast<int>(function.sig->parameter_count()), wrapper));
+ result =
+ WasmInternalFunction::FromExternal(external, isolate).ToHandleChecked();
- WasmInstanceObject::SetWasmExternalFunction(isolate, instance, function_index,
+ WasmInstanceObject::SetWasmInternalFunction(isolate, instance, function_index,
result);
return result;
}
-void WasmInstanceObject::SetWasmExternalFunction(
+void WasmInstanceObject::SetWasmInternalFunction(
Isolate* isolate, Handle<WasmInstanceObject> instance, int index,
- Handle<WasmExternalFunction> val) {
+ Handle<WasmInternalFunction> val) {
Handle<FixedArray> functions;
- if (!instance->has_wasm_external_functions()) {
+ if (!instance->has_wasm_internal_functions()) {
// Lazily allocate the wasm external functions array.
functions = isolate->factory()->NewFixedArray(
static_cast<int>(instance->module()->functions.size()));
- instance->set_wasm_external_functions(*functions);
+ instance->set_wasm_internal_functions(*functions);
} else {
functions =
- Handle<FixedArray>(instance->wasm_external_functions(), isolate);
+ Handle<FixedArray>(instance->wasm_internal_functions(), isolate);
}
functions->set(index, *val);
}
@@ -1546,6 +1457,7 @@ void WasmInstanceObject::ImportWasmJSFunctionIntoTable(
->shared()
.internal_formal_parameter_count_without_receiver();
}
+ // TODO(manoskouk): Reuse js_function->wasm_to_js_wrapper_code().
wasm::WasmCompilationResult result = compiler::CompileWasmImportCallWrapper(
&env, kind, sig, false, expected_arity);
wasm::CodeSpaceWriteScope write_scope(native_module);
@@ -1567,8 +1479,9 @@ void WasmInstanceObject::ImportWasmJSFunctionIntoTable(
// Update the dispatch table.
Handle<WasmApiFunctionRef> ref =
isolate->factory()->NewWasmApiFunctionRef(callable);
- IndirectFunctionTableEntry(instance, table_index, entry_index)
- .Set(sig_id, call_target, *ref);
+ WasmIndirectFunctionTable::cast(
+ instance->indirect_function_tables().get(table_index))
+ .Set(entry_index, sig_id, call_target, *ref);
}
// static
@@ -1793,7 +1706,7 @@ Handle<Object> WasmExceptionPackage::GetExceptionValues(
isolate, exception_package,
isolate->factory()->wasm_exception_values_symbol())
.ToHandle(&values)) {
- DCHECK(values->IsFixedArray());
+ DCHECK_IMPLIES(!values->IsUndefined(), values->IsFixedArray());
return values;
}
return ReadOnlyRoots(isolate).undefined_value_handle();
@@ -1834,18 +1747,15 @@ Handle<WasmContinuationObject> WasmContinuationObject::New(
HeapObject parent) {
Handle<WasmContinuationObject> result = Handle<WasmContinuationObject>::cast(
isolate->factory()->NewStruct(WASM_CONTINUATION_OBJECT_TYPE));
- auto jmpbuf = std::make_unique<wasm::JumpBuffer>();
- jmpbuf->stack_limit = stack->limit();
- jmpbuf->sp = stack->base();
- result->set_jmpbuf(
- *isolate->factory()->NewForeign(reinterpret_cast<Address>(jmpbuf.get())));
+ stack->jmpbuf()->stack_limit = stack->jslimit();
+ stack->jmpbuf()->sp = stack->base();
+ stack->jmpbuf()->fp = kNullAddress;
+ result->set_jmpbuf(*isolate->factory()->NewForeign(
+ reinterpret_cast<Address>(stack->jmpbuf())));
size_t external_size = stack->owned_size();
Handle<Foreign> managed_stack = Managed<wasm::StackMemory>::FromUniquePtr(
isolate, external_size, std::move(stack));
- Handle<Foreign> managed_jmpbuf = Managed<wasm::JumpBuffer>::FromUniquePtr(
- isolate, sizeof(wasm::JumpBuffer), std::move(jmpbuf));
- result->set_managed_stack(*managed_stack);
- result->set_managed_jmpbuf(*managed_jmpbuf);
+ result->set_stack(*managed_stack);
result->set_parent(parent);
return result;
}
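{WasmContinuationObject::New} no longer allocates a separately managed {JumpBuffer}; it points at the jump buffer that already lives inside the stack memory. A rough sketch of that ownership change, with hypothetical shapes rather than the real V8 classes:

    #include <cstdint>
    #include <memory>
    #include <utility>

    struct JumpBufferSketch {
      uintptr_t stack_limit = 0;
      uintptr_t sp = 0;
      uintptr_t fp = 0;
    };

    struct StackMemorySketch {
      std::unique_ptr<uint8_t[]> memory;
      JumpBufferSketch jmpbuf;  // embedded: no separate allocation to manage
    };

    struct ContinuationSketch {
      std::unique_ptr<StackMemorySketch> stack;  // owns the stack (and jmpbuf)
      JumpBufferSketch* jmpbuf;                  // borrows stack->jmpbuf
    };

    ContinuationSketch NewContinuation(std::unique_ptr<StackMemorySketch> stack) {
      JumpBufferSketch* jmpbuf = &stack->jmpbuf;
      return ContinuationSketch{std::move(stack), jmpbuf};
    }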
@@ -1860,10 +1770,23 @@ Handle<WasmContinuationObject> WasmContinuationObject::New(
// static
Handle<WasmContinuationObject> WasmContinuationObject::New(
Isolate* isolate, WasmContinuationObject parent) {
- auto stack = std::unique_ptr<wasm::StackMemory>(wasm::StackMemory::New());
+ auto stack =
+ std::unique_ptr<wasm::StackMemory>(wasm::StackMemory::New(isolate));
return New(isolate, std::move(stack), parent);
}
+// static
+Handle<WasmSuspenderObject> WasmSuspenderObject::New(Isolate* isolate) {
+ Handle<JSFunction> suspender_cons(
+ isolate->native_context()->wasm_suspender_constructor(), isolate);
+ // Suspender objects should be at least as long-lived as the instances of
+ // which it will wrap the imports/exports, allocate in old space too.
+ auto suspender = Handle<WasmSuspenderObject>::cast(
+ isolate->factory()->NewJSObject(suspender_cons, AllocationType::kOld));
+ suspender->set_continuation(ReadOnlyRoots(isolate).undefined_value());
+ return suspender;
+}
+
#ifdef DEBUG
namespace {
@@ -1954,16 +1877,21 @@ Handle<WasmCapiFunction> WasmCapiFunction::New(
// call target (which is an address pointing into the C++ binary).
call_target = ExternalReference::Create(call_target).address();
+ // TODO(7748): Support proper typing for external functions. That requires
+ // global (cross-module) canonicalization of signatures/RTTs.
+ Handle<Map> rtt = isolate->factory()->wasm_internal_function_map();
Handle<WasmCapiFunctionData> fun_data =
isolate->factory()->NewWasmCapiFunctionData(
call_target, embedder_data,
- isolate->builtins()->code_handle(Builtin::kIllegal),
+ isolate->builtins()->code_handle(Builtin::kIllegal), rtt,
serialized_signature);
Handle<SharedFunctionInfo> shared =
isolate->factory()->NewSharedFunctionInfoForWasmCapiFunction(fun_data);
- return Handle<WasmCapiFunction>::cast(
+ Handle<JSFunction> result =
Factory::JSFunctionBuilder{isolate, shared, isolate->native_context()}
- .Build());
+ .Build();
+ fun_data->internal().set_external(*result);
+ return Handle<WasmCapiFunction>::cast(result);
}
WasmInstanceObject WasmExportedFunction::instance() {
@@ -1991,10 +1919,19 @@ Handle<WasmExportedFunction> WasmExportedFunction::New(
Factory* factory = isolate->factory();
const wasm::FunctionSig* sig = instance->module()->functions[func_index].sig;
Address call_target = instance->GetCallTarget(func_index);
+ Handle<Map> rtt;
+ if (FLAG_experimental_wasm_gc) {
+ int sig_index = instance->module()->functions[func_index].sig_index;
+ // TODO(7748): Create funcref RTTs lazily?
+ rtt = handle(Map::cast(instance->managed_object_maps().get(sig_index)),
+ isolate);
+ } else {
+ rtt = factory->wasm_internal_function_map();
+ }
Handle<WasmExportedFunctionData> function_data =
factory->NewWasmExportedFunctionData(
export_wrapper, instance, call_target, ref, func_index,
- reinterpret_cast<Address>(sig), wasm::kGenericWrapperBudget);
+ reinterpret_cast<Address>(sig), wasm::kGenericWrapperBudget, rtt);
MaybeHandle<String> maybe_name;
bool is_asm_js_module = instance->module_object().is_asm_js();
@@ -2016,17 +1953,7 @@ Handle<WasmExportedFunction> WasmExportedFunction::New(
Handle<Map> function_map;
switch (instance->module()->origin) {
case wasm::kWasmOrigin:
- if (instance->module_object()
- .native_module()
- ->enabled_features()
- .has_gc()) {
- uint32_t sig_index =
- instance->module()->functions[func_index].sig_index;
- function_map = handle(
- Map::cast(instance->managed_object_maps().get(sig_index)), isolate);
- } else {
- function_map = isolate->wasm_exported_function_map();
- }
+ function_map = isolate->wasm_exported_function_map();
break;
case wasm::kAsmJsSloppyOrigin:
function_map = isolate->sloppy_function_map();
@@ -2051,6 +1978,7 @@ Handle<WasmExportedFunction> WasmExportedFunction::New(
shared->set_length(arity);
shared->set_internal_formal_parameter_count(JSParameterCount(arity));
shared->set_script(instance->module_object().script());
+ function_data->internal().set_external(*js_function);
return Handle<WasmExportedFunction>::cast(js_function);
}
@@ -2124,9 +2052,12 @@ Handle<WasmJSFunction> WasmJSFunction::New(Isolate* isolate,
}
Factory* factory = isolate->factory();
+ // TODO(7748): Support proper typing for external functions. That requires
+ // global (cross-module) canonicalization of signatures/RTTs.
+ Handle<Map> rtt = factory->wasm_internal_function_map();
Handle<WasmJSFunctionData> function_data = factory->NewWasmJSFunctionData(
call_target, callable, return_count, parameter_count, serialized_sig,
- wrapper_code);
+ wrapper_code, rtt);
if (wasm::WasmFeatures::FromIsolate(isolate).has_typed_funcref()) {
using CK = compiler::WasmImportCallKind;
@@ -2145,7 +2076,7 @@ Handle<WasmJSFunction> WasmJSFunction::New(Isolate* isolate,
Handle<Code> wasm_to_js_wrapper_code =
compiler::CompileWasmToJSWrapper(isolate, sig, kind, expected_arity)
.ToHandleChecked();
- function_data->set_wasm_to_js_wrapper_code(*wasm_to_js_wrapper_code);
+ function_data->internal().set_code(*wasm_to_js_wrapper_code);
}
Handle<String> name = factory->Function_string();
@@ -2153,25 +2084,23 @@ Handle<WasmJSFunction> WasmJSFunction::New(Isolate* isolate,
name = JSFunction::GetDebugName(Handle<JSFunction>::cast(callable));
name = String::Flatten(isolate, name);
}
- Handle<Map> function_map =
- Map::Copy(isolate, isolate->wasm_exported_function_map(),
- "fresh function map for WasmJSFunction::New");
Handle<NativeContext> context(isolate->native_context());
Handle<SharedFunctionInfo> shared =
factory->NewSharedFunctionInfoForWasmJSFunction(name, function_data);
Handle<JSFunction> js_function =
Factory::JSFunctionBuilder{isolate, shared, context}
- .set_map(function_map)
+ .set_map(isolate->wasm_exported_function_map())
.Build();
js_function->shared().set_internal_formal_parameter_count(
JSParameterCount(parameter_count));
+ function_data->internal().set_external(*js_function);
return Handle<WasmJSFunction>::cast(js_function);
}
JSReceiver WasmJSFunction::GetCallable() const {
- return JSReceiver::cast(
- WasmApiFunctionRef::cast(shared().wasm_js_function_data().ref())
- .callable());
+ return JSReceiver::cast(WasmApiFunctionRef::cast(
+ shared().wasm_js_function_data().internal().ref())
+ .callable());
}
const wasm::FunctionSig* WasmJSFunction::GetSignature(Zone* zone) {
@@ -2211,6 +2140,24 @@ bool WasmExternalFunction::IsWasmExternalFunction(Object object) {
WasmJSFunction::IsWasmJSFunction(object);
}
+// static
+MaybeHandle<WasmInternalFunction> WasmInternalFunction::FromExternal(
+ Handle<Object> external, Isolate* isolate) {
+ if (external->IsNull(isolate)) {
+ return MaybeHandle<WasmInternalFunction>();
+ }
+ if (WasmExportedFunction::IsWasmExportedFunction(*external) ||
+ WasmJSFunction::IsWasmJSFunction(*external) ||
+ WasmCapiFunction::IsWasmCapiFunction(*external)) {
+ WasmFunctionData data = WasmFunctionData::cast(
+ Handle<JSFunction>::cast(external)->shared().function_data(
+ kAcquireLoad));
+ return handle(data.internal(), isolate);
+ }
+ // {external} is not null or a wasm external function.
+ return MaybeHandle<WasmInternalFunction>();
+}
+
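{FromExternal} is the bridge between the two representations this patch introduces: the JS-facing wrapper (a {WasmExportedFunction}, {WasmJSFunction} or {WasmCapiFunction}) and the {WasmInternalFunction} that wasm code actually calls through. A hypothetical non-V8 sketch of the pairing and of the recovery step:

    #include <optional>

    struct InternalFunctionSketch;

    struct ExternalFunctionSketch {      // JS-facing wrapper
      InternalFunctionSketch* internal;  // set when the wrapper is created
    };

    struct InternalFunctionSketch {      // what wasm calls through
      void* call_target;
      ExternalFunctionSketch* external;  // lazily created; may be null
    };

    // Mirrors the shape of WasmInternalFunction::FromExternal: given a value
    // that may be an external wrapper, recover the internal function, or
    // nothing for null / non-function values.
    std::optional<InternalFunctionSketch*> FromExternalSketch(
        ExternalFunctionSketch* maybe_wrapper) {
      if (maybe_wrapper == nullptr) return std::nullopt;
      return maybe_wrapper->internal;
    }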
Handle<WasmExceptionTag> WasmExceptionTag::New(Isolate* isolate, int index) {
Handle<WasmExceptionTag> result =
Handle<WasmExceptionTag>::cast(isolate->factory()->NewStruct(
diff --git a/deps/v8/src/wasm/wasm-objects.h b/deps/v8/src/wasm/wasm-objects.h
index 41a5c5b694..bf07fd2bb3 100644
--- a/deps/v8/src/wasm/wasm-objects.h
+++ b/deps/v8/src/wasm/wasm-objects.h
@@ -42,6 +42,7 @@ class WireBytesRef;
class BreakPoint;
class JSArrayBuffer;
class SeqOneByteString;
+class StructBodyDescriptor;
class WasmCapiFunction;
class WasmExceptionTag;
class WasmExportedFunction;
@@ -61,34 +62,16 @@ class Managed;
DECL_GETTER(has_##name, bool) \
DECL_ACCESSORS(name, type)
-// A helper for an entry in an indirect function table (IFT).
-// The underlying storage in the instance is used by generated code to
-// call functions indirectly at runtime.
-// Each entry has the following fields:
-// - object = target instance, if a Wasm function, tuple if imported
-// - sig_id = signature id of function
-// - target = entrypoint to Wasm code or import wrapper code
-class V8_EXPORT_PRIVATE IndirectFunctionTableEntry {
+class V8_EXPORT_PRIVATE FunctionTargetAndRef {
public:
- inline IndirectFunctionTableEntry(Handle<WasmInstanceObject>, int table_index,
- int entry_index);
-
- inline IndirectFunctionTableEntry(Handle<WasmIndirectFunctionTable> table,
- int entry_index);
-
- void clear();
- void Set(int sig_id, Handle<WasmInstanceObject> target_instance,
- int target_func_index);
- void Set(int sig_id, Address call_target, Object ref);
-
- Object object_ref() const;
- int sig_id() const;
- Address target() const;
+ FunctionTargetAndRef(Handle<WasmInstanceObject> target_instance,
+ int target_func_index);
+ Handle<Object> ref() { return ref_; }
+ Address call_target() { return call_target_; }
private:
- Handle<WasmInstanceObject> const instance_;
- Handle<WasmIndirectFunctionTable> const table_;
- int const index_;
+ Handle<Object> ref_;
+ Address call_target_;
};
// A helper for an entry for an imported function, indexed statically.
@@ -250,6 +233,7 @@ class WasmTableObject
int* function_index, MaybeHandle<WasmJSFunction>* maybe_js_function);
private:
+ // {entry} is either {Null} or a {WasmInternalFunction}.
static void SetFunctionTableEntry(Isolate* isolate,
Handle<WasmTableObject> table,
Handle<FixedArray> entries, int entry_index,
@@ -347,7 +331,7 @@ class V8_EXPORT_PRIVATE WasmInstanceObject : public JSObject {
DECL_OPTIONAL_ACCESSORS(indirect_function_table_refs, FixedArray)
DECL_OPTIONAL_ACCESSORS(managed_native_allocations, Foreign)
DECL_OPTIONAL_ACCESSORS(tags_table, FixedArray)
- DECL_OPTIONAL_ACCESSORS(wasm_external_functions, FixedArray)
+ DECL_OPTIONAL_ACCESSORS(wasm_internal_functions, FixedArray)
DECL_ACCESSORS(managed_object_maps, FixedArray)
DECL_ACCESSORS(feedback_vectors, FixedArray)
DECL_PRIMITIVE_ACCESSORS(memory_start, byte*)
@@ -370,8 +354,7 @@ class V8_EXPORT_PRIVATE WasmInstanceObject : public JSObject {
DECL_PRIMITIVE_ACCESSORS(data_segment_sizes, uint32_t*)
DECL_PRIMITIVE_ACCESSORS(dropped_elem_segments, byte*)
DECL_PRIMITIVE_ACCESSORS(hook_on_function_call_address, Address)
- DECL_PRIMITIVE_ACCESSORS(num_liftoff_function_calls_array, uint32_t*)
- DECL_ACCESSORS(active_continuation, WasmContinuationObject)
+ DECL_PRIMITIVE_ACCESSORS(tiering_budget_array, uint32_t*)
DECL_PRIMITIVE_ACCESSORS(break_on_entry, uint8_t)
// Clear uninitialized padding space. This ensures that the snapshot content
@@ -412,7 +395,7 @@ class V8_EXPORT_PRIVATE WasmInstanceObject : public JSObject {
V(kDataSegmentSizesOffset, kSystemPointerSize) \
V(kDroppedElemSegmentsOffset, kSystemPointerSize) \
V(kHookOnFunctionCallAddressOffset, kSystemPointerSize) \
- V(kNumLiftoffFunctionCallsArrayOffset, kSystemPointerSize) \
+ V(kTieringBudgetArrayOffset, kSystemPointerSize) \
/* Less than system pointer size aligned fields are below. */ \
V(kModuleObjectOffset, kTaggedSize) \
V(kExportsObjectOffset, kTaggedSize) \
@@ -425,10 +408,9 @@ class V8_EXPORT_PRIVATE WasmInstanceObject : public JSObject {
V(kIndirectFunctionTablesOffset, kTaggedSize) \
V(kManagedNativeAllocationsOffset, kTaggedSize) \
V(kTagsTableOffset, kTaggedSize) \
- V(kWasmExternalFunctionsOffset, kTaggedSize) \
+ V(kWasmInternalFunctionsOffset, kTaggedSize) \
V(kManagedObjectMapsOffset, kTaggedSize) \
V(kFeedbackVectorsOffset, kTaggedSize) \
- V(kActiveContinuationOffset, kTaggedSize) \
V(kBreakOnEntryOffset, kUInt8Size) \
/* More padding to make the header pointer-size aligned */ \
V(kHeaderPaddingOffset, POINTER_SIZE_PADDING(kHeaderPaddingOffset)) \
@@ -463,7 +445,7 @@ class V8_EXPORT_PRIVATE WasmInstanceObject : public JSObject {
kIndirectFunctionTablesOffset,
kManagedNativeAllocationsOffset,
kTagsTableOffset,
- kWasmExternalFunctionsOffset,
+ kWasmInternalFunctionsOffset,
kManagedObjectMapsOffset,
kFeedbackVectorsOffset};
@@ -479,9 +461,10 @@ class V8_EXPORT_PRIVATE WasmInstanceObject : public JSObject {
Address GetCallTarget(uint32_t func_index);
- static int IndirectFunctionTableSize(Isolate* isolate,
- Handle<WasmInstanceObject> instance,
- uint32_t table_index);
+ Handle<WasmIndirectFunctionTable> GetIndirectFunctionTable(
+ Isolate*, uint32_t table_index);
+
+ void SetIndirectFunctionTableShortcuts(Isolate* isolate);
// Copies table entries. Returns {false} if the ranges are out-of-bounds.
static bool CopyTableEntries(Isolate* isolate,
@@ -502,21 +485,21 @@ class V8_EXPORT_PRIVATE WasmInstanceObject : public JSObject {
// Iterates all fields in the object except the untagged fields.
class BodyDescriptor;
- static MaybeHandle<WasmExternalFunction> GetWasmExternalFunction(
+ static MaybeHandle<WasmInternalFunction> GetWasmInternalFunction(
Isolate* isolate, Handle<WasmInstanceObject> instance, int index);
- // Acquires the {WasmExternalFunction} for a given {function_index} from the
- // cache of the given {instance}, or creates a new {WasmExportedFunction} if
- // it does not exist yet. The new {WasmExportedFunction} is added to the
+ // Acquires the {WasmInternalFunction} for a given {function_index} from the
+ // cache of the given {instance}, or creates a new {WasmInternalFunction} if
+ // it does not exist yet. The new {WasmInternalFunction} is added to the
// cache of the {instance} immediately.
- static Handle<WasmExternalFunction> GetOrCreateWasmExternalFunction(
+ static Handle<WasmInternalFunction> GetOrCreateWasmInternalFunction(
Isolate* isolate, Handle<WasmInstanceObject> instance,
int function_index);
- static void SetWasmExternalFunction(Isolate* isolate,
+ static void SetWasmInternalFunction(Isolate* isolate,
Handle<WasmInstanceObject> instance,
int index,
- Handle<WasmExternalFunction> val);
+ Handle<WasmInternalFunction> val);
// Imports a constructed {WasmJSFunction} into the indirect function table of
// this instance. Note that this might trigger wrapper compilation, since a
@@ -694,6 +677,9 @@ class WasmIndirectFunctionTable
Isolate* isolate, uint32_t size);
static void Resize(Isolate* isolate, Handle<WasmIndirectFunctionTable> table,
uint32_t new_size);
+ V8_EXPORT_PRIVATE void Set(uint32_t index, int sig_id, Address call_target,
+ Object ref);
+ void Clear(uint32_t index);
DECL_PRINTER(WasmIndirectFunctionTable)
@@ -704,13 +690,15 @@ class WasmIndirectFunctionTable
};
class WasmFunctionData
- : public TorqueGeneratedWasmFunctionData<WasmFunctionData, Foreign> {
+ : public TorqueGeneratedWasmFunctionData<WasmFunctionData, HeapObject> {
public:
- DECL_ACCESSORS(ref, Object)
+ DECL_ACCESSORS(internal, WasmInternalFunction)
DECL_ACCESSORS(wrapper_code, Code)
DECL_PRINTER(WasmFunctionData)
+ using BodyDescriptor = FlexibleBodyDescriptor<kStartOfStrongFieldsOffset>;
+
TQ_OBJECT_CONSTRUCTORS(WasmFunctionData)
};
@@ -727,13 +715,14 @@ class WasmExportedFunctionData
DECL_PRINTER(WasmExportedFunctionData)
DECL_VERIFIER(WasmExportedFunctionData)
- class BodyDescriptor;
+ using BodyDescriptor =
+ FlexibleBodyDescriptor<WasmFunctionData::kStartOfStrongFieldsOffset>;
TQ_OBJECT_CONSTRUCTORS(WasmExportedFunctionData)
};
class WasmApiFunctionRef
- : public TorqueGeneratedWasmApiFunctionRef<WasmApiFunctionRef, Foreign> {
+ : public TorqueGeneratedWasmApiFunctionRef<WasmApiFunctionRef, HeapObject> {
public:
// Dispatched behavior.
DECL_PRINTER(WasmApiFunctionRef)
@@ -743,6 +732,28 @@ class WasmApiFunctionRef
TQ_OBJECT_CONSTRUCTORS(WasmApiFunctionRef)
};
+class WasmInternalFunction
+ : public TorqueGeneratedWasmInternalFunction<WasmInternalFunction,
+ Foreign> {
+ public:
+ DECL_ACCESSORS(code, Code)
+
+ // Returns a handle to the corresponding WasmInternalFunction if {external} is
+ // a WasmExternalFunction, or an empty handle otherwise.
+ static MaybeHandle<WasmInternalFunction> FromExternal(Handle<Object> external,
+ Isolate* isolate);
+
+ // Dispatched behavior.
+ DECL_PRINTER(WasmInternalFunction)
+
+ class BodyDescriptor;
+
+ TQ_OBJECT_CONSTRUCTORS(WasmInternalFunction)
+
+ private:
+ DECL_ACCESSORS(raw_code, CodeT)
+};
+
// Information for a WasmJSFunction which is referenced as the function data of
// the SharedFunctionInfo underlying the function. For details please see the
// {SharedFunctionInfo::HasWasmJSFunctionData} predicate.
@@ -755,11 +766,10 @@ class WasmJSFunctionData
// Dispatched behavior.
DECL_PRINTER(WasmJSFunctionData)
- class BodyDescriptor;
+ using BodyDescriptor =
+ FlexibleBodyDescriptor<WasmFunctionData::kStartOfStrongFieldsOffset>;
private:
- DECL_ACCESSORS(raw_wasm_to_js_wrapper_code, CodeT)
-
TQ_OBJECT_CONSTRUCTORS(WasmJSFunctionData)
};
@@ -769,7 +779,8 @@ class WasmCapiFunctionData
public:
DECL_PRINTER(WasmCapiFunctionData)
- class BodyDescriptor;
+ using BodyDescriptor =
+ FlexibleBodyDescriptor<WasmFunctionData::kStartOfStrongFieldsOffset>;
TQ_OBJECT_CONSTRUCTORS(WasmCapiFunctionData)
};
@@ -848,6 +859,8 @@ class WasmExceptionTag
V8_EXPORT_PRIVATE static Handle<WasmExceptionTag> New(Isolate* isolate,
int index);
+ using BodyDescriptor = StructBodyDescriptor;
+
TQ_OBJECT_CONSTRUCTORS(WasmExceptionTag)
};
@@ -861,6 +874,8 @@ class AsmWasmData : public TorqueGeneratedAsmWasmData<AsmWasmData, Struct> {
DECL_PRINTER(AsmWasmData)
+ using BodyDescriptor = StructBodyDescriptor;
+
TQ_OBJECT_CONSTRUCTORS(AsmWasmData)
};
@@ -973,6 +988,7 @@ class WasmArray : public TorqueGeneratedWasmArray<WasmArray, WasmObject> {
TQ_OBJECT_CONSTRUCTORS(WasmArray)
};
+// A wasm delimited continuation.
class WasmContinuationObject
: public TorqueGeneratedWasmContinuationObject<WasmContinuationObject,
Struct> {
@@ -983,12 +999,26 @@ class WasmContinuationObject
WasmContinuationObject parent);
DECL_PRINTER(WasmContinuationObject)
- TQ_OBJECT_CONSTRUCTORS(WasmContinuationObject)
+
+ using BodyDescriptor = StructBodyDescriptor;
private:
static Handle<WasmContinuationObject> New(
Isolate* isolate, std::unique_ptr<wasm::StackMemory> stack,
HeapObject parent);
+
+ TQ_OBJECT_CONSTRUCTORS(WasmContinuationObject)
+};
+
+// The suspender object provides an API to suspend and resume wasm code using
+// promises. See: https://github.com/WebAssembly/js-promise-integration.
+class WasmSuspenderObject
+ : public TorqueGeneratedWasmSuspenderObject<WasmSuspenderObject, JSObject> {
+ public:
+ static Handle<WasmSuspenderObject> New(Isolate* isolate);
+ // TODO(thibaudm): returnPromiseOnSuspend & suspendOnReturnedPromise.
+ DECL_PRINTER(WasmSuspenderObject)
+ TQ_OBJECT_CONSTRUCTORS(WasmSuspenderObject)
};
#undef DECL_OPTIONAL_ACCESSORS
diff --git a/deps/v8/src/wasm/wasm-objects.tq b/deps/v8/src/wasm/wasm-objects.tq
index b524b08e38..8525d530fd 100644
--- a/deps/v8/src/wasm/wasm-objects.tq
+++ b/deps/v8/src/wasm/wasm-objects.tq
@@ -14,14 +14,20 @@ extern class WasmInstanceObject extends JSObject;
// Represents the context of a function that is defined through the JS or C
// APIs. Corresponds to the WasmInstanceObject passed to a Wasm function
// reference.
-// The {foreign_address} field inherited from {Foreign} points the IsolateRoots
-// of the defining isolate.
-extern class WasmApiFunctionRef extends Foreign {
+// TODO(manoskouk): If V8_HEAP_SANDBOX, we cannot encode the isolate_root as a
+// sandboxed pointer, because that would require having access to the isolate
+// root in the first place.
+extern class WasmApiFunctionRef extends HeapObject {
+ isolate_root: RawPtr;
native_context: NativeContext;
callable: JSReceiver|Undefined;
}
-extern class WasmFunctionData extends Foreign {
+// This is the representation that is used internally by wasm to represent
+// function references.
+// The {foreign_address} field inherited from {Foreign} points to the call
+// target.
+extern class WasmInternalFunction extends Foreign {
// This is the "reference" value that must be passed along in the "instance"
// register when calling the given function. It is either the target instance
// (for wasm functions), or a WasmApiFunctionRef object (for functions defined
@@ -29,6 +35,19 @@ extern class WasmFunctionData extends Foreign {
// For imported functions, this value equals the respective entry in
// the module's imported_function_refs array.
ref: WasmInstanceObject|WasmApiFunctionRef;
+ // The external (JS) representation of this function reference.
+ external: JSFunction|Undefined;
+ // This field is used when the call target is null.
+ @if(V8_EXTERNAL_CODE_SPACE) code: CodeDataContainer;
+ @ifnot(V8_EXTERNAL_CODE_SPACE) code: Code;
+}
+// WasmInternalFunction is safely comparable for pointer equality.
+extern operator '==' macro TaggedEqual(WasmInternalFunction, Object): bool;
+extern operator '==' macro TaggedEqual(Object, WasmInternalFunction): bool;
+
+extern class WasmFunctionData extends HeapObject {
+ // The wasm-internal representation of this function object.
+ internal: WasmInternalFunction;
// Used for calling this function from JavaScript.
@if(V8_EXTERNAL_CODE_SPACE) wrapper_code: CodeDataContainer;
@ifnot(V8_EXTERNAL_CODE_SPACE) wrapper_code: Code;
@@ -50,8 +69,6 @@ extern class WasmExportedFunctionData extends WasmFunctionData {
}
extern class WasmJSFunctionData extends WasmFunctionData {
- @if(V8_EXTERNAL_CODE_SPACE) wasm_to_js_wrapper_code: CodeDataContainer;
- @ifnot(V8_EXTERNAL_CODE_SPACE) wasm_to_js_wrapper_code: Code;
serialized_return_count: Smi;
serialized_parameter_count: Smi;
serialized_signature: PodArrayOfWasmValueType;
@@ -73,12 +90,15 @@ extern class WasmIndirectFunctionTable extends Struct {
}
extern class WasmContinuationObject extends Struct {
- managed_stack: Foreign;
- managed_jmpbuf: Foreign;
- jmpbuf: Foreign; // Direct access to managed_jmpbuf's underlying pointer.
+ stack: Foreign;
+ jmpbuf: Foreign; // Direct access to the stack's jump buffer.
parent: WasmContinuationObject|Undefined;
}
+extern class WasmSuspenderObject extends JSObject {
+ continuation: WasmContinuationObject|Undefined;
+}
+
extern class WasmExceptionTag extends Struct {
// Note that this index is only useful for debugging purposes and it is not
// unique across modules. The GC however does not allow objects without at
@@ -173,10 +193,3 @@ extern class WasmArray extends WasmObject {
@if(TAGGED_SIZE_8_BYTES) optional_padding: uint32;
@ifnot(TAGGED_SIZE_8_BYTES) optional_padding: void;
}
-
-@export
-class CallRefData extends HeapObject {
- instance: HeapObject;
- target: RawPtr;
- count: uint32;
-}
diff --git a/deps/v8/src/wasm/wasm-serialization.cc b/deps/v8/src/wasm/wasm-serialization.cc
index b0d697924e..08dfce0f65 100644
--- a/deps/v8/src/wasm/wasm-serialization.cc
+++ b/deps/v8/src/wasm/wasm-serialization.cc
@@ -291,6 +291,7 @@ class V8_EXPORT_PRIVATE NativeModuleSerializer {
const base::Vector<WasmCode* const> code_table_;
bool write_called_ = false;
size_t total_written_code_ = 0;
+ int num_turbofan_functions_ = 0;
};
NativeModuleSerializer::NativeModuleSerializer(
@@ -341,6 +342,7 @@ bool NativeModuleSerializer::WriteCode(const WasmCode* code, Writer* writer) {
writer->Write(false);
return true;
}
+ ++num_turbofan_functions_;
writer->Write(true);
// Write the size of the entire code section, followed by the code header.
writer->Write(code->constant_pool_offset());
@@ -449,6 +451,8 @@ bool NativeModuleSerializer::Write(Writer* writer) {
for (WasmCode* code : code_table_) {
if (!WriteCode(code, writer)) return false;
}
+ // If not a single function was written, serialization was not successful.
+ if (num_turbofan_functions_ == 0) return false;
// Make sure that the serialized total code size was correct.
CHECK_EQ(total_written_code_, total_code_size);
diff --git a/deps/v8/src/web-snapshot/web-snapshot.cc b/deps/v8/src/web-snapshot/web-snapshot.cc
index 76f43be15e..2e52583a1f 100644
--- a/deps/v8/src/web-snapshot/web-snapshot.cc
+++ b/deps/v8/src/web-snapshot/web-snapshot.cc
@@ -41,9 +41,9 @@ void WebSnapshotSerializerDeserializer::Throw(const char* message) {
}
error_message_ = message;
if (!isolate_->has_pending_exception()) {
- v8::Isolate* v8_isolate = reinterpret_cast<v8::Isolate*>(isolate_);
- v8_isolate->ThrowError(
- v8::String::NewFromUtf8(v8_isolate, message).ToLocalChecked());
+ isolate_->Throw(*isolate_->factory()->NewError(
+ MessageTemplate::kWebSnapshotError,
+ isolate_->factory()->NewStringFromAsciiChecked(error_message_)));
}
}
@@ -63,7 +63,7 @@ uint32_t WebSnapshotSerializerDeserializer::FunctionKindToFunctionFlags(
case FunctionKind::kAsyncConciseMethod:
break;
default:
- Throw("Web Snapshot: Unsupported function kind");
+ Throw("Unsupported function kind");
}
auto flags = AsyncFunctionBitField::encode(IsAsyncFunction(kind)) |
GeneratorFunctionBitField::encode(IsGeneratorFunction(kind)) |
@@ -147,7 +147,7 @@ FunctionKind WebSnapshotSerializerDeserializer::FunctionFlagsToFunctionKind(
kind = FunctionKind::kInvalid;
}
if (kind == FunctionKind::kInvalid) {
- Throw("Web Snapshots: Invalid function flags\n");
+ Throw("Invalid function flags\n");
}
return kind;
}
@@ -215,10 +215,19 @@ bool WebSnapshotSerializer::TakeSnapshot(v8::Local<v8::Context> context,
v8::Local<v8::PrimitiveArray> exports,
WebSnapshotData& data_out) {
if (string_ids_.size() > 0) {
- Throw("Web snapshot: Can't reuse WebSnapshotSerializer");
+ Throw("Can't reuse WebSnapshotSerializer");
return false;
}
v8::Isolate* v8_isolate = reinterpret_cast<v8::Isolate*>(isolate_);
+
+ contexts_ = ArrayList::New(isolate_, 30);
+ functions_ = ArrayList::New(isolate_, 30);
+ classes_ = ArrayList::New(isolate_, 30);
+ arrays_ = ArrayList::New(isolate_, 30);
+ objects_ = ArrayList::New(isolate_, 30);
+
+ std::unique_ptr<Handle<JSObject>[]> export_objects(
+ new Handle<JSObject>[exports->Length()]);
for (int i = 0, length = exports->Length(); i < length; ++i) {
v8::Local<v8::String> str =
exports->Get(v8_isolate, i)->ToString(context).ToLocalChecked();
@@ -232,31 +241,57 @@ bool WebSnapshotSerializer::TakeSnapshot(v8::Local<v8::Context> context,
if (script_result.IsEmpty() ||
!script_result.ToLocalChecked()->ToObject(context).ToLocal(
&v8_object)) {
- Throw("Web snapshot: Exported object not found");
+ Throw("Exported object not found");
return false;
}
- auto object = Handle<JSObject>::cast(Utils::OpenHandle(*v8_object));
- SerializeExport(object, Handle<String>::cast(Utils::OpenHandle(*str)));
+ export_objects[i] = Handle<JSObject>::cast(Utils::OpenHandle(*v8_object));
+ Discovery(export_objects[i]);
+ }
+
+ for (int i = 0, length = exports->Length(); i < length; ++i) {
+ v8::Local<v8::String> str =
+ exports->Get(v8_isolate, i)->ToString(context).ToLocalChecked();
+ SerializeExport(export_objects[i],
+ Handle<String>::cast(Utils::OpenHandle(*str)));
}
+
WriteSnapshot(data_out.buffer, data_out.buffer_size);
- return !has_error();
+
+ if (has_error()) {
+ isolate_->ReportPendingMessages();
+ return false;
+ }
+ return true;
}
void WebSnapshotSerializer::SerializePendingItems() {
- while (!pending_objects_.empty() || !pending_arrays_.empty()) {
- while (!pending_objects_.empty()) {
- const Handle<JSObject>& object = pending_objects_.front();
- SerializePendingObject(object);
- pending_objects_.pop();
- }
-
- while (!pending_arrays_.empty()) {
- const Handle<JSArray>& array = pending_arrays_.front();
- SerializePendingArray(array);
- pending_arrays_.pop();
- }
+ for (int i = 0; i < contexts_->Length(); ++i) {
+ Handle<Context> context =
+ handle(Context::cast(contexts_->Get(i)), isolate_);
+ SerializeContext(context);
+ }
+ for (int i = 0; i < functions_->Length(); ++i) {
+ Handle<JSFunction> function =
+ handle(JSFunction::cast(functions_->Get(i)), isolate_);
+ SerializeFunction(function);
+ }
+ for (int i = 0; i < classes_->Length(); ++i) {
+ Handle<JSFunction> function =
+ handle(JSFunction::cast(classes_->Get(i)), isolate_);
+ SerializeClass(function);
+ }
+ for (int i = 0; i < arrays_->Length(); ++i) {
+ Handle<JSArray> array = handle(JSArray::cast(arrays_->Get(i)), isolate_);
+ SerializeArray(array);
+ }
+ for (int i = 0; i < objects_->Length(); ++i) {
+ Handle<JSObject> object =
+ handle(JSObject::cast(objects_->Get(i)), isolate_);
+ SerializeObject(object);
}
+ // Maps and strings get serialized when they're encountered; we don't need to
+ // serialize them explicitly.
}
// Format (full snapshot):
@@ -291,7 +326,7 @@ void WebSnapshotSerializer::WriteSnapshot(uint8_t*& buffer,
array_serializer_.buffer_size_ + object_serializer_.buffer_size_ +
export_serializer_.buffer_size_ + 8 * sizeof(uint32_t);
if (total_serializer.ExpandBuffer(needed_size).IsNothing()) {
- Throw("Web snapshot: Out of memory");
+ Throw("Out of memory");
return;
}
total_serializer.WriteRawBytes(kMagicNumber, 4);
@@ -334,7 +369,7 @@ bool WebSnapshotSerializer::InsertIntoIndexMap(ObjectCacheIndexMap& map,
uint32_t& id) {
if (static_cast<uint32_t>(map.size()) >=
std::numeric_limits<uint32_t>::max()) {
- Throw("Web snapshot: Too many objects");
+ Throw("Too many objects");
return true;
}
int index_out;
@@ -363,8 +398,13 @@ void WebSnapshotSerializer::SerializeString(Handle<String> string,
string_serializer_.WriteRawBytes(chars.begin(),
chars.length() * sizeof(uint8_t));
} else if (flat.IsTwoByte()) {
- // TODO(v8:11525): Support two-byte strings.
- UNREACHABLE();
+ v8::Isolate* v8_isolate = reinterpret_cast<v8::Isolate*>(isolate_);
+ v8::Local<v8::String> api_string = Utils::ToLocal(string);
+ int length = api_string->Utf8Length(v8_isolate);
+ std::unique_ptr<char[]> buffer(new char[length]);
+ api_string->WriteUtf8(v8_isolate, buffer.get(), length);
+ string_serializer_.WriteUint32(length);
+ string_serializer_.WriteRawBytes(buffer.get(), length * sizeof(uint8_t));
} else {
UNREACHABLE();
}
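
The two-byte branch added above goes through the public v8::String UTF-8 conversion API. A small, hedged sketch of the same pattern on its own, outside the serializer (EncodeStringAsUtf8 is an invented helper name, not a V8 function):

#include <vector>
#include "v8.h"  // v8::Isolate, v8::Local, v8::String

// Returns the UTF-8 bytes of `str`; the string serializer above writes the
// same bytes preceded by their length.
std::vector<char> EncodeStringAsUtf8(v8::Isolate* isolate,
                                     v8::Local<v8::String> str) {
  int length = str->Utf8Length(isolate);
  std::vector<char> bytes(length);
  str->WriteUtf8(isolate, bytes.data(), length);
  return bytes;
}
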
@@ -388,7 +428,7 @@ void WebSnapshotSerializer::SerializeMap(Handle<Map> map, uint32_t& id) {
Handle<Name> key(map->instance_descriptors(kRelaxedLoad).GetKey(i),
isolate_);
if (!key->IsString()) {
- Throw("Web snapshot: Key is not a string");
+ Throw("Key is not a string");
return;
}
@@ -396,7 +436,7 @@ void WebSnapshotSerializer::SerializeMap(Handle<Map> map, uint32_t& id) {
map->instance_descriptors(kRelaxedLoad).GetDetails(i);
if (details.location() != PropertyLocation::kField) {
- Throw("Web snapshot: Properties which are not fields not supported");
+ Throw("Properties which are not fields not supported");
return;
}
if (first_custom_index >= 0 || details.IsReadOnly() ||
@@ -448,24 +488,202 @@ void WebSnapshotSerializer::SerializeSource(ValueSerializer* serializer,
void WebSnapshotSerializer::SerializeFunctionInfo(ValueSerializer* serializer,
Handle<JSFunction> function) {
if (!function->shared().HasSourceCode()) {
- Throw("Web snapshot: Function without source code");
+ Throw("Function without source code");
return;
}
- Handle<Context> context(function->context(), isolate_);
- if (context->IsNativeContext() || context->IsScriptContext()) {
- serializer->WriteUint32(0);
- } else {
- DCHECK(context->IsFunctionContext() || context->IsBlockContext());
- uint32_t context_id = 0;
- SerializeContext(context, context_id);
- serializer->WriteUint32(context_id + 1);
+ {
+ DisallowGarbageCollection no_gc;
+ Context context = function->context();
+ if (context.IsNativeContext() || context.IsScriptContext()) {
+ serializer->WriteUint32(0);
+ } else {
+ DCHECK(context.IsFunctionContext() || context.IsBlockContext());
+ uint32_t context_id = GetContextId(context);
+ serializer->WriteUint32(context_id + 1);
+ }
}
SerializeSource(serializer, function);
-
+ serializer->WriteUint32(
+ function->shared().internal_formal_parameter_count_without_receiver());
serializer->WriteUint32(
FunctionKindToFunctionFlags(function->shared().kind()));
+
+ if (function->has_prototype_slot() && function->has_instance_prototype()) {
+ DisallowGarbageCollection no_gc;
+ JSObject prototype = JSObject::cast(function->instance_prototype());
+ uint32_t prototype_id = GetObjectId(prototype);
+ serializer->WriteUint32(prototype_id + 1);
+ } else {
+ serializer->WriteUint32(0);
+ }
+}
+
+void WebSnapshotSerializer::Discovery(Handle<Object> start_object) {
+ // The object discovery phase assigns IDs for objects / functions / classes /
+ // arrays and discovers outgoing references from them. This is needed so that
+ // e.g., we know all functions upfront and can construct the source code that
+ // covers them before serializing the functions.
+
+ // TODO(v8:11525): Serialize leaf objects first.
+
+ discovery_queue_.push(start_object);
+
+ while (!discovery_queue_.empty()) {
+ const Handle<Object>& object = discovery_queue_.front();
+ if (object->IsHeapObject()) {
+ switch (HeapObject::cast(*object).map().instance_type()) {
+ case JS_FUNCTION_TYPE:
+ DiscoverFunction(Handle<JSFunction>::cast(object));
+ break;
+ case JS_CLASS_CONSTRUCTOR_TYPE:
+ DiscoverClass(Handle<JSFunction>::cast(object));
+ break;
+ case JS_OBJECT_TYPE:
+ DiscoverObject(Handle<JSObject>::cast(object));
+ break;
+ case JS_ARRAY_TYPE:
+ DiscoverArray(Handle<JSArray>::cast(object));
+ break;
+ case ODDBALL_TYPE:
+ case HEAP_NUMBER_TYPE:
+ case JS_PRIMITIVE_WRAPPER_TYPE:
+ case JS_REG_EXP_TYPE:
+ // Can't contain references to other objects.
+ break;
+ default:
+ if (object->IsString()) {
+ // Can't contain references to other objects.
+ break;
+ } else {
+ Throw("Unsupported object");
+ }
+ }
+ }
+ discovery_queue_.pop();
+ }
+}
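
The discovery pass described in the comment above is essentially a breadth-first traversal that interns each reachable item the first time it is seen. A minimal standalone sketch of that idea, using plain STL containers instead of V8's handles and ArrayLists (Node and DiscoverAll are invented names for illustration):

#include <cstdint>
#include <queue>
#include <unordered_map>
#include <vector>

struct Node {
  std::vector<Node*> references;  // outgoing edges (properties, elements, ...)
};

// Assigns ids in discovery order; an item's id doubles as its position in the
// later serialization order, mirroring the DCHECK_EQ(id, ..._->Length()) above.
void DiscoverAll(Node* start, std::unordered_map<Node*, uint32_t>& ids,
                 std::vector<Node*>& order) {
  std::queue<Node*> queue;
  queue.push(start);
  while (!queue.empty()) {
    Node* node = queue.front();
    queue.pop();
    // emplace fails if the node already has an id, i.e. was discovered before.
    if (!ids.emplace(node, static_cast<uint32_t>(order.size())).second) {
      continue;
    }
    order.push_back(node);
    for (Node* target : node->references) queue.push(target);
  }
}
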
+
+void WebSnapshotSerializer::DiscoverFunction(Handle<JSFunction> function) {
+ uint32_t id;
+ if (InsertIntoIndexMap(function_ids_, function, id)) {
+ return;
+ }
+
+ DCHECK_EQ(id, functions_->Length());
+ functions_ = ArrayList::Add(isolate_, functions_, function);
+ DiscoverContextAndPrototype(function);
+ // TODO(v8:11525): Support properties in functions.
+}
+
+void WebSnapshotSerializer::DiscoverClass(Handle<JSFunction> function) {
+ uint32_t id;
+ if (InsertIntoIndexMap(class_ids_, function, id)) {
+ return;
+ }
+
+ DCHECK_EQ(id, classes_->Length());
+ classes_ = ArrayList::Add(isolate_, classes_, function);
+
+ DiscoverContextAndPrototype(function);
+ // TODO(v8:11525): Support properties in classes.
+ // TODO(v8:11525): Support class members.
+}
+
+void WebSnapshotSerializer::DiscoverContextAndPrototype(
+ Handle<JSFunction> function) {
+ Handle<Context> context(function->context(), isolate_);
+ if (context->IsFunctionContext() || context->IsBlockContext()) {
+ DiscoverContext(context);
+ }
+
+ if (function->has_prototype_slot() &&
+ function->map().has_non_instance_prototype()) {
+ Throw("Functions with non-instance prototypes not supported");
+ return;
+ }
+
+ if (function->has_prototype_slot() && function->has_instance_prototype()) {
+ Handle<JSObject> prototype = Handle<JSObject>::cast(
+ handle(function->instance_prototype(), isolate_));
+ discovery_queue_.push(prototype);
+ }
+}
+
+void WebSnapshotSerializer::DiscoverContext(Handle<Context> context) {
+ // Ensure that parent contexts get a lower ID.
+ if (!context->previous().IsNativeContext() &&
+ !context->previous().IsScriptContext()) {
+ DiscoverContext(handle(context->previous(), isolate_));
+ }
+
+ uint32_t id;
+ if (InsertIntoIndexMap(context_ids_, context, id)) {
+ return;
+ }
+
+ DCHECK_EQ(id, contexts_->Length());
+ contexts_ = ArrayList::Add(isolate_, contexts_, context);
+
+ Handle<ScopeInfo> scope_info(context->scope_info(), isolate_);
+ int count = scope_info->ContextLocalCount();
+
+ for (int i = 0; i < count; ++i) {
+ // TODO(v8:11525): Support parameters.
+ // TODO(v8:11525): Distinguish variable modes.
+ Handle<Object> value(context->get(scope_info->ContextHeaderLength() + i),
+ isolate_);
+ discovery_queue_.push(value);
+ }
+}
+
+void WebSnapshotSerializer::DiscoverArray(Handle<JSArray> array) {
+ uint32_t id;
+ if (InsertIntoIndexMap(array_ids_, array, id)) {
+ return;
+ }
+
+ DCHECK_EQ(id, arrays_->Length());
+ arrays_ = ArrayList::Add(isolate_, arrays_, array);
+
+ auto elements_kind = array->GetElementsKind();
+ if (elements_kind != PACKED_SMI_ELEMENTS &&
+ elements_kind != PACKED_ELEMENTS) {
+ Throw("Unsupported array");
+ return;
+ }
+ // TODO(v8:11525): Support sparse arrays & arrays with holes.
+ uint32_t length = static_cast<uint32_t>(array->length().ToSmi().value());
+ Handle<FixedArray> elements =
+ handle(FixedArray::cast(array->elements()), isolate_);
+ for (uint32_t i = 0; i < length; ++i) {
+ discovery_queue_.push(handle(elements->get(i), isolate_));
+ }
+}
+
+void WebSnapshotSerializer::DiscoverObject(Handle<JSObject> object) {
+ uint32_t id;
+ if (InsertIntoIndexMap(object_ids_, object, id)) {
+ return;
+ }
+
+ DCHECK_EQ(id, objects_->Length());
+ objects_ = ArrayList::Add(isolate_, objects_, object);
+
+ // TODO(v8:11525): Support objects with so many properties that they can't be
+ // in fast mode.
+ JSObject::MigrateSlowToFast(object, 0, "Web snapshot");
+
+ Handle<Map> map(object->map(), isolate_);
+ for (InternalIndex i : map->IterateOwnDescriptors()) {
+ PropertyDetails details =
+ map->instance_descriptors(kRelaxedLoad).GetDetails(i);
+ FieldIndex field_index = FieldIndex::ForDescriptor(*map, i);
+ Handle<Object> value =
+ JSObject::FastPropertyAt(object, details.representation(), field_index);
+ discovery_queue_.push(value);
+ }
}
// Format (serialized function):
@@ -473,21 +691,14 @@ void WebSnapshotSerializer::SerializeFunctionInfo(ValueSerializer* serializer,
// - String id (source snippet)
// - Start position in the source snippet
// - Length in the source snippet
+// - Formal parameter count
// - Flags (see FunctionFlags)
+// - 0 if there's no function prototype, 1 + object id for the function
+// prototype otherwise
// TODO(v8:11525): Investigate whether the length is really needed.
-// TODO(v8:11525): Serialize the formal parameter count.
-void WebSnapshotSerializer::SerializeFunction(Handle<JSFunction> function,
- uint32_t& id) {
- if (InsertIntoIndexMap(function_ids_, function, id)) {
- return;
- }
-
+void WebSnapshotSerializer::SerializeFunction(Handle<JSFunction> function) {
SerializeFunctionInfo(&function_serializer_, function);
-
- // TODO(v8:11525): Serialize .prototype.
// TODO(v8:11525): Support properties in functions.
- // TODO(v8:11525): Support function referencing a function indirectly (e.g.,
- // function -> context -> array -> function).
}
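
Both the enclosing-context reference written in SerializeFunctionInfo and the prototype reference described in the format comment use the same optional-reference convention: 0 means "absent", anything else is 1 + id. A hedged sketch of that convention in isolation (EncodeOptionalId and DecodeOptionalId are illustrative names, not V8 functions):

#include <cstdint>
#include <optional>

// 0 = no reference; otherwise the wire value is 1 + id.
uint32_t EncodeOptionalId(std::optional<uint32_t> id) {
  return id.has_value() ? *id + 1 : 0;
}

std::optional<uint32_t> DecodeOptionalId(uint32_t encoded) {
  if (encoded == 0) return std::nullopt;
  // The caller still has to range-check the result against the relevant table.
  return encoded - 1;
}
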
// Format (serialized class):
@@ -495,25 +706,12 @@ void WebSnapshotSerializer::SerializeFunction(Handle<JSFunction> function,
// - String id (source snippet)
// - Start position in the source snippet
// - Length in the source snippet
+// - Formal parameter count
// - Flags (see FunctionFlags)
-// - Object id (function prototype)
-void WebSnapshotSerializer::SerializeClass(Handle<JSFunction> function,
- uint32_t& id) {
- if (InsertIntoIndexMap(class_ids_, function, id)) {
- return;
- }
-
+// - 1 + object id for the function prototype
+void WebSnapshotSerializer::SerializeClass(Handle<JSFunction> function) {
SerializeFunctionInfo(&class_serializer_, function);
-
- Handle<JSObject> prototype =
- Handle<JSObject>::cast(handle(function->prototype(), isolate_));
- uint32_t prototype_id;
- SerializeObject(prototype, prototype_id);
- class_serializer_.WriteUint32(prototype_id);
-
// TODO(v8:11525): Support properties in classes.
- // TODO(v8:11525): Support class referencing a class indirectly (e.g.,
- // class -> context -> array -> class).
// TODO(v8:11525): Support class members.
}
@@ -523,34 +721,20 @@ void WebSnapshotSerializer::SerializeClass(Handle<JSFunction> function,
// - For each variable:
// - String id (name)
// - Serialized value
-void WebSnapshotSerializer::SerializeContext(Handle<Context> context,
- uint32_t& id) {
- // Invariant: parent context is serialized first.
-
- // Can't use InsertIntoIndexMap here, because it might reserve a lower id
- // for the context than its parent.
- int index_out = 0;
- if (context_ids_.Lookup(context, &index_out)) {
- id = static_cast<uint32_t>(index_out);
- return;
- }
-
+void WebSnapshotSerializer::SerializeContext(Handle<Context> context) {
uint32_t parent_context_id = 0;
if (!context->previous().IsNativeContext() &&
!context->previous().IsScriptContext()) {
- SerializeContext(handle(context->previous(), isolate_), parent_context_id);
- ++parent_context_id;
+ parent_context_id = GetContextId(context->previous()) + 1;
}
- InsertIntoIndexMap(context_ids_, context, id);
-
// TODO(v8:11525): Use less space for encoding the context type.
if (context->IsFunctionContext()) {
context_serializer_.WriteUint32(ContextType::FUNCTION);
} else if (context->IsBlockContext()) {
context_serializer_.WriteUint32(ContextType::BLOCK);
} else {
- Throw("Web snapshot: Unsupported context type");
+ Throw("Unsupported context type");
return;
}
@@ -571,40 +755,19 @@ void WebSnapshotSerializer::SerializeContext(Handle<Context> context,
isolate_);
WriteValue(value, context_serializer_);
}
- // TODO(v8:11525): Support context referencing a context indirectly (e.g.,
- // context -> array -> function -> context).
-}
-
-void WebSnapshotSerializer::SerializeObject(Handle<JSObject> object,
- uint32_t& id) {
- // TODO(v8:11525): Serialize the leaf objects first.
- DCHECK(!object->IsJSFunction());
- if (InsertIntoIndexMap(object_ids_, object, id)) {
- return;
- }
- pending_objects_.push(object);
-}
-
-void WebSnapshotSerializer::SerializeArray(Handle<JSArray> array,
- uint32_t& id) {
- // TODO(v8:11525): Serialize the leaf objects first.
- if (InsertIntoIndexMap(array_ids_, array, id)) {
- return;
- }
- pending_arrays_.push(array);
}
// Format (serialized object):
// - Shape id
// - For each property:
// - Serialized value
-void WebSnapshotSerializer::SerializePendingObject(Handle<JSObject> object) {
+void WebSnapshotSerializer::SerializeObject(Handle<JSObject> object) {
Handle<Map> map(object->map(), isolate_);
uint32_t map_id = 0;
SerializeMap(map, map_id);
if (*map != object->map()) {
- Throw("Web snapshot: Map changed");
+ Throw("Map changed");
return;
}
@@ -624,11 +787,11 @@ void WebSnapshotSerializer::SerializePendingObject(Handle<JSObject> object) {
// - Length
// - For each element:
// - Serialized value
-void WebSnapshotSerializer::SerializePendingArray(Handle<JSArray> array) {
+void WebSnapshotSerializer::SerializeArray(Handle<JSArray> array) {
auto elements_kind = array->GetElementsKind();
if (elements_kind != PACKED_SMI_ELEMENTS &&
elements_kind != PACKED_ELEMENTS) {
- Throw("Web Snapshot: Unsupported array");
+ Throw("Unsupported array");
return;
}
// TODO(v8:11525): Support sparse arrays & arrays with holes.
@@ -698,29 +861,25 @@ void WebSnapshotSerializer::WriteValue(Handle<Object> object,
serializer.WriteDouble(HeapNumber::cast(*object).value());
break;
case JS_FUNCTION_TYPE:
- SerializeFunction(Handle<JSFunction>::cast(object), id);
serializer.WriteUint32(ValueType::FUNCTION_ID);
- serializer.WriteUint32(id);
+ serializer.WriteUint32(GetFunctionId(JSFunction::cast(*object)));
break;
case JS_CLASS_CONSTRUCTOR_TYPE:
- SerializeClass(Handle<JSFunction>::cast(object), id);
serializer.WriteUint32(ValueType::CLASS_ID);
- serializer.WriteUint32(id);
+ serializer.WriteUint32(GetClassId(JSFunction::cast(*object)));
break;
case JS_OBJECT_TYPE:
- SerializeObject(Handle<JSObject>::cast(object), id);
serializer.WriteUint32(ValueType::OBJECT_ID);
- serializer.WriteUint32(id);
+ serializer.WriteUint32(GetObjectId(JSObject::cast(*object)));
break;
case JS_ARRAY_TYPE:
- SerializeArray(Handle<JSArray>::cast(object), id);
serializer.WriteUint32(ValueType::ARRAY_ID);
- serializer.WriteUint32(id);
+ serializer.WriteUint32(GetArrayId(JSArray::cast(*object)));
break;
case JS_REG_EXP_TYPE: {
Handle<JSRegExp> regexp = Handle<JSRegExp>::cast(object);
if (regexp->map() != isolate_->regexp_function()->initial_map()) {
- Throw("Web snapshot: Unsupported RegExp map");
+ Throw("Unsupported RegExp map");
return;
}
uint32_t pattern_id, flags_id;
@@ -740,12 +899,52 @@ void WebSnapshotSerializer::WriteValue(Handle<Object> object,
serializer.WriteUint32(ValueType::STRING_ID);
serializer.WriteUint32(id);
} else {
- Throw("Web snapshot: Unsupported object");
+ Throw("Unsupported object");
}
}
// TODO(v8:11525): Support more types.
}
+uint32_t WebSnapshotSerializer::GetFunctionId(JSFunction function) {
+ int id;
+ bool return_value = function_ids_.Lookup(function, &id);
+ DCHECK(return_value);
+ USE(return_value);
+ return static_cast<uint32_t>(id);
+}
+
+uint32_t WebSnapshotSerializer::GetClassId(JSFunction function) {
+ int id;
+ bool return_value = class_ids_.Lookup(function, &id);
+ DCHECK(return_value);
+ USE(return_value);
+ return static_cast<uint32_t>(id);
+}
+
+uint32_t WebSnapshotSerializer::GetContextId(Context context) {
+ int id;
+ bool return_value = context_ids_.Lookup(context, &id);
+ DCHECK(return_value);
+ USE(return_value);
+ return static_cast<uint32_t>(id);
+}
+
+uint32_t WebSnapshotSerializer::GetArrayId(JSArray array) {
+ int id;
+ bool return_value = array_ids_.Lookup(array, &id);
+ DCHECK(return_value);
+ USE(return_value);
+ return static_cast<uint32_t>(id);
+}
+
+uint32_t WebSnapshotSerializer::GetObjectId(JSObject object) {
+ int id;
+ bool return_value = object_ids_.Lookup(object, &id);
+ DCHECK(return_value);
+ USE(return_value);
+ return static_cast<uint32_t>(id);
+}
+
WebSnapshotDeserializer::WebSnapshotDeserializer(v8::Isolate* isolate)
: WebSnapshotSerializerDeserializer(
reinterpret_cast<v8::internal::Isolate*>(isolate)) {}
@@ -759,6 +958,8 @@ void WebSnapshotDeserializer::Throw(const char* message) {
class_count_ = 0;
function_count_ = 0;
object_count_ = 0;
+ deferred_references_->SetLength(0);
+
// Make sure we don't read any more data
deserializer_->position_ = deserializer_->end_;
@@ -782,26 +983,59 @@ bool WebSnapshotDeserializer::UseWebSnapshot(
isolate_, reinterpret_cast<const uint8_t*>(resource->data()),
resource->length()));
return Deserialize();
+ } else if (source->IsSeqOneByteString()) {
+ SeqOneByteString source_as_seq = SeqOneByteString::cast(*source);
+ auto length = source_as_seq.length();
+ std::unique_ptr<uint8_t[]> data_copy(new uint8_t[length]);
+ {
+ DisallowGarbageCollection no_gc;
+ uint8_t* data = source_as_seq.GetChars(no_gc);
+ memcpy(data_copy.get(), data, length);
+ }
+ deserializer_.reset(
+ new ValueDeserializer(isolate_, data_copy.get(), length));
+ return Deserialize();
+ } else if (source->IsExternalTwoByteString()) {
+ // TODO(v8:11525): Implement end-to-end snapshot processing which gets rid
+ // of the need to copy the data here.
+ const v8::String::ExternalStringResource* resource =
+ ExternalTwoByteString::cast(*source).resource();
+ auto length = resource->length();
+ std::unique_ptr<uint8_t[]> data_copy(new uint8_t[length]);
+ {
+ DisallowGarbageCollection no_gc;
+ const uint16_t* data = resource->data();
+ uint8_t* data_copy_ptr = data_copy.get();
+ for (size_t i = 0; i < length; ++i) {
+ data_copy_ptr[i] = static_cast<uint8_t>(data[i]);
+ }
+ }
+ deserializer_.reset(
+ new ValueDeserializer(isolate_, data_copy.get(), length));
+ return Deserialize();
+ } else if (source->IsSeqTwoByteString()) {
+ SeqTwoByteString source_as_seq = SeqTwoByteString::cast(*source);
+ auto length = source_as_seq.length();
+ std::unique_ptr<uint8_t[]> data_copy(new uint8_t[length]);
+ {
+ DisallowGarbageCollection no_gc;
+ uint16_t* data = source_as_seq.GetChars(no_gc);
+ uint8_t* data_copy_ptr = data_copy.get();
+ for (int i = 0; i < length; ++i) {
+ data_copy_ptr[i] = static_cast<uint8_t>(data[i]);
+ }
+ }
+ deserializer_.reset(
+ new ValueDeserializer(isolate_, data_copy.get(), length));
+ return Deserialize();
}
- DCHECK(source->IsSeqOneByteString());
- SeqOneByteString source_as_seq = SeqOneByteString::cast(*source);
- auto length = source_as_seq.length();
- uint8_t* data_copy = new uint8_t[length];
- {
- DisallowGarbageCollection no_gc;
- uint8_t* data = source_as_seq.GetChars(no_gc);
- memcpy(data_copy, data, length);
- }
- deserializer_.reset(new ValueDeserializer(isolate_, data_copy, length));
- bool return_value = Deserialize();
- delete[] data_copy;
- return return_value;
+ UNREACHABLE();
}
bool WebSnapshotDeserializer::Deserialize() {
RCS_SCOPE(isolate_, RuntimeCallCounterId::kWebSnapshotDeserialize);
if (deserialized_) {
- Throw("Web snapshot: Can't reuse WebSnapshotDeserializer");
+ Throw("Can't reuse WebSnapshotDeserializer");
return false;
}
deserialized_ = true;
@@ -811,13 +1045,28 @@ bool WebSnapshotDeserializer::Deserialize() {
if (FLAG_trace_web_snapshot) {
timer.Start();
}
+ if (!DeserializeSnapshot()) {
+ isolate_->ReportPendingMessages();
+ return false;
+ }
+ if (!DeserializeScript()) {
+ return false;
+ }
+ if (FLAG_trace_web_snapshot) {
+ double ms = timer.Elapsed().InMillisecondsF();
+ PrintF("[Deserializing snapshot (%zu bytes) took %0.3f ms]\n", buffer_size,
+ ms);
+ }
+ return true;
+}
+bool WebSnapshotDeserializer::DeserializeSnapshot() {
deferred_references_ = ArrayList::New(isolate_, 30);
const void* magic_bytes;
if (!deserializer_->ReadRawBytes(sizeof(kMagicNumber), &magic_bytes) ||
memcmp(magic_bytes, kMagicNumber, sizeof(kMagicNumber)) != 0) {
- Throw("Web snapshot: Invalid magic number");
+ Throw("Invalid magic number");
return false;
}
@@ -827,22 +1076,46 @@ bool WebSnapshotDeserializer::Deserialize() {
DeserializeFunctions();
DeserializeArrays();
DeserializeObjects();
- // It comes in handy to deserialize objects before classes. This
- // way, we already have the function prototype for a class deserialized when
- // processing the class and it's easier to adjust it as needed.
DeserializeClasses();
ProcessDeferredReferences();
DeserializeExports();
- DCHECK_EQ(deferred_references_->Length(), 0);
- if (deserializer_->position_ != deserializer_->end_) {
- Throw("Web snapshot: Snapshot length mismatch");
- return false;
- }
+ DCHECK_EQ(0, deferred_references_->Length());
- if (FLAG_trace_web_snapshot) {
- double ms = timer.Elapsed().InMillisecondsF();
- PrintF("[Deserializing snapshot (%zu bytes) took %0.3f ms]\n", buffer_size,
- ms);
+ return !has_error();
+}
+
+bool WebSnapshotDeserializer::DeserializeScript() {
+ // If there is more data, treat it as normal JavaScript.
+ DCHECK_LE(deserializer_->position_, deserializer_->end_);
+ auto remaining_bytes = deserializer_->end_ - deserializer_->position_;
+ if (remaining_bytes > 0 && remaining_bytes < v8::String::kMaxLength) {
+ v8::Isolate* v8_isolate = reinterpret_cast<v8::Isolate*>(isolate_);
+ v8::Local<v8::String> source =
+ v8::String::NewFromUtf8(
+ v8_isolate, reinterpret_cast<const char*>(deserializer_->position_),
+ NewStringType::kNormal, static_cast<int>(remaining_bytes))
+ .ToLocalChecked();
+
+ ScriptOrigin origin(v8_isolate, v8::String::NewFromUtf8Literal(
+ v8_isolate, "(web snapshot)",
+ NewStringType::kInternalized));
+
+ ScriptCompiler::Source script_source(source, origin);
+ Local<UnboundScript> script;
+ if (!ScriptCompiler::CompileUnboundScript(v8_isolate, &script_source)
+ .ToLocal(&script)) {
+ // The exception has already been reported.
+ DCHECK(!isolate_->has_pending_exception());
+ return false;
+ }
+ Local<Value> result;
+ if (!script->BindToCurrentContext()
+ ->Run(v8_isolate->GetCurrentContext())
+ .ToLocal(&result)) {
+ // The exception has already been reported.
+ DCHECK(!isolate_->has_pending_exception());
+ return false;
+ }
}
// TODO(v8:11525): Add verification mode; verify the objects we just produced.
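
The hybrid format handled by DeserializeScript is, roughly, the binary snapshot followed directly by ordinary JavaScript source, which is compiled and run once the snapshot sections have been consumed. A hedged sketch of assembling such a payload on the producing side (AppendTrailingScript is an invented helper, not a V8 API):

#include <cstdint>
#include <cstring>
#include <vector>

// Concatenates the serialized snapshot with trailing JS source; the
// deserializer treats whatever bytes remain after the snapshot as a script.
std::vector<uint8_t> AppendTrailingScript(const uint8_t* snapshot,
                                          size_t snapshot_size,
                                          const char* js_source) {
  std::vector<uint8_t> payload(snapshot, snapshot + snapshot_size);
  const uint8_t* src = reinterpret_cast<const uint8_t*>(js_source);
  payload.insert(payload.end(), src, src + std::strlen(js_source));
  return payload;
}
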
@@ -853,17 +1126,16 @@ void WebSnapshotDeserializer::DeserializeStrings() {
RCS_SCOPE(isolate_, RuntimeCallCounterId::kWebSnapshotDeserialize_Strings);
if (!deserializer_->ReadUint32(&string_count_) ||
string_count_ > kMaxItemCount) {
- Throw("Web snapshot: Malformed string table");
+ Throw("Malformed string table");
return;
}
STATIC_ASSERT(kMaxItemCount <= FixedArray::kMaxLength);
strings_ = isolate_->factory()->NewFixedArray(string_count_);
for (uint32_t i = 0; i < string_count_; ++i) {
- // TODO(v8:11525): Read strings as UTF-8.
- MaybeHandle<String> maybe_string = deserializer_->ReadOneByteString();
+ MaybeHandle<String> maybe_string = deserializer_->ReadUtf8String();
Handle<String> string;
if (!maybe_string.ToHandle(&string)) {
- Throw("Web snapshot: Malformed string");
+ Throw("Malformed string");
return;
}
strings_->set(i, *string);
@@ -874,7 +1146,7 @@ Handle<String> WebSnapshotDeserializer::ReadString(bool internalize) {
DCHECK(!strings_->is_null());
uint32_t string_id;
if (!deserializer_->ReadUint32(&string_id) || string_id >= string_count_) {
- Throw("Web snapshot: malformed string id\n");
+ Throw("malformed string id\n");
return isolate_->factory()->empty_string();
}
Handle<String> string =
@@ -889,7 +1161,7 @@ Handle<String> WebSnapshotDeserializer::ReadString(bool internalize) {
void WebSnapshotDeserializer::DeserializeMaps() {
RCS_SCOPE(isolate_, RuntimeCallCounterId::kWebSnapshotDeserialize_Maps);
if (!deserializer_->ReadUint32(&map_count_) || map_count_ > kMaxItemCount) {
- Throw("Web snapshot: Malformed shape table");
+ Throw("Malformed shape table");
return;
}
STATIC_ASSERT(kMaxItemCount <= FixedArray::kMaxLength);
@@ -897,7 +1169,7 @@ void WebSnapshotDeserializer::DeserializeMaps() {
for (uint32_t i = 0; i < map_count_; ++i) {
uint32_t map_type;
if (!deserializer_->ReadUint32(&map_type)) {
- Throw("Web snapshot: Malformed shape");
+ Throw("Malformed shape");
return;
}
bool has_custom_property_attributes;
@@ -909,19 +1181,19 @@ void WebSnapshotDeserializer::DeserializeMaps() {
has_custom_property_attributes = true;
break;
default:
- Throw("Web snapshot: Unsupported map type");
+ Throw("Unsupported map type");
return;
}
uint32_t property_count;
if (!deserializer_->ReadUint32(&property_count)) {
- Throw("Web snapshot: Malformed shape");
+ Throw("Malformed shape");
return;
}
// TODO(v8:11525): Consider passing the upper bound as a param and
// systematically enforcing it on the ValueSerializer side.
if (property_count > kMaxNumberOfDescriptors) {
- Throw("Web snapshot: Malformed shape: too many properties");
+ Throw("Malformed shape: too many properties");
return;
}
@@ -940,7 +1212,7 @@ void WebSnapshotDeserializer::DeserializeMaps() {
if (has_custom_property_attributes) {
uint32_t flags;
if (!deserializer_->ReadUint32(&flags)) {
- Throw("Web snapshot: Malformed shape");
+ Throw("Malformed shape");
return;
}
attributes = FlagsToAttributes(flags);
@@ -970,7 +1242,7 @@ void WebSnapshotDeserializer::DeserializeContexts() {
RCS_SCOPE(isolate_, RuntimeCallCounterId::kWebSnapshotDeserialize_Contexts);
if (!deserializer_->ReadUint32(&context_count_) ||
context_count_ > kMaxItemCount) {
- Throw("Web snapshot: Malformed context table");
+ Throw("Malformed context table");
return;
}
STATIC_ASSERT(kMaxItemCount <= FixedArray::kMaxLength);
@@ -978,7 +1250,7 @@ void WebSnapshotDeserializer::DeserializeContexts() {
for (uint32_t i = 0; i < context_count_; ++i) {
uint32_t context_type;
if (!deserializer_->ReadUint32(&context_type)) {
- Throw("Web snapshot: Malformed context type");
+ Throw("Malformed context type");
return;
}
@@ -987,13 +1259,13 @@ void WebSnapshotDeserializer::DeserializeContexts() {
// purpose, we're going to subtract 1 later.
if (!deserializer_->ReadUint32(&parent_context_id) ||
parent_context_id > i) {
- Throw("Web snapshot: Malformed context");
+ Throw("Malformed context");
return;
}
uint32_t variable_count;
if (!deserializer_->ReadUint32(&variable_count)) {
- Throw("Web snapshot: Malformed context");
+ Throw("Malformed context");
return;
}
// TODO(v8:11525): Enforce upper limit for variable count.
@@ -1044,7 +1316,7 @@ void WebSnapshotDeserializer::DeserializeContexts() {
isolate_->factory()->NewBlockContext(parent_context, scope_info);
break;
default:
- Throw("Web snapshot: Unsupported context type");
+ Throw("Unsupported context type");
return;
}
for (int variable_index = 0;
@@ -1102,7 +1374,7 @@ Handle<ScopeInfo> WebSnapshotDeserializer::CreateScopeInfo(
// Default to a CLASS_SCOPE, so that the rest of the code can be executed
// without failures.
scope_type = ScopeType::CLASS_SCOPE;
- Throw("Web snapshot: Unsupported context type");
+ Throw("Unsupported context type");
}
flags |= ScopeInfo::ScopeTypeBits::encode(scope_type);
const int length = ScopeInfo::kVariablePartIndex +
@@ -1126,7 +1398,7 @@ Handle<ScopeInfo> WebSnapshotDeserializer::CreateScopeInfo(
Handle<JSFunction> WebSnapshotDeserializer::CreateJSFunction(
int shared_function_info_index, uint32_t start_position, uint32_t length,
- uint32_t flags, uint32_t context_id) {
+ uint32_t parameter_count, uint32_t flags, uint32_t context_id) {
// TODO(v8:11525): Deduplicate the SFIs for class methods.
FunctionKind kind = FunctionFlagsToFunctionKind(flags);
Handle<SharedFunctionInfo> shared =
@@ -1138,6 +1410,8 @@ Handle<JSFunction> WebSnapshotDeserializer::CreateJSFunction(
}
shared->set_script(*script_);
shared->set_function_literal_id(shared_function_info_index);
+ shared->set_internal_formal_parameter_count(
+ JSParameterCount(parameter_count));
// TODO(v8:11525): Decide how to handle language modes.
shared->set_language_mode(LanguageMode::kStrict);
shared->set_uncompiled_data(
@@ -1170,7 +1444,7 @@ void WebSnapshotDeserializer::DeserializeFunctions() {
RCS_SCOPE(isolate_, RuntimeCallCounterId::kWebSnapshotDeserialize_Functions);
if (!deserializer_->ReadUint32(&function_count_) ||
function_count_ > kMaxItemCount) {
- Throw("Web snapshot: Malformed function table");
+ Throw("Malformed function table");
return;
}
STATIC_ASSERT(kMaxItemCount + 1 <= FixedArray::kMaxLength);
@@ -1192,7 +1466,7 @@ void WebSnapshotDeserializer::DeserializeFunctions() {
// Note: > (not >= on purpose, we will subtract 1).
if (!deserializer_->ReadUint32(&context_id) ||
context_id > context_count_) {
- Throw("Web snapshot: Malformed function");
+ Throw("Malformed function");
return;
}
@@ -1206,19 +1480,24 @@ void WebSnapshotDeserializer::DeserializeFunctions() {
uint32_t start_position;
uint32_t length;
+ uint32_t parameter_count;
uint32_t flags;
if (!deserializer_->ReadUint32(&start_position) ||
!deserializer_->ReadUint32(&length) ||
+ !deserializer_->ReadUint32(&parameter_count) ||
!deserializer_->ReadUint32(&flags)) {
- Throw("Web snapshot: Malformed function");
+ Throw("Malformed function");
return;
}
// Index 0 is reserved for top-level shared function info (which web
// snapshot scripts don't have).
- Handle<JSFunction> function = CreateJSFunction(
- current_function_count_ + 1, start_position, length, flags, context_id);
+ Handle<JSFunction> function =
+ CreateJSFunction(current_function_count_ + 1, start_position, length,
+ parameter_count, flags, context_id);
functions_->set(current_function_count_, *function);
+
+ ReadFunctionPrototype(function);
}
}
@@ -1226,7 +1505,7 @@ void WebSnapshotDeserializer::DeserializeClasses() {
RCS_SCOPE(isolate_, RuntimeCallCounterId::kWebSnapshotDeserialize_Classes);
if (!deserializer_->ReadUint32(&class_count_) ||
class_count_ > kMaxItemCount) {
- Throw("Web snapshot: Malformed class table");
+ Throw("Malformed class table");
return;
}
STATIC_ASSERT(kMaxItemCount + 1 <= FixedArray::kMaxLength);
@@ -1243,7 +1522,7 @@ void WebSnapshotDeserializer::DeserializeClasses() {
// Note: > (not >= on purpose, we will subtract 1).
if (!deserializer_->ReadUint32(&context_id) ||
context_id > context_count_) {
- Throw("Web snapshot: Malformed class");
+ Throw("Malformed class");
return;
}
@@ -1257,43 +1536,24 @@ void WebSnapshotDeserializer::DeserializeClasses() {
uint32_t start_position;
uint32_t length;
+ uint32_t parameter_count;
uint32_t flags;
if (!deserializer_->ReadUint32(&start_position) ||
!deserializer_->ReadUint32(&length) ||
+ !deserializer_->ReadUint32(&parameter_count) ||
!deserializer_->ReadUint32(&flags)) {
- Throw("Web snapshot: Malformed class");
+ Throw("Malformed class");
return;
}
// Index 0 is reserved for top-level shared function info (which web
// snapshot scripts don't have).
- Handle<JSFunction> function =
- CreateJSFunction(function_count_ + current_class_count_ + 1,
- start_position, length, flags, context_id);
+ Handle<JSFunction> function = CreateJSFunction(
+ function_count_ + current_class_count_ + 1, start_position, length,
+ parameter_count, flags, context_id);
classes_->set(current_class_count_, *function);
- uint32_t function_prototype;
- if (!deserializer_->ReadUint32(&function_prototype) ||
- function_prototype >= object_count_) {
- Throw("Web snapshot: Malformed class");
- return;
- }
-
- Handle<JSObject> prototype = Handle<JSObject>::cast(
- handle(Object::cast(objects_->get(function_prototype)), isolate_));
-
- // TODO(v8:11525): Enforce the invariant that no two prototypes share a map.
- Map map = prototype->map();
- map.set_is_prototype_map(true);
- if (!map.constructor_or_back_pointer().IsNullOrUndefined()) {
- Throw("Web snapshot: Map already has a constructor or back pointer set");
- return;
- }
- map.set_constructor_or_back_pointer(*function);
-
- function->set_prototype_or_initial_map(*prototype, kReleaseStore);
-
- classes_->set(current_class_count_, *function);
+ ReadFunctionPrototype(function);
}
}
@@ -1301,7 +1561,7 @@ void WebSnapshotDeserializer::DeserializeObjects() {
RCS_SCOPE(isolate_, RuntimeCallCounterId::kWebSnapshotDeserialize_Objects);
if (!deserializer_->ReadUint32(&object_count_) ||
object_count_ > kMaxItemCount) {
- Throw("Web snapshot: Malformed objects table");
+ Throw("Malformed objects table");
return;
}
STATIC_ASSERT(kMaxItemCount <= FixedArray::kMaxLength);
@@ -1309,7 +1569,7 @@ void WebSnapshotDeserializer::DeserializeObjects() {
for (; current_object_count_ < object_count_; ++current_object_count_) {
uint32_t map_id;
if (!deserializer_->ReadUint32(&map_id) || map_id >= map_count_) {
- Throw("Web snapshot: Malformed object");
+ Throw("Malformed object");
return;
}
Handle<Map> map = handle(Map::cast(maps_->get(map_id)), isolate_);
@@ -1326,7 +1586,7 @@ void WebSnapshotDeserializer::DeserializeObjects() {
// Read the representation from the map.
PropertyDetails details = descriptors->GetDetails(InternalIndex(i));
CHECK_EQ(details.location(), PropertyLocation::kField);
- CHECK_EQ(kData, details.kind());
+ CHECK_EQ(PropertyKind::kData, details.kind());
Representation r = details.representation();
if (r.IsNone()) {
// Switch over to wanted_representation.
@@ -1348,7 +1608,7 @@ void WebSnapshotDeserializer::DeserializeArrays() {
RCS_SCOPE(isolate_, RuntimeCallCounterId::kWebSnapshotDeserialize_Arrays);
if (!deserializer_->ReadUint32(&array_count_) ||
array_count_ > kMaxItemCount) {
- Throw("Web snapshot: Malformed array table");
+ Throw("Malformed array table");
return;
}
STATIC_ASSERT(kMaxItemCount <= FixedArray::kMaxLength);
@@ -1356,7 +1616,7 @@ void WebSnapshotDeserializer::DeserializeArrays() {
for (; current_array_count_ < array_count_; ++current_array_count_) {
uint32_t length;
if (!deserializer_->ReadUint32(&length) || length > kMaxItemCount) {
- Throw("Web snapshot: Malformed array");
+ Throw("Malformed array");
return;
}
Handle<FixedArray> elements = isolate_->factory()->NewFixedArray(length);
@@ -1381,7 +1641,7 @@ void WebSnapshotDeserializer::DeserializeExports() {
RCS_SCOPE(isolate_, RuntimeCallCounterId::kWebSnapshotDeserialize_Exports);
uint32_t count;
if (!deserializer_->ReadUint32(&count) || count > kMaxItemCount) {
- Throw("Web snapshot: Malformed export table");
+ Throw("Malformed export table");
return;
}
for (uint32_t i = 0; i < count; ++i) {
@@ -1403,7 +1663,7 @@ void WebSnapshotDeserializer::DeserializeExports() {
auto result = Object::SetProperty(isolate_, isolate_->global_object(),
export_name, export_value);
if (result.is_null()) {
- Throw("Web snapshot: Setting global property failed");
+ Throw("Setting global property failed");
return;
}
}
@@ -1416,7 +1676,7 @@ void WebSnapshotDeserializer::ReadValue(
uint32_t value_type;
// TODO(v8:11525): Consider adding a ReadByte.
if (!deserializer_->ReadUint32(&value_type)) {
- Throw("Web snapshot: Malformed variable");
+ Throw("Malformed variable");
// Set "value" here so that the "keep on trucking" error handling won't fail
// when dereferencing the handle.
value = isolate_->factory()->undefined_value();
@@ -1447,7 +1707,7 @@ void WebSnapshotDeserializer::ReadValue(
case ValueType::INTEGER: {
Maybe<int32_t> number = deserializer_->ReadZigZag<int32_t>();
if (number.IsNothing()) {
- Throw("Web snapshot: Malformed integer");
+ Throw("Malformed integer");
return;
}
value = isolate_->factory()->NewNumberFromInt(number.FromJust());
@@ -1457,7 +1717,7 @@ void WebSnapshotDeserializer::ReadValue(
case ValueType::DOUBLE: {
double number;
if (!deserializer_->ReadDouble(&number)) {
- Throw("Web snapshot: Malformed double");
+ Throw("Malformed double");
return;
}
value = isolate_->factory()->NewNumber(number);
@@ -1472,7 +1732,7 @@ void WebSnapshotDeserializer::ReadValue(
case ValueType::ARRAY_ID:
uint32_t array_id;
if (!deserializer_->ReadUint32(&array_id) || array_id >= kMaxItemCount) {
- Throw("Web snapshot: Malformed variable");
+ Throw("Malformed variable");
return;
}
if (array_id < current_array_count_) {
@@ -1481,7 +1741,7 @@ void WebSnapshotDeserializer::ReadValue(
// The array hasn't been deserialized yet.
value = isolate_->factory()->undefined_value();
if (object_for_deferred_reference.is_null()) {
- Throw("Web snapshot: Invalid array reference");
+ Throw("Invalid array reference");
return;
}
AddDeferredReference(object_for_deferred_reference,
@@ -1492,7 +1752,7 @@ void WebSnapshotDeserializer::ReadValue(
case ValueType::OBJECT_ID:
uint32_t object_id;
if (!deserializer_->ReadUint32(&object_id) || object_id > kMaxItemCount) {
- Throw("Web snapshot: Malformed variable");
+ Throw("Malformed variable");
return;
}
if (object_id < current_object_count_) {
@@ -1501,7 +1761,7 @@ void WebSnapshotDeserializer::ReadValue(
// The object hasn't been deserialized yet.
value = isolate_->factory()->undefined_value();
if (object_for_deferred_reference.is_null()) {
- Throw("Web snapshot: Invalid object reference");
+ Throw("Invalid object reference");
return;
}
AddDeferredReference(object_for_deferred_reference,
@@ -1514,7 +1774,7 @@ void WebSnapshotDeserializer::ReadValue(
uint32_t function_id;
if (!deserializer_->ReadUint32(&function_id) ||
function_id >= function_count_) {
- Throw("Web snapshot: Malformed object property");
+ Throw("Malformed object property");
return;
}
if (function_id < current_function_count_) {
@@ -1523,7 +1783,7 @@ void WebSnapshotDeserializer::ReadValue(
// The function hasn't been deserialized yet.
value = isolate_->factory()->undefined_value();
if (object_for_deferred_reference.is_null()) {
- Throw("Web snapshot: Invalid object reference");
+ Throw("Invalid object reference");
return;
}
AddDeferredReference(object_for_deferred_reference,
@@ -1536,7 +1796,7 @@ void WebSnapshotDeserializer::ReadValue(
case ValueType::CLASS_ID: {
uint32_t class_id;
if (!deserializer_->ReadUint32(&class_id) || class_id >= kMaxItemCount) {
- Throw("Web snapshot: Malformed object property");
+ Throw("Malformed object property");
return;
}
if (class_id < current_class_count_) {
@@ -1545,7 +1805,7 @@ void WebSnapshotDeserializer::ReadValue(
// The class hasn't been deserialized yet.
value = isolate_->factory()->undefined_value();
if (object_for_deferred_reference.is_null()) {
- Throw("Web snapshot: Invalid object reference");
+ Throw("Invalid object reference");
return;
}
AddDeferredReference(object_for_deferred_reference,
@@ -1560,13 +1820,13 @@ void WebSnapshotDeserializer::ReadValue(
base::Optional<JSRegExp::Flags> flags =
JSRegExp::FlagsFromString(isolate_, flags_string);
if (!flags.has_value()) {
- Throw("Web snapshot: Malformed flags in regular expression");
+ Throw("Malformed flags in regular expression");
return;
}
MaybeHandle<JSRegExp> maybe_regexp =
JSRegExp::New(isolate_, pattern, flags.value());
if (!maybe_regexp.ToHandle(&value)) {
- Throw("Web snapshot: Malformed RegExp");
+ Throw("Malformed RegExp");
return;
}
representation = Representation::Tagged();
@@ -1574,23 +1834,67 @@ void WebSnapshotDeserializer::ReadValue(
}
default:
// TODO(v8:11525): Handle other value types.
- Throw("Web snapshot: Unsupported value type");
+ Throw("Unsupported value type");
return;
}
}
+void WebSnapshotDeserializer::ReadFunctionPrototype(
+ Handle<JSFunction> function) {
+ uint32_t object_id;
+
+ if (!deserializer_->ReadUint32(&object_id) || object_id > kMaxItemCount + 1) {
+ Throw("Malformed class / function");
+ return;
+ }
+ if (object_id == 0) {
+ // No prototype.
+ return;
+ }
+ --object_id;
+ if (object_id < current_object_count_) {
+ if (!SetFunctionPrototype(*function,
+ JSReceiver::cast(objects_->get(object_id)))) {
+ Throw("Can't reuse function prototype");
+ return;
+ }
+ } else {
+ // The object hasn't been deserialized yet.
+ AddDeferredReference(function, 0, OBJECT_ID, object_id);
+ }
+}
+
+bool WebSnapshotDeserializer::SetFunctionPrototype(JSFunction function,
+ JSReceiver prototype) {
+ // TODO(v8:11525): Enforce the invariant that no two prototypes share a map.
+ Map map = prototype.map();
+ map.set_is_prototype_map(true);
+ if (!map.constructor_or_back_pointer().IsNullOrUndefined()) {
+ return false;
+ }
+ map.set_constructor_or_back_pointer(function);
+ function.set_prototype_or_initial_map(prototype, kReleaseStore);
+ return true;
+}
+
void WebSnapshotDeserializer::AddDeferredReference(Handle<Object> container,
uint32_t index,
ValueType target_type,
uint32_t target_index) {
DCHECK(container->IsPropertyArray() || container->IsContext() ||
- container->IsFixedArray());
+ container->IsFixedArray() || container->IsJSFunction());
deferred_references_ = ArrayList::Add(
isolate_, deferred_references_, container, Smi::FromInt(index),
Smi::FromInt(target_type), Smi::FromInt(target_index));
}
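
AddDeferredReference and ProcessDeferredReferences implement a common two-phase linking pattern: when a value refers to an item that has not been created yet, record (container, slot, target kind, target index) and patch the slot once every table has been populated. A standalone sketch of the shape of that pattern (Item and DeferredRef are invented types):

#include <cstddef>
#include <vector>

struct Item {
  std::vector<const Item*> slots;  // references to other items
};

struct DeferredRef {
  size_t container;  // index of the item holding the unresolved reference
  size_t slot;       // which slot inside that item
  size_t target;     // index of the item that did not exist yet
};

// Phase 2: once all items exist, resolve every reference that was recorded
// instead of being written during deserialization.
void PatchDeferredRefs(std::vector<Item>& items,
                       const std::vector<DeferredRef>& deferred) {
  for (const DeferredRef& ref : deferred) {
    items[ref.container].slots[ref.slot] = &items[ref.target];
  }
}
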
void WebSnapshotDeserializer::ProcessDeferredReferences() {
+ // Check for error now, since the FixedArrays below might not have been
+ // created if there was an error.
+ if (has_error()) {
+ return;
+ }
+
DisallowGarbageCollection no_gc;
ArrayList raw_deferred_references = *deferred_references_;
FixedArray raw_functions = *functions_;
@@ -1613,17 +1917,15 @@ void WebSnapshotDeserializer::ProcessDeferredReferences() {
// Throw can allocate, but it's ok, since we're not using the raw
// pointers after that.
AllowGarbageCollection allow_gc;
- Throw("Web Snapshots: Invalid function reference");
+ Throw("Invalid function reference");
return;
}
target = raw_functions.get(target_index);
break;
case CLASS_ID:
if (static_cast<uint32_t>(target_index) >= class_count_) {
- // Throw can allocate, but it's ok, since we're not using the raw
- // pointers after that.
AllowGarbageCollection allow_gc;
- Throw("Web Snapshots: Invalid class reference");
+ Throw("Invalid class reference");
return;
}
target = raw_classes.get(target_index);
@@ -1631,7 +1933,7 @@ void WebSnapshotDeserializer::ProcessDeferredReferences() {
case ARRAY_ID:
if (static_cast<uint32_t>(target_index) >= array_count_) {
AllowGarbageCollection allow_gc;
- Throw("Web Snapshots: Invalid array reference");
+ Throw("Invalid array reference");
return;
}
target = raw_arrays.get(target_index);
@@ -1639,7 +1941,7 @@ void WebSnapshotDeserializer::ProcessDeferredReferences() {
case OBJECT_ID:
if (static_cast<uint32_t>(target_index) >= object_count_) {
AllowGarbageCollection allow_gc;
- Throw("Web Snapshots: Invalid object reference");
+ Throw("Invalid object reference");
return;
}
target = raw_objects.get(target_index);
@@ -1653,6 +1955,17 @@ void WebSnapshotDeserializer::ProcessDeferredReferences() {
Context::cast(container).set(index, target);
} else if (container.IsFixedArray()) {
FixedArray::cast(container).set(index, target);
+ } else if (container.IsJSFunction()) {
+ // The only deferred reference allowed for a JSFunction is the function
+ // prototype.
+ DCHECK_EQ(index, 0);
+ DCHECK(target.IsJSReceiver());
+ if (!SetFunctionPrototype(JSFunction::cast(container),
+ JSReceiver::cast(target))) {
+ AllowGarbageCollection allow_gc;
+ Throw("Can't reuse function prototype");
+ return;
+ }
} else {
UNREACHABLE();
}
diff --git a/deps/v8/src/web-snapshot/web-snapshot.h b/deps/v8/src/web-snapshot/web-snapshot.h
index 4dd89debca..25a76f3572 100644
--- a/deps/v8/src/web-snapshot/web-snapshot.h
+++ b/deps/v8/src/web-snapshot/web-snapshot.h
@@ -155,6 +155,15 @@ class V8_EXPORT WebSnapshotSerializer
// Returns true if the object was already in the map, false if it was added.
bool InsertIntoIndexMap(ObjectCacheIndexMap& map, Handle<HeapObject> object,
uint32_t& id);
+
+ void Discovery(Handle<Object> object);
+ void DiscoverFunction(Handle<JSFunction> function);
+ void DiscoverClass(Handle<JSFunction> function);
+ void DiscoverContextAndPrototype(Handle<JSFunction> function);
+ void DiscoverContext(Handle<Context> context);
+ void DiscoverArray(Handle<JSArray> array);
+ void DiscoverObject(Handle<JSObject> object);
+
void SerializeSource(ValueSerializer* serializer,
Handle<JSFunction> function);
void SerializeFunctionInfo(ValueSerializer* serializer,
@@ -162,16 +171,22 @@ class V8_EXPORT WebSnapshotSerializer
void SerializeString(Handle<String> string, uint32_t& id);
void SerializeMap(Handle<Map> map, uint32_t& id);
- void SerializeFunction(Handle<JSFunction> function, uint32_t& id);
- void SerializeClass(Handle<JSFunction> function, uint32_t& id);
- void SerializeContext(Handle<Context> context, uint32_t& id);
- void SerializeArray(Handle<JSArray> array, uint32_t& id);
- void SerializePendingArray(Handle<JSArray> array);
- void SerializeObject(Handle<JSObject> object, uint32_t& id);
- void SerializePendingObject(Handle<JSObject> object);
+
+ void SerializeFunction(Handle<JSFunction> function);
+ void SerializeClass(Handle<JSFunction> function);
+ void SerializeContext(Handle<Context> context);
+ void SerializeArray(Handle<JSArray> array);
+ void SerializeObject(Handle<JSObject> object);
+
void SerializeExport(Handle<JSObject> object, Handle<String> export_name);
void WriteValue(Handle<Object> object, ValueSerializer& serializer);
+ uint32_t GetFunctionId(JSFunction function);
+ uint32_t GetClassId(JSFunction function);
+ uint32_t GetContextId(Context context);
+ uint32_t GetArrayId(JSArray array);
+ uint32_t GetObjectId(JSObject object);
+
ValueSerializer string_serializer_;
ValueSerializer map_serializer_;
ValueSerializer context_serializer_;
@@ -181,6 +196,14 @@ class V8_EXPORT WebSnapshotSerializer
ValueSerializer object_serializer_;
ValueSerializer export_serializer_;
+ // These are needed for serializing items in the right order.
+ Handle<ArrayList> contexts_;
+ Handle<ArrayList> functions_;
+ Handle<ArrayList> classes_;
+ Handle<ArrayList> arrays_;
+ Handle<ArrayList> objects_;
+
+ // ObjectCacheIndexMap implements a fast item -> id lookup.
ObjectCacheIndexMap string_ids_;
ObjectCacheIndexMap map_ids_;
ObjectCacheIndexMap context_ids_;
@@ -190,8 +213,7 @@ class V8_EXPORT WebSnapshotSerializer
ObjectCacheIndexMap object_ids_;
uint32_t export_count_ = 0;
- std::queue<Handle<JSObject>> pending_objects_;
- std::queue<Handle<JSArray>> pending_arrays_;
+ std::queue<Handle<Object>> discovery_queue_;
};
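
The per-kind fields above come in pairs: an ArrayList that preserves discovery order and an ObjectCacheIndexMap that answers item -> id lookups. The same shape can be sketched with STL containers as an insertion-ordered interner (OrderedInterner is an invented name; the sketch assumes T is hashable):

#include <cstdint>
#include <unordered_map>
#include <vector>

// Keeps both the order in which items were first seen and a fast item -> id map.
template <typename T>
class OrderedInterner {
 public:
  // Returns the existing id, or assigns the next id in insertion order.
  uint32_t Intern(const T& item) {
    auto it = ids_.find(item);
    if (it != ids_.end()) return it->second;
    uint32_t id = static_cast<uint32_t>(items_.size());
    items_.push_back(item);
    ids_.emplace(item, id);
    return id;
  }
  const std::vector<T>& InOrder() const { return items_; }

 private:
  std::vector<T> items_;
  std::unordered_map<T, uint32_t> ids_;
};
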
class V8_EXPORT WebSnapshotDeserializer
@@ -213,6 +235,8 @@ class V8_EXPORT WebSnapshotDeserializer
private:
bool Deserialize();
+ bool DeserializeSnapshot();
+ bool DeserializeScript();
WebSnapshotDeserializer(const WebSnapshotDeserializer&) = delete;
WebSnapshotDeserializer& operator=(const WebSnapshotDeserializer&) = delete;
@@ -224,8 +248,8 @@ class V8_EXPORT WebSnapshotDeserializer
Handle<ScopeInfo> CreateScopeInfo(uint32_t variable_count, bool has_parent,
ContextType context_type);
Handle<JSFunction> CreateJSFunction(int index, uint32_t start,
- uint32_t length, uint32_t flags,
- uint32_t context_id);
+ uint32_t length, uint32_t parameter_count,
+ uint32_t flags, uint32_t context_id);
void DeserializeFunctionData(uint32_t count, uint32_t current_count);
void DeserializeFunctions();
void DeserializeClasses();
@@ -236,6 +260,8 @@ class V8_EXPORT WebSnapshotDeserializer
Handle<Object>& value, Representation& representation,
Handle<Object> object_for_deferred_reference = Handle<Object>(),
uint32_t index_for_deferred_reference = 0);
+ void ReadFunctionPrototype(Handle<JSFunction> function);
+ bool SetFunctionPrototype(JSFunction function, JSReceiver prototype);
void AddDeferredReference(Handle<Object> container, uint32_t index,
ValueType target_type,